summaryrefslogtreecommitdiffstats
path: root/python
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-19 17:20:00 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-19 17:20:00 +0000
commit8daa83a594a2e98f39d764422bfbdbc62c9efd44 (patch)
tree4099e8021376c7d8c05bdf8503093d80e9c7bad0 /python
parentInitial commit. (diff)
downloadsamba-8daa83a594a2e98f39d764422bfbdbc62c9efd44.tar.xz
samba-8daa83a594a2e98f39d764422bfbdbc62c9efd44.zip
Adding upstream version 2:4.20.0+dfsg.upstream/2%4.20.0+dfsgupstream
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'python')
-rwxr-xr-xpython/examples/dnsserver.py87
-rw-r--r--python/examples/netbios.py28
-rwxr-xr-xpython/examples/samr.py127
-rwxr-xr-xpython/examples/winreg.py94
-rw-r--r--python/modules.c114
-rw-r--r--python/modules.h33
-rw-r--r--python/py3compat.h51
-rw-r--r--python/pyglue.c672
-rw-r--r--python/samba/__init__.py400
-rw-r--r--python/samba/auth_util.py34
-rw-r--r--python/samba/colour.py175
-rw-r--r--python/samba/common.py107
-rw-r--r--python/samba/dbchecker.py2935
-rw-r--r--python/samba/descriptor.py723
-rw-r--r--python/samba/dnsresolver.py68
-rw-r--r--python/samba/dnsserver.py405
-rw-r--r--python/samba/domain_update.py573
-rw-r--r--python/samba/drs_utils.py456
-rw-r--r--python/samba/emulate/__init__.py16
-rw-r--r--python/samba/emulate/traffic.py2415
-rw-r--r--python/samba/emulate/traffic_packets.py973
-rw-r--r--python/samba/forest_update.py543
-rw-r--r--python/samba/functional_level.py83
-rw-r--r--python/samba/getopt.py539
-rw-r--r--python/samba/gkdi.py397
-rw-r--r--python/samba/gp/__init__.py17
-rw-r--r--python/samba/gp/gp_centrify_crontab_ext.py135
-rw-r--r--python/samba/gp/gp_centrify_sudoers_ext.py80
-rw-r--r--python/samba/gp/gp_cert_auto_enroll_ext.py572
-rw-r--r--python/samba/gp/gp_chromium_ext.py473
-rw-r--r--python/samba/gp/gp_drive_maps_ext.py168
-rw-r--r--python/samba/gp/gp_ext_loader.py59
-rw-r--r--python/samba/gp/gp_firefox_ext.py219
-rw-r--r--python/samba/gp/gp_firewalld_ext.py171
-rw-r--r--python/samba/gp/gp_gnome_settings_ext.py418
-rw-r--r--python/samba/gp/gp_msgs_ext.py96
-rw-r--r--python/samba/gp/gp_scripts_ext.py187
-rw-r--r--python/samba/gp/gp_sec_ext.py221
-rw-r--r--python/samba/gp/gp_smb_conf_ext.py127
-rw-r--r--python/samba/gp/gp_sudoers_ext.py116
-rw-r--r--python/samba/gp/gpclass.py1312
-rw-r--r--python/samba/gp/util/logging.py112
-rw-r--r--python/samba/gp/vgp_access_ext.py178
-rw-r--r--python/samba/gp/vgp_files_ext.py140
-rw-r--r--python/samba/gp/vgp_issue_ext.py90
-rw-r--r--python/samba/gp/vgp_motd_ext.py90
-rw-r--r--python/samba/gp/vgp_openssh_ext.py115
-rw-r--r--python/samba/gp/vgp_startup_scripts_ext.py136
-rw-r--r--python/samba/gp/vgp_sudoers_ext.py97
-rw-r--r--python/samba/gp/vgp_symlink_ext.py76
-rw-r--r--python/samba/gp_parse/__init__.py185
-rw-r--r--python/samba/gp_parse/gp_aas.py25
-rw-r--r--python/samba/gp_parse/gp_csv.py102
-rw-r--r--python/samba/gp_parse/gp_inf.py378
-rw-r--r--python/samba/gp_parse/gp_ini.py228
-rw-r--r--python/samba/gp_parse/gp_pol.py151
-rw-r--r--python/samba/graph.py820
-rw-r--r--python/samba/hostconfig.py81
-rw-r--r--python/samba/idmap.py99
-rw-r--r--python/samba/join.py1786
-rw-r--r--python/samba/kcc/__init__.py2754
-rw-r--r--python/samba/kcc/debug.py61
-rw-r--r--python/samba/kcc/graph.py859
-rw-r--r--python/samba/kcc/graph_utils.py343
-rw-r--r--python/samba/kcc/kcc_utils.py2364
-rw-r--r--python/samba/kcc/ldif_import_export.py403
-rw-r--r--python/samba/logger.py69
-rw-r--r--python/samba/mdb_util.py43
-rw-r--r--python/samba/ms_display_specifiers.py195
-rw-r--r--python/samba/ms_forest_updates_markdown.py309
-rw-r--r--python/samba/ms_schema.py337
-rw-r--r--python/samba/ms_schema_markdown.py78
-rw-r--r--python/samba/ndr.py153
-rw-r--r--python/samba/netcmd/__init__.py396
-rw-r--r--python/samba/netcmd/common.py161
-rw-r--r--python/samba/netcmd/computer.py729
-rw-r--r--python/samba/netcmd/contact.py861
-rw-r--r--python/samba/netcmd/dbcheck.py193
-rw-r--r--python/samba/netcmd/delegation.py689
-rw-r--r--python/samba/netcmd/dns.py1394
-rw-r--r--python/samba/netcmd/domain/__init__.py73
-rw-r--r--python/samba/netcmd/domain/auth/__init__.py35
-rw-r--r--python/samba/netcmd/domain/auth/policy.py685
-rw-r--r--python/samba/netcmd/domain/auth/silo.py402
-rw-r--r--python/samba/netcmd/domain/auth/silo_member.py201
-rw-r--r--python/samba/netcmd/domain/backup.py1256
-rw-r--r--python/samba/netcmd/domain/claim/__init__.py35
-rw-r--r--python/samba/netcmd/domain/claim/claim_type.py361
-rw-r--r--python/samba/netcmd/domain/claim/value_type.py105
-rw-r--r--python/samba/netcmd/domain/classicupgrade.py189
-rw-r--r--python/samba/netcmd/domain/common.py64
-rw-r--r--python/samba/netcmd/domain/dcpromo.py90
-rw-r--r--python/samba/netcmd/domain/demote.py335
-rw-r--r--python/samba/netcmd/domain/functional_prep.py145
-rw-r--r--python/samba/netcmd/domain/info.py58
-rw-r--r--python/samba/netcmd/domain/join.py146
-rw-r--r--python/samba/netcmd/domain/keytab.py55
-rw-r--r--python/samba/netcmd/domain/leave.py59
-rw-r--r--python/samba/netcmd/domain/level.py250
-rw-r--r--python/samba/netcmd/domain/models/__init__.py32
-rw-r--r--python/samba/netcmd/domain/models/auth_policy.py109
-rw-r--r--python/samba/netcmd/domain/models/auth_silo.py104
-rw-r--r--python/samba/netcmd/domain/models/claim_type.py58
-rw-r--r--python/samba/netcmd/domain/models/exceptions.py64
-rw-r--r--python/samba/netcmd/domain/models/fields.py507
-rw-r--r--python/samba/netcmd/domain/models/group.py42
-rw-r--r--python/samba/netcmd/domain/models/model.py426
-rw-r--r--python/samba/netcmd/domain/models/query.py81
-rw-r--r--python/samba/netcmd/domain/models/schema.py124
-rw-r--r--python/samba/netcmd/domain/models/site.py47
-rw-r--r--python/samba/netcmd/domain/models/subnet.py45
-rw-r--r--python/samba/netcmd/domain/models/user.py75
-rw-r--r--python/samba/netcmd/domain/models/value_type.py96
-rw-r--r--python/samba/netcmd/domain/passwordsettings.py316
-rw-r--r--python/samba/netcmd/domain/provision.py405
-rw-r--r--python/samba/netcmd/domain/samba3upgrade.py34
-rw-r--r--python/samba/netcmd/domain/schemaupgrade.py350
-rw-r--r--python/samba/netcmd/domain/tombstones.py116
-rw-r--r--python/samba/netcmd/domain/trust.py2338
-rw-r--r--python/samba/netcmd/drs.py874
-rw-r--r--python/samba/netcmd/dsacl.py217
-rw-r--r--python/samba/netcmd/encoders.py49
-rw-r--r--python/samba/netcmd/forest.py167
-rw-r--r--python/samba/netcmd/fsmo.py535
-rw-r--r--python/samba/netcmd/gpcommon.py55
-rw-r--r--python/samba/netcmd/gpo.py4513
-rw-r--r--python/samba/netcmd/group.py1416
-rw-r--r--python/samba/netcmd/ldapcmp.py984
-rw-r--r--python/samba/netcmd/main.py98
-rw-r--r--python/samba/netcmd/nettime.py60
-rw-r--r--python/samba/netcmd/ntacl.py503
-rw-r--r--python/samba/netcmd/ou.py411
-rw-r--r--python/samba/netcmd/processes.py142
-rw-r--r--python/samba/netcmd/pso.py794
-rw-r--r--python/samba/netcmd/rodc.py163
-rw-r--r--python/samba/netcmd/schema.py319
-rw-r--r--python/samba/netcmd/shell.py74
-rw-r--r--python/samba/netcmd/sites.py348
-rw-r--r--python/samba/netcmd/spn.py210
-rw-r--r--python/samba/netcmd/testparm.py236
-rw-r--r--python/samba/netcmd/user/__init__.py70
-rw-r--r--python/samba/netcmd/user/add.py209
-rw-r--r--python/samba/netcmd/user/add_unix_attrs.py244
-rw-r--r--python/samba/netcmd/user/auth/__init__.py35
-rw-r--r--python/samba/netcmd/user/auth/policy.py170
-rw-r--r--python/samba/netcmd/user/auth/silo.py189
-rw-r--r--python/samba/netcmd/user/delete.py87
-rw-r--r--python/samba/netcmd/user/disable.py64
-rw-r--r--python/samba/netcmd/user/edit.py136
-rw-r--r--python/samba/netcmd/user/enable.py94
-rw-r--r--python/samba/netcmd/user/getgroups.py120
-rw-r--r--python/samba/netcmd/user/list.py108
-rw-r--r--python/samba/netcmd/user/move.py106
-rw-r--r--python/samba/netcmd/user/password.py73
-rw-r--r--python/samba/netcmd/user/readpasswords/__init__.py25
-rw-r--r--python/samba/netcmd/user/readpasswords/common.py907
-rw-r--r--python/samba/netcmd/user/readpasswords/get_kerberos_ticket.py146
-rw-r--r--python/samba/netcmd/user/readpasswords/getpassword.py210
-rw-r--r--python/samba/netcmd/user/readpasswords/show.py144
-rw-r--r--python/samba/netcmd/user/readpasswords/syncpasswords.py878
-rw-r--r--python/samba/netcmd/user/rename.py249
-rw-r--r--python/samba/netcmd/user/sensitive.py83
-rw-r--r--python/samba/netcmd/user/setexpiry.py101
-rw-r--r--python/samba/netcmd/user/setpassword.py161
-rw-r--r--python/samba/netcmd/user/setprimarygroup.py138
-rw-r--r--python/samba/netcmd/user/unlock.py99
-rw-r--r--python/samba/netcmd/validators.py66
-rw-r--r--python/samba/netcmd/visualize.py705
-rw-r--r--python/samba/nt_time.py60
-rw-r--r--python/samba/ntacls.py662
-rw-r--r--python/samba/policies.py388
-rw-r--r--python/samba/provision/__init__.py2524
-rw-r--r--python/samba/provision/backend.py87
-rw-r--r--python/samba/provision/common.py91
-rw-r--r--python/samba/provision/kerberos.py104
-rw-r--r--python/samba/provision/sambadns.py1329
-rw-r--r--python/samba/remove_dc.py466
-rw-r--r--python/samba/safe_tarfile.py94
-rw-r--r--python/samba/samba3/__init__.py409
-rw-r--r--python/samba/samba3/libsmb_samba_internal.py130
-rw-r--r--python/samba/samdb.py1623
-rw-r--r--python/samba/schema.py264
-rw-r--r--python/samba/sd_utils.py231
-rw-r--r--python/samba/sites.py126
-rw-r--r--python/samba/subnets.py247
-rw-r--r--python/samba/subunit/__init__.py85
-rwxr-xr-xpython/samba/subunit/run.py682
-rw-r--r--python/samba/tdb_util.py46
-rw-r--r--python/samba/tests/__init__.py824
-rw-r--r--python/samba/tests/audit_log_base.py206
-rw-r--r--python/samba/tests/audit_log_dsdb.py634
-rw-r--r--python/samba/tests/audit_log_pass_change.py331
-rw-r--r--python/samba/tests/auth.py102
-rwxr-xr-xpython/samba/tests/auth_log.py1489
-rw-r--r--python/samba/tests/auth_log_base.py221
-rw-r--r--python/samba/tests/auth_log_ncalrpc.py102
-rw-r--r--python/samba/tests/auth_log_netlogon.py134
-rw-r--r--python/samba/tests/auth_log_netlogon_bad_creds.py190
-rw-r--r--python/samba/tests/auth_log_pass_change.py282
-rw-r--r--python/samba/tests/auth_log_samlogon.py181
-rw-r--r--python/samba/tests/auth_log_winbind.py460
-rwxr-xr-xpython/samba/tests/bin/cepces-submit18
-rwxr-xr-xpython/samba/tests/bin/crontab29
-rwxr-xr-xpython/samba/tests/bin/firewall-cmd114
-rwxr-xr-xpython/samba/tests/bin/getcert84
-rwxr-xr-xpython/samba/tests/bin/gio11
-rw-r--r--python/samba/tests/blackbox/__init__.py17
-rw-r--r--python/samba/tests/blackbox/bug13653.py216
-rw-r--r--python/samba/tests/blackbox/check_output.py108
-rwxr-xr-xpython/samba/tests/blackbox/claims.py526
-rw-r--r--python/samba/tests/blackbox/downgradedatabase.py167
-rw-r--r--python/samba/tests/blackbox/mdsearch.py126
-rw-r--r--python/samba/tests/blackbox/ndrdump.py563
-rw-r--r--python/samba/tests/blackbox/netads_dns.py83
-rw-r--r--python/samba/tests/blackbox/netads_json.py81
-rwxr-xr-xpython/samba/tests/blackbox/rpcd_witness_samba_only.py1338
-rw-r--r--python/samba/tests/blackbox/samba_dnsupdate.py125
-rw-r--r--python/samba/tests/blackbox/smbcacls.py148
-rw-r--r--python/samba/tests/blackbox/smbcacls_basic.py129
-rw-r--r--python/samba/tests/blackbox/smbcacls_dfs_propagate_inherit.py84
-rw-r--r--python/samba/tests/blackbox/smbcacls_propagate_inhertance.py1290
-rw-r--r--python/samba/tests/blackbox/smbcacls_save_restore.py205
-rw-r--r--python/samba/tests/blackbox/smbcontrol.py82
-rw-r--r--python/samba/tests/blackbox/smbcontrol_process.py131
-rw-r--r--python/samba/tests/blackbox/testdata/traffic-sample-very-short.model61
-rw-r--r--python/samba/tests/blackbox/testdata/traffic-sample-very-short.txt50
-rw-r--r--python/samba/tests/blackbox/testdata/traffic_learner.expected61
-rw-r--r--python/samba/tests/blackbox/testdata/traffic_replay-0.expected18
-rw-r--r--python/samba/tests/blackbox/testdata/traffic_replay-1.expected19
-rw-r--r--python/samba/tests/blackbox/testdata/traffic_replay-2.expected17
-rw-r--r--python/samba/tests/blackbox/testdata/traffic_replay-3.expected11
-rw-r--r--python/samba/tests/blackbox/testdata/traffic_replay.expected18
-rw-r--r--python/samba/tests/blackbox/testdata/traffic_summary.expected29
-rw-r--r--python/samba/tests/blackbox/testdata/traffic_summary.pdml4989
-rw-r--r--python/samba/tests/blackbox/traffic_learner.py71
-rw-r--r--python/samba/tests/blackbox/traffic_replay.py100
-rw-r--r--python/samba/tests/blackbox/traffic_summary.py53
-rw-r--r--python/samba/tests/common.py66
-rw-r--r--python/samba/tests/complex_expressions.py487
-rw-r--r--python/samba/tests/compression.py210
-rw-r--r--python/samba/tests/conditional_ace_assembler.py227
-rw-r--r--python/samba/tests/conditional_ace_bytes.py95
-rw-r--r--python/samba/tests/conditional_ace_claims.py901
-rw-r--r--python/samba/tests/core.py83
-rw-r--r--python/samba/tests/cred_opt.py155
-rw-r--r--python/samba/tests/credentials.py501
-rw-r--r--python/samba/tests/dcerpc/__init__.py19
-rw-r--r--python/samba/tests/dcerpc/array.py206
-rw-r--r--python/samba/tests/dcerpc/bare.py61
-rw-r--r--python/samba/tests/dcerpc/binding.py101
-rw-r--r--python/samba/tests/dcerpc/createtrustrelax.py129
-rw-r--r--python/samba/tests/dcerpc/dnsserver.py1314
-rw-r--r--python/samba/tests/dcerpc/integer.py250
-rw-r--r--python/samba/tests/dcerpc/lsa.py333
-rw-r--r--python/samba/tests/dcerpc/mdssvc.py194
-rw-r--r--python/samba/tests/dcerpc/misc.py101
-rwxr-xr-xpython/samba/tests/dcerpc/raw_protocol.py7514
-rw-r--r--python/samba/tests/dcerpc/raw_testcase.py1177
-rw-r--r--python/samba/tests/dcerpc/registry.py51
-rw-r--r--python/samba/tests/dcerpc/rpc_talloc.py86
-rw-r--r--python/samba/tests/dcerpc/rpcecho.py71
-rw-r--r--python/samba/tests/dcerpc/sam.py783
-rw-r--r--python/samba/tests/dcerpc/samr_change_password.py187
-rw-r--r--python/samba/tests/dcerpc/srvsvc.py68
-rw-r--r--python/samba/tests/dcerpc/string_tests.py132
-rw-r--r--python/samba/tests/dcerpc/testrpc.py143
-rw-r--r--python/samba/tests/dcerpc/unix.py43
-rw-r--r--python/samba/tests/dckeytab.py64
-rw-r--r--python/samba/tests/dns.py2247
-rw-r--r--python/samba/tests/dns_aging.py2777
-rw-r--r--python/samba/tests/dns_base.py437
-rw-r--r--python/samba/tests/dns_forwarder.py600
-rw-r--r--python/samba/tests/dns_forwarder_helpers/server.py104
-rw-r--r--python/samba/tests/dns_invalid.py80
-rw-r--r--python/samba/tests/dns_packet.py230
-rw-r--r--python/samba/tests/dns_tkey.py208
-rw-r--r--python/samba/tests/dns_wildcard.py336
-rw-r--r--python/samba/tests/docs.py511
-rw-r--r--python/samba/tests/domain_backup.py624
-rw-r--r--python/samba/tests/domain_backup_offline.py252
-rw-r--r--python/samba/tests/dsdb.py1223
-rw-r--r--python/samba/tests/dsdb_api.py57
-rw-r--r--python/samba/tests/dsdb_dns.py85
-rw-r--r--python/samba/tests/dsdb_lock.py374
-rw-r--r--python/samba/tests/dsdb_schema_attributes.py249
-rw-r--r--python/samba/tests/emulate/__init__.py17
-rw-r--r--python/samba/tests/emulate/traffic.py164
-rw-r--r--python/samba/tests/emulate/traffic_packet.py736
-rw-r--r--python/samba/tests/encrypted_secrets.py83
-rw-r--r--python/samba/tests/gensec.py259
-rw-r--r--python/samba/tests/get_opt.py69
-rw-r--r--python/samba/tests/getdcname.py700
-rw-r--r--python/samba/tests/gkdi.py647
-rw-r--r--python/samba/tests/glue.py90
-rw-r--r--python/samba/tests/gpo.py8192
-rw-r--r--python/samba/tests/gpo_member.py39
-rw-r--r--python/samba/tests/graph.py532
-rw-r--r--python/samba/tests/group_audit.py395
-rw-r--r--python/samba/tests/hostconfig.py74
-rw-r--r--python/samba/tests/imports.py31
-rw-r--r--python/samba/tests/join.py175
-rw-r--r--python/samba/tests/kcc/__init__.py90
-rw-r--r--python/samba/tests/kcc/graph.py67
-rw-r--r--python/samba/tests/kcc/graph_utils.py165
-rw-r--r--python/samba/tests/kcc/kcc_utils.py393
-rw-r--r--python/samba/tests/kcc/ldif_import_export.py240
-rwxr-xr-xpython/samba/tests/krb5/alias_tests.py202
-rwxr-xr-xpython/samba/tests/krb5/as_canonicalization_tests.py474
-rwxr-xr-xpython/samba/tests/krb5/as_req_tests.py606
-rwxr-xr-xpython/samba/tests/krb5/authn_policy_tests.py8903
-rwxr-xr-xpython/samba/tests/krb5/claims_in_pac.py490
-rwxr-xr-xpython/samba/tests/krb5/claims_tests.py2032
-rwxr-xr-xpython/samba/tests/krb5/compatability_tests.py227
-rwxr-xr-xpython/samba/tests/krb5/conditional_ace_tests.py5588
-rwxr-xr-xpython/samba/tests/krb5/device_tests.py2211
-rwxr-xr-xpython/samba/tests/krb5/etype_tests.py597
-rwxr-xr-xpython/samba/tests/krb5/fast_tests.py2108
-rwxr-xr-xpython/samba/tests/krb5/gkdi_tests.py745
-rwxr-xr-xpython/samba/tests/krb5/group_tests.py1967
-rwxr-xr-xpython/samba/tests/krb5/kcrypto.py969
-rw-r--r--python/samba/tests/krb5/kdc_base_test.py3755
-rwxr-xr-xpython/samba/tests/krb5/kdc_tests.py228
-rwxr-xr-xpython/samba/tests/krb5/kdc_tgs_tests.py3506
-rwxr-xr-xpython/samba/tests/krb5/kdc_tgt_tests.py86
-rwxr-xr-xpython/samba/tests/krb5/kpasswd_tests.py983
-rwxr-xr-xpython/samba/tests/krb5/lockout_tests.py1137
-rwxr-xr-xpython/samba/tests/krb5/ms_kile_client_principal_lookup_tests.py818
-rwxr-xr-xpython/samba/tests/krb5/nt_hash_tests.py142
-rwxr-xr-xpython/samba/tests/krb5/pac_align_tests.py93
-rwxr-xr-xpython/samba/tests/krb5/pkinit_tests.py1211
-rwxr-xr-xpython/samba/tests/krb5/protected_users_tests.py1053
-rwxr-xr-xpython/samba/tests/krb5/pyasn1_regen.sh42
-rw-r--r--python/samba/tests/krb5/raw_testcase.py6221
-rw-r--r--python/samba/tests/krb5/rfc4120.asn11908
-rw-r--r--python/samba/tests/krb5/rfc4120_constants.py247
-rw-r--r--python/samba/tests/krb5/rfc4120_pyasn1.py92
-rw-r--r--python/samba/tests/krb5/rfc4120_pyasn1_generated.py2690
-rwxr-xr-xpython/samba/tests/krb5/rodc_tests.py77
-rwxr-xr-xpython/samba/tests/krb5/s4u_tests.py1838
-rwxr-xr-xpython/samba/tests/krb5/salt_tests.py469
-rwxr-xr-xpython/samba/tests/krb5/simple_tests.py185
-rwxr-xr-xpython/samba/tests/krb5/spn_tests.py212
-rwxr-xr-xpython/samba/tests/krb5/test_ccache.py173
-rwxr-xr-xpython/samba/tests/krb5/test_idmap_nss.py232
-rwxr-xr-xpython/samba/tests/krb5/test_ldap.py168
-rwxr-xr-xpython/samba/tests/krb5/test_min_domain_uid.py122
-rwxr-xr-xpython/samba/tests/krb5/test_rpc.py138
-rwxr-xr-xpython/samba/tests/krb5/test_smb.py153
-rwxr-xr-xpython/samba/tests/krb5/xrealm_tests.py187
-rw-r--r--python/samba/tests/krb5_credentials.py111
-rw-r--r--python/samba/tests/ldap_raw.py939
-rw-r--r--python/samba/tests/ldap_referrals.py87
-rw-r--r--python/samba/tests/ldap_spn.py924
-rw-r--r--python/samba/tests/ldap_upn_sam_account.py510
-rw-r--r--python/samba/tests/ldap_whoami.py38
-rw-r--r--python/samba/tests/libsmb-basic.py268
-rw-r--r--python/samba/tests/libsmb.py55
-rw-r--r--python/samba/tests/loadparm.py84
-rw-r--r--python/samba/tests/logfiles.py381
-rw-r--r--python/samba/tests/lsa_string.py68
-rw-r--r--python/samba/tests/messaging.py174
-rwxr-xr-xpython/samba/tests/ndr/gkdi.py397
-rwxr-xr-xpython/samba/tests/ndr/gmsa.py99
-rw-r--r--python/samba/tests/ndr/wbint.py139
-rw-r--r--python/samba/tests/net_join.py63
-rw-r--r--python/samba/tests/net_join_no_spnego.py90
-rw-r--r--python/samba/tests/netbios.py65
-rw-r--r--python/samba/tests/netcmd.py165
-rw-r--r--python/samba/tests/netlogonsvc.py66
-rw-r--r--python/samba/tests/ntacls.py87
-rw-r--r--python/samba/tests/ntacls_backup.py198
-rw-r--r--python/samba/tests/ntlm_auth.py342
-rw-r--r--python/samba/tests/ntlm_auth_base.py210
-rw-r--r--python/samba/tests/ntlm_auth_krb5.py83
-rw-r--r--python/samba/tests/ntlmdisabled.py84
-rw-r--r--python/samba/tests/pam_winbind.py72
-rw-r--r--python/samba/tests/pam_winbind_chauthtok.py42
-rw-r--r--python/samba/tests/pam_winbind_setcred.py56
-rw-r--r--python/samba/tests/pam_winbind_warn_pwd_expire.py52
-rw-r--r--python/samba/tests/param.py107
-rw-r--r--python/samba/tests/password_hash.py335
-rw-r--r--python/samba/tests/password_hash_fl2003.py196
-rw-r--r--python/samba/tests/password_hash_fl2008.py207
-rw-r--r--python/samba/tests/password_hash_gpgme.py293
-rw-r--r--python/samba/tests/password_hash_ldap.py129
-rw-r--r--python/samba/tests/password_quality.py52
-rw-r--r--python/samba/tests/password_test.py59
-rw-r--r--python/samba/tests/policy.py34
-rw-r--r--python/samba/tests/posixacl.py878
-rw-r--r--python/samba/tests/prefork_restart.py462
-rw-r--r--python/samba/tests/process_limits.py70
-rw-r--r--python/samba/tests/provision.py201
-rw-r--r--python/samba/tests/pso.py272
-rw-r--r--python/samba/tests/py_credentials.py677
-rw-r--r--python/samba/tests/registry.py79
-rw-r--r--python/samba/tests/reparsepoints.py241
-rw-r--r--python/samba/tests/s3_net_join.py77
-rw-r--r--python/samba/tests/s3idmapdb.py57
-rw-r--r--python/samba/tests/s3param.py50
-rw-r--r--python/samba/tests/s3passdb.py138
-rw-r--r--python/samba/tests/s3registry.py53
-rw-r--r--python/samba/tests/s3windb.py45
-rw-r--r--python/samba/tests/safe_tarfile.py81
-rw-r--r--python/samba/tests/samba3sam.py1125
-rw-r--r--python/samba/tests/samba_startup_fl_change.py180
-rw-r--r--python/samba/tests/samba_tool/__init__.py15
-rw-r--r--python/samba/tests/samba_tool/base.py137
-rw-r--r--python/samba/tests/samba_tool/computer.py378
-rwxr-xr-xpython/samba/tests/samba_tool/computer_edit.sh197
-rw-r--r--python/samba/tests/samba_tool/contact.py468
-rwxr-xr-xpython/samba/tests/samba_tool/contact_edit.sh183
-rw-r--r--python/samba/tests/samba_tool/demote.py106
-rw-r--r--python/samba/tests/samba_tool/dnscmd.py1506
-rw-r--r--python/samba/tests/samba_tool/domain_auth_policy.py1517
-rw-r--r--python/samba/tests/samba_tool/domain_auth_silo.py618
-rw-r--r--python/samba/tests/samba_tool/domain_claim.py608
-rw-r--r--python/samba/tests/samba_tool/domain_models.py416
-rw-r--r--python/samba/tests/samba_tool/drs_clone_dc_data_lmdb_size.py119
-rw-r--r--python/samba/tests/samba_tool/dsacl.py211
-rw-r--r--python/samba/tests/samba_tool/forest.py70
-rw-r--r--python/samba/tests/samba_tool/fsmo.py52
-rw-r--r--python/samba/tests/samba_tool/gpo.py1847
-rw-r--r--python/samba/tests/samba_tool/gpo_exts.py202
-rw-r--r--python/samba/tests/samba_tool/group.py613
-rwxr-xr-xpython/samba/tests/samba_tool/group_edit.sh228
-rw-r--r--python/samba/tests/samba_tool/help.py81
-rw-r--r--python/samba/tests/samba_tool/join.py31
-rw-r--r--python/samba/tests/samba_tool/join_lmdb_size.py152
-rw-r--r--python/samba/tests/samba_tool/join_member.py71
-rw-r--r--python/samba/tests/samba_tool/ntacl.py247
-rw-r--r--python/samba/tests/samba_tool/ou.py291
-rw-r--r--python/samba/tests/samba_tool/passwordsettings.py484
-rw-r--r--python/samba/tests/samba_tool/processes.py42
-rw-r--r--python/samba/tests/samba_tool/promote_dc_lmdb_size.py174
-rw-r--r--python/samba/tests/samba_tool/provision_lmdb_size.py132
-rw-r--r--python/samba/tests/samba_tool/provision_password_check.py57
-rw-r--r--python/samba/tests/samba_tool/provision_userPassword_crypt.py67
-rw-r--r--python/samba/tests/samba_tool/rodc.py131
-rw-r--r--python/samba/tests/samba_tool/schema.py109
-rw-r--r--python/samba/tests/samba_tool/silo_base.py229
-rw-r--r--python/samba/tests/samba_tool/sites.py205
-rw-r--r--python/samba/tests/samba_tool/timecmd.py44
-rw-r--r--python/samba/tests/samba_tool/user.py1246
-rw-r--r--python/samba/tests/samba_tool/user_auth_policy.py86
-rw-r--r--python/samba/tests/samba_tool/user_auth_silo.py84
-rw-r--r--python/samba/tests/samba_tool/user_check_password_script.py106
-rwxr-xr-xpython/samba/tests/samba_tool/user_edit.sh198
-rw-r--r--python/samba/tests/samba_tool/user_get_kerberos_ticket.py195
-rw-r--r--python/samba/tests/samba_tool/user_getpassword_gmsa.py171
-rw-r--r--python/samba/tests/samba_tool/user_virtualCryptSHA.py516
-rw-r--r--python/samba/tests/samba_tool/user_virtualCryptSHA_base.py99
-rw-r--r--python/samba/tests/samba_tool/user_virtualCryptSHA_gpg.py262
-rw-r--r--python/samba/tests/samba_tool/user_virtualCryptSHA_userPassword.py188
-rw-r--r--python/samba/tests/samba_tool/user_wdigest.py450
-rw-r--r--python/samba/tests/samba_tool/visualize.py618
-rw-r--r--python/samba/tests/samba_tool/visualize_drs.py636
-rw-r--r--python/samba/tests/samba_upgradedns_lmdb.py75
-rw-r--r--python/samba/tests/samdb.py66
-rw-r--r--python/samba/tests/samdb_api.py148
-rw-r--r--python/samba/tests/sddl.py894
-rw-r--r--python/samba/tests/sddl_conditional_ace.py52
-rw-r--r--python/samba/tests/security.py209
-rw-r--r--python/samba/tests/security_descriptors.py216
-rw-r--r--python/samba/tests/segfault.py243
-rw-r--r--python/samba/tests/sid_strings.py608
-rwxr-xr-xpython/samba/tests/smb-notify.py429
-rw-r--r--python/samba/tests/smb.py236
-rw-r--r--python/samba/tests/smb1posix.py71
-rw-r--r--python/samba/tests/smb2symlink.py216
-rw-r--r--python/samba/tests/smb3unix.py418
-rw-r--r--python/samba/tests/smbconf.py352
-rw-r--r--python/samba/tests/smbd_base.py48
-rw-r--r--python/samba/tests/smbd_fuzztest.py76
-rw-r--r--python/samba/tests/source.py242
-rwxr-xr-xpython/samba/tests/source_chars.py326
-rw-r--r--python/samba/tests/strings.py99
-rw-r--r--python/samba/tests/subunitrun.py63
-rw-r--r--python/samba/tests/tdb_util.py50
-rwxr-xr-xpython/samba/tests/test_pam_winbind.sh46
-rwxr-xr-xpython/samba/tests/test_pam_winbind_chauthtok.sh77
-rwxr-xr-xpython/samba/tests/test_pam_winbind_setcred.sh46
-rwxr-xr-xpython/samba/tests/test_pam_winbind_warn_pwd_expire.sh75
-rw-r--r--python/samba/tests/token_factory.py256
-rw-r--r--python/samba/tests/upgrade.py40
-rw-r--r--python/samba/tests/upgradeprovision.py155
-rw-r--r--python/samba/tests/upgradeprovisionneeddc.py181
-rw-r--r--python/samba/tests/usage.py380
-rw-r--r--python/samba/tests/xattr.py159
-rw-r--r--python/samba/trust_utils.py62
-rw-r--r--python/samba/upgrade.py849
-rw-r--r--python/samba/upgradehelpers.py834
-rw-r--r--python/samba/uptodateness.py201
-rw-r--r--python/samba/xattr.py60
-rw-r--r--python/wscript140
494 files changed, 219600 insertions, 0 deletions
diff --git a/python/examples/dnsserver.py b/python/examples/dnsserver.py
new file mode 100755
index 0000000..caca998
--- /dev/null
+++ b/python/examples/dnsserver.py
@@ -0,0 +1,87 @@
+#!/usr/bin/env python3
+
+# script to test the dnsserver RPC protocol
+
+import sys
+from optparse import OptionParser
+
+sys.path.insert(0, "bin/python")
+
+import samba.getopt as options
+from samba.dcerpc import dnsserver, dnsp
+
+
+########### main code ###########
if __name__ == "__main__":
    # Usage string now matches the three required positional arguments
    # checked below.
    parser = OptionParser("dnsserver.py [options] DNSSERVER DNSZONE NEWNAME")
    sambaopts = options.SambaOptions(parser)
    # BUG FIX: the SambaOptions group was created but never attached to the
    # parser, so its options (e.g. -s/--configfile) were absent from --help.
    parser.add_option_group(sambaopts)
    credopts = options.CredentialsOptionsDouble(parser)
    parser.add_option_group(credopts)

    (opts, args) = parser.parse_args()

    if len(args) < 3:
        print("Usage: dnsserver.py [options] DNSSERVER DNSZONE NEWNAME")
        sys.exit(1)

    server = args[0]
    dnszone = args[1]
    newname = args[2]

    lp = sambaopts.get_loadparm()
    creds = credopts.get_credentials(lp)

    # An anonymous connection cannot manage DNS records, so require
    # explicit credentials up front.
    if not creds.authentication_requested():
        parser.error("You must supply credentials")

    # TCP binding with NDR print debugging and integrity signing enabled.
    binding_str = "ncacn_ip_tcp:%s[print,sign]" % server

    dns_conn = dnsserver.dnsserver(binding_str, lp, creds)

    print("querying a NS record")
    # 0x00070000 presumably the DNS RPC client version constant and 0x0f the
    # record-data selector -- confirm against MS-DNSP.
    dns_conn.DnssrvEnumRecords2(0x00070000,
                                0,
                                server,
                                dnszone,
                                newname,
                                None,
                                dnsp.DNS_TYPE_NS,
                                0x0f,
                                None,
                                None)

    print("adding a NS glue record")
    name = dnsserver.DNS_RPC_NAME()
    name.str = newname

    # Build an NS record with a one-hour TTL pointing at the new name.
    addrec = dnsserver.DNS_RPC_RECORD()
    addrec.wType = dnsp.DNS_TYPE_NS
    addrec.dwFlags = 0
    addrec.dwSerial = 0
    addrec.dwTtlSeconds = 3600
    addrec.dwTimeStamp = 0
    addrec.dwReserved = 0
    addrec.data = name

    addrecbuf = dnsserver.DNS_RPC_RECORD_BUF()
    addrecbuf.rec = addrec

    dns_conn.DnssrvUpdateRecord2(0x00070000,
                                 0,
                                 server,
                                 dnszone,
                                 newname,
                                 addrecbuf,
                                 None)

    # Query again to observe the record that was just added.
    print("querying the NS record")
    dns_conn.DnssrvEnumRecords2(0x00070000,
                                0,
                                server,
                                dnszone,
                                newname,
                                None,
                                dnsp.DNS_TYPE_NS,
                                0x0f,
                                None,
                                None)
diff --git a/python/examples/netbios.py b/python/examples/netbios.py
new file mode 100644
index 0000000..740d8e3
--- /dev/null
+++ b/python/examples/netbios.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python3
+
+# Unix SMB/CIFS implementation.
+# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2008
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from samba.netbios import Node
+
# Demonstrate a NetBIOS name query against the local network.
node = Node()
responder, found_names, found_addresses = node.query_name(
    "GANIEDA", "192.168.4.0", timeout=4)

print("Received reply from %s:" % (responder, ))
print("Names: %r" % (found_names, ))
print("Addresses: %r" % (found_addresses, ))
diff --git a/python/examples/samr.py b/python/examples/samr.py
new file mode 100755
index 0000000..cbfbb1a
--- /dev/null
+++ b/python/examples/samr.py
@@ -0,0 +1,127 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+# Unix SMB/CIFS implementation.
+# Copyright © Jelmer Vernooij <jelmer@samba.org> 2008
+#
+# Based on samr.js © Andrew Tridgell <tridge@samba.org>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import sys
+
+sys.path.insert(0, "bin/python")
+
+from samba.dcerpc import samr, security
+
+
def display_lsa_string(lsa_str):
    """Return the plain Python string held by an lsa String object.

    The parameter was previously named ``str``, shadowing the builtin;
    the call is positional everywhere, so renaming it is safe.
    """
    return lsa_str.string
+
+
def FillUserInfo(samr, dom_handle, users, level):
    """fill a user array with user information from samrQueryUserInfo

    Each entry in *users* is replaced in place by the info structure
    returned by QueryUserInfo, annotated with the original name and idx.
    """
    for i in range(len(users)):
        # BUG FIX: this previously opened the user against the module-level
        # ``handle`` global instead of the ``dom_handle`` parameter, which
        # only worked by accident when a matching global happened to exist.
        user_handle = samr.OpenUser(dom_handle,
                                    security.SEC_FLAG_MAXIMUM_ALLOWED,
                                    users[i].idx)
        info = samr.QueryUserInfo(user_handle, level)
        info.name = users[i].name
        info.idx = users[i].idx
        users[i] = info
        samr.Close(user_handle)
+
+
def toArray(handle, array, num_entries):
    """Flatten an enumeration result into a list of (idx, name) tuples.

    *handle* is accepted (and ignored) so the enumeration result tuple
    can be splatted straight into this function.
    """
    entries = array.entries
    return [(entries[n].idx, entries[n].name) for n in range(num_entries)]
+
+
def test_Connect(samr):
    """Exercise the samr_Connect interface and return the connect handle."""
    print("Testing samr_Connect")
    access = security.SEC_FLAG_MAXIMUM_ALLOWED
    return samr.Connect2(None, access)
+
+
+def test_LookupDomain(samr, handle, domain):
+ """test the samr_LookupDomain interface"""
+ print("Testing samr_LookupDomain")
+ return samr.LookupDomain(handle, domain)
+
+
def test_OpenDomain(samr, handle, sid):
    """Exercise the samr_OpenDomain interface and return the domain handle."""
    print("Testing samr_OpenDomain")
    access = security.SEC_FLAG_MAXIMUM_ALLOWED
    return samr.OpenDomain(handle, access, sid)
+
+
def test_EnumDomainUsers(samr, dom_handle):
    """Exercise the samr_EnumDomainUsers interface, listing every user."""
    print("Testing samr_EnumDomainUsers")
    result = samr.EnumDomainUsers(dom_handle, 0, 0, 0xffffffff)
    users = toArray(*result)
    print("Found %d users" % len(users))
    for rid, username in users:
        print("\t%s\t(%d)" % (username.string, rid))
+
+
def test_EnumDomainGroups(samr, dom_handle):
    """Exercise the samr_EnumDomainGroups interface, listing every group."""
    print("Testing samr_EnumDomainGroups")
    result = samr.EnumDomainGroups(dom_handle, 0, 0)
    groups = toArray(*result)
    print("Found %d groups" % len(groups))
    for rid, groupname in groups:
        print("\t%s\t(%d)" % (groupname.string, rid))
+
+
def test_domain_ops(samr, dom_handle):
    """Run the domain-specific subtests (users, then groups) on one domain."""
    for subtest in (test_EnumDomainUsers, test_EnumDomainGroups):
        subtest(samr, dom_handle)
+
+
def test_EnumDomains(samr, handle):
    """Exercise samr_EnumDomains, then run the domain tests on each result."""
    print("Testing samr_EnumDomains")

    domains = toArray(*samr.EnumDomains(handle, 0, 0xffffffff))
    print("Found %d domains" % len(domains))
    # First pass: list everything that was found.
    for rid, dom_name in domains:
        print("\t%s (%d)" % (display_lsa_string(dom_name), rid))
    # Second pass: open each domain and exercise the per-domain operations.
    for _, dom_name in domains:
        print("Testing domain %s" % display_lsa_string(dom_name))
        dom_sid = samr.LookupDomain(handle, dom_name)
        dom_handle = test_OpenDomain(samr, handle, dom_sid)
        test_domain_ops(samr, dom_handle)
        samr.Close(dom_handle)
+
+
if len(sys.argv) != 2:
    # BUG FIX: the usage line referred to the old JavaScript port ("samr.js").
    print("Usage: samr.py <BINDING>")
    sys.exit(1)

binding = sys.argv[1]

print("Connecting to %s" % binding)
try:
    samr = samr.samr(binding)
except Exception as e:
    # BUG FIX: Python 3 exceptions have no .message attribute, so the old
    # code raised AttributeError on the error path; format the exception
    # object itself instead.
    print("Failed to connect to %s: %s" % (binding, e))
    sys.exit(1)

# Connect, enumerate every domain (running the per-domain tests), clean up.
handle = test_Connect(samr)
test_EnumDomains(samr, handle)
samr.Close(handle)

print("All OK")
diff --git a/python/examples/winreg.py b/python/examples/winreg.py
new file mode 100755
index 0000000..46c1e02
--- /dev/null
+++ b/python/examples/winreg.py
@@ -0,0 +1,94 @@
+#!/usr/bin/env python3
+#
+# tool to manipulate a remote registry
+# Copyright Andrew Tridgell 2005
+# Copyright Jelmer Vernooij 2007
+# Released under the GNU GPL v3 or later
+#
+
+import sys
+
+# Find right directory when running from source tree
+sys.path.insert(0, "bin/python")
+
+from samba.dcerpc import winreg,misc
+import optparse
+import samba.getopt as options
+
+parser = optparse.OptionParser("%s <BINDING> [path]" % sys.argv[0])
+sambaopts = options.SambaOptions(parser)
+parser.add_option_group(sambaopts)
+parser.add_option("--createkey", type="string", metavar="KEYNAME",
+ help="create a key")
+
+opts, args = parser.parse_args()
+
+if len(args) < 1:
+ parser.print_usage()
+ sys.exit(-1)
+
+binding = args[0]
+
+print("Connecting to " + binding)
+conn = winreg.winreg(binding, sambaopts.get_loadparm())
+
+def list_values(key):
+ (num_values, max_valnamelen, max_valbufsize) = conn.QueryInfoKey(key, winreg.String())[4:7]
+ for i in range(num_values):
+ name = winreg.ValNameBuf()
+ name.size = max_valnamelen
+ (name, type, data, _, data_len) = conn.EnumValue(key, i, name, 0, [], max_valbufsize, 0)
+ print("\ttype=%-30s size=%4d '%s'" % (type, name.size, name))
+ if type in (misc.REG_SZ, misc.REG_EXPAND_SZ):
+ print("\t\t'%s'" % data)
+
+
+def list_path(key, path):
+ count = 0
+ (num_subkeys, max_subkeylen, max_classlen) = conn.QueryInfoKey(key, winreg.String())[1:4]
+ for i in range(num_subkeys):
+ name = winreg.StringBuf()
+ name.size = max_subkeylen+2 # utf16 0-terminator
+ keyclass = winreg.StringBuf()
+ keyclass.size = max_classlen+2 # utf16 0-terminator
+ (name, _, _) = conn.EnumKey(key, i, name, keyclass=keyclass, last_changed_time=None)
+ name2 = winreg.String()
+ name2.name = name.name
+ subkey = conn.OpenKey(key, name2, 0, winreg.KEY_QUERY_VALUE | winreg.KEY_ENUMERATE_SUB_KEYS)
+ count += list_path(subkey, "%s\\%s" % (path, name))
+ list_values(subkey)
+ return count
+
+
+if len(args) > 1:
+ root = args[1]
+else:
+ root = "HKLM"
+
+if opts.createkey:
+ name = winreg.String()
+ name.name = "SOFTWARE"
+
+ # Just sample code, "HKLM\SOFTWARE" should already exist
+
+ root = conn.OpenHKLM(
+ None, winreg.KEY_QUERY_VALUE | winreg.KEY_ENUMERATE_SUB_KEYS)
+ conn.CreateKey(
+ root,
+ name,
+ keyclass=winreg.String(),
+ options=0,
+ access_mask=0,
+ secdesc=None,
+ action_taken=0)
+else:
+ print("Listing registry tree '%s'" % root)
+ try:
+ root_key = getattr(conn, "Open%s" % root)(None, winreg.KEY_QUERY_VALUE | winreg.KEY_ENUMERATE_SUB_KEYS)
+ except AttributeError:
+ print("Unknown root key name %s" % root)
+ sys.exit(1)
+ count = list_path(root_key, root)
+ if count == 0:
+ print("No entries found")
+ sys.exit(1)
diff --git a/python/modules.c b/python/modules.c
new file mode 100644
index 0000000..7934f3e
--- /dev/null
+++ b/python/modules.c
@@ -0,0 +1,114 @@
+/*
+ Unix SMB/CIFS implementation.
+ Samba utility functions
+ Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "lib/replace/system/python.h"
+#include "py3compat.h"
+#include "includes.h"
+#include "python/modules.h"
+#include "dynconfig/dynconfig.h"
+
+static bool PySys_PathPrepend(PyObject *list, const char *path)
+{
+ bool ok;
+ PyObject *py_path = PyUnicode_FromString(path);
+ if (py_path == NULL) {
+ return false;
+ }
+ ok = PyList_Insert(list, 0, py_path) == 0;
+ Py_XDECREF(py_path);
+ return ok;
+}
+
+bool py_update_path(void)
+{
+ PyObject *mod_sys = NULL;
+ PyObject *py_path = NULL;
+
+ mod_sys = PyImport_ImportModule("sys");
+ if (mod_sys == NULL) {
+ return false;
+ }
+
+ py_path = PyObject_GetAttrString(mod_sys, "path");
+ if (py_path == NULL) {
+ goto error;
+ }
+
+ if (!PyList_Check(py_path)) {
+ goto error;
+ }
+
+ if (!PySys_PathPrepend(py_path, dyn_PYTHONDIR)) {
+ goto error;
+ }
+
+ if (strcmp(dyn_PYTHONARCHDIR, dyn_PYTHONDIR) != 0) {
+ if (!PySys_PathPrepend(py_path, dyn_PYTHONARCHDIR)) {
+ goto error;
+ }
+ }
+ Py_XDECREF(py_path);
+ Py_XDECREF(mod_sys);
+ return true;
+error:
+ Py_XDECREF(py_path);
+ Py_XDECREF(mod_sys);
+ return false;
+}
+
+char **PyList_AsStringList(TALLOC_CTX *mem_ctx, PyObject *list,
+ const char *paramname)
+{
+ char **ret;
+ Py_ssize_t i;
+ if (!PyList_Check(list)) {
+ PyErr_Format(PyExc_TypeError, "%s is not a list", paramname);
+ return NULL;
+ }
+ ret = talloc_array(NULL, char *, PyList_Size(list)+1);
+ if (ret == NULL) {
+ PyErr_NoMemory();
+ return NULL;
+ }
+
+ for (i = 0; i < PyList_Size(list); i++) {
+ const char *value;
+ Py_ssize_t size;
+ PyObject *item = PyList_GetItem(list, i);
+ if (!PyUnicode_Check(item)) {
+ PyErr_Format(PyExc_TypeError, "%s should be strings", paramname);
+ talloc_free(ret);
+ return NULL;
+ }
+ value = PyUnicode_AsUTF8AndSize(item, &size);
+ if (value == NULL) {
+ talloc_free(ret);
+ return NULL;
+ }
+ ret[i] = talloc_strndup(ret, value, size);
+ if (ret[i] == NULL) {
+ PyErr_NoMemory();
+ talloc_free(ret);
+ return NULL;
+ }
+ }
+ ret[i] = NULL;
+ return ret;
+}
+
diff --git a/python/modules.h b/python/modules.h
new file mode 100644
index 0000000..356937d
--- /dev/null
+++ b/python/modules.h
@@ -0,0 +1,33 @@
+/*
+ Unix SMB/CIFS implementation.
+ Samba utility functions
+ Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef __SAMBA_PYTHON_MODULES_H__
+#define __SAMBA_PYTHON_MODULES_H__
+
+#include <talloc.h>
+
+bool py_update_path(void);
+/* discard signature of 'func' in favour of 'target_sig' */
+#define PY_DISCARD_FUNC_SIG(target_sig, func) (target_sig)(void(*)(void))func
+
+char **PyList_AsStringList(TALLOC_CTX *mem_ctx, PyObject *list,
+ const char *paramname);
+
+#endif /* __SAMBA_PYTHON_MODULES_H__ */
+
diff --git a/python/py3compat.h b/python/py3compat.h
new file mode 100644
index 0000000..bfee82f
--- /dev/null
+++ b/python/py3compat.h
@@ -0,0 +1,51 @@
+/*
+ Unix SMB/CIFS implementation.
+ Python 3 compatibility macros
+ Copyright (C) Petr Viktorin <pviktori@redhat.com> 2015
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef _SAMBA_PY3COMPAT_H_
+#define _SAMBA_PY3COMPAT_H_
+#include "lib/replace/system/python.h"
+
+/* Quick docs:
+ * Syntax for module initialization is as in Python 3, except the entrypoint
+ * function definition and declaration:
+ * PyMODINIT_FUNC PyInit_modulename(void);
+ * PyMODINIT_FUNC PyInit_modulename(void)
+ * {
+ * ...
+ * }
+ * is replaced by:
+ * MODULE_INIT_FUNC(modulename)
+ * {
+ * ...
+ * }
+ *
+ * In the entrypoint, create a module using PyModule_Create and PyModuleDef,
+ * and return it. See Python 3 documentation for details.
+ */
+
+#define MODULE_INIT_FUNC(name) \
+ PyMODINIT_FUNC PyInit_ ## name(void); \
+ PyMODINIT_FUNC PyInit_ ## name(void)
+
+/* PyArg_ParseTuple/Py_BuildValue argument */
+
+#define PYARG_BYTES_LEN "y#"
+#define PYARG_STR_UNI "es"
+
+#endif
diff --git a/python/pyglue.c b/python/pyglue.c
new file mode 100644
index 0000000..c24d1b0
--- /dev/null
+++ b/python/pyglue.c
@@ -0,0 +1,672 @@
+/*
+ Unix SMB/CIFS implementation.
+ Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007
+ Copyright (C) Matthias Dieter Wallnöfer 2009
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "lib/replace/system/python.h"
+#include "python/py3compat.h"
+#include "includes.h"
+#include "python/modules.h"
+#include "version.h"
+#include "param/pyparam.h"
+#include "lib/socket/netif.h"
+#include "lib/util/debug.h"
+#include "librpc/ndr/ndr_private.h"
+#include "lib/cmdline/cmdline.h"
+#include "lib/crypto/gkdi.h"
+
+void init_glue(void);
+static PyObject *PyExc_NTSTATUSError;
+static PyObject *PyExc_WERRORError;
+static PyObject *PyExc_HRESULTError;
+static PyObject *PyExc_DsExtendedError;
+
+static PyObject *py_generate_random_str(PyObject *self, PyObject *args)
+{
+ Py_ssize_t len;
+ PyObject *ret;
+ char *retstr;
+
+ if (!PyArg_ParseTuple(args, "n", &len)) {
+ return NULL;
+ }
+ if (len < 0) {
+ PyErr_Format(PyExc_ValueError,
+ "random string length should be positive, not %zd",
+ len);
+ return NULL;
+ }
+ retstr = generate_random_str(NULL, len);
+ if (retstr == NULL) {
+ return PyErr_NoMemory();
+ }
+ ret = PyUnicode_FromStringAndSize(retstr, len);
+ talloc_free(retstr);
+ return ret;
+}
+
+static PyObject *py_generate_random_password(PyObject *self, PyObject *args)
+{
+ Py_ssize_t min, max;
+ PyObject *ret;
+ char *retstr;
+
+ if (!PyArg_ParseTuple(args, "nn", &min, &max)) {
+ return NULL;
+ }
+ if (max < 0 || min < 0) {
+ /*
+ * The real range checks happens in generate_random_password().
+ * Here just filter out any negative numbers.
+ */
+ PyErr_Format(PyExc_ValueError,
+ "invalid range: %zd - %zd",
+ min, max);
+ return NULL;
+ }
+
+ retstr = generate_random_password(NULL, min, max);
+ if (retstr == NULL) {
+ if (errno == EINVAL) {
+ return PyErr_Format(PyExc_ValueError,
+ "invalid range: %zd - %zd",
+ min, max);
+ }
+ return PyErr_NoMemory();
+ }
+ ret = PyUnicode_FromString(retstr);
+ talloc_free(retstr);
+ return ret;
+}
+
+static PyObject *py_generate_random_machine_password(PyObject *self, PyObject *args)
+{
+ Py_ssize_t min, max;
+ PyObject *ret;
+ char *retstr;
+
+ if (!PyArg_ParseTuple(args, "nn", &min, &max)) {
+ return NULL;
+ }
+ if (max < 0 || min < 0) {
+ /*
+ * The real range checks happens in
+ * generate_random_machine_password().
+ * Here we just filter out any negative numbers.
+ */
+ PyErr_Format(PyExc_ValueError,
+ "invalid range: %zd - %zd",
+ min, max);
+ return NULL;
+ }
+
+ retstr = generate_random_machine_password(NULL, min, max);
+ if (retstr == NULL) {
+ if (errno == EINVAL) {
+ return PyErr_Format(PyExc_ValueError,
+ "invalid range: %zd - %zd",
+ min, max);
+ }
+ return PyErr_NoMemory();
+ }
+ ret = PyUnicode_FromString(retstr);
+ talloc_free(retstr);
+ return ret;
+}
+
+static PyObject *py_check_password_quality(PyObject *self, PyObject *args)
+{
+ char *pass;
+
+ if (!PyArg_ParseTuple(args, "s", &pass)) {
+ return NULL;
+ }
+
+ return PyBool_FromLong(check_password_quality(pass));
+}
+
+static PyObject *py_generate_random_bytes(PyObject *self, PyObject *args)
+{
+ Py_ssize_t len;
+ PyObject *ret;
+ uint8_t *bytes = NULL;
+
+ if (!PyArg_ParseTuple(args, "n", &len)) {
+ return NULL;
+ }
+ if (len < 0) {
+ PyErr_Format(PyExc_ValueError,
+ "random bytes length should be positive, not %zd",
+ len);
+ return NULL;
+ }
+ bytes = talloc_zero_size(NULL, len);
+ if (bytes == NULL) {
+ PyErr_NoMemory();
+ return NULL;
+ }
+ generate_random_buffer(bytes, len);
+ ret = PyBytes_FromStringAndSize((const char *)bytes, len);
+ talloc_free(bytes);
+ return ret;
+}
+
+static PyObject *py_unix2nttime(PyObject *self, PyObject *args)
+{
+ time_t t;
+ unsigned int _t;
+ NTTIME nt;
+
+ if (!PyArg_ParseTuple(args, "I", &_t)) {
+ return NULL;
+ }
+ t = _t;
+
+ unix_to_nt_time(&nt, t);
+
+ return PyLong_FromLongLong((uint64_t)nt);
+}
+
+static PyObject *py_nttime2unix(PyObject *self, PyObject *args)
+{
+ time_t t;
+ NTTIME nt;
+ if (!PyArg_ParseTuple(args, "K", &nt))
+ return NULL;
+
+ t = nt_time_to_unix(nt);
+
+	return PyLong_FromLongLong((uint64_t)t);
+}
+
+static PyObject *py_float2nttime(PyObject *self, PyObject *args)
+{
+ double ft = 0;
+ double ft_sec = 0;
+ double ft_nsec = 0;
+ struct timespec ts;
+ NTTIME nt = 0;
+
+ if (!PyArg_ParseTuple(args, "d", &ft)) {
+ return NULL;
+ }
+
+ ft_sec = (double)(int)ft;
+ ft_nsec = (ft - ft_sec) * 1.0e+9;
+
+ ts.tv_sec = (int)ft_sec;
+ ts.tv_nsec = (int)ft_nsec;
+
+ nt = full_timespec_to_nt_time(&ts);
+
+ return PyLong_FromLongLong((uint64_t)nt);
+}
+
+static PyObject *py_nttime2float(PyObject *self, PyObject *args)
+{
+ double ft = 0;
+ struct timespec ts;
+ const struct timespec ts_zero = { .tv_sec = 0, };
+ NTTIME nt = 0;
+
+ if (!PyArg_ParseTuple(args, "K", &nt)) {
+ return NULL;
+ }
+
+ ts = nt_time_to_full_timespec(nt);
+ if (is_omit_timespec(&ts)) {
+ return PyFloat_FromDouble(1.0);
+ }
+ ft = timespec_elapsed2(&ts_zero, &ts);
+
+ return PyFloat_FromDouble(ft);
+}
+
+static PyObject *py_nttime2string(PyObject *self, PyObject *args)
+{
+ PyObject *ret;
+ NTTIME nt;
+ TALLOC_CTX *tmp_ctx;
+ const char *string;
+ if (!PyArg_ParseTuple(args, "K", &nt))
+ return NULL;
+
+ tmp_ctx = talloc_new(NULL);
+ if (tmp_ctx == NULL) {
+ PyErr_NoMemory();
+ return NULL;
+ }
+
+ string = nt_time_string(tmp_ctx, nt);
+ ret = PyUnicode_FromString(string);
+
+ talloc_free(tmp_ctx);
+
+ return ret;
+}
+
+static PyObject *py_set_debug_level(PyObject *self, PyObject *args)
+{
+ unsigned level;
+ if (!PyArg_ParseTuple(args, "I", &level))
+ return NULL;
+ debuglevel_set(level);
+ Py_RETURN_NONE;
+}
+
+static PyObject *py_get_debug_level(PyObject *self,
+ PyObject *Py_UNUSED(ignored))
+{
+ return PyLong_FromLong(debuglevel_get());
+}
+
+static PyObject *py_fault_setup(PyObject *self,
+ PyObject *Py_UNUSED(ignored))
+{
+ static bool done;
+ if (!done) {
+ fault_setup();
+ done = true;
+ }
+ Py_RETURN_NONE;
+}
+
+static PyObject *py_is_ntvfs_fileserver_built(PyObject *self,
+ PyObject *Py_UNUSED(ignored))
+{
+#ifdef WITH_NTVFS_FILESERVER
+ Py_RETURN_TRUE;
+#else
+ Py_RETURN_FALSE;
+#endif
+}
+
+static PyObject *py_is_heimdal_built(PyObject *self,
+ PyObject *Py_UNUSED(ignored))
+{
+#ifdef SAMBA4_USES_HEIMDAL
+ Py_RETURN_TRUE;
+#else
+ Py_RETURN_FALSE;
+#endif
+}
+
+static PyObject *py_is_ad_dc_built(PyObject *self,
+ PyObject *Py_UNUSED(ignored))
+{
+#ifdef AD_DC_BUILD_IS_ENABLED
+ Py_RETURN_TRUE;
+#else
+ Py_RETURN_FALSE;
+#endif
+}
+
+static PyObject *py_is_selftest_enabled(PyObject *self,
+ PyObject *Py_UNUSED(ignored))
+{
+#ifdef ENABLE_SELFTEST
+ Py_RETURN_TRUE;
+#else
+ Py_RETURN_FALSE;
+#endif
+}
+
+static PyObject *py_ndr_token_max_list_size(PyObject *self,
+ PyObject *Py_UNUSED(ignored))
+{
+ return PyLong_FromLong(ndr_token_max_list_size());
+}
+
+/*
+ return the list of interface IPs we have configured
+ takes an loadparm context, returns a list of IPs in string form
+
+ Does not return addresses on 127.0.0.0/8
+ */
+static PyObject *py_interface_ips(PyObject *self, PyObject *args)
+{
+ PyObject *pylist;
+ int count;
+ TALLOC_CTX *tmp_ctx;
+ PyObject *py_lp_ctx;
+ struct loadparm_context *lp_ctx;
+ struct interface *ifaces;
+ int i, ifcount;
+ int all_interfaces = 1;
+
+ if (!PyArg_ParseTuple(args, "O|i", &py_lp_ctx, &all_interfaces))
+ return NULL;
+
+ tmp_ctx = talloc_new(NULL);
+ if (tmp_ctx == NULL) {
+ PyErr_NoMemory();
+ return NULL;
+ }
+
+ lp_ctx = lpcfg_from_py_object(tmp_ctx, py_lp_ctx);
+ if (lp_ctx == NULL) {
+ talloc_free(tmp_ctx);
+ return PyErr_NoMemory();
+ }
+
+ load_interface_list(tmp_ctx, lp_ctx, &ifaces);
+
+ count = iface_list_count(ifaces);
+
+ /* first count how many are not loopback addresses */
+ for (ifcount = i = 0; i<count; i++) {
+ const char *ip = iface_list_n_ip(ifaces, i);
+
+ if (all_interfaces) {
+ ifcount++;
+ continue;
+ }
+
+ if (iface_list_same_net(ip, "127.0.0.1", "255.0.0.0")) {
+ continue;
+ }
+
+ if (iface_list_same_net(ip, "169.254.0.0", "255.255.0.0")) {
+ continue;
+ }
+
+ if (iface_list_same_net(ip, "::1", "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff")) {
+ continue;
+ }
+
+ if (iface_list_same_net(ip, "fe80::", "ffff:ffff:ffff:ffff::")) {
+ continue;
+ }
+
+ ifcount++;
+ }
+
+ pylist = PyList_New(ifcount);
+ for (ifcount = i = 0; i<count; i++) {
+ const char *ip = iface_list_n_ip(ifaces, i);
+
+ if (all_interfaces) {
+ PyList_SetItem(pylist, ifcount, PyUnicode_FromString(ip));
+ ifcount++;
+ continue;
+ }
+
+ if (iface_list_same_net(ip, "127.0.0.1", "255.0.0.0")) {
+ continue;
+ }
+
+ if (iface_list_same_net(ip, "169.254.0.0", "255.255.0.0")) {
+ continue;
+ }
+
+ if (iface_list_same_net(ip, "::1", "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff")) {
+ continue;
+ }
+
+ if (iface_list_same_net(ip, "fe80::", "ffff:ffff:ffff:ffff::")) {
+ continue;
+ }
+
+ PyList_SetItem(pylist, ifcount, PyUnicode_FromString(ip));
+ ifcount++;
+ }
+ talloc_free(tmp_ctx);
+ return pylist;
+}
+
+static PyObject *py_strcasecmp_m(PyObject *self, PyObject *args)
+{
+ char *s1 = NULL;
+ char *s2 = NULL;
+ long cmp_result = 0;
+ if (!PyArg_ParseTuple(args, PYARG_STR_UNI
+ PYARG_STR_UNI,
+ "utf8", &s1, "utf8", &s2)) {
+ return NULL;
+ }
+
+ cmp_result = strcasecmp_m(s1, s2);
+ PyMem_Free(s1);
+ PyMem_Free(s2);
+ return PyLong_FromLong(cmp_result);
+}
+
+static PyObject *py_strstr_m(PyObject *self, PyObject *args)
+{
+ char *s1 = NULL;
+ char *s2 = NULL;
+ char *strstr_ret = NULL;
+ PyObject *result = NULL;
+ if (!PyArg_ParseTuple(args, PYARG_STR_UNI
+ PYARG_STR_UNI,
+ "utf8", &s1, "utf8", &s2))
+ return NULL;
+
+ strstr_ret = strstr_m(s1, s2);
+ if (!strstr_ret) {
+ PyMem_Free(s1);
+ PyMem_Free(s2);
+ Py_RETURN_NONE;
+ }
+ result = PyUnicode_FromString(strstr_ret);
+ PyMem_Free(s1);
+ PyMem_Free(s2);
+ return result;
+}
+
+static PyObject *py_get_burnt_commandline(PyObject *self, PyObject *args)
+{
+ PyObject *cmdline_as_list, *ret;
+ char *burnt_cmdline = NULL;
+ Py_ssize_t i, argc;
+ char **argv = NULL;
+ TALLOC_CTX *frame = talloc_stackframe();
+ bool burnt;
+
+ if (!PyArg_ParseTuple(args, "O!", &PyList_Type, &cmdline_as_list))
+ {
+ TALLOC_FREE(frame);
+ return NULL;
+ }
+
+ argc = PyList_GET_SIZE(cmdline_as_list);
+
+ if (argc == 0) {
+ TALLOC_FREE(frame);
+ Py_RETURN_NONE;
+ }
+
+ argv = PyList_AsStringList(frame, cmdline_as_list, "sys.argv");
+ if (argv == NULL) {
+ TALLOC_FREE(frame);
+ return NULL;
+ }
+
+ burnt = samba_cmdline_burn(argc, argv);
+ if (!burnt) {
+ TALLOC_FREE(frame);
+ Py_RETURN_NONE;
+ }
+
+ for (i = 0; i < argc; i++) {
+ if (i == 0) {
+ burnt_cmdline = talloc_strdup(frame,
+ argv[i]);
+ } else {
+ burnt_cmdline
+ = talloc_asprintf_append(burnt_cmdline,
+ " %s",
+ argv[i]);
+ }
+ if (burnt_cmdline == NULL) {
+ PyErr_NoMemory();
+ TALLOC_FREE(frame);
+ return NULL;
+ }
+ }
+
+ ret = PyUnicode_FromString(burnt_cmdline);
+ TALLOC_FREE(frame);
+
+ return ret;
+}
+
+static PyMethodDef py_misc_methods[] = {
+ { "generate_random_str", (PyCFunction)py_generate_random_str, METH_VARARGS,
+ "generate_random_str(len) -> string\n"
+ "Generate random string with specified length." },
+ { "generate_random_password", (PyCFunction)py_generate_random_password,
+ METH_VARARGS, "generate_random_password(min, max) -> string\n"
+ "Generate random password (based on printable ascii characters) "
+ "with a length >= min and <= max." },
+ { "generate_random_machine_password", (PyCFunction)py_generate_random_machine_password,
+ METH_VARARGS, "generate_random_machine_password(min, max) -> string\n"
+ "Generate random password "
+ "(based on random utf16 characters converted to utf8 or "
+ "random ascii characters if 'unix charset' is not 'utf8') "
+ "with a length >= min (at least 14) and <= max (at most 255)." },
+ { "check_password_quality", (PyCFunction)py_check_password_quality,
+ METH_VARARGS, "check_password_quality(pass) -> bool\n"
+ "Check password quality against Samba's check_password_quality, "
+ "the implementation of Microsoft's rules: "
+ "http://msdn.microsoft.com/en-us/subscriptions/cc786468%28v=ws.10%29.aspx"
+ },
+ { "unix2nttime", (PyCFunction)py_unix2nttime, METH_VARARGS,
+ "unix2nttime(timestamp) -> nttime" },
+ { "nttime2unix", (PyCFunction)py_nttime2unix, METH_VARARGS,
+ "nttime2unix(nttime) -> timestamp" },
+	{ "float2nttime", (PyCFunction)py_float2nttime, METH_VARARGS,
+		"float2nttime(floattimestamp) -> nttime" },
+	{ "nttime2float", (PyCFunction)py_nttime2float, METH_VARARGS,
+		"nttime2float(nttime) -> floattimestamp" },
+ { "nttime2string", (PyCFunction)py_nttime2string, METH_VARARGS,
+ "nttime2string(nttime) -> string" },
+ { "set_debug_level", (PyCFunction)py_set_debug_level, METH_VARARGS,
+ "set debug level" },
+ { "get_debug_level", (PyCFunction)py_get_debug_level, METH_NOARGS,
+ "get debug level" },
+ { "fault_setup", (PyCFunction)py_fault_setup, METH_NOARGS,
+ "setup the default samba panic handler" },
+ { "interface_ips", (PyCFunction)py_interface_ips, METH_VARARGS,
+		"interface_ips(lp_ctx[, all_interfaces]) -> list_of_ifaces\n"
+ "\n"
+ "get interface IP address list"},
+ { "strcasecmp_m", (PyCFunction)py_strcasecmp_m, METH_VARARGS,
+ "(for testing) compare two strings using Samba's strcasecmp_m()"},
+ { "strstr_m", (PyCFunction)py_strstr_m, METH_VARARGS,
+ "(for testing) find one string in another with Samba's strstr_m()"},
+ { "is_ntvfs_fileserver_built", (PyCFunction)py_is_ntvfs_fileserver_built, METH_NOARGS,
+ "is the NTVFS file server built in this installation?" },
+ { "is_heimdal_built", (PyCFunction)py_is_heimdal_built, METH_NOARGS,
+ "is Samba built with Heimdal Kerberos?" },
+ { "generate_random_bytes",
+ (PyCFunction)py_generate_random_bytes,
+ METH_VARARGS,
+ "generate_random_bytes(len) -> bytes\n"
+ "Generate random bytes with specified length." },
+ { "is_ad_dc_built", (PyCFunction)py_is_ad_dc_built, METH_NOARGS,
+ "is Samba built with AD DC?" },
+ { "is_selftest_enabled", (PyCFunction)py_is_selftest_enabled,
+ METH_NOARGS, "is Samba built with selftest enabled?" },
+ { "ndr_token_max_list_size", (PyCFunction)py_ndr_token_max_list_size,
+ METH_NOARGS, "How many NDR internal tokens is too many for this build?" },
+ { "get_burnt_commandline", (PyCFunction)py_get_burnt_commandline,
+ METH_VARARGS, "Return a redacted commandline to feed to setproctitle (None if no redaction required)" },
+ {0}
+};
+
+static struct PyModuleDef moduledef = {
+ PyModuleDef_HEAD_INIT,
+ .m_name = "_glue",
+ .m_doc = "Python bindings for miscellaneous Samba functions.",
+ .m_size = -1,
+ .m_methods = py_misc_methods,
+};
+
+MODULE_INIT_FUNC(_glue)
+{
+ PyObject *m;
+ PyObject *py_obj = NULL;
+ int ret;
+
+ debug_setup_talloc_log();
+
+ m = PyModule_Create(&moduledef);
+ if (m == NULL)
+ return NULL;
+
+ PyModule_AddObject(m, "version",
+ PyUnicode_FromString(SAMBA_VERSION_STRING));
+ PyExc_NTSTATUSError = PyErr_NewException("samba.NTSTATUSError", PyExc_RuntimeError, NULL);
+ if (PyExc_NTSTATUSError != NULL) {
+ Py_INCREF(PyExc_NTSTATUSError);
+ PyModule_AddObject(m, "NTSTATUSError", PyExc_NTSTATUSError);
+ }
+
+ PyExc_WERRORError = PyErr_NewException("samba.WERRORError", PyExc_RuntimeError, NULL);
+ if (PyExc_WERRORError != NULL) {
+ Py_INCREF(PyExc_WERRORError);
+ PyModule_AddObject(m, "WERRORError", PyExc_WERRORError);
+ }
+
+ PyExc_HRESULTError = PyErr_NewException("samba.HRESULTError", PyExc_RuntimeError, NULL);
+ if (PyExc_HRESULTError != NULL) {
+ Py_INCREF(PyExc_HRESULTError);
+ PyModule_AddObject(m, "HRESULTError", PyExc_HRESULTError);
+ }
+
+ PyExc_DsExtendedError = PyErr_NewException("samba.DsExtendedError", PyExc_RuntimeError, NULL);
+ if (PyExc_DsExtendedError != NULL) {
+ Py_INCREF(PyExc_DsExtendedError);
+ PyModule_AddObject(m, "DsExtendedError", PyExc_DsExtendedError);
+ }
+
+ ret = PyModule_AddIntConstant(m, "GKDI_L1_KEY_ITERATION", gkdi_l1_key_iteration);
+ if (ret) {
+ Py_DECREF(m);
+ return NULL;
+ }
+ ret = PyModule_AddIntConstant(m, "GKDI_L2_KEY_ITERATION", gkdi_l2_key_iteration);
+ if (ret) {
+ Py_DECREF(m);
+ return NULL;
+ }
+ py_obj = PyLong_FromLongLong(gkdi_key_cycle_duration);
+ if (py_obj == NULL) {
+ Py_DECREF(m);
+ return NULL;
+ }
+ ret = PyModule_AddObject(m, "GKDI_KEY_CYCLE_DURATION", py_obj);
+ if (ret) {
+ Py_DECREF(py_obj);
+ Py_DECREF(m);
+ return NULL;
+ }
+ py_obj = PyLong_FromLongLong(gkdi_max_clock_skew);
+ if (py_obj == NULL) {
+ Py_DECREF(m);
+ return NULL;
+ }
+ ret = PyModule_AddObject(m, "GKDI_MAX_CLOCK_SKEW", py_obj);
+ if (ret) {
+ Py_DECREF(py_obj);
+ Py_DECREF(m);
+ return NULL;
+ }
+
+ return m;
+}
diff --git a/python/samba/__init__.py b/python/samba/__init__.py
new file mode 100644
index 0000000..3e6ea7d
--- /dev/null
+++ b/python/samba/__init__.py
@@ -0,0 +1,400 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007-2008
+#
+# Based on the original in EJS:
+# Copyright (C) Andrew Tridgell <tridge@samba.org> 2005
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Samba 4."""
+
+__docformat__ = "restructuredText"
+
+import os
+import time
+import ldb
+import samba.param
+from samba import _glue
+from samba._ldb import Ldb as _Ldb
+
+
+def source_tree_topdir():
+ """Return the top level source directory."""
+ paths = ["../../..", "../../../.."]
+ for p in paths:
+ topdir = os.path.normpath(os.path.join(os.path.dirname(__file__), p))
+ if os.path.exists(os.path.join(topdir, 'source4')):
+ return topdir
+ raise RuntimeError("unable to find top level source directory")
+
+
+def in_source_tree():
+ """Return True if we are running from within the samba source tree"""
+ try:
+ topdir = source_tree_topdir()
+ except RuntimeError:
+ return False
+ return True
+
+
+class Ldb(_Ldb):
+ """Simple Samba-specific LDB subclass that takes care
+ of setting up the modules dir, credentials pointers, etc.
+
+ Please note that this is intended to be for all Samba LDB files,
+ not necessarily the Sam database. For Sam-specific helper
+ functions see samdb.py.
+ """
+
+ def __init__(self, url=None, lp=None, modules_dir=None, session_info=None,
+ credentials=None, flags=0, options=None):
+ """Opens a Samba Ldb file.
+
+ :param url: Optional LDB URL to open
+ :param lp: Optional loadparm object
+ :param modules_dir: Optional modules directory
+ :param session_info: Optional session information
+ :param credentials: Optional credentials, defaults to anonymous.
+ :param flags: Optional LDB flags
+ :param options: Additional options (optional)
+
+ This is different from a regular Ldb file in that the Samba-specific
+ modules-dir is used by default and that credentials and session_info
+ can be passed through (required by some modules).
+ """
+
+ if modules_dir is not None:
+ self.set_modules_dir(modules_dir)
+ else:
+ self.set_modules_dir(os.path.join(samba.param.modules_dir(), "ldb"))
+
+ if session_info is not None:
+ self.set_session_info(session_info)
+
+ if credentials is not None:
+ self.set_credentials(credentials)
+
+ if lp is not None:
+ self.set_loadparm(lp)
+
+ # This must be done before we load the schema, as these handlers for
+ # objectSid and objectGUID etc must take precedence over the 'binary
+ # attribute' declaration in the schema
+ self.register_samba_handlers()
+
+ # TODO set debug
+ def msg(l, text):
+ print(text)
+ # self.set_debug(msg)
+
+ self.set_utf8_casefold()
+
+ # Allow admins to force non-sync ldb for all databases
+ if lp is not None:
+ nosync_p = lp.get("ldb:nosync")
+ if nosync_p is not None and nosync_p:
+ flags |= ldb.FLG_NOSYNC
+
+ self.set_create_perms(0o600)
+
+ if url is not None:
+ self.connect(url, flags, options)
+
+ def searchone(self, attribute, basedn=None, expression=None,
+ scope=ldb.SCOPE_BASE):
+ """Search for one attribute as a string.
+
+ :param basedn: BaseDN for the search.
+ :param attribute: Name of the attribute
+ :param expression: Optional search expression.
+ :param scope: Search scope (defaults to base).
+ :return: Value of attribute as a string or None if it wasn't found.
+ """
+ res = self.search(basedn, scope, expression, [attribute])
+ if len(res) != 1 or res[0][attribute] is None:
+ return None
+ values = set(res[0][attribute])
+ assert len(values) == 1
+ return self.schema_format_value(attribute, values.pop())
+
+ def erase_users_computers(self, dn):
+ """Erases user and computer objects from our AD.
+
+ This is needed since the 'samldb' module denies the deletion of primary
+ groups. Therefore all groups shouldn't be primary somewhere anymore.
+ """
+
+ try:
+ res = self.search(base=dn, scope=ldb.SCOPE_SUBTREE, attrs=[],
+ expression="(|(objectclass=user)(objectclass=computer))")
+ except ldb.LdbError as error:
+ (errno, estr) = error.args
+ if errno == ldb.ERR_NO_SUCH_OBJECT:
+ # Ignore no such object errors
+ return
+ else:
+ raise
+
+ try:
+ for msg in res:
+ self.delete(msg.dn, ["relax:0"])
+ except ldb.LdbError as error:
+ (errno, estr) = error.args
+ if errno != ldb.ERR_NO_SUCH_OBJECT:
+ # Ignore no such object errors
+ raise
+
+ def erase_except_schema_controlled(self):
+ """Erase this ldb.
+
+ :note: Removes all records, except those that are controlled by
+ Samba4's schema.
+ """
+
+ basedn = ""
+
+ # Try to delete user/computer accounts to allow deletion of groups
+ self.erase_users_computers(basedn)
+
+ # Delete the 'visible' records, and the invisible 'deleted' records (if
+ # this DB supports it)
+ for msg in self.search(basedn, ldb.SCOPE_SUBTREE,
+ "(&(|(objectclass=*)(distinguishedName=*))(!(distinguishedName=@BASEINFO)))",
+ [], controls=["show_deleted:0", "show_recycled:0"]):
+ try:
+ self.delete(msg.dn, ["relax:0"])
+ except ldb.LdbError as error:
+ (errno, estr) = error.args
+ if errno != ldb.ERR_NO_SUCH_OBJECT:
+ # Ignore no such object errors
+ raise
+
+ res = self.search(basedn, ldb.SCOPE_SUBTREE,
+ "(&(|(objectclass=*)(distinguishedName=*))(!(distinguishedName=@BASEINFO)))",
+ [], controls=["show_deleted:0", "show_recycled:0"])
+ assert len(res) == 0
+
+ # delete the specials
+ for attr in ["@SUBCLASSES", "@MODULES",
+ "@OPTIONS", "@PARTITION", "@KLUDGEACL"]:
+ try:
+ self.delete(attr, ["relax:0"])
+ except ldb.LdbError as error:
+ (errno, estr) = error.args
+ if errno != ldb.ERR_NO_SUCH_OBJECT:
+ # Ignore missing dn errors
+ raise
+
+ def erase(self):
+ """Erase this ldb, removing all records."""
+ self.erase_except_schema_controlled()
+
+ # delete the specials
+ for attr in ["@INDEXLIST", "@ATTRIBUTES"]:
+ try:
+ self.delete(attr, ["relax:0"])
+ except ldb.LdbError as error:
+ (errno, estr) = error.args
+ if errno != ldb.ERR_NO_SUCH_OBJECT:
+ # Ignore missing dn errors
+ raise
+
+ def load_ldif_file_add(self, ldif_path):
+ """Load a LDIF file.
+
+ :param ldif_path: Path to LDIF file.
+ """
+ with open(ldif_path, 'r') as ldif_file:
+ self.add_ldif(ldif_file.read())
+
+ def add_ldif(self, ldif, controls=None):
+ """Add data based on a LDIF string.
+
+ :param ldif: LDIF text.
+ """
+ for changetype, msg in self.parse_ldif(ldif):
+ assert changetype == ldb.CHANGETYPE_NONE
+ self.add(msg, controls)
+
+ def modify_ldif(self, ldif, controls=None):
+ """Modify database based on a LDIF string.
+
+ :param ldif: LDIF text.
+ """
+ for changetype, msg in self.parse_ldif(ldif):
+ if changetype == ldb.CHANGETYPE_NONE:
+ changetype = ldb.CHANGETYPE_MODIFY
+
+ if changetype == ldb.CHANGETYPE_ADD:
+ self.add(msg, controls)
+ elif changetype == ldb.CHANGETYPE_MODIFY:
+ self.modify(msg, controls)
+ elif changetype == ldb.CHANGETYPE_DELETE:
+ deldn = msg
+ self.delete(deldn, controls)
+ elif changetype == ldb.CHANGETYPE_MODRDN:
+ olddn = msg["olddn"]
+ deleteoldrdn = msg["deleteoldrdn"]
+ newdn = msg["newdn"]
+ if deleteoldrdn is False:
+ raise ValueError("Invalid ldb.CHANGETYPE_MODRDN with deleteoldrdn=False")
+ self.rename(olddn, newdn, controls)
+ else:
+ raise ValueError("Invalid ldb.CHANGETYPE_%u: %s" % (changetype, msg))
+
+
def substitute_var(text, values):
    """Replace every ``${NAME}`` marker in *text* using *values*.

    :param text: Text in which to substitute.
    :param values: Dictionary mapping variable names to replacement strings.
    :return: the substituted text.
    """
    result = text
    for name, value in values.items():
        assert isinstance(name, str), "%r is not a string" % name
        assert isinstance(value, str), "Value %r for %s is not a string" % (value, name)
        result = result.replace("${%s}" % name, value)
    return result
+
+
def check_all_substituted(text):
    """Check that all substitution variables in a string have been replaced.

    If not, raise an exception.

    :param text: The text to search for substitution variables
    :raises Exception: naming the first unsubstituted ``${NAME}`` marker
    """
    var_start = text.find("${")
    if var_start == -1:
        return

    # Show the offending variable in the error. If the closing brace is
    # missing (malformed marker), find() returns -1 and the original
    # slice text[var_start:0] would be empty -- show everything from the
    # marker to the end of the text instead.
    var_end = text.find("}", var_start)
    if var_end == -1:
        fragment = text[var_start:]
    else:
        fragment = text[var_start:var_end + 1]

    raise Exception("Not all variables substituted: %s" % fragment)
+
+
def read_and_sub_file(file_name, subst_vars):
    """Read a file and substitute the given variables in its content.

    :param file_name: File to be read (typically from setup directory)
    :param subst_vars: Optional variables to substitute in the file.
    :return: the (possibly substituted) file content.
    """
    with open(file_name, 'r', encoding="utf-8") as data_file:
        contents = data_file.read()
    if subst_vars is None:
        return contents
    contents = substitute_var(contents, subst_vars)
    check_all_substituted(contents)
    return contents
+
+
def setup_file(template, fname, subst_vars=None):
    """Setup a file in the private dir.

    Any pre-existing file at *fname* is removed first.

    :param template: Path of the template file.
    :param fname: Path of the file to create.
    :param subst_vars: Substitution variables.
    """
    if os.path.exists(fname):
        os.unlink(fname)

    data = read_and_sub_file(template, subst_vars)
    # Use a context manager instead of a manual try/finally close, and
    # write with an explicit encoding to match read_and_sub_file().
    with open(fname, 'w', encoding="utf-8") as f:
        f.write(data)
+
+
MAX_NETBIOS_NAME_LEN = 15


def is_valid_netbios_char(c):
    """Return True if character *c* may appear in a NetBIOS name."""
    return c.isalnum() or c in " !#$%&'()-.@^_{}~"


def valid_netbios_name(name):
    """Check whether a name is valid as a NetBIOS name. """
    # See crh's book (1.4.1.1)
    if len(name) > MAX_NETBIOS_NAME_LEN:
        return False
    return all(is_valid_netbios_char(ch) for ch in name)
+
+
def dn_from_dns_name(dnsdomain):
    """return a DN from a DNS name domain/forest root"""
    labels = dnsdomain.split(".")
    return ",".join("DC=%s" % label for label in labels)
+
+
def current_unix_time():
    """Return the current time as integer UNIX seconds."""
    now = time.time()
    return int(now)
+
+
def string_to_byte_array(string):
    """Return the characters of *string* (str or bytes) as a list of ints."""
    result = []
    for ch in string:
        # Iterating bytes yields ints already; str yields characters.
        result.append(ch if isinstance(ch, int) else ord(ch))
    return result
+
+
def arcfour_encrypt(key, data):
    """RC4-transform *data* with *key* via samba.crypto.

    RC4 is symmetric, so the same call also decrypts.

    :param key: the RC4 key blob
    :param data: the blob to transform
    :return: the transformed blob
    """
    # Imported lazily so merely loading this module does not require the
    # crypto extension.
    from samba.crypto import arcfour_crypt_blob
    return arcfour_crypt_blob(data, key)
+
+
def enable_net_export_keytab():
    """This function modifies the samba.net.Net class to contain
    an export_keytab() method."""
    # This looks very strange because it is.
    #
    # The dckeytab modules contains nothing, but the act of importing
    # it pushes a method into samba.net.Net. It ended up this way
    # because Net.export_keytab() only works on Heimdal builds, and
    # people sometimes want to compile Samba without Heimdal while
    # still having a working samba-tool.
    #
    # There is probably a better way to do this than a magic module
    # import (yes, that's a FIXME if you can be bothered).
    #
    # Both imports look unused, but both are needed: net must be loaded
    # before dckeytab's import-time side effect can patch it.
    from samba import net  # noqa: F401
    from samba import dckeytab  # noqa: F401
+
+
# Re-export the C helpers from the private _glue extension module at
# package level, so callers can use e.g. samba.version / samba.unix2nttime
# without reaching into _glue directly.
version = _glue.version
interface_ips = _glue.interface_ips
fault_setup = _glue.fault_setup
set_debug_level = _glue.set_debug_level
get_debug_level = _glue.get_debug_level
# NT time (100ns ticks since 1601) <-> UNIX time conversions.
float2nttime = _glue.float2nttime
nttime2float = _glue.nttime2float
nttime2string = _glue.nttime2string
nttime2unix = _glue.nttime2unix
unix2nttime = _glue.unix2nttime
generate_random_password = _glue.generate_random_password
generate_random_machine_password = _glue.generate_random_machine_password
check_password_quality = _glue.check_password_quality
generate_random_bytes = _glue.generate_random_bytes
strcasecmp_m = _glue.strcasecmp_m
strstr_m = _glue.strstr_m
# Build-configuration predicates.
is_ntvfs_fileserver_built = _glue.is_ntvfs_fileserver_built
is_heimdal_built = _glue.is_heimdal_built
is_ad_dc_built = _glue.is_ad_dc_built
is_selftest_enabled = _glue.is_selftest_enabled

# Exception types raised by the C bindings.
NTSTATUSError = _glue.NTSTATUSError
HRESULTError = _glue.HRESULTError
WERRORError = _glue.WERRORError
DsExtendedError = _glue.DsExtendedError
diff --git a/python/samba/auth_util.py b/python/samba/auth_util.py
new file mode 100644
index 0000000..f616bb4
--- /dev/null
+++ b/python/samba/auth_util.py
@@ -0,0 +1,34 @@
+# Unix SMB/CIFS implementation.
+# auth util helpers
+#
+# Copyright (C) Ralph Boehme <slow@samba.org> 2019
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from samba.auth import (
+ system_session,
+ session_info_fill_unix,
+ copy_session_info,
+)
+
def system_session_unix():
    """
    Return a copy of the system session_info with a valid UNIX token
    """
    # Copy first so the shared system session object is never mutated.
    info = copy_session_info(system_session())
    session_info_fill_unix(info, None)
    return info
diff --git a/python/samba/colour.py b/python/samba/colour.py
new file mode 100644
index 0000000..1fb6f24
--- /dev/null
+++ b/python/samba/colour.py
@@ -0,0 +1,175 @@
+# ANSI codes for 4 bit and xterm-256color
+#
+# Copyright (C) Andrew Bartlett 2018
+#
+# Originally written by Douglas Bagnall
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# The 4 bit colours are available as global variables with names like
+# RED, DARK_RED, REV_RED (for red background), and REV_DARK_RED. If
+# switch_colour_off() is called, these names will all point to the
+# empty string. switch_colour_on() restores the default values.
+#
+# The 256-colour codes are obtained using xterm_256_color(n), where n
+# is the number of the desired colour.
+
+
+def _gen_ansi_colours():
+ g = globals()
+ for i, name in enumerate(('BLACK', 'RED', 'GREEN', 'YELLOW', 'BLUE',
+ 'MAGENTA', 'CYAN', 'WHITE')):
+ g[name] = "\033[1;3%dm" % i
+ g['DARK_' + name] = "\033[3%dm" % i
+ g['REV_' + name] = "\033[1;4%dm" % i
+ g['REV_DARK_' + name] = "\033[4%dm" % i
+
+ # kcc.debug uses these aliases (which make visual sense)
+ g['PURPLE'] = DARK_MAGENTA
+ g['GREY'] = DARK_WHITE
+
+ # C_NORMAL resets to normal, whatever that is
+ g['C_NORMAL'] = "\033[0m"
+
+ # Non-colour ANSI codes.
+ g['UNDERLINE'] = "\033[4m"
+
+
# Populate the colour constants at import time.
_gen_ansi_colours()

# Generate functions that colour a string. The functions look like
# this:
#
# c_BLUE("hello") # "\033[1;34mhello\033[0m" -> blue text
# c_DARK_RED(3) # 3 will be stringified and coloured
#
# but if colour is switched off, no colour codes are added.
#
# c_BLUE("hello") # "hello"
#
# The definition of the functions looks a little odd, because we want
# to bake in the name of the colour but not its actual value.

for _k in list(globals().keys()):
    if _k.isupper():
        # name=_k binds the colour NAME as a default argument, avoiding
        # the late-binding-closure trap; the VALUE is looked up at call
        # time so switch_colour_off()/on() take effect.
        def _f(s, name=_k):
            return "%s%s%s" % (globals()[name], s, C_NORMAL)
        globals()['c_%s' % _k] = _f

# Remove the loop temporaries from the module namespace.
del _k, _f
+
+
def switch_colour_off():
    """Convert all the ANSI colour codes into empty strings."""
    g = globals()
    # Snapshot the candidate names first, then blank them.
    ansi_names = [k for k, v in g.items()
                  if k.isupper() and isinstance(v, str) and v.startswith('\033')]
    for k in ansi_names:
        g[k] = ''
+
+
def switch_colour_on():
    """Regenerate all the ANSI colour codes."""
    # Re-running the generator restores every value that
    # switch_colour_off() blanked.
    _gen_ansi_colours()
+
+
def xterm_256_colour(n, bg=False, bold=False):
    """Return the xterm-256 escape code for colour number *n*.

    :param n: the colour number (0-255); anything int() accepts.
    :param bg: colour the background instead of the foreground.
    :param bold: request bold weight.
    """
    prefix = []
    if bold:
        prefix.append('01;')
    prefix.append('48' if bg else '38')
    return "\033[%s;5;%dm" % (''.join(prefix), int(n))
+
+
def is_colour_wanted(*streams, hint='auto'):
    """The hint is presumably a --color argument.

    The streams to be considered can be file objects or file names,
    with '-' being a special filename indicating stdout.

    We follow the behaviour of GNU `ls` in what we accept.
    * `git` is stricter, accepting only {always,never,auto}.
    * `grep` is looser, accepting mixed case variants.
    * historically we have used {yes,no,auto}.
    * {always,never,auto} appears the commonest convention.
    * if the caller tries to opt out of choosing and sets hint to None
      or '', we assume 'auto'.
    """
    negative = ('no', 'never', 'none')
    positive = ('yes', 'always', 'force')
    neutral = ('auto', 'tty', 'if-tty', None, '')

    if hint in negative:
        return False
    if hint in positive:
        return True
    if hint not in neutral:
        raise ValueError(f"unexpected colour hint: {hint}; "
                         "try always|never|auto")

    from os import environ
    if environ.get('NO_COLOR'):
        # Note: per spec, we treat the empty string as if unset.
        return False

    import sys
    for stream in streams:
        if isinstance(stream, str):
            # This function can be passed filenames instead of file
            # objects, in which case we treat '-' as stdout, and test
            # that. Any other string is not regarded as a tty.
            if stream != '-':
                return False
            stream = sys.stdout
        if not stream.isatty():
            return False
    return True
+
+
def colour_if_wanted(*streams, hint='auto'):
    """Switch colour on or off as is_colour_wanted() decides.

    :return: True if colour was enabled.
    """
    wanted = is_colour_wanted(*streams, hint=hint)
    (switch_colour_on if wanted else switch_colour_off)()
    return wanted
+
+
def colourdiff(a, b):
    """Generate a string comparing two strings or byte sequences, with
    differences coloured to indicate what changed.

    Byte sequences are printed as hex pairs separated by colons.
    """
    from difflib import SequenceMatcher

    if isinstance(a, bytes):
        a = a.hex(':')
    if isinstance(b, bytes):
        b = b.hex(':')
    # Make spaces visible so whitespace-only changes stand out.
    a = a.replace(' ', '␠')
    b = b.replace(' ', '␠')

    pieces = []
    for op, a0, a1, b0, b1 in SequenceMatcher(None, a, b).get_opcodes():
        if op == 'equal':
            pieces.append(a[a0: a1])
        elif op == 'delete':
            pieces.append(c_RED(a[a0: a1]))
        elif op == 'insert':
            pieces.append(c_GREEN(b[b0: b1]))
        elif op == 'replace':
            pieces.append(c_RED(a[a0: a1]))
            pieces.append(c_GREEN(b[b0: b1]))
        else:
            pieces.append(f' --unknown diff op {op}!-- ')

    return ''.join(pieces)
diff --git a/python/samba/common.py b/python/samba/common.py
new file mode 100644
index 0000000..c46f6cb
--- /dev/null
+++ b/python/samba/common.py
@@ -0,0 +1,107 @@
+# Samba common functions
+#
+# Copyright (C) Matthieu Patou <mat@matws.net>
+# Copyright (C) Lumir Balhar <lbalhar@redhat.com> 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+
def cmp(x, y):
    """
    Replacement for built-in function cmp that was removed in Python 3

    Compare the two objects x and y and return an integer according to
    the outcome. The return value is negative if x < y, zero if x == y
    and strictly positive if x > y.
    """
    if x < y:
        return -1
    if x > y:
        return 1
    return 0
+
+
def confirm(msg, forced=False, allow_all=False):
    """Confirm an action with the user.

    :param msg: A string to print to the user
    :param forced: If True, skip the prompt and answer yes
    :param allow_all: also accept 'all'/'none' answers
    :return: True/False, or the strings 'ALL'/'NONE' when allow_all is set
    """
    if forced:
        print("%s [YES]" % msg)
        return True

    answers = {
        'Y': True,
        'YES': True,
        '': False,
        'N': False,
        'NO': False,
    }
    prompt = '[y/N]'
    if allow_all:
        answers['ALL'] = 'ALL'
        answers['NONE'] = 'NONE'
        prompt = '[y/N/all/none]'

    while True:
        reply = input(msg + ' %s ' % prompt).upper()
        try:
            return answers[reply]
        except KeyError:
            print("Unknown response '%s'" % reply)
+
+
def normalise_int32(ivalue):
    """normalise a ldap integer to signed 32 bit

    :param ivalue: an int, or a string holding a decimal integer
    :return: string form of the value reinterpreted as signed 32-bit
    """
    # Convert once instead of three times as the original did.
    value = int(ivalue)
    if value & 0x80000000 and value > 0:
        # High bit set on a positive value: reinterpret as negative 32-bit.
        return str(value - 0x100000000)
    # Preserve the original spelling (e.g. string input) unchanged.
    return str(ivalue)
+
+
+# Sometimes in PY3 we have variables whose content can be 'bytes' or
+# 'str' and we can't be sure which. Generally this is because the
+# code variable can be initialised (or reassigned) a value from different
+# api(s) or functions depending on complex conditions or logic. Or another
+# common case is in PY2 the variable is 'type <str>' and in PY3 it is
+# 'class <str>' and the function to use e.g. b64encode requires 'bytes'
+# in PY3. In such cases it would be nice to avoid excessive testing in
+# the client code. Calling such a helper function should be avoided
+# if possible but sometimes this just isn't possible.
+# If a 'str' object is passed in it is encoded using 'utf8' or if 'bytes'
+# is passed in it is returned unchanged.
+# Using this function is PY2/PY3 code should ensure in most cases
+# the PY2 code runs unchanged in PY2 whereas the code in PY3 possibly
+# encodes the variable (see PY2 implementation of this function below)
def get_bytes(bytesorstring):
    """Return *bytesorstring* as bytes.

    str input is encoded as UTF-8; bytes input is returned unchanged.

    :raises ValueError: for any other type.
    """
    if isinstance(bytesorstring, bytes):
        return bytesorstring
    if isinstance(bytesorstring, str):
        return bytesorstring.encode('utf8')
    raise ValueError('Expected bytes or string for %s:%s' % (type(bytesorstring), bytesorstring))
+
+# helper function to get a string from a variable that maybe 'str' or
+# 'bytes' if 'bytes' then it is decoded using 'utf8'. If 'str' is passed
+# it is returned unchanged
+# Using this function is PY2/PY3 code should ensure in most cases
+# the PY2 code runs unchanged in PY2 whereas the code in PY3 possibly
+# decodes the variable (see PY2 implementation of this function below)
def get_string(bytesorstring):
    """Return *bytesorstring* as str.

    bytes input is decoded as UTF-8; str input is returned unchanged.

    :raises ValueError: for any other type.
    """
    if isinstance(bytesorstring, str):
        return bytesorstring
    if isinstance(bytesorstring, bytes):
        return bytesorstring.decode('utf8')
    raise ValueError('Expected bytes or string for %s:%s' % (type(bytesorstring), bytesorstring))
diff --git a/python/samba/dbchecker.py b/python/samba/dbchecker.py
new file mode 100644
index 0000000..e07afdc
--- /dev/null
+++ b/python/samba/dbchecker.py
@@ -0,0 +1,2935 @@
+# Samba4 AD database checker
+#
+# Copyright (C) Andrew Tridgell 2011
+# Copyright (C) Matthieu Patou <mat@matws.net> 2011
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import ldb
+import samba
+import time
+from base64 import b64decode, b64encode
+from samba import dsdb
+from samba import common
+from samba.dcerpc import misc
+from samba.dcerpc import drsuapi
+from samba.ndr import ndr_unpack, ndr_pack
+from samba.dcerpc import drsblobs
+from samba.samdb import dsdb_Dn
+from samba.dcerpc import security
+from samba.descriptor import (
+ get_wellknown_sds,
+ get_deletedobjects_descriptor,
+ get_diff_sds
+)
+from samba.auth import system_session, admin_session
+from samba.netcmd import CommandError
+from samba.netcmd.fsmo import get_fsmo_roleowner
+from samba.colour import c_RED, c_DARK_YELLOW, c_DARK_CYAN, c_DARK_GREEN
+
def dump_attr_values(vals):
    """Stringify a value list, using utf-8 if possible (which some tests
    want), or the python bytes representation otherwise (with leading
    'b' and escapes like b'\\x00').
    """
    def render(value):
        try:
            return value.decode('utf-8')
        except UnicodeDecodeError:
            return repr(value)
    return ','.join(render(v) for v in vals)
+
+
+class dbcheck(object):
+ """check a SAM database for errors"""
+
    def __init__(self, samdb, samdb_schema=None, verbose=False, fix=False,
                 yes=False, quiet=False, in_transaction=False,
                 quick_membership_checks=False,
                 reset_well_known_acls=False,
                 check_expired_tombstones=False,
                 colour=False):
        """Create a checker bound to *samdb* and cache database-wide facts.

        :param samdb: SamDB connection to check
        :param samdb_schema: optional connection holding the schema
            (defaults to samdb itself)
        :param verbose: print the details (LDIF) of each change
        :param fix: offer to fix the errors that are found
        :param yes: assume 'yes' for every confirmation
        :param quiet: suppress progress messages
        :param in_transaction: True when the caller runs the check in a
            transaction; operation failures then raise CommandError
        :param quick_membership_checks: skip expensive membership checks
        :param reset_well_known_acls: allow resetting ACLs to the
            well-known defaults
        :param check_expired_tombstones: also report expired tombstones
        :param colour: colourise the report output
        """
        self.samdb = samdb
        self.dict_oid_name = None
        self.samdb_schema = (samdb_schema or samdb)
        self.verbose = verbose
        self.fix = fix
        self.yes = yes
        self.quiet = quiet
        self.colour = colour
        # The fix_*/remove_* flags below hold per-error-class prompt state:
        # they start as False and confirm_all() flips them to 'ALL' or
        # 'NONE' once the operator answers "all" or "none".
        self.remove_all_unknown_attributes = False
        self.remove_all_empty_attributes = False
        self.fix_all_normalisation = False
        self.fix_all_duplicates = False
        self.fix_all_DN_GUIDs = False
        self.fix_all_binary_dn = False
        self.remove_implausible_deleted_DN_links = False
        self.remove_plausible_deleted_DN_links = False
        self.fix_all_string_dn_component_mismatch = False
        self.fix_all_GUID_dn_component_mismatch = False
        self.fix_all_SID_dn_component_mismatch = False
        self.fix_all_SID_dn_component_missing = False
        self.fix_all_old_dn_string_component_mismatch = False
        self.fix_all_metadata = False
        self.fix_time_metadata = False
        self.fix_undead_linked_attributes = False
        self.fix_all_missing_backlinks = False
        self.fix_all_orphaned_backlinks = False
        self.fix_all_missing_forward_links = False
        self.duplicate_link_cache = dict()
        self.recover_all_forward_links = False
        self.fix_rmd_flags = False
        self.fix_ntsecuritydescriptor = False
        self.fix_ntsecuritydescriptor_owner_group = False
        self.seize_fsmo_role = False
        self.move_to_lost_and_found = False
        self.fix_instancetype = False
        self.fix_replmetadata_zero_invocationid = False
        self.fix_replmetadata_duplicate_attid = False
        self.fix_replmetadata_wrong_attid = False
        self.fix_replmetadata_unsorted_attid = False
        self.fix_deleted_deleted_objects = False
        self.fix_dn = False
        self.fix_base64_userparameters = False
        self.fix_utf8_userparameters = False
        self.fix_doubled_userparameters = False
        self.fix_sid_rid_set_conflict = False
        self.quick_membership_checks = quick_membership_checks
        self.reset_well_known_acls = reset_well_known_acls
        self.check_expired_tombstones = check_expired_tombstones
        self.expired_tombstones = 0
        self.reset_all_well_known_acls = False
        self.in_transaction = in_transaction
        # Well-known DNs used repeatedly by the individual checks.
        self.infrastructure_dn = ldb.Dn(samdb, "CN=Infrastructure," + samdb.domain_dn())
        self.naming_dn = ldb.Dn(samdb, "CN=Partitions,%s" % samdb.get_config_basedn())
        self.schema_dn = samdb.get_schema_basedn()
        self.rid_dn = ldb.Dn(samdb, "CN=RID Manager$,CN=System," + samdb.domain_dn())
        self.ntds_dsa = ldb.Dn(samdb, samdb.get_dsServiceName())
        self.class_schemaIDGUID = {}
        self.wellknown_sds = get_wellknown_sds(self.samdb)
        self.fix_all_missing_objectclass = False
        self.fix_missing_deleted_objects = False
        self.fix_replica_locations = False
        self.fix_missing_rid_set_master = False
        self.fix_changes_after_deletion_bug = False

        self.dn_set = set()
        self.link_id_cache = {}
        self.name_map = {}
        # Record the DnsAdmins SID (if the group exists) for use when
        # rendering well-known security descriptors.
        try:
            base_dn = "CN=DnsAdmins,%s" % samdb.get_wellknown_dn(
                samdb.get_default_basedn(),
                dsdb.DS_GUID_USERS_CONTAINER)
            res = samdb.search(base=base_dn, scope=ldb.SCOPE_BASE,
                               attrs=["objectSid"])
            dnsadmins_sid = ndr_unpack(security.dom_sid, res[0]["objectSid"][0])
            self.name_map['DnsAdmins'] = str(dnsadmins_sid)
        except ldb.LdbError as e5:
            (enum, estr) = e5.args
            if enum != ldb.ERR_NO_SUCH_OBJECT:
                raise

        self.system_session_info = system_session()
        self.admin_session_info = admin_session(None, samdb.get_domain_sid())

        res = self.samdb.search(base=self.ntds_dsa, scope=ldb.SCOPE_BASE, attrs=['msDS-hasMasterNCs', 'hasMasterNCs'])
        if "msDS-hasMasterNCs" in res[0]:
            self.write_ncs = res[0]["msDS-hasMasterNCs"]
        else:
            # If the Forest Level is less than 2003 then there is no
            # msDS-hasMasterNCs, so we fall back to hasMasterNCs
            # no need to merge as all the NCs that are in hasMasterNCs must
            # also be in msDS-hasMasterNCs (but not the opposite)
            if "hasMasterNCs" in res[0]:
                self.write_ncs = res[0]["hasMasterNCs"]
            else:
                self.write_ncs = None

        res = self.samdb.search(base="", scope=ldb.SCOPE_BASE, attrs=['namingContexts'])
        self.deleted_objects_containers = []
        self.ncs_lacking_deleted_containers = []
        self.dns_partitions = []
        try:
            self.ncs = res[0]["namingContexts"]
        except KeyError:
            pass
        except IndexError:
            pass

        # Partition the NCs into those with a Deleted Objects container and
        # those lacking one; the latter are reported (and optionally fixed)
        # by check_deleted_objects_containers().
        for nc in self.ncs:
            try:
                dn = self.samdb.get_wellknown_dn(ldb.Dn(self.samdb, nc.decode('utf8')),
                                                 dsdb.DS_GUID_DELETED_OBJECTS_CONTAINER)
                self.deleted_objects_containers.append(dn)
            except KeyError:
                self.ncs_lacking_deleted_containers.append(ldb.Dn(self.samdb, nc.decode('utf8')))

        domaindns_zone = 'DC=DomainDnsZones,%s' % self.samdb.get_default_basedn()
        forestdns_zone = 'DC=ForestDnsZones,%s' % self.samdb.get_root_basedn()
        domain = self.samdb.search(scope=ldb.SCOPE_ONELEVEL,
                                   attrs=["msDS-NC-Replica-Locations", "msDS-NC-RO-Replica-Locations"],
                                   base=self.samdb.get_partitions_dn(),
                                   expression="(&(objectClass=crossRef)(ncName=%s))" % domaindns_zone)
        if len(domain) == 1:
            self.dns_partitions.append((ldb.Dn(self.samdb, forestdns_zone), domain[0]))

        forest = self.samdb.search(scope=ldb.SCOPE_ONELEVEL,
                                   attrs=["msDS-NC-Replica-Locations", "msDS-NC-RO-Replica-Locations"],
                                   base=self.samdb.get_partitions_dn(),
                                   expression="(&(objectClass=crossRef)(ncName=%s))" % forestdns_zone)
        if len(forest) == 1:
            self.dns_partitions.append((ldb.Dn(self.samdb, domaindns_zone), forest[0]))

        # Are we the RID master for this domain?
        fsmo_dn = ldb.Dn(self.samdb, "CN=RID Manager$,CN=System," + self.samdb.domain_dn())
        rid_master = get_fsmo_roleowner(self.samdb, fsmo_dn, "rid")
        if ldb.Dn(self.samdb, self.samdb.get_dsServiceName()) == rid_master:
            self.is_rid_master = True
        else:
            self.is_rid_master = False

        # To get your rid set
        # 1. Get server name
        res = self.samdb.search(base=ldb.Dn(self.samdb, self.samdb.get_serverName()),
                                scope=ldb.SCOPE_BASE, attrs=["serverReference"])
        # 2. Get server reference
        self.server_ref_dn = ldb.Dn(self.samdb, res[0]['serverReference'][0].decode('utf8'))

        # 3. Get RID Set
        res = self.samdb.search(base=self.server_ref_dn,
                                scope=ldb.SCOPE_BASE, attrs=['rIDSetReferences'])
        if "rIDSetReferences" in res[0]:
            self.rid_set_dn = ldb.Dn(self.samdb, res[0]['rIDSetReferences'][0].decode('utf8'))
        else:
            self.rid_set_dn = None

        ntds_service_dn = "CN=Directory Service,CN=Windows NT,CN=Services,%s" % \
            self.samdb.get_config_basedn().get_linearized()
        res = samdb.search(base=ntds_service_dn,
                           scope=ldb.SCOPE_BASE,
                           expression="(objectClass=nTDSService)",
                           attrs=["tombstoneLifetime"])
        if "tombstoneLifetime" in res[0]:
            self.tombstoneLifetime = int(res[0]["tombstoneLifetime"][0])
        else:
            # Default lifetime (days) used when the attribute is absent.
            self.tombstoneLifetime = 180

        self.compatibleFeatures = []
        self.requiredFeatures = []

        # Read the feature flags from the @SAMBA_DSDB special record, if
        # present.
        try:
            res = self.samdb.search(scope=ldb.SCOPE_BASE,
                                    base="@SAMBA_DSDB",
                                    attrs=["compatibleFeatures",
                                           "requiredFeatures"])
            if "compatibleFeatures" in res[0]:
                self.compatibleFeatures = res[0]["compatibleFeatures"]
            if "requiredFeatures" in res[0]:
                self.requiredFeatures = res[0]["requiredFeatures"]
        except ldb.LdbError as e6:
            (enum, estr) = e6.args
            if enum != ldb.ERR_NO_SUCH_OBJECT:
                raise
+
+ def check_database(self, DN=None, scope=ldb.SCOPE_SUBTREE, controls=None,
+ attrs=None):
+ """perform a database check, returning the number of errors found"""
+ res = self.samdb.search(base=DN, scope=scope, attrs=['dn'], controls=controls)
+ self.report('Checking %u objects' % len(res))
+ error_count = 0
+ self.unfixable_errors = 0
+
+ error_count += self.check_deleted_objects_containers()
+
+ self.attribute_or_class_ids = set()
+
+ for object in res:
+ self.dn_set.add(str(object.dn))
+ error_count += self.check_object(object.dn, requested_attrs=attrs)
+
+ if DN is None:
+ error_count += self.check_rootdse()
+
+ if self.expired_tombstones > 0:
+ self.report("NOTICE: found %d expired tombstones, "
+ "'samba' will remove them daily, "
+ "'samba-tool domain tombstones expunge' "
+ "would do that immediately." % (
+ self.expired_tombstones))
+
+ self.report('Checked %u objects (%u errors)' %
+ (len(res), error_count + self.unfixable_errors))
+
+ if self.unfixable_errors != 0:
+ self.report(f"WARNING: {self.unfixable_errors} "
+ "of these errors cannot be automatically fixed.")
+
+ if error_count != 0 and not self.fix:
+ self.report("Please use 'samba-tool dbcheck --fix' to fix "
+ f"{error_count} errors")
+
+ return error_count
+
    def check_deleted_objects_containers(self):
        """This function only fixes conflicts on the Deleted Objects
        containers, not the attributes"""
        error_count = 0
        for nc in self.ncs_lacking_deleted_containers:
            # The schema NC is deliberately skipped here.
            if nc == self.schema_dn:
                continue
            error_count += 1
            self.report("ERROR: NC %s lacks a reference to a Deleted Objects container" % nc)
            if not self.confirm_all('Fix missing Deleted Objects container for %s?' % (nc), 'fix_missing_deleted_objects'):
                continue

            dn = ldb.Dn(self.samdb, "CN=Deleted Objects")
            dn.add_base(nc)

            conflict_dn = None
            try:
                # If something already exists here, add a conflict
                res = self.samdb.search(base=dn, scope=ldb.SCOPE_BASE, attrs=[],
                                        controls=["show_deleted:1", "extended_dn:1:1",
                                                  "show_recycled:1", "reveal_internals:0"])
                if len(res) != 0:
                    guid = res[0].dn.get_extended_component("GUID")
                    conflict_dn = ldb.Dn(self.samdb,
                                         "CN=Deleted Objects\\0ACNF:%s" % str(misc.GUID(guid)))
                    conflict_dn.add_base(nc)

            except ldb.LdbError as e2:
                (enum, estr) = e2.args
                if enum == ldb.ERR_NO_SUCH_OBJECT:
                    # Nothing in the way -- we can just add the container.
                    pass
                else:
                    self.report("Couldn't check for conflicting Deleted Objects container: %s" % estr)
                    return 1

            # Move any existing placeholder aside under a CNF (conflict)
            # name before adding the real container.
            if conflict_dn is not None:
                try:
                    self.samdb.rename(dn, conflict_dn, ["show_deleted:1", "relax:0", "show_recycled:1"])
                except ldb.LdbError as e1:
                    (enum, estr) = e1.args
                    self.report("Couldn't move old Deleted Objects placeholder: %s to %s: %s" % (dn, conflict_dn, estr))
                    return 1

            # Refresh wellKnownObjects links
            res = self.samdb.search(base=nc, scope=ldb.SCOPE_BASE,
                                    attrs=['wellKnownObjects'],
                                    controls=["show_deleted:1", "extended_dn:0",
                                              "show_recycled:1", "reveal_internals:0"])
            if len(res) != 1:
                self.report("wellKnownObjects was not found for NC %s" % nc)
                return 1

            # Prevent duplicate deleted objects containers just in case
            wko = res[0]["wellKnownObjects"]
            listwko = []
            proposed_objectguid = None
            for o in wko:
                dsdb_dn = dsdb_Dn(self.samdb, o.decode('utf8'), dsdb.DSDB_SYNTAX_BINARY_DN)
                if self.is_deleted_objects_dn(dsdb_dn):
                    self.report("wellKnownObjects had duplicate Deleted Objects value %s" % o)
                    # We really want to put this back in the same spot
                    # as the original one, so that on replication we
                    # merge, rather than conflict.
                    proposed_objectguid = dsdb_dn.dn.get_extended_component("GUID")
                listwko.append(str(o))

            if proposed_objectguid is not None:
                guid_suffix = "\nobjectGUID: %s" % str(misc.GUID(proposed_objectguid))
            else:
                wko_prefix = "B:32:%s" % dsdb.DS_GUID_DELETED_OBJECTS_CONTAINER
                listwko.append('%s:%s' % (wko_prefix, dn))
                guid_suffix = ""

            # Give the new container the standard Deleted Objects security
            # descriptor for this domain (base64 for the LDIF below).
            domain_sid = security.dom_sid(self.samdb.get_domain_sid())
            sec_desc = get_deletedobjects_descriptor(domain_sid,
                                                     name_map=self.name_map)
            sec_desc_b64 = b64encode(sec_desc).decode('utf8')

            # Insert a brand new Deleted Objects container
            self.samdb.add_ldif("""dn: %s
objectClass: top
objectClass: container
description: Container for deleted objects
isDeleted: TRUE
isCriticalSystemObject: TRUE
showInAdvancedViewOnly: TRUE
nTSecurityDescriptor:: %s
systemFlags: -1946157056%s""" % (dn, sec_desc_b64, guid_suffix),
                                controls=["relax:0", "provision:0"])

            delta = ldb.Message()
            delta.dn = ldb.Dn(self.samdb, str(res[0]["dn"]))
            delta["wellKnownObjects"] = ldb.MessageElement(listwko,
                                                           ldb.FLAG_MOD_REPLACE,
                                                           "wellKnownObjects")

            # Insert the link to the brand new container
            if self.do_modify(delta, ["relax:0"],
                              "NC %s lacks Deleted Objects WKGUID" % nc,
                              validate=False):
                self.report("Added %s well known guid link" % dn)

            self.deleted_objects_containers.append(dn)

        return error_count
+
+ def report(self, msg):
+ """print a message unless quiet is set"""
+ if self.quiet:
+ return
+ if self.colour:
+ if msg.startswith('ERROR'):
+ msg = c_RED('ERROR') + msg[5:]
+ elif msg.startswith('WARNING'):
+ msg = c_DARK_YELLOW('WARNING') + msg[7:]
+ elif msg.startswith('INFO'):
+ msg = c_DARK_CYAN('INFO') + msg[4:]
+ elif msg.startswith('NOTICE'):
+ msg = c_DARK_CYAN('NOTICE') + msg[6:]
+ elif msg.startswith('NOTE'):
+ msg = c_DARK_CYAN('NOTE') + msg[4:]
+ elif msg.startswith('SKIPPING'):
+ msg = c_DARK_GREEN('SKIPPING') + msg[8:]
+
+ print(msg)
+
+ def confirm(self, msg, allow_all=False, forced=False):
+ """confirm a change"""
+ if not self.fix:
+ return False
+ if self.quiet:
+ return self.yes
+ if self.yes:
+ forced = True
+ return common.confirm(msg, forced=forced, allow_all=allow_all)
+
+ ################################################################
+ # a local confirm function with support for 'all'
+ def confirm_all(self, msg, all_attr):
+ """confirm a change with support for "all" """
+ if not self.fix:
+ return False
+ if getattr(self, all_attr) == 'NONE':
+ return False
+ if getattr(self, all_attr) == 'ALL':
+ forced = True
+ else:
+ forced = self.yes
+ if self.quiet:
+ return forced
+ c = common.confirm(msg, forced=forced, allow_all=True)
+ if c == 'ALL':
+ setattr(self, all_attr, 'ALL')
+ return True
+ if c == 'NONE':
+ setattr(self, all_attr, 'NONE')
+ return False
+ return c
+
+ def do_delete(self, dn, controls, msg):
+ """delete dn with optional verbose output"""
+ if self.verbose:
+ self.report("delete DN %s" % dn)
+ try:
+ controls = controls + ["local_oid:%s:0" % dsdb.DSDB_CONTROL_DBCHECK]
+ self.samdb.delete(dn, controls=controls)
+ except Exception as err:
+ if self.in_transaction:
+ raise CommandError("%s : %s" % (msg, err))
+ self.report("%s : %s" % (msg, err))
+ return False
+ return True
+
+ def do_modify(self, m, controls, msg, validate=True):
+ """perform a modify with optional verbose output"""
+ controls = controls + ["local_oid:%s:0" % dsdb.DSDB_CONTROL_DBCHECK]
+ if self.verbose:
+ self.report(self.samdb.write_ldif(m, ldb.CHANGETYPE_MODIFY))
+ self.report("controls: %r" % controls)
+ try:
+ self.samdb.modify(m, controls=controls, validate=validate)
+ except Exception as err:
+ if self.in_transaction:
+ raise CommandError("%s : %s" % (msg, err))
+ self.report("%s : %s" % (msg, err))
+ return False
+ return True
+
+ def do_rename(self, from_dn, to_rdn, to_base, controls, msg):
+ """perform a rename with optional verbose output"""
+ if self.verbose:
+ self.report("""dn: %s
+changeType: modrdn
+newrdn: %s
+deleteOldRdn: 1
+newSuperior: %s""" % (str(from_dn), str(to_rdn), str(to_base)))
+ try:
+ to_dn = to_rdn + to_base
+ controls = controls + ["local_oid:%s:0" % dsdb.DSDB_CONTROL_DBCHECK]
+ self.samdb.rename(from_dn, to_dn, controls=controls)
+ except Exception as err:
+ if self.in_transaction:
+ raise CommandError("%s : %s" % (msg, err))
+ self.report("%s : %s" % (msg, err))
+ return False
+ return True
+
+ def get_attr_linkID_and_reverse_name(self, attrname):
+ if attrname in self.link_id_cache:
+ return self.link_id_cache[attrname]
+ linkID = self.samdb_schema.get_linkId_from_lDAPDisplayName(attrname)
+ if linkID:
+ revname = self.samdb_schema.get_backlink_from_lDAPDisplayName(attrname)
+ else:
+ revname = None
+ self.link_id_cache[attrname] = (linkID, revname)
+ return linkID, revname
+
+ def err_empty_attribute(self, dn, attrname):
+ """fix empty attributes"""
+ self.report("ERROR: Empty attribute %s in %s" % (attrname, dn))
+ if not self.confirm_all('Remove empty attribute %s from %s?' % (attrname, dn), 'remove_all_empty_attributes'):
+ self.report("Not fixing empty attribute %s" % attrname)
+ return
+
+ m = ldb.Message()
+ m.dn = dn
+ m[attrname] = ldb.MessageElement('', ldb.FLAG_MOD_DELETE, attrname)
+ if self.do_modify(m, ["relax:0", "show_recycled:1"],
+ "Failed to remove empty attribute %s" % attrname, validate=False):
+ self.report("Removed empty attribute %s" % attrname)
+
+ def err_normalise_mismatch(self, dn, attrname, values):
+ """fix attribute normalisation errors, without altering sort order"""
+ self.report("ERROR: Normalisation error for attribute %s in %s" % (attrname, dn))
+ mod_list = []
+ for val in values:
+ normalised = self.samdb.dsdb_normalise_attributes(
+ self.samdb_schema, attrname, [val])
+ if len(normalised) != 1:
+ self.report("Unable to normalise value '%s'" % val)
+ mod_list.append((val, ''))
+ elif (normalised[0] != val):
+ self.report("value '%s' should be '%s'" % (val, normalised[0]))
+ mod_list.append((val, normalised[0]))
+ if not self.confirm_all('Fix normalisation for %s from %s?' % (attrname, dn), 'fix_all_normalisation'):
+ self.report("Not fixing attribute %s" % attrname)
+ return
+
+ m = ldb.Message()
+ m.dn = dn
+ for i in range(0, len(mod_list)):
+ (val, nval) = mod_list[i]
+ m['value_%u' % i] = ldb.MessageElement(val, ldb.FLAG_MOD_DELETE, attrname)
+ if nval != '':
+ m['normv_%u' % i] = ldb.MessageElement(nval, ldb.FLAG_MOD_ADD,
+ attrname)
+
+ if self.do_modify(m, ["relax:0", "show_recycled:1"],
+ "Failed to normalise attribute %s" % attrname,
+ validate=False):
+ self.report("Normalised attribute %s" % attrname)
+
+ def err_normalise_mismatch_replace(self, dn, attrname, values):
+ """fix attribute normalisation and/or sort errors"""
+ normalised = self.samdb.dsdb_normalise_attributes(self.samdb_schema, attrname, values)
+ if list(normalised) == values:
+ # how we got here is a mystery.
+ return
+ self.report("ERROR: Normalisation error for attribute '%s' in '%s'" % (attrname, dn))
+ self.report("Values/Order of values do/does not match: %s/%s!" % (values, list(normalised)))
+ if not self.confirm_all("Fix normalisation for '%s' from '%s'?" % (attrname, dn), 'fix_all_normalisation'):
+ self.report("Not fixing attribute '%s'" % attrname)
+ return
+
+ m = ldb.Message()
+ m.dn = dn
+ m[attrname] = ldb.MessageElement(normalised, ldb.FLAG_MOD_REPLACE, attrname)
+
+ if self.do_modify(m, ["relax:0", "show_recycled:1"],
+ "Failed to normalise attribute %s" % attrname,
+ validate=False):
+ self.report("Normalised attribute %s" % attrname)
+
+ def err_duplicate_values(self, dn, attrname, dup_values, values):
+ """fix duplicate attribute values"""
+ self.report("ERROR: Duplicate values for attribute '%s' in '%s'" % (attrname, dn))
+ self.report("Values contain a duplicate: [%s]/[%s]!" %
+ (dump_attr_values(dup_values), dump_attr_values(values)))
+ if not self.confirm_all("Fix duplicates for '%s' from '%s'?" % (attrname, dn), 'fix_all_duplicates'):
+ self.report("Not fixing attribute '%s'" % attrname)
+ return
+
+ m = ldb.Message()
+ m.dn = dn
+ m[attrname] = ldb.MessageElement(values, ldb.FLAG_MOD_REPLACE, attrname)
+
+ if self.do_modify(m, ["relax:0", "show_recycled:1"],
+ "Failed to remove duplicate value on attribute %s" % attrname,
+ validate=False):
+ self.report("Removed duplicate value on attribute %s" % attrname)
+
+ def is_deleted_objects_dn(self, dsdb_dn):
+ """see if a dsdb_Dn is the special Deleted Objects DN"""
+ return dsdb_dn.prefix == "B:32:%s:" % dsdb.DS_GUID_DELETED_OBJECTS_CONTAINER
+
+ def err_missing_objectclass(self, dn):
+ """handle object without objectclass"""
+ self.report("ERROR: missing objectclass in object %s. If you have another working DC, please run 'samba-tool drs replicate --full-sync --local <destinationDC> <sourceDC> %s'" % (dn, self.samdb.get_nc_root(dn)))
+ if not self.confirm_all("If you cannot re-sync from another DC, do you wish to delete object '%s'?" % dn, 'fix_all_missing_objectclass'):
+ self.report("Not deleting object with missing objectclass '%s'" % dn)
+ return
+ if self.do_delete(dn, ["relax:0"],
+ "Failed to remove DN %s" % dn):
+ self.report("Removed DN %s" % dn)
+
+ def err_deleted_dn(self, dn, attrname, val, dsdb_dn, correct_dn, remove_plausible=False):
+ """handle a DN pointing to a deleted object"""
+ if not remove_plausible:
+ self.report("ERROR: target DN is deleted for %s in object %s - %s" % (attrname, dn, val))
+ self.report("Target GUID points at deleted DN %r" % str(correct_dn))
+ if not self.confirm_all('Remove DN link?', 'remove_implausible_deleted_DN_links'):
+ self.report("Not removing")
+ return
+ else:
+ self.report("WARNING: target DN is deleted for %s in object %s - %s" % (attrname, dn, val))
+ self.report("Target GUID points at deleted DN %r" % str(correct_dn))
+ if not self.confirm_all('Remove stale DN link?', 'remove_plausible_deleted_DN_links'):
+ self.report("Not removing")
+ return
+
+ m = ldb.Message()
+ m.dn = dn
+ m['old_value'] = ldb.MessageElement(val, ldb.FLAG_MOD_DELETE, attrname)
+ if self.do_modify(m, ["show_recycled:1",
+ "local_oid:%s:0" % dsdb.DSDB_CONTROL_REPLMD_VANISH_LINKS],
+ "Failed to remove deleted DN attribute %s" % attrname):
+ self.report("Removed deleted DN on attribute %s" % attrname)
+
    def err_missing_target_dn_or_GUID(self, dn, attrname, val, dsdb_dn):
        """handle a missing target DN (if specified, GUID form can't be found,
        and otherwise DN string form can't be found)

        Returns the number of errors to add to the caller's count: 1 when
        the dangling link should have been removed already, 0 when the
        dangling value is expected in normal operation (left alone, or
        removed as a plausible leftover).
        """

        # Don't change anything if the object itself is deleted
        if str(dn).find('\\0ADEL') != -1:
            # We don't bump the error count as Samba produces these
            # in normal operation
            self.report("WARNING: no target object found for GUID "
                        "component link %s in deleted object "
                        "%s - %s" % (attrname, dn, val))
            self.report("Not removing dangling one-way "
                        "link on deleted object "
                        "(tombstone garbage collection in progress?)")
            return 0

        # check if its a backlink
        linkID, _ = self.get_attr_linkID_and_reverse_name(attrname)
        if (linkID & 1 == 0) and str(dsdb_dn).find('\\0ADEL') == -1:
            # forward link (even linkID) to a target that is not a tombstone

            # NOTE(review): this second lookup repeats the one above and
            # could simply reuse its result
            linkID, reverse_link_name \
                = self.get_attr_linkID_and_reverse_name(attrname)
            if reverse_link_name is not None:
                self.report("WARNING: no target object found for GUID "
                            "component for one-way forward link "
                            "%s in object "
                            "%s - %s" % (attrname, dn, val))
                self.report("Not removing dangling forward link")
                return 0

            nc_root = self.samdb.get_nc_root(dn)
            try:
                target_nc_root = self.samdb.get_nc_root(dsdb_dn.dn)
            except ldb.LdbError as e:
                (enum, estr) = e.args
                if enum != ldb.ERR_NO_SUCH_OBJECT:
                    raise
                # the target DN is outside any NC we know about
                target_nc_root = None

            if target_nc_root is None:
                # We don't bump the error count as Samba produces
                # these in normal operation creating a lab domain (due
                # to the way the rename is handled, links to
                # now-expunged objects will never be fixed to stay
                # inside the NC
                # NOTE(review): the format string below has no separator
                # between "NCs" and the following "%s - %s" fragment
                self.report("WARNING: no target object found for GUID "
                            "component for link "
                            "%s in object to %s outside our NCs"
                            "%s - %s" % (attrname, dsdb_dn.dn, dn, val))
                self.report("Not removing dangling one-way "
                            "left-over link outside our NCs "
                            "(we might be building a renamed/lab domain)")
                return 0

            if nc_root != target_nc_root:
                # We don't bump the error count as Samba produces these
                # in normal operation
                self.report("WARNING: no target object found for GUID "
                            "component for cross-partition link "
                            "%s in object "
                            "%s - %s" % (attrname, dn, val))
                self.report("Not removing dangling one-way "
                            "cross-partition link "
                            "(we might be mid-replication)")
                return 0

            # Due to our link handling one-way links pointing to
            # missing objects are plausible.
            #
            # We don't bump the error count as Samba produces these
            # in normal operation
            self.report("WARNING: no target object found for GUID "
                        "component for DN value %s in object "
                        "%s - %s" % (attrname, dn, val))
            # remove_plausible=True: treated as an expected stale link
            self.err_deleted_dn(dn, attrname, val,
                                dsdb_dn, dsdb_dn, True)
            return 0

        # We bump the error count here, as we should have deleted this
        self.report("ERROR: no target object found for GUID "
                    "component for link %s in object "
                    "%s - %s" % (attrname, dn, val))
        self.err_deleted_dn(dn, attrname, val, dsdb_dn, dsdb_dn, False)
        return 1
+
+ def err_missing_dn_GUID_component(self, dn, attrname, val, dsdb_dn, errstr):
+ """handle a missing GUID extended DN component"""
+ self.report("ERROR: %s component for %s in object %s - %s" % (errstr, attrname, dn, val))
+ controls = ["extended_dn:1:1", "show_recycled:1"]
+ try:
+ res = self.samdb.search(base=str(dsdb_dn.dn), scope=ldb.SCOPE_BASE,
+ attrs=[], controls=controls)
+ except ldb.LdbError as e7:
+ (enum, estr) = e7.args
+ self.report("unable to find object for DN %s - (%s)" % (dsdb_dn.dn, estr))
+ if enum != ldb.ERR_NO_SUCH_OBJECT:
+ raise
+ self.err_missing_target_dn_or_GUID(dn, attrname, val, dsdb_dn)
+ return
+ if len(res) == 0:
+ self.report("unable to find object for DN %s" % dsdb_dn.dn)
+ self.err_missing_target_dn_or_GUID(dn, attrname, val, dsdb_dn)
+ return
+ dsdb_dn.dn = res[0].dn
+
+ if not self.confirm_all('Change DN to %s?' % str(dsdb_dn), 'fix_all_DN_GUIDs'):
+ self.report("Not fixing %s" % errstr)
+ return
+ m = ldb.Message()
+ m.dn = dn
+ m['old_value'] = ldb.MessageElement(val, ldb.FLAG_MOD_DELETE, attrname)
+ m['new_value'] = ldb.MessageElement(str(dsdb_dn), ldb.FLAG_MOD_ADD, attrname)
+
+ if self.do_modify(m, ["show_recycled:1"],
+ "Failed to fix %s on attribute %s" % (errstr, attrname)):
+ self.report("Fixed %s on attribute %s" % (errstr, attrname))
+
+ def err_incorrect_binary_dn(self, dn, attrname, val, dsdb_dn, errstr):
+ """handle an incorrect binary DN component"""
+ self.report("ERROR: %s binary component for %s in object %s - %s" % (errstr, attrname, dn, val))
+
+ if not self.confirm_all('Change DN to %s?' % str(dsdb_dn), 'fix_all_binary_dn'):
+ self.report("Not fixing %s" % errstr)
+ return
+ m = ldb.Message()
+ m.dn = dn
+ m['old_value'] = ldb.MessageElement(val, ldb.FLAG_MOD_DELETE, attrname)
+ m['new_value'] = ldb.MessageElement(str(dsdb_dn), ldb.FLAG_MOD_ADD, attrname)
+
+ if self.do_modify(m, ["show_recycled:1"],
+ "Failed to fix %s on attribute %s" % (errstr, attrname)):
+ self.report("Fixed %s on attribute %s" % (errstr, attrname))
+
+ def err_dn_string_component_old(self, dn, attrname, val, dsdb_dn, correct_dn):
+ """handle a DN string being incorrect due to a rename or delete"""
+ self.report("NOTE: old (due to rename or delete) DN string component for %s in object %s - %s" % (attrname, dn, val))
+ dsdb_dn.dn = correct_dn
+
+ if not self.confirm_all('Change DN to %s?' % str(dsdb_dn),
+ 'fix_all_old_dn_string_component_mismatch'):
+ self.report("Not fixing old string component")
+ return
+ m = ldb.Message()
+ m.dn = dn
+ m['old_value'] = ldb.MessageElement(val, ldb.FLAG_MOD_DELETE, attrname)
+ m['new_value'] = ldb.MessageElement(str(dsdb_dn), ldb.FLAG_MOD_ADD, attrname)
+ if self.do_modify(m, ["show_recycled:1",
+ "local_oid:%s:1" % dsdb.DSDB_CONTROL_DBCHECK_FIX_LINK_DN_NAME],
+ "Failed to fix old DN string on attribute %s" % (attrname)):
+ self.report("Fixed old DN string on attribute %s" % (attrname))
+
+ def err_dn_component_target_mismatch(self, dn, attrname, val, dsdb_dn, correct_dn, mismatch_type):
+ """handle a DN string being incorrect"""
+ self.report("ERROR: incorrect DN %s component for %s in object %s - %s" % (mismatch_type, attrname, dn, val))
+ dsdb_dn.dn = correct_dn
+
+ if not self.confirm_all('Change DN to %s?' % str(dsdb_dn),
+ 'fix_all_%s_dn_component_mismatch' % mismatch_type):
+ self.report("Not fixing %s component mismatch" % mismatch_type)
+ return
+ m = ldb.Message()
+ m.dn = dn
+ m['old_value'] = ldb.MessageElement(val, ldb.FLAG_MOD_DELETE, attrname)
+ m['new_value'] = ldb.MessageElement(str(dsdb_dn), ldb.FLAG_MOD_ADD, attrname)
+ if self.do_modify(m, ["show_recycled:1"],
+ "Failed to fix incorrect DN %s on attribute %s" % (mismatch_type, attrname)):
+ self.report("Fixed incorrect DN %s on attribute %s" % (mismatch_type, attrname))
+
+ def err_dn_component_missing_target_sid(self, dn, attrname, val, dsdb_dn, target_sid_blob):
+ """fix missing <SID=...> on linked attributes"""
+ self.report("ERROR: missing DN SID component for %s in object %s - %s" % (attrname, dn, val))
+
+ if len(dsdb_dn.prefix) != 0:
+ self.report("Not fixing missing DN SID on DN+BINARY or DN+STRING")
+ return
+
+ correct_dn = ldb.Dn(self.samdb, dsdb_dn.dn.extended_str())
+ correct_dn.set_extended_component("SID", target_sid_blob)
+
+ if not self.confirm_all('Change DN to %s?' % correct_dn.extended_str(),
+ 'fix_all_SID_dn_component_missing'):
+ self.report("Not fixing missing DN SID component")
+ return
+
+ target_guid_blob = correct_dn.get_extended_component("GUID")
+ guid_sid_dn = ldb.Dn(self.samdb, "")
+ guid_sid_dn.set_extended_component("GUID", target_guid_blob)
+ guid_sid_dn.set_extended_component("SID", target_sid_blob)
+
+ m = ldb.Message()
+ m.dn = dn
+ m['new_value'] = ldb.MessageElement(guid_sid_dn.extended_str(), ldb.FLAG_MOD_ADD, attrname)
+ controls = [
+ "show_recycled:1",
+ "local_oid:%s:1" % dsdb.DSDB_CONTROL_DBCHECK_FIX_LINK_DN_SID
+ ]
+ if self.do_modify(m, controls,
+ "Failed to ADD missing DN SID on attribute %s" % (attrname)):
+ self.report("Fixed missing DN SID on attribute %s" % (attrname))
+
+ def err_unknown_attribute(self, obj, attrname):
+ """handle an unknown attribute error"""
+ self.report("ERROR: unknown attribute '%s' in %s" % (attrname, obj.dn))
+ if not self.confirm_all('Remove unknown attribute %s' % attrname, 'remove_all_unknown_attributes'):
+ self.report("Not removing %s" % attrname)
+ return
+ m = ldb.Message()
+ m.dn = obj.dn
+ m['old_value'] = ldb.MessageElement([], ldb.FLAG_MOD_DELETE, attrname)
+ if self.do_modify(m, ["relax:0", "show_recycled:1"],
+ "Failed to remove unknown attribute %s" % attrname):
+ self.report("Removed unknown attribute %s" % (attrname))
+
+ def err_undead_linked_attribute(self, obj, attrname, val):
+ """handle a link that should not be there on a deleted object"""
+ self.report("ERROR: linked attribute '%s' to '%s' is present on "
+ "deleted object %s" % (attrname, val, obj.dn))
+ if not self.confirm_all('Remove linked attribute %s' % attrname, 'fix_undead_linked_attributes'):
+ self.report("Not removing linked attribute %s" % attrname)
+ return
+ m = ldb.Message()
+ m.dn = obj.dn
+ m['old_value'] = ldb.MessageElement(val, ldb.FLAG_MOD_DELETE, attrname)
+
+ if self.do_modify(m, ["show_recycled:1", "show_deleted:1", "reveal_internals:0",
+ "local_oid:%s:0" % dsdb.DSDB_CONTROL_REPLMD_VANISH_LINKS],
+ "Failed to delete forward link %s" % attrname):
+ self.report("Fixed undead forward link %s" % (attrname))
+
+ def err_missing_backlink(self, obj, attrname, val, backlink_name, target_dn):
+ """handle a missing backlink value"""
+ self.report("ERROR: missing backlink attribute '%s' in %s for link %s in %s" % (backlink_name, target_dn, attrname, obj.dn))
+ if not self.confirm_all('Fix missing backlink %s' % backlink_name, 'fix_all_missing_backlinks'):
+ self.report("Not fixing missing backlink %s" % backlink_name)
+ return
+ m = ldb.Message()
+ m.dn = target_dn
+ m['new_value'] = ldb.MessageElement(val, ldb.FLAG_MOD_ADD, backlink_name)
+ if self.do_modify(m, ["show_recycled:1", "relax:0"],
+ "Failed to fix missing backlink %s" % backlink_name):
+ self.report("Fixed missing backlink %s" % (backlink_name))
+
+ def err_incorrect_rmd_flags(self, obj, attrname, revealed_dn):
+ """handle a incorrect RMD_FLAGS value"""
+ rmd_flags = int(revealed_dn.dn.get_extended_component("RMD_FLAGS"))
+ self.report("ERROR: incorrect RMD_FLAGS value %u for attribute '%s' in %s for link %s" % (rmd_flags, attrname, obj.dn, revealed_dn.dn.extended_str()))
+ if not self.confirm_all('Fix incorrect RMD_FLAGS %u' % rmd_flags, 'fix_rmd_flags'):
+ self.report("Not fixing incorrect RMD_FLAGS %u" % rmd_flags)
+ return
+ m = ldb.Message()
+ m.dn = obj.dn
+ m['old_value'] = ldb.MessageElement(str(revealed_dn), ldb.FLAG_MOD_DELETE, attrname)
+ if self.do_modify(m, ["show_recycled:1", "reveal_internals:0", "show_deleted:0"],
+ "Failed to fix incorrect RMD_FLAGS %u" % rmd_flags):
+ self.report("Fixed incorrect RMD_FLAGS %u" % (rmd_flags))
+
+ def err_orphaned_backlink(self, obj_dn, backlink_attr, backlink_val,
+ target_dn, forward_attr, forward_syntax,
+ check_duplicates=True):
+ """handle a orphaned backlink value"""
+ if check_duplicates is True and self.has_duplicate_links(target_dn, forward_attr, forward_syntax):
+ self.report("WARNING: Keep orphaned backlink attribute " +
+ "'%s' in '%s' for link '%s' in '%s'" % (
+ backlink_attr, obj_dn, forward_attr, target_dn))
+ return
+ self.report("ERROR: orphaned backlink attribute '%s' in %s for link %s in %s" % (backlink_attr, obj_dn, forward_attr, target_dn))
+ if not self.confirm_all('Remove orphaned backlink %s' % backlink_attr, 'fix_all_orphaned_backlinks'):
+ self.report("Not removing orphaned backlink %s" % backlink_attr)
+ return
+ m = ldb.Message()
+ m.dn = obj_dn
+ m['value'] = ldb.MessageElement(backlink_val, ldb.FLAG_MOD_DELETE, backlink_attr)
+ if self.do_modify(m, ["show_recycled:1", "relax:0"],
+ "Failed to fix orphaned backlink %s" % backlink_attr):
+ self.report("Fixed orphaned backlink %s" % (backlink_attr))
+
+ def err_recover_forward_links(self, obj, forward_attr, forward_vals):
+ """handle a duplicate links value"""
+
+ self.report("RECHECK: 'Missing/Duplicate/Correct link' lines above for attribute '%s' in '%s'" % (forward_attr, obj.dn))
+
+ if not self.confirm_all("Commit fixes for (missing/duplicate) forward links in attribute '%s'" % forward_attr, 'recover_all_forward_links'):
+ self.report("Not fixing corrupted (missing/duplicate) forward links in attribute '%s' of '%s'" % (
+ forward_attr, obj.dn))
+ return
+ m = ldb.Message()
+ m.dn = obj.dn
+ m['value'] = ldb.MessageElement(forward_vals, ldb.FLAG_MOD_REPLACE, forward_attr)
+ if self.do_modify(m, ["local_oid:%s:1" % dsdb.DSDB_CONTROL_DBCHECK_FIX_DUPLICATE_LINKS],
+ "Failed to fix duplicate links in attribute '%s'" % forward_attr):
+ self.report("Fixed duplicate links in attribute '%s'" % (forward_attr))
+ duplicate_cache_key = "%s:%s" % (str(obj.dn), forward_attr)
+ assert duplicate_cache_key in self.duplicate_link_cache
+ self.duplicate_link_cache[duplicate_cache_key] = False
+
+ def err_no_fsmoRoleOwner(self, obj):
+ """handle a missing fSMORoleOwner"""
+ self.report("ERROR: fSMORoleOwner not found for role %s" % (obj.dn))
+ res = self.samdb.search("",
+ scope=ldb.SCOPE_BASE, attrs=["dsServiceName"])
+ assert len(res) == 1
+ serviceName = str(res[0]["dsServiceName"][0])
+ if not self.confirm_all('Seize role %s onto current DC by adding fSMORoleOwner=%s' % (obj.dn, serviceName), 'seize_fsmo_role'):
+ self.report("Not Seizing role %s onto current DC by adding fSMORoleOwner=%s" % (obj.dn, serviceName))
+ return
+ m = ldb.Message()
+ m.dn = obj.dn
+ m['value'] = ldb.MessageElement(serviceName, ldb.FLAG_MOD_ADD, 'fSMORoleOwner')
+ if self.do_modify(m, [],
+ "Failed to seize role %s onto current DC by adding fSMORoleOwner=%s" % (obj.dn, serviceName)):
+ self.report("Seized role %s onto current DC by adding fSMORoleOwner=%s" % (obj.dn, serviceName))
+
    def err_missing_parent(self, obj):
        """handle a missing parent

        Moves the orphan into the NC's LostAndFound container inside its
        own transaction; the transaction is committed only when both the
        rename and the lastKnownParent update succeed.
        """
        self.report("ERROR: parent object not found for %s" % (obj.dn))
        if not self.confirm_all('Move object %s into LostAndFound?' % (obj.dn), 'move_to_lost_and_found'):
            self.report('Not moving object %s into LostAndFound' % (obj.dn))
            return

        keep_transaction = False
        self.samdb.transaction_start()
        try:
            nc_root = self.samdb.get_nc_root(obj.dn)
            lost_and_found = self.samdb.get_wellknown_dn(nc_root, dsdb.DS_GUID_LOSTANDFOUND_CONTAINER)
            # the object's RDN becomes the new DN under LostAndFound
            new_dn = ldb.Dn(self.samdb, str(obj.dn))
            new_dn.remove_base_components(len(new_dn) - 1)
            if self.do_rename(obj.dn, new_dn, lost_and_found, ["show_deleted:0", "relax:0"],
                              "Failed to rename object %s into lostAndFound at %s" % (obj.dn, new_dn + lost_and_found)):
                self.report("Renamed object %s into lostAndFound at %s" % (obj.dn, new_dn + lost_and_found))

                # record where the object used to live
                m = ldb.Message()
                m.dn = obj.dn
                m['lastKnownParent'] = ldb.MessageElement(str(obj.dn.parent()), ldb.FLAG_MOD_REPLACE, 'lastKnownParent')

                if self.do_modify(m, [],
                                  "Failed to set lastKnownParent on lostAndFound object at %s" % (new_dn + lost_and_found)):
                    self.report("Set lastKnownParent on lostAndFound object at %s" % (new_dn + lost_and_found))
                    keep_transaction = True
        except:
            # cancel the transaction before re-raising whatever happened
            self.samdb.transaction_cancel()
            raise

        if keep_transaction:
            self.samdb.transaction_commit()
        else:
            self.samdb.transaction_cancel()
+
+ def err_wrong_dn(self, obj, new_dn, rdn_attr, rdn_val, name_val, controls):
+ """handle a wrong dn"""
+
+ new_rdn = ldb.Dn(self.samdb, str(new_dn))
+ new_rdn.remove_base_components(len(new_rdn) - 1)
+ new_parent = new_dn.parent()
+
+ attributes = ""
+ if rdn_val != name_val:
+ attributes += "%s=%r " % (rdn_attr, rdn_val)
+ attributes += "name=%r" % (name_val)
+
+ self.report("ERROR: wrong dn[%s] %s new_dn[%s]" % (obj.dn, attributes, new_dn))
+ if not self.confirm_all("Rename %s to %s?" % (obj.dn, new_dn), 'fix_dn'):
+ self.report("Not renaming %s to %s" % (obj.dn, new_dn))
+ return
+
+ if self.do_rename(obj.dn, new_rdn, new_parent, controls,
+ "Failed to rename object %s into %s" % (obj.dn, new_dn)):
+ self.report("Renamed %s into %s" % (obj.dn, new_dn))
+
+ def err_wrong_instancetype(self, obj, calculated_instancetype):
+ """handle a wrong instanceType"""
+ self.report("ERROR: wrong instanceType %s on %s, should be %d" % (obj["instanceType"], obj.dn, calculated_instancetype))
+ if not self.confirm_all('Change instanceType from %s to %d on %s?' % (obj["instanceType"], calculated_instancetype, obj.dn), 'fix_instancetype'):
+ self.report('Not changing instanceType from %s to %d on %s' % (obj["instanceType"], calculated_instancetype, obj.dn))
+ return
+
+ m = ldb.Message()
+ m.dn = obj.dn
+ m['value'] = ldb.MessageElement(str(calculated_instancetype), ldb.FLAG_MOD_REPLACE, 'instanceType')
+ if self.do_modify(m, ["local_oid:%s:0" % dsdb.DSDB_CONTROL_DBCHECK_MODIFY_RO_REPLICA],
+ "Failed to correct missing instanceType on %s by setting instanceType=%d" % (obj.dn, calculated_instancetype)):
+ self.report("Corrected instancetype on %s by setting instanceType=%d" % (obj.dn, calculated_instancetype))
+
+ def err_short_userParameters(self, obj, attrname, value):
+ # This is a truncated userParameters due to a pre 4.1 replication bug
+ self.report("ERROR: incorrect userParameters value on object %s. If you have another working DC that does not give this warning, please run 'samba-tool drs replicate --full-sync --local <destinationDC> <sourceDC> %s'" % (obj.dn, self.samdb.get_nc_root(obj.dn)))
+
+ def err_base64_userParameters(self, obj, attrname, value):
+ """handle a userParameters that is wrongly base64 encoded"""
+ self.report("ERROR: wrongly formatted userParameters %s on %s, should not be base64-encoded" % (value, obj.dn))
+ if not self.confirm_all('Convert userParameters from base64 encoding on %s?' % (obj.dn), 'fix_base64_userparameters'):
+ self.report('Not changing userParameters from base64 encoding on %s' % (obj.dn))
+ return
+
+ m = ldb.Message()
+ m.dn = obj.dn
+ m['value'] = ldb.MessageElement(b64decode(obj[attrname][0]), ldb.FLAG_MOD_REPLACE, 'userParameters')
+ if self.do_modify(m, [],
+ "Failed to correct base64-encoded userParameters on %s by converting from base64" % (obj.dn)):
+ self.report("Corrected base64-encoded userParameters on %s by converting from base64" % (obj.dn))
+
+ def err_utf8_userParameters(self, obj, attrname, value):
+ """handle a userParameters that is wrongly utf-8 encoded"""
+ self.report("ERROR: wrongly formatted userParameters on %s, "
+ "should not be pseudo-UTF8 encoded" % (obj.dn))
+ if not self.confirm_all('Convert userParameters from UTF8 encoding on %s?' % (obj.dn), 'fix_utf8_userparameters'):
+ self.report('Not changing userParameters from UTF8 encoding on %s' % (obj.dn))
+ return
+
+ m = ldb.Message()
+ m.dn = obj.dn
+ m['value'] = ldb.MessageElement(obj[attrname][0].decode('utf8').encode('utf-16-le'),
+ ldb.FLAG_MOD_REPLACE, 'userParameters')
+ if self.do_modify(m, [],
+ "Failed to correct psudo-UTF8 encoded userParameters on %s by converting from UTF8" % (obj.dn)):
+ self.report("Corrected psudo-UTF8 encoded userParameters on %s by converting from UTF8" % (obj.dn))
+
+ def err_doubled_userParameters(self, obj, attrname, value):
+ """handle a userParameters that has been utf-16 encoded twice"""
+ self.report("ERROR: wrongly formatted userParameters on %s, should not be double UTF16 encoded" % (obj.dn))
+ if not self.confirm_all('Convert userParameters from doubled UTF-16 encoding on %s?' % (obj.dn), 'fix_doubled_userparameters'):
+ self.report('Not changing userParameters from doubled UTF-16 encoding on %s' % (obj.dn))
+ return
+
+ m = ldb.Message()
+ m.dn = obj.dn
+ # m['value'] = ldb.MessageElement(obj[attrname][0].decode('utf-16-le').decode('utf-16-le').encode('utf-16-le'),
+ # hmm the above old python2 code doesn't make sense to me and cannot
+ # work in python3 because a string doesn't have a decode method.
+ # However in python2 for some unknown reason this double decode
+ # followed by encode seems to result in what looks like utf8.
+ # In python2 just .decode('utf-16-le').encode('utf-16-le') does nothing
+ # but trigger the 'double UTF16 encoded' condition again :/
+ #
+ # In python2 and python3 value.decode('utf-16-le').encode('utf8') seems
+ # to do the trick and work as expected.
+ m['value'] = ldb.MessageElement(obj[attrname][0].decode('utf-16-le').encode('utf8'),
+ ldb.FLAG_MOD_REPLACE, 'userParameters')
+
+ if self.do_modify(m, [],
+ "Failed to correct doubled-UTF16 encoded userParameters on %s by converting" % (obj.dn)):
+ self.report("Corrected doubled-UTF16 encoded userParameters on %s by converting" % (obj.dn))
+
+ def err_odd_userParameters(self, obj, attrname):
+ """Fix a truncated userParameters due to a pre 4.1 replication bug"""
+ self.report("ERROR: incorrect userParameters value on object %s (odd length). If you have another working DC that does not give this warning, please run 'samba-tool drs replicate --full-sync --local <destinationDC> <sourceDC> %s'" % (obj.dn, self.samdb.get_nc_root(obj.dn)))
+
+ def find_revealed_link(self, dn, attrname, guid):
+ """return a revealed link in an object"""
+ res = self.samdb.search(base=dn, scope=ldb.SCOPE_BASE, attrs=[attrname],
+ controls=["show_deleted:0", "extended_dn:0", "reveal_internals:0"])
+ syntax_oid = self.samdb_schema.get_syntax_oid_from_lDAPDisplayName(attrname)
+ for val in res[0][attrname]:
+ dsdb_dn = dsdb_Dn(self.samdb, val.decode('utf8'), syntax_oid)
+ guid2 = dsdb_dn.dn.get_extended_component("GUID")
+ if guid == guid2:
+ return dsdb_dn
+ return None
+
    def check_duplicate_links(self, obj, forward_attr, forward_syntax, forward_linkID, backlink_attr):
        """check a linked values for duplicate forward links

        Returns (error_count, duplicate_dict, unique_dict): unique_dict
        maps "<GUID><prefix>" keys to the winning dsdb_Dn, and
        duplicate_dict records per key which value to keep and which to
        delete.  Also records whether duplicates were found in
        self.duplicate_link_cache as a side effect.
        """
        error_count = 0

        duplicate_dict = dict()
        unique_dict = dict()

        # Only forward links can have this problem
        if forward_linkID & 1:
            # If we got the reverse, skip it
            return (error_count, duplicate_dict, unique_dict)

        if backlink_attr is None:
            return (error_count, duplicate_dict, unique_dict)

        duplicate_cache_key = "%s:%s" % (str(obj.dn), forward_attr)
        if duplicate_cache_key not in self.duplicate_link_cache:
            self.duplicate_link_cache[duplicate_cache_key] = False

        for val in obj[forward_attr]:
            dsdb_dn = dsdb_Dn(self.samdb, val.decode('utf8'), forward_syntax)

            # all DNs should have a GUID component
            guid = dsdb_dn.dn.get_extended_component("GUID")
            if guid is None:
                continue
            guidstr = str(misc.GUID(guid))
            keystr = guidstr + dsdb_dn.prefix
            if keystr not in unique_dict:
                unique_dict[keystr] = dsdb_dn
                continue
            # a second value with the same GUID+prefix is a duplicate
            error_count += 1
            if keystr not in duplicate_dict:
                duplicate_dict[keystr] = dict()
                duplicate_dict[keystr]["keep"] = None
                duplicate_dict[keystr]["delete"] = list()

            # Now check for the highest RMD_VERSION
            v1 = int(unique_dict[keystr].dn.get_extended_component("RMD_VERSION"))
            v2 = int(dsdb_dn.dn.get_extended_component("RMD_VERSION"))
            if v1 > v2:
                duplicate_dict[keystr]["keep"] = unique_dict[keystr]
                duplicate_dict[keystr]["delete"].append(dsdb_dn)
                continue
            if v1 < v2:
                duplicate_dict[keystr]["keep"] = dsdb_dn
                duplicate_dict[keystr]["delete"].append(unique_dict[keystr])
                unique_dict[keystr] = dsdb_dn
                continue
            # Fallback to the highest RMD_LOCAL_USN
            u1 = int(unique_dict[keystr].dn.get_extended_component("RMD_LOCAL_USN"))
            u2 = int(dsdb_dn.dn.get_extended_component("RMD_LOCAL_USN"))
            if u1 >= u2:
                duplicate_dict[keystr]["keep"] = unique_dict[keystr]
                duplicate_dict[keystr]["delete"].append(dsdb_dn)
                continue
            duplicate_dict[keystr]["keep"] = dsdb_dn
            duplicate_dict[keystr]["delete"].append(unique_dict[keystr])
            unique_dict[keystr] = dsdb_dn

        if error_count != 0:
            self.duplicate_link_cache[duplicate_cache_key] = True

        return (error_count, duplicate_dict, unique_dict)
+
+ def has_duplicate_links(self, dn, forward_attr, forward_syntax):
+ """check a linked values for duplicate forward links"""
+ error_count = 0
+
+ duplicate_cache_key = "%s:%s" % (str(dn), forward_attr)
+ if duplicate_cache_key in self.duplicate_link_cache:
+ return self.duplicate_link_cache[duplicate_cache_key]
+
+ forward_linkID, backlink_attr = self.get_attr_linkID_and_reverse_name(forward_attr)
+
+ attrs = [forward_attr]
+ controls = ["extended_dn:1:1", "reveal_internals:0"]
+
+ # check its the right GUID
+ try:
+ res = self.samdb.search(base=str(dn), scope=ldb.SCOPE_BASE,
+ attrs=attrs, controls=controls)
+ except ldb.LdbError as e8:
+ (enum, estr) = e8.args
+ if enum != ldb.ERR_NO_SUCH_OBJECT:
+ raise
+
+ return False
+
+ obj = res[0]
+ error_count, duplicate_dict, unique_dict = \
+ self.check_duplicate_links(obj, forward_attr, forward_syntax, forward_linkID, backlink_attr)
+
+ if duplicate_cache_key in self.duplicate_link_cache:
+ return self.duplicate_link_cache[duplicate_cache_key]
+
+ return False
+
    def find_missing_forward_links_from_backlinks(self, obj,
                                                  forward_attr,
                                                  forward_syntax,
                                                  backlink_attr,
                                                  forward_unique_dict):
        """Find all backlinks linking to obj_guid_str not already in forward_unique_dict

        Searches the whole partition for objects whose backlink_attr points
        at obj, and synthesises a forward-link DN (with fabricated RMD_*
        components) for each one whose GUID is not already present in
        forward_unique_dict.

        :param obj: the object whose forward links are being repaired
        :param forward_attr: lDAPDisplayName of the forward link attribute
        :param forward_syntax: syntax OID of forward_attr (only plain DN
            syntax is handled)
        :param backlink_attr: lDAPDisplayName of the backlink, or None
        :param forward_unique_dict: GUID-string -> dsdb_Dn map of the
            forward links already present
        :return: (missing_forward_links, error_count) tuple
        """
        missing_forward_links = []
        error_count = 0

        # no backlink attribute means nothing can be reconstructed
        if backlink_attr is None:
            return (missing_forward_links, error_count)

        if forward_syntax != ldb.SYNTAX_DN:
            self.report("Not checking for missing forward links for syntax: %s" %
                        forward_syntax)
            return (missing_forward_links, error_count)

        if "sortedLinks" in self.compatibleFeatures:
            self.report("Not checking for missing forward links because the db " +
                        "has the sortedLinks feature")
            return (missing_forward_links, error_count)

        try:
            obj_guid = obj['objectGUID'][0]
            obj_guid_str = str(ndr_unpack(misc.GUID, obj_guid))
            filter = "(%s=<GUID=%s>)" % (backlink_attr, obj_guid_str)

            # search_options:1:2 searches the whole partition including
            # phantom objects; paging keeps memory bounded
            res = self.samdb.search(expression=filter,
                                    scope=ldb.SCOPE_SUBTREE, attrs=["objectGUID"],
                                    controls=["extended_dn:1:1",
                                              "search_options:1:2",
                                              "paged_results:1:1000"])
        except ldb.LdbError as e9:
            (enum, estr) = e9.args
            raise

        for r in res:
            target_dn = dsdb_Dn(self.samdb, r.dn.extended_str(), forward_syntax)

            guid = target_dn.dn.get_extended_component("GUID")
            guidstr = str(misc.GUID(guid))
            # already linked forward; nothing missing for this target
            if guidstr in forward_unique_dict:
                continue

            # A valid forward link looks like this:
            #
            # <GUID=9f92d30a-fc23-11e4-a5f6-30be15454808>;
            # <RMD_ADDTIME=131607546230000000>;
            # <RMD_CHANGETIME=131607546230000000>;
            # <RMD_FLAGS=0>;
            # <RMD_INVOCID=4e4496a3-7fb8-4f97-8a33-d238db8b5e2d>;
            # <RMD_LOCAL_USN=3765>;
            # <RMD_ORIGINATING_USN=3765>;
            # <RMD_VERSION=1>;
            # <SID=S-1-5-21-4177067393-1453636373-93818738-1124>;
            # CN=unsorted-u8,CN=Users,DC=release-4-5-0-pre1,DC=samba,DC=corp
            #
            # Note that versions older than Samba 4.8 create
            # links with RMD_VERSION=0.
            #
            # Try to get the local_usn and time from objectClass
            # if possible and fallback to any other one.
            repl = ndr_unpack(drsblobs.replPropertyMetaDataBlob,
                              obj['replPropertyMetadata'][0])
            for o in repl.ctr.array:
                local_usn = o.local_usn
                t = o.originating_change_time
                if o.attid == drsuapi.DRSUAPI_ATTID_objectClass:
                    break

            # We use a magic invocationID for restoring missing
            # forward links to recover from bug #13228.
            # This should allow some more future magic to fix the
            # problem.
            #
            # It also means it looses the conflict resolution
            # against almost every real invocation, if the
            # version is also 0.
            originating_invocid = misc.GUID("ffffffff-4700-4700-4700-000000b13228")
            originating_usn = 1

            rmd_addtime = t
            rmd_changetime = t
            rmd_flags = 0
            rmd_invocid = originating_invocid
            rmd_originating_usn = originating_usn
            rmd_local_usn = local_usn
            rmd_version = 0

            target_dn.dn.set_extended_component("RMD_ADDTIME", str(rmd_addtime))
            target_dn.dn.set_extended_component("RMD_CHANGETIME", str(rmd_changetime))
            target_dn.dn.set_extended_component("RMD_FLAGS", str(rmd_flags))
            target_dn.dn.set_extended_component("RMD_INVOCID", ndr_pack(rmd_invocid))
            target_dn.dn.set_extended_component("RMD_ORIGINATING_USN", str(rmd_originating_usn))
            target_dn.dn.set_extended_component("RMD_LOCAL_USN", str(rmd_local_usn))
            target_dn.dn.set_extended_component("RMD_VERSION", str(rmd_version))

            error_count += 1
            missing_forward_links.append(target_dn)

        return (missing_forward_links, error_count)
+
    def check_dn(self, obj, attrname, syntax_oid):
        """check a DN attribute for correctness

        Validates every value of a DN-syntax attribute on obj: duplicate
        and missing forward links, GUID/SID/string components of each
        link, deleted-target handling, and forward/backward link
        consistency. Fixes are offered via the err_* helpers.

        :param obj: the ldb message (mutated in place when duplicate
            links are repaired)
        :param attrname: lDAPDisplayName of the attribute being checked
        :param syntax_oid: syntax OID of attrname
        :return: number of errors found
        """
        error_count = 0
        obj_guid = obj['objectGUID'][0]

        linkID, reverse_link_name = self.get_attr_linkID_and_reverse_name(attrname)
        if reverse_link_name is not None:
            reverse_syntax_oid = self.samdb_schema.get_syntax_oid_from_lDAPDisplayName(reverse_link_name)
        else:
            reverse_syntax_oid = None

        is_member_link = attrname in ("member", "memberOf")
        if is_member_link and self.quick_membership_checks:
            duplicate_dict = {}
        else:
            error_count, duplicate_dict, unique_dict = \
                self.check_duplicate_links(obj, attrname, syntax_oid,
                                           linkID, reverse_link_name)

        if len(duplicate_dict) != 0:

            missing_forward_links, missing_error_count = \
                self.find_missing_forward_links_from_backlinks(obj,
                                                               attrname, syntax_oid,
                                                               reverse_link_name,
                                                               unique_dict)
            error_count += missing_error_count

            forward_links = [dn for dn in unique_dict.values()]

            if missing_error_count != 0:
                self.report("ERROR: Missing and duplicate forward link values for attribute '%s' in '%s'" % (
                            attrname, obj.dn))
            else:
                self.report("ERROR: Duplicate forward link values for attribute '%s' in '%s'" % (attrname, obj.dn))
            for m in missing_forward_links:
                self.report("Missing link '%s'" % (m))
                if not self.confirm_all("Schedule re-adding missing forward link for attribute %s" % attrname,
                                        'fix_all_missing_forward_links'):
                    self.err_orphaned_backlink(m.dn, reverse_link_name,
                                               obj.dn.extended_str(), obj.dn,
                                               attrname, syntax_oid,
                                               check_duplicates=False)
                    continue
                forward_links += [m]
            for keystr in duplicate_dict.keys():
                d = duplicate_dict[keystr]
                for dd in d["delete"]:
                    self.report("Duplicate link '%s'" % dd)
                self.report("Correct link '%s'" % d["keep"])

            # We now construct the sorted dn values.
            # They're sorted by the objectGUID of the target
            # See dsdb_Dn.__cmp__()
            vals = [str(dn) for dn in sorted(forward_links)]
            self.err_recover_forward_links(obj, attrname, vals)
            # We should continue with the fixed values
            obj[attrname] = ldb.MessageElement(vals, 0, attrname)

        for val in obj[attrname]:
            dsdb_dn = dsdb_Dn(self.samdb, val.decode('utf8'), syntax_oid)

            # all DNs should have a GUID component
            guid = dsdb_dn.dn.get_extended_component("GUID")
            if guid is None:
                error_count += 1
                self.err_missing_dn_GUID_component(obj.dn, attrname, val, dsdb_dn,
                                                   "missing GUID")
                continue

            guidstr = str(misc.GUID(guid))
            attrs = ['isDeleted', 'replPropertyMetaData']

            if (str(attrname).lower() == 'msds-hasinstantiatedncs') and (obj.dn == self.ntds_dsa):
                fixing_msDS_HasInstantiatedNCs = True
                attrs.append("instanceType")
            else:
                fixing_msDS_HasInstantiatedNCs = False

            if reverse_link_name is not None:
                attrs.append(reverse_link_name)

            # check its the right GUID
            try:
                res = self.samdb.search(base="<GUID=%s>" % guidstr, scope=ldb.SCOPE_BASE,
                                        attrs=attrs, controls=["extended_dn:1:1", "show_recycled:1",
                                                               "reveal_internals:0"
                                                               ])
            except ldb.LdbError as e3:
                (enum, estr) = e3.args
                if enum != ldb.ERR_NO_SUCH_OBJECT:
                    raise

                # We don't always want to
                error_count += self.err_missing_target_dn_or_GUID(obj.dn,
                                                                  attrname,
                                                                  val,
                                                                  dsdb_dn)
                continue

            if fixing_msDS_HasInstantiatedNCs:
                # re-derive the binary part from the target's instanceType
                dsdb_dn.prefix = "B:8:%08X:" % int(res[0]['instanceType'][0])
                dsdb_dn.binary = "%08X" % int(res[0]['instanceType'][0])

                if str(dsdb_dn) != str(val):
                    error_count += 1
                    self.err_incorrect_binary_dn(obj.dn, attrname, val, dsdb_dn, "incorrect instanceType part of Binary DN")
                    continue

            # now we have two cases - the source object might or might not be deleted
            is_deleted = 'isDeleted' in obj and str(obj['isDeleted'][0]).upper() == 'TRUE'
            target_is_deleted = 'isDeleted' in res[0] and str(res[0]['isDeleted'][0]).upper() == 'TRUE'

            if is_deleted and obj.dn not in self.deleted_objects_containers and linkID:
                # A fully deleted object should not have any linked
                # attributes. (MS-ADTS 3.1.1.5.5.1.1 Tombstone
                # Requirements and 3.1.1.5.5.1.3 Recycled-Object
                # Requirements)
                self.err_undead_linked_attribute(obj, attrname, val)
                error_count += 1
                continue
            elif target_is_deleted and not self.is_deleted_objects_dn(dsdb_dn) and linkID:
                # the target DN is not allowed to be deleted, unless the target DN is the
                # special Deleted Objects container
                error_count += 1
                local_usn = dsdb_dn.dn.get_extended_component("RMD_LOCAL_USN")
                if local_usn:
                    if 'replPropertyMetaData' in res[0]:
                        repl = ndr_unpack(drsblobs.replPropertyMetaDataBlob,
                                          res[0]['replPropertyMetadata'][0])
                        found_data = False
                        for o in repl.ctr.array:
                            if o.attid == drsuapi.DRSUAPI_ATTID_isDeleted:
                                deleted_usn = o.local_usn
                                if deleted_usn >= int(local_usn):
                                    # If the object was deleted after the link
                                    # was last modified then, clean it up here
                                    found_data = True
                                    break

                        if found_data:
                            self.err_deleted_dn(obj.dn, attrname,
                                                val, dsdb_dn, res[0].dn, True)
                            continue

                self.err_deleted_dn(obj.dn, attrname, val, dsdb_dn, res[0].dn, False)
                continue

            # We should not check for incorrect
            # components on deleted links, as these are allowed to
            # go stale (we just need the GUID, not the name)
            rmd_blob = dsdb_dn.dn.get_extended_component("RMD_FLAGS")
            rmd_flags = 0
            if rmd_blob is not None:
                rmd_flags = int(rmd_blob)

            # assert the DN matches in string form, where a reverse
            # link exists, otherwise (below) offer to fix it as a non-error.
            # The string form is essentially only kept for forensics,
            # as we always re-resolve by GUID in normal operations.
            if not rmd_flags & 1 and reverse_link_name is not None:
                if str(res[0].dn) != str(dsdb_dn.dn):
                    error_count += 1
                    self.err_dn_component_target_mismatch(obj.dn, attrname, val, dsdb_dn,
                                                          res[0].dn, "string")
                    continue

            if res[0].dn.get_extended_component("GUID") != dsdb_dn.dn.get_extended_component("GUID"):
                error_count += 1
                self.err_dn_component_target_mismatch(obj.dn, attrname, val, dsdb_dn,
                                                      res[0].dn, "GUID")
                continue

            target_sid = res[0].dn.get_extended_component("SID")
            link_sid = dsdb_dn.dn.get_extended_component("SID")
            if link_sid is None and target_sid is not None:
                error_count += 1
                self.err_dn_component_missing_target_sid(obj.dn, attrname, val,
                                                         dsdb_dn, target_sid)
                continue
            if link_sid != target_sid:
                error_count += 1
                self.err_dn_component_target_mismatch(obj.dn, attrname, val, dsdb_dn,
                                                      res[0].dn, "SID")
                continue

            # Only for non-links, not even forward-only links
            # (otherwise this breaks repl_meta_data):
            #
            # Now we have checked the GUID and SID, offer to fix old
            # DN strings as a non-error (DNs, not links so no
            # backlink). Samba does not maintain this string
            # otherwise, so we don't increment error_count.
            if reverse_link_name is None:
                if linkID == 0 and str(res[0].dn) != str(dsdb_dn.dn):
                    # Pass in the old/bad DN without the <GUID=...> part,
                    # otherwise the LDB code will correct it on the way through
                    # (Note: we still want to preserve the DSDB DN prefix in the
                    # case of binary DNs)
                    bad_dn = dsdb_dn.prefix + dsdb_dn.dn.get_linearized()
                    self.err_dn_string_component_old(obj.dn, attrname, bad_dn,
                                                     dsdb_dn, res[0].dn)
                continue

            if is_member_link and self.quick_membership_checks:
                continue

            # check the reverse_link is correct if there should be one
            match_count = 0
            if reverse_link_name in res[0]:
                for v in res[0][reverse_link_name]:
                    v_dn = dsdb_Dn(self.samdb, v.decode('utf8'))
                    v_guid = v_dn.dn.get_extended_component("GUID")
                    v_blob = v_dn.dn.get_extended_component("RMD_FLAGS")
                    v_rmd_flags = 0
                    if v_blob is not None:
                        v_rmd_flags = int(v_blob)
                    # ignore deactivated (deleted) backlinks
                    if v_rmd_flags & 1:
                        continue
                    if v_guid == obj_guid:
                        match_count += 1

            if match_count != 1:
                if syntax_oid == dsdb.DSDB_SYNTAX_BINARY_DN or reverse_syntax_oid == dsdb.DSDB_SYNTAX_BINARY_DN:
                    if not linkID & 1:
                        # Forward binary multi-valued linked attribute
                        forward_count = 0
                        for w in obj[attrname]:
                            w_guid = dsdb_Dn(self.samdb, w.decode('utf8')).dn.get_extended_component("GUID")
                            if w_guid == guid:
                                forward_count += 1

                        if match_count == forward_count:
                            continue
                expected_count = 0
                for v in obj[attrname]:
                    v_dn = dsdb_Dn(self.samdb, v.decode('utf8'))
                    v_guid = v_dn.dn.get_extended_component("GUID")
                    v_blob = v_dn.dn.get_extended_component("RMD_FLAGS")
                    v_rmd_flags = 0
                    if v_blob is not None:
                        v_rmd_flags = int(v_blob)
                    if v_rmd_flags & 1:
                        continue
                    if v_guid == guid:
                        expected_count += 1

                if match_count == expected_count:
                    continue

                diff_count = expected_count - match_count

                if linkID & 1:
                    # If there's a backward link on binary multi-valued linked attribute,
                    # let the check on the forward link remedy the value.
                    # UNLESS, there is no forward link detected.
                    if match_count == 0:
                        error_count += 1
                        self.err_orphaned_backlink(obj.dn, attrname,
                                                   val, dsdb_dn.dn,
                                                   reverse_link_name,
                                                   reverse_syntax_oid)
                        continue
                    # Only warn here and let the forward link logic fix it.
                    self.report("WARNING: Link (back) mismatch for '%s' (%d) on '%s' to '%s' (%d) on '%s'" % (
                                attrname, expected_count, str(obj.dn),
                                reverse_link_name, match_count, str(dsdb_dn.dn)))
                    continue

                assert not target_is_deleted

                self.report("ERROR: Link (forward) mismatch for '%s' (%d) on '%s' to '%s' (%d) on '%s'" % (
                            attrname, expected_count, str(obj.dn),
                            reverse_link_name, match_count, str(dsdb_dn.dn)))

                # Loop until the difference between the forward and
                # the backward links is resolved.
                while diff_count != 0:
                    error_count += 1
                    if diff_count > 0:
                        if match_count > 0 or diff_count > 1:
                            # TODO no method to fix these right now
                            self.report("ERROR: Can't fix missing "
                                        "multi-valued backlinks on %s" % str(dsdb_dn.dn))
                            break
                        self.err_missing_backlink(obj, attrname,
                                                  obj.dn.extended_str(),
                                                  reverse_link_name,
                                                  dsdb_dn.dn)
                        diff_count -= 1
                    else:
                        self.err_orphaned_backlink(res[0].dn, reverse_link_name,
                                                   obj.dn.extended_str(), obj.dn,
                                                   attrname, syntax_oid)
                        diff_count += 1

        return error_count
+
+ def find_repl_attid(self, repl, attid):
+ for o in repl.ctr.array:
+ if o.attid == attid:
+ return o
+
+ return None
+
+ def get_originating_time(self, val, attid):
+ """Read metadata properties and return the originating time for
+ a given attributeId.
+
+ :return: the originating time or 0 if not found
+ """
+
+ repl = ndr_unpack(drsblobs.replPropertyMetaDataBlob, val)
+ o = self.find_repl_attid(repl, attid)
+ if o is not None:
+ return o.originating_change_time
+ return 0
+
+ def process_metadata(self, dn, val):
+ """Read metadata properties and list attributes in it.
+ raises KeyError if the attid is unknown."""
+
+ set_att = set()
+ wrong_attids = set()
+ list_attid = []
+ in_schema_nc = dn.is_child_of(self.schema_dn)
+
+ repl = ndr_unpack(drsblobs.replPropertyMetaDataBlob, val)
+
+ for o in repl.ctr.array:
+ att = self.samdb_schema.get_lDAPDisplayName_by_attid(o.attid)
+ set_att.add(att.lower())
+ list_attid.append(o.attid)
+ correct_attid = self.samdb_schema.get_attid_from_lDAPDisplayName(att,
+ is_schema_nc=in_schema_nc)
+ if correct_attid != o.attid:
+ wrong_attids.add(o.attid)
+
+ return (set_att, list_attid, wrong_attids)
+
+ def fix_metadata(self, obj, attr):
+ """re-write replPropertyMetaData elements for a single attribute for a
+ object. This is used to fix missing replPropertyMetaData elements"""
+ guid_str = str(ndr_unpack(misc.GUID, obj['objectGUID'][0]))
+ dn = ldb.Dn(self.samdb, "<GUID=%s>" % guid_str)
+ res = self.samdb.search(base=dn, scope=ldb.SCOPE_BASE, attrs=[attr],
+ controls=["search_options:1:2",
+ "show_recycled:1"])
+ msg = res[0]
+ nmsg = ldb.Message()
+ nmsg.dn = dn
+ nmsg[attr] = ldb.MessageElement(msg[attr], ldb.FLAG_MOD_REPLACE, attr)
+ if self.do_modify(nmsg, ["relax:0", "provision:0", "show_recycled:1"],
+ "Failed to fix metadata for attribute %s" % attr):
+ self.report("Fixed metadata for attribute %s" % attr)
+
+ def ace_get_effective_inherited_type(self, ace):
+ if ace.flags & security.SEC_ACE_FLAG_INHERIT_ONLY:
+ return None
+
+ check = False
+ if ace.type == security.SEC_ACE_TYPE_ACCESS_ALLOWED_OBJECT:
+ check = True
+ elif ace.type == security.SEC_ACE_TYPE_ACCESS_DENIED_OBJECT:
+ check = True
+ elif ace.type == security.SEC_ACE_TYPE_SYSTEM_AUDIT_OBJECT:
+ check = True
+ elif ace.type == security.SEC_ACE_TYPE_SYSTEM_ALARM_OBJECT:
+ check = True
+
+ if not check:
+ return None
+
+ if not ace.object.flags & security.SEC_ACE_INHERITED_OBJECT_TYPE_PRESENT:
+ return None
+
+ return str(ace.object.inherited_type)
+
+ def lookup_class_schemaIDGUID(self, cls):
+ if cls in self.class_schemaIDGUID:
+ return self.class_schemaIDGUID[cls]
+
+ flt = "(&(ldapDisplayName=%s)(objectClass=classSchema))" % cls
+ res = self.samdb.search(base=self.schema_dn,
+ expression=flt,
+ attrs=["schemaIDGUID"])
+ t = str(ndr_unpack(misc.GUID, res[0]["schemaIDGUID"][0]))
+
+ self.class_schemaIDGUID[cls] = t
+ return t
+
+ def process_sd(self, dn, obj):
+ sd_attr = "nTSecurityDescriptor"
+ sd_val = obj[sd_attr]
+
+ sd = ndr_unpack(security.descriptor, sd_val[0])
+
+ is_deleted = 'isDeleted' in obj and str(obj['isDeleted'][0]).upper() == 'TRUE'
+ if is_deleted:
+ # we don't fix deleted objects
+ return (sd, None)
+
+ sd_clean = security.descriptor()
+ sd_clean.owner_sid = sd.owner_sid
+ sd_clean.group_sid = sd.group_sid
+ sd_clean.type = sd.type
+ sd_clean.revision = sd.revision
+
+ broken = False
+ last_inherited_type = None
+
+ aces = []
+ if sd.sacl is not None:
+ aces = sd.sacl.aces
+ for i in range(0, len(aces)):
+ ace = aces[i]
+
+ if not ace.flags & security.SEC_ACE_FLAG_INHERITED_ACE:
+ sd_clean.sacl_add(ace)
+ continue
+
+ t = self.ace_get_effective_inherited_type(ace)
+ if t is None:
+ continue
+
+ if last_inherited_type is not None:
+ if t != last_inherited_type:
+ # if it inherited from more than
+ # one type it's very likely to be broken
+ #
+ # If not the recalculation will calculate
+ # the same result.
+ broken = True
+ continue
+
+ last_inherited_type = t
+
+ aces = []
+ if sd.dacl is not None:
+ aces = sd.dacl.aces
+ for i in range(0, len(aces)):
+ ace = aces[i]
+
+ if not ace.flags & security.SEC_ACE_FLAG_INHERITED_ACE:
+ sd_clean.dacl_add(ace)
+ continue
+
+ t = self.ace_get_effective_inherited_type(ace)
+ if t is None:
+ continue
+
+ if last_inherited_type is not None:
+ if t != last_inherited_type:
+ # if it inherited from more than
+ # one type it's very likely to be broken
+ #
+ # If not the recalculation will calculate
+ # the same result.
+ broken = True
+ continue
+
+ last_inherited_type = t
+
+ if broken:
+ return (sd_clean, sd)
+
+ if last_inherited_type is None:
+ # ok
+ return (sd, None)
+
+ cls = None
+ try:
+ cls = obj["objectClass"][-1]
+ except KeyError as e:
+ pass
+
+ if cls is None:
+ res = self.samdb.search(base=dn, scope=ldb.SCOPE_BASE,
+ attrs=["isDeleted", "objectClass"],
+ controls=["show_recycled:1"])
+ o = res[0]
+ is_deleted = 'isDeleted' in o and str(o['isDeleted'][0]).upper() == 'TRUE'
+ if is_deleted:
+ # we don't fix deleted objects
+ return (sd, None)
+ cls = o["objectClass"][-1]
+
+ t = self.lookup_class_schemaIDGUID(cls)
+
+ if t != last_inherited_type:
+ # broken
+ return (sd_clean, sd)
+
+ # ok
+ return (sd, None)
+
+ def err_wrong_sd(self, dn, sd, sd_broken):
+ """re-write the SD due to incorrect inherited ACEs"""
+ sd_attr = "nTSecurityDescriptor"
+ sd_val = ndr_pack(sd)
+ sd_flags = security.SECINFO_DACL | security.SECINFO_SACL
+
+ if not self.confirm_all('Fix %s on %s?' % (sd_attr, dn), 'fix_ntsecuritydescriptor'):
+ self.report('Not fixing %s on %s\n' % (sd_attr, dn))
+ return
+
+ nmsg = ldb.Message()
+ nmsg.dn = dn
+ nmsg[sd_attr] = ldb.MessageElement(sd_val, ldb.FLAG_MOD_REPLACE, sd_attr)
+ if self.do_modify(nmsg, ["sd_flags:1:%d" % sd_flags],
+ "Failed to fix attribute %s" % sd_attr):
+ self.report("Fixed attribute '%s' of '%s'\n" % (sd_attr, dn))
+
+ def err_wrong_default_sd(self, dn, sd, diff):
+ """re-write the SD due to not matching the default (optional mode for fixing an incorrect provision)"""
+ sd_attr = "nTSecurityDescriptor"
+ sd_val = ndr_pack(sd)
+ sd_flags = security.SECINFO_DACL | security.SECINFO_SACL
+ if sd.owner_sid is not None:
+ sd_flags |= security.SECINFO_OWNER
+ if sd.group_sid is not None:
+ sd_flags |= security.SECINFO_GROUP
+
+ if not self.confirm_all('Reset %s on %s back to provision default?\n%s' % (sd_attr, dn, diff), 'reset_all_well_known_acls'):
+ self.report('Not resetting %s on %s\n' % (sd_attr, dn))
+ return
+
+ m = ldb.Message()
+ m.dn = dn
+ m[sd_attr] = ldb.MessageElement(sd_val, ldb.FLAG_MOD_REPLACE, sd_attr)
+ if self.do_modify(m, ["sd_flags:1:%d" % sd_flags],
+ "Failed to reset attribute %s" % sd_attr):
+ self.report("Fixed attribute '%s' of '%s'\n" % (sd_attr, dn))
+
+ def err_missing_sd_owner(self, dn, sd):
+ """re-write the SD due to a missing owner or group"""
+ sd_attr = "nTSecurityDescriptor"
+ sd_val = ndr_pack(sd)
+ sd_flags = security.SECINFO_OWNER | security.SECINFO_GROUP
+
+ if not self.confirm_all('Fix missing owner or group in %s on %s?' % (sd_attr, dn), 'fix_ntsecuritydescriptor_owner_group'):
+ self.report('Not fixing missing owner or group %s on %s\n' % (sd_attr, dn))
+ return
+
+ nmsg = ldb.Message()
+ nmsg.dn = dn
+ nmsg[sd_attr] = ldb.MessageElement(sd_val, ldb.FLAG_MOD_REPLACE, sd_attr)
+
+ # By setting the session_info to admin_session_info and
+ # setting the security.SECINFO_OWNER | security.SECINFO_GROUP
+ # flags we cause the descriptor module to set the correct
+ # owner and group on the SD, replacing the None/NULL values
+ # for owner_sid and group_sid currently present.
+ #
+ # The admin_session_info matches that used in provision, and
+ # is the best guess we can make for an existing object that
+ # hasn't had something specifically set.
+ #
+ # This is important for the dns related naming contexts.
+ self.samdb.set_session_info(self.admin_session_info)
+ if self.do_modify(nmsg, ["sd_flags:1:%d" % sd_flags],
+ "Failed to fix metadata for attribute %s" % sd_attr):
+ self.report("Fixed attribute '%s' of '%s'\n" % (sd_attr, dn))
+ self.samdb.set_session_info(self.system_session_info)
+
+ def is_expired_tombstone(self, dn, repl_val):
+ if self.check_expired_tombstones:
+ # This is not the default, it's just
+ # used to keep dbcheck tests work with
+ # old static provision dumps
+ return False
+
+ if dn in self.deleted_objects_containers:
+ # The Deleted Objects container will look like an expired
+ # tombstone
+ return False
+
+ repl = ndr_unpack(drsblobs.replPropertyMetaDataBlob, repl_val)
+
+ isDeleted = self.find_repl_attid(repl, drsuapi.DRSUAPI_ATTID_isDeleted)
+
+ delete_time = samba.nttime2unix(isDeleted.originating_change_time)
+ current_time = time.time()
+
+ tombstone_delta = self.tombstoneLifetime * (24 * 60 * 60)
+
+ delta = current_time - delete_time
+ if delta <= tombstone_delta:
+ return False
+
+ expunge_time = delete_time + tombstone_delta
+
+ delta_days = delta / (24 * 60 * 60)
+
+ if delta_days <= 2:
+ self.report("SKIPPING additional checks on object "
+ "%s which very recently "
+ "became an expired tombstone (normal)" % dn)
+ self.report("INFO: it is expected this will be expunged "
+ "by the next daily task some time after %s, "
+ "%d hours ago"
+ % (time.ctime(expunge_time), delta // (60 * 60)))
+ else:
+ self.report("SKIPPING: object %s is an expired tombstone" % dn)
+ self.report("INFO: it was expected this object would have "
+ "been expunged soon after"
+ "%s, %d days ago"
+ % (time.ctime(expunge_time), delta_days))
+
+ self.report("isDeleted: attid=0x%08x version=%d invocation=%s usn=%s (local=%s) at %s" % (
+ isDeleted.attid,
+ isDeleted.version,
+ isDeleted.originating_invocation_id,
+ isDeleted.originating_usn,
+ isDeleted.local_usn,
+ time.ctime(samba.nttime2unix(isDeleted.originating_change_time))))
+ self.expired_tombstones += 1
+ return True
+
+ def find_changes_after_deletion(self, repl_val):
+ repl = ndr_unpack(drsblobs.replPropertyMetaDataBlob, repl_val)
+
+ isDeleted = self.find_repl_attid(repl, drsuapi.DRSUAPI_ATTID_isDeleted)
+
+ delete_time = samba.nttime2unix(isDeleted.originating_change_time)
+
+ tombstone_delta = self.tombstoneLifetime * (24 * 60 * 60)
+
+ found = []
+ for o in repl.ctr.array:
+ if o.attid == drsuapi.DRSUAPI_ATTID_isDeleted:
+ continue
+
+ if o.local_usn <= isDeleted.local_usn:
+ continue
+
+ if o.originating_change_time <= isDeleted.originating_change_time:
+ continue
+
+ change_time = samba.nttime2unix(o.originating_change_time)
+
+ delta = change_time - delete_time
+ if delta <= tombstone_delta:
+ continue
+
+ # If the modification happened after the tombstone lifetime
+ # has passed, we have a bug as the object might be deleted
+ # already on other DCs and won't be able to replicate
+ # back
+ found.append(o)
+
+ return found, isDeleted
+
+ def has_changes_after_deletion(self, dn, repl_val):
+ found, isDeleted = self.find_changes_after_deletion(repl_val)
+ if len(found) == 0:
+ return False
+
+ def report_attid(o):
+ try:
+ attname = self.samdb_schema.get_lDAPDisplayName_by_attid(o.attid)
+ except KeyError:
+ attname = "<unknown:0x%x08x>" % o.attid
+
+ self.report("%s: attid=0x%08x version=%d invocation=%s usn=%s (local=%s) at %s" % (
+ attname, o.attid, o.version,
+ o.originating_invocation_id,
+ o.originating_usn,
+ o.local_usn,
+ time.ctime(samba.nttime2unix(o.originating_change_time))))
+
+ self.report("ERROR: object %s, has changes after deletion" % dn)
+ report_attid(isDeleted)
+ for o in found:
+ report_attid(o)
+
+ return True
+
+ def err_changes_after_deletion(self, dn, repl_val):
+ found, isDeleted = self.find_changes_after_deletion(repl_val)
+
+ in_schema_nc = dn.is_child_of(self.schema_dn)
+ rdn_attr = dn.get_rdn_name()
+ rdn_attid = self.samdb_schema.get_attid_from_lDAPDisplayName(rdn_attr,
+ is_schema_nc=in_schema_nc)
+
+ unexpected = []
+ for o in found:
+ if o.attid == rdn_attid:
+ continue
+ if o.attid == drsuapi.DRSUAPI_ATTID_name:
+ continue
+ if o.attid == drsuapi.DRSUAPI_ATTID_lastKnownParent:
+ continue
+ try:
+ attname = self.samdb_schema.get_lDAPDisplayName_by_attid(o.attid)
+ except KeyError:
+ attname = "<unknown:0x%x08x>" % o.attid
+ unexpected.append(attname)
+
+ if len(unexpected) > 0:
+ self.report('Unexpeted attributes: %s' % ",".join(unexpected))
+ self.report('Not fixing changes after deletion bug')
+ return
+
+ if not self.confirm_all('Delete broken tombstone object %s deleted %s days ago?' % (
+ dn, self.tombstoneLifetime), 'fix_changes_after_deletion_bug'):
+ self.report('Not fixing changes after deletion bug')
+ return
+
+ if self.do_delete(dn, ["relax:0"],
+ "Failed to remove DN %s" % dn):
+ self.report("Removed DN %s" % dn)
+
    def has_replmetadata_zero_invocationid(self, dn, repl_meta_data):
        """Return True (and report) if any replPropertyMetaData entry on dn
        has an all-zero originating invocationID."""
        repl = ndr_unpack(drsblobs.replPropertyMetaDataBlob,
                          repl_meta_data)
        ctr = repl.ctr
        found = False
        for o in ctr.array:
            # Search for a zero invocationID
            if o.originating_invocation_id != misc.GUID("00000000-0000-0000-0000-000000000000"):
                continue

            # keep scanning so every zero-invocationID entry is reported
            found = True
            self.report("""ERROR: on replPropertyMetaData of %s, the instanceType on attribute 0x%08x,
version %d changed at %s is 00000000-0000-0000-0000-000000000000,
but should be non-zero. Proposed fix is to set to our invocationID (%s)."""
                        % (dn, o.attid, o.version,
                           time.ctime(samba.nttime2unix(o.originating_change_time)),
                           self.samdb.get_invocation_id()))

        return found
+
+ def err_replmetadata_zero_invocationid(self, dn, attr, repl_meta_data):
+ repl = ndr_unpack(drsblobs.replPropertyMetaDataBlob,
+ repl_meta_data)
+ ctr = repl.ctr
+ now = samba.unix2nttime(int(time.time()))
+ found = False
+ for o in ctr.array:
+ # Search for a zero invocationID
+ if o.originating_invocation_id != misc.GUID("00000000-0000-0000-0000-000000000000"):
+ continue
+
+ found = True
+ seq = self.samdb.sequence_number(ldb.SEQ_NEXT)
+ o.version = o.version + 1
+ o.originating_change_time = now
+ o.originating_invocation_id = misc.GUID(self.samdb.get_invocation_id())
+ o.originating_usn = seq
+ o.local_usn = seq
+
+ if found:
+ replBlob = ndr_pack(repl)
+ msg = ldb.Message()
+ msg.dn = dn
+
+ if not self.confirm_all('Fix %s on %s by setting originating_invocation_id on some elements to our invocationID %s?'
+ % (attr, dn, self.samdb.get_invocation_id()), 'fix_replmetadata_zero_invocationid'):
+ self.report('Not fixing zero originating_invocation_id in %s on %s\n' % (attr, dn))
+ return
+
+ nmsg = ldb.Message()
+ nmsg.dn = dn
+ nmsg[attr] = ldb.MessageElement(replBlob, ldb.FLAG_MOD_REPLACE, attr)
+ if self.do_modify(nmsg, ["local_oid:%s:0" % dsdb.DSDB_CONTROL_DBCHECK_MODIFY_RO_REPLICA,
+ "local_oid:1.3.6.1.4.1.7165.4.3.14:0"],
+ "Failed to fix attribute %s" % attr):
+ self.report("Fixed attribute '%s' of '%s'\n" % (attr, dn))
+
+ def err_replmetadata_unknown_attid(self, dn, attr, repl_meta_data):
+ repl = ndr_unpack(drsblobs.replPropertyMetaDataBlob,
+ repl_meta_data)
+ ctr = repl.ctr
+ for o in ctr.array:
+ # Search for an invalid attid
+ try:
+ att = self.samdb_schema.get_lDAPDisplayName_by_attid(o.attid)
+ except KeyError:
+ self.report('ERROR: attributeID 0X%0X is not known in our schema, not fixing %s on %s\n' % (o.attid, attr, dn))
+ return
+
    def err_replmetadata_incorrect_attid(self, dn, attr, repl_meta_data, wrong_attids):
        """Fix duplicate, incorrect, or unsorted attids in a
        replPropertyMetaData blob.

        :param dn: DN of the object
        :param attr: metadata attribute name (for messages/modify)
        :param repl_meta_data: raw replPropertyMetaData value
        :param wrong_attids: set of attids detected as incorrect by
            process_metadata()
        """
        repl = ndr_unpack(drsblobs.replPropertyMetaDataBlob,
                          repl_meta_data)
        fix = False

        set_att = set()
        remove_attid = set()
        hash_att = {}

        in_schema_nc = dn.is_child_of(self.schema_dn)

        ctr = repl.ctr
        # Sort the array, except for the last element. This strange
        # construction, creating a new list, due to bugs in samba's
        # array handling in IDL generated objects.
        ctr.array = sorted(ctr.array[:], key=lambda o: o.attid)
        # Now walk it in reverse, so we see the low (and so incorrect,
        # the correct values are above 0x80000000) values first and
        # remove the 'second' value we see.
        for o in reversed(ctr.array):
            # NOTE(review): this print bypasses self.report and looks like
            # leftover debug output — confirm whether it is intentional
            print("%s: 0x%08x" % (dn, o.attid))
            att = self.samdb_schema.get_lDAPDisplayName_by_attid(o.attid)
            if att.lower() in set_att:
                self.report('ERROR: duplicate attributeID values for %s in %s on %s\n' % (att, attr, dn))
                if not self.confirm_all('Fix %s on %s by removing the duplicate value 0x%08x for %s (keeping 0x%08x)?'
                                        % (attr, dn, o.attid, att, hash_att[att].attid),
                                        'fix_replmetadata_duplicate_attid'):
                    self.report('Not fixing duplicate value 0x%08x for %s in %s on %s\n'
                                % (o.attid, att, attr, dn))
                    return
                fix = True
                remove_attid.add(o.attid)
                # We want to set the metadata for the most recent
                # update to have been applied locally, that is the metadata
                # matching the (eg string) value in the attribute
                if o.local_usn > hash_att[att].local_usn:
                    # This is always what we would have sent over DRS,
                    # because the DRS server will have sent the
                    # msDS-IntID, but with the values from both
                    # attribute entries.
                    hash_att[att].version = o.version
                    hash_att[att].originating_change_time = o.originating_change_time
                    hash_att[att].originating_invocation_id = o.originating_invocation_id
                    hash_att[att].originating_usn = o.originating_usn
                    hash_att[att].local_usn = o.local_usn

                # Do not re-add the value to the set or overwrite the hash value
                continue

            hash_att[att] = o
            set_att.add(att.lower())

        # Generate a real list we can sort on properly
        new_list = [o for o in ctr.array if o.attid not in remove_attid]

        if (len(wrong_attids) > 0):
            for o in new_list:
                if o.attid in wrong_attids:
                    att = self.samdb_schema.get_lDAPDisplayName_by_attid(o.attid)
                    correct_attid = self.samdb_schema.get_attid_from_lDAPDisplayName(att, is_schema_nc=in_schema_nc)
                    self.report('ERROR: incorrect attributeID values in %s on %s\n' % (attr, dn))
                    if not self.confirm_all('Fix %s on %s by replacing incorrect value 0x%08x for %s (new 0x%08x)?'
                                            % (attr, dn, o.attid, att, hash_att[att].attid), 'fix_replmetadata_wrong_attid'):
                        self.report('Not fixing incorrect value 0x%08x with 0x%08x for %s in %s on %s\n'
                                    % (o.attid, correct_attid, att, attr, dn))
                        return
                    fix = True
                    o.attid = correct_attid
            if fix:
                # Sort the array, (we changed the value so must re-sort)
                new_list[:] = sorted(new_list[:], key=lambda o: o.attid)

        # If we did not already need to fix it, then ask about sorting
        if not fix:
            self.report('ERROR: unsorted attributeID values in %s on %s\n' % (attr, dn))
            if not self.confirm_all('Fix %s on %s by sorting the attribute list?'
                                    % (attr, dn), 'fix_replmetadata_unsorted_attid'):
                self.report('Not fixing %s on %s\n' % (attr, dn))
                return

            # The actual sort done is done at the top of the function

        ctr.count = len(new_list)
        ctr.array = new_list
        replBlob = ndr_pack(repl)

        nmsg = ldb.Message()
        nmsg.dn = dn
        nmsg[attr] = ldb.MessageElement(replBlob, ldb.FLAG_MOD_REPLACE, attr)
        if self.do_modify(nmsg, ["local_oid:%s:0" % dsdb.DSDB_CONTROL_DBCHECK_MODIFY_RO_REPLICA,
                                 "local_oid:1.3.6.1.4.1.7165.4.3.14:0",
                                 "local_oid:1.3.6.1.4.1.7165.4.3.25:0"],
                          "Failed to fix attribute %s" % attr):
            self.report("Fixed attribute '%s' of '%s'\n" % (attr, dn))
+
+ def is_deleted_deleted_objects(self, obj):
+ faulty = False
+ if "description" not in obj:
+ self.report("ERROR: description not present on Deleted Objects container %s" % obj.dn)
+ faulty = True
+ if "showInAdvancedViewOnly" not in obj or str(obj['showInAdvancedViewOnly'][0]).upper() == 'FALSE':
+ self.report("ERROR: showInAdvancedViewOnly not present on Deleted Objects container %s" % obj.dn)
+ faulty = True
+ if "objectCategory" not in obj:
+ self.report("ERROR: objectCategory not present on Deleted Objects container %s" % obj.dn)
+ faulty = True
+ if "isCriticalSystemObject" not in obj or str(obj['isCriticalSystemObject'][0]).upper() == 'FALSE':
+ self.report("ERROR: isCriticalSystemObject not present on Deleted Objects container %s" % obj.dn)
+ faulty = True
+ if "isRecycled" in obj:
+ self.report("ERROR: isRecycled present on Deleted Objects container %s" % obj.dn)
+ faulty = True
+ if "isDeleted" in obj and str(obj['isDeleted'][0]).upper() == 'FALSE':
+ self.report("ERROR: isDeleted not set on Deleted Objects container %s" % obj.dn)
+ faulty = True
+ if "objectClass" not in obj or (len(obj['objectClass']) != 2 or
+ str(obj['objectClass'][0]) != 'top' or
+ str(obj['objectClass'][1]) != 'container'):
+ self.report("ERROR: objectClass incorrectly set on Deleted Objects container %s" % obj.dn)
+ faulty = True
+ if "systemFlags" not in obj or str(obj['systemFlags'][0]) != '-1946157056':
+ self.report("ERROR: systemFlags incorrectly set on Deleted Objects container %s" % obj.dn)
+ faulty = True
+ return faulty
+
+ def err_deleted_deleted_objects(self, obj):
+ nmsg = ldb.Message()
+ nmsg.dn = dn = obj.dn
+
+ if "description" not in obj:
+ nmsg["description"] = ldb.MessageElement("Container for deleted objects", ldb.FLAG_MOD_REPLACE, "description")
+ if "showInAdvancedViewOnly" not in obj:
+ nmsg["showInAdvancedViewOnly"] = ldb.MessageElement("TRUE", ldb.FLAG_MOD_REPLACE, "showInAdvancedViewOnly")
+ if "objectCategory" not in obj:
+ nmsg["objectCategory"] = ldb.MessageElement("CN=Container,%s" % self.schema_dn, ldb.FLAG_MOD_REPLACE, "objectCategory")
+ if "isCriticalSystemObject" not in obj:
+ nmsg["isCriticalSystemObject"] = ldb.MessageElement("TRUE", ldb.FLAG_MOD_REPLACE, "isCriticalSystemObject")
+ if "isRecycled" in obj:
+ nmsg["isRecycled"] = ldb.MessageElement("TRUE", ldb.FLAG_MOD_DELETE, "isRecycled")
+
+ nmsg["isDeleted"] = ldb.MessageElement("TRUE", ldb.FLAG_MOD_REPLACE, "isDeleted")
+ nmsg["systemFlags"] = ldb.MessageElement("-1946157056", ldb.FLAG_MOD_REPLACE, "systemFlags")
+ nmsg["objectClass"] = ldb.MessageElement(["top", "container"], ldb.FLAG_MOD_REPLACE, "objectClass")
+
+ if not self.confirm_all('Fix Deleted Objects container %s by restoring default attributes?'
+ % (dn), 'fix_deleted_deleted_objects'):
+ self.report('Not fixing missing/incorrect attributes on %s\n' % (dn))
+ return
+
+ if self.do_modify(nmsg, ["relax:0"],
+ "Failed to fix Deleted Objects container %s" % dn):
+ self.report("Fixed Deleted Objects container '%s'\n" % (dn))
+
+ def err_replica_locations(self, obj, cross_ref, attr):
+ nmsg = ldb.Message()
+ nmsg.dn = cross_ref
+ target = self.samdb.get_dsServiceName()
+
+ if self.samdb.am_rodc():
+ self.report('Not fixing %s %s for the RODC' % (attr, obj.dn))
+ return
+
+ if not self.confirm_all('Add yourself to the replica locations for %s?'
+ % (obj.dn), 'fix_replica_locations'):
+ self.report('Not fixing missing/incorrect attributes on %s\n' % (obj.dn))
+ return
+
+ nmsg[attr] = ldb.MessageElement(target, ldb.FLAG_MOD_ADD, attr)
+ if self.do_modify(nmsg, [], "Failed to add %s for %s" % (attr, obj.dn)):
+ self.report("Fixed %s for %s" % (attr, obj.dn))
+
+ def is_fsmo_role(self, dn):
+ if dn == self.samdb.domain_dn:
+ return True
+ if dn == self.infrastructure_dn:
+ return True
+ if dn == self.naming_dn:
+ return True
+ if dn == self.schema_dn:
+ return True
+ if dn == self.rid_dn:
+ return True
+
+ return False
+
+ def calculate_instancetype(self, dn):
+ instancetype = 0
+ nc_root = self.samdb.get_nc_root(dn)
+ if dn == nc_root:
+ instancetype |= dsdb.INSTANCE_TYPE_IS_NC_HEAD
+ try:
+ self.samdb.search(base=dn.parent(), scope=ldb.SCOPE_BASE, attrs=[], controls=["show_recycled:1"])
+ except ldb.LdbError as e4:
+ (enum, estr) = e4.args
+ if enum != ldb.ERR_NO_SUCH_OBJECT:
+ raise
+ else:
+ instancetype |= dsdb.INSTANCE_TYPE_NC_ABOVE
+ if self.write_ncs is not None and str(nc_root) in [str(x) for x in self.write_ncs]:
+ instancetype |= dsdb.INSTANCE_TYPE_WRITE
+
+ return instancetype
+
+ def get_wellknown_sd(self, dn):
+ for [sd_dn, descriptor_fn] in self.wellknown_sds:
+ if dn == sd_dn:
+ domain_sid = security.dom_sid(self.samdb.get_domain_sid())
+ return ndr_unpack(security.descriptor,
+ descriptor_fn(domain_sid,
+ name_map=self.name_map))
+
+ raise KeyError
+
+ def find_checkable_attrs(self, dn, requested_attrs):
+ """A helper function for check_object() that calculates the list of
+ attributes that need to be checked, and returns that as a list
+ in the original case, and a set normalised to lowercase (for
+ easy existence checks).
+ """
+ if requested_attrs is None:
+ attrs = ['*']
+ else:
+ attrs = list(requested_attrs)
+
+ lc_attrs = set(x.lower() for x in attrs)
+
+ def add_attr(a):
+ if a.lower() not in lc_attrs:
+ attrs.append(a)
+ lc_attrs.add(a.lower())
+
+ if ("dn" in lc_attrs or
+ "distinguishedname" in lc_attrs or
+ dn.get_rdn_name().lower() in lc_attrs):
+ attrs.append("name")
+ lc_attrs.add('name')
+
+ if 'name' in lc_attrs:
+ for a in (dn.get_rdn_name(),
+ "isDeleted",
+ "systemFlags"):
+ add_attr(a)
+
+ need_replPropertyMetaData = False
+ if '*' in lc_attrs:
+ need_replPropertyMetaData = True
+ else:
+ for a in attrs:
+ linkID, _ = self.get_attr_linkID_and_reverse_name(a)
+ if linkID == 0:
+ continue
+ if linkID & 1:
+ continue
+ need_replPropertyMetaData = True
+ break
+ if need_replPropertyMetaData:
+ add_attr("replPropertyMetaData")
+
+ add_attr("objectGUID")
+
+ return attrs, lc_attrs
+
    def check_object(self, dn, requested_attrs=None):
        """check one object

        Fetches dn (including deleted/recycled objects and the full
        security descriptor) and runs every per-object consistency
        check: replPropertyMetaData, nTSecurityDescriptor, attribute
        normalisation, DN/name/RDN agreement, parent existence, FSMO
        role owners, DNS replica locations and RID-set sanity.

        :param dn: the object to check
        :param requested_attrs: restrict checking to these attributes,
            or None to check everything ('*')
        :return: number of (potentially fixable) errors found;
            unfixable problems are counted in self.unfixable_errors
        """
        if self.verbose:
            self.report("Checking object %s" % dn)

        # search attrs are used to find the attributes, lc_attrs are
        # used for existence checks
        search_attrs, lc_attrs = self.find_checkable_attrs(dn, requested_attrs)

        try:
            # Request every part of the security descriptor so the SD
            # checks below see owner, group, DACL and SACL.
            sd_flags = 0
            sd_flags |= security.SECINFO_OWNER
            sd_flags |= security.SECINFO_GROUP
            sd_flags |= security.SECINFO_DACL
            sd_flags |= security.SECINFO_SACL

            res = self.samdb.search(base=dn, scope=ldb.SCOPE_BASE,
                                    controls=[
                                        "extended_dn:1:1",
                                        "show_recycled:1",
                                        "show_deleted:1",
                                        "sd_flags:1:%d" % sd_flags,
                                        "reveal_internals:0",
                                    ],
                                    attrs=search_attrs)
        except ldb.LdbError as e10:
            (enum, estr) = e10.args
            if enum == ldb.ERR_NO_SUCH_OBJECT:
                # Inside a transaction a vanished object is an error;
                # outside, another process may simply have removed it.
                if self.in_transaction:
                    self.report("ERROR: Object %s disappeared during check" % dn)
                    return 1
                return 0
            raise
        if len(res) != 1:
            self.report("ERROR: Object %s failed to load during check" % dn)
            return 1
        obj = res[0]
        error_count = 0
        set_attrs_from_md = set()
        set_attrs_seen = set()
        got_objectclass = False

        nc_dn = self.samdb.get_nc_root(obj.dn)
        try:
            deleted_objects_dn = self.samdb.get_wellknown_dn(nc_dn,
                                                             samba.dsdb.DS_GUID_DELETED_OBJECTS_CONTAINER)
        except KeyError:
            # We have no deleted objects DN for schema, and we check for this above for the other
            # NCs
            deleted_objects_dn = None

        object_rdn_attr = None
        object_rdn_val = None
        name_val = None
        isDeleted = False
        systemFlags = 0
        repl_meta_data_val = None

        # First pass: extract the flags and metadata that the later
        # per-attribute checks depend on.
        for attrname in obj:
            if attrname.lower() == 'isdeleted':
                # anything other than the literal "FALSE" counts as deleted
                if str(obj[attrname][0]) != "FALSE":
                    isDeleted = True

            if attrname.lower() == 'systemflags':
                systemFlags = int(obj[attrname][0])

            if attrname.lower() == 'replpropertymetadata':
                repl_meta_data_val = obj[attrname][0]

        # Deleted objects get their own early checks; an expired
        # tombstone needs no further checking at all.
        if isDeleted and repl_meta_data_val:
            if self.has_changes_after_deletion(dn, repl_meta_data_val):
                error_count += 1
                self.err_changes_after_deletion(dn, repl_meta_data_val)
                return error_count
            if self.is_expired_tombstone(dn, repl_meta_data_val):
                return error_count

        # Main per-attribute pass.
        for attrname in obj:
            if attrname == 'dn' or attrname == "distinguishedName":
                continue

            if attrname.lower() == 'objectclass':
                got_objectclass = True

            if attrname.lower() == "name":
                if len(obj[attrname]) != 1:
                    self.unfixable_errors += 1
                    self.report("ERROR: Not fixing num_values(%d) for '%s' on '%s'" %
                                (len(obj[attrname]), attrname, str(obj.dn)))
                else:
                    name_val = str(obj[attrname][0])

            if attrname.lower() == str(obj.dn.get_rdn_name()).lower():
                object_rdn_attr = attrname
                if len(obj[attrname]) != 1:
                    self.unfixable_errors += 1
                    self.report("ERROR: Not fixing num_values(%d) for '%s' on '%s'" %
                                (len(obj[attrname]), attrname, str(obj.dn)))
                else:
                    object_rdn_val = str(obj[attrname][0])

            if attrname.lower() == 'replpropertymetadata':
                if self.has_replmetadata_zero_invocationid(dn, obj[attrname][0]):
                    error_count += 1
                    self.err_replmetadata_zero_invocationid(dn, attrname, obj[attrname][0])
                    # We don't continue, as we may also have other fixes for this attribute
                    # based on what other attributes we see.

                try:
                    (set_attrs_from_md, list_attid_from_md, wrong_attids) \
                        = self.process_metadata(dn, obj[attrname][0])
                except KeyError:
                    error_count += 1
                    self.err_replmetadata_unknown_attid(dn, attrname, obj[attrname])
                    continue

                # The metadata must have no duplicate/wrong attids and
                # must already be sorted by attid.
                if len(set_attrs_from_md) < len(list_attid_from_md) \
                   or len(wrong_attids) > 0 \
                   or sorted(list_attid_from_md) != list_attid_from_md:
                    error_count += 1
                    self.err_replmetadata_incorrect_attid(dn, attrname, obj[attrname][0], wrong_attids)

                else:
                    # Here we check that the first attid is 0
                    # (objectClass).
                    if list_attid_from_md[0] != 0:
                        self.unfixable_errors += 1
                        self.report("ERROR: Not fixing incorrect initial attributeID in '%s' on '%s', it should be objectClass" %
                                    (attrname, str(dn)))

                continue

            if attrname.lower() == 'ntsecuritydescriptor':
                (sd, sd_broken) = self.process_sd(dn, obj)
                if sd_broken is not None:
                    self.err_wrong_sd(dn, sd, sd_broken)
                    error_count += 1
                    continue

                if sd.owner_sid is None or sd.group_sid is None:
                    self.err_missing_sd_owner(dn, sd)
                    error_count += 1
                    continue

                # Compare against the well-known default SD, either for
                # the Deleted Objects container or (with
                # --reset-well-known-acls) for all well-known objects.
                if dn == deleted_objects_dn or self.reset_well_known_acls:
                    try:
                        well_known_sd = self.get_wellknown_sd(dn)
                    except KeyError:
                        continue

                    current_sd = ndr_unpack(security.descriptor,
                                            obj[attrname][0])

                    ignoreAdditionalACEs = False
                    if not self.reset_well_known_acls:
                        ignoreAdditionalACEs = True

                    diff = get_diff_sds(well_known_sd, current_sd,
                                        security.dom_sid(self.samdb.get_domain_sid()),
                                        ignoreAdditionalACEs=ignoreAdditionalACEs)
                    if diff != "":
                        self.err_wrong_default_sd(dn, well_known_sd, diff)
                        error_count += 1
                        continue
                continue

            if attrname.lower() == 'objectclass':
                normalised = self.samdb.dsdb_normalise_attributes(self.samdb_schema, attrname, obj[attrname])
                # Do not consider the attribute incorrect if:
                #  - The sorted (alphabetically) list is the same, including case
                #  - The first and last elements are the same
                #
                # This avoids triggering an error due to
                # non-determinism in the sort routine in (at least)
                # 4.3 and earlier, and the fact that any AUX classes
                # in these attributes are also not sorted when
                # imported from Windows (they are just in the reverse
                # order of last set)
                if sorted(normalised) != sorted(obj[attrname]) \
                   or normalised[0] != obj[attrname][0] \
                   or normalised[-1] != obj[attrname][-1]:
                    self.err_normalise_mismatch_replace(dn, attrname, list(obj[attrname]))
                    error_count += 1
                continue

            if attrname.lower() == 'userparameters':
                # userParameters has a long history of corruption from
                # various upgrade/migration paths; classify the known
                # bad prefixes.
                userparams = obj[attrname][0]
                if userparams == b' ':
                    error_count += 1
                    self.err_short_userParameters(obj, attrname, obj[attrname])
                    continue

                elif userparams[:16] == b'\x20\x00' * 8:
                    # This is the correct, normal prefix
                    continue

                elif userparams[:20] == b'IAAgACAAIAAgACAAIAAg':
                    # this is the typical prefix from a windows migration
                    error_count += 1
                    self.err_base64_userParameters(obj, attrname, obj[attrname])
                    continue

                # 43:00:00:00:74:00:00:00:78 -- example of a UTF-16
                # value; odd bytes are zero in valid UTF-16 here.
                elif (userparams[1] != 0 and
                      userparams[3] != 0 and
                      userparams[5] != 0 and
                      userparams[7] != 0 and
                      userparams[9] != 0):
                    # This is a prefix that is not in UTF-16 format
                    # for the space or munged dialback prefix
                    error_count += 1
                    self.err_utf8_userParameters(obj, attrname, obj[attrname])
                    continue

                elif len(userparams) % 2 != 0:
                    # This is a value that isn't even in length
                    error_count += 1
                    self.err_odd_userParameters(obj, attrname)
                    continue

                elif (userparams[1] == 0 and
                      userparams[2] == 0 and
                      userparams[3] == 0 and
                      userparams[4] != 0 and
                      userparams[5] == 0):
                    # This is a prefix that would happen if a
                    # SAMR-written value was replicated from a Samba
                    # 4.1 server to a working server
                    error_count += 1
                    self.err_doubled_userParameters(obj, attrname, obj[attrname])
                    continue

            if attrname.lower() == 'attributeid' or attrname.lower() == 'governsid':
                # Schema OIDs must be unique across attributes and classes.
                if obj[attrname][0] in self.attribute_or_class_ids:
                    self.unfixable_errors += 1
                    self.report('Error: %s %s on %s already exists as an attributeId or governsId'
                                % (attrname, obj.dn, obj[attrname][0]))
                else:
                    self.attribute_or_class_ids.add(obj[attrname][0])

            # check for empty attributes
            for val in obj[attrname]:
                if val == b'':
                    self.err_empty_attribute(dn, attrname)
                    error_count += 1
                    continue

            # get the syntax oid for the attribute, so we can have
            # special handling for some specific attribute types
            try:
                syntax_oid = self.samdb_schema.get_syntax_oid_from_lDAPDisplayName(attrname)
            except Exception as msg:
                self.err_unknown_attribute(obj, attrname)
                error_count += 1
                continue

            linkID, reverse_link_name = self.get_attr_linkID_and_reverse_name(attrname)

            # Replicated, non-constructed, non-linked attributes must
            # appear in replPropertyMetaData; remember what we saw so
            # we can compare at the end.
            flag = self.samdb_schema.get_systemFlags_from_lDAPDisplayName(attrname)
            if (not flag & dsdb.DS_FLAG_ATTR_NOT_REPLICATED
                and not flag & dsdb.DS_FLAG_ATTR_IS_CONSTRUCTED
                and not linkID):
                set_attrs_seen.add(attrname.lower())

            if syntax_oid in [dsdb.DSDB_SYNTAX_BINARY_DN, dsdb.DSDB_SYNTAX_OR_NAME,
                              dsdb.DSDB_SYNTAX_STRING_DN, ldb.SYNTAX_DN]:
                # it's some form of DN, do specialised checking on those
                error_count += self.check_dn(obj, attrname, syntax_oid)
            else:

                values = set()
                # check for incorrectly normalised attributes
                for val in obj[attrname]:
                    values.add(val)

                    normalised = self.samdb.dsdb_normalise_attributes(self.samdb_schema, attrname, [val])
                    if len(normalised) != 1 or normalised[0] != val:
                        self.err_normalise_mismatch(dn, attrname, obj[attrname])
                        error_count += 1
                        break

                if len(obj[attrname]) != len(values):
                    self.err_duplicate_values(dn, attrname, obj[attrname], list(values))
                    error_count += 1
                    break

            if attrname.lower() == "instancetype":
                calculated_instancetype = self.calculate_instancetype(dn)
                if len(obj["instanceType"]) != 1 or int(obj["instanceType"][0]) != calculated_instancetype:
                    error_count += 1
                    self.err_wrong_instancetype(obj, calculated_instancetype)

        if not got_objectclass and ("*" in lc_attrs or "objectclass" in lc_attrs):
            error_count += 1
            self.err_missing_objectclass(dn)

        if ("*" in lc_attrs or "name" in lc_attrs):
            if name_val is None:
                self.unfixable_errors += 1
                self.report("ERROR: Not fixing missing 'name' on '%s'" % (str(obj.dn)))
            if object_rdn_attr is None:
                self.unfixable_errors += 1
                self.report("ERROR: Not fixing missing '%s' on '%s'" % (obj.dn.get_rdn_name(), str(obj.dn)))

        # Check that the DN agrees with the name/RDN attributes,
        # computing where a deleted object should live if it must move.
        if name_val is not None:
            parent_dn = None
            controls = ["show_recycled:1", "relax:0"]
            if isDeleted:
                if not (systemFlags & samba.dsdb.SYSTEM_FLAG_DISALLOW_MOVE_ON_DELETE):
                    parent_dn = deleted_objects_dn
                controls += ["local_oid:%s:1" % dsdb.DSDB_CONTROL_DBCHECK_FIX_LINK_DN_NAME]
            if parent_dn is None:
                parent_dn = obj.dn.parent()

            try:
                expected_dn = ldb.Dn(self.samdb, "RDN=RDN,%s" % (parent_dn))
            except ValueError as e:
                self.unfixable_errors += 1
                self.report(f"ERROR: could not handle parent DN '{parent_dn}': "
                            "skipping RDN checks")
            else:
                expected_dn.set_component(0, obj.dn.get_rdn_name(), name_val)

                if obj.dn == deleted_objects_dn:
                    expected_dn = obj.dn

                if expected_dn != obj.dn:
                    error_count += 1
                    self.err_wrong_dn(obj, expected_dn, object_rdn_attr,
                                      object_rdn_val, name_val, controls)
                elif obj.dn.get_rdn_value() != object_rdn_val:
                    self.unfixable_errors += 1
                    self.report("ERROR: Not fixing %s=%r on '%s'" % (object_rdn_attr,
                                                                     object_rdn_val,
                                                                     obj.dn))

        show_dn = True
        if repl_meta_data_val:
            if obj.dn == deleted_objects_dn:
                isDeletedAttId = 131120
                # It's 29/12/9999 at 23:59:59 UTC as specified in MS-ADTS 7.1.1.4.2 Deleted Objects Container

                expectedTimeDo = 2650466015990000000
                originating = self.get_originating_time(repl_meta_data_val, isDeletedAttId)
                if originating != expectedTimeDo:
                    if self.confirm_all("Fix isDeleted originating_change_time on '%s'" % str(dn), 'fix_time_metadata'):
                        nmsg = ldb.Message()
                        nmsg.dn = dn
                        nmsg["isDeleted"] = ldb.MessageElement("TRUE", ldb.FLAG_MOD_REPLACE, "isDeleted")
                        error_count += 1
                        self.samdb.modify(nmsg, controls=["provision:0"])

                    else:
                        self.report("Not fixing isDeleted originating_change_time on '%s'" % str(dn))

            # Any replicated attribute we saw must have a metadata entry.
            for att in set_attrs_seen.difference(set_attrs_from_md):
                if show_dn:
                    self.report("On object %s" % dn)
                    show_dn = False
                error_count += 1
                self.report("ERROR: Attribute %s not present in replication metadata" % att)
                if not self.confirm_all("Fix missing replPropertyMetaData element '%s'" % att, 'fix_all_metadata'):
                    self.report("Not fixing missing replPropertyMetaData element '%s'" % att)
                    continue
                self.fix_metadata(obj, att)

        if self.is_fsmo_role(dn):
            if "fSMORoleOwner" not in obj and ("*" in lc_attrs or "fsmoroleowner" in lc_attrs):
                self.err_no_fsmoRoleOwner(obj)
                error_count += 1

        # The parent must exist (unless tombstone garbage collection
        # is racing with us).
        try:
            if dn != self.samdb.get_root_basedn() and str(dn.parent()) not in self.dn_set:
                res = self.samdb.search(base=dn.parent(), scope=ldb.SCOPE_BASE,
                                        controls=["show_recycled:1", "show_deleted:1"])
        except ldb.LdbError as e11:
            (enum, estr) = e11.args
            if enum == ldb.ERR_NO_SUCH_OBJECT:
                if isDeleted:
                    self.report("WARNING: parent object not found for %s" % (obj.dn))
                    self.report("Not moving to LostAndFound "
                                "(tombstone garbage collection in progress?)")
                else:
                    self.err_missing_parent(obj)
                    error_count += 1
            else:
                raise

        if dn in self.deleted_objects_containers and '*' in lc_attrs:
            if self.is_deleted_deleted_objects(obj):
                self.err_deleted_deleted_objects(obj)
                error_count += 1

        # A DNS partition we replicate must list us in its replica
        # locations on the crossRef.
        for (dns_part, msg) in self.dns_partitions:
            if dn == dns_part and 'repsFrom' in obj:
                location = "msDS-NC-Replica-Locations"
                if self.samdb.am_rodc():
                    location = "msDS-NC-RO-Replica-Locations"

                if location not in msg:
                    # There are no replica locations!
                    self.err_replica_locations(obj, msg.dn, location)
                    error_count += 1
                    continue

                found = False
                for loc in msg[location]:
                    if str(loc) == self.samdb.get_dsServiceName():
                        found = True
                if not found:
                    # This DC is not in the replica locations
                    self.err_replica_locations(obj, msg.dn, location)
                    error_count += 1

        if dn == self.server_ref_dn:
            # Check we have a valid RID Set
            if "*" in lc_attrs or "ridsetreferences" in lc_attrs:
                if "rIDSetReferences" not in obj:
                    # NO RID SET reference
                    # We are RID master, allocate it.
                    error_count += 1

                    if self.is_rid_master:
                        # Allocate a RID Set
                        if self.confirm_all('Allocate the missing RID set for '
                                            'RID master?',
                                            'fix_missing_rid_set_master'):

                            # We don't have auto-transaction logic on
                            # extended operations, so we have to do it
                            # here.

                            self.samdb.transaction_start()

                            try:
                                self.samdb.create_own_rid_set()

                            except:
                                self.samdb.transaction_cancel()
                                raise

                            self.samdb.transaction_commit()

                    elif not self.samdb.am_rodc():
                        self.report("No RID Set found for this server: %s, "
                                    "and we are not the RID Master (so can "
                                    "not self-allocate)" % dn)

        # Check some details of our own RID Set
        #
        # Note that the attributes have very bad names.  From ridalloc.c:
        #
        # Note: the RID allocation attributes in AD are very badly named.
        #   Here is what we think they really do:
        #
        #   in RID Set object:
        #     - rIDPreviousAllocationPool: the pool which a DC is currently
        #       pulling RIDs from. Managed by client DC
        #
        #     - rIDAllocationPool: the pool that the DC will switch to next,
        #       when rIDPreviousAllocationPool is exhausted. Managed by RID
        #       Manager.
        #
        #     - rIDNextRID: the last RID allocated by this DC. Managed by
        #       client DC
        #
        #   in RID Manager object:
        #     - rIDAvailablePool: the pool where the RID Manager gets new rID
        #       pools from when it gets a EXOP_RID_ALLOC getncchanges call
        #       (or locally when the DC is the RID Manager)

        if dn == self.rid_set_dn:
            pool_attrs = ["rIDAllocationPool", "rIDPreviousAllocationPool"]

            res = self.samdb.search(base=self.rid_set_dn, scope=ldb.SCOPE_BASE,
                                    attrs=pool_attrs)

            for pool_attr in pool_attrs:
                if pool_attr not in res[0]:
                    continue

                pool = int(res[0][pool_attr][0])

                # Each pool packs (low, high) RIDs into one 64-bit value.
                high = pool >> 32
                low = 0xFFFFFFFF & pool

                if pool != 0 and low >= high:
                    self.report("Invalid RID pool %d-%d, %d >= %d!" %
                                (low, high, low, high))
                    self.unfixable_errors += 1

            if "rIDAllocationPool" not in res[0]:
                self.report("No rIDAllocationPool found in %s" % dn)
                self.unfixable_errors += 1

            try:
                next_free_rid, high = self.samdb.free_rid_bounds()
            except ldb.LdbError as err:
                enum, estr = err.args
                self.report("Couldn't get available RIDs: %s" % estr)
                self.unfixable_errors += 1
            else:
                # Check the remainder of this pool for conflicts.  If
                # ridalloc_allocate_rid() moves to a new pool, this
                # will be above high, so we will stop.
                domain_sid = self.samdb.get_domain_sid()
                while next_free_rid <= high:
                    sid = "%s-%d" % (domain_sid, next_free_rid)
                    try:
                        res = self.samdb.search(base="<SID=%s>" % sid,
                                                scope=ldb.SCOPE_BASE,
                                                attrs=[])
                    except ldb.LdbError as e:
                        (enum, estr) = e.args
                        if enum != ldb.ERR_NO_SUCH_OBJECT:
                            raise
                        res = None
                    if res is not None:
                        self.report("SID %s for %s conflicts with our current "
                                    "RID set in %s" % (sid, res[0].dn, dn))
                        error_count += 1

                        if self.confirm_all('Fix conflict between SID %s and '
                                            'RID pool in %s by allocating a '
                                            'new RID?'
                                            % (sid, dn),
                                            'fix_sid_rid_set_conflict'):
                            self.samdb.transaction_start()

                            # This will burn RIDs, which will move
                            # past the conflict.  We then check again
                            # to see if the new RID conflicts, until
                            # the end of the current pool.  We don't
                            # look at the next pool to avoid burning
                            # all RIDs in one go in some strange
                            # failure case.
                            try:
                                while True:
                                    allocated_rid = self.samdb.allocate_rid()
                                    if allocated_rid >= next_free_rid:
                                        next_free_rid = allocated_rid + 1
                                        break
                            except:
                                self.samdb.transaction_cancel()
                                raise

                            self.samdb.transaction_commit()
                        else:
                            break
                    else:
                        next_free_rid += 1

        return error_count
+
+ ################################################################
+ # check special @ROOTDSE attributes
+ def check_rootdse(self):
+ """check the @ROOTDSE special object"""
+ dn = ldb.Dn(self.samdb, '@ROOTDSE')
+ if self.verbose:
+ self.report("Checking object %s" % dn)
+ res = self.samdb.search(base=dn, scope=ldb.SCOPE_BASE)
+ if len(res) != 1:
+ self.report("Object %s disappeared during check" % dn)
+ return 1
+ obj = res[0]
+ error_count = 0
+
+ # check that the dsServiceName is in GUID form
+ if 'dsServiceName' not in obj:
+ self.report('ERROR: dsServiceName missing in @ROOTDSE')
+ return error_count + 1
+
+ if not str(obj['dsServiceName'][0]).startswith('<GUID='):
+ self.report('ERROR: dsServiceName not in GUID form in @ROOTDSE')
+ error_count += 1
+ if not self.confirm('Change dsServiceName to GUID form?'):
+ return error_count
+ res = self.samdb.search(base=ldb.Dn(self.samdb, obj['dsServiceName'][0].decode('utf8')),
+ scope=ldb.SCOPE_BASE, attrs=['objectGUID'])
+ guid_str = str(ndr_unpack(misc.GUID, res[0]['objectGUID'][0]))
+ m = ldb.Message()
+ m.dn = dn
+ m['dsServiceName'] = ldb.MessageElement("<GUID=%s>" % guid_str,
+ ldb.FLAG_MOD_REPLACE, 'dsServiceName')
+ if self.do_modify(m, [], "Failed to change dsServiceName to GUID form", validate=False):
+ self.report("Changed dsServiceName to GUID form")
+ return error_count
+
+ ###############################################
+ # re-index the database
+
+ def reindex_database(self):
+ """re-index the whole database"""
+ m = ldb.Message()
+ m.dn = ldb.Dn(self.samdb, "@ATTRIBUTES")
+ m['add'] = ldb.MessageElement('NONE', ldb.FLAG_MOD_ADD, 'force_reindex')
+ m['delete'] = ldb.MessageElement('NONE', ldb.FLAG_MOD_DELETE, 'force_reindex')
+ return self.do_modify(m, [], 're-indexed database', validate=False)
+
+ ###############################################
+ # reset @MODULES
+ def reset_modules(self):
+ """reset @MODULES to that needed for current sam.ldb (to read a very old database)"""
+ m = ldb.Message()
+ m.dn = ldb.Dn(self.samdb, "@MODULES")
+ m['@LIST'] = ldb.MessageElement('samba_dsdb', ldb.FLAG_MOD_REPLACE, '@LIST')
+ return self.do_modify(m, [], 'reset @MODULES on database', validate=False)
diff --git a/python/samba/descriptor.py b/python/samba/descriptor.py
new file mode 100644
index 0000000..362510c
--- /dev/null
+++ b/python/samba/descriptor.py
@@ -0,0 +1,723 @@
+
+# Unix SMB/CIFS implementation.
+# backend code for provisioning a Samba4 server
+
+# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007-2010
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2008-2009
+# Copyright (C) Oliver Liebel <oliver@itc.li> 2008-2009
+# Copyright (C) Amitay Isaacs <amitay@samba.org> 2011
+#
+# Based on the original in EJS:
+# Copyright (C) Andrew Tridgell <tridge@samba.org> 2005
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Functions for setting up a Samba configuration (security descriptors)."""
+
+from samba.dcerpc import security
+from samba.ndr import ndr_pack
+from samba.schema import get_schema_descriptor
+import ldb
+import re
+
+# Descriptors of naming contexts and other important objects
+
+
def sddl2binary(sddl_in, domain_sid, name_map):
    """Expand symbolic names in an SDDL string and NDR-pack it.

    :param sddl_in: the SDDL string (may contain symbolic names)
    :param domain_sid: domain SID used to resolve domain-relative SIDs
    :param name_map: mapping of symbolic name -> SID string; each key
        occurring in the SDDL is textually replaced by its value
    :return: the packed security descriptor blob
    """
    sddl = str(sddl_in)
    for name, sid in name_map.items():
        sddl = sddl.replace(name, sid)

    descriptor = security.descriptor.from_sddl(sddl, domain_sid)
    return ndr_pack(descriptor)
+
+
def get_empty_descriptor(domain_sid, name_map=None):
    """Return the packed form of an empty security descriptor.

    The empty SDDL string yields a descriptor with no owner, group,
    DACL or SACL; *name_map* is accepted only for interface symmetry
    with the other descriptor helpers.
    """
    return sddl2binary("", domain_sid, name_map if name_map is not None else {})
+
+# "get_schema_descriptor" is located in "schema.py"
+
+
def get_deletedobjects_descriptor(domain_sid, name_map=None):
    """Packed security descriptor for a Deleted Objects container."""
    sddl = ("O:SYG:SYD:PAI"
            "(A;;RPWPCCDCLCRCWOWDSDSW;;;SY)"
            "(A;;RPLC;;;BA)")
    return sddl2binary(sddl, domain_sid, name_map if name_map is not None else {})
+
+
def get_config_descriptor(domain_sid, name_map=None):
    """Packed security descriptor for the configuration partition root."""
    sddl = ("O:EAG:EAD:(OA;;CR;1131f6aa-9c07-11d1-f79f-00c04fc2dcd2;;ED)"
            "(OA;;CR;1131f6ab-9c07-11d1-f79f-00c04fc2dcd2;;ED)"
            "(OA;;CR;1131f6ac-9c07-11d1-f79f-00c04fc2dcd2;;ED)"
            "(OA;;CR;1131f6aa-9c07-11d1-f79f-00c04fc2dcd2;;BA)"
            "(OA;;CR;1131f6ab-9c07-11d1-f79f-00c04fc2dcd2;;BA)"
            "(OA;;CR;1131f6ac-9c07-11d1-f79f-00c04fc2dcd2;;BA)"
            "(A;;RPLCLORC;;;AU)(A;CI;RPWPCRCCDCLCLORCWOWDSDDTSW;;;EA)"
            "(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)(A;CIIO;RPWPCRCCLCLORCWOWDSDSW;;;DA)"
            "(OA;;CR;1131f6ad-9c07-11d1-f79f-00c04fc2dcd2;;ED)"
            "(OA;;CR;89e95b76-444d-4c62-991a-0facbeda640c;;ED)"
            "(OA;;CR;1131f6ad-9c07-11d1-f79f-00c04fc2dcd2;;BA)"
            "(OA;;CR;89e95b76-444d-4c62-991a-0facbeda640c;;BA)"
            "(OA;;CR;1131f6aa-9c07-11d1-f79f-00c04fc2dcd2;;RO)"
            "S:(AU;SA;WPWOWD;;;WD)(AU;SA;CR;;;BA)(AU;SA;CR;;;DU)"
            "(OU;SA;CR;45ec5156-db7e-47bb-b53f-dbeb2d03c40f;;WD)")
    return sddl2binary(sddl, domain_sid, name_map if name_map is not None else {})
+
+
def get_config_partitions_descriptor(domain_sid, name_map=None):
    """Packed security descriptor for CN=Partitions in the config NC."""
    sddl = ("D:"
            "(A;;LCLORC;;;AU)"
            "(OA;;RP;e48d0154-bcf8-11d1-8702-00c04fb96050;;AU)"
            "(OA;;RP;d31a8757-2447-4545-8081-3bb610cacbf2;;AU)"
            "(OA;;RP;66171887-8f3c-11d0-afda-00c04fd930c9;;AU)"
            "(OA;;RP;032160bf-9824-11d1-aec0-0000f80367c1;;AU)"
            "(OA;;RP;789ee1eb-8c8e-4e4c-8cec-79b31b7617b5;;AU)"
            "(OA;;RP;5706aeaf-b940-4fb2-bcfc-5268683ad9fe;;AU)"
            "(A;;RPWPCRCCLCLORCWOWDSW;;;EA)"
            "(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)"
            "(A;;CC;;;ED)"
            "(OA;CIIO;WP;3df793df-9858-4417-a701-735a1ecebf74;bf967a8d-0de6-11d0-a285-00aa003049e2;BA)"
            "S:"
            "(AU;CISA;WPCRCCDCWOWDSDDT;;;WD)")
    return sddl2binary(sddl, domain_sid, name_map if name_map is not None else {})
+
+
def get_config_sites_descriptor(domain_sid, name_map=None):
    """Packed security descriptor for CN=Sites in the config NC."""
    sddl = ("D:"
            "(A;;RPLCLORC;;;AU)"
            "(OA;CIIO;SW;d31a8757-2447-4545-8081-3bb610cacbf2;f0f8ffab-1191-11d0-a060-00aa006c33ed;RO)"
            "(A;;RPWPCRCCLCLORCWOWDSW;;;EA)"
            "(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)"
            "S:"
            "(AU;CISA;CCDCSDDT;;;WD)"
            "(OU;CIIOSA;CR;;f0f8ffab-1191-11d0-a060-00aa006c33ed;WD)"
            "(OU;CIIOSA;WP;f30e3bbe-9ff0-11d1-b603-0000f80367c1;bf967ab3-0de6-11d0-a285-00aa003049e2;WD)"
            "(OU;CIIOSA;WP;f30e3bbf-9ff0-11d1-b603-0000f80367c1;bf967ab3-0de6-11d0-a285-00aa003049e2;WD)"
            "(OU;CIIOSA;WP;3e10944c-c354-11d0-aff8-0000f80367c1;b7b13124-b82e-11d0-afee-0000f80367c1;WD)")
    return sddl2binary(sddl, domain_sid, name_map if name_map is not None else {})
+
+
def get_config_ntds_quotas_descriptor(domain_sid, name_map=None):
    """Packed security descriptor for CN=NTDS Quotas in the config NC."""
    sddl = ("D:"
            "(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;EA)"
            "(A;;RPLCLORC;;;BA)"
            "(OA;;CR;4ecc03fe-ffc0-4947-b630-eb672a8a9dbc;;WD)")
    return sddl2binary(sddl, domain_sid, name_map if name_map is not None else {})
+
+
def get_config_delete_protected1_descriptor(domain_sid, name_map=None):
    """Packed descriptor for delete-protected config containers (AU read)."""
    sddl = ("D:AI"
            "(A;;RPLCLORC;;;AU)"
            "(A;;RPWPCRCCLCLORCWOWDSW;;;EA)"
            "(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)")
    return sddl2binary(sddl, domain_sid, name_map if name_map is not None else {})
+
+
def get_config_delete_protected1wd_descriptor(domain_sid, name_map=None):
    """Packed descriptor for delete-protected config containers (WD read)."""
    sddl = ("D:AI"
            "(A;;RPLCLORC;;;WD)"
            "(A;;RPWPCRCCLCLORCWOWDSW;;;EA)"
            "(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)")
    return sddl2binary(sddl, domain_sid, name_map if name_map is not None else {})
+
+
def get_config_delete_protected2_descriptor(domain_sid, name_map=None):
    """Packed descriptor for delete-protected config containers (EA can DC)."""
    sddl = ("D:AI"
            "(A;;RPLCLORC;;;AU)"
            "(A;;RPWPCRCCDCLCLORCWOWDSW;;;EA)"
            "(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)")
    return sddl2binary(sddl, domain_sid, name_map if name_map is not None else {})
+
+
def get_domain_descriptor(domain_sid, name_map=None):
    """Packed security descriptor for the domain partition root."""
    sddl = ("O:BAG:BAD:AI(OA;CIIO;RP;4c164200-20c0-11d0-a768-00aa006e0529;4828cc14-1437-45bc-9b07-ad6f015e5f28;RU)"
            "(OA;CIIO;RP;4c164200-20c0-11d0-a768-00aa006e0529;bf967aba-0de6-11d0-a285-00aa003049e2;RU)"
            "(OA;CIIO;RP;5f202010-79a5-11d0-9020-00c04fc2d4cf;4828cc14-1437-45bc-9b07-ad6f015e5f28;RU)"
            "(OA;CIIO;RP;5f202010-79a5-11d0-9020-00c04fc2d4cf;bf967aba-0de6-11d0-a285-00aa003049e2;RU)"
            "(OA;CIIO;RP;bc0ac240-79a9-11d0-9020-00c04fc2d4cf;4828cc14-1437-45bc-9b07-ad6f015e5f28;RU)"
            "(OA;CIIO;RP;bc0ac240-79a9-11d0-9020-00c04fc2d4cf;bf967aba-0de6-11d0-a285-00aa003049e2;RU)"
            "(OA;CIIO;RP;59ba2f42-79a2-11d0-9020-00c04fc2d3cf;4828cc14-1437-45bc-9b07-ad6f015e5f28;RU)"
            "(OA;CIIO;RP;59ba2f42-79a2-11d0-9020-00c04fc2d3cf;bf967aba-0de6-11d0-a285-00aa003049e2;RU)"
            "(OA;CIIO;RP;037088f8-0ae1-11d2-b422-00a0c968f939;4828cc14-1437-45bc-9b07-ad6f015e5f28;RU)"
            "(OA;CIIO;RP;037088f8-0ae1-11d2-b422-00a0c968f939;bf967aba-0de6-11d0-a285-00aa003049e2;RU)"
            "(OA;;CR;1131f6aa-9c07-11d1-f79f-00c04fc2dcd2;;RO)"
            "(OA;;CR;1131f6ad-9c07-11d1-f79f-00c04fc2dcd2;;DD)"
            "(OA;CIIO;RP;b7c69e6d-2cc7-11d2-854e-00a0c983f608;bf967a86-0de6-11d0-a285-00aa003049e2;ED)"
            "(OA;CIIO;RP;b7c69e6d-2cc7-11d2-854e-00a0c983f608;bf967a9c-0de6-11d0-a285-00aa003049e2;ED)"
            "(OA;CIIO;RP;b7c69e6d-2cc7-11d2-854e-00a0c983f608;bf967aba-0de6-11d0-a285-00aa003049e2;ED)"
            "(OA;;CR;89e95b76-444d-4c62-991a-0facbeda640c;;BA)"
            "(OA;;CR;1131f6aa-9c07-11d1-f79f-00c04fc2dcd2;;BA)"
            "(OA;;CR;1131f6ab-9c07-11d1-f79f-00c04fc2dcd2;;BA)"
            "(OA;;CR;1131f6ac-9c07-11d1-f79f-00c04fc2dcd2;;BA)"
            "(OA;;CR;1131f6ad-9c07-11d1-f79f-00c04fc2dcd2;;BA)"
            "(OA;;CR;1131f6ae-9c07-11d1-f79f-00c04fc2dcd2;;BA)"
            "(OA;;CR;e2a36dc9-ae17-47c3-b58b-be34c55ba633;;S-1-5-32-557)"
            "(OA;;RP;c7407360-20bf-11d0-a768-00aa006e0529;;RU)"
            "(OA;;RP;b8119fd0-04f6-4762-ab7a-4986c76b3f9a;;RU)"
            "(OA;CIIO;RPLCLORC;;4828cc14-1437-45bc-9b07-ad6f015e5f28;RU)"
            "(OA;CIIO;RPLCLORC;;bf967a9c-0de6-11d0-a285-00aa003049e2;RU)"
            "(OA;CIIO;RPLCLORC;;bf967aba-0de6-11d0-a285-00aa003049e2;RU)"
            "(OA;;CR;05c74c5e-4deb-43b4-bd9f-86664c2a7fd5;;AU)"
            "(OA;;CR;89e95b76-444d-4c62-991a-0facbeda640c;;ED)"
            "(OA;;CR;ccc2dc7d-a6ad-4a7a-8846-c04e3cc53501;;AU)"
            "(OA;;CR;280f369c-67c7-438e-ae98-1d46f3c6f541;;AU)"
            "(OA;;CR;1131f6aa-9c07-11d1-f79f-00c04fc2dcd2;;ED)"
            "(OA;;CR;1131f6ab-9c07-11d1-f79f-00c04fc2dcd2;;ED)"
            "(OA;;CR;1131f6ac-9c07-11d1-f79f-00c04fc2dcd2;;ED)"
            "(OA;;CR;1131f6ae-9c07-11d1-f79f-00c04fc2dcd2;;ED)"
            "(OA;;RP;b8119fd0-04f6-4762-ab7a-4986c76b3f9a;;AU)"
            "(OA;CIIO;RPWPCR;91e647de-d96f-4b70-9557-d63ff4f3ccd8;;PS)"
            "(OA;CIIO;WP;ea1b7b93-5e48-46d5-bc6c-4df4fda78a35;bf967a86-0de6-11d0-a285-00aa003049e2;PS)"
            "(OA;;CR;3e0f7e18-2c7a-4c10-ba82-4d926db99a3e;;CN)"
            "(OA;OICI;RPWP;3f78c3e5-f79a-46bd-a0b8-9d18116ddc79;;PS)"
            "(OA;CI;RPWP;5b47d60f-6090-40b2-9f37-2a4de88f3063;;KA)"
            "(OA;CI;RPWP;5b47d60f-6090-40b2-9f37-2a4de88f3063;;EK)"
            "(OA;CIIO;SW;9b026da6-0d3c-465c-8bee-5199d7165cba;bf967a86-0de6-11d0-a285-00aa003049e2;PS)"
            "(OA;CIIO;SW;9b026da6-0d3c-465c-8bee-5199d7165cba;bf967a86-0de6-11d0-a285-00aa003049e2;CO)"
            "(A;;RPWPCRCCLCLORCWOWDSW;;;DA)"
            "(A;CI;RPWPCRCCDCLCLORCWOWDSDDTSW;;;EA)"
            "(A;;RPRC;;;RU)"
            "(A;CI;LC;;;RU)"
            "(A;CI;RPWPCRCCLCLORCWOWDSDSW;;;BA)"
            "(A;;RP;;;WD)"
            "(A;;RPLCLORC;;;ED)"
            "(A;;RPLCLORC;;;AU)"
            "(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)"
            "S:AI(OU;CISA;WP;f30e3bbe-9ff0-11d1-b603-0000f80367c1;bf967aa5-0de6-11d0-a285-00aa003049e2;WD)"
            "(OU;CISA;WP;f30e3bbf-9ff0-11d1-b603-0000f80367c1;bf967aa5-0de6-11d0-a285-00aa003049e2;WD)"
            "(AU;SA;CR;;;DU)(AU;SA;CR;;;BA)(AU;SA;WPWOWD;;;WD)")
    return sddl2binary(sddl, domain_sid, name_map if name_map is not None else {})
+
+
def get_domain_infrastructure_descriptor(domain_sid, name_map=None):
    """Packed security descriptor for CN=Infrastructure in the domain NC."""
    sddl = ("D:"
            "(A;;RPLCLORC;;;AU)"
            "(A;;RPWPCRCCLCLORCWOWDSW;;;DA)"
            "(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)"
            "S:"
            "(AU;SA;WPCR;;;WD)")
    return sddl2binary(sddl, domain_sid, name_map if name_map is not None else {})
+
+
def get_domain_builtin_descriptor(domain_sid, name_map=None):
    """Packed security descriptor for CN=Builtin in the domain NC."""
    sddl = ("D:"
            "(OA;CIIO;RP;4c164200-20c0-11d0-a768-00aa006e0529;4828cc14-1437-45bc-9b07-ad6f015e5f28;RU)"
            "(OA;CIIO;RP;4c164200-20c0-11d0-a768-00aa006e0529;bf967aba-0de6-11d0-a285-00aa003049e2;RU)"
            "(OA;CIIO;RP;5f202010-79a5-11d0-9020-00c04fc2d4cf;4828cc14-1437-45bc-9b07-ad6f015e5f28;RU)"
            "(OA;CIIO;RP;5f202010-79a5-11d0-9020-00c04fc2d4cf;bf967aba-0de6-11d0-a285-00aa003049e2;RU)"
            "(OA;CIIO;RP;bc0ac240-79a9-11d0-9020-00c04fc2d4cf;4828cc14-1437-45bc-9b07-ad6f015e5f28;RU)"
            "(OA;CIIO;RP;bc0ac240-79a9-11d0-9020-00c04fc2d4cf;bf967aba-0de6-11d0-a285-00aa003049e2;RU)"
            "(OA;CIIO;RP;59ba2f42-79a2-11d0-9020-00c04fc2d3cf;4828cc14-1437-45bc-9b07-ad6f015e5f28;RU)"
            "(OA;CIIO;RP;59ba2f42-79a2-11d0-9020-00c04fc2d3cf;bf967aba-0de6-11d0-a285-00aa003049e2;RU)"
            "(OA;CIIO;RP;037088f8-0ae1-11d2-b422-00a0c968f939;4828cc14-1437-45bc-9b07-ad6f015e5f28;RU)"
            "(OA;CIIO;RP;037088f8-0ae1-11d2-b422-00a0c968f939;bf967aba-0de6-11d0-a285-00aa003049e2;RU)"
            "(OA;;CR;1131f6aa-9c07-11d1-f79f-00c04fc2dcd2;;RO)"
            "(OA;;CR;1131f6ad-9c07-11d1-f79f-00c04fc2dcd2;;DD)"
            "(OA;CIIO;RP;b7c69e6d-2cc7-11d2-854e-00a0c983f608;bf967a86-0de6-11d0-a285-00aa003049e2;ED)"
            "(OA;CIIO;RP;b7c69e6d-2cc7-11d2-854e-00a0c983f608;bf967a9c-0de6-11d0-a285-00aa003049e2;ED)"
            "(OA;CIIO;RP;b7c69e6d-2cc7-11d2-854e-00a0c983f608;bf967aba-0de6-11d0-a285-00aa003049e2;ED)"
            "(OA;;CR;89e95b76-444d-4c62-991a-0facbeda640c;;BA)"
            "(OA;;CR;1131f6aa-9c07-11d1-f79f-00c04fc2dcd2;;BA)"
            "(OA;;CR;1131f6ab-9c07-11d1-f79f-00c04fc2dcd2;;BA)"
            "(OA;;CR;1131f6ac-9c07-11d1-f79f-00c04fc2dcd2;;BA)"
            "(OA;;CR;1131f6ad-9c07-11d1-f79f-00c04fc2dcd2;;BA)"
            "(OA;;CR;1131f6ae-9c07-11d1-f79f-00c04fc2dcd2;;BA)"
            "(OA;;CR;e2a36dc9-ae17-47c3-b58b-be34c55ba633;;S-1-5-32-557)"
            "(OA;;RP;c7407360-20bf-11d0-a768-00aa006e0529;;RU)"
            "(OA;;RP;b8119fd0-04f6-4762-ab7a-4986c76b3f9a;;RU)"
            "(OA;CIIO;RPLCLORC;;4828cc14-1437-45bc-9b07-ad6f015e5f28;RU)"
            "(OA;CIIO;RPLCLORC;;bf967a9c-0de6-11d0-a285-00aa003049e2;RU)"
            "(OA;CIIO;RPLCLORC;;bf967aba-0de6-11d0-a285-00aa003049e2;RU)"
            "(OA;;CR;05c74c5e-4deb-43b4-bd9f-86664c2a7fd5;;AU)"
            "(OA;;CR;89e95b76-444d-4c62-991a-0facbeda640c;;ED)"
            "(OA;;CR;ccc2dc7d-a6ad-4a7a-8846-c04e3cc53501;;AU)"
            "(OA;;CR;280f369c-67c7-438e-ae98-1d46f3c6f541;;AU)"
            "(OA;;CR;1131f6aa-9c07-11d1-f79f-00c04fc2dcd2;;ED)"
            "(OA;;CR;1131f6ab-9c07-11d1-f79f-00c04fc2dcd2;;ED)"
            "(OA;;CR;1131f6ac-9c07-11d1-f79f-00c04fc2dcd2;;ED)"
            "(OA;;CR;1131f6ae-9c07-11d1-f79f-00c04fc2dcd2;;ED)"
            "(OA;;RP;b8119fd0-04f6-4762-ab7a-4986c76b3f9a;;AU)"
            "(OA;CIIO;RPWPCR;91e647de-d96f-4b70-9557-d63ff4f3ccd8;;PS)"
            "(OA;CIIO;SW;9b026da6-0d3c-465c-8bee-5199d7165cba;bf967a86-0de6-11d0-a285-00aa003049e2;CO)"
            "(OA;OICI;RPWP;3f78c3e5-f79a-46bd-a0b8-9d18116ddc79;;PS)"
            "(OA;CIIO;SW;9b026da6-0d3c-465c-8bee-5199d7165cba;bf967a86-0de6-11d0-a285-00aa003049e2;PS)"
            "(OA;CIIO;WP;ea1b7b93-5e48-46d5-bc6c-4df4fda78a35;bf967a86-0de6-11d0-a285-00aa003049e2;PS)"
            "(A;;RPWPCRCCLCLORCWOWDSW;;;DA)"
            "(A;CI;RPWPCRCCDCLCLORCWOWDSDDTSW;;;EA)"
            "(A;;RPRC;;;RU)"
            "(A;CI;LC;;;RU)"
            "(A;CI;RPWPCRCCLCLORCWOWDSDSW;;;BA)"
            "(A;;RP;;;WD)"
            "(A;;RPLCLORC;;;ED)"
            "(A;;RPLCLORC;;;AU)"
            "(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)"
            "S:"
            "(OU;CISA;WP;f30e3bbe-9ff0-11d1-b603-0000f80367c1;bf967aa5-0de6-11d0-a285-00aa003049e2;WD)"
            "(OU;CISA;WP;f30e3bbf-9ff0-11d1-b603-0000f80367c1;bf967aa5-0de6-11d0-a285-00aa003049e2;WD)"
            "(AU;SA;CR;;;DU)"
            "(AU;SA;CR;;;BA)"
            "(AU;SA;WPWOWD;;;WD)")
    return sddl2binary(sddl, domain_sid, name_map if name_map is not None else {})
+
+
def get_domain_computers_descriptor(domain_sid, name_map=None):
    """Packed security descriptor for CN=Computers in the domain NC."""
    sddl = ("D:"
            "(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)"
            "(A;;RPWPCRCCDCLCLORCWOWDSW;;;DA)"
            "(OA;;CCDC;bf967a86-0de6-11d0-a285-00aa003049e2;;AO)"
            "(OA;;CCDC;bf967aba-0de6-11d0-a285-00aa003049e2;;AO)"
            "(OA;;CCDC;bf967a9c-0de6-11d0-a285-00aa003049e2;;AO)"
            "(OA;;CCDC;bf967aa8-0de6-11d0-a285-00aa003049e2;;PO)"
            "(A;;RPLCLORC;;;AU)"
            "(OA;;CCDC;4828cc14-1437-45bc-9b07-ad6f015e5f28;;AO)"
            "S:")
    return sddl2binary(sddl, domain_sid, name_map if name_map is not None else {})
+
+
def get_domain_users_descriptor(domain_sid, name_map=None):
    """Packed security descriptor for CN=Users in the domain NC."""
    sddl = ("D:"
            "(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)"
            "(A;;RPWPCRCCDCLCLORCWOWDSW;;;DA)"
            "(OA;;CCDC;bf967aba-0de6-11d0-a285-00aa003049e2;;AO)"
            "(OA;;CCDC;bf967a9c-0de6-11d0-a285-00aa003049e2;;AO)"
            "(OA;;CCDC;bf967aa8-0de6-11d0-a285-00aa003049e2;;PO)"
            "(A;;RPLCLORC;;;AU)"
            "(OA;;CCDC;4828cc14-1437-45bc-9b07-ad6f015e5f28;;AO)"
            "S:")
    return sddl2binary(sddl, domain_sid, name_map if name_map is not None else {})
+
+
def get_managed_service_accounts_descriptor(domain_sid, name_map=None):
    """Packed security descriptor for CN=Managed Service Accounts."""
    sddl = ("D:"
            "(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)"
            "(A;;RPWPCRCCDCLCLORCWOWDSW;;;DA)"
            "(OA;;CCDC;ce206244-5827-4a86-ba1c-1c0c386c1b64;;AO)"
            "(OA;;CCDC;bf967aba-0de6-11d0-a285-00aa003049e2;;AO)"
            "(OA;;CCDC;bf967a9c-0de6-11d0-a285-00aa003049e2;;AO)"
            "(A;;RPLCLORC;;;AU)"
            "S:")
    return sddl2binary(sddl, domain_sid, name_map if name_map is not None else {})
+
+
def get_domain_controllers_descriptor(domain_sid, name_map=None):
    """Packed security descriptor for OU=Domain Controllers."""
    sddl = ("D:"
            "(A;;RPLCLORC;;;AU)"
            "(A;;RPWPCRCCLCLORCWOWDSW;;;DA)"
            "(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)"
            "(A;;RPLCLORC;;;ED)"
            "S:"
            "(AU;SA;CCDCWOWDSDDT;;;WD)"
            "(AU;CISA;WP;;;WD)")
    return sddl2binary(sddl, domain_sid, name_map if name_map is not None else {})
+
+
def get_domain_delete_protected1_descriptor(domain_sid, name_map=None):
    """Packed descriptor for delete-protected domain containers."""
    sddl = ("D:AI"
            "(A;;RPLCLORC;;;AU)"
            "(A;;RPWPCRCCLCLORCWOWDSW;;;DA)"
            "(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)")
    return sddl2binary(sddl, domain_sid, name_map if name_map is not None else {})
+
+
def get_domain_delete_protected2_descriptor(domain_sid, name_map=None):
    """Packed descriptor for delete-protected domain containers (DA can DC)."""
    sddl = ("D:AI"
            "(A;;RPLCLORC;;;AU)"
            "(A;;RPWPCRCCDCLCLORCWOWDSW;;;DA)"
            "(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)")
    return sddl2binary(sddl, domain_sid, name_map if name_map is not None else {})
+
+
def get_dns_partition_descriptor(domain_sid, name_map=None):
    """Packed security descriptor for a DNS application partition root."""
    sddl = ("O:SYG:BAD:AI"
            "(OA;CIIO;RP;4c164200-20c0-11d0-a768-00aa006e0529;4828cc14-1437-45bc-9b07-ad6f015e5f28;RU)"
            "(OA;CIIO;RP;4c164200-20c0-11d0-a768-00aa006e0529;bf967aba-0de6-11d0-a285-00aa003049e2;RU)"
            "(OA;CIIO;RP;5f202010-79a5-11d0-9020-00c04fc2d4cf;4828cc14-1437-45bc-9b07-ad6f015e5f28;RU)"
            "(OA;CIIO;RP;5f202010-79a5-11d0-9020-00c04fc2d4cf;bf967aba-0de6-11d0-a285-00aa003049e2;RU)"
            "(OA;CIIO;RP;bc0ac240-79a9-11d0-9020-00c04fc2d4cf;4828cc14-1437-45bc-9b07-ad6f015e5f28;RU)"
            "(OA;CIIO;RP;bc0ac240-79a9-11d0-9020-00c04fc2d4cf;bf967aba-0de6-11d0-a285-00aa003049e2;RU)"
            "(OA;CIIO;RP;59ba2f42-79a2-11d0-9020-00c04fc2d3cf;4828cc14-1437-45bc-9b07-ad6f015e5f28;RU)"
            "(OA;CIIO;RP;59ba2f42-79a2-11d0-9020-00c04fc2d3cf;bf967aba-0de6-11d0-a285-00aa003049e2;RU)"
            "(OA;CIIO;RP;037088f8-0ae1-11d2-b422-00a0c968f939;4828cc14-1437-45bc-9b07-ad6f015e5f28;RU)"
            "(OA;CIIO;RP;037088f8-0ae1-11d2-b422-00a0c968f939;bf967aba-0de6-11d0-a285-00aa003049e2;RU)"
            "(OA;;CR;1131f6aa-9c07-11d1-f79f-00c04fc2dcd2;;RO)"
            "(OA;CIIO;RP;b7c69e6d-2cc7-11d2-854e-00a0c983f608;bf967a86-0de6-11d0-a285-00aa003049e2;ED)"
            "(OA;CIIO;RP;b7c69e6d-2cc7-11d2-854e-00a0c983f608;bf967a9c-0de6-11d0-a285-00aa003049e2;ED)"
            "(OA;CIIO;RP;b7c69e6d-2cc7-11d2-854e-00a0c983f608;bf967aba-0de6-11d0-a285-00aa003049e2;ED)"
            "(OA;;CR;89e95b76-444d-4c62-991a-0facbeda640c;;BA)"
            "(OA;;CR;1131f6aa-9c07-11d1-f79f-00c04fc2dcd2;;BA)"
            "(OA;;CR;1131f6ab-9c07-11d1-f79f-00c04fc2dcd2;;BA)"
            "(OA;;CR;1131f6ac-9c07-11d1-f79f-00c04fc2dcd2;;BA)"
            "(OA;;CR;1131f6ad-9c07-11d1-f79f-00c04fc2dcd2;;BA)"
            "(OA;;CR;1131f6ae-9c07-11d1-f79f-00c04fc2dcd2;;BA)"
            "(OA;;CR;e2a36dc9-ae17-47c3-b58b-be34c55ba633;;S-1-5-32-557)"
            "(OA;;RP;c7407360-20bf-11d0-a768-00aa006e0529;;RU)"
            "(OA;;RP;b8119fd0-04f6-4762-ab7a-4986c76b3f9a;;RU)"
            "(OA;CIIO;RPLCLORC;;4828cc14-1437-45bc-9b07-ad6f015e5f28;RU)"
            "(OA;CIIO;RPLCLORC;;bf967a9c-0de6-11d0-a285-00aa003049e2;RU)"
            "(OA;CIIO;RPLCLORC;;bf967aba-0de6-11d0-a285-00aa003049e2;RU)"
            "(OA;;CR;05c74c5e-4deb-43b4-bd9f-86664c2a7fd5;;AU)"
            "(OA;;CR;89e95b76-444d-4c62-991a-0facbeda640c;;ED)"
            "(OA;;CR;ccc2dc7d-a6ad-4a7a-8846-c04e3cc53501;;AU)"
            "(OA;;CR;280f369c-67c7-438e-ae98-1d46f3c6f541;;AU)"
            "(OA;;CR;1131f6aa-9c07-11d1-f79f-00c04fc2dcd2;;ED)"
            "(OA;;CR;1131f6ab-9c07-11d1-f79f-00c04fc2dcd2;;ED)"
            "(OA;;CR;1131f6ac-9c07-11d1-f79f-00c04fc2dcd2;;ED)"
            "(OA;;CR;1131f6ad-9c07-11d1-f79f-00c04fc2dcd2;;ED)"
            "(OA;;CR;1131f6ae-9c07-11d1-f79f-00c04fc2dcd2;;ED)"
            "(OA;;RP;b8119fd0-04f6-4762-ab7a-4986c76b3f9a;;AU)"
            "(OA;CIIO;RPWPCR;91e647de-d96f-4b70-9557-d63ff4f3ccd8;;PS)"
            "(OA;CIIO;SW;9b026da6-0d3c-465c-8bee-5199d7165cba;bf967a86-0de6-11d0-a285-00aa003049e2;CO)"
            "(OA;OICI;RPWP;3f78c3e5-f79a-46bd-a0b8-9d18116ddc79;;PS)"
            "(OA;CIIO;SW;9b026da6-0d3c-465c-8bee-5199d7165cba;bf967a86-0de6-11d0-a285-00aa003049e2;PS)"
            "(OA;CIIO;WP;ea1b7b93-5e48-46d5-bc6c-4df4fda78a35;bf967a86-0de6-11d0-a285-00aa003049e2;PS)"
            "(A;;RPWPCRCCLCLORCWOWDSW;;;DA)"
            "(A;CI;RPWPCRCCDCLCLORCWOWDSDDTSW;;;EA)"
            "(A;;RPRC;;;RU)"
            "(A;CI;LC;;;RU)"
            "(A;CI;RPWPCRCCLCLORCWOWDSDSW;;;BA)"
            "(A;;RP;;;WD)"
            "(A;;RPLCLORC;;;ED)"
            "(A;;RPLCLORC;;;AU)"
            "(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)"
            "S:AI"
            "(OU;CISA;WP;f30e3bbe-9ff0-11d1-b603-0000f80367c1;bf967aa5-0de6-11d0-a285-00aa003049e2;WD)"
            "(OU;CISA;WP;f30e3bbf-9ff0-11d1-b603-0000f80367c1;bf967aa5-0de6-11d0-a285-00aa003049e2;WD)"
            "(AU;SA;CR;;;DU)(AU;SA;CR;;;BA)(AU;SA;WPWOWD;;;WD)")
    return sddl2binary(sddl, domain_sid, name_map if name_map is not None else {})
+
+
def get_dns_forest_microsoft_dns_descriptor(domain_sid, name_map=None):
    """Packed descriptor for CN=MicrosoftDNS in the forest DNS partition."""
    sddl = ("O:SYG:SYD:AI"
            "(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)"
            "(A;CI;RPWPCRCCDCLCRCWOWDSDDTSW;;;ED)")
    return sddl2binary(sddl, domain_sid, name_map if name_map is not None else {})
+
+
def get_dns_domain_microsoft_dns_descriptor(domain_sid, name_map=None):
    """Packed descriptor for CN=MicrosoftDNS in the domain DNS partition.

    The 'DnsAdmins' token is symbolic and must be mapped to a real SID
    through name_map before the SDDL can be parsed.
    """
    sddl = ("O:SYG:SYD:AI"
            "(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;DA)"
            "(A;CI;RPWPCRCCDCLCRCWOWDSDDTSW;;;DnsAdmins)"
            "(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)"
            "(A;CI;RPWPCRCCDCLCRCWOWDSDDTSW;;;ED)")
    return sddl2binary(sddl, domain_sid, name_map if name_map is not None else {})
+
+
# NOTE: the "paritions" typo is preserved — the function name is public API.
def get_paritions_crossref_subdomain_descriptor(domain_sid, name_map=None):
    """Packed descriptor for a subdomain crossRef under CN=Partitions.

    'SubdomainAdmins' is a symbolic token that must be resolved to a
    real SID via name_map.
    """
    sddl = ("O:SubdomainAdminsG:SubdomainAdminsD:AI"
            "(A;;RPWPCRCCLCLORCWOWDSW;;;SubdomainAdmins)"
            "(A;;RPLCLORC;;;AU)"
            "(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)")
    return sddl2binary(sddl, domain_sid, name_map if name_map is not None else {})
+
+
def get_wellknown_sds(samdb):
    """Return (DN, descriptor-function) pairs for well-known containers.

    Covers the domain, configuration and schema partitions, plus the
    ForestDnsZones / DomainDnsZones application partitions when they
    appear among the rootDSE namingContexts.
    """
    domain_dn = str(samdb.domain_dn())
    config_dn = str(samdb.get_config_basedn())

    subcontainers = [
        (ldb.Dn(samdb, domain_dn), get_domain_descriptor),
        (ldb.Dn(samdb, "CN=Deleted Objects,%s" % domain_dn), get_deletedobjects_descriptor),
        (ldb.Dn(samdb, "CN=LostAndFound,%s" % domain_dn), get_domain_delete_protected2_descriptor),
        (ldb.Dn(samdb, "CN=System,%s" % domain_dn), get_domain_delete_protected1_descriptor),
        (ldb.Dn(samdb, "CN=Infrastructure,%s" % domain_dn), get_domain_infrastructure_descriptor),
        (ldb.Dn(samdb, "CN=Builtin,%s" % domain_dn), get_domain_builtin_descriptor),
        (ldb.Dn(samdb, "CN=Computers,%s" % domain_dn), get_domain_computers_descriptor),
        (ldb.Dn(samdb, "CN=Users,%s" % domain_dn), get_domain_users_descriptor),
        (ldb.Dn(samdb, "OU=Domain Controllers,%s" % domain_dn), get_domain_controllers_descriptor),
        (ldb.Dn(samdb, "CN=MicrosoftDNS,CN=System,%s" % domain_dn), get_dns_domain_microsoft_dns_descriptor),

        (ldb.Dn(samdb, config_dn), get_config_descriptor),
        (ldb.Dn(samdb, "CN=Deleted Objects,%s" % config_dn), get_deletedobjects_descriptor),
        (ldb.Dn(samdb, "CN=NTDS Quotas,%s" % config_dn), get_config_ntds_quotas_descriptor),
        (ldb.Dn(samdb, "CN=LostAndFoundConfig,%s" % config_dn), get_config_delete_protected1wd_descriptor),
        (ldb.Dn(samdb, "CN=Services,%s" % config_dn), get_config_delete_protected1_descriptor),
        (ldb.Dn(samdb, "CN=Physical Locations,%s" % config_dn), get_config_delete_protected1wd_descriptor),
        (ldb.Dn(samdb, "CN=WellKnown Security Principals,%s" % config_dn), get_config_delete_protected1wd_descriptor),
        (ldb.Dn(samdb, "CN=ForestUpdates,%s" % config_dn), get_config_delete_protected1wd_descriptor),
        (ldb.Dn(samdb, "CN=DisplaySpecifiers,%s" % config_dn), get_config_delete_protected2_descriptor),
        (ldb.Dn(samdb, "CN=Extended-Rights,%s" % config_dn), get_config_delete_protected2_descriptor),
        (ldb.Dn(samdb, "CN=Partitions,%s" % config_dn), get_config_partitions_descriptor),
        (ldb.Dn(samdb, "CN=Sites,%s" % config_dn), get_config_sites_descriptor),

        (ldb.Dn(samdb, str(samdb.get_schema_basedn())), get_schema_descriptor),
    ]

    rootdse = samdb.search(expression="(objectClass=*)",
                           base="", scope=ldb.SCOPE_BASE,
                           attrs=["namingContexts"])

    dnsforestdn = ldb.Dn(samdb, "DC=ForestDnsZones,%s" % str(samdb.get_root_basedn()))
    dnsdomaindn = ldb.Dn(samdb, "DC=DomainDnsZones,%s" % domain_dn)

    for nc in rootdse[0]["namingContexts"]:
        nc_dn = ldb.Dn(samdb, nc.decode('utf8'))

        # The two DNS application partitions share the same layout; only
        # the MicrosoftDNS descriptor differs between forest and domain.
        if nc_dn == dnsforestdn:
            partition_dn = dnsforestdn
            microsoft_dns_descriptor = get_dns_forest_microsoft_dns_descriptor
        elif nc_dn == dnsdomaindn:
            partition_dn = dnsdomaindn
            microsoft_dns_descriptor = get_dns_domain_microsoft_dns_descriptor
        else:
            continue

        base = str(partition_dn)
        subcontainers.extend([
            (ldb.Dn(samdb, base), get_dns_partition_descriptor),
            (ldb.Dn(samdb, "CN=Deleted Objects,%s" % base),
             get_deletedobjects_descriptor),
            (ldb.Dn(samdb, "CN=Infrastructure,%s" % base),
             get_domain_delete_protected1_descriptor),
            (ldb.Dn(samdb, "CN=LostAndFound,%s" % base),
             get_domain_delete_protected2_descriptor),
            (ldb.Dn(samdb, "CN=MicrosoftDNS,%s" % base),
             microsoft_dns_descriptor),
        ])

    return subcontainers
+
+
def chunck_acl(acl):
    """Split an ACL string into its header flags and individual ACEs.

    :param acl: string representation of an ACL, e.g. "AI(A;;RP;;;SY)(...)"
    :return: a dict with an "aces" list (one entry per "(...)" group) and,
        when present, a "flags" entry holding the ACL-wide flags (e.g. "AI")
    """
    ace_re = re.compile(r'(\w+)?(\(.*?\))')

    # "chunks" rather than "hash": don't shadow the builtin hash()
    chunks = {"aces": []}
    for flags, ace in ace_re.findall(acl):
        if flags:
            # ACL-wide flags (e.g. "AI", "PAI") precede the first ACE
            chunks["flags"] = flags
        chunks["aces"].append(ace)

    return chunks
+
+
def chunck_sddl(sddl):
    """Split an SDDL string into owner, group, DACL and SACL parts.

    :param sddl: the SDDL string to split
    :return: a dict containing whichever of the keys "owner", "group",
        "dacl" and "sacl" are present in the input
    """
    part_re = re.compile(r'([OGDS]:)(.*?)(?=(?:[GDS]:|$))')

    # map the SDDL marker to the result key; a dispatch table avoids the
    # if-chain and a local named "hash" shadowing the builtin
    keys = {"O:": "owner", "G:": "group", "D:": "dacl", "S:": "sacl"}

    chunks = {}
    for marker, body in part_re.findall(sddl):
        chunks[keys[marker]] = body

    return chunks
+
+
def get_clean_sd(sd):
    """Return a copy of *sd* with all inherited ACEs removed.

    :param sd: security descriptor to strip
    :return: a new security descriptor containing only the ACEs that do
        not carry SEC_ACE_FLAG_INHERITED_ACE
    """
    stripped = security.descriptor()
    stripped.owner_sid = sd.owner_sid
    stripped.group_sid = sd.group_sid
    stripped.type = sd.type
    stripped.revision = sd.revision

    if sd.sacl is not None:
        for ace in sd.sacl.aces:
            if not ace.flags & security.SEC_ACE_FLAG_INHERITED_ACE:
                stripped.sacl_add(ace)

    if sd.dacl is not None:
        for ace in sd.dacl.aces:
            if not ace.flags & security.SEC_ACE_FLAG_INHERITED_ACE:
                stripped.dacl_add(ace)

    return stripped
+
+
def get_diff_sds(refsd, cursd, domainsid, checkSacl=True,
                 ignoreAdditionalACEs=False):
    """Return a textual summary of the differences between two SDs.

    Inherited ACEs are stripped from both descriptors first, and each
    ACL is split into its individual ACEs, so that a mere permutation
    of the same ACEs is not reported as a difference.

    :param refsd: reference security descriptor
    :param cursd: current security descriptor to compare with the reference
    :param domainsid: domain SID used to render the descriptors as SDDL
    :param checkSacl: if False the SACL parts are not compared
    :param ignoreAdditionalACEs: if True, extra ACEs that appear only in
        the current descriptor (with no other difference) are not reported
    :return: a string describing the differences, empty if none
    """

    cursddl = get_clean_sd(cursd).as_sddl(domainsid)
    refsddl = get_clean_sd(refsd).as_sddl(domainsid)

    txt = ""
    hash_cur = chunck_sddl(cursddl)
    hash_ref = chunck_sddl(refsddl)

    if "owner" not in hash_cur:
        txt = "\tNo owner in current SD"
    elif "owner" in hash_ref and hash_cur["owner"] != hash_ref["owner"]:
        txt = "\tOwner mismatch: %s (in ref) %s" \
              "(in current)\n" % (hash_ref["owner"], hash_cur["owner"])

    if "group" not in hash_cur:
        txt = "%s\tNo group in current SD" % txt
    elif "group" in hash_ref and hash_cur["group"] != hash_ref["group"]:
        txt = "%s\tGroup mismatch: %s (in ref) %s" \
              "(in current)\n" % (txt, hash_ref["group"], hash_cur["group"])

    parts = ["dacl"]
    if checkSacl:
        parts.append("sacl")
    for part in parts:
        if part in hash_cur and part in hash_ref:

            # both are present, check if they contain the same ACE
            h_cur = set()
            h_ref = set()
            c_cur = chunck_acl(hash_cur[part])
            c_ref = chunck_acl(hash_ref[part])

            for elem in c_cur["aces"]:
                h_cur.add(elem)

            for elem in c_ref["aces"]:
                h_ref.add(elem)

            # drop the ACEs common to both sides; whatever remains in
            # either set is the actual difference
            for k in set(h_ref):
                if k in h_cur:
                    h_cur.remove(k)
                    h_ref.remove(k)

            if len(h_cur) + len(h_ref) > 0:
                # only additional ACEs in the current SD and no other
                # difference so far: optionally treat as "no difference"
                if txt == "" and len(h_ref) == 0:
                    if ignoreAdditionalACEs:
                        return ""

                txt = "%s\tPart %s is different between reference" \
                      " and current here is the detail:\n" % (txt, part)

                for item in h_cur:
                    txt = "%s\t\t%s ACE is not present in the" \
                          " reference\n" % (txt, item)

                for item in h_ref:
                    txt = "%s\t\t%s ACE is not present in the" \
                          " current\n" % (txt, item)

        elif part in hash_cur and part not in hash_ref:
            txt = "%s\tReference ACL hasn't a %s part\n" % (txt, part)
        elif part not in hash_cur and part in hash_ref:
            txt = "%s\tCurrent ACL hasn't a %s part\n" % (txt, part)

    return txt
diff --git a/python/samba/dnsresolver.py b/python/samba/dnsresolver.py
new file mode 100644
index 0000000..a627555
--- /dev/null
+++ b/python/samba/dnsresolver.py
@@ -0,0 +1,68 @@
+# Samba wrapper for DNS resolvers
+#
+# Copyright (C) Stanislav Levin <slev@altlinux.org>
+# Copyright (C) Alexander Bokovoy <ab@samba.org>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import dns.resolver
+import dns.rdatatype
+import dns.reversename
+
class DNSResolver(dns.resolver.Resolver):
    """DNS stub resolver compatible with both dnspython < 2.0.0
    and dnspython >= 2.0.0.

    `use_search_by_default` is set to `True`, so the search list from
    the system's resolver configuration is used for relative names and
    the resolver's domain may be appended to them.

    The lifetime (total seconds spent trying to answer a question) is
    raised to 15s: dnspython 2.0.0 lowered its default to 5s from the
    previous 30s.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.reset_defaults()
        # dnspython >= 2.0.0 renamed query() to resolve() and added
        # resolve_address(); alias the old spellings when the new ones
        # are missing so callers can rely on a single API.
        parent = super()
        self.resolve = getattr(parent, "resolve", self.query)
        self.resolve_address = getattr(parent, "resolve_address",
                                       self._resolve_address)

    def reset_defaults(self):
        self.use_search_by_default = True
        # dnspython 2.0.0's default is 5 seconds
        self.lifetime = 15

    def reset(self):
        super().reset()
        self.reset_defaults()

    def _resolve_address(self, ip_address, *args, **kwargs):
        """Query nameservers for PTR records.

        :param ip_address: IPv4 or IPv6 address
        :type ip_address: str
        """
        ptr_name = dns.reversename.from_address(ip_address)
        return self.resolve(ptr_name, rdtype=dns.rdatatype.PTR,
                            *args, **kwargs)
diff --git a/python/samba/dnsserver.py b/python/samba/dnsserver.py
new file mode 100644
index 0000000..d907f8e
--- /dev/null
+++ b/python/samba/dnsserver.py
@@ -0,0 +1,405 @@
+# helper for DNS management tool
+#
+# Copyright (C) Amitay Isaacs 2011-2012
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import shlex
+import socket
+from samba.dcerpc import dnsserver, dnsp
+from samba import WERRORError, werror
+
+# Note: these are not quite the same as similar looking classes in
+# provision/sambadns.py -- those ones are based on
+# dnsp.DnssrvRpcRecord, these are based on dnsserver.DNS_RPC_RECORD.
+# They encode the same information in slightly different ways.
+#
+# DNS_RPC_RECORD structures ([MS-DNSP]2.2.2.2.5 "DNS_RPC_RECORD") are
+# used on the wire by DnssrvEnumRecords2. The dnsp.DnssrvRpcRecord
+# versions have the in-database version of the same information, where
+# the flags field is unpacked, and the struct ordering is different.
+# See [MS-DNSP] 2.3.2.2 "DnsRecord".
+#
+# In both cases the structure and contents of .data depend on .wType.
+# For example, if .wType is DNS_TYPE_A, .data is an IPv4 address. If
+# the .wType is changed to DNS_TYPE_CNAME, the contents of .data will
+# be interpreted as a cname blob, but the bytes there will still be
+# those of the IPv4 address. If you don't also set the .data you may
+# encounter stability problems. These DNS_RPC_RECORD subclasses
+# attempt to hide that from you, but are only pretending -- any of
+# them can represent any type of record.
+
+
class DNSParseError(ValueError):
    """Raised when a string cannot be parsed into a DNS record."""
+
+
class ARecord(dnsserver.DNS_RPC_RECORD):
    """IPv4 address (A) record in wire (DNS_RPC_RECORD) form."""

    def __init__(self, ip_addr, serial=1, ttl=900, rank=dnsp.DNS_RANK_ZONE,
                 node_flag=0):
        super().__init__()
        self.wType = dnsp.DNS_TYPE_A
        self.dwSerial = serial
        self.dwTtlSeconds = ttl
        # rank and node flags share one packed field on the wire
        self.dwFlags = rank | node_flag
        self.data = ip_addr

    @classmethod
    def from_string(cls, data, sep=None, **kwargs):
        """Build an ARecord from its string form (the dotted address)."""
        return cls(data, **kwargs)
+
+
class AAAARecord(dnsserver.DNS_RPC_RECORD):
    """IPv6 address (AAAA) record in wire (DNS_RPC_RECORD) form."""

    def __init__(self, ip6_addr, serial=1, ttl=900, rank=dnsp.DNS_RANK_ZONE,
                 node_flag=0):
        super().__init__()
        self.wType = dnsp.DNS_TYPE_AAAA
        self.dwSerial = serial
        self.dwTtlSeconds = ttl
        # rank and node flags share one packed field on the wire
        self.dwFlags = rank | node_flag
        self.data = ip6_addr

    @classmethod
    def from_string(cls, data, sep=None, **kwargs):
        """Build an AAAARecord from its string form (the address)."""
        return cls(data, **kwargs)
+
+
class PTRRecord(dnsserver.DNS_RPC_RECORD):
    """Reverse-lookup (PTR) record in wire (DNS_RPC_RECORD) form."""

    def __init__(self, ptr, serial=1, ttl=900, rank=dnsp.DNS_RANK_ZONE,
                 node_flag=0):
        super().__init__()
        self.wType = dnsp.DNS_TYPE_PTR
        self.dwSerial = serial
        self.dwTtlSeconds = ttl
        self.dwFlags = rank | node_flag
        # the target host name travels as a counted DNS_RPC_NAME
        name = dnsserver.DNS_RPC_NAME()
        name.str, name.len = ptr, len(ptr)
        self.data = name

    @classmethod
    def from_string(cls, data, sep=None, **kwargs):
        """Build a PTRRecord from its string form (the target name)."""
        return cls(data, **kwargs)
+
+
class CNAMERecord(dnsserver.DNS_RPC_RECORD):
    """Canonical-name (CNAME) record in wire (DNS_RPC_RECORD) form."""

    def __init__(self, cname, serial=1, ttl=900, rank=dnsp.DNS_RANK_ZONE,
                 node_flag=0):
        super().__init__()
        self.wType = dnsp.DNS_TYPE_CNAME
        self.dwSerial = serial
        self.dwTtlSeconds = ttl
        self.dwFlags = rank | node_flag
        # the canonical name travels as a counted DNS_RPC_NAME
        name = dnsserver.DNS_RPC_NAME()
        name.str, name.len = cname, len(cname)
        self.data = name

    @classmethod
    def from_string(cls, data, sep=None, **kwargs):
        """Build a CNAMERecord from its string form (the target name)."""
        return cls(data, **kwargs)
+
+
class NSRecord(dnsserver.DNS_RPC_RECORD):
    """Nameserver (NS) record in wire (DNS_RPC_RECORD) form."""

    def __init__(self, dns_server, serial=1, ttl=900, rank=dnsp.DNS_RANK_ZONE,
                 node_flag=0):
        super().__init__()
        self.wType = dnsp.DNS_TYPE_NS
        self.dwSerial = serial
        self.dwTtlSeconds = ttl
        self.dwFlags = rank | node_flag
        # the nameserver host name travels as a counted DNS_RPC_NAME
        name = dnsserver.DNS_RPC_NAME()
        name.str, name.len = dns_server, len(dns_server)
        self.data = name

    @classmethod
    def from_string(cls, data, sep=None, **kwargs):
        """Build an NSRecord from its string form (the server name)."""
        return cls(data, **kwargs)
+
+
class MXRecord(dnsserver.DNS_RPC_RECORD):
    """Mail-exchanger (MX) record in wire (DNS_RPC_RECORD) form."""

    def __init__(self, mail_server, preference, serial=1, ttl=900,
                 rank=dnsp.DNS_RANK_ZONE, node_flag=0):
        super().__init__()
        self.wType = dnsp.DNS_TYPE_MX
        self.dwSerial = serial
        self.dwTtlSeconds = ttl
        self.dwFlags = rank | node_flag
        mx = dnsserver.DNS_RPC_RECORD_NAME_PREFERENCE()
        mx.wPreference = preference
        mx.nameExchange.str = mail_server
        mx.nameExchange.len = len(mail_server)
        self.data = mx

    @classmethod
    def from_string(cls, data, sep=None, **kwargs):
        """Parse "server priority" into an MXRecord.

        :raise DNSParseError: if data is not exactly two fields or the
            priority is not an integer.
        """
        fields = data.split(sep)
        try:
            server, priority_str = fields
            preference = int(priority_str)
        except ValueError as e:
            raise DNSParseError("MX data must have server and priority "
                                "(space separated), not %r" % data) from e
        return cls(server, preference, **kwargs)
+
+
class SOARecord(dnsserver.DNS_RPC_RECORD):
    """Start-of-authority (SOA) record in wire (DNS_RPC_RECORD) form."""

    def __init__(self, mname, rname, serial=1, refresh=900, retry=600,
                 expire=86400, minimum=3600, ttl=3600, rank=dnsp.DNS_RANK_ZONE,
                 node_flag=dnsp.DNS_RPC_FLAG_AUTH_ZONE_ROOT):
        """
        :param mname: primary nameserver for the zone
        :param rname: zone administrator mailbox (dotted form)
        """
        super().__init__()
        self.wType = dnsp.DNS_TYPE_SOA
        self.dwFlags = rank | node_flag
        self.dwSerial = serial
        self.dwTtlSeconds = ttl
        soa = dnsserver.DNS_RPC_RECORD_SOA()
        soa.dwSerialNo = serial
        soa.dwRefresh = refresh
        soa.dwRetry = retry
        soa.dwExpire = expire
        soa.dwMinimumTtl = minimum
        soa.NamePrimaryServer.str = mname
        soa.NamePrimaryServer.len = len(mname)
        soa.ZoneAdministratorEmail.str = rname
        soa.ZoneAdministratorEmail.len = len(rname)
        self.data = soa

    @classmethod
    def from_string(cls, data, sep=None, **kwargs):
        """Parse "nameserver email serial refresh retry expire minimumttl".

        :raise DNSParseError: on the wrong number of fields, or when a
            numeric field is not an integer.
        """
        args = data.split(sep)
        if len(args) != 7:
            raise DNSParseError('Data requires 7 space separated elements - '
                                'nameserver, email, serial, '
                                'refresh, retry, expire, minimumttl')
        try:
            # fields 2..6 are the numeric SOA values
            for i in range(2, 7):
                args[i] = int(args[i])
        except ValueError as e:
            # fixed: the message previously contained a stray quote
            # after "minimumttl"
            raise DNSParseError("SOA serial, refresh, retry, expire, "
                                "minimumttl should be integers") from e
        return cls(*args, **kwargs)
+
+
class SRVRecord(dnsserver.DNS_RPC_RECORD):
    """Service-location (SRV) record in wire (DNS_RPC_RECORD) form."""

    def __init__(self, target, port, priority=0, weight=100, serial=1, ttl=900,
                 rank=dnsp.DNS_RANK_ZONE, node_flag=0):
        super().__init__()
        self.wType = dnsp.DNS_TYPE_SRV
        self.dwSerial = serial
        self.dwTtlSeconds = ttl
        self.dwFlags = rank | node_flag
        srv = dnsserver.DNS_RPC_RECORD_SRV()
        srv.wPriority = priority
        srv.wWeight = weight
        srv.wPort = port
        srv.nameTarget.str = target
        srv.nameTarget.len = len(target)
        self.data = srv

    @classmethod
    def from_string(cls, data, sep=None, **kwargs):
        """Parse "server port priority weight" into an SRVRecord.

        :raise DNSParseError: on the wrong number of fields or
            non-integer numeric fields.
        """
        try:
            target, port, priority, weight = data.split(sep)
        except ValueError as e:
            raise DNSParseError("SRV data must have four space "
                                "separated elements: "
                                "server, port, priority, weight; "
                                "not %r" % data) from e
        try:
            numeric = (int(port), int(priority), int(weight))
        except ValueError as e:
            raise DNSParseError("SRV port, priority, and weight "
                                "must be integers") from e

        return cls(target, *numeric, **kwargs)
+
+
class TXTRecord(dnsserver.DNS_RPC_RECORD):
    """Text (TXT) record in wire (DNS_RPC_RECORD) form.

    A TXT record carries a list of strings; a bare string argument is
    treated as a single-element list.
    """

    def __init__(self, slist, serial=1, ttl=900, rank=dnsp.DNS_RANK_ZONE,
                 node_flag=0):
        super().__init__()
        self.wType = dnsp.DNS_TYPE_TXT
        self.dwSerial = serial
        self.dwTtlSeconds = ttl
        self.dwFlags = rank | node_flag
        strings = [slist] if isinstance(slist, str) else slist
        names = []
        for s in strings:
            name = dnsserver.DNS_RPC_NAME()
            name.str, name.len = s, len(s)
            names.append(name)
        txt = dnsserver.DNS_RPC_RECORD_STRING()
        txt.count = len(strings)
        txt.str = names
        self.data = txt

    @classmethod
    def from_string(cls, data, sep=None, **kwargs):
        """Build a TXTRecord, splitting data shell-style (quotes kept
        together)."""
        return cls(shlex.split(data), **kwargs)
+
+
+#
+# Don't add new Record types after this line
+
_RECORD_TYPE_LUT = {}


def _setup_record_type_lut():
    # Register every *Record class above under both its short name
    # ("A", "TXT", ...) and the matching dnsp.DNS_TYPE_* integer.
    for name, record_class in globals().items():
        if not name.endswith('Record'):
            continue
        short = name[:-6]
        _RECORD_TYPE_LUT[short] = record_class
        _RECORD_TYPE_LUT[getattr(dnsp, 'DNS_TYPE_' + short)] = record_class


_setup_record_type_lut()
del _setup_record_type_lut
+
+
def record_from_string(t, data, sep=None, **kwargs):
    """Get a DNS record of type t based on the data string.
    Additional keywords (ttl, rank, etc) can be passed in.

    t can be a dnsp.DNS_TYPE_* integer or a string like "A", "TXT", etc.

    :raise DNSParseError: if t names no known record type.
    """
    key = t.upper() if isinstance(t, str) else t
    try:
        record_class = _RECORD_TYPE_LUT[key]
    except KeyError as e:
        raise DNSParseError("Unsupported record type") from e

    return record_class.from_string(data, sep=sep, **kwargs)
+
+
def flag_from_string(rec_type):
    """Map a record-type name ("A", "txt", ...) to its dnsp.DNS_TYPE_*
    integer.

    :raise DNSParseError: if no such type exists.
    """
    attr = 'DNS_TYPE_' + rec_type.upper()
    try:
        return getattr(dnsp, attr)
    except AttributeError as e:
        raise DNSParseError('Unknown type of DNS record %s' % rec_type) from e
+
+
def recbuf_from_string(*args, **kwargs):
    """Like record_from_string, but wrapped in a DNS_RPC_RECORD_BUF as
    the RPC add/update calls expect."""
    buf = dnsserver.DNS_RPC_RECORD_BUF()
    buf.rec = record_from_string(*args, **kwargs)
    return buf
+
+
def dns_name_equal(n1, n2):
    """Compare two DNS_RPC_NAME values, ignoring case and any trailing
    dot."""
    lhs = n1.str.rstrip('.').lower()
    rhs = n2.str.rstrip('.').lower()
    return lhs == rhs
+
+
def ipv6_normalise(addr):
    """Convert an AAAA address into a canonical form.

    Round-tripping through the packed binary form collapses zero runs
    and lowercases hex digits, so textual variants of the same address
    compare equal.
    """
    return socket.inet_ntop(socket.AF_INET6,
                            socket.inet_pton(socket.AF_INET6, addr))
+
+
def dns_record_match(dns_conn, server, zone, name, record_type, data):
    """Find a dns record that matches the specified data

    :param dns_conn: a dnsserver RPC connection
    :param server: DNS server name to query
    :param zone: zone name
    :param name: record name within the zone
    :param record_type: a dnsp.DNS_TYPE_* integer
    :param data: string form of the expected record data
    :return: the matching wire record, or None if no match (or the
        name/zone does not exist)
    """

    # The matching is not as precise as that offered by
    # dsdb_dns.match_record, which, for example, compares IPv6 records
    # semantically rather than as strings. However that function
    # compares database DnssrvRpcRecord structures, not wire
    # DNS_RPC_RECORD structures.
    #
    # While it would be possible, perhaps desirable, to wrap that
    # function for use in samba-tool, there is value in having a
    # separate implementation for tests, to avoid the circularity of
    # asserting the function matches itself.

    # parse `data` into a wire-format record to compare against
    urec = record_from_string(record_type, data)

    select_flags = dnsserver.DNS_RPC_VIEW_AUTHORITY_DATA

    try:
        buflen, res = dns_conn.DnssrvEnumRecords2(
            dnsserver.DNS_CLIENT_VERSION_LONGHORN, 0, server, zone, name, None,
            record_type, select_flags, None, None)
    except WERRORError as e:
        if e.args[0] == werror.WERR_DNS_ERROR_NAME_DOES_NOT_EXIST:
            # Either the zone doesn't exist, or there were no records.
            # We can't differentiate the two.
            return None
        raise e

    if not res or res.count == 0:
        return None

    for rec in res.rec[0].records:
        if rec.wType != record_type:
            continue

        # per-type comparison: names compare case/dot-insensitively,
        # AAAA compares canonicalised, A compares as plain strings
        found = False
        if record_type == dnsp.DNS_TYPE_A:
            if rec.data == urec.data:
                found = True
        elif record_type == dnsp.DNS_TYPE_AAAA:
            if ipv6_normalise(rec.data) == ipv6_normalise(urec.data):
                found = True
        elif record_type == dnsp.DNS_TYPE_PTR:
            if dns_name_equal(rec.data, urec.data):
                found = True
        elif record_type == dnsp.DNS_TYPE_CNAME:
            if dns_name_equal(rec.data, urec.data):
                found = True
        elif record_type == dnsp.DNS_TYPE_NS:
            if dns_name_equal(rec.data, urec.data):
                found = True
        elif record_type == dnsp.DNS_TYPE_MX:
            if dns_name_equal(rec.data.nameExchange, urec.data.nameExchange) and \
               rec.data.wPreference == urec.data.wPreference:
                found = True
        elif record_type == dnsp.DNS_TYPE_SRV:
            if rec.data.wPriority == urec.data.wPriority and \
               rec.data.wWeight == urec.data.wWeight and \
               rec.data.wPort == urec.data.wPort and \
               dns_name_equal(rec.data.nameTarget, urec.data.nameTarget):
                found = True
        elif record_type == dnsp.DNS_TYPE_SOA:
            if rec.data.dwSerialNo == urec.data.dwSerialNo and \
               rec.data.dwRefresh == urec.data.dwRefresh and \
               rec.data.dwRetry == urec.data.dwRetry and \
               rec.data.dwExpire == urec.data.dwExpire and \
               rec.data.dwMinimumTtl == urec.data.dwMinimumTtl and \
               dns_name_equal(rec.data.NamePrimaryServer,
                              urec.data.NamePrimaryServer) and \
               dns_name_equal(rec.data.ZoneAdministratorEmail,
                              urec.data.ZoneAdministratorEmail):
                found = True
        elif record_type == dnsp.DNS_TYPE_TXT:
            if rec.data.count == urec.data.count:
                found = True
                # every constituent string must match, in order
                for i in range(rec.data.count):
                    found = found and \
                        (rec.data.str[i].str == urec.data.str[i].str)

        if found:
            return rec

    return None
diff --git a/python/samba/domain_update.py b/python/samba/domain_update.py
new file mode 100644
index 0000000..e91bdf4
--- /dev/null
+++ b/python/samba/domain_update.py
@@ -0,0 +1,573 @@
+# Samba4 Domain update checker
+#
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import ldb
+from base64 import b64encode
+from samba import sd_utils
+from samba.dcerpc import security
+from samba.descriptor import (
+ get_managed_service_accounts_descriptor,
+)
+from samba.dsdb import (
+ DS_DOMAIN_FUNCTION_2008,
+ DS_DOMAIN_FUNCTION_2008_R2,
+ DS_DOMAIN_FUNCTION_2012,
+ DS_DOMAIN_FUNCTION_2012_R2,
+ DS_DOMAIN_FUNCTION_2016,
+)
+
# Range of domain-update operation numbers this module knows how to
# check and apply.
MIN_UPDATE = 75
MAX_UPDATE = 89

# Maps each operation number to the GUID of its marker container
# (CN=<guid>,CN=Operations,CN=DomainUpdates,CN=System), whose presence
# records that the update has been applied.
update_map = {
    # Missing updates from 2008 R2 - version 5
    75: "5e1574f6-55df-493e-a671-aaeffca6a100",
    76: "d262aae8-41f7-48ed-9f35-56bbb677573d",
    77: "82112ba0-7e4c-4a44-89d9-d46c9612bf91",
    # Windows Server 2012 - version 9
    78: "c3c927a6-cc1d-47c0-966b-be8f9b63d991",
    79: "54afcfb9-637a-4251-9f47-4d50e7021211",
    80: "f4728883-84dd-483c-9897-274f2ebcf11e",
    81: "ff4f9d27-7157-4cb0-80a9-5d6f2b14c8ff",
    # Windows Server 2012 R2 - version 10
    # No updates
    # Windows Server 2016 - version 15
    82: "83c53da7-427e-47a4-a07a-a324598b88f7",
    # from the documentation and a fresh installation
    # 83 is this:
    # c81fc9cc-0130-4fd1-b272-634d74818133
    # adprep will use this on the wire:
    # c81fc9cc-0130-f4d1-b272-634d74818133
    83: "c81fc9cc-0130-4fd1-b272-634d74818133",
    84: "e5f9e791-d96d-4fc9-93c9-d53e1dc439ba",
    85: "e6d5fd00-385d-4e65-b02d-9da3493ed850",
    86: "3a6b3fbf-3168-4312-a10d-dd5b3393952d",
    87: "7f950403-0ab3-47f9-9730-5d7b0269f9bd",
    88: "434bb40d-dbc9-4fe7-81d4-d57229f7b080",
    # Windows Server 2016 - version 16
    89: "a0c238ba-9e30-4ee6-80a6-43f731e9a5cd",
}


# Highest operation number expected at each domain functional level.
functional_level_to_max_update = {
    DS_DOMAIN_FUNCTION_2008: 74,
    DS_DOMAIN_FUNCTION_2008_R2: 77,
    DS_DOMAIN_FUNCTION_2012: 81,
    DS_DOMAIN_FUNCTION_2012_R2: 81,
    DS_DOMAIN_FUNCTION_2016: 89,
}

# Expected value of the `revision` attribute on the
# CN=ActiveDirectoryUpdate object at each functional level.
functional_level_to_version = {
    DS_DOMAIN_FUNCTION_2008: 3,
    DS_DOMAIN_FUNCTION_2008_R2: 5,
    DS_DOMAIN_FUNCTION_2012: 9,
    DS_DOMAIN_FUNCTION_2012_R2: 10,
    DS_DOMAIN_FUNCTION_2016: 16,
}

# No update numbers have been skipped over
missing_updates = []
+
+
class DomainUpdateException(Exception):
    """Raised when a domain update cannot be checked or applied."""
+
+
class DomainUpdate(object):
    """Check and update a SAM database for domain updates.

    Each update is identified by an integer operation number; applied
    updates are recorded as marker containers under
    CN=Operations,CN=DomainUpdates,CN=System.
    """

    def __init__(self, samdb, fix=False,
                 add_update_container=True):
        """
        :param samdb: LDB database
        :param fix: Apply the update if the container is missing
        :param add_update_container: Add the container at the end of the change
        :raise DomainUpdateException:
        """
        self.samdb = samdb
        self.fix = fix
        self.add_update_container = add_update_container
        # TODO: In future we should check for inconsistencies when it claims it has been done
        self.check_update_applied = False

        self.config_dn = self.samdb.get_config_basedn()
        self.domain_dn = self.samdb.domain_dn()
        self.schema_dn = self.samdb.get_schema_basedn()

        self.sd_utils = sd_utils.SDUtils(samdb)
        self.domain_sid = security.dom_sid(samdb.get_domain_sid())

        self.domainupdate_container = self.samdb.get_root_basedn()
        try:
            self.domainupdate_container.add_child("CN=Operations,CN=DomainUpdates,CN=System")
        except ldb.LdbError:
            raise DomainUpdateException("Failed to add domain update container child")

        self.revision_object = self.samdb.get_root_basedn()
        try:
            self.revision_object.add_child("CN=ActiveDirectoryUpdate,CN=DomainUpdates,CN=System")
        except ldb.LdbError:
            raise DomainUpdateException("Failed to add revision object child")

    def check_updates_functional_level(self, functional_level,
                                       old_functional_level=None,
                                       update_revision=False):
        """
        Apply all updates for a given old and new functional level
        :param functional_level: constant
        :param old_functional_level: constant
        :param update_revision: modify the stored version
        :raise DomainUpdateException:
        """
        res = self.samdb.search(base=self.revision_object,
                                attrs=["revision"], scope=ldb.SCOPE_BASE)

        expected_update = functional_level_to_max_update[functional_level]

        if old_functional_level:
            min_update = functional_level_to_max_update[old_functional_level]
            min_update += 1
        else:
            min_update = MIN_UPDATE

        self.check_updates_range(min_update, expected_update)

        expected_version = functional_level_to_version[functional_level]
        found_version = int(res[0]['revision'][0])
        if update_revision and found_version < expected_version:
            if not self.fix:
                # fixed: the message previously ran the two numbers
                # together ("Expected: %dGot: %d")
                raise DomainUpdateException("Revision is not high enough. Fix is set to False."
                                            "\nExpected: %d\nGot: %d" % (expected_version,
                                                                         found_version))
            self.samdb.modify_ldif("""dn: %s
changetype: modify
replace: revision
revision: %d
""" % (str(self.revision_object), expected_version))

    def check_updates_iterator(self, iterator):
        """
        Apply a list of updates which must be within the valid range of updates
        :param iterator: Iterable specifying integer update numbers to apply
        :raise DomainUpdateException:
        """
        for op in iterator:
            if op < MIN_UPDATE or op > MAX_UPDATE:
                raise DomainUpdateException("Update number invalid.")

            # No LDIF file exists for the change
            getattr(self, "operation_%d" % op)(op)

    def check_updates_range(self, start=0, end=0):
        """
        Apply a range of updates which must be within the valid range of updates
        :param start: integer update to begin
        :param end: integer update to end (inclusive)
        :raise DomainUpdateException:
        """
        op = start
        if start < MIN_UPDATE or start > end or end > MAX_UPDATE:
            raise DomainUpdateException("Update number invalid.")
        while op <= end:
            if op not in missing_updates:
                # No LDIF file exists for the change
                getattr(self, "operation_%d" % op)(op)

            op += 1

    def update_exists(self, op):
        """
        :param op: Integer update number
        :return: True if update exists else False
        """
        update_dn = "CN=%s,%s" % (update_map[op], self.domainupdate_container)
        try:
            res = self.samdb.search(base=update_dn,
                                    scope=ldb.SCOPE_BASE,
                                    attrs=[])
        except ldb.LdbError as e:
            (num, msg) = e.args
            if num != ldb.ERR_NO_SUCH_OBJECT:
                raise
            return False

        assert len(res) == 1
        print("Skip Domain Update %u: %s" % (op, update_map[op]))
        return True

    def update_add(self, op):
        """
        Add the corresponding container object for the given update
        :param op: Integer update
        """
        self.samdb.add_ldif("""dn: CN=%s,%s
objectClass: container
""" % (update_map[op], str(self.domainupdate_container)))
        print("Applied Domain Update %u: %s" % (op, update_map[op]))

    def raise_if_not_fix(self, op):
        """
        Raises an exception if not set to fix.
        :param op: Integer operation
        :raise DomainUpdateException:
        """
        if not self.fix:
            raise DomainUpdateException("Missing operation %d. Fix is currently set to False" % op)

    # Create a new object CN=TPM Devices in the Domain partition.
    def operation_78(self, op):
        if self.update_exists(op):
            return
        self.raise_if_not_fix(op)

        self.samdb.add_ldif("""dn: CN=TPM Devices,%s
objectClass: top
objectClass: msTPM-InformationObjectsContainer
""" % self.domain_dn,
                            controls=["relax:0", "provision:0"])

        if self.add_update_container:
            self.update_add(op)

    # Created an access control entry for the TPM service.
    def operation_79(self, op):
        if self.update_exists(op):
            return
        self.raise_if_not_fix(op)

        ace = "(OA;CIIO;WP;ea1b7b93-5e48-46d5-bc6c-4df4fda78a35;bf967a86-0de6-11d0-a285-00aa003049e2;PS)"

        self.sd_utils.update_aces_in_dacl(self.domain_dn, add_aces=[ace])

        if self.add_update_container:
            self.update_add(op)

    # Grant "Clone DC" extended right to Cloneable Domain Controllers group
    def operation_80(self, op):
        if self.update_exists(op):
            return
        self.raise_if_not_fix(op)

        ace = "(OA;;CR;3e0f7e18-2c7a-4c10-ba82-4d926db99a3e;;CN)"

        self.sd_utils.update_aces_in_dacl(self.domain_dn, add_aces=[ace])

        if self.add_update_container:
            self.update_add(op)

    # Grant ms-DS-Allowed-To-Act-On-Behalf-Of-Other-Identity to Principal Self
    # on all objects
    def operation_81(self, op):
        if self.update_exists(op):
            return
        self.raise_if_not_fix(op)

        ace = "(OA;CIOI;RPWP;3f78c3e5-f79a-46bd-a0b8-9d18116ddc79;;PS)"

        self.sd_utils.update_aces_in_dacl(self.domain_dn, add_aces=[ace])

        if self.add_update_container:
            self.update_add(op)

    #
    # THE FOLLOWING ARE MISSING UPDATES FROM 2008 R2
    #

    # Add Managed Service Accounts container
    def operation_75(self, op):
        if self.update_exists(op):
            return
        self.raise_if_not_fix(op)

        descriptor = get_managed_service_accounts_descriptor(self.domain_sid)
        managedservice_descr = b64encode(descriptor).decode('utf8')
        managed_service_dn = "CN=Managed Service Accounts,%s" % \
            str(self.domain_dn)

        self.samdb.modify_ldif("""dn: %s
changetype: add
objectClass: container
description: Default container for managed service accounts
showInAdvancedViewOnly: FALSE
nTSecurityDescriptor:: %s""" % (managed_service_dn, managedservice_descr),
                               controls=["relax:0", "provision:0"])

        if self.add_update_container:
            self.update_add(op)

    # Add the otherWellKnownObjects reference to MSA
    def operation_76(self, op):
        if self.update_exists(op):
            return
        self.raise_if_not_fix(op)

        managed_service_dn = "CN=Managed Service Accounts,%s" % \
            str(self.domain_dn)

        self.samdb.modify_ldif("""dn: %s
changetype: modify
add: otherWellKnownObjects
otherWellKnownObjects: B:32:1EB93889E40C45DF9F0C64D23BBB6237:%s
""" % (str(self.domain_dn), managed_service_dn), controls=["relax:0",
                                                           "provision:0"])

        if self.add_update_container:
            self.update_add(op)

    # Add the PSPs object in the System container
    def operation_77(self, op):
        if self.update_exists(op):
            return
        self.raise_if_not_fix(op)

        self.samdb.add_ldif("""dn: CN=PSPs,CN=System,%s
objectClass: top
objectClass: msImaging-PSPs
""" % str(self.domain_dn), controls=["relax:0", "provision:0"])

        if self.add_update_container:
            self.update_add(op)

    ## Windows Server 2016: Domain-wide updates
    ##
    ## After the operations that are performed by domainprep in Windows
    ## Server 2016 (operations 82-88) complete, the revision attribute for the
    ## CN=ActiveDirectoryUpdate,CN=DomainUpdates,CN=System,DC=ForestRootDomain
    ## object is set to 15.

    ## Operation 82: {83c53da7-427e-47a4-a07a-a324598b88f7}
    ##
    ## Create CN=Keys container at root of domain
    ##
    ## - objectClass: container
    ## - description: Default container for key credential objects
    ## - ShowInAdvancedViewOnly: TRUE
    ##
    ## (A;CI;RPWPCRLCLOCCDCRCWDWOSDDTSW;;;EA)
    ## (A;CI;RPWPCRLCLOCCDCRCWDWOSDDTSW;;;DA)
    ## (A;CI;RPWPCRLCLOCCDCRCWDWOSDDTSW;;;SY)
    ## (A;CI;RPWPCRLCLOCCDCRCWDWOSDDTSW;;;DD)
    ## (A;CI;RPWPCRLCLOCCDCRCWDWOSDDTSW;;;ED)
    ##
    def operation_82(self, op):
        if self.update_exists(op):
            return
        self.raise_if_not_fix(op)

        keys_dn = "CN=Keys,%s" % str(self.domain_dn)

        sddl = "O:DA"
        sddl += "D:"
        sddl += "(A;CI;RPWPCRLCLOCCDCRCWDWOSDDTSW;;;EA)"
        sddl += "(A;CI;RPWPCRLCLOCCDCRCWDWOSDDTSW;;;DA)"
        sddl += "(A;CI;RPWPCRLCLOCCDCRCWDWOSDDTSW;;;SY)"
        sddl += "(A;CI;RPWPCRLCLOCCDCRCWDWOSDDTSW;;;DD)"
        sddl += "(A;CI;RPWPCRLCLOCCDCRCWDWOSDDTSW;;;ED)"

        ldif = """
dn: %s
objectClass: container
description: Default container for key credential objects
ShowInAdvancedViewOnly: TRUE
nTSecurityDescriptor: %s
""" % (keys_dn, sddl)

        self.samdb.add_ldif(ldif)

        if self.add_update_container:
            self.update_add(op)

    ## Operation 83: {c81fc9cc-0130-4fd1-b272-634d74818133}
    ##
    ## Add Full Control allow aces to CN=Keys container for "domain\Key Admins"
    ## and "rootdomain\Enterprise Key Admins".
    ##
    ## (A;CI;RPWPCRLCLOCCDCRCWDWOSDDTSW;;;Key Admins)
    ## (A;CI;RPWPCRLCLOCCDCRCWDWOSDDTSW;;;Enterprise Key Admins)
    ##
    def operation_83(self, op):
        if self.update_exists(op):
            return
        self.raise_if_not_fix(op)

        keys_dn = "CN=Keys,%s" % str(self.domain_dn)

        aces = ["(A;CI;RPWPCRLCLOCCDCRCWDWOSDDTSW;;;KA)"]
        aces += ["(A;CI;RPWPCRLCLOCCDCRCWDWOSDDTSW;;;EK)"]

        self.sd_utils.update_aces_in_dacl(keys_dn, add_aces=aces)

        if self.add_update_container:
            self.update_add(op)

    ## Operation 84: {e5f9e791-d96d-4fc9-93c9-d53e1dc439ba}
    ##
    ## Modify otherWellKnownObjects attribute to point to the CN=Keys container.
    ##
    ## - otherWellKnownObjects: B:32:683A24E2E8164BD3AF86AC3C2CF3F981:CN=Keys,%ws
    def operation_84(self, op):
        if self.update_exists(op):
            return
        self.raise_if_not_fix(op)

        keys_dn = "CN=Keys,%s" % str(self.domain_dn)

        ldif = """
dn: %s
changetype: modify
add: otherWellKnownObjects
otherWellKnownObjects: B:32:683A24E2E8164BD3AF86AC3C2CF3F981:%s
""" % (str(self.domain_dn), keys_dn)

        self.samdb.modify_ldif(ldif)

        if self.add_update_container:
            self.update_add(op)

    ## Operation 85: {e6d5fd00-385d-4e65-b02d-9da3493ed850}
    ##
    ## Modify the domain NC to permit "domain\Key Admins" and
    ## "rootdomain\Enterprise Key Admins"
    ## to modify the msds-KeyCredentialLink attribute.
    ##
    ## (OA;CI;RPWP;5b47d60f-6090-40b2-9f37-2a4de88f3063;;Key Admins)
    ## (OA;CI;RPWP;5b47d60f-6090-40b2-9f37-2a4de88f3063;;Enterprise Key Admins)
    ## in root domain, but in non-root domains resulted in a bogus domain-relative
    ## ACE with a non-resolvable -527 SID
    ##
    def operation_85(self, op):
        if self.update_exists(op):
            return
        self.raise_if_not_fix(op)

        aces = ["(OA;CI;RPWP;5b47d60f-6090-40b2-9f37-2a4de88f3063;;KA)"]
        # we use an explicit sid in order to replay the windows mistake
        aces += ["(OA;CI;RPWP;5b47d60f-6090-40b2-9f37-2a4de88f3063;;%s-527)" %
                 str(self.domain_sid)]

        self.sd_utils.update_aces_in_dacl(self.domain_dn, add_aces=aces)

        if self.add_update_container:
            self.update_add(op)

    ## Operation 86: {3a6b3fbf-3168-4312-a10d-dd5b3393952d}
    ##
    ## Grant the DS-Validated-Write-Computer CAR to creator owner and self
    ##
    ## (OA;CIIO;SW;9b026da6-0d3c-465c-8bee-5199d7165cba;bf967a86-0de6-11d0-a285-00aa003049e2;PS)
    ## (OA;CIIO;SW;9b026da6-0d3c-465c-8bee-5199d7165cba;bf967a86-0de6-11d0-a285-00aa003049e2;CO)
    ##
    def operation_86(self, op):
        if self.update_exists(op):
            return
        self.raise_if_not_fix(op)

        aces = ["(OA;CIIO;SW;9b026da6-0d3c-465c-8bee-5199d7165cba;bf967a86-0de6-11d0-a285-00aa003049e2;PS)"]
        aces += ["(OA;CIIO;SW;9b026da6-0d3c-465c-8bee-5199d7165cba;bf967a86-0de6-11d0-a285-00aa003049e2;CO)"]

        self.sd_utils.update_aces_in_dacl(self.domain_dn, add_aces=aces)

        if self.add_update_container:
            self.update_add(op)

    ## Operation 87: {7f950403-0ab3-47f9-9730-5d7b0269f9bd}
    ##
    ## Delete the ACE granting Full Control to the incorrect
    ## domain-relative Enterprise Key Admins group, and add
    ## an ACE granting Full Control to Enterprise Key Admins group.
    ##
    ## Delete (A;CI;RPWPCRLCLOCCDCRCWDWOSDDTSW;;;Enterprise Key Admins)
    ## Add (A;CI;RPWPCRLCLOCCDCRCWDWOSDDTSW;;;Enterprise Key Admins)
    ##
    def operation_87(self, op):
        if self.update_exists(op):
            return
        self.raise_if_not_fix(op)

        # we use an explicit sid in order to replay the windows mistake
        # note this is also strange for a 2nd reason because it doesn't
        # delete: ["(OA;CI;RPWP;5b47d60f-6090-40b2-9f37-2a4de88f3063;;%s-527)"
        # which was added in operation_85, so the del is basically a noop
        # and the result is one additional ace
        del_aces = ["(A;CI;RPWPCRLCLOCCDCRCWDWOSDDTSW;;;%s-527)" %
                    str(self.domain_sid)]
        add_aces = ["(A;CI;RPWPCRLCLOCCDCRCWDWOSDDTSW;;;EK)"]

        self.sd_utils.update_aces_in_dacl(self.domain_dn,
                                          del_aces=del_aces,
                                          add_aces=add_aces)

        if self.add_update_container:
            self.update_add(op)

    ## Operation 88: {434bb40d-dbc9-4fe7-81d4-d57229f7b080}
    ##
    ## Add "msDS-ExpirePasswordsOnSmartCardOnlyAccounts" on the domain NC object
    ## and set default value to FALSE
    ##
    def operation_88(self, op):
        if self.update_exists(op):
            return
        self.raise_if_not_fix(op)

        ldif = """
dn: %s
changetype: modify
add: msDS-ExpirePasswordsOnSmartCardOnlyAccounts
msDS-ExpirePasswordsOnSmartCardOnlyAccounts: FALSE
""" % str(self.domain_dn)

        self.samdb.modify_ldif(ldif)

        if self.add_update_container:
            self.update_add(op)

    ## Windows Server 2016 (operation 89) complete, the **revision** attribute for the
    ## CN=ActiveDirectoryUpdate,CN=DomainUpdates,CN=System,DC=ForestRootDomain object
    ## is set to **16**.
    ##

    ## Operation 89: {a0c238ba-9e30-4ee6-80a6-43f731e9a5cd}
    ##
    ## Delete the ACE granting Full Control to Enterprise Key Admins and
    ## add an ACE granting Enterprise Key Admins Full Control over just
    ## the msdsKeyCredentialLink attribute.
    ##
    ## Delete (A;CI;RPWPCRLCLOCCDCRCWDWOSDDTSW;;;Enterprise Key Admins)
    ## Add (OA;CI;RPWP;5b47d60f-6090-40b2-9f37-2a4de88f3063;;Enterprise Key Admins)|
    ##
    def operation_89(self, op):
        if self.update_exists(op):
            return
        self.raise_if_not_fix(op)

        # Note this only fixes the mistake from operation_87
        # but leaves the mistake of operation_85 if we're
        # not in the root domain...
        del_aces = ["(A;CI;RPWPCRLCLOCCDCRCWDWOSDDTSW;;;EK)"]
        add_aces = ["(OA;CI;RPWP;5b47d60f-6090-40b2-9f37-2a4de88f3063;;EK)"]

        self.sd_utils.update_aces_in_dacl(self.domain_dn,
                                          del_aces=del_aces,
                                          add_aces=add_aces)

        if self.add_update_container:
            self.update_add(op)
diff --git a/python/samba/drs_utils.py b/python/samba/drs_utils.py
new file mode 100644
index 0000000..06e6928
--- /dev/null
+++ b/python/samba/drs_utils.py
@@ -0,0 +1,456 @@
+# DRS utility code
+#
+# Copyright Andrew Tridgell 2010
+# Copyright Andrew Bartlett 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from samba.dcerpc import drsuapi, misc, drsblobs
+from samba.net import Net
+from samba.ndr import ndr_unpack
+from samba import dsdb
+from samba import werror
+from samba import WERRORError
+import samba
+import ldb
+from samba.dcerpc.drsuapi import (DRSUAPI_ATTID_name,
+ DRSUAPI_SUPPORTED_EXTENSION_GETCHGREQ_V8,
+ DRSUAPI_SUPPORTED_EXTENSION_GETCHGREQ_V10)
+import re
+
+
class drsException(Exception):
    """Base exception type for DRS errors."""

    def __init__(self, value):
        # Keep the raw error detail available to callers as .value
        self.value = value

    def __str__(self):
        # Prefix the detail so log lines identify the error family.
        return "drsException: " + self.value
+
+
def drsuapi_connect(server, lp, creds, ip=None):
    """Open a DRSUAPI connection to the server and bind to it.

    :param server: the name of the server to connect to
    :param lp: a samba loadparm context
    :param creds: credential used for the connection
    :param ip: optional IP address to force as the connection target
    :return: A tuple with the drsuapi bind object, the drsuapi handle
        and the supported extensions.
    :raise drsException: if the connection fails
    """
    options = ["seal"]
    # At high debug levels ask the RPC layer to print packet contents.
    if lp.log_level() >= 9:
        options.append("print")

    if ip is None:
        binding_string = "ncacn_ip_tcp:%s[%s]" % (server, ",".join(options))
    else:
        # Force the transport target to the given IP while keeping the
        # server name for hostname validation.
        options.append("target_hostname=%s" % server)
        binding_string = "ncacn_ip_tcp:%s[%s]" % (ip, ",".join(options))

    try:
        bind_obj = drsuapi.drsuapi(binding_string, lp, creds)
        (handle, supported_extensions) = drs_DsBind(bind_obj)
    except Exception as e:
        raise drsException("DRS connection to %s failed: %s" % (server, e))

    return (bind_obj, handle, supported_extensions)
+
+
def sendDsReplicaSync(drsuapiBind, drsuapi_handle, source_dsa_guid,
                      naming_context, req_option):
    """Send DS replica sync request.

    :param drsuapiBind: a drsuapi Bind object
    :param drsuapi_handle: a drsuapi handle on the drsuapi connection
    :param source_dsa_guid: the guid of the source dsa for the replication
    :param naming_context: the DN of the naming context to replicate
    :param req_option: replication options for the DsReplicaSync call
    :raise drsException: if any error occur while sending and receiving the
        reply for the dsReplicaSync
    """

    # Identify the naming context (partition) to be synchronised.
    nc = drsuapi.DsReplicaObjectIdentifier()
    nc.dn = naming_context

    # Build a version-1 DsReplicaSync request.
    req1 = drsuapi.DsReplicaSyncRequest1()
    req1.naming_context = nc
    req1.options = req_option
    req1.source_dsa_guid = misc.GUID(source_dsa_guid)

    try:
        drsuapiBind.DsReplicaSync(drsuapi_handle, 1, req1)
    except Exception as estr:
        raise drsException("DsReplicaSync failed %s" % estr)
+
+
def drs_DsBind(drs):
    """Make a DsBind call, returning the binding handle.

    :param drs: a connected drsuapi.drsuapi object
    :return: tuple of (binding handle, server's supported extensions)
    """
    # The complete set of DRS extensions we advertise to the server
    # (suffixes of the drsuapi.DRSUAPI_SUPPORTED_EXTENSION_* constants).
    extension_names = [
        "BASE",
        "ASYNC_REPLICATION",
        "REMOVEAPI",
        "MOVEREQ_V2",
        "GETCHG_COMPRESS",
        "DCINFO_V1",
        "RESTORE_USN_OPTIMIZATION",
        "KCC_EXECUTE",
        "ADDENTRY_V2",
        "LINKED_VALUE_REPLICATION",
        "DCINFO_V2",
        "INSTANCE_TYPE_NOT_REQ_ON_MOD",
        "CRYPTO_BIND",
        "GET_REPL_INFO",
        "STRONG_ENCRYPTION",
        "DCINFO_V01",
        "TRANSITIVE_MEMBERSHIP",
        "ADD_SID_HISTORY",
        "POST_BETA3",
        "GET_MEMBERSHIPS2",
        "GETCHGREQ_V6",
        "NONDOMAIN_NCS",
        "GETCHGREQ_V8",
        "GETCHGREPLY_V5",
        "GETCHGREPLY_V6",
        "ADDENTRYREPLY_V3",
        "GETCHGREPLY_V7",
        "VERIFY_OBJECT",
    ]

    bind_info = drsuapi.DsBindInfoCtr()
    bind_info.length = 28
    bind_info.info = drsuapi.DsBindInfo28()
    for name in extension_names:
        flag = getattr(drsuapi, "DRSUAPI_SUPPORTED_EXTENSION_" + name)
        bind_info.info.supported_extensions |= flag

    (info, handle) = drs.DsBind(misc.GUID(drsuapi.DRSUAPI_DS_BIND_GUID),
                                bind_info)

    return (handle, info.info.supported_extensions)
+
+
def drs_get_rodc_partial_attribute_set(samdb):
    """Build the partial attribute set used for RODC replication.

    :param samdb: a SamDB connection, used to read the schema
    :return: a drsuapi.DsPartialAttributeSet of the attids to replicate
    """
    # the exact list of attids we send is quite critical. Note that
    # we do ask for the secret attributes, but set SPECIAL_SECRET_PROCESSING
    # to zero them out
    skip_system_flags = (samba.dsdb.DS_FLAG_ATTR_NOT_REPLICATED |
                         samba.dsdb.DS_FLAG_ATTR_IS_CONSTRUCTED)

    schema_dn = samdb.get_schema_basedn()
    res = samdb.search(base=schema_dn, scope=ldb.SCOPE_SUBTREE,
                       expression="objectClass=attributeSchema",
                       attrs=["lDAPDisplayName", "systemFlags",
                              "searchFlags"])

    attids = []
    for msg in res:
        display_name = str(msg["lDAPDisplayName"][0])
        # skip attributes that are never replicated or are constructed
        if "systemFlags" in msg and \
           int(msg["systemFlags"][0]) & skip_system_flags:
            continue
        # skip attributes withheld from RODCs
        if "searchFlags" in msg and \
           int(msg["searchFlags"][0]) & samba.dsdb.SEARCH_FLAG_RODC_ATTRIBUTE:
            continue
        attids.append(int(samdb.get_attid_from_lDAPDisplayName(display_name)))

    # the attids do need to be sorted, or windows doesn't return
    # all the attributes we need
    attids.sort()

    partial_attribute_set = drsuapi.DsPartialAttributeSet()
    partial_attribute_set.version = 1
    partial_attribute_set.attids = attids
    partial_attribute_set.num_attids = len(attids)
    return partial_attribute_set
+
+
def drs_copy_highwater_mark(hwm, new_hwm):
    """
    Copies the highwater mark by value, rather than by object reference.
    (This avoids lingering talloc references to old GetNCChanges reply
    messages.)
    """
    # Copy each USN field individually so hwm owns its own storage.
    for field in ('tmp_highest_usn', 'reserved_usn', 'highest_usn'):
        setattr(hwm, field, getattr(new_hwm, field))
+
+
class drs_Replicate(object):
    """DRS replication calls against a single partner DC."""

    def __init__(self, binding_string, lp, creds, samdb, invocation_id):
        """Connect and DsBind to the replication partner.

        :param binding_string: DCERPC binding string for the partner DC
        :param lp: a samba loadparm context
        :param creds: credentials used for the connection
        :param samdb: local SamDB that replicated changes are applied to
        :param invocation_id: misc.GUID invocation id of the local DSA
        :raise RuntimeError: if invocation_id is not a non-zero misc.GUID
        """
        self.drs = drsuapi.drsuapi(binding_string, lp, creds)
        (self.drs_handle, self.supports_ext) = drs_DsBind(self.drs)
        self.net = Net(creds=creds, lp=lp)
        self.samdb = samdb
        if not isinstance(invocation_id, misc.GUID):
            raise RuntimeError("Must supply GUID for invocation_id")
        if invocation_id == misc.GUID("00000000-0000-0000-0000-000000000000"):
            raise RuntimeError("Must not set GUID 00000000-0000-0000-0000-000000000000 as invocation_id")
        self.replication_state = self.net.replicate_init(self.samdb, lp, self.drs, invocation_id)
        # Extra DRS_* more_flags OR'ed into every V10 GetNCChanges request.
        self.more_flags = 0

    def _should_retry_with_get_tgt(self, error_code, req):
        """Return True if the failed request should be retried with GET_TGT."""

        # If the error indicates we fail to resolve a target object for a
        # linked attribute, then we should retry the request with GET_TGT
        # (if we support it and haven't already tried that)
        supports_ext = self.supports_ext

        return (error_code == werror.WERR_DS_DRA_RECYCLED_TARGET and
                supports_ext & DRSUAPI_SUPPORTED_EXTENSION_GETCHGREQ_V10 and
                (req.more_flags & drsuapi.DRSUAPI_DRS_GET_TGT) == 0)

    @staticmethod
    def _should_calculate_missing_anc_locally(error_code, req):
        """Return True if a MISSING_PARENT error should be diagnosed locally."""
        # If the error indicates we fail to resolve the parent object
        # for a new object, then we assume we are replicating from a
        # buggy server (Samba 4.5 and earlier) that doesn't really
        # understand how to implement GET_ANC

        return ((error_code == werror.WERR_DS_DRA_MISSING_PARENT) and
                (req.replica_flags & drsuapi.DRSUAPI_DRS_GET_ANC) != 0)


    def _calculate_missing_anc_locally(self, ctr):
        """Report which parent objects were not included in this chunk.

        Purely diagnostic: prints the parents whose GUIDs were not seen
        before their children in the reply (*ctr*).
        """
        # GUIDs of every object seen so far in this chunk, in reply order.
        self.guids_seen = set()

        # walk objects in ctr, add to guid_seen as we see them
        # note if an object doesn't have a parent

        object_to_check = ctr.first_object

        while True:
            if object_to_check is None:
                break

            self.guids_seen.add(str(object_to_check.object.identifier.guid))

            # A parent GUID that is set, non-zero and not yet seen means the
            # server failed to send the ancestor before its child.
            if object_to_check.parent_object_guid is not None \
               and object_to_check.parent_object_guid \
               != misc.GUID("00000000-0000-0000-0000-000000000000") \
               and str(object_to_check.parent_object_guid) not in self.guids_seen:
                obj_dn = ldb.Dn(self.samdb, object_to_check.object.identifier.dn)
                parent_dn = obj_dn.parent()
                print(f"Object {parent_dn} with "
                      f"GUID {object_to_check.parent_object_guid} "
                      "was not sent by the server in this chunk")

            object_to_check = object_to_check.next_object


    def process_chunk(self, level, ctr, schema, req_level, req, first_chunk):
        """Processes a single chunk of received replication data"""
        # pass the replication into the py_net.c python bindings for processing
        self.net.replicate_chunk(self.replication_state, level, ctr,
                                 schema=schema, req_level=req_level, req=req)

    def replicate(self, dn, source_dsa_invocation_id, destination_dsa_guid,
                  schema=False, exop=drsuapi.DRSUAPI_EXOP_NONE, rodc=False,
                  replica_flags=None, full_sync=True, sync_forced=False, more_flags=0):
        """Replicate a single DN from the bound partner DC.

        :param dn: DN of the naming context to replicate
        :param source_dsa_invocation_id: misc.GUID of the source DSA
        :param destination_dsa_guid: misc.GUID of the destination DSA
        :param schema: True when replicating the schema partition
        :param exop: extended operation (DRSUAPI_EXOP_*) to request
        :param rodc: True for an RODC-style (filtered/secretless) replication
        :param replica_flags: explicit DRSUAPI_DRS_* flags overriding the
            defaults chosen below
        :param full_sync: if False, resume from the stored repsFrom
            highwatermark and send our up-to-dateness vector
        :param sync_forced: also set DRSUAPI_DRS_SYNC_FORCED
        :param more_flags: extra more_flags for V10 requests (e.g. GET_TGT)
        :return: tuple of (number of objects, number of links) received
        """

        # setup for a GetNCChanges call; use the V10 request when the
        # server advertised support, otherwise fall back to V8.
        if self.supports_ext & DRSUAPI_SUPPORTED_EXTENSION_GETCHGREQ_V10:
            req = drsuapi.DsGetNCChangesRequest10()
            req.more_flags = (more_flags | self.more_flags)
            req_level = 10
        else:
            req_level = 8
            req = drsuapi.DsGetNCChangesRequest8()

        req.destination_dsa_guid = destination_dsa_guid
        req.source_dsa_invocation_id = source_dsa_invocation_id
        req.naming_context = drsuapi.DsReplicaObjectIdentifier()
        req.naming_context.dn = dn

        # Default to a full replication if we don't find an upToDatenessVector
        udv = None
        hwm = drsuapi.DsReplicaHighWaterMark()
        hwm.tmp_highest_usn = 0
        hwm.reserved_usn = 0
        hwm.highest_usn = 0

        if not full_sync:
            # Resume from the highwatermark recorded against this source
            # DSA in our repsFrom attribute, if any.
            res = self.samdb.search(base=dn, scope=ldb.SCOPE_BASE,
                                    attrs=["repsFrom"])
            if "repsFrom" in res[0]:
                for reps_from_packed in res[0]["repsFrom"]:
                    reps_from_obj = ndr_unpack(drsblobs.repsFromToBlob, reps_from_packed)
                    if reps_from_obj.ctr.source_dsa_invocation_id == source_dsa_invocation_id:
                        hwm = reps_from_obj.ctr.highwatermark

            # Also send our up-to-dateness vector (converted to v1 cursors)
            # so the server can skip changes we already have.
            udv = drsuapi.DsReplicaCursorCtrEx()
            udv.version = 1
            udv.reserved1 = 0
            udv.reserved2 = 0

            cursors_v1 = []
            cursors_v2 = dsdb._dsdb_load_udv_v2(self.samdb,
                                                self.samdb.get_default_basedn())
            for cursor_v2 in cursors_v2:
                cursor_v1 = drsuapi.DsReplicaCursor()
                cursor_v1.source_dsa_invocation_id = cursor_v2.source_dsa_invocation_id
                cursor_v1.highest_usn = cursor_v2.highest_usn
                cursors_v1.append(cursor_v1)

            udv.cursors = cursors_v1
            udv.count = len(cursors_v1)

        req.highwatermark = hwm
        req.uptodateness_vector = udv

        # Choose the replica flags: caller-supplied, none for secret
        # replication, or the standard set otherwise.
        if replica_flags is not None:
            req.replica_flags = replica_flags
        elif exop == drsuapi.DRSUAPI_EXOP_REPL_SECRET:
            req.replica_flags = 0
        else:
            req.replica_flags = (drsuapi.DRSUAPI_DRS_INIT_SYNC |
                                 drsuapi.DRSUAPI_DRS_PER_SYNC |
                                 drsuapi.DRSUAPI_DRS_GET_ANC |
                                 drsuapi.DRSUAPI_DRS_NEVER_SYNCED |
                                 drsuapi.DRSUAPI_DRS_GET_ALL_GROUP_MEMBERSHIP)
            if rodc:
                req.replica_flags |= (
                    drsuapi.DRSUAPI_DRS_SPECIAL_SECRET_PROCESSING)
            else:
                req.replica_flags |= drsuapi.DRSUAPI_DRS_WRIT_REP

        if sync_forced:
            req.replica_flags |= drsuapi.DRSUAPI_DRS_SYNC_FORCED

        req.max_object_count = 402
        req.max_ndr_size = 402116
        req.extended_op = exop
        req.fsmo_info = 0
        req.partial_attribute_set = None
        req.partial_attribute_set_ex = None
        req.mapping_ctr.num_mappings = 0
        req.mapping_ctr.mappings = None

        # RODCs replicate only a filtered attribute set (never for schema).
        if not schema and rodc:
            req.partial_attribute_set = drs_get_rodc_partial_attribute_set(self.samdb)

        # Very old servers only understand the V5 request: copy every
        # public field from the request we built into a V5 structure.
        if not self.supports_ext & DRSUAPI_SUPPORTED_EXTENSION_GETCHGREQ_V8:
            req_level = 5
            req5 = drsuapi.DsGetNCChangesRequest5()
            for a in dir(req5):
                if a[0] != '_':
                    setattr(req5, a, getattr(req, a))
            req = req5

        num_objects = 0
        num_links = 0
        first_chunk = True

        # Pull chunks until the server reports no more data.
        while True:
            (level, ctr) = self.drs.DsGetNCChanges(self.drs_handle, req_level, req)
            if ctr.first_object is None and ctr.object_count != 0:
                raise RuntimeError("DsGetNCChanges: NULL first_object with object_count=%u" % (ctr.object_count))

            try:
                self.process_chunk(level, ctr, schema, req_level, req, first_chunk)
            except WERRORError as e:
                # Check if retrying with the GET_TGT flag set might resolve this error
                if self._should_retry_with_get_tgt(e.args[0], req):

                    print("Missing target object - retrying with DRS_GET_TGT")
                    req.more_flags |= drsuapi.DRSUAPI_DRS_GET_TGT

                    # try sending the request again (this has the side-effect
                    # of causing the DC to restart the replication from scratch)
                    first_chunk = True
                    continue

                if self._should_calculate_missing_anc_locally(e.args[0],
                                                              req):
                    print("Missing parent object - calculating missing objects locally")

                    self._calculate_missing_anc_locally(ctr)
                # Any other WERROR is fatal for this replication.
                raise e

            first_chunk = False
            num_objects += ctr.object_count

            # Cope with servers that do not return level 6, so do not return any links
            try:
                num_links += ctr.linked_attributes_count
            except AttributeError:
                pass

            if ctr.more_data == 0:
                break

            # update the request's HWM so we get the next chunk
            drs_copy_highwater_mark(req.highwatermark, ctr.new_highwatermark)

        return (num_objects, num_links)
+
+
# Handles the special case of creating a new clone of a DB, while also renaming
# the entire DB's objects on the way through
class drs_ReplicateRenamer(drs_Replicate):
    """Uses DRS replication to rename the entire DB"""

    def __init__(self, binding_string, lp, creds, samdb, invocation_id,
                 old_base_dn, new_base_dn):
        """Set up a renaming replication.

        :param binding_string: DCERPC binding string for the source DC
        :param lp: a samba loadparm context
        :param creds: credentials used for the connection
        :param samdb: local SamDB that replicated changes are applied to
        :param invocation_id: misc.GUID invocation id of the local DSA
        :param old_base_dn: base DN string used by the source DC
        :param new_base_dn: base DN string the objects are renamed to
        """
        super().__init__(binding_string, lp, creds, samdb, invocation_id)
        self.old_base_dn = old_base_dn
        self.new_base_dn = new_base_dn

        # because we're renaming the DNs, we know we're going to have trouble
        # resolving link targets. Normally we'd get to the end of replication
        # only to find we need to retry the whole replication with the GET_TGT
        # flag set. Always setting the GET_TGT flag avoids this extra work.
        self.more_flags = drsuapi.DRSUAPI_DRS_GET_TGT

    def rename_dn(self, dn_str):
        """Replace the old base-DN suffix of *dn_str* with the new base DN.

        Uses plain string manipulation rather than re.sub(), so that any
        regex metacharacters occurring in a DN (e.g. parentheses or '+' in
        an RDN value, or backslash escapes in the replacement) are treated
        literally instead of corrupting the match.

        :param dn_str: a DN string, possibly ending with old_base_dn
        :return: the renamed DN, or *dn_str* unchanged if it does not end
            with old_base_dn
        """
        if dn_str.endswith(self.old_base_dn):
            prefix_len = len(dn_str) - len(self.old_base_dn)
            return dn_str[:prefix_len] + self.new_base_dn
        return dn_str

    def update_name_attr(self, base_obj):
        """Updates the 'name' attribute for the base DN object"""
        for attr in base_obj.attribute_ctr.attributes:
            if attr.attid == DRSUAPI_ATTID_name:
                base_dn = ldb.Dn(self.samdb, base_obj.identifier.dn)
                new_name = base_dn.get_rdn_value()
                # replicated string values are UTF-16-LE on the wire
                attr.value_ctr.values[0].blob = new_name.encode('utf-16-le')

    def rename_top_level_object(self, first_obj):
        """Renames the first/top-level object in a partition"""
        old_dn = first_obj.identifier.dn
        first_obj.identifier.dn = self.rename_dn(first_obj.identifier.dn)
        print("Renaming partition %s --> %s" % (old_dn,
                                                first_obj.identifier.dn))

        # we also need to fix up the 'name' attribute for the base DN,
        # otherwise the RDNs won't match
        if first_obj.identifier.dn == self.new_base_dn:
            self.update_name_attr(first_obj)

    def process_chunk(self, level, ctr, schema, req_level, req, first_chunk):
        """Processes a single chunk of received replication data"""

        # we need to rename the NC in every chunk - this gets used in searches
        # when applying the chunk
        if ctr.naming_context:
            ctr.naming_context.dn = self.rename_dn(ctr.naming_context.dn)

        # rename the first object in each partition. This will cause every
        # subsequent object in the partition to be renamed as a side-effect
        if first_chunk and ctr.object_count != 0:
            self.rename_top_level_object(ctr.first_object.object)

        # then do the normal repl processing to apply this chunk to our DB
        super().process_chunk(level, ctr, schema, req_level, req, first_chunk)
diff --git a/python/samba/emulate/__init__.py b/python/samba/emulate/__init__.py
new file mode 100644
index 0000000..110e19d
--- /dev/null
+++ b/python/samba/emulate/__init__.py
@@ -0,0 +1,16 @@
+# Package level initialisation
+#
+# Copyright (C) Catalyst IT Ltd. 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
diff --git a/python/samba/emulate/traffic.py b/python/samba/emulate/traffic.py
new file mode 100644
index 0000000..4811fe8
--- /dev/null
+++ b/python/samba/emulate/traffic.py
@@ -0,0 +1,2415 @@
+# -*- encoding: utf-8 -*-
+# Samba traffic replay and learning
+#
+# Copyright (C) Catalyst IT Ltd. 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import time
+import os
+import random
+import json
+import math
+import sys
+import signal
+from errno import ECHILD, ESRCH
+
+from collections import OrderedDict, Counter, defaultdict, namedtuple
+from dns.resolver import query as dns_query
+
+from samba.emulate import traffic_packets
+from samba.samdb import SamDB
+import ldb
+from ldb import LdbError
+from samba.dcerpc import ClientConnection
+from samba.dcerpc import security, drsuapi, lsa
+from samba.dcerpc import netlogon
+from samba.dcerpc.netlogon import netr_Authenticator
+from samba.dcerpc import srvsvc
+from samba.dcerpc import samr
+from samba.drs_utils import drs_DsBind
+import traceback
+from samba.credentials import Credentials, DONT_USE_KERBEROS, MUST_USE_KERBEROS
+from samba.auth import system_session
+from samba.dsdb import (
+ UF_NORMAL_ACCOUNT,
+ UF_SERVER_TRUST_ACCOUNT,
+ UF_TRUSTED_FOR_DELEGATION,
+ UF_WORKSTATION_TRUST_ACCOUNT
+)
+from samba.dcerpc.misc import SEC_CHAN_BDC
+from samba import gensec
+from samba import sd_utils
+from samba.common import get_string
+from samba.logger import get_samba_logger
+import bisect
+
# Model-file format versioning: models are saved at the current version and
# any model at or above the required version can be loaded.
CURRENT_MODEL_VERSION = 2   # save as this
REQUIRED_MODEL_VERSION = 2  # load accepts this or greater
SLEEP_OVERHEAD = 3e-4

# we don't use None, because it complicates [de]serialisation
NON_PACKET = '-'

# (protocol, opcode) pairs whose presence suggests the sender is the client.
CLIENT_CLUES = {
    ('dns', '0'): 1.0,      # query
    ('smb', '0x72'): 1.0,   # Negotiate protocol
    ('ldap', '0'): 1.0,     # bind
    ('ldap', '3'): 1.0,     # searchRequest
    ('ldap', '2'): 1.0,     # unbindRequest
    ('cldap', '3'): 1.0,
    ('dcerpc', '11'): 1.0,  # bind
    ('dcerpc', '14'): 1.0,  # Alter_context
    ('nbns', '0'): 1.0,     # query
}

# (protocol, opcode) pairs whose presence suggests the sender is the server.
SERVER_CLUES = {
    ('dns', '1'): 1.0,      # response
    ('ldap', '1'): 1.0,     # bind response
    ('ldap', '4'): 1.0,     # search result
    ('ldap', '5'): 1.0,     # search done
    ('cldap', '5'): 1.0,
    ('dcerpc', '12'): 1.0,  # bind_ack
    ('dcerpc', '13'): 1.0,  # bind_nak
    ('dcerpc', '15'): 1.0,  # Alter_context response
}

# Protocols that the replay does not attempt to emulate.
SKIPPED_PROTOCOLS = {"smb", "smb2", "browser", "smb_netlogon"}

WAIT_SCALE = 10.0
WAIT_THRESHOLD = (1.0 / WAIT_SCALE)
NO_WAIT_LOG_TIME_RANGE = (-10, -3)

# DEBUG_LEVEL can be changed by scripts with -d
DEBUG_LEVEL = 0

LOGGER = get_samba_logger(name=__name__)
+
+
def debug(level, msg, *args):
    """Print a formatted debug message to standard error.

    :param level: The debug level, message will be printed if it is <= the
                  currently set debug level. The debug level can be set with
                  the -d option.
    :param msg: The message to be logged, can contain C-Style format
                specifiers
    :param args: The parameters required by the format specifiers
    """
    # Guard clause: below-threshold messages are dropped silently.
    if level > DEBUG_LEVEL:
        return
    text = msg % tuple(args) if args else msg
    print(text, file=sys.stderr)
+
+
def debug_lineno(*args):
    """ Print an unformatted log message to stderr, containing the line number
    """
    # Second-from-top stack frame is the caller of this function.
    caller = traceback.extract_stack(limit=2)[0]
    # caller[2] is the caller's function name, caller[1] its line number;
    # the line number is highlighted in yellow.
    print((" %s:\033[01;33m%s \033[00m" % (caller[2], caller[1])),
          end=' ', file=sys.stderr)
    for arg in args:
        print(arg, file=sys.stderr)
    print(file=sys.stderr)
    sys.stderr.flush()
+
+
def random_colour_print(seeds):
    """Return a function that prints a coloured line to stderr. The colour
    of the line depends on a sort of hash of the integer arguments."""
    if not seeds:
        # No seeds given: return an uncoloured printer.
        def emit(*args):
            if DEBUG_LEVEL > 0:
                for item in args:
                    print(item, file=sys.stderr)
        return emit

    # Fold the seed integers into a stable 256-colour palette index.
    acc = 214
    for seed in seeds:
        acc = ((acc + 17) * seed) % 214
    colour_prefix = "\033[38;5;%dm" % (18 + acc)

    def emit(*args):
        if DEBUG_LEVEL > 0:
            for item in args:
                print("%s%s\033[00m" % (colour_prefix, item), file=sys.stderr)
    return emit
+
+
class FakePacketError(Exception):
    """Exception type used for fake-packet errors."""
+
+
class Packet(object):
    """Details of a network packet"""
    # __slots__ keeps per-instance memory low; traces contain many packets.
    __slots__ = ('timestamp',
                 'ip_protocol',
                 'stream_number',
                 'src',
                 'dest',
                 'protocol',
                 'opcode',
                 'desc',
                 'extra',
                 'endpoints')
    def __init__(self, timestamp, ip_protocol, stream_number, src, dest,
                 protocol, opcode, desc, extra):
        """Store the packet fields.

        :param timestamp: float capture time
        :param ip_protocol: transport protocol identifier
        :param stream_number: stream the packet belongs to
        :param src: integer id of the sending endpoint
        :param dest: integer id of the receiving endpoint
        :param protocol: application protocol name (e.g. 'ldap')
        :param opcode: protocol opcode, as a string
        :param desc: free-form description
        :param extra: list of any remaining fields
        """
        self.timestamp = timestamp
        self.ip_protocol = ip_protocol
        self.stream_number = stream_number
        self.src = src
        self.dest = dest
        self.protocol = protocol
        self.opcode = opcode
        self.desc = desc
        self.extra = extra
        # endpoints is the (low, high) ordered pair, so both directions of
        # a conversation share the same key.
        if self.src < self.dest:
            self.endpoints = (self.src, self.dest)
        else:
            self.endpoints = (self.dest, self.src)

    @classmethod
    def from_line(cls, line):
        """Parse one tab-separated traffic_summary line into a Packet."""
        fields = line.rstrip('\n').split('\t')
        # The first eight fields are fixed; anything after is 'extra'.
        (timestamp,
         ip_protocol,
         stream_number,
         src,
         dest,
         protocol,
         opcode,
         desc) = fields[:8]
        extra = fields[8:]

        timestamp = float(timestamp)
        src = int(src)
        dest = int(dest)

        return cls(timestamp, ip_protocol, stream_number, src, dest,
                   protocol, opcode, desc, extra)

    def as_summary(self, time_offset=0.0):
        """Format the packet as a traffic_summary line.
        """
        extra = '\t'.join(self.extra)
        t = self.timestamp + time_offset
        # Returns (adjusted timestamp, tab-separated line) -- the inverse
        # of from_line() apart from the offset.
        return (t, '%f\t%s\t%s\t%d\t%d\t%s\t%s\t%s\t%s' %
                (t,
                 self.ip_protocol,
                 self.stream_number or '',
                 self.src,
                 self.dest,
                 self.protocol,
                 self.opcode,
                 self.desc,
                 extra))

    def __str__(self):
        return ("%.3f: %d -> %d; ip %s; strm %s; prot %s; op %s; desc %s %s" %
                (self.timestamp, self.src, self.dest, self.ip_protocol or '-',
                 self.stream_number, self.protocol, self.opcode, self.desc,
                 ('«' + ' '.join(self.extra) + '»' if self.extra else '')))

    def __repr__(self):
        return "<Packet @%s>" % self

    def copy(self):
        """Return a new Packet with the same field values."""
        return self.__class__(self.timestamp,
                              self.ip_protocol,
                              self.stream_number,
                              self.src,
                              self.dest,
                              self.protocol,
                              self.opcode,
                              self.desc,
                              self.extra)

    def as_packet_type(self):
        """Return the 'protocol:opcode' key for this packet."""
        t = '%s:%s' % (self.protocol, self.opcode)
        return t

    def client_score(self):
        """A positive number means we think it is a client; a negative number
        means we think it is a server. Zero means no idea. range: -1 to 1.
        """
        key = (self.protocol, self.opcode)
        if key in CLIENT_CLUES:
            return CLIENT_CLUES[key]
        if key in SERVER_CLUES:
            return -SERVER_CLUES[key]
        return 0.0

    def play(self, conversation, context):
        """Send the packet over the network, if required.

        Some packets are ignored, i.e. for protocols not handled,
        server response messages, or messages that are generated by the
        protocol layer associated with other packets.
        """
        # Handlers live in traffic_packets as packet_<protocol>_<opcode>.
        fn_name = 'packet_%s_%s' % (self.protocol, self.opcode)
        try:
            fn = getattr(traffic_packets, fn_name)

        except AttributeError as e:
            # No handler: report it and skip the packet.
            print("Conversation(%s) Missing handler %s" %
                  (conversation.conversation_id, fn_name),
                  file=sys.stderr)
            return

        # Don't display a message for kerberos packets, they're not directly
        # generated they're used to indicate kerberos should be used
        if self.protocol != "kerberos":
            debug(2, "Conversation(%s) Calling handler %s" %
                  (conversation.conversation_id, fn_name))

        start = time.time()
        try:
            if fn(self, conversation, context):
                # Only collect timing data for functions that generate
                # network traffic, or fail
                end = time.time()
                duration = end - start
                print("%f\t%s\t%s\t%s\t%f\tTrue\t" %
                      (end, conversation.conversation_id, self.protocol,
                       self.opcode, duration))
        except Exception as e:
            # A failed send is recorded with success=False and the error.
            end = time.time()
            duration = end - start
            print("%f\t%s\t%s\t%s\t%f\tFalse\t%s" %
                  (end, conversation.conversation_id, self.protocol,
                   self.opcode, duration, e))

    def __cmp__(self, other):
        # NOTE(review): __cmp__ is a Python-2 protocol and is ignored by
        # Python 3; kept as-is here — confirm whether any caller uses it
        # directly before relying on packet ordering via comparisons.
        return self.timestamp - other.timestamp

    def is_really_a_packet(self, missing_packet_stats=None):
        """Return True if this packet has a usable replay handler."""
        return is_a_real_packet(self.protocol, self.opcode)
+
+
def is_a_real_packet(protocol, opcode):
    """Return True if the packet is one the replay can actually send.

    Packets for which this returns False can be removed without any
    effect on the replay.

    :param protocol: the packet's protocol name (e.g. 'ldap')
    :param opcode: the packet's opcode, as a string
    :return: True when a real (non-null) handler exists for the packet
    """
    if protocol in SKIPPED_PROTOCOLS:
        # Ignore any packets for the protocols we're not interested in.
        return False
    if protocol == "ldap" and opcode == '':
        # skip ldap continuation packets
        return False

    fn_name = 'packet_%s_%s' % (protocol, opcode)
    fn = getattr(traffic_packets, fn_name, None)
    if fn is None:
        # Bug fix: Logger.debug() takes lazy %-style arguments and has no
        # file= keyword (the previous print-style call raised TypeError).
        LOGGER.debug("missing packet %s", fn_name)
        return False
    if fn is traffic_packets.null_packet:
        return False
    return True
+
+
def is_a_traffic_generating_packet(protocol, opcode):
    """Return true if a packet generates traffic in its own right. Some of
    these will generate traffic in certain contexts (e.g. ldap unbind
    after a bind) but not if the conversation consists only of these packets.
    """
    # 'wait' pseudo-packets never touch the network.
    if protocol == 'wait':
        return False

    # These only generate traffic as a side-effect of other packets.
    quiet_packets = {
        ('kerberos', ''),
        ('ldap', '2'),
        ('dcerpc', '15'),
        ('dcerpc', '16'),
    }
    if (protocol, opcode) in quiet_packets:
        return False

    return is_a_real_packet(protocol, opcode)
+
+
+class ReplayContext(object):
+ """State/Context for a conversation between an simulated client and a
+ server. Some of the context is shared amongst all conversations
+ and should be generated before the fork, while other context is
+ specific to a particular conversation and should be generated
+ *after* the fork, in generate_process_local_config().
+ """
+ def __init__(self,
+ server=None,
+ lp=None,
+ creds=None,
+ total_conversations=None,
+ badpassword_frequency=None,
+ prefer_kerberos=None,
+ tempdir=None,
+ statsdir=None,
+ ou=None,
+ base_dn=None,
+ domain=os.environ.get("DOMAIN"),
+ domain_sid=None,
+ instance_id=None):
+ self.server = server
+ self.netlogon_connection = None
+ self.creds = creds
+ self.lp = lp
+ if prefer_kerberos:
+ self.kerberos_state = MUST_USE_KERBEROS
+ else:
+ self.kerberos_state = DONT_USE_KERBEROS
+ self.ou = ou
+ self.base_dn = base_dn
+ self.domain = domain
+ self.statsdir = statsdir
+ self.global_tempdir = tempdir
+ self.domain_sid = domain_sid
+ self.realm = lp.get('realm')
+ self.instance_id = instance_id
+
+ # Bad password attempt controls
+ self.badpassword_frequency = badpassword_frequency
+ self.last_lsarpc_bad = False
+ self.last_lsarpc_named_bad = False
+ self.last_simple_bind_bad = False
+ self.last_bind_bad = False
+ self.last_srvsvc_bad = False
+ self.last_drsuapi_bad = False
+ self.last_netlogon_bad = False
+ self.last_samlogon_bad = False
+ self.total_conversations = total_conversations
+ self.generate_ldap_search_tables()
+
+ def generate_ldap_search_tables(self):
+ session = system_session()
+
+ db = SamDB(url="ldap://%s" % self.server,
+ session_info=session,
+ credentials=self.creds,
+ lp=self.lp)
+
+ res = db.search(db.domain_dn(),
+ scope=ldb.SCOPE_SUBTREE,
+ controls=["paged_results:1:1000"],
+ attrs=['dn'])
+
+ # find a list of dns for each pattern
+ # e.g. CN,CN,CN,DC,DC
+ dn_map = {}
+ attribute_clue_map = {
+ 'invocationId': []
+ }
+
+ for r in res:
+ dn = str(r.dn)
+ pattern = ','.join(x.lstrip()[:2] for x in dn.split(',')).upper()
+ dns = dn_map.setdefault(pattern, [])
+ dns.append(dn)
+ if dn.startswith('CN=NTDS Settings,'):
+ attribute_clue_map['invocationId'].append(dn)
+
+ # extend the map in case we are working with a different
+ # number of DC components.
+ # for k, v in self.dn_map.items():
+ # print >>sys.stderr, k, len(v)
+
+ for k in list(dn_map.keys()):
+ if k[-3:] != ',DC':
+ continue
+ p = k[:-3]
+ while p[-3:] == ',DC':
+ p = p[:-3]
+ for i in range(5):
+ p += ',DC'
+ if p != k and p in dn_map:
+ print('dn_map collision %s %s' % (k, p),
+ file=sys.stderr)
+ continue
+ dn_map[p] = dn_map[k]
+
+ self.dn_map = dn_map
+ self.attribute_clue_map = attribute_clue_map
+
+ # pre-populate DN-based search filters (it's simplest to generate them
+ # once, when the test starts). These are used by guess_search_filter()
+ # to avoid full-scans
+ self.search_filters = {}
+
+ # lookup all the GPO DNs
+ res = db.search(db.domain_dn(), scope=ldb.SCOPE_SUBTREE, attrs=['dn'],
+ expression='(objectclass=groupPolicyContainer)')
+ gpos_by_dn = "".join("(distinguishedName={0})".format(msg['dn']) for msg in res)
+
+ # a search for the 'gPCFileSysPath' attribute is probably a GPO search
+ # (as per the MS-GPOL spec) which searches for GPOs by DN
+ self.search_filters['gPCFileSysPath'] = "(|{0})".format(gpos_by_dn)
+
+ # likewise, a search for gpLink is probably the Domain SOM search part
+ # of the MS-GPOL, in which case it's looking up a few OUs by DN
+ ou_str = ""
+ for ou in ["Domain Controllers,", "traffic_replay,", ""]:
+ ou_str += "(distinguishedName={0}{1})".format(ou, db.domain_dn())
+ self.search_filters['gpLink'] = "(|{0})".format(ou_str)
+
+ # The CEP Web Service can query the AD DC to get pKICertificateTemplate
+ # objects (as per MS-WCCE)
+ self.search_filters['pKIExtendedKeyUsage'] = \
+ '(objectCategory=pKICertificateTemplate)'
+
+ # assume that anything querying the usnChanged is some kind of
+ # synchronization tool, e.g. AD Change Detection Connector
+ res = db.search('', scope=ldb.SCOPE_BASE, attrs=['highestCommittedUSN'])
+ self.search_filters['usnChanged'] = \
+ '(usnChanged>={0})'.format(res[0]['highestCommittedUSN'])
+
+ # The traffic_learner script doesn't preserve the LDAP search filter, and
+ # having no filter can result in a full DB scan. This is costly for a large
+ # DB, and not necessarily representative of real world traffic. As there
+ # several standard LDAP queries that get used by AD tools, we can apply
+ # some logic and guess what the search filter might have been originally.
+ def guess_search_filter(self, attrs, dn_sig, dn):
+
+ # there are some standard spec-based searches that query fairly unique
+ # attributes. Check if the search is likely one of these
+ for key in self.search_filters.keys():
+ if key in attrs:
+ return self.search_filters[key]
+
+ # if it's the top-level domain, assume we're looking up a single user,
+ # e.g. like powershell Get-ADUser or a similar tool
+ if dn_sig == 'DC,DC':
+ random_user_id = random.random() % self.total_conversations
+ account_name = user_name(self.instance_id, random_user_id)
+ return '(&(sAMAccountName=%s)(objectClass=user))' % account_name
+
+ # otherwise just return everything in the sub-tree
+ return '(objectClass=*)'
+
+ def generate_process_local_config(self, account, conversation):
+ self.ldap_connections = []
+ self.dcerpc_connections = []
+ self.lsarpc_connections = []
+ self.lsarpc_connections_named = []
+ self.drsuapi_connections = []
+ self.srvsvc_connections = []
+ self.samr_contexts = []
+ self.netbios_name = account.netbios_name
+ self.machinepass = account.machinepass
+ self.username = account.username
+ self.userpass = account.userpass
+
+ self.tempdir = mk_masked_dir(self.global_tempdir,
+ 'conversation-%d' %
+ conversation.conversation_id)
+
+ self.lp.set("private dir", self.tempdir)
+ self.lp.set("lock dir", self.tempdir)
+ self.lp.set("state directory", self.tempdir)
+ self.lp.set("tls verify peer", "no_check")
+
+ self.remoteAddress = "/root/ncalrpc_as_system"
+ self.samlogon_dn = ("cn=%s,%s" %
+ (self.netbios_name, self.ou))
+ self.user_dn = ("cn=%s,%s" %
+ (self.username, self.ou))
+
+ self.generate_machine_creds()
+ self.generate_user_creds()
+
+ def with_random_bad_credentials(self, f, good, bad, failed_last_time):
+ """Execute the supplied logon function, randomly choosing the
+ bad credentials.
+
+ Based on the frequency in badpassword_frequency randomly perform the
+ function with the supplied bad credentials.
+ If run with bad credentials, the function is re-run with the good
+ credentials.
+ failed_last_time is used to prevent consecutive bad credential
+ attempts. So the over all bad credential frequency will be lower
+ than that requested, but not significantly.
+ """
+ if not failed_last_time:
+ if (self.badpassword_frequency and
+ random.random() < self.badpassword_frequency):
+ try:
+ f(bad)
+ except Exception:
+ # Ignore any exceptions as the operation may fail
+ # as it's being performed with bad credentials
+ pass
+ failed_last_time = True
+ else:
+ failed_last_time = False
+
+ result = f(good)
+ return (result, failed_last_time)
+
+ def generate_user_creds(self):
+ """Generate the conversation specific user Credentials.
+
+ Each Conversation has an associated user account used to simulate
+ any non Administrative user traffic.
+
+ Generates user credentials with good and bad passwords and ldap
+ simple bind credentials with good and bad passwords.
+ """
+ self.user_creds = Credentials()
+ self.user_creds.guess(self.lp)
+ self.user_creds.set_workstation(self.netbios_name)
+ self.user_creds.set_password(self.userpass)
+ self.user_creds.set_username(self.username)
+ self.user_creds.set_domain(self.domain)
+ self.user_creds.set_kerberos_state(self.kerberos_state)
+
+ self.user_creds_bad = Credentials()
+ self.user_creds_bad.guess(self.lp)
+ self.user_creds_bad.set_workstation(self.netbios_name)
+ self.user_creds_bad.set_password(self.userpass[:-4])
+ self.user_creds_bad.set_username(self.username)
+ self.user_creds_bad.set_kerberos_state(self.kerberos_state)
+
+ # Credentials for ldap simple bind.
+ self.simple_bind_creds = Credentials()
+ self.simple_bind_creds.guess(self.lp)
+ self.simple_bind_creds.set_workstation(self.netbios_name)
+ self.simple_bind_creds.set_password(self.userpass)
+ self.simple_bind_creds.set_username(self.username)
+ self.simple_bind_creds.set_gensec_features(
+ self.simple_bind_creds.get_gensec_features() | gensec.FEATURE_SEAL)
+ self.simple_bind_creds.set_kerberos_state(self.kerberos_state)
+ self.simple_bind_creds.set_bind_dn(self.user_dn)
+
+ self.simple_bind_creds_bad = Credentials()
+ self.simple_bind_creds_bad.guess(self.lp)
+ self.simple_bind_creds_bad.set_workstation(self.netbios_name)
+ self.simple_bind_creds_bad.set_password(self.userpass[:-4])
+ self.simple_bind_creds_bad.set_username(self.username)
+ self.simple_bind_creds_bad.set_gensec_features(
+ self.simple_bind_creds_bad.get_gensec_features() |
+ gensec.FEATURE_SEAL)
+ self.simple_bind_creds_bad.set_kerberos_state(self.kerberos_state)
+ self.simple_bind_creds_bad.set_bind_dn(self.user_dn)
+
+ def generate_machine_creds(self):
+ """Generate the conversation specific machine Credentials.
+
+ Each Conversation has an associated machine account.
+
+ Generates machine credentials with good and bad passwords.
+ """
+
+ self.machine_creds = Credentials()
+ self.machine_creds.guess(self.lp)
+ self.machine_creds.set_workstation(self.netbios_name)
+ self.machine_creds.set_secure_channel_type(SEC_CHAN_BDC)
+ self.machine_creds.set_password(self.machinepass)
+ self.machine_creds.set_username(self.netbios_name + "$")
+ self.machine_creds.set_domain(self.domain)
+ self.machine_creds.set_kerberos_state(self.kerberos_state)
+
+ self.machine_creds_bad = Credentials()
+ self.machine_creds_bad.guess(self.lp)
+ self.machine_creds_bad.set_workstation(self.netbios_name)
+ self.machine_creds_bad.set_secure_channel_type(SEC_CHAN_BDC)
+ self.machine_creds_bad.set_password(self.machinepass[:-4])
+ self.machine_creds_bad.set_username(self.netbios_name + "$")
+ self.machine_creds_bad.set_kerberos_state(self.kerberos_state)
+
+ def get_matching_dn(self, pattern, attributes=None):
+ # If the pattern is an empty string, we assume ROOTDSE,
+ # Otherwise we try adding or removing DC suffixes, then
+ # shorter leading patterns until we hit one.
+ # e.g if there is no CN,CN,CN,CN,DC,DC
+ # we first try CN,CN,CN,CN,DC
+ # and CN,CN,CN,CN,DC,DC,DC
+ # then change to CN,CN,CN,DC,DC
+ # and as last resort we use the base_dn
+ attr_clue = self.attribute_clue_map.get(attributes)
+ if attr_clue:
+ return random.choice(attr_clue)
+
+ pattern = pattern.upper()
+ while pattern:
+ if pattern in self.dn_map:
+ return random.choice(self.dn_map[pattern])
+ # chop one off the front and try it all again.
+ pattern = pattern[3:]
+
+ return self.base_dn
+
+ def get_dcerpc_connection(self, new=False):
+ guid = '12345678-1234-abcd-ef00-01234567cffb' # RPC_NETLOGON UUID
+ if self.dcerpc_connections and not new:
+ return self.dcerpc_connections[-1]
+ c = ClientConnection("ncacn_ip_tcp:%s" % self.server,
+ (guid, 1), self.lp)
+ self.dcerpc_connections.append(c)
+ return c
+
+ def get_srvsvc_connection(self, new=False):
+ if self.srvsvc_connections and not new:
+ return self.srvsvc_connections[-1]
+
+ def connect(creds):
+ return srvsvc.srvsvc("ncacn_np:%s" % (self.server),
+ self.lp,
+ creds)
+
+ (c, self.last_srvsvc_bad) = \
+ self.with_random_bad_credentials(connect,
+ self.user_creds,
+ self.user_creds_bad,
+ self.last_srvsvc_bad)
+
+ self.srvsvc_connections.append(c)
+ return c
+
+ def get_lsarpc_connection(self, new=False):
+ if self.lsarpc_connections and not new:
+ return self.lsarpc_connections[-1]
+
+ def connect(creds):
+ binding_options = 'schannel,seal,sign'
+ return lsa.lsarpc("ncacn_ip_tcp:%s[%s]" %
+ (self.server, binding_options),
+ self.lp,
+ creds)
+
+ (c, self.last_lsarpc_bad) = \
+ self.with_random_bad_credentials(connect,
+ self.machine_creds,
+ self.machine_creds_bad,
+ self.last_lsarpc_bad)
+
+ self.lsarpc_connections.append(c)
+ return c
+
+ def get_lsarpc_named_pipe_connection(self, new=False):
+ if self.lsarpc_connections_named and not new:
+ return self.lsarpc_connections_named[-1]
+
+ def connect(creds):
+ return lsa.lsarpc("ncacn_np:%s" % (self.server),
+ self.lp,
+ creds)
+
+ (c, self.last_lsarpc_named_bad) = \
+ self.with_random_bad_credentials(connect,
+ self.machine_creds,
+ self.machine_creds_bad,
+ self.last_lsarpc_named_bad)
+
+ self.lsarpc_connections_named.append(c)
+ return c
+
+ def get_drsuapi_connection_pair(self, new=False, unbind=False):
+ """get a (drs, drs_handle) tuple"""
+ if self.drsuapi_connections and not new:
+ c = self.drsuapi_connections[-1]
+ return c
+
+ def connect(creds):
+ binding_options = 'seal'
+ binding_string = "ncacn_ip_tcp:%s[%s]" %\
+ (self.server, binding_options)
+ return drsuapi.drsuapi(binding_string, self.lp, creds)
+
+ (drs, self.last_drsuapi_bad) = \
+ self.with_random_bad_credentials(connect,
+ self.user_creds,
+ self.user_creds_bad,
+ self.last_drsuapi_bad)
+
+ (drs_handle, supported_extensions) = drs_DsBind(drs)
+ c = (drs, drs_handle)
+ self.drsuapi_connections.append(c)
+ return c
+
+ def get_ldap_connection(self, new=False, simple=False):
+ if self.ldap_connections and not new:
+ return self.ldap_connections[-1]
+
+ def simple_bind(creds):
+ """
+ To run simple bind against Windows, we need to run
+ following commands in PowerShell:
+
+ Install-windowsfeature ADCS-Cert-Authority
+ Install-AdcsCertificationAuthority -CAType EnterpriseRootCA
+ Restart-Computer
+
+ """
+ return SamDB('ldaps://%s' % self.server,
+ credentials=creds,
+ lp=self.lp)
+
+ def sasl_bind(creds):
+ return SamDB('ldap://%s' % self.server,
+ credentials=creds,
+ lp=self.lp)
+ if simple:
+ (samdb, self.last_simple_bind_bad) = \
+ self.with_random_bad_credentials(simple_bind,
+ self.simple_bind_creds,
+ self.simple_bind_creds_bad,
+ self.last_simple_bind_bad)
+ else:
+ (samdb, self.last_bind_bad) = \
+ self.with_random_bad_credentials(sasl_bind,
+ self.user_creds,
+ self.user_creds_bad,
+ self.last_bind_bad)
+
+ self.ldap_connections.append(samdb)
+ return samdb
+
+ def get_samr_context(self, new=False):
+ if not self.samr_contexts or new:
+ self.samr_contexts.append(
+ SamrContext(self.server, lp=self.lp, creds=self.creds))
+ return self.samr_contexts[-1]
+
+ def get_netlogon_connection(self):
+
+ if self.netlogon_connection:
+ return self.netlogon_connection
+
+ def connect(creds):
+ return netlogon.netlogon("ncacn_ip_tcp:%s[schannel,seal]" %
+ (self.server),
+ self.lp,
+ creds)
+ (c, self.last_netlogon_bad) = \
+ self.with_random_bad_credentials(connect,
+ self.machine_creds,
+ self.machine_creds_bad,
+ self.last_netlogon_bad)
+ self.netlogon_connection = c
+ return c
+
+ def guess_a_dns_lookup(self):
+ return (self.realm, 'A')
+
+ def get_authenticator(self):
+ auth = self.machine_creds.new_client_authenticator()
+ current = netr_Authenticator()
+ current.cred.data = [x if isinstance(x, int) else ord(x)
+ for x in auth["credential"]]
+ current.timestamp = auth["timestamp"]
+
+ subsequent = netr_Authenticator()
+ return (current, subsequent)
+
+ def write_stats(self, filename, **kwargs):
+ """Write arbitrary key/value pairs to a file in our stats directory in
+ order for them to be picked up later by another process working out
+ statistics."""
+ filename = os.path.join(self.statsdir, filename)
+ f = open(filename, 'w')
+ for k, v in kwargs.items():
+ print("%s: %s" % (k, v), file=f)
+ f.close()
+
+
class SamrContext(object):
    """State/Context associated with a samr connection.

    Holds the lazily-created connection plus the handles (connect, domain,
    group, user) and lookup results that samr operations accumulate.
    """
    def __init__(self, server, lp=None, creds=None):
        # handles and cached results all start out unset
        for attr in ('connection', 'handle', 'domain_handle', 'domain_sid',
                     'group_handle', 'user_handle', 'rids'):
            setattr(self, attr, None)
        self.server = server
        self.lp = lp
        self.creds = creds

    def get_connection(self):
        """Return the samr connection, creating it on first use."""
        if not self.connection:
            binding = "ncacn_ip_tcp:%s[seal]" % (self.server)
            self.connection = samr.samr(binding,
                                        lp_ctx=self.lp,
                                        credentials=self.creds)
        return self.connection

    def get_handle(self):
        """Return the samr connect handle, creating it on first use."""
        if not self.handle:
            conn = self.get_connection()
            self.handle = conn.Connect2(None,
                                        security.SEC_FLAG_MAXIMUM_ALLOWED)
        return self.handle
+
+
class Conversation(object):
    """Details of a conversation between a simulated client and a server.

    A conversation is a time-ordered list of Packets between one pair of
    endpoints.  client_balance tracks which endpoint behaves more like the
    client (negative means endpoints[0] looks like the client).
    """
    def __init__(self, start_time=None, endpoints=None, seq=(),
                 conversation_id=None):
        """
        :param start_time: absolute start time in seconds (may be None)
        :param endpoints: the pair of communicating endpoints
        :param seq: optional short-packet tuples used to pre-populate
        :param conversation_id: identifier used in logs and stats files
        """
        self.start_time = start_time
        self.endpoints = endpoints
        self.packets = []
        self.msg = random_colour_print(endpoints)
        self.client_balance = 0.0
        self.conversation_id = conversation_id
        for p in seq:
            self.add_short_packet(*p)

    def __cmp__(self, other):
        # Order by start time; a None start time sorts first.
        if self.start_time is None:
            if other.start_time is None:
                return 0
            return -1
        if other.start_time is None:
            return 1
        return self.start_time - other.start_time

    def __lt__(self, other):
        # BUG FIX: Python 3 ignores __cmp__, so comparing or sorting
        # conversations raised TypeError.  Delegate to the __cmp__
        # ordering (no __eq__ is added, preserving identity hashing).
        return self.__cmp__(other) < 0

    def add_packet(self, packet):
        """Add a packet object to this conversation, making a local copy with
        a conversation-relative timestamp."""
        p = packet.copy()

        # the first packet fixes the conversation's start and endpoints
        if self.start_time is None:
            self.start_time = p.timestamp

        if self.endpoints is None:
            self.endpoints = p.endpoints

        if p.endpoints != self.endpoints:
            raise FakePacketError("Conversation endpoints %s don't match"
                                  "packet endpoints %s" %
                                  (self.endpoints, p.endpoints))

        p.timestamp -= self.start_time

        # packets from endpoints[0] push the balance negative (client-ish)
        if p.src == p.endpoints[0]:
            self.client_balance -= p.client_score()
        else:
            self.client_balance += p.client_score()

        if p.is_really_a_packet():
            self.packets.append(p)

    def add_short_packet(self, timestamp, protocol, opcode, extra,
                         client=True, skip_unused_packets=True):
        """Create a packet from a timestamp, and 'protocol:opcode' pair, and a
        (possibly empty) list of extra data. If client is True, assume
        this packet is from the client to the server.
        """
        if skip_unused_packets and not is_a_real_packet(protocol, opcode):
            return

        src, dest = self.guess_client_server()
        if not client:
            src, dest = dest, src
        key = (protocol, opcode)
        desc = OP_DESCRIPTIONS.get(key, '')
        ip_protocol = IP_PROTOCOLS.get(protocol, '06')
        packet = Packet(timestamp - self.start_time, ip_protocol,
                        '', src, dest,
                        protocol, opcode, desc, extra)
        # XXX we're assuming the timestamp is already adjusted for
        # this conversation?
        # XXX should we adjust client balance for guessed packets?
        if packet.src == packet.endpoints[0]:
            self.client_balance -= packet.client_score()
        else:
            self.client_balance += packet.client_score()
        if packet.is_really_a_packet():
            self.packets.append(packet)

    def __str__(self):
        return ("<Conversation %s %s starting %.3f %d packets>" %
                (self.conversation_id, self.endpoints, self.start_time,
                 len(self.packets)))

    __repr__ = __str__

    def __iter__(self):
        return iter(self.packets)

    def __len__(self):
        return len(self.packets)

    def get_duration(self):
        """Return the time between the first and last packet (0 if fewer
        than two packets)."""
        if len(self.packets) < 2:
            return 0
        return self.packets[-1].timestamp - self.packets[0].timestamp

    def replay_as_summary_lines(self):
        """Render all packets as summary-file lines."""
        return [p.as_summary(self.start_time) for p in self.packets]

    def replay_with_delay(self, start, context=None, account=None):
        """Replay the conversation at the right time.
        (We're already in a fork).

        :return: (max_gap, miss, max_sleep_miss) timing statistics
        """
        # first we sleep until the first packet
        t = self.start_time
        now = time.time() - start
        gap = t - now
        sleep_time = gap - SLEEP_OVERHEAD
        if sleep_time > 0:
            time.sleep(sleep_time)

        miss = (time.time() - start) - t
        self.msg("starting %s [miss %.3f]" % (self, miss))

        max_gap = 0.0
        max_sleep_miss = 0.0
        # packet times are relative to conversation start
        p_start = time.time()
        for p in self.packets:
            now = time.time() - p_start
            gap = now - p.timestamp
            # positive gap means we're behind schedule
            if gap > max_gap:
                max_gap = gap
            if gap < 0:
                sleep_time = -gap - SLEEP_OVERHEAD
                if sleep_time > 0:
                    time.sleep(sleep_time)
                t = time.time() - p_start
                if t - p.timestamp > max_sleep_miss:
                    max_sleep_miss = t - p.timestamp

            p.play(self, context)

        return max_gap, miss, max_sleep_miss

    def guess_client_server(self, server_clue=None):
        """Have a go at deciding who is the server and who is the client.
        returns (client, server)
        """
        a, b = self.endpoints

        if self.client_balance < 0:
            return (a, b)

        # in the absence of a clue, we will fall through to assuming
        # the lowest number is the server (which is usually true).

        if self.client_balance == 0 and server_clue == b:
            return (a, b)

        return (b, a)

    def forget_packets_outside_window(self, s, e):
        """Prune any packets outside the time window we're interested in

        :param s: start of the window
        :param e: end of the window
        """
        self.packets = [p for p in self.packets if s <= p.timestamp <= e]
        self.start_time = self.packets[0].timestamp if self.packets else None

    def renormalise_times(self, start_time):
        """Adjust the packet start times relative to the new start time."""
        for p in self.packets:
            p.timestamp -= start_time

        if self.start_time is not None:
            self.start_time -= start_time
+
+
class DnsHammer(Conversation):
    """A lightweight conversation that generates a lot of dns:0 packets on
    the fly"""

    def __init__(self, dns_rate, duration, query_file=None):
        howmany = int(dns_rate * duration)
        # pre-compute the (sorted) firing times for the whole run
        self.times = sorted(random.uniform(0, duration)
                            for _ in range(howmany))
        self.rate = dns_rate
        self.duration = duration
        self.start_time = 0
        self.query_choices = self._get_query_choices(query_file=query_file)

    def __str__(self):
        return ("<DnsHammer %d packets over %.1fs (rate %.2f)>" %
                (len(self.times), self.duration, self.rate))

    def _get_query_choices(self, query_file=None):
        """
        Read dns query choices from a file, or return default

        rname may contain format string like `{realm}`
        realm can be fetched from context.realm
        """
        if not query_file:
            return [
                (0, '{realm}', 'A', 'yes'),
                (1, '{realm}', 'NS', 'yes'),
                (2, '*.{realm}', 'A', 'no'),
                (3, '*.{realm}', 'NS', 'no'),
                (10, '_msdcs.{realm}', 'A', 'yes'),
                (11, '_msdcs.{realm}', 'NS', 'yes'),
                (20, 'nx.realm.com', 'A', 'no'),
                (21, 'nx.realm.com', 'NS', 'no'),
                (22, '*.nx.realm.com', 'A', 'no'),
                (23, '*.nx.realm.com', 'NS', 'no'),
            ]

        with open(query_file, 'r') as f:
            content = f.read()
        choices = []
        for raw_line in content.splitlines():
            stripped = raw_line.strip()
            # skip blanks and comments
            if not stripped or stripped.startswith('#'):
                continue
            fields = stripped.split(',')
            assert len(fields) == 4
            choices.append(fields)
        return choices

    def replay(self, context=None):
        """Fire the pre-computed DNS queries at their scheduled times,
        printing one stats line per query."""
        assert context
        assert context.realm
        start = time.time()
        for when in self.times:
            gap = when - (time.time() - start)
            sleep_time = gap - SLEEP_OVERHEAD
            if sleep_time > 0:
                time.sleep(sleep_time)

            opcode, rname, rtype, exist = random.choice(self.query_choices)
            rname = rname.format(realm=context.realm)
            success = True
            packet_start = time.time()
            try:
                answers = dns_query(rname, rtype)
                if exist == 'yes' and not len(answers):
                    # expected answers but got none: count as failure
                    success = False
            except Exception:
                success = False
            finally:
                end = time.time()
                duration = end - packet_start
                print("%f\tDNS\tdns\t%s\t%f\t%s\t" % (end, opcode, duration, success))
+
+
def ingest_summaries(files, dns_mode='count'):
    """Load summary traffic files and generate Conversations from them.

    :param files: a list of filenames or open file objects
    :param dns_mode: unless 'include', DNS packets are only counted
    :return: (conversations, mean_interval, duration, dns_counts)
    """
    dns_counts = defaultdict(int)
    packets = []
    for f in files:
        if isinstance(f, str):
            f = open(f)
        print("Ingesting %s" % (f.name,), file=sys.stderr)
        for line in f:
            p = Packet.from_line(line)
            if p.protocol == 'dns' and dns_mode != 'include':
                dns_counts[p.opcode] += 1
            else:
                packets.append(p)

        f.close()

    if not packets:
        # BUG FIX: this used to return a 2-tuple ([], 0) while the normal
        # path returns a 4-tuple, breaking callers that unpack 4 values.
        return [], 0, 0, dns_counts

    start_time = min(p.timestamp for p in packets)
    last_packet = max(p.timestamp for p in packets)

    print("gathering packets into conversations", file=sys.stderr)
    conversations = OrderedDict()
    for i, p in enumerate(packets):
        p.timestamp -= start_time
        c = conversations.get(p.endpoints)
        if c is None:
            c = Conversation(conversation_id=(i + 2))
            conversations[p.endpoints] = c
        c.add_packet(p)

    # We only care about conversations with actual traffic, so we
    # filter out conversations with nothing to say. We do that here,
    # rather than earlier, because those empty packets contain useful
    # hints as to which end of the conversation was the client.
    conversation_list = []
    for c in conversations.values():
        if len(c) != 0:
            conversation_list.append(c)

    # This is obviously not correct, as many conversations will appear
    # to start roughly simultaneously at the beginning of the snapshot.
    # To which we say: oh well, so be it.
    duration = float(last_packet - start_time)
    mean_interval = len(conversations) / duration

    return conversation_list, mean_interval, duration, dns_counts
+
+
def guess_server_address(conversations):
    """Guess the server by taking the most commonly seen endpoint address.

    NOTE(review): this returns Counter.most_common()'s (address, count)
    pair rather than the bare address; confirm whether callers expect
    [0][0] before changing.  Returns None when no addresses were seen.
    """
    tally = Counter()
    for conversation in conversations:
        tally.update(conversation.endpoints)
    if not tally:
        return None
    return tally.most_common(1)[0]
+
+
def stringify_keys(x):
    """Return a copy of dict *x* with each tuple key joined into a single
    tab-separated string (for JSON serialization)."""
    return {'\t'.join(key): value for key, value in x.items()}
+
+
def unstringify_keys(x):
    """Return a copy of dict *x* with each tab-separated string key split
    back into a tuple (the inverse of stringify_keys)."""
    return {tuple(str(key).split('\t')): value for key, value in x.items()}
+
+
class TrafficModel(object):
    """An n-gram model of traffic packet sequences.

    ngrams maps each (n-1)-tuple of preceding packet types to the list of
    observed next packet types; query_details maps a packet type to the
    observed extra-argument tuples.  The model can be learnt from
    conversations, saved/loaded as JSON, and used to generate new
    conversation sequences.
    """
    def __init__(self, n=3):
        """:param n: n-gram order (state is the previous n-1 packets)"""
        self.ngrams = {}
        self.query_details = {}
        self.n = n
        self.dns_opcounts = defaultdict(int)
        self.cumulative_duration = 0.0
        # [packet count, timespan in seconds] observed while learning
        self.packet_rate = [0, 1]

    def learn(self, conversations, dns_opcounts=None):
        """Accumulate the n-gram statistics of the given conversations.

        :param conversations: Conversation objects to learn from
        :param dns_opcounts: optional dict of DNS opcode counts to merge
        """
        if dns_opcounts is None:
            dns_opcounts = {}
        prev = 0.0
        cum_duration = 0.0
        key = (NON_PACKET,) * (self.n - 1)

        server = guess_server_address(conversations)

        for k, v in dns_opcounts.items():
            self.dns_opcounts[k] += v

        if len(conversations) > 1:
            # estimate the overall packet rate across all conversations
            first = conversations[0].start_time
            total = 0
            last = first + 0.1
            for c in conversations:
                total += len(c)
                last = max(last, c.packets[-1].timestamp)

            self.packet_rate[0] = total
            self.packet_rate[1] = last - first

        for c in conversations:
            client, server = c.guess_client_server(server)
            cum_duration += c.get_duration()
            key = (NON_PACKET,) * (self.n - 1)
            for p in c:
                # model only the client side of the conversation
                if p.src != client:
                    continue

                elapsed = p.timestamp - prev
                prev = p.timestamp
                if elapsed > WAIT_THRESHOLD:
                    # add the wait as an extra state
                    wait = 'wait:%d' % (math.log(max(1.0,
                                                     elapsed * WAIT_SCALE)))
                    self.ngrams.setdefault(key, []).append(wait)
                    key = key[1:] + (wait,)

                short_p = p.as_packet_type()
                self.query_details.setdefault(short_p,
                                              []).append(tuple(p.extra))
                self.ngrams.setdefault(key, []).append(short_p)
                key = key[1:] + (short_p,)

        self.cumulative_duration += cum_duration
        # add in the end
        self.ngrams.setdefault(key, []).append(NON_PACKET)

    def save(self, f):
        """Save the model as JSON.

        :param f: a filename or a writable file object
        """
        ngrams = {}
        for k, v in self.ngrams.items():
            k = '\t'.join(k)
            ngrams[k] = dict(Counter(v))

        query_details = {}
        for k, v in self.query_details.items():
            query_details[k] = dict(Counter('\t'.join(x) if x else '-'
                                            for x in v))

        d = {
            'ngrams': ngrams,
            'query_details': query_details,
            'cumulative_duration': self.cumulative_duration,
            'packet_rate': self.packet_rate,
            'version': CURRENT_MODEL_VERSION
        }
        d['dns'] = self.dns_opcounts

        if isinstance(f, str):
            # BUG FIX: use a context manager so a file we opened ourselves
            # is flushed and closed (it was previously left to the GC,
            # risking a truncated model file).
            with open(f, 'w') as fh:
                json.dump(d, fh, indent=2)
        else:
            json.dump(d, f, indent=2)

    def load(self, f):
        """Load a model saved by save().

        :param f: a filename or a readable file object
        :raises ValueError: if the model version is missing or too old
        """
        if isinstance(f, str):
            # BUG FIX: close the file we opened ourselves (was leaked)
            with open(f) as fh:
                d = json.load(fh)
        else:
            d = json.load(f)

        try:
            version = d["version"]
            if version < REQUIRED_MODEL_VERSION:
                raise ValueError("the model file is version %d; "
                                 "version %d is required" %
                                 (version, REQUIRED_MODEL_VERSION))
        except KeyError:
            raise ValueError("the model file lacks a version number; "
                             "version %d is required" %
                             (REQUIRED_MODEL_VERSION))

        # expand the counted forms back into flat observation lists
        for k, v in d['ngrams'].items():
            k = tuple(str(k).split('\t'))
            values = self.ngrams.setdefault(k, [])
            for p, count in v.items():
                values.extend([str(p)] * count)
            values.sort()

        for k, v in d['query_details'].items():
            values = self.query_details.setdefault(str(k), [])
            for p, count in v.items():
                if p == '-':
                    values.extend([()] * count)
                else:
                    values.extend([tuple(str(p).split('\t'))] * count)
            values.sort()

        if 'dns' in d:
            for k, v in d['dns'].items():
                self.dns_opcounts[k] += v

        self.cumulative_duration = d['cumulative_duration']
        self.packet_rate = d['packet_rate']

    def construct_conversation_sequence(self, timestamp=0.0,
                                        hard_stop=None,
                                        replay_speed=1,
                                        ignore_before=0,
                                        persistence=0):
        """Construct an individual conversation packet sequence from the
        model.
        """
        c = []
        key = (NON_PACKET,) * (self.n - 1)
        if ignore_before is None:
            ignore_before = timestamp - 1

        while True:
            p = random.choice(self.ngrams.get(key, (NON_PACKET,)))
            if p == NON_PACKET:
                if timestamp < ignore_before:
                    break
                if random.random() > persistence:
                    print("ending after %s (persistence %.1f)" % (key, persistence),
                          file=sys.stderr)
                    break

                # persistence: insert an artificial wait instead of ending
                p = 'wait:%d' % random.randrange(5, 12)
                print("trying %s instead of end" % p, file=sys.stderr)

            if p in self.query_details:
                extra = random.choice(self.query_details[p])
            else:
                extra = []

            protocol, opcode = p.split(':', 1)
            if protocol == 'wait':
                log_wait_time = int(opcode) + random.random()
                wait = math.exp(log_wait_time) / (WAIT_SCALE * replay_speed)
                timestamp += wait
            else:
                log_wait = random.uniform(*NO_WAIT_LOG_TIME_RANGE)
                wait = math.exp(log_wait) / replay_speed
                timestamp += wait
                if hard_stop is not None and timestamp > hard_stop:
                    break
                if timestamp >= ignore_before:
                    c.append((timestamp, protocol, opcode, extra))

            key = key[1:] + (p,)
            if key[-2][:5] == 'wait:' and key[-1][:5] == 'wait:':
                # two waits in a row can only be caused by "persistence"
                # tricks, and will not result in any packets being found.
                # Instead we pretend this is a fresh start.
                key = (NON_PACKET,) * (self.n - 1)

        return c

    def scale_to_packet_rate(self, scale):
        """Convert a scale factor into packets per second."""
        rate_n, rate_t = self.packet_rate
        return scale * rate_n / rate_t

    def packet_rate_to_scale(self, pps):
        """Convert packets per second into a scale factor."""
        rate_n, rate_t = self.packet_rate
        return pps * rate_t / rate_n

    def generate_conversation_sequences(self, packet_rate, duration, replay_speed=1,
                                        persistence=0):
        """Generate a list of conversation descriptions from the model."""

        # We run the simulation for ten times as long as our desired
        # duration, and take the section at the end.
        lead_in = 9 * duration
        target_packets = int(packet_rate * duration)
        conversations = []
        n_packets = 0

        while n_packets < target_packets:
            start = random.uniform(-lead_in, duration)
            c = self.construct_conversation_sequence(start,
                                                     hard_stop=duration,
                                                     replay_speed=replay_speed,
                                                     ignore_before=0,
                                                     persistence=persistence)
            # will these "packets" generate actual traffic?
            # some (e.g. ldap unbind) will not generate anything
            # if the previous packets are not there, and if the
            # conversation only has those it wastes a process doing nothing.
            for timestamp, protocol, opcode, extra in c:
                if is_a_traffic_generating_packet(protocol, opcode):
                    break
            else:
                continue

            conversations.append(c)
            n_packets += len(c)

        scale = self.packet_rate_to_scale(packet_rate)
        print(("we have %d packets (target %d) in %d conversations at %.1f/s "
               "(scale %f)" % (n_packets, target_packets, len(conversations),
                               packet_rate, scale)),
              file=sys.stderr)
        conversations.sort()  # sorts by first element == start time
        return conversations
+
+
def seq_to_conversations(seq, server=1, client=2):
    """Wrap each non-empty packet sequence in a Conversation.

    The server keeps a fixed endpoint id; each conversation's client id
    counts up from *client*.
    """
    conversations = []
    next_client = client
    for s in seq:
        if not s:
            continue
        conversations.append(Conversation(s[0][0], (server, next_client), s))
        next_client += 1
    return conversations
+
+
# Map of application protocol name -> IP protocol number used in the
# summary line's ip_protocol field.  The values appear to be hex strings
# ('06' = TCP, '11' = 0x11 = 17 = UDP) -- TODO confirm against the
# summary format consumers.
IP_PROTOCOLS = {
    'dns': '11',
    'rpc_netlogon': '06',
    'kerberos': '06',      # ratio 16248:258
    'smb': '06',
    'smb2': '06',
    'ldap': '06',
    'cldap': '11',
    'lsarpc': '06',
    'samr': '06',
    'dcerpc': '06',
    'epm': '06',
    'drsuapi': '06',
    'browser': '11',
    'smb_netlogon': '11',
    'srvsvc': '06',
    'nbns': '11',
}
+
# Map of (protocol, opcode) -> human-readable operation description,
# used for the description field of expanded summary lines.
OP_DESCRIPTIONS = {
    ('browser', '0x01'): 'Host Announcement (0x01)',
    ('browser', '0x02'): 'Request Announcement (0x02)',
    ('browser', '0x08'): 'Browser Election Request (0x08)',
    ('browser', '0x09'): 'Get Backup List Request (0x09)',
    ('browser', '0x0c'): 'Domain/Workgroup Announcement (0x0c)',
    ('browser', '0x0f'): 'Local Master Announcement (0x0f)',
    ('cldap', '3'): 'searchRequest',
    ('cldap', '5'): 'searchResDone',
    ('dcerpc', '0'): 'Request',
    ('dcerpc', '11'): 'Bind',
    ('dcerpc', '12'): 'Bind_ack',
    ('dcerpc', '13'): 'Bind_nak',
    ('dcerpc', '14'): 'Alter_context',
    ('dcerpc', '15'): 'Alter_context_resp',
    ('dcerpc', '16'): 'AUTH3',
    ('dcerpc', '2'): 'Response',
    ('dns', '0'): 'query',
    ('dns', '1'): 'response',
    ('drsuapi', '0'): 'DsBind',
    ('drsuapi', '12'): 'DsCrackNames',
    ('drsuapi', '13'): 'DsWriteAccountSpn',
    ('drsuapi', '1'): 'DsUnbind',
    ('drsuapi', '2'): 'DsReplicaSync',
    ('drsuapi', '3'): 'DsGetNCChanges',
    ('drsuapi', '4'): 'DsReplicaUpdateRefs',
    ('epm', '3'): 'Map',
    ('kerberos', ''): '',
    ('ldap', '0'): 'bindRequest',
    ('ldap', '1'): 'bindResponse',
    ('ldap', '2'): 'unbindRequest',
    ('ldap', '3'): 'searchRequest',
    ('ldap', '4'): 'searchResEntry',
    ('ldap', '5'): 'searchResDone',
    ('ldap', ''): '*** Unknown ***',
    ('lsarpc', '14'): 'lsa_LookupNames',
    ('lsarpc', '15'): 'lsa_LookupSids',
    ('lsarpc', '39'): 'lsa_QueryTrustedDomainInfoBySid',
    ('lsarpc', '40'): 'lsa_SetTrustedDomainInfo',
    ('lsarpc', '6'): 'lsa_OpenPolicy',
    ('lsarpc', '76'): 'lsa_LookupSids3',
    ('lsarpc', '77'): 'lsa_LookupNames4',
    ('nbns', '0'): 'query',
    ('nbns', '1'): 'response',
    ('rpc_netlogon', '21'): 'NetrLogonDummyRoutine1',
    ('rpc_netlogon', '26'): 'NetrServerAuthenticate3',
    ('rpc_netlogon', '29'): 'NetrLogonGetDomainInfo',
    ('rpc_netlogon', '30'): 'NetrServerPasswordSet2',
    ('rpc_netlogon', '39'): 'NetrLogonSamLogonEx',
    ('rpc_netlogon', '40'): 'DsrEnumerateDomainTrusts',
    ('rpc_netlogon', '45'): 'NetrLogonSamLogonWithFlags',
    ('rpc_netlogon', '4'): 'NetrServerReqChallenge',
    ('samr', '0',): 'Connect',
    ('samr', '16'): 'GetAliasMembership',
    ('samr', '17'): 'LookupNames',
    ('samr', '18'): 'LookupRids',
    ('samr', '19'): 'OpenGroup',
    ('samr', '1'): 'Close',
    ('samr', '25'): 'QueryGroupMember',
    ('samr', '34'): 'OpenUser',
    ('samr', '36'): 'QueryUserInfo',
    ('samr', '39'): 'GetGroupsForUser',
    ('samr', '3'): 'QuerySecurity',
    ('samr', '5'): 'LookupDomain',
    ('samr', '64'): 'Connect5',
    ('samr', '6'): 'EnumDomains',
    ('samr', '7'): 'OpenDomain',
    ('samr', '8'): 'QueryDomainInfo',
    ('smb', '0x04'): 'Close (0x04)',
    ('smb', '0x24'): 'Locking AndX (0x24)',
    ('smb', '0x2e'): 'Read AndX (0x2e)',
    ('smb', '0x32'): 'Trans2 (0x32)',
    ('smb', '0x71'): 'Tree Disconnect (0x71)',
    ('smb', '0x72'): 'Negotiate Protocol (0x72)',
    ('smb', '0x73'): 'Session Setup AndX (0x73)',
    ('smb', '0x74'): 'Logoff AndX (0x74)',
    ('smb', '0x75'): 'Tree Connect AndX (0x75)',
    ('smb', '0xa2'): 'NT Create AndX (0xa2)',
    ('smb2', '0'): 'NegotiateProtocol',
    ('smb2', '11'): 'Ioctl',
    ('smb2', '14'): 'Find',
    ('smb2', '16'): 'GetInfo',
    ('smb2', '18'): 'Break',
    ('smb2', '1'): 'SessionSetup',
    ('smb2', '2'): 'SessionLogoff',
    ('smb2', '3'): 'TreeConnect',
    ('smb2', '4'): 'TreeDisconnect',
    ('smb2', '5'): 'Create',
    ('smb2', '6'): 'Close',
    ('smb2', '8'): 'Read',
    ('smb_netlogon', '0x12'): 'SAM LOGON request from client (0x12)',
    ('smb_netlogon', '0x17'): ('SAM Active Directory Response - '
                               'user unknown (0x17)'),
    ('srvsvc', '16'): 'NetShareGetInfo',
    ('srvsvc', '21'): 'NetSrvGetInfo',
}
+
+
def expand_short_packet(p, timestamp, src, dest, extra):
    """Expand a 'protocol:opcode' short form into a full tab-separated
    summary line."""
    protocol, opcode = p.split(':', 1)
    fields = [timestamp,
              IP_PROTOCOLS.get(protocol, '06'),
              '',
              src,
              dest,
              protocol,
              opcode,
              OP_DESCRIPTIONS.get((protocol, opcode), '')]
    fields.extend(extra)
    return '\t'.join(fields)
+
+
def flushing_signal_handler(signal, frame):
    """Signal handler closes standard out and error.

    Triggered by a sigterm, ensures that the log messages are flushed
    to disk and not lost.

    Note: the ``signal`` parameter shadows the ``signal`` module inside
    this function; it is the signal number passed by the interpreter.
    """
    # closing (not merely flushing) guarantees the buffers reach disk and
    # that nothing further can be written by this process
    sys.stderr.close()
    sys.stdout.close()
    # exit immediately without running interpreter cleanup handlers
    os._exit(0)
+
+
def replay_seq_in_fork(cs, start, context, account, client_id, server_id=1):
    """Fork a new process and replay the conversation sequence.

    The parent returns the child's PID immediately.  The child never
    returns: it replays the conversation, writes its stats to a
    per-conversation file under context.statsdir, and terminates via
    os._exit().
    """
    # We will need to reseed the random number generator or all the
    # clients will end up using the same sequence of random
    # numbers. random.randint() is mixed in so the initial seed will
    # have an effect here.
    seed = client_id * 1000 + random.randint(0, 999)

    # flush our buffers so messages won't be written by both sides
    sys.stdout.flush()
    sys.stderr.flush()
    pid = os.fork()
    if pid != 0:
        return pid

    # we must never return, or we'll end up running parts of the
    # parent's clean-up code. So we work in a try...finally, and
    # try to print any exceptions.
    status = 0
    c = None  # bound before the try so the except clause can report it
    try:
        random.seed(seed)
        endpoints = (server_id, client_id)
        t = cs[0][0]
        c = Conversation(t, endpoints, seq=cs, conversation_id=client_id)
        signal.signal(signal.SIGTERM, flushing_signal_handler)

        context.generate_process_local_config(account, c)
        sys.stdin.close()
        os.close(0)
        filename = os.path.join(context.statsdir, 'stats-conversation-%d' %
                                c.conversation_id)
        f = open(filename, 'w')
        try:
            sys.stdout.close()
            os.close(1)
        except IOError as e:
            LOGGER.info("stdout closing failed with %s" % e)

        sys.stdout = f
        # sleep until this conversation's scheduled start time
        now = time.time() - start
        gap = t - now
        sleep_time = gap - SLEEP_OVERHEAD
        if sleep_time > 0:
            time.sleep(sleep_time)

        max_lag, start_lag, max_sleep_miss = c.replay_with_delay(start=start,
                                                                context=context)
        print("Maximum lag: %f" % max_lag)
        print("Start lag: %f" % start_lag)
        print("Max sleep miss: %f" % max_sleep_miss)

    except Exception:
        status = 1
        print(("EXCEPTION in child PID %d, conversation %s" % (os.getpid(), c)),
              file=sys.stderr)
        # print_exc()'s first positional argument is a traceback *limit*,
        # not a file object; the keyword sends the trace to stderr.
        traceback.print_exc(file=sys.stderr)
        sys.stderr.flush()
    finally:
        sys.stderr.close()
        sys.stdout.close()
        os._exit(status)
+
+
def dnshammer_in_fork(dns_rate, duration, context, query_file=None):
    """Fork a child that generates DNS queries at dns_rate per second.

    The parent returns the child's PID.  The child runs the DnsHammer,
    writes its stats to 'stats-dns' under context.statsdir and exits
    via os._exit().
    """
    sys.stdout.flush()
    sys.stderr.flush()
    pid = os.fork()
    if pid != 0:
        return pid

    sys.stdin.close()
    os.close(0)

    try:
        sys.stdout.close()
        os.close(1)
    except IOError as e:
        # Logger.warn() is a deprecated alias; warning() is the
        # documented method.
        LOGGER.warning("stdout closing failed with %s" % e)
    filename = os.path.join(context.statsdir, 'stats-dns')
    sys.stdout = open(filename, 'w')

    try:
        status = 0
        signal.signal(signal.SIGTERM, flushing_signal_handler)
        hammer = DnsHammer(dns_rate, duration, query_file=query_file)
        hammer.replay(context=context)
    except Exception:
        status = 1
        print(("EXCEPTION in child PID %d, the DNS hammer" % (os.getpid())),
              file=sys.stderr)
        # print_exc()'s first positional argument is a traceback *limit*,
        # not a file object; the keyword sends the trace to stderr.
        traceback.print_exc(file=sys.stderr)
    finally:
        sys.stderr.close()
        sys.stdout.close()
        os._exit(status)
+
+
def replay(conversation_seq,
           host=None,
           creds=None,
           lp=None,
           accounts=None,
           dns_rate=0,
           dns_query_file=None,
           duration=None,
           latency_timeout=1.0,
           stop_on_any_error=False,
           **kwargs):
    """Replay the traffic model against the server.

    One child process is forked per conversation (plus an optional DNS
    hammer child); the parent waits for them, then escalates from
    SIGTERM to SIGKILL on any that remain.

    :param conversation_seq: sequence of conversations, each a sequence
        of timestamped packets (element [0] of a packet is its time)
    :param host: server to direct the traffic at
    :param creds: credentials for the replay clients
    :param lp: loadparm context
    :param accounts: per-conversation account details; must supply at
        least one entry per conversation
    :param dns_rate: DNS queries per second (0 disables the hammer)
    :param dns_query_file: optional query file for the DNS hammer
    :param duration: total runtime; defaults to the last conversation's
        final packet time plus latency_timeout
    :param latency_timeout: grace period added to the computed duration
    :param stop_on_any_error: abort the whole run if any child exits
        with a non-zero status
    :param kwargs: passed through to ReplayContext
    """

    context = ReplayContext(server=host,
                            creds=creds,
                            lp=lp,
                            total_conversations=len(conversation_seq),
                            **kwargs)

    if len(accounts) < len(conversation_seq):
        raise ValueError(("we have %d accounts but %d conversations" %
                          (len(accounts), len(conversation_seq))))

    # Set the process group so that the calling scripts are not killed
    # when the forked child processes are killed.
    os.setpgrp()

    # we delay the start by a bit to allow all the forks to get up and
    # running.
    delay = len(conversation_seq) * 0.02
    start = time.time() + delay

    if duration is None:
        # end slightly after the last packet of the last conversation
        # to start. Conversations other than the last could still be
        # going, but we don't care.
        duration = conversation_seq[-1][-1][0] + latency_timeout

    print("We will start in %.1f seconds" % delay,
          file=sys.stderr)
    print("We will stop after %.1f seconds" % (duration + delay),
          file=sys.stderr)
    print("runtime %.1f seconds" % duration,
          file=sys.stderr)

    # give one second grace for packets to finish before killing begins
    end = start + duration + 1.0

    LOGGER.info("Replaying traffic for %u conversations over %d seconds"
                % (len(conversation_seq), duration))

    context.write_stats('intentions',
                        Planned_conversations=len(conversation_seq),
                        Planned_packets=sum(len(x) for x in conversation_seq))

    # children maps child PID -> client id (1 is reserved for the DNS hammer)
    children = {}
    try:
        if dns_rate:
            pid = dnshammer_in_fork(dns_rate, duration, context,
                                    query_file=dns_query_file)
            children[pid] = 1

        for i, cs in enumerate(conversation_seq):
            account = accounts[i]
            client_id = i + 2
            pid = replay_seq_in_fork(cs, start, context, account, client_id)
            children[pid] = client_id

        # HERE, we are past all the forks
        t = time.time()
        print("all forks done in %.1f seconds, waiting %.1f" %
              (t - start + delay, t - start),
              file=sys.stderr)

        # reap children as they finish, until the deadline passes or
        # none remain
        while time.time() < end and children:
            time.sleep(0.003)
            try:
                pid, status = os.waitpid(-1, os.WNOHANG)
            except OSError as e:
                if e.errno != ECHILD:  # no child processes
                    raise
                break
            if pid:
                c = children.pop(pid, None)
                if DEBUG_LEVEL > 0:
                    print(("process %d finished conversation %d;"
                           " %d to go" %
                           (pid, c, len(children))), file=sys.stderr)
                if stop_on_any_error and status != 0:
                    break

    except Exception:
        print("EXCEPTION in parent", file=sys.stderr)
        traceback.print_exc()
    finally:
        context.write_stats('unfinished',
                            Unfinished_conversations=len(children))

        # escalate: SIGTERM twice, then SIGKILL
        for s in (15, 15, 9):
            print(("killing %d children with -%d" %
                   (len(children), s)), file=sys.stderr)
            for pid in children:
                try:
                    os.kill(pid, s)
                except OSError as e:
                    if e.errno != ESRCH:  # don't fail if it has already died
                        raise
            time.sleep(0.5)
            end = time.time() + 1
            while children:
                try:
                    pid, status = os.waitpid(-1, os.WNOHANG)
                except OSError as e:
                    if e.errno != ECHILD:
                        raise
                    # NOTE(review): on ECHILD, `pid` keeps its previous
                    # value here, so the pop below can misfire — confirm.
                if pid != 0:
                    c = children.pop(pid, None)
                    if c is None:
                        print("children is %s, no pid found" % children)
                        sys.stderr.flush()
                        sys.stdout.flush()
                        os._exit(1)
                    print(("kill -%d %d KILLED conversation; "
                           "%d to go" %
                           (s, pid, len(children))),
                          file=sys.stderr)
                if time.time() >= end:
                    break

            if not children:
                break
            time.sleep(1)

    if children:
        print("%d children are missing" % len(children),
              file=sys.stderr)

    # there may be stragglers that were forked just as ^C was hit
    # and don't appear in the list of children. We can get them
    # with killpg, but that will also kill us, so this is^H^H would be
    # goodbye, except we cheat and pretend to use ^C (SIG_INTERRUPT),
    # so as not to have to fuss around writing signal handlers.
    try:
        os.killpg(0, 2)
    except KeyboardInterrupt:
        print("ignoring fake ^C", file=sys.stderr)
+
+
def openLdb(host, creds, lp):
    """Open an LDAP connection to the SamDB on the given host."""
    return SamDB(url="ldap://%s" % host,
                 session_info=system_session(),
                 options=['modules:paged_searches'],
                 credentials=creds,
                 lp=lp)
+
+
def ou_name(ldb, instance_id):
    """Generate an ou name from the instance id"""
    base = ldb.domain_dn()
    return "ou=instance-%d,ou=traffic_replay,%s" % (instance_id, base)
+
+
def create_ou(ldb, instance_id):
    """Create an ou, all created user and machine accounts will belong to it.

    This allows all the created resources to be cleaned up easily.

    Both the shared "ou=traffic_replay" container and the per-instance
    child OU are created; an already-existing entry (LDB error 68,
    entryAlreadyExists) is ignored so repeated runs work.
    """
    ou = ou_name(ldb, instance_id)
    # parent container first, then the instance-specific OU
    for dn in (ou.split(',', 1)[1], ou):
        try:
            ldb.add({"dn": dn,
                     "objectclass": "organizationalunit"})
        except LdbError as e:
            (status, _) = e.args
            # ignore already exists
            if status != 68:
                raise
    return ou
+
+
# ConversationAccounts holds details of the machine and user accounts
# associated with a conversation.
#
# We use a named tuple to reduce shared memory usage.
ConversationAccounts = namedtuple(
    'ConversationAccounts',
    ['netbios_name', 'machinepass', 'username', 'userpass'])
+
+
def generate_replay_accounts(ldb, instance_id, number, password):
    """Generate a series of unique machine and user account names."""
    # one (machine, user) pair per conversation; both use the same password
    return [ConversationAccounts(machine_name(instance_id, i),
                                 password,
                                 user_name(instance_id, i),
                                 password)
            for i in range(1, number + 1)]
+
+
def create_machine_account(ldb, instance_id, netbios_name, machinepass,
                           traffic_account=True):
    """Create a machine account via ldap."""
    dn = "cn=%s,%s" % (netbios_name, ou_name(ldb, instance_id))
    utf16pw = ('"%s"' % get_string(machinepass)).encode('utf-16-le')

    if traffic_account:
        # we set these bits for the machine account otherwise the replayed
        # traffic throws up NT_STATUS_NO_TRUST_SAM_ACCOUNT errors
        flags = UF_TRUSTED_FOR_DELEGATION | UF_SERVER_TRUST_ACCOUNT
    else:
        flags = UF_WORKSTATION_TRUST_ACCOUNT

    ldb.add({
        "dn": dn,
        "objectclass": "computer",
        "sAMAccountName": "%s$" % netbios_name,
        "userAccountControl": str(flags),
        "unicodePwd": utf16pw})
+
+
def create_user_account(ldb, instance_id, username, userpass):
    """Create a user account via ldap."""
    user_dn = "cn=%s,%s" % (username, ou_name(ldb, instance_id))
    quoted_pw = '"%s"' % get_string(userpass)
    ldb.add({
        "dn": user_dn,
        "objectclass": "user",
        "sAMAccountName": username,
        "userAccountControl": str(UF_NORMAL_ACCOUNT),
        "unicodePwd": quoted_pw.encode('utf-16-le')
    })

    # grant user write permission to do things like write account SPN
    sd_utils.SDUtils(ldb).dacl_add_ace(user_dn, "(A;;WP;;;PS)")
+
+
def create_group(ldb, instance_id, name):
    """Create a group via ldap."""
    group_dn = "cn=%s,%s" % (name, ou_name(ldb, instance_id))
    ldb.add({
        "dn": group_dn,
        "objectclass": "group",
        "sAMAccountName": name,
    })
+
+
def user_name(instance_id, i):
    """Generate a user name based in the instance id"""
    return "-".join(["STGU", "%d" % instance_id, "%d" % i])
+
+
def search_objectclass(ldb, objectclass='user', attr='sAMAccountName'):
    """Search objectclass, return attr in a set"""
    expression = "(objectClass=%s)" % objectclass
    results = ldb.search(expression=expression, attrs=[attr])
    names = set()
    for entry in results:
        names.add(str(entry[attr]))
    return names
+
+
def generate_users(ldb, instance_id, number, password):
    """Add users to the server"""
    existing = search_objectclass(ldb, objectclass='user')
    created = 0
    for i in range(number, 0, -1):
        name = user_name(instance_id, i)
        if name in existing:
            continue
        create_user_account(ldb, instance_id, name, password)
        created += 1
        # periodic progress report
        if created % 50 == 0:
            LOGGER.info("Created %u/%u users" % (created, number))

    return created
+
+
def machine_name(instance_id, i, traffic_account=True):
    """Generate a machine account name from instance id.

    Traffic accounts (STGM-*) correspond to a given user and carry
    different userAccountControl flags so replayed packets are processed
    correctly by the DC.  Plain filler computer accounts (PC-*) use the
    default flags, and the distinct prefix keeps them out of traffic
    generation.
    """
    prefix = "STGM" if traffic_account else "PC"
    return "%s-%d-%d" % (prefix, instance_id, i)
+
+
def generate_machine_accounts(ldb, instance_id, number, password,
                              traffic_account=True):
    """Add machine accounts to the server"""
    existing = search_objectclass(ldb, objectclass='computer')
    created = 0
    for i in range(number, 0, -1):
        name = machine_name(instance_id, i, traffic_account)
        # sAMAccountName of a computer carries a trailing '$'
        if name + "$" in existing:
            continue
        create_machine_account(ldb, instance_id, name, password,
                               traffic_account)
        created += 1
        if created % 50 == 0:
            LOGGER.info("Created %u/%u machine accounts" % (created, number))

    return created
+
+
def group_name(instance_id, i):
    """Generate a group name from instance id."""
    return "-".join(["STGG", "%d" % instance_id, "%d" % i])
+
+
def generate_groups(ldb, instance_id, number):
    """Create the required number of groups on the server."""
    existing = search_objectclass(ldb, objectclass='group')
    created = 0
    for i in range(number, 0, -1):
        name = group_name(instance_id, i)
        if name in existing:
            continue
        create_group(ldb, instance_id, name)
        created += 1
        if created % 1000 == 0:
            LOGGER.info("Created %u/%u groups" % (created, number))

    return created
+
+
def clean_up_accounts(ldb, instance_id):
    """Remove the created accounts and groups from the server."""
    try:
        ldb.delete(ou_name(ldb, instance_id), ["tree_delete:1"])
    except LdbError as e:
        (status, _) = e.args
        # LDB error 32 (noSuchObject): nothing to clean up
        if status != 32:
            raise
+
+
def generate_users_and_groups(ldb, instance_id, password,
                              number_of_users, number_of_groups,
                              group_memberships, max_members,
                              machine_accounts, traffic_accounts=True):
    """Generate the required users and groups, allocating the users to
    those groups.

    Existing accounts are reused (the generate_* helpers skip names
    already present), so this is safe to run repeatedly.
    """
    memberships_added = 0
    groups_added = 0
    computers_added = 0

    # the OU that contains everything we create (eases later clean-up)
    create_ou(ldb, instance_id)

    LOGGER.info("Generating dummy user accounts")
    users_added = generate_users(ldb, instance_id, number_of_users, password)

    LOGGER.info("Generating dummy machine accounts")
    computers_added = generate_machine_accounts(ldb, instance_id,
                                                machine_accounts, password,
                                                traffic_accounts)

    if number_of_groups > 0:
        LOGGER.info("Generating dummy groups")
        groups_added = generate_groups(ldb, instance_id, number_of_groups)

    if group_memberships > 0:
        LOGGER.info("Assigning users to groups")
        assignments = GroupAssignments(number_of_groups,
                                       groups_added,
                                       number_of_users,
                                       users_added,
                                       group_memberships,
                                       max_members)
        LOGGER.info("Adding users to groups")
        add_users_to_groups(ldb, instance_id, assignments)
        memberships_added = assignments.total()

    # NOTE(review): this warns only when some (but not all) groups are
    # new while no new users exist — confirm the intended condition.
    if (groups_added > 0 and users_added == 0 and
       number_of_groups != groups_added):
        LOGGER.warning("The added groups will contain no members")

    LOGGER.info("Added %d users (%d machines), %d groups and %d memberships" %
                (users_added, computers_added, groups_added,
                 memberships_added))
+
+
class GroupAssignments(object):
    """Plan which users belong to which groups.

    Memberships are chosen at random but weighted so that a few users
    are in many groups and a few groups hold most of the users, which
    resembles real-world directories.
    """

    def __init__(self, number_of_groups, groups_added, number_of_users,
                 users_added, group_memberships, max_members):

        self.count = 0
        self.generate_group_distribution(number_of_groups)
        self.generate_user_distribution(number_of_users, group_memberships)
        self.max_members = max_members
        self.assignments = defaultdict(list)
        self.assign_groups(number_of_groups, groups_added, number_of_users,
                           users_added, group_memberships)

    def cumulative_distribution(self, weights):
        """Turn a list of weights into a cumulative distribution.

        Each weight gets a share of 1.0 proportional to its size; the
        running totals let random.random() act as a weighted index via
        bisection.  Returns None when all weights are zero.
        """
        total = sum(weights)
        if total == 0:
            return None

        running = 0.0
        dist = []
        for w in weights:
            running += w
            dist.append(running / total)
        return dist

    def generate_user_distribution(self, num_users, num_memberships):
        """Probability distribution of a user belonging to a group.
        """
        # Pareto-distributed weights: a handful of users end up in many
        # groups, the bulk in only a few.  Larger membership counts use
        # a higher shape parameter to trim the extreme outliers (the aim
        # is that nobody belongs to more than ~500 groups).
        if num_memberships > 5000000:
            shape = 3.0
        elif num_memberships > 2000000:
            shape = 2.5
        elif num_memberships > 300000:
            shape = 2.25
        else:
            shape = 1.75

        weights = [random.paretovariate(shape) for _ in range(num_users)]

        # convert the weights to a cumulative distribution between 0.0 and 1.0
        self.user_dist = self.cumulative_distribution(weights)

    def generate_group_distribution(self, n):
        """Probability distribution of a group containing a user."""
        # power-law decay: the probability falls as the group-ID rises
        weights = [1 / (x ** 1.3) for x in range(1, n + 1)]

        self.group_weights = weights
        self.group_dist = self.cumulative_distribution(weights)

    def generate_random_membership(self):
        """Returns a randomly generated user-group membership"""
        # bisecting the cumulative distributions with a uniform random
        # number yields weighted, zero-based user/group indexes
        user = bisect.bisect(self.user_dist, random.random())
        group = bisect.bisect(self.group_dist, random.random())
        return user, group

    def users_in_group(self, group):
        """Return the list of users assigned to the given group."""
        return self.assignments[group]

    def get_groups(self):
        """Return the group numbers that have at least one assignment."""
        return self.assignments.keys()

    def cap_group_membership(self, group, max_members):
        """Prevent the group's membership from exceeding the max specified"""
        size = len(self.assignments[group])
        if size < max_members:
            return

        LOGGER.info("Group {0} has {1} members".format(group, size))

        # zero this group's weight and rebuild the cumulative
        # distribution so the group can no longer be selected
        self.group_weights[group - 1] = 0
        self.group_dist = self.cumulative_distribution(self.group_weights)

    def add_assignment(self, user, group):
        # keyed by group so the DB writes (one modify per group) can be
        # batched efficiently
        members = self.assignments[group]
        if user not in members:
            members.append(user)
            self.count += 1

        # enforce the optional cap on how big groups may grow
        if self.max_members:
            self.cap_group_membership(group, self.max_members)

    def assign_groups(self, number_of_groups, groups_added,
                      number_of_users, users_added, group_memberships):
        """Allocate users to groups.

        The intention is to have a few users that belong to most groups, while
        the majority of users belong to a few groups.

        A few groups will contain most users, with the remaining only having a
        few users.
        """

        if group_memberships <= 0:
            return

        # scale the requested memberships by the fraction of users
        # actually created during this run
        group_memberships = math.ceil(
            float(group_memberships) *
            (float(users_added) / float(number_of_users)))

        if self.max_members:
            group_memberships = min(group_memberships,
                                    self.max_members * number_of_groups)

        existing_users = number_of_users - users_added - 1
        existing_groups = number_of_groups - groups_added - 1
        while self.total() < group_memberships:
            user, group = self.generate_random_membership()

            # only record memberships that involve a newly added user
            # or group (zero-based indexes beyond the pre-existing ones)
            if group > existing_groups or user > existing_users:
                # the + 1 converts the array index to the corresponding
                # group or user number
                self.add_assignment(user + 1, group + 1)

    def total(self):
        """Total number of planned memberships."""
        return self.count
+
+
def add_users_to_groups(db, instance_id, assignments):
    """Takes the assignments of users to groups and applies them to the DB."""
    total = assignments.total()
    written = 0
    batches = 0

    for group in assignments.get_groups():
        members = assignments.users_in_group(group)
        if not members:
            continue

        # Write at most 1000 members per modify: one big modify per
        # group is efficient, but 10K+ members at once costs too much
        # memory.
        for offset in range(0, len(members), 1000):
            batch = members[offset:offset + 1000]
            add_group_members(db, instance_id, group, batch)

            written += len(batch)
            batches += 1
            if batches % 50 == 0:
                LOGGER.info("Added %u/%u memberships" % (written, total))
+
def add_group_members(db, instance_id, group, users_in_group):
    """Adds the given users to group specified."""
    ou = ou_name(db, instance_id)

    msg = ldb.Message()
    msg.dn = ldb.Dn(db, "cn=%s,%s" % (group_name(instance_id, group), ou))

    for user in users_in_group:
        user_dn = "cn=%s,%s" % (user_name(instance_id, user), ou)
        # unique element name per user; the attribute is still "member"
        key = "member-" + str(user)
        msg[key] = ldb.MessageElement(user_dn, ldb.FLAG_MOD_ADD, "member")

    db.modify(msg)
+
+
def generate_stats(statsdir, timing_file):
    """Generate and print the summary stats for a run.

    Reads every file in statsdir.  Tab-separated lines are per-packet
    timing records; "key: value" lines carry per-process summary values.
    Optionally copies the packet records to timing_file.
    """
    first = sys.float_info.max
    last = 0
    successful = 0
    failed = 0
    latencies = {}
    failures = Counter()
    unique_conversations = set()
    # tw() copies packet lines to the timing file, or does nothing
    if timing_file is not None:
        tw = timing_file.write
    else:
        def tw(x):
            pass

    tw("time\tconv\tprotocol\ttype\tduration\tsuccessful\terror\n")

    float_values = {
        'Maximum lag': 0,
        'Start lag': 0,
        'Max sleep miss': 0,
    }
    int_values = {
        'Planned_conversations': 0,
        'Planned_packets': 0,
        'Unfinished_conversations': 0,
    }

    for filename in os.listdir(statsdir):
        path = os.path.join(statsdir, filename)
        with open(path, 'r') as f:
            for line in f:
                try:
                    # packet record: time, conversation, protocol,
                    # opcode, latency, success flag
                    fields = line.rstrip('\n').split('\t')
                    conversation = fields[1]
                    protocol = fields[2]
                    packet_type = fields[3]
                    latency = float(fields[4])
                    t = float(fields[0])
                    first = min(t - latency, first)
                    last = max(t, last)

                    op = (protocol, packet_type)
                    latencies.setdefault(op, []).append(latency)
                    if fields[5] == 'True':
                        successful += 1
                    else:
                        failed += 1
                        failures[op] += 1

                    unique_conversations.add(conversation)

                    tw(line)
                except (ValueError, IndexError):
                    # not a packet record; try the "key: value" form
                    if ':' in line:
                        k, v = line.split(':', 1)
                        if k in float_values:
                            float_values[k] = max(float(v),
                                                  float_values[k])
                        elif k in int_values:
                            int_values[k] = max(int(v),
                                                int_values[k])
                        else:
                            print(line, file=sys.stderr)
                    else:
                        # not a valid line print and ignore
                        print(line, file=sys.stderr)

    # NOTE(review): duration is 0 (or negative) when no packet records
    # were found, which would make the rate divisions fail — confirm
    # callers always have at least one record.
    duration = last - first
    if successful == 0:
        success_rate = 0
    else:
        success_rate = successful / duration
    if failed == 0:
        failure_rate = 0
    else:
        failure_rate = failed / duration

    conversations = len(unique_conversations)

    print("Total conversations:   %10d" % conversations)
    print("Successful operations: %10d (%.3f per second)"
          % (successful, success_rate))
    print("Failed operations:     %10d (%.3f per second)"
          % (failed, failure_rate))

    for k, v in sorted(float_values.items()):
        print("%-28s %f" % (k.replace('_', ' ') + ':', v))
    for k, v in sorted(int_values.items()):
        print("%-28s %d" % (k.replace('_', ' ') + ':', v))

    print("Protocol    Op Code  Description                               "
          " Count       Failed         Mean       Median          "
          "95%        Range          Max")

    ops = {}
    for proto, packet in latencies:
        if proto not in ops:
            ops[proto] = set()
        ops[proto].add(packet)
    protocols = sorted(ops.keys())

    for protocol in protocols:
        packet_types = sorted(ops[protocol], key=opcode_key)
        for packet_type in packet_types:
            op = (protocol, packet_type)
            values = latencies[op]
            values = sorted(values)
            count = len(values)
            # NOTE: this rebinds 'failed' (previously the overall failure
            # count) as the per-op count; the total is no longer needed.
            failed = failures[op]
            mean = sum(values) / count
            median = calc_percentile(values, 0.50)
            percentile = calc_percentile(values, 0.95)
            rng = values[-1] - values[0]
            maxv = values[-1]
            desc = OP_DESCRIPTIONS.get(op, '')
            print("%-12s %4s %-35s %12d %12d %12.6f "
                  "%12.6f %12.6f %12.6f %12.6f"
                  % (protocol,
                     packet_type,
                     desc,
                     count,
                     failed,
                     mean,
                     median,
                     percentile,
                     rng,
                     maxv))
+
+
def opcode_key(v):
    """Sort key for the operation code to ensure that it sorts numerically"""
    try:
        n = int(v)
    except ValueError:
        # non-numeric opcodes (e.g. smb's '0x32') sort by their string form
        return v
    return "%03d" % n
+
+
def calc_percentile(values, percentile):
    """Calculate the specified percentile from the list of values.

    Assumes the list is sorted in ascending order.
    """
    if not values:
        return 0
    # linear interpolation between the two closest ranks
    rank = (len(values) - 1) * percentile
    lo = math.floor(rank)
    hi = math.ceil(rank)
    if lo == hi:
        return values[int(rank)]
    lower_part = values[int(lo)] * (hi - rank)
    upper_part = values[int(hi)] * (rank - lo)
    return lower_part + upper_part
+
+
def mk_masked_dir(*path):
    """In a testenv we end up with 0777 directories that look an alarming
    green colour with ls. Use umask to avoid that.

    The path components are joined and the directory created with mode
    0700.  The previous umask is restored even if mkdir fails (the
    original left the process umask clobbered on error).
    """
    d = os.path.join(*path)
    mask = os.umask(0o077)
    try:
        os.mkdir(d)
    finally:
        os.umask(mask)
    return d
diff --git a/python/samba/emulate/traffic_packets.py b/python/samba/emulate/traffic_packets.py
new file mode 100644
index 0000000..95c7465
--- /dev/null
+++ b/python/samba/emulate/traffic_packets.py
@@ -0,0 +1,973 @@
+# Dispatch for various request types.
+#
+# Copyright (C) Catalyst IT Ltd. 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+import os
+import ctypes
+import random
+
+from samba.net import Net
+from samba.dcerpc import security, drsuapi, nbt, lsa, netlogon, ntlmssp
+from samba.dcerpc.netlogon import netr_WorkstationInformation
+from samba.dcerpc.security import dom_sid
+from samba.netbios import Node
+from samba.ndr import ndr_pack
+from samba.credentials import (
+ CLI_CRED_NTLMv2_AUTH,
+ MUST_USE_KERBEROS,
+ DONT_USE_KERBEROS
+)
+from samba import NTSTATUSError
+from samba.ntstatus import (
+ NT_STATUS_OBJECT_NAME_NOT_FOUND,
+ NT_STATUS_NO_SUCH_DOMAIN
+)
+import samba
+import dns.resolver
+from ldb import SCOPE_BASE
+
def uint32(v):
    """Return v reinterpreted as an unsigned 32-bit integer."""
    return v & 0xffffffff
+
+
def check_runtime_error(runtime, val):
    """Return True if the exception carries the NT status code val.

    runtime may be None (no error occurred), in which case False is
    returned.  The first exception argument is reinterpreted as an
    unsigned 32-bit value before comparison.
    """
    if runtime is None:
        return False
    return uint32(runtime.args[0]) == val
+
+
# DRSUAPI name formats; packet_drsuapi_12 below picks one of these at
# random as the desired format for DsCrackNames requests.
name_formats = [
    drsuapi.DRSUAPI_DS_NAME_FORMAT_FQDN_1779,
    drsuapi.DRSUAPI_DS_NAME_FORMAT_NT4_ACCOUNT,
    drsuapi.DRSUAPI_DS_NAME_FORMAT_DISPLAY,
    drsuapi.DRSUAPI_DS_NAME_FORMAT_GUID,
    drsuapi.DRSUAPI_DS_NAME_FORMAT_CANONICAL,
    drsuapi.DRSUAPI_DS_NAME_FORMAT_USER_PRINCIPAL,
    drsuapi.DRSUAPI_DS_NAME_FORMAT_CANONICAL_EX,
    drsuapi.DRSUAPI_DS_NAME_FORMAT_SERVICE_PRINCIPAL,
    drsuapi.DRSUAPI_DS_NAME_FORMAT_SID_OR_SID_HISTORY,
    drsuapi.DRSUAPI_DS_NAME_FORMAT_DNS_DOMAIN,
    drsuapi.DRSUAPI_DS_NAME_FORMAT_UPN_AND_ALTSECID,
    drsuapi.DRSUAPI_DS_NAME_FORMAT_NT4_ACCOUNT_NAME_SANS_DOMAIN_EX,
    drsuapi.DRSUAPI_DS_NAME_FORMAT_LIST_GLOBAL_CATALOG_SERVERS,
    drsuapi.DRSUAPI_DS_NAME_FORMAT_UPN_FOR_LOGON,
    drsuapi.DRSUAPI_DS_NAME_FORMAT_LIST_SERVERS_WITH_DCS_IN_SITE,
    drsuapi.DRSUAPI_DS_NAME_FORMAT_STRING_SID_NAME,
    drsuapi.DRSUAPI_DS_NAME_FORMAT_ALT_SECURITY_IDENTITIES_NAME,
    drsuapi.DRSUAPI_DS_NAME_FORMAT_LIST_NCS,
    drsuapi.DRSUAPI_DS_NAME_FORMAT_LIST_DOMAINS,
    drsuapi.DRSUAPI_DS_NAME_FORMAT_MAP_SCHEMA_GUID,
    drsuapi.DRSUAPI_DS_NAME_FORMAT_NT4_ACCOUNT_NAME_SANS_DOMAIN,
    drsuapi.DRSUAPI_DS_NAME_FORMAT_LIST_ROLES,
    drsuapi.DRSUAPI_DS_NAME_FORMAT_LIST_INFO_FOR_SERVER,
    drsuapi.DRSUAPI_DS_NAME_FORMAT_LIST_SERVERS_FOR_DOMAIN_IN_SITE,
    drsuapi.DRSUAPI_DS_NAME_FORMAT_LIST_DOMAINS_IN_SITE,
    drsuapi.DRSUAPI_DS_NAME_FORMAT_LIST_SERVERS_IN_SITE,
    drsuapi.DRSUAPI_DS_NAME_FORMAT_LIST_SITES,
]
+
+
def warning(message):
    """Print message to stdout, highlighted white-on-red via ANSI codes."""
    text = "\033[37;41;1m" "Warning: %s" "\033[00m" % message
    print(text)
+
+###############################################################################
+#
+# Packet generation functions:
+#
+# All the packet generation functions have the following form:
+# packet_${protocol}_${opcode}(packet, conversation, context)
+#
+# The functions return true, if statistics should be collected for the packet
+# false, the packet has been ignored.
+#
+# Where:
+# protocol is the protocol, i.e. cldap, dcerpc, ...
+# opcode is the protocol op code i.e. type of the packet to be
+# generated.
+#
+# packet contains data about the captured/generated packet
+# provides any extra data needed to generate the packet
+#
+# conversation Details of the current client/server interaction
+#
+# context state data for the current interaction
+#
+#
+#
+# The following protocols are not currently handled:
+# smb
+# smb2
+# browser
+# smb_netlogon
+#
+# The following drsuapi replication packets are currently ignored:
+# DsReplicaSync
+# DsGetNCChanges
+# DsReplicaUpdateRefs
+
+
# Packet generators that do NOTHING are assigned to the null_packet
# function which allows the conversation generators to notice this and
# avoid a whole lot of pointless work.
def null_packet(packet, conversation, context):
    """Ignore the packet: generate no traffic, collect no statistics."""
    return False
+
+
def packet_cldap_3(packet, conversation, context):
    """Emulate a CLDAP searchRequest by performing a DC lookup.

    finddc() asks for a writable LDAP/DS server in the realm, which
    produces equivalent CLDAP traffic on the wire.
    """
    # searchRequest
    net = Net(creds=context.creds, lp=context.lp)
    net.finddc(domain=context.lp.get('realm'),
               flags=(nbt.NBT_SERVER_LDAP |
                      nbt.NBT_SERVER_DS |
                      nbt.NBT_SERVER_WRITABLE))
    return True
+
+
packet_cldap_5 = null_packet
# searchResDone
# Server response, so should be ignored

packet_dcerpc_0 = null_packet
# Request
# Can be ignored, it's the continuation of an existing conversation

packet_dcerpc_2 = null_packet
# Response (the original comment said "Request")
# Server response, so should be ignored

packet_dcerpc_3 = null_packet
# NOTE(review): presumably a server-originated PDU (fault?) — no
# comment in the original; confirm before relying on this.

packet_dcerpc_11 = null_packet
# Bind
# creation of the rpc dcerpc connection is managed by the higher level
# protocol drivers. So we ignore it when generating traffic


packet_dcerpc_12 = null_packet
# Bind_ack
# Server response, so should be ignored


packet_dcerpc_13 = null_packet
# Bind_nak
# Server response, so should be ignored


packet_dcerpc_14 = null_packet
# Alter_context
# Generated as part of the connect process
+
+
def packet_dcerpc_15(packet, conversation, context):
    """Alter_context_resp: implies GSSAPI/krb5 was used in the capture.

    Generates no traffic, but emits a diagnostic if the configured
    credentials have Kerberos disabled, since that contradicts the
    captured conversation.
    """
    kerberos_state = context.user_creds.get_kerberos_state()
    if kerberos_state == DONT_USE_KERBEROS:
        warning("Kerberos disabled but have dcerpc Alter_context_resp "
                "indicating Kerberos was used")
    return False
+
+
def packet_dcerpc_16(packet, conversation, context):
    """AUTH3: implies NTLMSSP was used in the capture.

    Generates no traffic, but emits a diagnostic if the configured
    credentials require Kerberos, since that contradicts the captured
    conversation.
    """
    kerberos_state = context.user_creds.get_kerberos_state()
    if kerberos_state == MUST_USE_KERBEROS:
        warning("Kerberos enabled but have dcerpc AUTH3 "
                "indicating NTLMSSP was used")
    return False
+
+
def packet_dns_0(packet, conversation, context):
    """Replay a DNS query using a name/record-type guessed by the context."""
    # query
    name, rtype = context.guess_a_dns_lookup()
    # NOTE(review): dns.resolver.query() is deprecated in dnspython 2.x
    # in favour of resolve() — confirm which dnspython versions are
    # supported before changing.
    dns.resolver.query(name, rtype)
    return True


packet_dns_1 = null_packet
# response
# Server response, so should be ignored
+
+
def packet_drsuapi_0(packet, conversation, context):
    """DsBind: establish (or fetch) a drsuapi connection pair."""
    # DsBind
    context.get_drsuapi_connection_pair(True)
    return True


# All NAME_FORMAT constants exported by the drsuapi module.
# NOTE(review): appears unused in this file (packet_drsuapi_12 uses the
# hand-maintained name_formats list above) — confirm before removing.
NAME_FORMATS = [getattr(drsuapi, _x) for _x in dir(drsuapi)
                if 'NAME_FORMAT' in _x]
+
+
def packet_drsuapi_12(packet, conversation, context):
    """DsCrackNames: ask the DC to translate the server's name.

    A random desired name format is chosen from name_formats to
    exercise the different DsCrackNames code paths.
    """
    drs, handle = context.get_drsuapi_connection_pair()

    names = drsuapi.DsNameString()
    names.str = context.server

    req = drsuapi.DsNameRequest1()
    req.format_flags = 0
    req.format_offered = 7
    req.format_desired = random.choice(name_formats)
    req.codepage = 1252   # Western European (Latin-1) code page
    req.language = 1033   # LCID 1033 is US English (the original
                          # comment guessed "German", which is 1031)
    req.count = 1
    req.names = [names]

    (result, ctr) = drs.DsCrackNames(handle, 1, req)
    return True
+
+
def packet_drsuapi_13(packet, conversation, context):
    """DsWriteAccountSpn: replace the SPN list on the user's own object."""
    spn = drsuapi.DsNameString()
    spn.str = 'foo/{}'.format(context.username)

    request = drsuapi.DsWriteAccountSpnRequest1()
    request.operation = drsuapi.DRSUAPI_DS_SPN_OPERATION_REPLACE
    request.unknown1 = 0  # Unused, must be 0
    request.object_dn = context.user_dn
    request.count = 1  # only 1 name
    request.spn_names = [spn]

    (drs, handle) = context.get_drsuapi_connection_pair()
    (level, res) = drs.DsWriteAccountSpn(handle, 1, request)
    return True
+
+
def packet_drsuapi_1(packet, conversation, context):
    """DsUnbind: unbind and discard the most recent drsuapi connection."""
    drs, handle = context.get_drsuapi_connection_pair()
    drs.DsUnbind(handle)
    del context.drsuapi_connections[-1]
    return True
+
+
packet_drsuapi_2 = null_packet
# DsReplicaSync
# This is between DCs, triggered on a DB change
# Ignoring for now


packet_drsuapi_3 = null_packet
# DsGetNCChanges
# This is between DCs, triggered by a DB operation,
# or DsReplicaSync between DCs.
# Ignoring for now


packet_drsuapi_4 = null_packet
# DsReplicaUpdateRefs
# Ignoring for now


packet_epm_3 = null_packet
# Endpoint mapper Map request
# Will be generated by higher level protocol calls
+
+
def packet_kerberos_(packet, conversation, context):
    """Any kerberos packet: switch every credential set to Kerberos.

    Kerberos packets are not generated explicitly; their presence in the
    capture is only a hint to enable Kerberos for the rest of the
    conversation.
    """
    all_creds = (context.user_creds,
                 context.user_creds_bad,
                 context.machine_creds,
                 context.machine_creds_bad,
                 context.creds)
    for creds in all_creds:
        creds.set_kerberos_state(MUST_USE_KERBEROS)
    return False
+
+
packet_ldap_ = null_packet
# Unknown LDAP message type
# The ldap payload was probably encrypted, so just ignore it.
+
+
def packet_ldap_0(packet, conversation, context):
    """bindRequest: open a new LDAP connection.

    The captured bind flavour (index 5 of packet.extra) selects a simple
    bind ("simple") or a SASL bind (anything else).
    """
    want_simple = (packet.extra[5] == "simple")
    context.get_ldap_connection(new=True, simple=want_simple)
    return True
+
+
packet_ldap_1 = null_packet
# bindResponse
# Server response; ignored for traffic generation
+
+
def packet_ldap_2(packet, conversation, context):
    """unbindRequest: drop the most recent LDAP connection.

    Most likely the tail of a bind/unbind ping.  Slice assignment is a
    no-op when the connection list is already empty.
    """
    context.ldap_connections[-1:] = []
    return False
+
+
def packet_ldap_3(packet, conversation, context):
    """searchRequest: replay an LDAP search against a matching DN."""
    # 'expression' avoids shadowing the builtin 'filter'.
    scope, dn_sig, expression, attrs, extra, desc, oid = packet.extra
    if not scope:
        scope = SCOPE_BASE

    samdb = context.get_ldap_connection()
    dn = context.get_matching_dn(dn_sig)

    # Guess a search expression where none was recorded.  Base searches
    # are skipped: they only look up a single object.
    if (expression is None or expression == '') and scope != SCOPE_BASE:
        expression = context.guess_search_filter(attrs, dn_sig, dn)

    samdb.search(dn,
                 expression=expression,
                 scope=int(scope),
                 attrs=attrs.split(','),
                 controls=["paged_results:1:1000"])
    return True
+
+
packet_ldap_4 = null_packet
# searchResEntry
# Server response ignored for traffic generation


packet_ldap_5 = null_packet
# Server response ignored for traffic generation

# Further LDAP message types that generate no client traffic:
packet_ldap_6 = null_packet

packet_ldap_7 = null_packet

packet_ldap_8 = null_packet

packet_ldap_9 = null_packet

packet_ldap_16 = null_packet

# lsarpc operations that are not replayed:
packet_lsarpc_0 = null_packet
# lsarClose

packet_lsarpc_1 = null_packet
# lsarDelete

packet_lsarpc_2 = null_packet
# lsarEnumeratePrivileges

packet_lsarpc_3 = null_packet
# LsarQuerySecurityObject

packet_lsarpc_4 = null_packet
# LsarSetSecurityObject

packet_lsarpc_5 = null_packet
# LsarChangePassword

packet_lsarpc_6 = null_packet
# lsa_OpenPolicy
# We ignore this, but take it as a hint that the lsarpc handle should
# be over a named pipe.
#
+
+
def packet_lsarpc_14(packet, conversation, context):
    """lsa_LookupNames: resolve two fixed well-known names to SIDs.

    Opens a policy handle over the named-pipe lsarpc connection first.
    """
    c = context.get_lsarpc_named_pipe_connection()

    objectAttr = lsa.ObjectAttribute()
    pol_handle = c.OpenPolicy2(u'', objectAttr,
                               security.SEC_FLAG_MAXIMUM_ALLOWED)

    sids = lsa.TransSidArray()  # filled in by the server
    names = [lsa.String("This Organization"),
             lsa.String("Digest Authentication")]
    level = lsa.LSA_LOOKUP_NAMES_ALL
    count = 0
    c.LookupNames(pol_handle, names, sids, level, count)
    return True
+
+
def packet_lsarpc_15(packet, conversation, context):
    """lsa_LookupSids: resolve a fixed well-known SID (S-1-5-7) to a name.

    Opens a policy handle over the named-pipe lsarpc connection first.
    """
    c = context.get_lsarpc_named_pipe_connection()

    objectAttr = lsa.ObjectAttribute()
    pol_handle = c.OpenPolicy2(u'', objectAttr,
                               security.SEC_FLAG_MAXIMUM_ALLOWED)

    sids = lsa.SidArray()
    sid = lsa.SidPtr()

    x = dom_sid("S-1-5-7")
    sid.sid = x
    sids.sids = [sid]
    sids.num_sids = 1
    names = lsa.TransNameArray()  # filled in by the server
    level = lsa.LSA_LOOKUP_NAMES_ALL
    count = 0

    c.LookupSids(pol_handle, sids, names, level, count)
    return True
+
+
def packet_lsarpc_39(packet, conversation, context):
    """lsa_QueryTrustedDomainInfoBySid for the local domain SID.

    Samba does not support trusted domains, so this call is expected to
    fail; only unexpected NT status codes are treated as errors.
    """
    c = context.get_lsarpc_named_pipe_connection()

    objectAttr = lsa.ObjectAttribute()

    pol_handle = c.OpenPolicy2(u'', objectAttr,
                               security.SEC_FLAG_MAXIMUM_ALLOWED)

    domsid = security.dom_sid(context.domain_sid)
    level = 1
    try:
        c.QueryTrustedDomainInfoBySid(pol_handle, domsid, level)
    except NTSTATUSError as error:
        # Object Not found is the expected result from samba,
        # while No Such Domain is the expected result from windows,
        # anything else is a failure.
        if not check_runtime_error(error, NT_STATUS_OBJECT_NAME_NOT_FOUND) \
           and not check_runtime_error(error, NT_STATUS_NO_SUCH_DOMAIN):
            raise
    return True
+
+
packet_lsarpc_40 = null_packet
# lsa_SetTrustedDomainInfo
# Not currently supported


packet_lsarpc_43 = null_packet
# LsaStorePrivateData


packet_lsarpc_44 = null_packet
# LsaRetrievePrivateData


packet_lsarpc_68 = null_packet
# LsarLookupNames3
+
+
def packet_lsarpc_76(packet, conversation, context):
    """lsa_LookupSids3: resolve a fixed well-known SID (S-1-5-7).

    Unlike LookupSids, this uses the plain lsarpc connection and needs no
    policy handle.
    """
    c = context.get_lsarpc_connection()
    sids = lsa.SidArray()
    sid = lsa.SidPtr()
    # Need a set
    x = dom_sid("S-1-5-7")
    sid.sid = x
    sids.sids = [sid]
    sids.num_sids = 1
    names = lsa.TransNameArray2()  # filled in by the server
    level = lsa.LSA_LOOKUP_NAMES_ALL
    count = 0
    lookup_options = lsa.LSA_LOOKUP_OPTION_SEARCH_ISOLATED_NAMES
    client_revision = lsa.LSA_CLIENT_REVISION_2
    c.LookupSids3(sids, names, level, count, lookup_options, client_revision)
    return True
+
+
def packet_lsarpc_77(packet, conversation, context):
    """lsa_LookupNames4: resolve two fixed well-known names to SIDs.

    Uses the plain lsarpc connection; no policy handle is required.
    """
    c = context.get_lsarpc_connection()
    sids = lsa.TransSidArray3()  # filled in by the server
    names = [lsa.String("This Organization"),
             lsa.String("Digest Authentication")]
    level = lsa.LSA_LOOKUP_NAMES_ALL
    count = 0
    lookup_options = lsa.LSA_LOOKUP_OPTION_SEARCH_ISOLATED_NAMES
    client_revision = lsa.LSA_CLIENT_REVISION_2
    c.LookupNames4(names, sids, level, count, lookup_options, client_revision)
    return True
+
+
def packet_nbns_0(packet, conversation, context):
    """NBT name query: best-effort lookup of "ANAME" against the server.

    Failures (timeouts, unreachable server) are deliberately ignored --
    the point is only to put the query on the wire.

    Fix: narrowed the bare ``except:`` to ``except Exception`` so that
    SystemExit/KeyboardInterrupt are no longer swallowed.
    """
    n = Node()
    try:
        n.query_name("ANAME", context.server, timeout=4, broadcast=False)
    except Exception:
        pass
    return True
+
+
packet_nbns_1 = null_packet
# NBT response
# Server response, not generated by the client


packet_rpc_netlogon_0 = null_packet

packet_rpc_netlogon_1 = null_packet

packet_rpc_netlogon_4 = null_packet
# NetrServerReqChallenge
# generated by higher level protocol drivers
# ignored for traffic generation

packet_rpc_netlogon_14 = null_packet

packet_rpc_netlogon_15 = null_packet

packet_rpc_netlogon_21 = null_packet
# NetrLogonDummyRoutine1
# Used to determine security settings.  Triggered from schannel setup,
# so no need for an explicit generator

packet_rpc_netlogon_26 = null_packet
# NetrServerAuthenticate3
# Triggered from schannel set up, no need for an explicit generator
+
+
def packet_rpc_netlogon_29(packet, conversation, context):
    """NetrLogonGetDomainInfo [531]: query domain info over the
    authenticated netlogon connection."""
    c = context.get_netlogon_connection()
    (auth, succ) = context.get_authenticator()
    query = netr_WorkstationInformation()

    c.netr_LogonGetDomainInfo(context.server,
                              context.netbios_name,
                              auth,
                              succ,
                              2,  # TODO are there other values?
                              query)
    return True
+
+
def packet_rpc_netlogon_30(packet, conversation, context):
    """NetrServerPasswordSet2: "change" the machine account password.

    The new password is set to the existing password: that generates the
    same workload as a real change but leaves the account intact for
    subsequent runs.

    Fix: dropped the Python 2 ``ord()`` compatibility dance -- iterating
    ``bytes`` on Python 3 already yields ints.
    """
    c = context.get_netlogon_connection()
    (auth, succ) = context.get_authenticator()
    DATA_LEN = 512
    newpass = context.machine_creds.get_password().encode('utf-16-le')
    pwd_len = len(newpass)
    # Random padding fills the fixed-size buffer; the password sits at
    # the end.
    filler = list(os.urandom(DATA_LEN - pwd_len))
    pwd = netlogon.netr_CryptPassword()
    pwd.length = pwd_len
    pwd.data = filler + list(newpass)
    context.machine_creds.encrypt_netr_crypt_password(pwd)
    c.netr_ServerPasswordSet2(context.server,
                              # must end with $, so use get_username
                              # instead of get_workstation here
                              context.machine_creds.get_username(),
                              context.machine_creds.get_secure_channel_type(),
                              context.netbios_name,
                              auth,
                              pwd)
    return True
+
+
+packet_rpc_netlogon_34 = null_packet
+
+
def packet_rpc_netlogon_39(packet, conversation, context):
    """NetrLogonSamLogonEx [4331]: network SamLogon, sometimes with
    deliberately bad credentials."""
    def connect(creds):
        # Perform one SamLogonEx with the given credentials.
        c = context.get_netlogon_connection()

        # Disable Kerberos in cli creds to extract NTLM response
        old_state = creds.get_kerberos_state()
        creds.set_kerberos_state(DONT_USE_KERBEROS)

        logon = samlogon_logon_info(context.domain,
                                    context.netbios_name,
                                    creds)
        logon_level = netlogon.NetlogonNetworkTransitiveInformation
        validation_level = netlogon.NetlogonValidationSamInfo4
        netr_flags = 0
        c.netr_LogonSamLogonEx(context.server,
                               context.machine_creds.get_workstation(),
                               logon_level,
                               logon,
                               validation_level,
                               netr_flags)

        creds.set_kerberos_state(old_state)

    # Randomly substitute bad credentials, remembering whether the last
    # SamLogon attempt used them.
    context.last_samlogon_bad =\
        context.with_random_bad_credentials(connect,
                                            context.user_creds,
                                            context.user_creds_bad,
                                            context.last_samlogon_bad)
    return True
+
+
def samlogon_target(domain_name, computer_name):
    """Build an NDR-packed NTLMSSP AV_PAIR_LIST naming the logon target.

    :param domain_name: value for the MsvAvNbDomainName pair
    :param computer_name: value for the MsvAvNbComputerName pair
    :return: the packed target-info blob (bytes)
    """
    target_info = ntlmssp.AV_PAIR_LIST()
    target_info.count = 3
    computername = ntlmssp.AV_PAIR()
    computername.AvId = ntlmssp.MsvAvNbComputerName
    computername.Value = computer_name

    domainname = ntlmssp.AV_PAIR()
    domainname.AvId = ntlmssp.MsvAvNbDomainName
    domainname.Value = domain_name

    # MsvAvEOL terminates the pair list.
    eol = ntlmssp.AV_PAIR()
    eol.AvId = ntlmssp.MsvAvEOL
    target_info.pair = [domainname, computername, eol]

    return ndr_pack(target_info)
+
+
def samlogon_logon_info(domain_name, computer_name, creds):
    """Build a netr_NetworkInfo logon structure with an NTLMv2 response.

    :param domain_name: NetBIOS domain name for the target info
    :param computer_name: NetBIOS computer name for the target info
    :param creds: credentials used to generate the NTLM response
    :return: a populated netlogon.netr_NetworkInfo
    """

    target_info_blob = samlogon_target(domain_name, computer_name)

    challenge = b"abcdefgh"  # fixed 8-byte server challenge
    # User account under test
    response = creds.get_ntlm_response(flags=CLI_CRED_NTLMv2_AUTH,
                                       challenge=challenge,
                                       target_info=target_info_blob)

    logon = netlogon.netr_NetworkInfo()

    # Convert byte strings to int lists for the NDR layer.
    logon.challenge = [x if isinstance(x, int) else ord(x) for x in challenge]
    logon.nt = netlogon.netr_ChallengeResponse()
    logon.nt.length = len(response["nt_response"])
    logon.nt.data = [x if isinstance(x, int) else ord(x) for x in response["nt_response"]]

    logon.identity_info = netlogon.netr_IdentityInfo()

    (username, domain) = creds.get_ntlm_username_domain()
    logon.identity_info.domain_name.string = domain
    logon.identity_info.account_name.string = username
    logon.identity_info.workstation.string = creds.get_workstation()

    return logon
+
+
def packet_rpc_netlogon_40(packet, conversation, context):
    """DsrEnumerateDomainTrusts: enumerate in-forest, inbound and
    outbound trusts known to the server."""
    conn = context.get_netlogon_connection()
    trust_flags = (netlogon.NETR_TRUST_FLAG_IN_FOREST |
                   netlogon.NETR_TRUST_FLAG_OUTBOUND |
                   netlogon.NETR_TRUST_FLAG_INBOUND)
    conn.netr_DsrEnumerateDomainTrusts(context.server, trust_flags)
    return True
+
+
def packet_rpc_netlogon_45(packet, conversation, context):
    """NetrLogonSamLogonWithFlags [7]: authenticated network SamLogon,
    sometimes with deliberately bad credentials."""
    def connect(creds):
        # Perform one SamLogonWithFlags with the given credentials.
        c = context.get_netlogon_connection()
        (auth, succ) = context.get_authenticator()

        # Disable Kerberos in cli creds to extract NTLM response
        old_state = creds.get_kerberos_state()
        creds.set_kerberos_state(DONT_USE_KERBEROS)

        logon = samlogon_logon_info(context.domain,
                                    context.netbios_name,
                                    creds)
        logon_level = netlogon.NetlogonNetworkTransitiveInformation
        validation_level = netlogon.NetlogonValidationSamInfo4
        netr_flags = 0
        c.netr_LogonSamLogonWithFlags(context.server,
                                      context.machine_creds.get_workstation(),
                                      auth,
                                      succ,
                                      logon_level,
                                      logon,
                                      validation_level,
                                      netr_flags)

        creds.set_kerberos_state(old_state)

    # Randomly substitute bad credentials, remembering whether the last
    # SamLogon attempt used them.
    context.last_samlogon_bad =\
        context.with_random_bad_credentials(connect,
                                            context.user_creds,
                                            context.user_creds_bad,
                                            context.last_samlogon_bad)
    return True
+
+
def packet_samr_0(packet, conversation, context):
    """samr Connect: ensure the samr context holds an open handle."""
    samr_ctx = context.get_samr_context()
    samr_ctx.get_handle()
    return True
+
+
def packet_samr_1(packet, conversation, context):
    """samr Close: close the most recently opened samr handle.

    Closes the user, then group, then domain, then connect handle --
    whichever is first found open.  Cached state derived from a closed
    handle (rids, domain_sid) is cleared with it.
    """
    c = context.get_samr_context()
    s = c.get_connection()
    # close the last opened handle, may not always be accurate
    # but will do for load simulation
    if c.user_handle is not None:
        s.Close(c.user_handle)
        c.user_handle = None
    elif c.group_handle is not None:
        s.Close(c.group_handle)
        c.group_handle = None
    elif c.domain_handle is not None:
        s.Close(c.domain_handle)
        c.domain_handle = None
        c.rids = None  # the cached RIDs belonged to this domain handle
    elif c.handle is not None:
        s.Close(c.handle)
        c.handle = None
        c.domain_sid = None
    return True
+
+
def packet_samr_3(packet, conversation, context):
    """QuerySecurity: query the security descriptor of the open user.

    Opens the user first (via packet_samr_34) if no user handle is
    cached yet.
    """
    c = context.get_samr_context()
    s = c.get_connection()
    if c.user_handle is None:
        packet_samr_34(packet, conversation, context)
    s.QuerySecurity(c.user_handle, 1)
    return True
+
+
def packet_samr_5(packet, conversation, context):
    """LookupDomain: resolve the domain name and cache its SID on the
    samr context."""
    c = context.get_samr_context()
    s = c.get_connection()
    h = c.get_handle()
    d = lsa.String()
    d.string = context.domain
    c.domain_sid = s.LookupDomain(h, d)
    return True
+
+
def packet_samr_6(packet, conversation, context):
    """EnumDomains: enumerate the domains on the connected server."""
    samr_ctx = context.get_samr_context()
    conn = samr_ctx.get_connection()
    handle = samr_ctx.get_handle()
    conn.EnumDomains(handle, 0, 0)
    return True
+
+
def packet_samr_7(packet, conversation, context):
    """OpenDomain: open the domain and cache the handle on the samr
    context.

    Looks up the domain SID first (via packet_samr_5) if it is not
    cached yet.
    """
    c = context.get_samr_context()
    s = c.get_connection()
    h = c.get_handle()
    if c.domain_sid is None:
        packet_samr_5(packet, conversation, context)

    c.domain_handle = s.OpenDomain(h,
                                   security.SEC_FLAG_MAXIMUM_ALLOWED,
                                   c.domain_sid)
    return True
+
+
+SAMR_QUERY_DOMAIN_INFO_LEVELS = [8, 12]
+
+
def packet_samr_8(packet, conversation, context):
    """QueryDomainInfo [228]: query the open domain at a random level.

    Opens the domain first (via packet_samr_7) if no domain handle is
    cached yet.
    """
    c = context.get_samr_context()
    s = c.get_connection()
    if c.domain_handle is None:
        packet_samr_7(packet, conversation, context)
    level = random.choice(SAMR_QUERY_DOMAIN_INFO_LEVELS)
    s.QueryDomainInfo(c.domain_handle, level)
    return True
+
+
packet_samr_14 = null_packet
# CreateDomainAlias
# Ignore these for now.
+
+
def packet_samr_15(packet, conversation, context):
    """EnumDomainAliases: enumerate aliases in the open domain.

    Opens the domain first (via packet_samr_7) if no domain handle is
    cached yet.
    """
    c = context.get_samr_context()
    s = c.get_connection()
    if c.domain_handle is None:
        packet_samr_7(packet, conversation, context)

    s.EnumDomainAliases(c.domain_handle, 100, 0)
    return True
+
+
def packet_samr_16(packet, conversation, context):
    """GetAliasMembership: query alias membership for the cached domain
    SID.

    Opens the domain first (via packet_samr_7, which also fills in
    c.domain_sid) if no domain handle is cached yet.
    """
    c = context.get_samr_context()
    s = c.get_connection()
    if c.domain_handle is None:
        packet_samr_7(packet, conversation, context)

    sids = lsa.SidArray()
    sid = lsa.SidPtr()
    sid.sid = c.domain_sid
    sids.sids = [sid]
    s.GetAliasMembership(c.domain_handle, sids)
    return True
+
+
def packet_samr_17(packet, conversation, context):
    """LookupNames: look up the test user's RIDs and cache them on the
    samr context.

    Opens the domain first (via packet_samr_7) if no domain handle is
    cached yet.
    """
    c = context.get_samr_context()
    s = c.get_connection()
    if c.domain_handle is None:
        packet_samr_7(packet, conversation, context)

    name = lsa.String(context.username)
    c.rids = s.LookupNames(c.domain_handle, [name])
    return True
+
+
def packet_samr_18(packet, conversation, context):
    """LookupRids: translate the previously looked-up RIDs back to
    names."""
    samr_ctx = context.get_samr_context()
    conn = samr_ctx.get_connection()
    if samr_ctx.rids is None:
        # Populate the RID cache first.
        packet_samr_17(packet, conversation, context)
    all_rids = [rid for result in samr_ctx.rids for rid in result.ids]
    conn.LookupRids(samr_ctx.domain_handle, all_rids)
    return True
+
+
def packet_samr_19(packet, conversation, context):
    """OpenGroup: open a fixed group and cache the handle on the samr
    context.

    Opens the domain first (via packet_samr_7) if no domain handle is
    cached yet.
    """
    c = context.get_samr_context()
    s = c.get_connection()
    if c.domain_handle is None:
        packet_samr_7(packet, conversation, context)

    rid = 0x202  # Users I think.
    c.group_handle = s.OpenGroup(c.domain_handle,
                                 security.SEC_FLAG_MAXIMUM_ALLOWED,
                                 rid)
    return True
+
+
def packet_samr_25(packet, conversation, context):
    """QueryGroupMember: list the members of the open group.

    Opens the group first (via packet_samr_19) if no group handle is
    cached yet.
    """
    c = context.get_samr_context()
    s = c.get_connection()
    if c.group_handle is None:
        packet_samr_19(packet, conversation, context)
    s.QueryGroupMember(c.group_handle)
    return True
+
+
def packet_samr_34(packet, conversation, context):
    """OpenUser: open the first looked-up RID and cache the user handle.

    Looks up the RIDs first (via packet_samr_17) if none are cached yet.
    """
    c = context.get_samr_context()
    s = c.get_connection()
    if c.rids is None:
        packet_samr_17(packet, conversation, context)
    c.user_handle = s.OpenUser(c.domain_handle,
                               security.SEC_FLAG_MAXIMUM_ALLOWED,
                               c.rids[0].ids[0])
    return True
+
+
def packet_samr_36(packet, conversation, context):
    """QueryUserInfo: query level-1 info for the open user.

    Opens the user first (via packet_samr_34) if no user handle is
    cached yet.
    """
    c = context.get_samr_context()
    s = c.get_connection()
    if c.user_handle is None:
        packet_samr_34(packet, conversation, context)
    level = 1
    s.QueryUserInfo(c.user_handle, level)
    return True
+
+
+packet_samr_37 = null_packet
+
+
def packet_samr_39(packet, conversation, context):
    """GetGroupsForUser: list group memberships of the open user.

    Opens the user first (via packet_samr_34) if no user handle is
    cached yet.
    """
    c = context.get_samr_context()
    s = c.get_connection()
    if c.user_handle is None:
        packet_samr_34(packet, conversation, context)
    s.GetGroupsForUser(c.user_handle)
    return True
+
+
# No traffic generated for these opnums:
packet_samr_40 = null_packet

packet_samr_44 = null_packet
+
+
def packet_samr_57(packet, conversation, context):
    """Connect2: ensure the samr context holds an open handle."""
    samr_ctx = context.get_samr_context()
    samr_ctx.get_handle()
    return True
+
+
def packet_samr_64(packet, conversation, context):
    """Connect5: ensure the samr context holds an open handle."""
    samr_ctx = context.get_samr_context()
    samr_ctx.get_handle()
    return True
+
+
+packet_samr_68 = null_packet
+
+
def packet_srvsvc_16(packet, conversation, context):
    """NetShareGetInfo: query level-1 info for the IPC$ share."""
    conn = context.get_srvsvc_connection()
    server_unc = "\\\\" + context.server
    conn.NetShareGetInfo(server_unc, "IPC$", 1)
    return True
+
+
def packet_srvsvc_21(packet, conversation, context):
    """NetSrvGetInfo at level 101.

    The captured level 102 causes WERR_ACCESS_DENIED against Windows:

    > If the level is 102 or 502, the Windows implementation checks
    > whether the caller is a member of one of the groups previously
    > mentioned or is a member of the Power Users local group.

    so level 101 is requested instead.  Samba does not implement that
    check yet, so either level passes there.

    refer to:

    https://msdn.microsoft.com/en-us/library/cc247297.aspx#Appendix_A_80
    """
    conn = context.get_srvsvc_connection()
    server_unc = "\\\\" + context.server
    conn.NetSrvGetInfo(server_unc, 101)
    return True
diff --git a/python/samba/forest_update.py b/python/samba/forest_update.py
new file mode 100644
index 0000000..46de213
--- /dev/null
+++ b/python/samba/forest_update.py
@@ -0,0 +1,543 @@
+# Samba4 Forest update checker
+#
# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import ldb
+import samba
+from samba import sd_utils
+from samba.dcerpc import security
+from samba.provision.common import setup_path
+from samba.dsdb import (
+ DS_DOMAIN_FUNCTION_2008,
+ DS_DOMAIN_FUNCTION_2008_R2,
+ DS_DOMAIN_FUNCTION_2012,
+ DS_DOMAIN_FUNCTION_2012_R2,
+ DS_DOMAIN_FUNCTION_2016,
+)
+
# Range of forest-prep operation numbers handled by this module.
MIN_UPDATE = 11
MAX_UPDATE = 142

# Maps each forest-prep operation number to the GUID of its marker
# container under CN=Operations,CN=ForestUpdates.
update_map = {
    # Missing updates from 2008
    11: "27a03717-5963-48fc-ba6f-69faa33e70ed",
    12: "3467dae5-dedd-4648-9066-f48ac186b20a",
    13: "33b7ee33-1386-47cf-baa1-b03e06473253",
    14: "e9ee8d55-c2fb-4723-a333-c80ff4dfbf45",
    15: "ccfae63a-7fb5-454c-83ab-0e8e1214974e",
    16: "ad3c7909-b154-4c16-8bf7-2c3a7870bb3d",
    17: "26ad2ebf-f8f5-44a4-b97c-a616c8b9d09a",
    18: "4444c516-f43a-4c12-9c4b-b5c064941d61",
    19: "436a1a4b-f41a-46e6-ac86-427720ef29f3",
    20: "b2b7fb45-f50d-41bc-a73b-8f580f3b636a",
    21: "1bdf6366-c3db-4d0b-b8cb-f99ba9bce20f",
    22: "63c0f51a-067c-4640-8a4f-044fb33f1049",
    23: "dae441c0-366e-482e-98d9-60a99a1898cc",
    24: "7dd09ca6-f0d6-43bf-b7f8-ef348f435617",
    25: "6b800a81-affe-4a15-8e41-6ea0c7aa89e4",
    26: "dd07182c-3174-4c95-902a-d64fee285bbf",
    27: "ffa5ee3c-1405-476d-b344-7ad37d69cc25",
    28: "099f1587-af70-49c6-ab6c-7b3e82be0fe2",
    29: "1a3f6b15-55f2-4752-ba27-3d38a8232c4d",
    30: "dee21a17-4e8e-4f40-a58c-c0c009b685a7",
    31: "9bd98bb4-4047-4de5-bf4c-7bd1d0f6d21d",
    32: "3fe80fbf-bf39-4773-b5bd-3e5767a30d2d",
    33: "f02915e2-9141-4f73-b8e7-2804662782da",
    34: "39902c52-ef24-4b4b-8033-2c9dfdd173a2",
    35: "20bf09b4-6d0b-4cd1-9c09-4231edf1209b",
    36: "94f238bb-831c-11d6-977b-00c04f613221",
    37: "94f238bc-831c-11d6-977b-00c04f613221",
    38: "94f238bd-831c-11d6-977b-00c04f613221",
    39: "94f238be-831c-11d6-977b-00c04f613221",
    40: "94f238bf-831c-11d6-977b-00c04f613221",
    41: "94f238c0-831c-11d6-977b-00c04f613221",
    42: "eda27b47-e610-11d6-9793-00c04f613221",
    43: "eda27b48-e610-11d6-9793-00c04f613221",
    44: "eda27b49-e610-11d6-9793-00c04f613221",
    45: "eda27b4a-e610-11d6-9793-00c04f613221",
    46: "26d9c510-e61a-11d6-9793-00c04f613221",
    47: "26d9c511-e61a-11d6-9793-00c04f613221",
    48: "ea08c04c-f474-4212-b19e-5e754f9210d4",
    49: "4c0672a2-437c-4944-b953-5db8f111d665",
    50: "4c022fd1-adab-4d84-a7f1-9580f03da856",
    51: "c03b1f37-c240-4910-93c8-1544a452b4b5",
    52: "560cf82d-9572-48a3-9024-6f2b56f1f866",
    53: "abd97102-88dd-4013-a009-0e2c2f967ff6",
    54: "134428a8-0043-48a6-bcda-63310d9ec4dd",
    55: "d668ad1f-cedd-4565-ab02-9385926ce4f5",
    56: "8f86b825-c322-4101-adc4-579f12d445db",
    57: "9fea28ff-387f-4d57-866d-3893c50f373f",
    58: "782370ce-3d38-438d-8b0c-464220a3039d",
    59: "002fb291-0d00-4b0c-8c00-fe7f50ce6f8d",
    60: "dcb3c95d-deb7-4c51-ad13-43a7d5d06fc7",
    61: "ef010a1e-bd88-48c8-a7af-2affd250d77d",
    62: "bd3413c0-9559-469b-9f3d-51d7faabd81a",
    63: "f814097b-3e3d-49ba-8a3a-092c25085f06",
    64: "6eb8eaf9-3403-4ba5-8b4b-ce349a4680ad",
    65: "07e57d28-ad40-44fc-8334-8a0dc119b3f4",
    66: "6fd48655-1698-497a-ac8d-8267ce01c80b",
    67: "10338d31-2423-4dff-b4b5-ef025144b01f",
    68: "a96e2ed5-7a7c-4d5c-9d5d-965eca0051da",
    69: "613bd063-e8e9-4a62-8f4c-cda566f7eb6f",
    70: "2a858903-5696-4364-b4e5-4cac027ca7a6",
    71: "0fc5a978-0059-4b0a-9dc2-9896e8e389a1",
    72: "4d753a29-26ac-4d1a-bc80-311f947e4f0a",
    73: "3b3adbdb-4485-4559-aed8-9811c4bf90e4",
    74: "56040c71-fe93-4037-8fe9-1a4d1a283009",
    75: "caa2bfad-0cca-483b-8d00-347f943292a8",
    76: "2b9e0609-6d75-498a-9727-c9fcc93f0e42",
    77: "96541a16-910a-4b66-acde-720a0dff03c7",
    78: "429a6334-1a00-4515-bf48-676deb55954a",
    # Windows Server 2008 R2 - version 5
    79: "21ae657c-6649-43c4-bbb3-7f184fdf58c1",
    80: "dca8f425-baae-47cd-b424-e3f6c76ed08b",
    81: "a662b036-dbbe-4166-b4ba-21abea17f9cc",
    82: "9d17b863-18c3-497d-9bde-45ddb95fcb65",
    83: "11c39bed-4bee-45f5-b195-8da0e05b573a",
    # Windows Server 2012 - version 11
    84: "4664e973-cb20-4def-b3d5-559d6fe123e0",
    85: "2972d92d-a07a-44ac-9cb0-bf243356f345",
    86: "09a49cb3-6c54-4b83-ab20-8370838ba149",
    87: "77283e65-ce02-4dc3-8c1e-bf99b22527c2",
    88: "0afb7f53-96bd-404b-a659-89e65c269420",
    89: "c7f717ef-fdbe-4b4b-8dfc-fa8b839fbcfa",
    90: "00232167-f3a4-43c6-b503-9acb7a81b01c",
    91: "73a9515b-511c-44d2-822b-444a33d3bd33",
    92: "e0c60003-2ed7-4fd3-8659-7655a7e79397",
    93: "ed0c8cca-80ab-4b6b-ac5a-59b1d317e11f",
    94: "b6a6c19a-afc9-476b-8994-61f5b14b3f05",
    95: "defc28cd-6cb6-4479-8bcb-aabfb41e9713",
    96: "d6bd96d4-e66b-4a38-9c6b-e976ff58c56d",
    97: "bb8efc40-3090-4fa2-8a3f-7cd1d380e695",
    98: "2d6abe1b-4326-489e-920c-76d5337d2dc5",
    99: "6b13dfb5-cecc-4fb8-b28d-0505cea24175",
    100: "92e73422-c68b-46c9-b0d5-b55f9c741410",
    101: "c0ad80b4-8e84-4cc4-9163-2f84649bcc42",
    102: "992fe1d0-6591-4f24-a163-c820fcb7f308",
    103: "ede85f96-7061-47bf-b11b-0c0d999595b5",
    104: "ee0f3271-eb51-414a-bdac-8f9ba6397a39",
    105: "587d52e0-507e-440e-9d67-e6129f33bb68",
    106: "ce24f0f6-237e-43d6-ac04-1e918ab04aac",
    107: "7f77d431-dd6a-434f-ae4d-ce82928e498f",
    108: "ba14e1f6-7cd1-4739-804f-57d0ea74edf4",
    109: "156ffa2a-e07c-46fb-a5c4-fbd84a4e5cce",
    110: "7771d7dd-2231-4470-aa74-84a6f56fc3b6",
    111: "49b2ae86-839a-4ea0-81fe-9171c1b98e83",
    112: "1b1de989-57ec-4e96-b933-8279a8119da4",
    113: "281c63f0-2c9a-4cce-9256-a238c23c0db9",
    114: "4c47881a-f15a-4f6c-9f49-2742f7a11f4b",
    115: "2aea2dc6-d1d3-4f0c-9994-66c1da21de0f",
    116: "ae78240c-43b9-499e-ae65-2b6e0f0e202a",
    117: "261b5bba-3438-4d5c-a3e9-7b871e5f57f0",
    118: "3fb79c05-8ea1-438c-8c7a-81f213aa61c2",
    119: "0b2be39a-d463-4c23-8290-32186759d3b1",
    120: "f0842b44-bc03-46a1-a860-006e8527fccd",
    121: "93efec15-4dd9-4850-bc86-a1f2c8e2ebb9",
    122: "9e108d96-672f-40f0-b6bd-69ee1f0b7ac4",
    123: "1e269508-f862-4c4a-b01f-420d26c4ff8c",
    # 124 is deliberately absent -- see missing_updates below.
    125: "e1ab17ed-5efb-4691-ad2d-0424592c5755",
    126: "0e848bd4-7c70-48f2-b8fc-00fbaa82e360",
    127: "016f23f7-077d-41fa-a356-de7cfdb01797",
    128: "49c140db-2de3-44c2-a99a-bab2e6d2ba81",
    129: "e0b11c80-62c5-47f7-ad0d-3734a71b8312",
    130: "2ada1a2d-b02f-4731-b4fe-59f955e24f71",
    # Windows Server 2012 R2 - version 15
    131: "b83818c1-01a6-4f39-91b7-a3bb581c3ae3",
    132: "bbbb9db0-4009-4368-8c40-6674e980d3c3",
    133: "f754861c-3692-4a7b-b2c2-d0fa28ed0b0b",
    134: "d32f499f-3026-4af0-a5bd-13fe5a331bd2",
    135: "38618886-98ee-4e42-8cf1-d9a2cd9edf8b",
    # Windows Server 2016 - version 16
    136: "328092fb-16e7-4453-9ab8-7592db56e9c4",
    137: "3a1c887f-df0a-489f-b3f2-2d0409095f6e",
    138: "232e831f-f988-4444-8e3e-8a352e2fd411",
    139: "ddddcf0c-bec9-4a5a-ae86-3cfe6cc6e110",
    140: "a0a45aac-5550-42df-bb6a-3cc5c46b52f2",
    141: "3e7645f3-3ea5-4567-b35a-87630449c70c",
    142: "e634067b-e2c4-4d79-b6e8-73c619324d5e",
}

# Highest operation number required for each forest functional level.
functional_level_to_max_update = {
    DS_DOMAIN_FUNCTION_2008: 78,
    DS_DOMAIN_FUNCTION_2008_R2: 83,
    DS_DOMAIN_FUNCTION_2012: 130,
    DS_DOMAIN_FUNCTION_2012_R2: 135,
    DS_DOMAIN_FUNCTION_2016: 142,
}

# Expected stored "revision" value for each forest functional level.
functional_level_to_version = {
    DS_DOMAIN_FUNCTION_2008: 2,
    DS_DOMAIN_FUNCTION_2008_R2: 5,
    DS_DOMAIN_FUNCTION_2012: 11,
    DS_DOMAIN_FUNCTION_2012_R2: 15,
    DS_DOMAIN_FUNCTION_2016: 16,
}

# Documentation says that this update was deprecated
missing_updates = [124]
+
+
class ForestUpdateException(Exception):
    """Raised when a forest update cannot be verified or applied."""
+
+
+class ForestUpdate(object):
+ """Check and update a SAM database for forest updates"""
+
    def __init__(self, samdb, verbose=False, fix=False,
                 add_update_container=True):
        """
        :param samdb: LDB database
        :param verbose: Show the ldif changes
        :param fix: Apply the update if the container is missing
        :param add_update_container: Add the container at the end of the change
        :raise ForestUpdateException:
        """
        # Imported lazily, at construction time.
        from samba.ms_forest_updates_markdown import read_ms_markdown

        self.samdb = samdb
        self.fix = fix
        self.verbose = verbose
        self.add_update_container = add_update_container
        # TODO In future we should check for inconsistencies when it claims it has been done
        self.check_update_applied = False

        self.config_dn = self.samdb.get_config_basedn()
        self.domain_dn = self.samdb.domain_dn()
        self.schema_dn = self.samdb.get_schema_basedn()

        self.sd_utils = sd_utils.SDUtils(samdb)
        self.domain_sid = security.dom_sid(samdb.get_domain_sid())

        # DN that holds one marker container per applied operation.
        self.forestupdate_container = self.samdb.get_config_basedn()
        try:
            self.forestupdate_container.add_child("CN=Operations,CN=ForestUpdates")
        except ldb.LdbError:
            raise ForestUpdateException("Failed to add forest update container child")

        # DN of the object carrying the forest "revision" attribute.
        self.revision_object = self.samdb.get_config_basedn()
        try:
            self.revision_object.add_child("CN=ActiveDirectoryUpdate,CN=ForestUpdates")
        except ldb.LdbError:
            raise ForestUpdateException("Failed to add revision object child")

        # Store the result of parsing the markdown in a dictionary,
        # keyed by operation GUID (see operation_ldif()).
        self.stored_ldif = {}
        read_ms_markdown(setup_path("adprep/WindowsServerDocs/Forest-Wide-Updates.md"),
                         out_dict=self.stored_ldif)
+
+ def check_updates_functional_level(self, functional_level,
+ old_functional_level=None,
+ update_revision=False):
+ """
+ Apply all updates for a given old and new functional level
+ :param functional_level: constant
+ :param old_functional_level: constant
+ :param update_revision: modify the stored version
+ :raise ForestUpdateException:
+ """
+ res = self.samdb.search(base=self.revision_object,
+ attrs=["revision"], scope=ldb.SCOPE_BASE)
+
+ expected_update = functional_level_to_max_update[functional_level]
+
+ if old_functional_level:
+ min_update = functional_level_to_max_update[old_functional_level]
+ min_update += 1
+ else:
+ min_update = MIN_UPDATE
+
+ self.check_updates_range(min_update, expected_update)
+
+ expected_version = functional_level_to_version[functional_level]
+ found_version = int(res[0]['revision'][0])
+ if update_revision and found_version < expected_version:
+ if not self.fix:
+ raise ForestUpdateException("Revision is not high enough. Fix is set to False."
+ "\nExpected: %dGot: %d" % (expected_version,
+ found_version))
+ self.samdb.modify_ldif("""dn: %s
+changetype: modify
+replace: revision
+revision: %d
+ """ % (str(self.revision_object), expected_version))
+
+ def check_updates_iterator(self, iterator):
+ """
+ Apply a list of updates which must be within the valid range of updates
+ :param iterator: Iterable specifying integer update numbers to apply
+ :raise ForestUpdateException:
+ """
+ for op in iterator:
+ if op < MIN_UPDATE or op > MAX_UPDATE:
+ raise ForestUpdateException("Update number invalid.")
+
+ if 84 <= op <= 87:
+ self.operation_ldif(op)
+ elif 91 <= op <= 126:
+ self.operation_ldif(op)
+ elif 131 <= op <= 134:
+ self.operation_ldif(op)
+ elif 136 <= op <= 142:
+ self.operation_ldif(op)
+ else:
+ # No LDIF file exists for the change
+ getattr(self, "operation_%d" % op)(op)
+
+ def check_updates_range(self, start=0, end=0):
+ """
+ Apply a range of updates which must be within the valid range of updates
+ :param start: integer update to begin
+ :param end: integer update to end (inclusive)
+ :raise ForestUpdateException:
+ """
+ op = start
+ if start < MIN_UPDATE or start > end or end > MAX_UPDATE:
+ raise ForestUpdateException("Update number invalid.")
+ while op <= end:
+ if op in missing_updates:
+ pass
+ elif 84 <= op <= 87:
+ self.operation_ldif(op)
+ elif 91 <= op <= 126:
+ self.operation_ldif(op)
+ elif 131 <= op <= 134:
+ self.operation_ldif(op)
+ elif 136 <= op <= 142:
+ self.operation_ldif(op)
+ else:
+ # No LDIF file exists for the change
+ getattr(self, "operation_%d" % op)(op)
+
+ op += 1
+
    def update_exists(self, op):
        """
        Check whether an update's marker container already exists.

        Each applied update is recorded as a container named after its
        GUID under the ForestUpdates operations container.

        :param op: Integer update number
        :return: True if update exists else False
        """
        update_dn = "CN=%s,%s" % (update_map[op], self.forestupdate_container)
        try:
            res = self.samdb.search(base=update_dn,
                                    scope=ldb.SCOPE_BASE,
                                    attrs=[])
        except ldb.LdbError as e:
            (num, msg) = e.args
            if num != ldb.ERR_NO_SUCH_OBJECT:
                raise
            # Marker object missing: the update has not been applied.
            return False

        assert len(res) == 1
        print("Skip Forest Update %u: %s" % (op, update_map[op]))
        return True
+
    def update_add(self, op):
        """
        Add the corresponding marker container object for the given update.

        :param op: Integer update
        """
        self.samdb.add_ldif("""dn: CN=%s,%s
objectClass: container
""" % (update_map[op], str(self.forestupdate_container)))
        print("Applied Forest Update %u: %s" % (op, update_map[op]))
+
+ def operation_ldif(self, op):
+ if self.update_exists(op):
+ # Assume we have applied it (we have no double checks for these)
+ return True
+
+ guid = update_map[op]
+ if guid in self.stored_ldif:
+ ldif = self.stored_ldif[guid]
+ elif guid.lower() in self.stored_ldif:
+ ldif = self.stored_ldif[guid.lower()]
+ elif guid.upper() in self.stored_ldif:
+ ldif = self.stored_ldif[guid.upper()]
+ else:
+ raise ForestUpdateException("OPERATION %d: ldif for %s not found" %
+ (op, guid))
+
+ sub_ldif = samba.substitute_var(ldif, {"CONFIG_DN":
+ str(self.config_dn),
+ "FOREST_ROOT_DOMAIN":
+ str(self.domain_dn),
+ "SCHEMA_DN":
+ str(self.schema_dn)})
+ if self.verbose:
+ print("UPDATE (LDIF) ------ OPERATION %d" % op)
+ print(sub_ldif)
+
+ try:
+ self.samdb.modify_ldif(sub_ldif)
+ except ldb.LdbError as e:
+ (num, msg) = e.args
+ if num != ldb.ERR_ATTRIBUTE_OR_VALUE_EXISTS:
+ raise e
+ pass
+
+ if self.add_update_container:
+ self.update_add(op)
+
+ def raise_if_not_fix(self, op):
+ """
+ Raises an exception if not set to fix.
+ :param op: Integer operation
+ :raise ForestUpdateException:
+ """
+ if not self.fix:
+ raise ForestUpdateException("Missing operation %d. Fix is currently set to False" % op)
+
+ #
+ # Created a new object CN=Sam-Domain in the Schema partition
+ #
+ # Created the following access control entry (ACE) to grant Write Property
+ # to Principal Self on the object: ...
+ #
+ def operation_88(self, op):
+ if self.update_exists(op):
+ return
+ self.raise_if_not_fix(op)
+
+ ace = "(OA;CIIO;WP;ea1b7b93-5e48-46d5-bc6c-4df4fda78a35;bf967a86-0de6-11d0-a285-00aa003049e2;PS)"
+
+ schema_dn = ldb.Dn(self.samdb, "CN=Sam-Domain,%s" % str(self.schema_dn))
+
+ self.sd_utils.update_aces_in_dacl(schema_dn,
+ sddl_attr="defaultSecurityDescriptor",
+ add_aces=[ace])
+
+ if self.add_update_container:
+ self.update_add(op)
+
+ #
+ # Created a new object CN=Domain-DNS in the Schema partition
+ #
+ # Created the following access control entry (ACE) to grant Write Property
+ # to Principal Self on the object: ...
+ #
+ def operation_89(self, op):
+ if self.update_exists(op):
+ return
+ self.raise_if_not_fix(op)
+
+ ace = "(OA;CIIO;WP;ea1b7b93-5e48-46d5-bc6c-4df4fda78a35;bf967a86-0de6-11d0-a285-00aa003049e2;PS)"
+
+ schema_dn = ldb.Dn(self.samdb, "CN=Domain-DNS,%s" % str(self.schema_dn))
+
+ self.sd_utils.update_aces_in_dacl(schema_dn,
+ sddl_attr="defaultSecurityDescriptor",
+ add_aces=[ace])
+
+ if self.add_update_container:
+ self.update_add(op)
+
    # Update display specifiers
    def operation_90(self, op):
        # No Samba-side change is made here; only the update marker container
        # is recorded (when marker creation is enabled).
        if self.add_update_container and not self.update_exists(op):
            self.update_add(op)
+
    # Update display specifiers
    def operation_127(self, op):
        # No Samba-side change is made here; only record the update marker.
        if self.add_update_container and not self.update_exists(op):
            self.update_add(op)
+
    # Update appears to already be applied in documentation
    def operation_128(self, op):
        # Nothing to apply; record the update marker only, if requested.
        if self.add_update_container and not self.update_exists(op):
            self.update_add(op)
+
+ # Grant ACE (OA;CIOI;RPWP;3f78c3e5-f79a-46bd-a0b8-9d18116ddc79;;PS) to samDomain
+ def operation_129(self, op):
+ if self.update_exists(op):
+ return
+ self.raise_if_not_fix(op)
+
+ ace = "(OA;CIOI;RPWP;3f78c3e5-f79a-46bd-a0b8-9d18116ddc79;;PS)"
+
+ schema_dn = ldb.Dn(self.samdb, "CN=Sam-Domain,%s" % str(self.schema_dn))
+
+ self.sd_utils.update_aces_in_dacl(schema_dn,
+ sddl_attr="defaultSecurityDescriptor",
+ add_aces=[ace])
+
+ if self.add_update_container:
+ self.update_add(op)
+
+ # Grant ACE (OA;CIOI;RPWP;3f78c3e5-f79a-46bd-a0b8-9d18116ddc79;;PS) to domainDNS
+ def operation_130(self, op):
+ if self.update_exists(op):
+ return
+ self.raise_if_not_fix(op)
+
+ ace = "(OA;CIOI;RPWP;3f78c3e5-f79a-46bd-a0b8-9d18116ddc79;;PS)"
+
+ schema_dn = ldb.Dn(self.samdb, "CN=Domain-DNS,%s" % str(self.schema_dn))
+
+ self.sd_utils.update_aces_in_dacl(schema_dn,
+ sddl_attr="defaultSecurityDescriptor",
+ add_aces=[ace])
+
+ if self.add_update_container:
+ self.update_add(op)
+
    # Set msDS-ClaimIsValueSpaceRestricted on ad://ext/AuthenticationSilo to FALSE
    def operation_135(self, op):
        if self.update_exists(op):
            return
        self.raise_if_not_fix(op)

        # The relax and provision controls are passed so the modification of
        # this object is accepted — NOTE(review): inferred from the control
        # names; confirm against the dsdb control documentation.
        self.samdb.modify_ldif("""dn: CN=ad://ext/AuthenticationSilo,CN=Claim Types,CN=Claims Configuration,CN=Services,%s
changetype: modify
replace: msDS-ClaimIsValueSpaceRestricted
msDS-ClaimIsValueSpaceRestricted: FALSE
""" % self.config_dn,
                               controls=["relax:0", "provision:0"])

        if self.add_update_container:
            self.update_add(op)
+
+ #
+ # THE FOLLOWING ARE MISSING UPDATES FROM 2008 + 2008 R2
+ #
+
    def operation_11(self, op):
        # 2008-era update with no Samba-side change; record the marker only.
        if self.add_update_container and not self.update_exists(op):
            self.update_add(op)
+
    def operation_54(self, op):
        # No Samba-side change; record the update marker only.
        if self.add_update_container and not self.update_exists(op):
            self.update_add(op)
+
    def operation_79(self, op):
        # No Samba-side change; record the update marker only.
        if self.add_update_container and not self.update_exists(op):
            self.update_add(op)
+
    def operation_80(self, op):
        # No Samba-side change; record the update marker only.
        if self.add_update_container and not self.update_exists(op):
            self.update_add(op)
+
    def operation_81(self, op):
        # No Samba-side change; record the update marker only.
        if self.add_update_container and not self.update_exists(op):
            self.update_add(op)
+
    def operation_82(self, op):
        # No Samba-side change; record the update marker only.
        if self.add_update_container and not self.update_exists(op):
            self.update_add(op)
+
    def operation_83(self, op):
        # No Samba-side change; record the update marker only.
        if self.add_update_container and not self.update_exists(op):
            self.update_add(op)
diff --git a/python/samba/functional_level.py b/python/samba/functional_level.py
new file mode 100644
index 0000000..e5ccf39
--- /dev/null
+++ b/python/samba/functional_level.py
@@ -0,0 +1,83 @@
+# domain management - common code
+#
+# Copyright Catalyst.Net Ltd 2017-2023
+# Copyright Jelmer Vernooij 2007-2012
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from samba.dsdb import (
+ DS_DOMAIN_FUNCTION_2000,
+ DS_DOMAIN_FUNCTION_2003,
+ DS_DOMAIN_FUNCTION_2008,
+ DS_DOMAIN_FUNCTION_2008_R2,
+ DS_DOMAIN_FUNCTION_2012,
+ DS_DOMAIN_FUNCTION_2012_R2,
+ DS_DOMAIN_FUNCTION_2003_MIXED,
+ DS_DOMAIN_FUNCTION_2016
+)
+
# Map of the string forms accepted for a functional level to the
# DS_DOMAIN_FUNCTION_* constants.  Note that DS_DOMAIN_FUNCTION_2003_MIXED
# has no string form here.
string_version_to_constant = {
    "2000": DS_DOMAIN_FUNCTION_2000,
    "2003": DS_DOMAIN_FUNCTION_2003,
    "2008": DS_DOMAIN_FUNCTION_2008,
    "2008_R2": DS_DOMAIN_FUNCTION_2008_R2,
    "2012": DS_DOMAIN_FUNCTION_2012,
    "2012_R2": DS_DOMAIN_FUNCTION_2012_R2,
    "2016": DS_DOMAIN_FUNCTION_2016,
}
+
+
def string_to_level(string):
    """Interpret a string indicating a functional level.

    :param string: a version string such as "2008_R2"
    :return: the matching DS_DOMAIN_FUNCTION_* constant
    :raise KeyError: if the string is not a recognised level
    """
    return string_version_to_constant[string]
+
+
def level_to_string(level):
    """turn the level enum number into a printable string."""
    if level < DS_DOMAIN_FUNCTION_2000:
        return "invalid"
    # Levels above the highest known constant fall through to the default.
    return {
        DS_DOMAIN_FUNCTION_2000: "2000",
        DS_DOMAIN_FUNCTION_2003_MIXED:
            "2003 with mixed domains/interim (NT4 DC support)",
        DS_DOMAIN_FUNCTION_2003: "2003",
        DS_DOMAIN_FUNCTION_2008: "2008",
        DS_DOMAIN_FUNCTION_2008_R2: "2008 R2",
        DS_DOMAIN_FUNCTION_2012: "2012",
        DS_DOMAIN_FUNCTION_2012_R2: "2012 R2",
        DS_DOMAIN_FUNCTION_2016: "2016",
    }.get(level, "higher than 2016")
+
def dc_level_from_lp(lp):
    """Return the ad dc functional level as an integer from a LoadParm

    :param lp: LoadParm from which 'ad dc functional level' is read
    :raise RuntimeError: if the smb.conf value is missing or not a level
        known to this module
    """

    # I don't like the RuntimeError here, but these "can't happen"
    # except by a developer stuffup.

    smb_conf_dc_functional_level = lp.get('ad dc functional level')
    if smb_conf_dc_functional_level is None:
        # This shouldn't be possible, except if the default option
        # value is not in the loadparm enum table
        raise RuntimeError("'ad dc functional level' in smb.conf unrecognised!")

    try:
        return string_to_level(smb_conf_dc_functional_level)
    except KeyError as e:
        # This shouldn't be possible at all, unless the table in
        # python/samba/functional_level.py is not a superset of that
        # in lib/param/param_table.c.  Chain the KeyError so the failed
        # lookup is visible in the traceback.
        raise RuntimeError(f"'ad dc functional level = {smb_conf_dc_functional_level}'"
                           " in smb.conf is not valid!") from e
diff --git a/python/samba/getopt.py b/python/samba/getopt.py
new file mode 100644
index 0000000..0935ed0
--- /dev/null
+++ b/python/samba/getopt.py
@@ -0,0 +1,539 @@
+# Samba-specific bits for optparse
+# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Support for parsing Samba-related command-line options."""
+
+__docformat__ = "restructuredText"
+
+import optparse
+import os
+import sys
+from abc import ABCMeta, abstractmethod
+from copy import copy
+
+from samba.credentials import (
+ Credentials,
+ AUTO_USE_KERBEROS,
+ DONT_USE_KERBEROS,
+ MUST_USE_KERBEROS,
+)
+from samba._glue import get_burnt_commandline
+
+
def check_bytes(option, opt, value):
    """Custom option type to allow the input of sizes using byte, kb, mb ...

    units, e.g. 2Gb, 4KiB ...
    e.g. Option("--size", type="bytes", metavar="SIZE")

    :param option: the Option being processed (unused)
    :param opt: the option string, used in error messages
    :param value: the raw command-line string, e.g. "2Gb" or "4 KiB"
    :return: the size in bytes, as a float
    :raise optparse.OptionValueError: on a malformed number or unknown suffix
    """

    multipliers = {"B": 1,
                   "KB": 1024,
                   "MB": 1024 * 1024,
                   "GB": 1024 * 1024 * 1024}

    # strip out any spaces
    v = value.replace(" ", "")

    # extract the numeric prefix (digits and a decimal point).  The
    # condition is explicitly parenthesised: the original relied on
    # and/or precedence, which read as if '.' were only accepted when v
    # was non-empty checked separately.
    digits = ""
    while v and (v[0].isdigit() or v[0] == '.'):
        digits += v[0]
        v = v[1:]

    try:
        m = float(digits)
    except ValueError:
        msg = ("{0} option requires a numeric value, "
               "with an optional unit suffix").format(opt)
        raise optparse.OptionValueError(msg)

    # strip out the 'i' and convert to upper case so
    # kib Kib kb KB are all equivalent
    suffix = v.upper().replace("I", "")
    try:
        return m * multipliers[suffix]
    except KeyError:
        # (the bound exception variable was unused; dropped)
        msg = ("{0} invalid suffix '{1}', "
               "should be B, Kb, Mb or Gb").format(opt, v)
        raise optparse.OptionValueError(msg)
+
+
class OptionMissingError(optparse.OptionValueError):
    """One or more Options with required=True is missing."""

    def __init__(self, options):
        """Raised when required Options are missing from the command line.

        :param options: list of 1 or more option
        """
        self.options = options

    def __str__(self):
        if len(self.options) == 1:
            return f"Argument {self.options[0]} is required."
        names = ", ".join(sorted(str(option) for option in self.options))
        return f"The arguments {names} are required."
+
+
class ValidationError(Exception):
    """ValidationError is the exception raised by validators.

    Should be raised from the __call__ method of the Validator subclass,
    with a message describing why the value was rejected.
    """
    pass
+
+
class Validator(metaclass=ABCMeta):
    """Base class for Validators used by SambaOption.

    Subclass this to make custom validators and implement __call__.
    """

    @abstractmethod
    def __call__(self, field, value):
        # Implementations should raise ValidationError when *value* is not
        # acceptable for *field*.
        pass
+
+
class Option(optparse.Option):
    """optparse Option with a "bytes" type, required flag and validators."""

    ATTRS = optparse.Option.ATTRS + ["required", "validators"]
    TYPES = optparse.Option.TYPES + ("bytes",)
    TYPE_CHECKER = copy(optparse.Option.TYPE_CHECKER)
    TYPE_CHECKER["bytes"] = check_bytes

    def run_validators(self, opt, value):
        """Runs the list of validators on the current option."""
        for validator in (self.validators or []):
            validator(opt, value)

    def convert_value(self, opt, value):
        """Override convert_value to run validators just after.

        This can also be done in process() but there we would have to
        replace the entire method.
        """
        converted = super().convert_value(opt, value)
        self.run_validators(opt, converted)
        return converted
+
+
class OptionParser(optparse.OptionParser):
    """Samba OptionParser, adding support for required=True on Options."""

    def __init__(self,
                 usage=None,
                 option_list=None,
                 option_class=Option,
                 version=None,
                 conflict_handler="error",
                 description=None,
                 formatter=None,
                 add_help_option=True,
                 prog=None,
                 epilog=None):
        """
        Ensure that option_class defaults to the Samba one.
        """
        super().__init__(usage, option_list, option_class, version,
                         conflict_handler, description, formatter,
                         add_help_option, prog, epilog)

    def check_values(self, values, args):
        """Loop through required options if value is missing raise exception."""
        missing = [option for option in self._get_all_options()
                   if option.required and getattr(values, option.dest) is None]
        if missing:
            raise OptionMissingError(missing)
        return super().check_values(values, args)
+
+
class OptionGroup(optparse.OptionGroup):
    """Samba OptionGroup base class.

    Provides a generic set_option method to be used as Option callback,
    so that one doesn't need to be created for every available Option.

    Also overrides the add_option method, so it correctly initialises
    the defaults on the OptionGroup.
    """

    def add_option(self, *args, **kwargs):
        """Override add_option so it applies defaults during constructor."""
        opt = super().add_option(*args, **kwargs)
        if opt.default == optparse.NO_DEFAULT:
            default = None
        else:
            default = opt.default
        self.set_option(opt, opt.get_opt_string(), default, self.parser)
        return opt

    def set_option(self, option, opt_str, arg, parser):
        """Callback to set the attribute based on the Option dest name."""
        name = option.dest
        if not name:
            # No explicit dest: derive one from the first long option string.
            name = option._long_opts[0][2:].replace("-", "_")
        setattr(self, name, arg)
+
+
class SambaOptions(OptionGroup):
    """General Samba-related command line options."""

    def __init__(self, parser):
        # NOTE(review): fault_setup appears to install Samba's crash
        # handlers; imported lazily here — confirm in samba/__init__.py.
        from samba import fault_setup
        fault_setup()

        # This removes passwords from the commandline via
        # setproctitle() but makes no change to python sys.argv so we
        # can continue to process as normal
        #
        # get_burnt_commandline returns None if no change is needed
        new_proctitle = get_burnt_commandline(sys.argv)
        if new_proctitle is not None:
            try:
                import setproctitle
                setproctitle.setproctitle(new_proctitle)

            except ModuleNotFoundError:
                msg = ("WARNING: Using passwords on command line is insecure. "
                       "Installing the setproctitle python module will hide "
                       "these from shortly after program start.\n")
                sys.stderr.write(msg)
                sys.stderr.flush()

        from samba.param import LoadParm
        super().__init__(parser, "Samba Common Options")
        self.add_option("-s", "--configfile", action="callback",
                        type=str, metavar="FILE", help="Configuration file",
                        callback=self._load_configfile)
        self.add_option("-d", "--debuglevel", action="callback",
                        type=str, metavar="DEBUGLEVEL", help="debug level",
                        callback=self._set_debuglevel)
        self.add_option("--option", action="callback",
                        type=str, metavar="OPTION",
                        help="set smb.conf option from command line",
                        callback=self._set_option)
        self.add_option("--realm", action="callback",
                        type=str, metavar="REALM", help="set the realm name",
                        callback=self._set_realm)
        # Path given via -s/--configfile; None means fall back to
        # SMB_CONF_PATH or the default in get_loadparm().
        self._configfile = None
        self._lp = LoadParm()
        self.realm = None

    def get_loadparm_path(self):
        """Return path to the smb.conf file specified on the command line."""
        return self._configfile

    def _load_configfile(self, option, opt_str, arg, parser):
        # Only remember the path; the file is loaded later in get_loadparm().
        self._configfile = arg

    def _set_debuglevel(self, option, opt_str, arg, parser):
        # Validation is delegated to loadparm; the raw string is kept on
        # parser.values.debuglevel for callers.
        try:
            self._lp.set('debug level', arg)
        except RuntimeError:
            raise optparse.OptionValueError(
                f"invalid -d/--debug value: '{arg}'")
        parser.values.debuglevel = arg

    def _set_realm(self, option, opt_str, arg, parser):
        # As above, loadparm validates the value before we record it.
        try:
            self._lp.set('realm', arg)
        except RuntimeError:
            raise optparse.OptionValueError(
                f"invalid --realm value: '{arg}'")
        self.realm = arg

    def _set_option(self, option, opt_str, arg, parser):
        # Accept "name=value" and apply it directly to the loadparm context.
        if arg.find('=') == -1:
            raise optparse.OptionValueError(
                "--option option takes a 'a=b' argument")
        a = arg.split('=', 1)
        try:
            self._lp.set(a[0], a[1])
        except Exception as e:
            raise optparse.OptionValueError(
                "invalid --option option value %r: %s" % (arg, e))

    def get_loadparm(self):
        """Return loadparm object with data specified on the command line.

        Precedence: -s/--configfile, then the SMB_CONF_PATH environment
        variable, then the built-in default configuration.
        """
        if self._configfile is not None:
            self._lp.load(self._configfile)
        elif os.getenv("SMB_CONF_PATH") is not None:
            self._lp.load(os.getenv("SMB_CONF_PATH"))
        else:
            self._lp.load_default()
        return self._lp
+
+
class Samba3Options(SambaOptions):
    """General Samba-related command line options with an s3 param."""

    def __init__(self, parser):
        super().__init__(parser)
        # Replace the LoadParm created by SambaOptions with the source3
        # parameter context.
        from samba.samba3 import param as s3param
        self._lp = s3param.get_context()
+
+
class HostOptions(OptionGroup):
    """Command line options for connecting to target host or database."""

    def __init__(self, parser):
        super().__init__(parser, "Host Options")

        # The generic OptionGroup.set_option callback stores the value as
        # the "H" attribute on this group.
        self.add_option("-H", "--URL",
                        help="LDB URL for database or target server",
                        type=str, metavar="URL", action="callback",
                        callback=self.set_option, dest="H")
+
+
class VersionOptions(OptionGroup):
    """Command line option for printing Samba version."""
    def __init__(self, parser):
        super().__init__(parser, "Version Options")
        self.add_option("-V", "--version", action="callback",
                        callback=self._display_version,
                        help="Display version number")

    def _display_version(self, option, opt_str, arg, parser):
        # Print samba.version and terminate the process immediately.
        import samba
        print(samba.version)
        sys.exit(0)
+
+
def parse_kerberos_arg_legacy(arg, opt_str):
    """Map a legacy yes/no/auto kerberos argument to a Credentials state.

    :param arg: the raw option value (case-insensitive)
    :param opt_str: option string, used in the error message
    :raise optparse.OptionValueError: on an unrecognised value
    """
    lowered = arg.lower()
    if lowered in ("yes", "true", "1"):
        return MUST_USE_KERBEROS
    if lowered in ("no", "false", "0"):
        return DONT_USE_KERBEROS
    if lowered == "auto":
        return AUTO_USE_KERBEROS
    raise optparse.OptionValueError("invalid %s option value: %s" %
                                    (opt_str, arg))
+
+
def parse_kerberos_arg(arg, opt_str):
    """Map a required/desired/off kerberos argument to a Credentials state.

    :param arg: the raw option value (case-insensitive)
    :param opt_str: option string, used in the error message
    :raise optparse.OptionValueError: on an unrecognised value
    """
    states = {
        'required': MUST_USE_KERBEROS,
        'desired': AUTO_USE_KERBEROS,
        'off': DONT_USE_KERBEROS,
    }
    try:
        return states[arg.lower()]
    except KeyError:
        raise optparse.OptionValueError("invalid %s option value: %s" %
                                        (opt_str, arg))
+
+
class CredentialsOptions(OptionGroup):
    """Command line options for specifying credentials."""

    def __init__(self, parser, special_name=None):
        # When special_name is set, every long option is prefixed with it
        # (e.g. --foo-username) and short options are dropped; see
        # _add_option below.
        self.special_name = special_name
        if special_name is not None:
            self.section = "Credentials Options (%s)" % special_name
        else:
            self.section = "Credentials Options"

        # Whether to install password prompting callbacks later on.
        self.ask_for_password = True
        self.ipaddress = None
        self.machine_pass = False
        super().__init__(parser, self.section)
        self._add_option("--simple-bind-dn", metavar="DN", action="callback",
                         callback=self._set_simple_bind_dn, type=str,
                         help="DN to use for a simple bind")
        self._add_option("--password", metavar="PASSWORD", action="callback",
                         help="Password", type=str, callback=self._set_password)
        self._add_option("-U", "--username", metavar="USERNAME",
                         action="callback", type=str,
                         help="Username", callback=self._parse_username)
        self._add_option("-W", "--workgroup", metavar="WORKGROUP",
                         action="callback", type=str,
                         help="Workgroup", callback=self._parse_workgroup)
        self._add_option("-N", "--no-pass", action="callback",
                         help="Don't ask for a password",
                         callback=self._set_no_password)
        # The empty first argument is filtered out by optparse's option
        # string handling; it only exists for symmetry with short options.
        self._add_option("", "--ipaddress", metavar="IPADDRESS",
                         action="callback", type=str,
                         help="IP address of server",
                         callback=self._set_ipaddress)
        self._add_option("-P", "--machine-pass",
                         action="callback",
                         help="Use stored machine account password",
                         callback=self._set_machine_pass)
        self._add_option("--use-kerberos", metavar="desired|required|off",
                         action="callback", type=str,
                         help="Use Kerberos authentication", callback=self._set_kerberos)
        self._add_option("--use-krb5-ccache", metavar="KRB5CCNAME",
                         action="callback", type=str,
                         help="Kerberos Credentials cache",
                         callback=self._set_krb5_ccache)
        self._add_option("-A", "--authentication-file", metavar="AUTHFILE",
                         action="callback", type=str,
                         help="Authentication file",
                         callback=self._set_auth_file)

        # LEGACY
        self._add_option("-k", "--kerberos", metavar="KERBEROS",
                         action="callback", type=str,
                         help="DEPRECATED: Migrate to --use-kerberos", callback=self._set_kerberos_legacy)
        self.creds = Credentials()

    def _add_option(self, *args1, **kwargs):
        # Without a special name, behave exactly like add_option.
        if self.special_name is None:
            return self.add_option(*args1, **kwargs)

        # With a special name, keep only long options, rewritten to carry
        # the prefix (e.g. --username -> --<special_name>-username).
        args2 = ()
        for a in args1:
            if not a.startswith("--"):
                continue
            args2 += (a.replace("--", "--%s-" % self.special_name),)
        self.add_option(*args2, **kwargs)

    def _parse_username(self, option, opt_str, arg, parser):
        # An explicit username overrides any earlier -P request.
        self.creds.parse_string(arg)
        self.machine_pass = False

    def _parse_workgroup(self, option, opt_str, arg, parser):
        self.creds.set_domain(arg)

    def _set_password(self, option, opt_str, arg, parser):
        # A password on the command line suppresses prompting and -P.
        self.creds.set_password(arg)
        self.ask_for_password = False
        self.machine_pass = False

    def _set_no_password(self, option, opt_str, arg, parser):
        self.ask_for_password = False

    def _set_machine_pass(self, option, opt_str, arg, parser):
        self.machine_pass = True

    def _set_ipaddress(self, option, opt_str, arg, parser):
        self.ipaddress = arg

    def _set_kerberos_legacy(self, option, opt_str, arg, parser):
        print('WARNING: The option -k|--kerberos is deprecated!')
        self.creds.set_kerberos_state(parse_kerberos_arg_legacy(arg, opt_str))

    def _set_kerberos(self, option, opt_str, arg, parser):
        self.creds.set_kerberos_state(parse_kerberos_arg(arg, opt_str))

    def _set_simple_bind_dn(self, option, opt_str, arg, parser):
        self.creds.set_bind_dn(arg)

    def _set_krb5_ccache(self, option, opt_str, arg, parser):
        # Using an explicit ccache implies kerberos is mandatory.
        self.creds.set_kerberos_state(MUST_USE_KERBEROS)
        self.creds.set_named_ccache(arg)

    def _set_auth_file(self, option, opt_str, arg, parser):
        # A missing file is silently ignored; prompting stays enabled then.
        if os.path.exists(arg):
            self.creds.parse_file(arg)
            self.ask_for_password = False
            self.machine_pass = False

    def get_credentials(self, lp, fallback_machine=False):
        """Obtain the credentials set on the command-line.

        :param lp: Loadparm object to use.
        :return: Credentials object
        """
        self.creds.guess(lp)
        if self.machine_pass:
            self.creds.set_machine_account(lp)
        elif self.ask_for_password:
            self.creds.set_cmdline_callbacks()

        # possibly fallback to using the machine account, if we have
        # access to the secrets db
        if fallback_machine and not self.creds.authentication_requested():
            try:
                self.creds.set_machine_account(lp)
            except Exception:
                pass

        return self.creds
+
+
class CredentialsOptionsDouble(CredentialsOptions):
    """Command line options for specifying credentials of two servers."""

    def __init__(self, parser):
        super().__init__(parser)
        self.no_pass2 = True
        self.add_option("--simple-bind-dn2", metavar="DN2", action="callback",
                        callback=self._set_simple_bind_dn2, type=str,
                        help="DN to use for a simple bind")
        self.add_option("--password2", metavar="PASSWORD2", action="callback",
                        help="Password", type=str,
                        callback=self._set_password2)
        self.add_option("--username2", metavar="USERNAME2",
                        action="callback", type=str,
                        help="Username for second server",
                        callback=self._parse_username2)
        self.add_option("--workgroup2", metavar="WORKGROUP2",
                        action="callback", type=str,
                        help="Workgroup for second server",
                        callback=self._parse_workgroup2)
        self.add_option("--no-pass2", action="store_true",
                        help="Don't ask for a password for the second server")
        self.add_option("--use-kerberos2", metavar="desired|required|off",
                        action="callback", type=str,
                        help="Use Kerberos authentication", callback=self._set_kerberos2)

        # LEGACY
        self.add_option("--kerberos2", metavar="KERBEROS2",
                        action="callback", type=str,
                        help="Use Kerberos", callback=self._set_kerberos2_legacy)
        self.creds2 = Credentials()

    def _parse_username2(self, option, opt_str, arg, parser):
        self.creds2.parse_string(arg)

    def _parse_workgroup2(self, option, opt_str, arg, parser):
        self.creds2.set_domain(arg)

    def _set_password2(self, option, opt_str, arg, parser):
        self.creds2.set_password(arg)
        self.no_pass2 = False

    def _set_kerberos2_legacy(self, option, opt_str, arg, parser):
        # BUG FIX: this previously called parse_kerberos_arg(), so the
        # legacy --kerberos2 option rejected the legacy yes/no/auto values
        # it exists to accept.  Use the legacy parser, matching
        # CredentialsOptions._set_kerberos_legacy.
        self.creds2.set_kerberos_state(parse_kerberos_arg_legacy(arg, opt_str))

    def _set_kerberos2(self, option, opt_str, arg, parser):
        self.creds2.set_kerberos_state(parse_kerberos_arg(arg, opt_str))

    def _set_simple_bind_dn2(self, option, opt_str, arg, parser):
        self.creds2.set_bind_dn(arg)

    def get_credentials2(self, lp, guess=True):
        """Obtain the credentials set on the command-line.

        :param lp: Loadparm object to use.
        :param guess: Try guess Credentials from environment
        :return: Credentials object
        """
        if guess:
            self.creds2.guess(lp)
        elif not self.creds2.get_username():
            self.creds2.set_anonymous()

        if self.no_pass2:
            self.creds2.set_cmdline_callbacks()
        return self.creds2
diff --git a/python/samba/gkdi.py b/python/samba/gkdi.py
new file mode 100644
index 0000000..4179263
--- /dev/null
+++ b/python/samba/gkdi.py
@@ -0,0 +1,397 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Catalyst.Net Ltd 2023
+#
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+#
+
+"""Group Key Distribution Service module"""
+
+from enum import Enum
+from functools import total_ordering
+from typing import Optional, Tuple
+
+from cryptography.hazmat.primitives import hashes
+
+from samba import _glue
+from samba.dcerpc import gkdi, misc
+from samba.ndr import ndr_pack, ndr_unpack
+from samba.nt_time import NtTime, NtTimeDelta
+
+
+uint64_max: int = 2**64 - 1
+
+L1_KEY_ITERATION: int = _glue.GKDI_L1_KEY_ITERATION
+L2_KEY_ITERATION: int = _glue.GKDI_L2_KEY_ITERATION
+KEY_CYCLE_DURATION: NtTimeDelta = _glue.GKDI_KEY_CYCLE_DURATION
+MAX_CLOCK_SKEW: NtTimeDelta = _glue.GKDI_MAX_CLOCK_SKEW
+
+KEY_LEN_BYTES = 64
+
+
class Algorithm(Enum):
    """Hash algorithm used for GKDI key derivation."""

    SHA1 = "SHA1"
    SHA256 = "SHA256"
    SHA384 = "SHA384"
    SHA512 = "SHA512"

    def algorithm(self) -> "hashes.HashAlgorithm":
        """Return the corresponding ``cryptography`` hash algorithm object."""
        if self is Algorithm.SHA1:
            return hashes.SHA1()

        if self is Algorithm.SHA256:
            return hashes.SHA256()

        if self is Algorithm.SHA384:
            return hashes.SHA384()

        if self is Algorithm.SHA512:
            return hashes.SHA512()

        # BUG FIX: the message was missing its f-prefix, so it printed the
        # literal text "{self}" instead of the member.
        raise RuntimeError(f"unknown hash algorithm {self}")

    def __repr__(self) -> str:
        return str(self)

    @staticmethod
    def from_kdf_parameters(kdf_param: Optional[bytes]) -> "Algorithm":
        """Derive the algorithm from an NDR-packed KdfParameters blob.

        A None or empty blob selects SHA256, the default used by Windows.
        """
        if not kdf_param:
            return Algorithm.SHA256  # the default used by Windows.

        kdf_parameters = ndr_unpack(gkdi.KdfParameters, kdf_param)
        return Algorithm(kdf_parameters.hash_algorithm)
+
+
class GkidType(Enum):
    """Classification of a GKID by which of its indices are specified."""

    DEFAULT = object()
    L0_SEED_KEY = object()
    L1_SEED_KEY = object()
    L2_SEED_KEY = object()

    def description(self) -> str:
        """Return a human-readable phrase naming this GKID type."""
        if self is GkidType.DEFAULT:
            return "a default GKID"

        if self is GkidType.L0_SEED_KEY:
            return "an L0 seed key"

        if self is GkidType.L1_SEED_KEY:
            return "an L1 seed key"

        if self is GkidType.L2_SEED_KEY:
            return "an L2 seed key"

        # BUG FIX: the message was missing its f-prefix, so it printed the
        # literal text "{self}" instead of the member.
        raise RuntimeError(f"unknown GKID type {self}")
+
+
class InvalidDerivation(Exception):
    """Raised for a seed-key derivation that is not defined."""
    pass
+
+
class UndefinedStartTime(Exception):
    """Raised when a GKID type has no defined start time."""
    pass
+
+
@total_ordering
class Gkid:
    """A Group Key Identifier: an (L0, L1, L2) index triple.

    An index of -1 means "unspecified" at that level; which levels are
    specified determines the GkidType (see gkid_type()).  Instances are
    immutable, hashable and totally ordered.
    """

    __slots__ = ["_l0_idx", "_l1_idx", "_l2_idx"]

    # 0x7FFF_FFFF == INT32_MAX: the largest value an L0 index may take.
    max_l0_idx = 0x7FFF_FFFF

    def __init__(self, l0_idx: int, l1_idx: int, l2_idx: int) -> None:
        """Validate and store the three indices.

        :raise ValueError: if any index is out of range, or if a level is
            unspecified (-1) while a deeper level is specified
        """
        if not -1 <= l0_idx <= Gkid.max_l0_idx:
            raise ValueError(f"L0 index {l0_idx} out of range")

        if not -1 <= l1_idx < L1_KEY_ITERATION:
            raise ValueError(f"L1 index {l1_idx} out of range")

        if not -1 <= l2_idx < L2_KEY_ITERATION:
            raise ValueError(f"L2 index {l2_idx} out of range")

        if l0_idx == -1 and l1_idx != -1:
            raise ValueError("invalid combination of negative and non‐negative indices")

        if l1_idx == -1 and l2_idx != -1:
            raise ValueError("invalid combination of negative and non‐negative indices")

        self._l0_idx = l0_idx
        self._l1_idx = l1_idx
        self._l2_idx = l2_idx

    @property
    def l0_idx(self) -> int:
        return self._l0_idx

    @property
    def l1_idx(self) -> int:
        return self._l1_idx

    @property
    def l2_idx(self) -> int:
        return self._l2_idx

    def gkid_type(self) -> GkidType:
        """Classify this GKID by the first unspecified (-1) index."""
        if self.l0_idx == -1:
            return GkidType.DEFAULT

        if self.l1_idx == -1:
            return GkidType.L0_SEED_KEY

        if self.l2_idx == -1:
            return GkidType.L1_SEED_KEY

        return GkidType.L2_SEED_KEY

    def wrapped_l1_idx(self) -> int:
        """Return the L1 index, mapping -1 to L1_KEY_ITERATION."""
        if self.l1_idx == -1:
            return L1_KEY_ITERATION

        return self.l1_idx

    def wrapped_l2_idx(self) -> int:
        """Return the L2 index, mapping -1 to L2_KEY_ITERATION."""
        if self.l2_idx == -1:
            return L2_KEY_ITERATION

        return self.l2_idx

    def derive_l1_seed_key(self) -> "Gkid":
        """Return the GKID of the previous L1 seed key.

        :raise InvalidDerivation: if this is not an L0/L1 seed key, or the
            L1 index is already 0
        """
        gkid_type = self.gkid_type()
        if (
            gkid_type is not GkidType.L0_SEED_KEY
            and gkid_type is not GkidType.L1_SEED_KEY
        ):
            raise InvalidDerivation(
                "Invalid attempt to derive an L1 seed key from"
                f" {gkid_type.description()}"
            )

        if self.l1_idx == 0:
            raise InvalidDerivation("No further derivation of L1 seed keys is possible")

        return Gkid(self.l0_idx, self.wrapped_l1_idx() - 1, self.l2_idx)

    def derive_l2_seed_key(self) -> "Gkid":
        """Return the GKID of the previous L2 seed key.

        :raise InvalidDerivation: if this is not an L1/L2 seed key, or the
            L2 index is already 0
        """
        gkid_type = self.gkid_type()
        if (
            gkid_type is not GkidType.L1_SEED_KEY
            and gkid_type is not GkidType.L2_SEED_KEY
        ):
            raise InvalidDerivation(
                f"Attempt to derive an L2 seed key from {gkid_type.description()}"
            )

        if self.l2_idx == 0:
            raise InvalidDerivation("No further derivation of L2 seed keys is possible")

        return Gkid(self.l0_idx, self.l1_idx, self.wrapped_l2_idx() - 1)

    def __str__(self) -> str:
        return f"Gkid({self.l0_idx}, {self.l1_idx}, {self.l2_idx})"

    def __repr__(self) -> str:
        cls = type(self)
        return (
            f"{cls.__qualname__}({repr(self.l0_idx)}, {repr(self.l1_idx)},"
            f" {repr(self.l2_idx)})"
        )

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Gkid):
            return NotImplemented

        return (self.l0_idx, self.l1_idx, self.l2_idx) == (
            other.l0_idx,
            other.l1_idx,
            other.l2_idx,
        )

    def __lt__(self, other: object) -> bool:
        # total_ordering derives the remaining comparisons from this and
        # __eq__.
        if not isinstance(other, Gkid):
            return NotImplemented

        def as_tuple(gkid: Gkid) -> Tuple[int, int, int]:
            l0_idx, l1_idx, l2_idx = gkid.l0_idx, gkid.l1_idx, gkid.l2_idx

            # DEFAULT is considered less than everything else, so that the
            # lexical ordering requirement in [MS-GKDI] 3.1.4.1.3 (GetKey) makes
            # sense.
            if gkid.gkid_type() is not GkidType.DEFAULT:
                # Use the wrapped indices so that L1 seed keys are considered
                # greater than their children L2 seed keys, and L0 seed keys are
                # considered greater than their children L1 seed keys.
                l1_idx = gkid.wrapped_l1_idx()
                l2_idx = gkid.wrapped_l2_idx()

            return l0_idx, l1_idx, l2_idx

        return as_tuple(self) < as_tuple(other)

    def __hash__(self) -> int:
        return hash((self.l0_idx, self.l1_idx, self.l2_idx))

    @staticmethod
    def default() -> "Gkid":
        """Return the default GKID (all indices unspecified)."""
        return Gkid(-1, -1, -1)

    @staticmethod
    def l0_seed_key(l0_idx: int) -> "Gkid":
        """Return an L0 seed key GKID for *l0_idx*."""
        return Gkid(l0_idx, -1, -1)

    @staticmethod
    def l1_seed_key(l0_idx: int, l1_idx: int) -> "Gkid":
        """Return an L1 seed key GKID for (*l0_idx*, *l1_idx*)."""
        return Gkid(l0_idx, l1_idx, -1)

    @staticmethod
    def from_nt_time(nt_time: NtTime) -> "Gkid":
        """Return the L2 seed key GKID covering the given NT time."""
        l0 = nt_time // (L1_KEY_ITERATION * L2_KEY_ITERATION * KEY_CYCLE_DURATION)
        l1 = (
            nt_time
            % (L1_KEY_ITERATION * L2_KEY_ITERATION * KEY_CYCLE_DURATION)
            // (L2_KEY_ITERATION * KEY_CYCLE_DURATION)
        )
        l2 = nt_time % (L2_KEY_ITERATION * KEY_CYCLE_DURATION) // KEY_CYCLE_DURATION

        return Gkid(l0, l1, l2)

    def start_nt_time(self) -> NtTime:
        """Return the NT time at which this L2 seed key's cycle begins.

        :raise UndefinedStartTime: if this is not an L2 seed key
        :raise OverflowError: if the computed time exceeds 64 bits
        """
        gkid_type = self.gkid_type()
        if gkid_type is not GkidType.L2_SEED_KEY:
            raise UndefinedStartTime(
                f"{gkid_type.description()} has no defined start time"
            )

        start_time = NtTime(
            (
                self.l0_idx * L1_KEY_ITERATION * L2_KEY_ITERATION
                + self.l1_idx * L2_KEY_ITERATION
                + self.l2_idx
            )
            * KEY_CYCLE_DURATION
        )

        if not 0 <= start_time <= uint64_max:
            raise OverflowError(f"start time {start_time} out of range")

        return start_time
+
+
class SeedKeyPair:
    """An optional L1/L2 seed-key pair bound to a GKID, algorithm and root key."""

    __slots__ = ["l1_key", "l2_key", "gkid", "hash_algorithm", "root_key_id"]

    def __init__(
        self,
        l1_key: Optional[bytes],
        l2_key: Optional[bytes],
        gkid: Gkid,
        hash_algorithm: Algorithm,
        root_key_id: misc.GUID,
    ) -> None:
        """Store the pair, validating that any supplied key has the right length.

        Raises ValueError when a non-None key is not KEY_LEN_BYTES long.
        """
        for label, key in (("L1", l1_key), ("L2", l2_key)):
            if key is not None and len(key) != KEY_LEN_BYTES:
                raise ValueError(
                    f"{label} key ({repr(key)}) must be {KEY_LEN_BYTES} bytes"
                )

        self.l1_key = l1_key
        self.l2_key = l2_key
        self.gkid = gkid
        self.hash_algorithm = hash_algorithm
        self.root_key_id = root_key_id

    @staticmethod
    def _hex_or_none(key: Optional[bytes]) -> Optional[str]:
        """Hex-encode a key for display, passing None through unchanged."""
        return None if key is None else key.hex()

    def __str__(self) -> str:
        l1_hex = self._hex_or_none(self.l1_key)
        l2_hex = self._hex_or_none(self.l2_key)
        return (
            f"SeedKeyPair(L1Key({l1_hex}), L2Key({l2_hex}), {self.gkid},"
            f" {self.root_key_id}, {self.hash_algorithm})"
        )

    def __repr__(self) -> str:
        name = type(self).__qualname__
        return (
            f"{name}({self.l1_key!r}, {self.l2_key!r},"
            f" {self.gkid!r}, {self.hash_algorithm!r},"
            f" {self.root_key_id!r})"
        )

    def _cmp_fields(self) -> tuple:
        """Tuple of the fields that participate in equality."""
        return (
            self.l1_key,
            self.l2_key,
            self.gkid,
            self.hash_algorithm,
            self.root_key_id,
        )

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, SeedKeyPair):
            return NotImplemented

        return self._cmp_fields() == other._cmp_fields()

    def __hash__(self) -> int:
        # The GUID is represented by its packed NDR bytes, a hashable
        # stand-in for the root_key_id object itself.
        return hash((
            self.l1_key,
            self.l2_key,
            self.gkid,
            self.hash_algorithm,
            ndr_pack(self.root_key_id),
        ))
+
+
class GroupKey:
    """A single derived group key bound to a GKID, algorithm and root key."""

    __slots__ = ["gkid", "key", "hash_algorithm", "root_key_id"]

    def __init__(
        self, key: bytes, gkid: "Gkid", hash_algorithm: "Algorithm", root_key_id: "misc.GUID"
    ) -> None:
        """Store the key material, validating its length.

        Raises ValueError when a non-None key is not KEY_LEN_BYTES long.
        """
        if key is not None and len(key) != KEY_LEN_BYTES:
            raise ValueError(f"Key ({repr(key)}) must be {KEY_LEN_BYTES} bytes")

        self.key = key
        self.gkid = gkid
        self.hash_algorithm = hash_algorithm
        self.root_key_id = root_key_id

    def __str__(self) -> str:
        # __init__ accepts key=None, so guard the hex() call (as
        # SeedKeyPair.__str__ does) instead of raising AttributeError.
        key_hex = None if self.key is None else self.key.hex()
        return (
            f"GroupKey(Key({key_hex}), {self.gkid}, {self.hash_algorithm},"
            f" {self.root_key_id})"
        )

    def __repr__(self) -> str:
        cls = type(self)
        return (
            f"{cls.__qualname__}({repr(self.key)}, {repr(self.gkid)},"
            f" {repr(self.hash_algorithm)}, {repr(self.root_key_id)})"
        )

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, GroupKey):
            return NotImplemented

        return (self.key, self.gkid, self.hash_algorithm, self.root_key_id) == (
            other.key,
            other.gkid,
            other.hash_algorithm,
            other.root_key_id,
        )

    def __hash__(self) -> int:
        # Hash the GUID via its packed NDR bytes, a hashable stand-in for
        # the root_key_id object itself.
        return hash(
            (self.key, self.gkid, self.hash_algorithm, ndr_pack(self.root_key_id))
        )
diff --git a/python/samba/gp/__init__.py b/python/samba/gp/__init__.py
new file mode 100644
index 0000000..af6e639
--- /dev/null
+++ b/python/samba/gp/__init__.py
@@ -0,0 +1,17 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) David Mulder <dmulder@samba.org> 2023
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from samba.gp.gpclass import get_gpo_list
diff --git a/python/samba/gp/gp_centrify_crontab_ext.py b/python/samba/gp/gp_centrify_crontab_ext.py
new file mode 100644
index 0000000..b1055a1
--- /dev/null
+++ b/python/samba/gp/gp_centrify_crontab_ext.py
@@ -0,0 +1,135 @@
+# gp_centrify_crontab_ext samba gpo policy
+# Copyright (C) David Mulder <dmulder@suse.com> 2022
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+from samba.gp.gpclass import gp_pol_ext, drop_privileges, gp_file_applier, \
+ gp_misc_applier
+from tempfile import NamedTemporaryFile
+from samba.gp.gp_scripts_ext import fetch_crontab, install_user_crontab
+
+intro = '''
+### autogenerated by samba
+#
+# This file is generated by the gp_centrify_crontab_ext Group Policy
+# Client Side Extension. To modify the contents of this file,
+# modify the appropriate Group Policy objects which apply
+# to this machine. DO NOT MODIFY THIS FILE DIRECTLY.
+#
+
+'''
+end = '''
+### autogenerated by samba ###
+'''
+
class gp_centrify_crontab_ext(gp_pol_ext, gp_file_applier):
    """Apply Centrify CrontabEntries machine policy as a file in cron.d."""

    def __str__(self):
        return 'Centrify/CrontabEntries'

    def process_group_policy(self, deleted_gpo_list, changed_gpo_list,
                             cdir=None):
        """Apply or unapply machine crontab entries from Registry.pol.

        cdir optionally overrides the /etc/cron.d target directory.
        """
        for guid, settings in deleted_gpo_list:
            if str(self) in settings:
                for attribute, script in settings[str(self)].items():
                    self.unapply(guid, attribute, script)

        for gpo in changed_gpo_list:
            if gpo.file_sys_path:
                section = \
                    'Software\\Policies\\Centrify\\UnixSettings\\CrontabEntries'
                pol_file = 'MACHINE/Registry.pol'
                path = os.path.join(gpo.file_sys_path, pol_file)
                pol_conf = self.parse(path)
                if not pol_conf:
                    continue
                entries = []
                for e in pol_conf.entries:
                    if e.keyname == section and e.data.strip():
                        entries.append(e.data)
                def applier_func(entries):
                    # Write every entry into one autogenerated file in the
                    # cron.d directory and report the file(s) created.
                    cron_dir = '/etc/cron.d' if not cdir else cdir
                    with NamedTemporaryFile(prefix='gp_', mode="w+",
                                            delete=False, dir=cron_dir) as f:
                        contents = intro
                        for entry in entries:
                            contents += '%s\n' % entry
                        contents += end
                        f.write(contents)
                    return [f.name]
                attribute = self.generate_attribute(gpo.name)
                value_hash = self.generate_value_hash(*entries)
                self.apply(gpo.name, attribute, value_hash, applier_func,
                           entries)

                # Remove scripts for this GPO which are no longer applied.
                # clean() expects a list of attributes to keep, matching the
                # usage in gp_centrify_sudoers_ext.
                self.clean(gpo.name, keep=[attribute])

    def rsop(self, gpo, target='MACHINE'):
        """Return the crontab entries this GPO would apply."""
        output = {}
        section = 'Software\\Policies\\Centrify\\UnixSettings\\CrontabEntries'
        pol_file = '%s/Registry.pol' % target
        if gpo.file_sys_path:
            path = os.path.join(gpo.file_sys_path, pol_file)
            pol_conf = self.parse(path)
            if not pol_conf:
                return output
            for e in pol_conf.entries:
                if e.keyname == section and e.data.strip():
                    if str(self) not in output.keys():
                        output[str(self)] = []
                    output[str(self)].append(e.data)
        return output
+
class gp_user_centrify_crontab_ext(gp_centrify_crontab_ext, gp_misc_applier):
    """Per-user variant: entries are installed into the user's own crontab."""

    def unapply(self, guid, attribute, entry):
        """Remove *entry* from the user's crontab and drop its cache record."""
        others, entries = fetch_crontab(self.username)
        if entry in entries:
            entries.remove(entry)
        install_user_crontab(self.username, others, entries)
        self.cache_remove_attribute(guid, attribute)

    def apply(self, guid, attribute, entry):
        """Add *entry* to the user's crontab unless already applied and cached."""
        old_val = self.cache_get_attribute_value(guid, attribute)
        others, entries = fetch_crontab(self.username)
        if not old_val or entry not in entries:
            entries.append(entry)
            install_user_crontab(self.username, others, entries)
            self.cache_add_attribute(guid, attribute, entry)

    def process_group_policy(self, deleted_gpo_list, changed_gpo_list):
        """Apply/unapply per-user crontab entries from USER/Registry.pol."""
        for guid, settings in deleted_gpo_list:
            if str(self) in settings:
                for attribute, entry in settings[str(self)].items():
                    self.unapply(guid, attribute, entry)

        for gpo in changed_gpo_list:
            if gpo.file_sys_path:
                section = \
                    'Software\\Policies\\Centrify\\UnixSettings\\CrontabEntries'
                pol_file = 'USER/Registry.pol'
                path = os.path.join(gpo.file_sys_path, pol_file)
                # NOTE(review): parsing runs via drop_privileges('root', ...);
                # presumably required to read the policy file — confirm.
                pol_conf = drop_privileges('root', self.parse, path)
                if not pol_conf:
                    continue
                attrs = []
                for e in pol_conf.entries:
                    if e.keyname == section and e.data.strip():
                        attribute = self.generate_attribute(e.data)
                        attrs.append(attribute)
                        self.apply(gpo.name, attribute, e.data)
                # Unapply any entries no longer present in the policy
                self.clean(gpo.name, keep=attrs)

    def rsop(self, gpo):
        """Report entries from the USER half of the GPO."""
        return super().rsop(gpo, target='USER')
diff --git a/python/samba/gp/gp_centrify_sudoers_ext.py b/python/samba/gp/gp_centrify_sudoers_ext.py
new file mode 100644
index 0000000..4752f1e
--- /dev/null
+++ b/python/samba/gp/gp_centrify_sudoers_ext.py
@@ -0,0 +1,80 @@
+# gp_centrify_sudoers_ext samba gpo policy
+# Copyright (C) David Mulder <dmulder@suse.com> 2022
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+from samba.gp.gpclass import gp_pol_ext, gp_file_applier
+from samba.gp.gp_sudoers_ext import sudo_applier_func
+
def ext_enabled(entries):
    """Return True if the first 'sudo.enabled' policy value turns sudo on.

    The first matching entry decides; a missing entry means disabled.
    """
    section = 'Software\\Policies\\Centrify\\UnixSettings'
    match = next(
        (e for e in entries
         if e.keyname == section and e.valuename == 'sudo.enabled'),
        None,
    )
    return match is not None and match.data == 1
+
class gp_centrify_sudoers_ext(gp_pol_ext, gp_file_applier):
    """Apply Centrify SuDo machine policy as files under /etc/sudoers.d."""

    def __str__(self):
        return 'Centrify/Sudo Rights'

    def process_group_policy(self, deleted_gpo_list, changed_gpo_list,
                             sdir='/etc/sudoers.d'):
        """Apply or unapply sudoers entries from Registry.pol.

        Acts only when the policy's 'sudo.enabled' switch is on.
        """
        for guid, settings in deleted_gpo_list:
            if str(self) in settings:
                for attribute, sudoers in settings[str(self)].items():
                    self.unapply(guid, attribute, sudoers)

        for gpo in changed_gpo_list:
            if gpo.file_sys_path:
                section = 'Software\\Policies\\Centrify\\UnixSettings\\SuDo'
                pol_file = 'MACHINE/Registry.pol'
                path = os.path.join(gpo.file_sys_path, pol_file)
                pol_conf = self.parse(path)
                if not pol_conf or not ext_enabled(pol_conf.entries):
                    continue
                sudo_entries = []
                for e in pol_conf.entries:
                    if e.keyname == section and e.data.strip():
                        # Skip '**delvals.' housekeeping values; they are
                        # not real sudoers lines.
                        if '**delvals.' in e.valuename:
                            continue
                        sudo_entries.append(e.data)
                # Each GPO applies only one set of sudoers, in a
                # set of files, so the attribute does not need uniqueness.
                attribute = self.generate_attribute(gpo.name, *sudo_entries)
                # The value hash is generated from the sudo_entries, ensuring
                # any changes to this GPO will cause the files to be rewritten.
                value_hash = self.generate_value_hash(*sudo_entries)
                self.apply(gpo.name, attribute, value_hash, sudo_applier_func,
                           sdir, sudo_entries)
                # Cleanup any old entries that are no longer part of the policy
                self.clean(gpo.name, keep=[attribute])

    def rsop(self, gpo):
        """Return the sudoers entries this GPO would apply."""
        output = {}
        section = 'Software\\Policies\\Centrify\\UnixSettings\\SuDo'
        pol_file = 'MACHINE/Registry.pol'
        if gpo.file_sys_path:
            path = os.path.join(gpo.file_sys_path, pol_file)
            pol_conf = self.parse(path)
            if not pol_conf:
                return output
            for e in pol_conf.entries:
                if e.keyname == section and e.data.strip():
                    if '**delvals.' in e.valuename:
                        continue
                    if str(self) not in output.keys():
                        output[str(self)] = []
                    output[str(self)].append(e.data)
        return output
diff --git a/python/samba/gp/gp_cert_auto_enroll_ext.py b/python/samba/gp/gp_cert_auto_enroll_ext.py
new file mode 100644
index 0000000..9b743cb
--- /dev/null
+++ b/python/samba/gp/gp_cert_auto_enroll_ext.py
@@ -0,0 +1,572 @@
+# gp_cert_auto_enroll_ext samba group policy
+# Copyright (C) David Mulder <dmulder@suse.com> 2021
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import operator
+import requests
+from samba.gp.gpclass import gp_pol_ext, gp_applier, GPOSTATE
+from samba import Ldb
+from ldb import SCOPE_SUBTREE, SCOPE_BASE
+from samba.auth import system_session
+from samba.gp.gpclass import get_dc_hostname
+import base64
+from shutil import which
+from subprocess import Popen, PIPE
+import re
+import json
+from samba.gp.util.logging import log
+import struct
+try:
+ from cryptography.hazmat.primitives.serialization.pkcs7 import \
+ load_der_pkcs7_certificates
+except ModuleNotFoundError:
+ def load_der_pkcs7_certificates(x): return []
+ log.error('python cryptography missing pkcs7 support. '
+ 'Certificate chain parsing will fail')
+from cryptography.hazmat.primitives.serialization import Encoding
+from cryptography.x509 import load_der_x509_certificate
+from cryptography.hazmat.backends import default_backend
+from samba.common import get_string
+
+cert_wrap = b"""
+-----BEGIN CERTIFICATE-----
+%s
+-----END CERTIFICATE-----"""
+endpoint_re = '(https|HTTPS)://(?P<server>[a-zA-Z0-9.-]+)/ADPolicyProvider' + \
+ '_CEP_(?P<auth>[a-zA-Z]+)/service.svc/CEP'
+
+global_trust_dirs = ['/etc/pki/trust/anchors', # SUSE
+ '/etc/pki/ca-trust/source/anchors', # RHEL/Fedora
+ '/usr/local/share/ca-certificates'] # Debian/Ubuntu
+
def octet_string_to_objectGUID(data):
    """Convert a 16-byte binary objectGUID to its string representation.

    The first three GUID fields are little-endian and the remainder
    big-endian, per the standard GUID wire layout. Fixed-width formats
    are used so that leading zeros in each field are preserved — the
    previous '%02x' formats dropped them, producing malformed GUID
    strings that could never match a well-formed PolicyID.
    """
    parts = struct.unpack('<LHH', data[0:8]) + struct.unpack('>HHL', data[8:16])
    return '%08x-%04x-%04x-%04x-%04x%08x' % parts
+
+
def group_and_sort_end_point_information(end_point_information):
    """Group and Sort End Point Information.

    [MS-CAESO] 4.4.5.3.2.3
    Group the CertificateEnrollmentPolicyEndPoint instances by their
    EndPoint.PolicyID, then order each group by preference: cheapest
    endpoints first and, at equal cost, Kerberos authentication before
    Anonymous before anything else.
    """
    def auth_preference(end_point):
        # 0x2 - Kerberos, 0x1 - Anonymous; everything else is arbitrary.
        flags = end_point['AuthFlags']
        if flags == 0x2:
            return 0
        if flags == 0x1:
            return 1
        return 2

    groups = {}
    for end_point in end_point_information:
        groups.setdefault(end_point['PolicyID'], []).append(end_point)

    # A single stable sort on (cost, auth preference) is equivalent to
    # sorting by cost and then reordering each equal-cost run by auth.
    for group in groups.values():
        group.sort(key=lambda ep: (ep['Cost'], auth_preference(ep)))

    return list(groups.values())
+
def obtain_end_point_information(entries):
    """Obtain End Point Information.

    [MS-CAESO] 4.4.5.3.2.2
    In this step autoenrollment initializes the
    CertificateEnrollmentPolicyEndPoints table.

    Returns the grouped/sorted endpoint list (see
    group_and_sort_end_point_information), or {} if any endpoint URL
    fails to parse.
    """
    end_point_information = {}
    section = 'Software\\Policies\\Microsoft\\Cryptography\\PolicyServers\\'
    for e in entries:
        if not e.keyname.startswith(section):
            continue
        # Registry keys are laid out as <section><endpoint name>; collect
        # each endpoint's values into its own dict.
        name = e.keyname.replace(section, '')
        if name not in end_point_information.keys():
            end_point_information[name] = {}
        end_point_information[name][e.valuename] = e.data
    for ca in end_point_information.values():
        # Derive the CA name, hostname and auth scheme from HTTPS endpoint
        # URLs; 'ldap:' endpoints are handled separately by the caller.
        m = re.match(endpoint_re, ca['URL'])
        if m:
            name = '%s-CA' % m.group('server').replace('.', '-')
            ca['name'] = name
            ca['hostname'] = m.group('server')
            ca['auth'] = m.group('auth')
        elif ca['URL'].lower() != 'ldap:':
            edata = { 'endpoint': ca['URL'] }
            log.error('Failed to parse the endpoint', edata)
            return {}
    end_point_information = \
        group_and_sort_end_point_information(end_point_information.values())
    return end_point_information
+
def fetch_certification_authorities(ldb):
    """Initialize CAs.

    [MS-CAESO] 4.4.5.3.1.2

    Returns a list of dicts with 'name', 'hostname' and base64-encoded
    'cACertificate' for each pKIEnrollmentService object found, or an
    empty list when none exist.
    """
    result = []
    basedn = ldb.get_default_basedn()
    # Autoenrollment MUST do an LDAP search for the CA information
    # (pKIEnrollmentService) objects under the following container:
    dn = 'CN=Enrollment Services,CN=Public Key Services,CN=Services,CN=Configuration,%s' % basedn
    attrs = ['cACertificate', 'cn', 'dNSHostName']
    expr = '(objectClass=pKIEnrollmentService)'
    res = ldb.search(dn, SCOPE_SUBTREE, expr, attrs)
    if len(res) == 0:
        return result
    for es in res:
        data = { 'name': get_string(es['cn'][0]),
                 'hostname': get_string(es['dNSHostName'][0]),
                 'cACertificate': get_string(base64.b64encode(es['cACertificate'][0]))
        }
        result.append(data)
    return result
+
def fetch_template_attrs(ldb, name, attrs=None):
    """Look up attributes of the certificate template *name* in AD.

    Falls back to a 2048-bit minimum key size when the template is not
    found or does not carry msPKI-Minimal-Key-Size.
    """
    if attrs is None:
        attrs = ['msPKI-Minimal-Key-Size']
    basedn = ldb.get_default_basedn()
    dn = 'CN=Certificate Templates,CN=Public Key Services,CN=Services,CN=Configuration,%s' % basedn
    expr = '(cn=%s)' % name
    res = ldb.search(dn, SCOPE_SUBTREE, expr, attrs)
    if len(res) == 1 and 'msPKI-Minimal-Key-Size' in res[0]:
        return dict(res[0])
    else:
        return {'msPKI-Minimal-Key-Size': ['2048']}
+
def format_root_cert(cert):
    """Wrap a base64 certificate body in PEM BEGIN/END CERTIFICATE markers.

    The body is re-flowed to 64-character lines; returns bytes.
    """
    return cert_wrap % re.sub(b"(.{64})", b"\\1\n", cert.encode(), 0, re.DOTALL)
+
def find_cepces_submit():
    """Return the path of the cepces-submit helper, or None if absent.

    Searches PATH plus the distribution-specific certmonger helper
    directories.
    """
    # os.environ.get('PATH') may be None in a stripped environment, which
    # would make ':'.join() raise TypeError; default to the empty string.
    certmonger_dirs = [os.environ.get("PATH", ""), '/usr/lib/certmonger',
                       '/usr/libexec/certmonger']
    return which('cepces-submit', path=':'.join(certmonger_dirs))
+
def get_supported_templates(server):
    """Query cepces-submit for the certificate templates *server* offers.

    Returns a list of template names (as bytes); returns [] when
    cepces-submit is not installed. On a helper failure the error is
    logged and whatever was written to stdout is still returned.
    """
    cepces_submit = find_cepces_submit()
    if not cepces_submit:
        log.error('Failed to find cepces-submit')
        return []

    # Work on a copy: assigning into os.environ directly would leak
    # CERTMONGER_OPERATION into every subsequent subprocess call.
    env = os.environ.copy()
    env['CERTMONGER_OPERATION'] = 'GET-SUPPORTED-TEMPLATES'
    p = Popen([cepces_submit, '--server=%s' % server, '--auth=Kerberos'],
              env=env, stdout=PIPE, stderr=PIPE)
    out, err = p.communicate()
    if p.returncode != 0:
        data = {'Error': err.decode()}
        log.error('Failed to fetch the list of supported templates.', data)
    return out.strip().split()
+
+
def getca(ca, url, trust_dir):
    """Fetch Certificate Chain from the CA.

    Writes the fetched certificate(s) as PEM files under trust_dir and
    returns the list of file names written. Falls back to the server
    certificate from AD when the NDES endpoint is unavailable.
    """
    root_cert = os.path.join(trust_dir, '%s.crt' % ca['name'])
    root_certs = []

    try:
        r = requests.get(url=url, params={'operation': 'GetCACert',
                                          'message': 'CAIdentifier'})
    except requests.exceptions.ConnectionError:
        log.warn('Could not connect to Network Device Enrollment Service.')
        r = None
    if r is None or r.content == b'' or r.headers['Content-Type'] == 'text/html':
        log.warn('Unable to fetch root certificates (requires NDES).')
        if 'cACertificate' in ca:
            log.warn('Installing the server certificate only.')
            der_certificate = base64.b64decode(ca['cACertificate'])
            # Older versions of load_der_x509_certificate require a
            # backend parameter.
            try:
                cert = load_der_x509_certificate(der_certificate)
            except TypeError:
                cert = load_der_x509_certificate(der_certificate,
                                                 default_backend())
            cert_data = cert.public_bytes(Encoding.PEM)
            with open(root_cert, 'wb') as w:
                w.write(cert_data)
            root_certs.append(root_cert)
        return root_certs

    if r.headers['Content-Type'] == 'application/x-x509-ca-cert':
        # Older versions of load_der_x509_certificate require a backend param
        try:
            cert = load_der_x509_certificate(r.content)
        except TypeError:
            cert = load_der_x509_certificate(r.content, default_backend())
        cert_data = cert.public_bytes(Encoding.PEM)
        with open(root_cert, 'wb') as w:
            w.write(cert_data)
        root_certs.append(root_cert)
    elif r.headers['Content-Type'] == 'application/x-x509-ca-ra-cert':
        # A PKCS#7 bundle: write each certificate of the chain to its own
        # numbered .crt file.
        certs = load_der_pkcs7_certificates(r.content)
        for i in range(0, len(certs)):
            cert = certs[i].public_bytes(Encoding.PEM)
            filename, extension = root_cert.rsplit('.', 1)
            dest = '%s.%d.%s' % (filename, i, extension)
            with open(dest, 'wb') as w:
                w.write(cert)
            root_certs.append(dest)
    else:
        log.warn('getca: Wrong (or missing) MIME content type')

    return root_certs
+
+
def find_global_trust_dir():
    """Return the global trust dir using known paths from various Linux distros."""
    existing = (d for d in global_trust_dirs if os.path.isdir(d))
    # Fall back to the first known path when none of them exists.
    return next(existing, global_trust_dirs[0])
+
def update_ca_command():
    """Return the command to update the CA trust store, or None if neither
    update-ca-certificates nor update-ca-trust is installed."""
    for candidate in ('update-ca-certificates', 'update-ca-trust'):
        path = which(candidate)
        if path:
            return path
    return None
+
def changed(new_data, old_data):
    """Return True if any key present in both dicts has changed.

    Keys appearing in only one of the dicts are ignored.
    """
    for key, new_value in new_data.items():
        if key in old_data and old_data[key] != new_value:
            return True
    return False
+
def cert_enroll(ca, ldb, trust_dir, private_dir, auth='Kerberos'):
    """Install the root certificate chain.

    Fetches the CA chain, links it into the system trust store, and sets
    up certmonger/cepces tracking for every supported template. Returns
    the JSON-encoded record of created files and templates, which is
    cached for later unapply.
    """
    # 'data' is the unapply record; it must keep its 'files'/'templates'
    # keys for the whole function. Error payloads below use 'edata'
    # (as elsewhere in this module) so they never clobber it.
    data = dict({'files': [], 'templates': []}, **ca)
    url = 'http://%s/CertSrv/mscep/mscep.dll/pkiclient.exe?' % ca['hostname']

    log.info("Try to get root or server certificates")

    root_certs = getca(ca, url, trust_dir)
    data['files'].extend(root_certs)
    global_trust_dir = find_global_trust_dir()
    for src in root_certs:
        # Symlink the certs to global trust dir
        dst = os.path.join(global_trust_dir, os.path.basename(src))
        try:
            os.symlink(src, dst)
            data['files'].append(dst)
            log.info("Created symlink: %s -> %s" % (src, dst))
        except PermissionError:
            log.warn('Failed to symlink root certificate to the'
                     ' admin trust anchors')
        except FileNotFoundError:
            log.warn('Failed to symlink root certificate to the'
                     ' admin trust anchors.'
                     ' The directory was not found', global_trust_dir)
        except FileExistsError:
            # If we're simply downloading a renewed cert, the symlink
            # already exists. Ignore the FileExistsError. Preserve the
            # existing symlink in the unapply data.
            data['files'].append(dst)

    update = update_ca_command()
    log.info("Running %s" % (update))
    if update is not None:
        ret = Popen([update]).wait()
        if ret != 0:
            log.error('Failed to run %s' % (update))

    # Setup Certificate Auto Enrollment
    getcert = which('getcert')
    cepces_submit = find_cepces_submit()
    if getcert is not None and cepces_submit is not None:
        p = Popen([getcert, 'add-ca', '-c', ca['name'], '-e',
                   '%s --server=%s --auth=%s' % (cepces_submit,
                                                 ca['hostname'], auth)],
                  stdout=PIPE, stderr=PIPE)
        out, err = p.communicate()
        log.debug(out.decode())
        if p.returncode != 0:
            if p.returncode == 2:
                log.info('The CA [%s] already exists' % ca['name'])
            else:
                edata = {'Error': err.decode(), 'CA': ca['name']}
                log.error('Failed to add Certificate Authority', edata)

        supported_templates = get_supported_templates(ca['hostname'])
        for template in supported_templates:
            attrs = fetch_template_attrs(ldb, template)
            nickname = '%s.%s' % (ca['name'], template.decode())
            keyfile = os.path.join(private_dir, '%s.key' % nickname)
            certfile = os.path.join(trust_dir, '%s.crt' % nickname)
            p = Popen([getcert, 'request', '-c', ca['name'],
                       '-T', template.decode(),
                       '-I', nickname, '-k', keyfile, '-f', certfile,
                       '-g', attrs['msPKI-Minimal-Key-Size'][0]],
                      stdout=PIPE, stderr=PIPE)
            out, err = p.communicate()
            log.debug(out.decode())
            if p.returncode != 0:
                if p.returncode == 2:
                    log.info('The template [%s] already exists' % (nickname))
                else:
                    edata = {'Error': err.decode(), 'Certificate': nickname}
                    log.error('Failed to request certificate', edata)

            data['files'].extend([keyfile, certfile])
            data['templates'].append(nickname)
        if update is not None:
            ret = Popen([update]).wait()
            if ret != 0:
                log.error('Failed to run %s' % (update))
    else:
        log.warn('certmonger and cepces must be installed for ' +
                 'certificate auto enrollment to work')
    return json.dumps(data)
+
class gp_cert_auto_enroll_ext(gp_pol_ext, gp_applier):
    """Certificate auto-enrollment policy ([MS-CAESO]).

    Installs root certificate chains from the domain's certification
    authorities and registers certmonger/cepces tracking for every
    supported certificate template.
    """

    def __str__(self):
        return r'Cryptography\AutoEnrollment'

    def unapply(self, guid, attribute, value):
        """Remove a CA: untrack its certificates and delete its files."""
        ca_cn = base64.b64decode(attribute)
        data = json.loads(value)
        getcert = which('getcert')
        if getcert is not None:
            Popen([getcert, 'remove-ca', '-c', ca_cn]).wait()
            for nickname in data['templates']:
                Popen([getcert, 'stop-tracking', '-i', nickname]).wait()
        for f in data['files']:
            # (previously this existence check was accidentally duplicated)
            if os.path.exists(f):
                os.unlink(f)
        self.cache_remove_attribute(guid, attribute)

    def apply(self, guid, ca, applier_func, *args, **kwargs):
        """Apply enrollment for one CA, re-applying only when changed."""
        attribute = base64.b64encode(ca['name'].encode()).decode()
        # If the policy has changed, unapply, then apply new policy
        old_val = self.cache_get_attribute_value(guid, attribute)
        old_data = json.loads(old_val) if old_val is not None else {}
        templates = ['%s.%s' % (ca['name'], t.decode()) for t in get_supported_templates(ca['hostname'])] \
            if old_val is not None else []
        new_data = { 'templates': templates, **ca }
        if changed(new_data, old_data) or self.cache_get_apply_state() == GPOSTATE.ENFORCE:
            self.unapply(guid, attribute, old_val)
        # If policy is already applied and unchanged, skip application
        if old_val is not None and not changed(new_data, old_data) and \
                self.cache_get_apply_state() != GPOSTATE.ENFORCE:
            return

        # Apply the policy and log the changes
        data = applier_func(*args, **kwargs)
        self.cache_add_attribute(guid, attribute, data)

    def process_group_policy(self, deleted_gpo_list, changed_gpo_list,
                             trust_dir=None, private_dir=None):
        """Process the AEPolicy value of each GPO's Registry.pol."""
        if trust_dir is None:
            trust_dir = self.lp.cache_path('certs')
        if private_dir is None:
            private_dir = self.lp.private_path('certs')
        if not os.path.exists(trust_dir):
            os.mkdir(trust_dir, mode=0o755)
        if not os.path.exists(private_dir):
            os.mkdir(private_dir, mode=0o700)

        for guid, settings in deleted_gpo_list:
            if str(self) in settings:
                for ca_cn_enc, data in settings[str(self)].items():
                    self.unapply(guid, ca_cn_enc, data)

        for gpo in changed_gpo_list:
            if gpo.file_sys_path:
                section = r'Software\Policies\Microsoft\Cryptography\AutoEnrollment'
                pol_file = 'MACHINE/Registry.pol'
                path = os.path.join(gpo.file_sys_path, pol_file)
                pol_conf = self.parse(path)
                if not pol_conf:
                    continue
                for e in pol_conf.entries:
                    if e.keyname == section and e.valuename == 'AEPolicy':
                        # This policy applies as specified in [MS-CAESO] 4.4.5.1
                        if e.data & 0x8000:
                            continue # The policy is disabled
                        enroll = e.data & 0x1 == 0x1
                        # The 0x2 (manage) and 0x4 (retrieve pending)
                        # flags are not implemented here.
                        if enroll:
                            ca_names = self.__enroll(gpo.name,
                                                     pol_conf.entries,
                                                     trust_dir, private_dir)

                            # Cleanup any old CAs that have been removed
                            ca_attrs = [base64.b64encode(n.encode()).decode()
                                        for n in ca_names]
                            self.clean(gpo.name, keep=ca_attrs)
                        else:
                            # If enrollment has been disabled for this GPO,
                            # remove any existing policy
                            ca_attrs = \
                                self.cache_get_all_attribute_values(gpo.name)
                            self.clean(gpo.name, remove=list(ca_attrs.keys()))

    def __read_cep_data(self, guid, ldb, end_point_information,
                        trust_dir, private_dir):
        """Read CEP Data.

        [MS-CAESO] 4.4.5.3.2.4
        In this step autoenrollment initializes instances of the
        CertificateEnrollmentPolicy by accessing end points associated with CEP
        groups created in the previous step.

        Returns the names of every CA that was applied.
        """
        # Accumulate across all groups. Previously this list was reset
        # inside the loop (and left unbound if every group was skipped),
        # dropping the CA names of all but the last group.
        ca_names = []
        # For each group created in the previous step:
        for end_point_group in end_point_information:
            # Pick an arbitrary instance of the
            # CertificateEnrollmentPolicyEndPoint from the group
            e = end_point_group[0]

            # If this instance does not have the AutoEnrollmentEnabled flag set
            # in the EndPoint.Flags, continue with the next group.
            if not e['Flags'] & 0x10:
                continue

            # If the current group contains a
            # CertificateEnrollmentPolicyEndPoint instance with EndPoint.URI
            # equal to "LDAP":
            if any([e['URL'] == 'LDAP:' for e in end_point_group]):
                # Perform an LDAP search to read the value of the objectGuid
                # attribute of the root object of the forest root domain NC. If
                # any errors are encountered, continue with the next group.
                res = ldb.search('', SCOPE_BASE, '(objectClass=*)',
                                 ['rootDomainNamingContext'])
                if len(res) != 1:
                    continue
                res2 = ldb.search(res[0]['rootDomainNamingContext'][0],
                                  SCOPE_BASE, '(objectClass=*)',
                                  ['objectGUID'])
                if len(res2) != 1:
                    continue

                # Compare the value read in the previous step to the
                # EndPoint.PolicyId datum CertificateEnrollmentPolicyEndPoint
                # instance. If the values do not match, continue with the next
                # group.
                objectGUID = '{%s}' % \
                    octet_string_to_objectGUID(res2[0]['objectGUID'][0]).upper()
                if objectGUID != e['PolicyID']:
                    continue

            # For each CertificateEnrollmentPolicyEndPoint instance for that
            # group:
            for ca in end_point_group:
                # If EndPoint.URI equals "LDAP":
                if ca['URL'] == 'LDAP:':
                    # This is a basic configuration.
                    cas = fetch_certification_authorities(ldb)
                    for _ca in cas:
                        self.apply(guid, _ca, cert_enroll, _ca, ldb, trust_dir,
                                   private_dir)
                        ca_names.append(_ca['name'])
                # If EndPoint.URI starts with "HTTPS//":
                elif ca['URL'].lower().startswith('https://'):
                    self.apply(guid, ca, cert_enroll, ca, ldb, trust_dir,
                               private_dir, auth=ca['auth'])
                    ca_names.append(ca['name'])
                else:
                    edata = { 'endpoint': ca['URL'] }
                    log.error('Unrecognized endpoint', edata)
        return ca_names

    def __enroll(self, guid, entries, trust_dir, private_dir):
        """Enroll against CEP endpoints if configured, else plain LDAP CAs."""
        url = 'ldap://%s' % get_dc_hostname(self.creds, self.lp)
        ldb = Ldb(url=url, session_info=system_session(),
                  lp=self.lp, credentials=self.creds)

        ca_names = []
        end_point_information = obtain_end_point_information(entries)
        if len(end_point_information) > 0:
            ca_names.extend(self.__read_cep_data(guid, ldb,
                                                 end_point_information,
                                                 trust_dir, private_dir))
        else:
            cas = fetch_certification_authorities(ldb)
            for ca in cas:
                self.apply(guid, ca, cert_enroll, ca, ldb, trust_dir,
                           private_dir)
                ca_names.append(ca['name'])
        return ca_names

    def rsop(self, gpo):
        """Report the enrollment servers and templates this GPO provides."""
        output = {}
        pol_file = 'MACHINE/Registry.pol'
        section = r'Software\Policies\Microsoft\Cryptography\AutoEnrollment'
        if gpo.file_sys_path:
            path = os.path.join(gpo.file_sys_path, pol_file)
            pol_conf = self.parse(path)
            if not pol_conf:
                return output
            for e in pol_conf.entries:
                if e.keyname == section and e.valuename == 'AEPolicy':
                    enroll = e.data & 0x1 == 0x1
                    if e.data & 0x8000 or not enroll:
                        continue
                    output['Auto Enrollment Policy'] = {}
                    url = 'ldap://%s' % get_dc_hostname(self.creds, self.lp)
                    ldb = Ldb(url=url, session_info=system_session(),
                              lp=self.lp, credentials=self.creds)
                    end_point_information = \
                        obtain_end_point_information(pol_conf.entries)
                    cas = fetch_certification_authorities(ldb)
                    if len(end_point_information) > 0:
                        cas2 = [ep for sl in end_point_information for ep in sl]
                        if any([ca['URL'] == 'LDAP:' for ca in cas2]):
                            cas.extend(cas2)
                        else:
                            cas = cas2
                    for ca in cas:
                        if 'URL' in ca and ca['URL'] == 'LDAP:':
                            continue
                        policy = 'Auto Enrollment Policy'
                        cn = ca['name']
                        if policy not in output:
                            output[policy] = {}
                        output[policy][cn] = {}
                        if 'cACertificate' in ca:
                            output[policy][cn]['CA Certificate'] = \
                                format_root_cert(ca['cACertificate']).decode()
                        output[policy][cn]['Auto Enrollment Server'] = \
                            ca['hostname']
                        supported_templates = \
                            get_supported_templates(ca['hostname'])
                        output[policy][cn]['Templates'] = \
                            [t.decode() for t in supported_templates]
        return output
diff --git a/python/samba/gp/gp_chromium_ext.py b/python/samba/gp/gp_chromium_ext.py
new file mode 100644
index 0000000..5e54f0f
--- /dev/null
+++ b/python/samba/gp/gp_chromium_ext.py
@@ -0,0 +1,473 @@
+# gp_chromium_ext samba gpo policy
+# Copyright (C) David Mulder <dmulder@suse.com> 2021
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import json
+from samba.gp.gpclass import gp_pol_ext, gp_file_applier
+from samba.dcerpc import misc
+from samba.common import get_string
+from samba.gp.util.logging import log
+from tempfile import NamedTemporaryFile
+
+def parse_entry_data(name, e):
+    """Convert a raw Registry.pol entry into a native Python value.
+
+    Chromium policies named in dict_entries are stored in the pol file
+    as JSON strings and are decoded to dicts/lists; REG_DWORD values for
+    the known boolean policies in bools are mapped to True/False; any
+    other entry's data is returned unchanged.
+    """
+    # Policies whose pol data is a JSON-encoded complex value.
+    dict_entries = ['VirtualKeyboardFeatures',
+                    'DeviceArcDataSnapshotHours',
+                    'RequiredClientCertificateForDevice',
+                    'RequiredClientCertificateForUser',
+                    'RegisteredProtocolHandlers',
+                    'WebUsbAllowDevicesForUrls',
+                    'DeviceAutoUpdateTimeRestrictions',
+                    'DeviceUpdateStagingSchedule',
+                    'DeviceMinimumVersion',
+                    'DeviceDisplayResolution',
+                    'ExtensionSettings',
+                    'KerberosAccounts',
+                    'NetworkFileSharesPreconfiguredShares',
+                    'NetworkThrottlingEnabled',
+                    'TPMFirmwareUpdateSettings',
+                    'DeviceOffHours',
+                    'ParentAccessCodeConfig',
+                    'PerAppTimeLimits',
+                    'PerAppTimeLimitsWhitelist',
+                    'PerAppTimeLimitsAllowlist',
+                    'UsageTimeLimit',
+                    'PluginVmImage',
+                    'DeviceLoginScreenPowerManagement',
+                    'PowerManagementIdleSettings',
+                    'ScreenLockDelays',
+                    'ScreenBrightnessPercent',
+                    'DevicePowerPeakShiftDayConfig',
+                    'DeviceAdvancedBatteryChargeModeDayConfig',
+                    'PrintingPaperSizeDefault',
+                    'AutoLaunchProtocolsFromOrigins',
+                    'BrowsingDataLifetime',
+                    'DataLeakPreventionRulesList',
+                    'DeviceLoginScreenWebUsbAllowDevicesForUrls',
+                    'DeviceScheduledUpdateCheck',
+                    'KeyPermissions',
+                    'ManagedBookmarks',
+                    'ManagedConfigurationPerOrigin',
+                    'ProxySettings',
+                    'SystemProxySettings',
+                    'WebAppInstallForceList']
+    # Policies whose REG_DWORD data represents a boolean.
+    bools = ['ShowAccessibilityOptionsInSystemTrayMenu',
+             'LargeCursorEnabled',
+             'SpokenFeedbackEnabled',
+             'HighContrastEnabled',
+             'VirtualKeyboardEnabled',
+             'StickyKeysEnabled',
+             'KeyboardDefaultToFunctionKeys',
+             'DictationEnabled',
+             'SelectToSpeakEnabled',
+             'KeyboardFocusHighlightEnabled',
+             'CursorHighlightEnabled',
+             'CaretHighlightEnabled',
+             'MonoAudioEnabled',
+             'AccessibilityShortcutsEnabled',
+             'AutoclickEnabled',
+             'DeviceLoginScreenDefaultLargeCursorEnabled',
+             'DeviceLoginScreenDefaultSpokenFeedbackEnabled',
+             'DeviceLoginScreenDefaultHighContrastEnabled',
+             'DeviceLoginScreenDefaultVirtualKeyboardEnabled',
+             'DeviceLoginScreenLargeCursorEnabled',
+             'DeviceLoginScreenSpokenFeedbackEnabled',
+             'DeviceLoginScreenHighContrastEnabled',
+             'DeviceLoginScreenVirtualKeyboardEnabled',
+             'DeviceLoginScreenDictationEnabled',
+             'DeviceLoginScreenSelectToSpeakEnabled',
+             'DeviceLoginScreenCursorHighlightEnabled',
+             'DeviceLoginScreenCaretHighlightEnabled',
+             'DeviceLoginScreenMonoAudioEnabled',
+             'DeviceLoginScreenAutoclickEnabled',
+             'DeviceLoginScreenStickyKeysEnabled',
+             'DeviceLoginScreenKeyboardFocusHighlightEnabled',
+             'DeviceLoginScreenShowOptionsInSystemTrayMenu',
+             'DeviceLoginScreenAccessibilityShortcutsEnabled',
+             'FloatingAccessibilityMenuEnabled',
+             'ArcEnabled',
+             'UnaffiliatedArcAllowed',
+             'AppRecommendationZeroStateEnabled',
+             'DeviceBorealisAllowed',
+             'UserBorealisAllowed',
+             'SystemUse24HourClock',
+             'DefaultSearchProviderEnabled',
+             'ChromeOsReleaseChannelDelegated',
+             'DeviceAutoUpdateDisabled',
+             'DeviceAutoUpdateP2PEnabled',
+             'DeviceUpdateHttpDownloadsEnabled',
+             'RebootAfterUpdate',
+             'BlockExternalExtensions',
+             'VoiceInteractionContextEnabled',
+             'VoiceInteractionHotwordEnabled',
+             'EnableMediaRouter',
+             'ShowCastIconInToolbar',
+             'DriveDisabled',
+             'DriveDisabledOverCellular',
+             'DisableAuthNegotiateCnameLookup',
+             'EnableAuthNegotiatePort',
+             'BasicAuthOverHttpEnabled',
+             'AuthNegotiateDelegateByKdcPolicy',
+             'AllowCrossOriginAuthPrompt',
+             'NtlmV2Enabled',
+             'IntegratedWebAuthenticationAllowed',
+             'BrowserSwitcherEnabled',
+             'BrowserSwitcherKeepLastChromeTab',
+             'BrowserSwitcherUseIeSitelist',
+             'VirtualMachinesAllowed',
+             'CrostiniAllowed',
+             'DeviceUnaffiliatedCrostiniAllowed',
+             'CrostiniExportImportUIAllowed',
+             'CrostiniPortForwardingAllowed',
+             'NativeMessagingUserLevelHosts',
+             'NetworkFileSharesAllowed',
+             'NetBiosShareDiscoveryEnabled',
+             'NTLMShareAuthenticationEnabled',
+             'DeviceDataRoamingEnabled',
+             'DeviceWiFiFastTransitionEnabled',
+             'DeviceWiFiAllowed',
+             'DeviceAllowBluetooth',
+             'DeviceAllowRedeemChromeOsRegistrationOffers',
+             'DeviceQuirksDownloadEnabled',
+             'SuggestedContentEnabled',
+             'DeviceShowLowDiskSpaceNotification',
+             'PasswordManagerEnabled',
+             'PasswordLeakDetectionEnabled',
+             'PluginVmAllowed',
+             'PluginVmDataCollectionAllowed',
+             'UserPluginVmAllowed',
+             'DeviceRebootOnShutdown',
+             'PowerManagementUsesAudioActivity',
+             'PowerManagementUsesVideoActivity',
+             'AllowWakeLocks',
+             'AllowScreenWakeLocks',
+             'WaitForInitialUserActivity',
+             'PowerSmartDimEnabled',
+             'DevicePowerPeakShiftEnabled',
+             'DeviceBootOnAcEnabled',
+             'DeviceAdvancedBatteryChargeModeEnabled',
+             'DeviceUsbPowerShareEnabled',
+             'PrintingEnabled',
+             'CloudPrintProxyEnabled',
+             'PrintingSendUsernameAndFilenameEnabled',
+             'CloudPrintSubmitEnabled',
+             'DisablePrintPreview',
+             'PrintHeaderFooter',
+             'PrintPreviewUseSystemDefaultPrinter',
+             'UserNativePrintersAllowed',
+             'UserPrintersAllowed',
+             'DeletePrintJobHistoryAllowed',
+             'DeviceLoginScreenPrivacyScreenEnabled',
+             'PrivacyScreenEnabled',
+             'PinUnlockWeakPinsAllowed',
+             'PinUnlockAutosubmitEnabled',
+             'RemoteAccessHostFirewallTraversal',
+             'RemoteAccessHostRequireCurtain',
+             'RemoteAccessHostAllowClientPairing',
+             'RemoteAccessHostAllowRelayedConnection',
+             'RemoteAccessHostAllowUiAccessForRemoteAssistance',
+             'RemoteAccessHostAllowFileTransfer',
+             'RemoteAccessHostAllowRemoteAccessConnections',
+             'AttestationEnabledForUser',
+             'SafeBrowsingEnabled',
+             'SafeBrowsingExtendedReportingEnabled',
+             'DeviceGuestModeEnabled',
+             'DeviceAllowNewUsers',
+             'DeviceShowUserNamesOnSignin',
+             'DeviceEphemeralUsersEnabled',
+             'DeviceShowNumericKeyboardForPassword',
+             'DeviceFamilyLinkAccountsAllowed',
+             'ShowHomeButton',
+             'HomepageIsNewTabPage',
+             'DeviceMetricsReportingEnabled',
+             'DeviceWilcoDtcAllowed',
+             'AbusiveExperienceInterventionEnforce',
+             'AccessibilityImageLabelsEnabled',
+             'AdditionalDnsQueryTypesEnabled',
+             'AdvancedProtectionAllowed',
+             'AllowDeletingBrowserHistory',
+             'AllowDinosaurEasterEgg',
+             'AllowFileSelectionDialogs',
+             'AllowScreenLock',
+             'AllowSyncXHRInPageDismissal',
+             'AlternateErrorPagesEnabled',
+             'AlwaysOpenPdfExternally',
+             'AppCacheForceEnabled',
+             'AudioCaptureAllowed',
+             'AudioOutputAllowed',
+             'AudioProcessHighPriorityEnabled',
+             'AudioSandboxEnabled',
+             'AutoFillEnabled',
+             'AutofillAddressEnabled',
+             'AutofillCreditCardEnabled',
+             'AutoplayAllowed',
+             'BackgroundModeEnabled',
+             'BlockThirdPartyCookies',
+             'BookmarkBarEnabled',
+             'BrowserAddPersonEnabled',
+             'BrowserGuestModeEnabled',
+             'BrowserGuestModeEnforced',
+             'BrowserLabsEnabled',
+             'BrowserNetworkTimeQueriesEnabled',
+             'BuiltInDnsClientEnabled',
+             'CECPQ2Enabled',
+             'CaptivePortalAuthenticationIgnoresProxy',
+             'ChromeCleanupEnabled',
+             'ChromeCleanupReportingEnabled',
+             'ChromeOsLockOnIdleSuspend',
+             'ClickToCallEnabled',
+             'CloudManagementEnrollmentMandatory',
+             'CloudPolicyOverridesPlatformPolicy',
+             'CloudUserPolicyMerge',
+             'CommandLineFlagSecurityWarningsEnabled',
+             'ComponentUpdatesEnabled',
+             'DNSInterceptionChecksEnabled',
+             'DataLeakPreventionReportingEnabled',
+             'DefaultBrowserSettingEnabled',
+             'DefaultSearchProviderContextMenuAccessAllowed',
+             'DeveloperToolsDisabled',
+             'DeviceAllowMGSToStoreDisplayProperties',
+             'DeviceDebugPacketCaptureAllowed',
+             'DeviceLocalAccountManagedSessionEnabled',
+             'DeviceLoginScreenPrimaryMouseButtonSwitch',
+             'DevicePciPeripheralDataAccessEnabled',
+             'DevicePowerwashAllowed',
+             'DeviceSystemWideTracingEnabled',
+             'Disable3DAPIs',
+             'DisableSafeBrowsingProceedAnyway',
+             'DisableScreenshots',
+             'EasyUnlockAllowed',
+             'EditBookmarksEnabled',
+             'EmojiSuggestionEnabled',
+             'EnableDeprecatedPrivetPrinting',
+             'EnableOnlineRevocationChecks',
+             'EnableSyncConsent',
+             'EnterpriseHardwarePlatformAPIEnabled',
+             'ExternalProtocolDialogShowAlwaysOpenCheckbox',
+             'ExternalStorageDisabled',
+             'ExternalStorageReadOnly',
+             'ForceBrowserSignin',
+             'ForceEphemeralProfiles',
+             'ForceGoogleSafeSearch',
+             'ForceMaximizeOnFirstRun',
+             'ForceSafeSearch',
+             'ForceYouTubeSafetyMode',
+             'FullscreenAlertEnabled',
+             'FullscreenAllowed',
+             'GloballyScopeHTTPAuthCacheEnabled',
+             'HardwareAccelerationModeEnabled',
+             'HideWebStoreIcon',
+             'ImportAutofillFormData',
+             'ImportBookmarks',
+             'ImportHistory',
+             'ImportHomepage',
+             'ImportSavedPasswords',
+             'ImportSearchEngine',
+             'IncognitoEnabled',
+             'InsecureFormsWarningsEnabled',
+             'InsecurePrivateNetworkRequestsAllowed',
+             'InstantTetheringAllowed',
+             'IntensiveWakeUpThrottlingEnabled',
+             'JavascriptEnabled',
+             'LacrosAllowed',
+             'LacrosSecondaryProfilesAllowed',
+             'LockScreenMediaPlaybackEnabled',
+             'LoginDisplayPasswordButtonEnabled',
+             'ManagedGuestSessionPrivacyWarningsEnabled',
+             'MediaRecommendationsEnabled',
+             'MediaRouterCastAllowAllIPs',
+             'MetricsReportingEnabled',
+             'NTPCardsVisible',
+             'NTPCustomBackgroundEnabled',
+             'NativeWindowOcclusionEnabled',
+             'NearbyShareAllowed',
+             'PaymentMethodQueryEnabled',
+             'PdfAnnotationsEnabled',
+             'PhoneHubAllowed',
+             'PhoneHubNotificationsAllowed',
+             'PhoneHubTaskContinuationAllowed',
+             'PolicyAtomicGroupsEnabled',
+             'PrimaryMouseButtonSwitch',
+             'PromotionalTabsEnabled',
+             'PromptForDownloadLocation',
+             'QuicAllowed',
+             'RendererCodeIntegrityEnabled',
+             'RequireOnlineRevocationChecksForLocalAnchors',
+             'RoamingProfileSupportEnabled',
+             'SSLErrorOverrideAllowed',
+             'SafeBrowsingForTrustedSourcesEnabled',
+             'SavingBrowserHistoryDisabled',
+             'ScreenCaptureAllowed',
+             'ScrollToTextFragmentEnabled',
+             'SearchSuggestEnabled',
+             'SecondaryGoogleAccountSigninAllowed',
+             'SharedArrayBufferUnrestrictedAccessAllowed',
+             'SharedClipboardEnabled',
+             'ShowAppsShortcutInBookmarkBar',
+             'ShowFullUrlsInAddressBar',
+             'ShowLogoutButtonInTray',
+             'SignedHTTPExchangeEnabled',
+             'SigninAllowed',
+             'SigninInterceptionEnabled',
+             'SitePerProcess',
+             'SmartLockSigninAllowed',
+             'SmsMessagesAllowed',
+             'SpellCheckServiceEnabled',
+             'SpellcheckEnabled',
+             'StartupBrowserWindowLaunchSuppressed',
+             'StricterMixedContentTreatmentEnabled',
+             'SuggestLogoutAfterClosingLastWindow',
+             'SuppressDifferentOriginSubframeDialogs',
+             'SuppressUnsupportedOSWarning',
+             'SyncDisabled',
+             'TargetBlankImpliesNoOpener',
+             'TaskManagerEndProcessEnabled',
+             'ThirdPartyBlockingEnabled',
+             'TouchVirtualKeyboardEnabled',
+             'TranslateEnabled',
+             'TripleDESEnabled',
+             'UnifiedDesktopEnabledByDefault',
+             'UrlKeyedAnonymizedDataCollectionEnabled',
+             'UserAgentClientHintsEnabled',
+             'UserFeedbackAllowed',
+             'VideoCaptureAllowed',
+             'VmManagementCliAllowed',
+             'VpnConfigAllowed',
+             'WPADQuickCheckEnabled',
+             'WebRtcAllowLegacyTLSProtocols',
+             'WebRtcEventLogCollectionAllowed',
+             'WifiSyncAndroidAllowed',
+             'WindowOcclusionEnabled']
+    if name in dict_entries:
+        # JSON-encoded complex value stored as a string in the pol data.
+        return json.loads(get_string(e.data))
+    elif e.type == misc.REG_DWORD and name in bools:
+        return e.data == 1
+    return e.data
+
+def assign_entry(policies, e):
+    """Insert a pol entry into the policies dict.
+
+    A numeric value name marks a list-type policy (one pol entry per
+    index) named after the last registry key component; otherwise the
+    value name itself is the policy name and the data is a scalar.
+    """
+    if e.valuename.isnumeric():
+        name = e.keyname.split('\\')[-1]
+        if name not in policies:
+            policies[name] = []
+        policies[name].append(parse_entry_data(name, e))
+    else:
+        name = e.valuename
+        policies[name] = parse_entry_data(name, e)
+
+def convert_pol_to_json(section, entries):
+    """Split pol entries into (managed, recommended) policy dicts.
+
+    Entries under '<section>\\Recommended' populate the recommended
+    dict; other entries under the section populate the managed dict.
+    '**delvals.' tombstone entries are ignored.
+    """
+    managed = {}
+    recommended = {}
+    recommended_section = '\\'.join([section, 'Recommended'])
+    for e in entries:
+        if '**delvals.' in e.valuename:
+            continue
+        # Check the more specific Recommended prefix first, since it
+        # also matches the plain section prefix.
+        if e.keyname.startswith(recommended_section):
+            assign_entry(recommended, e)
+        elif e.keyname.startswith(section):
+            assign_entry(managed, e)
+    return managed, recommended
+
+class gp_chromium_ext(gp_pol_ext, gp_file_applier):
+    """Apply Chromium browser policies from GPO Registry.pol data.
+
+    Managed and recommended policies are written as JSON files into the
+    Chromium policy directories.
+    """
+    # Default install locations; overridden via policy_dir (and by the
+    # Chrome subclass below).
+    managed_policies_path = '/etc/chromium/policies/managed'
+    recommended_policies_path = '/etc/chromium/policies/recommended'
+
+    def __str__(self):
+        # Extension name used as the settings key in the applier cache.
+        return 'Google/Chromium'
+
+    def process_group_policy(self, deleted_gpo_list, changed_gpo_list,
+                             policy_dir=None):
+        """Unapply deleted GPOs, then apply changed GPOs.
+
+        :param policy_dir: optional override for the policy install
+            root (a 'managed' and 'recommended' subdir is used).
+        """
+        if policy_dir is not None:
+            self.recommended_policies_path = os.path.join(policy_dir,
+                                                          'recommended')
+            self.managed_policies_path = os.path.join(policy_dir, 'managed')
+        # Create the policy directories if necessary
+        if not os.path.exists(self.recommended_policies_path):
+            os.makedirs(self.recommended_policies_path, mode=0o755,
+                        exist_ok=True)
+        if not os.path.exists(self.managed_policies_path):
+            os.makedirs(self.managed_policies_path, mode=0o755,
+                        exist_ok=True)
+        for guid, settings in deleted_gpo_list:
+            if str(self) in settings:
+                for attribute, policies in settings[str(self)].items():
+                    # New-style cache entries hold a hash + file list and
+                    # are not valid JSON; old-style entries held raw JSON.
+                    try:
+                        json.loads(policies)
+                    except json.decoder.JSONDecodeError:
+                        self.unapply(guid, attribute, policies)
+                    else:
+                        # Policies were previously stored all in one file, but
+                        # the Chromium documentation says this is not
+                        # necessary. Unapply the old policy file if json was
+                        # stored in the cache (now we store a hash and file
+                        # names instead).
+                        if attribute == 'recommended':
+                            fname = os.path.join(self.recommended_policies_path,
+                                                 'policies.json')
+                        elif attribute == 'managed':
+                            fname = os.path.join(self.managed_policies_path,
+                                                 'policies.json')
+                        self.unapply(guid, attribute, fname)
+
+        for gpo in changed_gpo_list:
+            if gpo.file_sys_path:
+                section = 'Software\\Policies\\Google\\Chrome'
+                pol_file = 'MACHINE/Registry.pol'
+                path = os.path.join(gpo.file_sys_path, pol_file)
+                pol_conf = self.parse(path)
+                if not pol_conf:
+                    continue
+
+                managed, recommended = convert_pol_to_json(section,
+                                                           pol_conf.entries)
+                # Writes one uniquely named JSON file into `location` and
+                # returns its name for the applier cache. A PermissionError
+                # is logged but not fatal (best-effort apply).
+                def applier_func(policies, location):
+                    try:
+                        with NamedTemporaryFile(mode='w+', prefix='gp_',
+                                                delete=False,
+                                                dir=location,
+                                                suffix='.json') as f:
+                            json.dump(policies, f)
+                            os.chmod(f.name, 0o644)
+                            log.debug('Wrote Chromium preferences', policies)
+                            return [f.name]
+                    except PermissionError:
+                        log.debug('Failed to write Chromium preferences',
+                                  policies)
+                # Hash the serialized policies so apply() can detect
+                # changes and skip rewriting identical settings.
+                value_hash = self.generate_value_hash(json.dumps(managed))
+                self.apply(gpo.name, 'managed', value_hash, applier_func,
+                           managed, self.managed_policies_path)
+                value_hash = self.generate_value_hash(json.dumps(recommended))
+                self.apply(gpo.name, 'recommended', value_hash, applier_func,
+                           recommended, self.recommended_policies_path)
+
+    def rsop(self, gpo):
+        """Report the raw Chrome policy entries found in this GPO."""
+        output = {}
+        pol_file = 'MACHINE/Registry.pol'
+        section = 'Software\\Policies\\Google\\Chrome'
+        if gpo.file_sys_path:
+            path = os.path.join(gpo.file_sys_path, pol_file)
+            pol_conf = self.parse(path)
+            if not pol_conf:
+                return output
+            for e in pol_conf.entries:
+                if e.keyname.startswith(section):
+                    output['%s\\%s' % (e.keyname, e.valuename)] = e.data
+        return output
+
+class gp_chrome_ext(gp_chromium_ext):
+    """Same policies as Chromium, written to Google Chrome's directories."""
+    managed_policies_path = '/etc/opt/chrome/policies/managed'
+    recommended_policies_path = '/etc/opt/chrome/policies/recommended'
+
+    def __str__(self):
+        return 'Google/Chrome'
diff --git a/python/samba/gp/gp_drive_maps_ext.py b/python/samba/gp/gp_drive_maps_ext.py
new file mode 100644
index 0000000..f998d0e
--- /dev/null
+++ b/python/samba/gp/gp_drive_maps_ext.py
@@ -0,0 +1,168 @@
+# gp_drive_maps_user_ext samba gpo policy
+# Copyright (C) David Mulder <dmulder@suse.com> 2020
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import json
+from samba.gp.gpclass import gp_xml_ext, gp_misc_applier, drop_privileges, \
+ expand_pref_variables
+from subprocess import Popen, PIPE
+from samba.gp.gp_scripts_ext import fetch_crontab, install_user_crontab
+from samba.gp.util.logging import log
+from samba.gp import gp_scripts_ext
+# Override the autogenerated-crontab banner defined in gp_scripts_ext so
+# crontab entries written via install_user_crontab are attributed to this
+# extension rather than the scripts extension.
+gp_scripts_ext.intro = '''
+### autogenerated by samba
+#
+# This file is generated by the gp_drive_maps_user_ext Group Policy
+# Client Side Extension. To modify the contents of this file,
+# modify the appropriate Group Policy objects which apply
+# to this machine. DO NOT MODIFY THIS FILE DIRECTLY.
+#
+
+'''
+
+def mount_drive(uri):
+    """Mount an smb:// uri via 'gio mount'.
+
+    An already-mounted location is not an error; any other gio failure
+    raises SystemError with gio's stderr output.
+    """
+    log.debug('Mounting drive', uri)
+    out, err = Popen(['gio', 'mount', uri],
+                     stdout=PIPE, stderr=PIPE).communicate()
+    if err:
+        if b'Location is already mounted' not in err:
+            raise SystemError(err)
+
+def unmount_drive(uri):
+    """Unmount a uri via 'gio mount --unmount'; returns gio's exit code."""
+    log.debug('Unmounting drive', uri)
+    return Popen(['gio', 'mount', uri, '--unmount']).wait()
+
+class gp_drive_maps_user_ext(gp_xml_ext, gp_misc_applier):
+    """Map network drives from Preferences/Drives GPO settings.
+
+    Drives are mounted via gio, and (unless the RunOnce filter is set)
+    a user crontab entry keeps the mount refreshed hourly.
+    """
+    def parse_value(self, val):
+        """Decode a cached value; props/run_once are stored as JSON."""
+        vals = super().parse_value(val)
+        if 'props' in vals.keys():
+            vals['props'] = json.loads(vals['props'])
+        if 'run_once' in vals.keys():
+            vals['run_once'] = json.loads(vals['run_once'])
+        return vals
+
+    def unapply(self, guid, uri, val):
+        """Undo a previously applied drive map.
+
+        Unmounts the drive (for Create/Replace/Update actions), removes
+        the crontab entry we installed, and drops the cache record.
+        """
+        vals = self.parse_value(val)
+        if 'props' in vals.keys() and \
+                vals['props']['action'] in ['C', 'R', 'U']:
+            unmount_drive(uri)
+        others, entries = fetch_crontab(self.username)
+        if 'crontab' in vals.keys() and vals['crontab'] in entries:
+            entries.remove(vals['crontab'])
+            install_user_crontab(self.username, others, entries)
+        self.cache_remove_attribute(guid, uri)
+
+    def apply(self, guid, uri, props, run_once, entry):
+        """Apply one drive map and record it in the applier cache.
+
+        :param props: the Drive Properties attributes from Drives.xml
+        :param run_once: True if the FilterRunOnce filter was present
+        :param entry: crontab line to install ('' when run_once)
+        """
+        old_val = self.cache_get_attribute_value(guid, uri)
+        val = self.generate_value(props=json.dumps(props),
+                                  run_once=json.dumps(run_once),
+                                  crontab=entry)
+
+        # The policy has changed, unapply it first
+        if old_val:
+            self.unapply(guid, uri, old_val)
+
+        # C/R/U actions mount the drive; D (Delete) unmounts it.
+        if props['action'] in ['C', 'R', 'U']:
+            mount_drive(uri)
+        elif props['action'] == 'D':
+            unmount_drive(uri)
+        if not run_once:
+            others, entries = fetch_crontab(self.username)
+            if entry not in entries:
+                entries.append(entry)
+                install_user_crontab(self.username, others, entries)
+        self.cache_add_attribute(guid, uri, val)
+
+    def __str__(self):
+        # Extension name used as the settings key in the applier cache.
+        return 'Preferences/Drives'
+
+    def process_group_policy(self, deleted_gpo_list, changed_gpo_list):
+        """Unapply deleted GPO drive maps, then apply changed ones."""
+        for guid, settings in deleted_gpo_list:
+            if str(self) in settings:
+                for uri, val in settings[str(self)].items():
+                    self.unapply(guid, uri, val)
+
+        for gpo in changed_gpo_list:
+            if gpo.file_sys_path:
+                xml = 'USER/Preferences/Drives/Drives.xml'
+                path = os.path.join(gpo.file_sys_path, xml)
+                # Parse with elevated privileges; the sysvol share may
+                # not be readable as the target user.
+                xml_conf = drop_privileges('root', self.parse, path)
+                if not xml_conf:
+                    continue
+                drives = xml_conf.findall('Drive')
+                attrs = []
+                for drive in drives:
+                    prop = drive.find('Properties')
+                    if prop is None:
+                        log.warning('Drive is missing Properties', drive.attrib)
+                        continue
+                    if prop.attrib['thisDrive'] == 'HIDE':
+                        log.warning('Drive is hidden', prop.attrib)
+                        continue # Don't mount a hidden drive
+                    run_once = False
+                    filters = drive.find('Filters')
+                    # NOTE(review): 'if filters:' tests Element truthiness
+                    # (number of children), not presence — an empty
+                    # <Filters/> is skipped, which happens to be harmless
+                    # here; confirm this matches the intended semantics.
+                    if filters:
+                        run_once_filter = filters.find('FilterRunOnce')
+                        if run_once_filter is not None:
+                            run_once = True
+                    uri = 'smb:{}'.format(prop.attrib['path'].replace('\\', '/'))
+                    # Ensure we expand the preference variables, or fail if we
+                    # are unable to (the uri is invalid if we fail).
+                    gptpath = os.path.join(gpo.file_sys_path, 'USER')
+                    try:
+                        uri = expand_pref_variables(uri, gptpath, self.lp,
+                                                    username=self.username)
+                    except NameError as e:
+                        # If we fail expanding variables, then the URI is
+                        # invalid and we can't continue processing this drive
+                        # map. We can continue processing other drives, as they
+                        # may succeed. This is not a critical error, since some
+                        # Windows specific policies won't apply here.
+                        log.warn('Failed to expand drive map variables: %s' % e,
+                                 prop.attrib)
+                        continue
+                    attrs.append(uri)
+                    entry = ''
+                    if not run_once:
+                        if prop.attrib['action'] in ['C', 'R', 'U']:
+                            entry = '@hourly gio mount {}'.format(uri)
+                        elif prop.attrib['action'] == 'D':
+                            entry = '@hourly gio mount {} --unmount'.format(uri)
+                    self.apply(gpo.name, uri, prop.attrib, run_once, entry)
+                # Unapply any drive maps no longer present in the GPO.
+                self.clean(gpo.name, keep=attrs)
+
+    def rsop(self, gpo):
+        """Report the gio commands that would be run per drive label."""
+        output = {}
+        if gpo.file_sys_path:
+            xml = 'USER/Preferences/Drives/Drives.xml'
+            path = os.path.join(gpo.file_sys_path, xml)
+            xml_conf = self.parse(path)
+            if not xml_conf:
+                return output
+            drives = xml_conf.findall('Drive')
+            for drive in drives:
+                prop = drive.find('Properties')
+                if prop is None:
+                    continue
+                if prop.attrib['thisDrive'] == 'HIDE':
+                    continue
+                uri = 'smb:{}'.format(prop.attrib['path'].replace('\\', '/'))
+                if prop.attrib['action'] in ['C', 'R', 'U']:
+                    output[prop.attrib['label']] = 'gio mount {}'.format(uri)
+                elif prop.attrib['action'] == 'D':
+                    output[prop.attrib['label']] = \
+                        'gio mount {} --unmount'.format(uri)
+        return output
diff --git a/python/samba/gp/gp_ext_loader.py b/python/samba/gp/gp_ext_loader.py
new file mode 100644
index 0000000..705b973
--- /dev/null
+++ b/python/samba/gp/gp_ext_loader.py
@@ -0,0 +1,59 @@
+# Group Policy Client Side Extension Loader
+# Copyright (C) David Mulder <dmulder@suse.com> 2018
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from samba.gp.gpclass import list_gp_extensions
+from samba.gp.gpclass import gp_ext
+from samba.gp.util.logging import log
+
+try:
+    import importlib.util
+
+    def import_file(name, location):
+        """Load a Python module from an explicit file path (py3 API)."""
+        spec = importlib.util.spec_from_file_location(name, location)
+        module = importlib.util.module_from_spec(spec)
+        spec.loader.exec_module(module)
+        return module
+except ImportError:
+    # Fall back to the deprecated imp module where importlib.util is
+    # unavailable.
+    import imp
+
+    def import_file(name, location):
+        """Load a Python module from an explicit file path (imp API)."""
+        return imp.load_source(name, location)
+
+
+def get_gp_ext_from_module(name, mod):
+    """Return the gp_ext subclass called `name` from module `mod`.
+
+    Returns None if `mod` is falsy or defines no matching class.
+    """
+    if mod:
+        for k, v in vars(mod).items():
+            if k == name and issubclass(v, gp_ext):
+                return v
+    return None
+
+
+def get_gp_client_side_extensions(smb_conf):
+    """Load the registered Group Policy client-side extension classes.
+
+    Extensions are registered (per smb.conf) with a DllName (here: a
+    Python file path) and a ProcessGroupPolicy entry point (here: the
+    class name). An extension may serve machine policy, user policy,
+    or both.
+
+    :return: tuple of (machine extension classes, user extension classes)
+    """
+    user_exts = []
+    machine_exts = []
+    gp_exts = list_gp_extensions(smb_conf)
+    for gp_extension in gp_exts.values():
+        module = import_file(gp_extension['ProcessGroupPolicy'], gp_extension['DllName'])
+        ext = get_gp_ext_from_module(gp_extension['ProcessGroupPolicy'], module)
+        if ext and gp_extension['MachinePolicy']:
+            machine_exts.append(ext)
+            log.info('Loaded machine extension from %s: %s'
+                     % (gp_extension['DllName'], ext.__name__))
+        if ext and gp_extension['UserPolicy']:
+            user_exts.append(ext)
+            log.info('Loaded user extension from %s: %s'
+                     % (gp_extension['DllName'], ext.__name__))
+    return (machine_exts, user_exts)
diff --git a/python/samba/gp/gp_firefox_ext.py b/python/samba/gp/gp_firefox_ext.py
new file mode 100644
index 0000000..a623314
--- /dev/null
+++ b/python/samba/gp/gp_firefox_ext.py
@@ -0,0 +1,219 @@
+# gp_firefox_ext samba gpo policy
+# Copyright (C) David Mulder <dmulder@suse.com> 2021
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import json
+from samba.gp.gpclass import gp_pol_ext, gp_misc_applier
+from samba.dcerpc import misc
+from samba.common import get_string
+from samba.gp.util.logging import log
+
+def parse_entry_data(e):
+    """Convert a raw Registry.pol entry into a native Python value.
+
+    REG_MULTI_SZ data is NUL-stripped and decoded as JSON; 0/1 DWORDs
+    become booleans; anything else is returned unchanged.
+    """
+    if e.type == misc.REG_MULTI_SZ:
+        data = get_string(e.data).replace('\x00', '')
+        return json.loads(data)
+    elif e.type == misc.REG_DWORD and e.data in [0, 1]:
+        return e.data == 1
+    return e.data
+
+def convert_pol_to_json(section, entries):
+    """Rebuild the nested Firefox policies.json structure from pol entries.
+
+    Registry keys below `section` encode nested dicts (subkey names) and
+    lists (numeric subkeys or numeric value names). index_map remembers
+    which pol index maps to which list position so repeated entries land
+    in the same element. '**delvals.' tombstones are skipped.
+    """
+    result = {}
+    index_map = {}
+    for e in entries:
+        if not e.keyname.startswith(section):
+            continue
+        if '**delvals.' in e.valuename:
+            continue
+        sub_keys = e.keyname.replace(section, '').strip('\\')
+        if sub_keys:
+            sub_keys = sub_keys.split('\\')
+            current = result
+            index = -1
+            # Build a stable name for the list this entry belongs to,
+            # used as the index_map key.
+            if sub_keys[-1].isnumeric():
+                name = '\\'.join(sub_keys[:-1])
+            elif e.valuename.isnumeric():
+                name = e.keyname
+            else:
+                name = '\\'.join([e.keyname, e.valuename])
+            for i in range(len(sub_keys)):
+                # NOTE(review): maps the pol key 'PDFjs' to 'PSFjs' in the
+                # output — presumably to match the expected policies.json
+                # key; confirm against upstream intent before relying on it.
+                if sub_keys[i] == 'PDFjs':
+                    sub_keys[i] = 'PSFjs'
+                # Decide whether the next container level is a dict or a
+                # list (numeric child or numeric value name => list).
+                ctype = dict
+                if i == len(sub_keys)-1 and e.valuename.isnumeric():
+                    ctype = list
+                    index = int(e.valuename)
+                if i < len(sub_keys)-1 and sub_keys[i+1].isnumeric():
+                    ctype = list
+                    index = int(sub_keys[i+1])
+                if type(current) == dict:
+                    if sub_keys[i] not in current:
+                        if ctype == dict:
+                            current[sub_keys[i]] = {}
+                        else:
+                            current[sub_keys[i]] = []
+                    current = current[sub_keys[i]]
+                else:
+                    if name not in index_map:
+                        index_map[name] = {}
+                    if index not in index_map[name].keys():
+                        if ctype == dict:
+                            current.append({})
+                        else:
+                            current.append([])
+                        index_map[name][index] = len(current)-1
+                    current = current[index_map[name][index]]
+            if type(current) == list:
+                current.append(parse_entry_data(e))
+            else:
+                current[e.valuename] = parse_entry_data(e)
+        else:
+            result[e.valuename] = parse_entry_data(e)
+    return result
+
+class gp_firefox_ext(gp_pol_ext, gp_misc_applier):
+    """Apply Firefox policies from GPO Registry.pol data.
+
+    Policies are merged into the system-wide policies.json, preserving
+    values that were modified outside of group policy.
+    """
+    firefox_installdir = '/etc/firefox/policies'
+    destfile = os.path.join(firefox_installdir, 'policies.json')
+
+    def __str__(self):
+        # Extension name used as the settings key in the applier cache.
+        return 'Mozilla/Firefox'
+
+    def set_machine_policy(self, policies):
+        """Write the policies dict to policies.json (best effort)."""
+        try:
+            os.makedirs(self.firefox_installdir, exist_ok=True)
+            with open(self.destfile, 'w') as f:
+                json.dump(policies, f)
+                log.debug('Wrote Firefox preferences', self.destfile)
+        except PermissionError:
+            log.debug('Failed to write Firefox preferences',
+                      self.destfile)
+
+    def get_machine_policy(self):
+        """Read the current policies.json, or an empty policy skeleton."""
+        if os.path.exists(self.destfile):
+            with open(self.destfile, 'r') as r:
+                policies = json.load(r)
+            log.debug('Read Firefox preferences', self.destfile)
+        else:
+            policies = {'policies': {}}
+        return policies
+
+    def parse_value(self, value):
+        """Decode a cached value; fields that are JSON are decoded."""
+        data = super().parse_value(value)
+        for k, v in data.items():
+            try:
+                data[k] = json.loads(v)
+            except json.decoder.JSONDecodeError:
+                pass
+        return data
+
+    def unapply_policy(self, guid, policy, applied_val, val):
+        """Restore one policy to `val` (None deletes it).
+
+        If applied_val is given, only restore when the current value
+        still matches what we applied (i.e. it wasn't changed manually).
+        """
+        def set_val(policies, policy, val):
+            if val is None:
+                del policies[policy]
+            else:
+                policies[policy] = val
+        current = self.get_machine_policy()
+        if policy in current['policies'].keys():
+            if applied_val is not None:
+                # Only restore policy if unmodified
+                if current['policies'][policy] == applied_val:
+                    set_val(current['policies'], policy, val)
+            else:
+                set_val(current['policies'], policy, val)
+            self.set_machine_policy(current)
+
+    def unapply(self, guid, policy, val):
+        """Unapply a cached policy and drop it from the applier cache.
+
+        The special name 'policies.json' is the legacy whole-file cache
+        entry: every current policy is restored from its old value.
+        """
+        cache = self.parse_value(val)
+        if policy == 'policies.json':
+            current = self.get_machine_policy()
+            for attr in current['policies'].keys():
+                val = cache['old_val']['policies'][attr] \
+                    if attr in cache['old_val']['policies'] else None
+                self.unapply_policy(guid, attr, None, val)
+        else:
+            self.unapply_policy(guid, policy,
+                                cache['new_val'] if 'new_val' in cache else None,
+                                cache['old_val'])
+        self.cache_remove_attribute(guid, policy)
+
+    def apply(self, guid, policy, val):
+        """Apply one policy value, caching the previous value for unapply."""
+        # If the policy has changed, unapply, then apply new policy
+        data = self.cache_get_attribute_value(guid, policy)
+        if data is not None:
+            self.unapply(guid, policy, data)
+
+        current = self.get_machine_policy()
+        before = None
+        if policy in current['policies'].keys():
+            before = current['policies'][policy]
+
+        # Apply the policy and log the changes
+        new_value = self.generate_value(old_val=json.dumps(before),
+                                        new_val=json.dumps(val))
+        current['policies'][policy] = val
+        self.set_machine_policy(current)
+        self.cache_add_attribute(guid, policy, get_string(new_value))
+
+    def process_group_policy(self, deleted_gpo_list, changed_gpo_list,
+                             policy_dir=None):
+        """Unapply deleted GPO policies, then apply changed ones.
+
+        :param policy_dir: optional override for the Firefox policies
+            install directory (used by tests and subclasses).
+        """
+        if policy_dir is not None:
+            self.firefox_installdir = policy_dir
+            self.destfile = os.path.join(policy_dir, 'policies.json')
+        for guid, settings in deleted_gpo_list:
+            if str(self) in settings:
+                for policy, val in settings[str(self)].items():
+                    self.unapply(guid, policy, val)
+
+        for gpo in changed_gpo_list:
+            if gpo.file_sys_path:
+                pol_file = 'MACHINE/Registry.pol'
+                section = 'Software\\Policies\\Mozilla\\Firefox'
+                path = os.path.join(gpo.file_sys_path, pol_file)
+                pol_conf = self.parse(path)
+                if not pol_conf:
+                    continue
+
+                # Unapply the old cache entry, if present
+                data = self.cache_get_attribute_value(gpo.name, 'policies.json')
+                if data is not None:
+                    self.unapply(gpo.name, 'policies.json', data)
+
+                policies = convert_pol_to_json(section, pol_conf.entries)
+                for policy, val in policies.items():
+                    self.apply(gpo.name, policy, val)
+
+                # cleanup removed policies
+                self.clean(gpo.name, keep=policies.keys())
+
+    def rsop(self, gpo):
+        """Report the raw Firefox policy entries found in this GPO."""
+        output = {}
+        pol_file = 'MACHINE/Registry.pol'
+        section = 'Software\\Policies\\Mozilla\\Firefox'
+        if gpo.file_sys_path:
+            path = os.path.join(gpo.file_sys_path, pol_file)
+            pol_conf = self.parse(path)
+            if not pol_conf:
+                return output
+            for e in pol_conf.entries:
+                if e.keyname.startswith(section):
+                    output['%s\\%s' % (e.keyname, e.valuename)] = e.data
+        return output
+
+class gp_firefox_old_ext(gp_firefox_ext):
+    """Same policies, written to the legacy Firefox distribution dir."""
+    firefox_installdir = '/usr/lib64/firefox/distribution'
+    destfile = os.path.join(firefox_installdir, 'policies.json')
+
+    def __str__(self):
+        return 'Mozilla/Firefox (old profile directory)'
diff --git a/python/samba/gp/gp_firewalld_ext.py b/python/samba/gp/gp_firewalld_ext.py
new file mode 100644
index 0000000..5e125b0
--- /dev/null
+++ b/python/samba/gp/gp_firewalld_ext.py
@@ -0,0 +1,171 @@
+# gp_firewalld_ext samba gpo policy
+# Copyright (C) David Mulder <dmulder@suse.com> 2021
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+from subprocess import Popen, PIPE
+from shutil import which
+import json
+from samba.gp.gpclass import gp_pol_ext, gp_applier
+from samba.gp.util.logging import log
+
def firewall_cmd(*args):
    """Run firewall-cmd with the given arguments.

    Returns a (returncode, stdout) tuple, or (-1, message) when the
    firewall-cmd utility is not installed.
    """
    fw_cmd = which('firewall-cmd')
    if fw_cmd is None:
        return -1, 'firewall-cmd not found'
    proc = Popen([fw_cmd] + list(args), stdout=PIPE, stderr=PIPE)
    out, _ = proc.communicate()
    return proc.returncode, out
+
def rule_segment_parse(name, rule_segment):
    """Render one rich-rule segment.

    A string value becomes 'name=value '; a dict becomes
    'name k1=v1 k2=v2 ' (trailing space in both cases, so segments
    concatenate cleanly).
    """
    if isinstance(rule_segment, str):
        return '%s=%s ' % (name, rule_segment)
    pairs = ' '.join('%s=%s' % kv for kv in rule_segment.items())
    return '%s %s ' % (name, pairs)
+
class gp_firewalld_ext(gp_pol_ext, gp_applier):
    """Group Policy extension that applies firewalld zones and rich
    rules distributed via MACHINE/Registry.pol."""

    def __str__(self):
        return 'Security/Firewalld'

    def apply_zone(self, guid, zone):
        """Create a permanent firewalld zone and bind it to the
        interfaces of the default zone.

        Returns the list of cache attribute names recorded for the
        zone so they can later be unapplied.
        """
        zone_attrs = []
        ret = firewall_cmd('--permanent', '--new-zone=%s' % zone)[0]
        if ret != 0:
            log.error('Failed to add new zone', zone)
        else:
            attribute = 'zone:%s' % zone
            self.cache_add_attribute(guid, attribute, zone)
            zone_attrs.append(attribute)
        # Default to matching the interface(s) for the default zone
        ret, out = firewall_cmd('--list-interfaces')
        if ret != 0:
            log.error('Failed to set interfaces for zone', zone)
        else:
            # Only walk the interface list when the listing succeeded;
            # on failure `out` is an error message, not interface data.
            for interface in out.strip().split():
                # BUG FIX: take the returncode element -- the previous
                # code compared the whole (rc, stdout) tuple to 0, which
                # is never equal, so errors were logged on every success
                # and real failures were indistinguishable.
                ret = firewall_cmd('--permanent', '--zone=%s' % zone,
                                   '--add-interface=%s' % interface.decode())[0]
                if ret != 0:
                    log.error('Failed to set interfaces for zone', zone)
        return zone_attrs

    def apply_rules(self, guid, rule_dict):
        """Translate the policy's JSON rule description into firewalld
        rich rules and install them per zone.

        :param rule_dict: {zone: [rule, ...]} where each rule is a dict
            of rich-rule segments.
        Returns the list of cache attribute names for the added rules.
        """
        rule_attrs = []
        for zone, rules in rule_dict.items():
            for rule in rules:
                if 'rule' in rule:
                    rule_parsed = rule_segment_parse('rule', rule['rule'])
                else:
                    rule_parsed = 'rule '
                for segment in ['source', 'destination', 'service', 'port',
                                'protocol', 'icmp-block', 'masquerade',
                                'icmp-type', 'forward-port', 'source-port',
                                'log', 'audit']:
                    names = [s for s in rule.keys() if s.startswith(segment)]
                    for name in names:
                        rule_parsed += rule_segment_parse(name, rule[name])
                # A valid rich rule carries exactly one action element.
                actions = set(['accept', 'reject', 'drop', 'mark'])
                segments = set(rule.keys())
                action = actions.intersection(segments)
                if len(action) == 1:
                    rule_parsed += rule_segment_parse(list(action)[0],
                                                      rule[list(action)[0]])
                else:
                    log.error('Invalid firewall rule syntax')
                ret = firewall_cmd('--permanent', '--zone=%s' % zone,
                                   '--add-rich-rule', rule_parsed.strip())[0]
                if ret != 0:
                    log.error('Failed to add firewall rule', rule_parsed)
                else:
                    rhash = self.generate_value_hash(rule_parsed)
                    attribute = 'rule:%s:%s' % (zone, rhash)
                    self.cache_add_attribute(guid, attribute, rule_parsed)
                    rule_attrs.append(attribute)
        return rule_attrs

    def unapply(self, guid, attribute, value):
        """Remove a previously applied zone or rich rule identified by
        its cache attribute; the cache entry is dropped on success."""
        if attribute.startswith('zone'):
            ret = firewall_cmd('--permanent',
                               '--delete-zone=%s' % value)[0]
            if ret != 0:
                log.error('Failed to remove zone', value)
            else:
                self.cache_remove_attribute(guid, attribute)
        elif attribute.startswith('rule'):
            # attribute format is 'rule:<zone>:<hash>'
            _, zone, _ = attribute.split(':')
            ret = firewall_cmd('--permanent', '--zone=%s' % zone,
                               '--remove-rich-rule', value)[0]
            if ret != 0:
                log.error('Failed to remove firewall rule', value)
            else:
                self.cache_remove_attribute(guid, attribute)

    def apply(self, applier_func, *args):
        # Thin indirection so zone and rule application share one
        # entry point.
        return applier_func(*args)

    def process_group_policy(self, deleted_gpo_list, changed_gpo_list):
        """Unapply settings from deleted GPOs, then apply zones and
        rules from changed GPOs, cleaning up stale entries."""
        for guid, settings in deleted_gpo_list:
            if str(self) in settings:
                for attribute, value in settings[str(self)].items():
                    self.unapply(guid, attribute, value)

        for gpo in changed_gpo_list:
            if gpo.file_sys_path:
                section = 'Software\\Policies\\Samba\\Unix Settings\\Firewalld'
                pol_file = 'MACHINE/Registry.pol'
                path = os.path.join(gpo.file_sys_path, pol_file)
                pol_conf = self.parse(path)
                if not pol_conf:
                    continue
                attrs = []
                for e in pol_conf.entries:
                    if e.keyname.startswith(section):
                        if e.keyname.endswith('Rules'):
                            attrs.extend(self.apply(self.apply_rules, gpo.name,
                                                    json.loads(e.data)))
                        elif e.keyname.endswith('Zones'):
                            if e.valuename == '**delvals.':
                                continue
                            attrs.extend(self.apply(self.apply_zone, gpo.name,
                                                    e.data))

                # Cleanup all old zones and rules from this GPO
                self.clean(gpo.name, keep=attrs)

    def rsop(self, gpo):
        """Return the zones and rules this GPO would apply, keyed
        'Zones' and 'Rules'."""
        output = {}
        pol_file = 'MACHINE/Registry.pol'
        section = 'Software\\Policies\\Samba\\Unix Settings\\Firewalld'
        if gpo.file_sys_path:
            path = os.path.join(gpo.file_sys_path, pol_file)
            pol_conf = self.parse(path)
            if not pol_conf:
                return output
            for e in pol_conf.entries:
                if e.keyname.startswith(section):
                    if e.keyname.endswith('Zones'):
                        if e.valuename == '**delvals.':
                            continue
                        if 'Zones' not in output.keys():
                            output['Zones'] = []
                        output['Zones'].append(e.data)
                    elif e.keyname.endswith('Rules'):
                        if 'Rules' not in output.keys():
                            output['Rules'] = []
                        output['Rules'].append(json.loads(e.data))
        return output
diff --git a/python/samba/gp/gp_gnome_settings_ext.py b/python/samba/gp/gp_gnome_settings_ext.py
new file mode 100644
index 0000000..567ab94
--- /dev/null
+++ b/python/samba/gp/gp_gnome_settings_ext.py
@@ -0,0 +1,418 @@
+# gp_gnome_settings_ext samba gpo policy
+# Copyright (C) David Mulder <dmulder@suse.com> 2020
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os, re
+from samba.gp.gpclass import gp_pol_ext, gp_file_applier
+from tempfile import NamedTemporaryFile
+import shutil
+from configparser import ConfigParser
+from subprocess import Popen, PIPE
+from samba.common import get_string
+from glob import glob
+import xml.etree.ElementTree as etree
+from samba.gp.util.logging import log
+
def dconf_update(test_dir):
    """Run 'dconf update' to rebuild the system databases.

    Skipped entirely when test_dir is set, so unit tests never touch
    the host configuration. Failures are logged, not raised.
    """
    if test_dir is not None:
        return
    dconf = shutil.which('dconf')
    if dconf is None:
        log.error('Failed to update dconf. Command not found')
        return
    proc = Popen([dconf, 'update'], stdout=PIPE, stderr=PIPE)
    _, err = proc.communicate()
    if proc.returncode != 0:
        log.error('Failed to update dconf', get_string(err))
+
def create_locks_dir(test_dir):
    """Ensure the dconf locks directory exists and return its path.

    When test_dir is set, the path is rooted under it instead of '/'.
    """
    path = '/etc/dconf/db/local.d/locks'
    if test_dir is not None:
        path = os.path.join(test_dir, path[1:])
    os.makedirs(path, exist_ok=True)
    return path
+
def create_user_profile(test_dir):
    """Create /etc/dconf/profile/user unless it already exists.

    The profile chains the per-user database with the 'local' system
    database. When test_dir is set, the path is rooted under it.
    """
    profile = '/etc/dconf/profile/user'
    if test_dir is not None:
        profile = os.path.join(test_dir, profile[1:])
    if os.path.exists(profile):
        return
    profile_dir = os.path.dirname(profile)
    os.makedirs(profile_dir, exist_ok=True)
    # Write to a temp file in the same directory, then rename into
    # place so readers never see a partially written profile.
    with NamedTemporaryFile('w', dir=profile_dir, delete=False) as tmp:
        tmp.write('user-db:user\nsystem-db:local')
        os.chmod(tmp.name, 0o644)
        tmp_name = tmp.name
    shutil.move(tmp_name, profile)
+
def create_local_db(test_dir):
    """Ensure the dconf 'local' database directory exists and return
    its path; rooted under test_dir when that is set."""
    path = '/etc/dconf/db/local.d'
    if test_dir is not None:
        path = os.path.join(test_dir, path[1:])
    os.makedirs(path, exist_ok=True)
    return path
+
def select_next_conf(directory, fname=''):
    """Return the next free numeric prefix for a dconf config file.

    Scans *directory* for files named '<number><fname>' and returns one
    more than the highest number found (1 for an empty directory).

    :param directory: directory to scan.
    :param fname: literal filename suffix following the numeric prefix.
    """
    # re.escape so a suffix containing regex metacharacters (e.g. '.')
    # is matched literally instead of corrupting the pattern.
    pattern = re.compile(r'(\d+)%s' % re.escape(fname))
    matches = (pattern.match(f) for f in os.listdir(directory))
    return max([int(m.group(1)) for m in matches if m] + [0]) + 1
+
class gp_gnome_settings_ext(gp_pol_ext, gp_file_applier):
    """Group Policy extension applying GNOME lockdown settings through
    dconf system databases and lock files (plus polkit for the
    repartitioning lockdown)."""

    def __init__(self, *args):
        super().__init__(*args)
        # Policy keys that carry structured data; any other value under
        # the section is treated as a plain enable/disable toggle.
        self.keys = ['Compose Key',
                     'Dim Screen when User is Idle',
                     'Lock Down Specific Settings',
                     'Whitelisted Online Accounts',
                     'Enabled Extensions']
        # {policy key: {valuename: data, ..., 'Enabled': bool}}
        self.lock_down_settings = {}
        # When set, all generated paths are rooted under this directory
        # (used by tests to avoid touching the host).
        self.test_dir = None

    def __str__(self):
        return 'GNOME Settings/Lock Down Settings'

    def __add_lockdown_data(self, k, e):
        # Collect a structured value for policy key `k` from pol entry `e`.
        if k not in self.lock_down_settings:
            self.lock_down_settings[k] = {}
        self.lock_down_settings[k][e.valuename] = e.data

    def __enable_lockdown_data(self, e):
        # Record an enable/disable toggle (registry DWORD 1 == enabled).
        if e.valuename not in self.lock_down_settings:
            self.lock_down_settings[e.valuename] = {}
        self.lock_down_settings[e.valuename]['Enabled'] = e.data == 1

    def __apply_compose_key(self, data):
        """Configure and lock the XKB compose key chosen by the policy.

        Returns the list of files written (db entry + lock file).
        """
        create_user_profile(self.test_dir)
        local_db_dir = create_local_db(self.test_dir)

        conf_id = select_next_conf(local_db_dir, '-input-sources')
        local_db = os.path.join(local_db_dir,
                                '%010d-input-sources' % conf_id)
        # Maps the policy's human-readable key name to the XKB option.
        data_map = { 'Right Alt': 'compose:ralt',
                     'Left Win': 'compose:lwin',
                     '3rd level of Left Win': 'compose:lwin-altgr',
                     'Right Win': 'compose:rwin',
                     '3rd level of Right Win': 'compose:rwin-altgr',
                     'Menu': 'compose:menu',
                     '3rd level of Menu': 'compose:menu-altgr',
                     'Left Ctrl': 'compose:lctrl',
                     '3rd level of Left Ctrl': 'compose:lctrl-altgr',
                     'Right Ctrl': 'compose:rctrl',
                     '3rd level of Right Ctrl': 'compose:rctrl-altgr',
                     'Caps Lock': 'compose:caps',
                     '3rd level of Caps Lock': 'compose:caps-altgr',
                     'The "< >" key': 'compose:102',
                     '3rd level of the "< >" key': 'compose:102-altgr',
                     'Pause': 'compose:paus',
                     'PrtSc': 'compose:prsc',
                     'Scroll Lock': 'compose:sclk'
                   }
        if data['Key Name'] not in data_map.keys():
            log.error('Compose Key not recognized', data)
            return
        parser = ConfigParser()
        section = 'org/gnome/desktop/input-sources'
        parser.add_section(section)
        parser.set(section, 'xkb-options',
                   "['%s']" % data_map[data['Key Name']])
        with open(local_db, 'w') as w:
            parser.write(w)

        # Lock xkb-options
        locks_dir = create_locks_dir(self.test_dir)
        conf_id = select_next_conf(locks_dir)
        lock = os.path.join(locks_dir, '%010d-input-sources' % conf_id)
        with open(lock, 'w') as w:
            w.write('/org/gnome/desktop/input-sources/xkb-options')

        dconf_update(self.test_dir)
        return [local_db, lock]

    def __apply_dim_idle(self, data):
        """Enable screen dimming on idle with the policy's brightness
        and delay, and lock the related settings.

        Returns the list of files written (two db entries + lock file).
        """
        create_user_profile(self.test_dir)
        local_db_dir = create_local_db(self.test_dir)
        conf_id = select_next_conf(local_db_dir, '-power')
        local_power_db = os.path.join(local_db_dir, '%010d-power' % conf_id)
        parser = ConfigParser()
        section = 'org/gnome/settings-daemon/plugins/power'
        parser.add_section(section)
        parser.set(section, 'idle-dim', 'true')
        parser.set(section, 'idle-brightness', str(data['Dim Idle Brightness']))
        with open(local_power_db, 'w') as w:
            parser.write(w)
        conf_id = select_next_conf(local_db_dir, '-session')
        local_session_db = os.path.join(local_db_dir, '%010d-session' % conf_id)
        parser = ConfigParser()
        section = 'org/gnome/desktop/session'
        parser.add_section(section)
        parser.set(section, 'idle-delay', 'uint32 %d' % data['Delay'])
        with open(local_session_db, 'w') as w:
            parser.write(w)

        # Lock power-saving
        locks_dir = create_locks_dir(self.test_dir)
        conf_id = select_next_conf(locks_dir)
        lock = os.path.join(locks_dir, '%010d-power-saving' % conf_id)
        with open(lock, 'w') as w:
            w.write('/org/gnome/settings-daemon/plugins/power/idle-dim\n')
            w.write('/org/gnome/settings-daemon/plugins/power/idle-brightness\n')
            w.write('/org/gnome/desktop/session/idle-delay')

        dconf_update(self.test_dir)
        return [local_power_db, local_session_db, lock]

    def __apply_specific_settings(self, data):
        """Lock the dconf paths listed as keys of *data*.

        Returns the list containing the single lock file written.
        """
        create_user_profile(self.test_dir)
        locks_dir = create_locks_dir(self.test_dir)
        conf_id = select_next_conf(locks_dir, '-group-policy')
        policy_file = os.path.join(locks_dir, '%010d-group-policy' % conf_id)
        with open(policy_file, 'w') as w:
            for key in data.keys():
                w.write('%s\n' % key)
        dconf_update(self.test_dir)
        return [policy_file]

    def __apply_whitelisted_account(self, data):
        """Restrict GNOME Online Accounts to the providers listed as
        keys of *data* and lock the setting."""
        create_user_profile(self.test_dir)
        local_db_dir = create_local_db(self.test_dir)
        locks_dir = create_locks_dir(self.test_dir)
        val = "['%s']" % "', '".join(data.keys())
        policy_files = self.__lockdown(local_db_dir, locks_dir, 'goa',
                                       'whitelisted-providers', val,
                                       'org/gnome/online-accounts')
        dconf_update(self.test_dir)
        return policy_files

    def __apply_enabled_extensions(self, data):
        """Set the GNOME Shell enabled-extensions list from the keys of
        *data* and disable the shell development tools."""
        create_user_profile(self.test_dir)
        local_db_dir = create_local_db(self.test_dir)
        conf_id = select_next_conf(local_db_dir)
        policy_file = os.path.join(local_db_dir, '%010d-extensions' % conf_id)
        parser = ConfigParser()
        section = 'org/gnome/shell'
        parser.add_section(section)
        exts = data.keys()
        parser.set(section, 'enabled-extensions', "['%s']" % "', '".join(exts))
        parser.set(section, 'development-tools', 'false')
        with open(policy_file, 'w') as w:
            parser.write(w)
        dconf_update(self.test_dir)
        return [policy_file]

    def __lockdown(self, local_db_dir, locks_dir, name, key, val,
                   section='org/gnome/desktop/lockdown'):
        """Write a dconf db entry setting section/key to *val* and a
        matching lock file; return both paths."""
        policy_files = []
        conf_id = select_next_conf(local_db_dir)
        policy_file = os.path.join(local_db_dir,
                                   '%010d-%s' % (conf_id, name))
        policy_files.append(policy_file)
        conf_id = select_next_conf(locks_dir)
        lock = os.path.join(locks_dir, '%010d-%s' % (conf_id, name))
        policy_files.append(lock)
        parser = ConfigParser()
        parser.add_section(section)
        parser.set(section, key, val)
        with open(policy_file, 'w') as w:
            parser.write(w)
        with open(lock, 'w') as w:
            w.write('/%s/%s' % (section, key))
        return policy_files

    def __apply_enabled(self, k):
        """Apply a simple enable/disable lockdown policy named *k*.

        Each recognized policy maps to a dconf lockdown (or, for
        repartitioning, a polkit action override). Returns the list of
        files written, or None for an unrecognized policy.
        """
        policy_files = []

        create_user_profile(self.test_dir)
        local_db_dir = create_local_db(self.test_dir)
        locks_dir = create_locks_dir(self.test_dir)

        if k == 'Lock Down Enabled Extensions':
            conf_id = select_next_conf(locks_dir)
            policy_file = os.path.join(locks_dir, '%010d-extensions' % conf_id)
            policy_files.append(policy_file)
            with open(policy_file, 'w') as w:
                w.write('/org/gnome/shell/enabled-extensions\n')
                w.write('/org/gnome/shell/development-tools')
        elif k == 'Disable Printing':
            policy_files = self.__lockdown(local_db_dir, locks_dir, 'printing',
                                           'disable-printing', 'true')
        elif k == 'Disable File Saving':
            policy_files = self.__lockdown(local_db_dir, locks_dir,
                                           'filesaving',
                                           'disable-save-to-disk', 'true')
        elif k == 'Disable Command-Line Access':
            policy_files = self.__lockdown(local_db_dir, locks_dir, 'cmdline',
                                           'disable-command-line', 'true')
        elif k == 'Disallow Login Using a Fingerprint':
            policy_files = self.__lockdown(local_db_dir, locks_dir,
                                           'fingerprintreader',
                                           'enable-fingerprint-authentication',
                                           'false',
                                           section='org/gnome/login-screen')
        elif k == 'Disable User Logout':
            policy_files = self.__lockdown(local_db_dir, locks_dir, 'logout',
                                           'disable-log-out', 'true')
        elif k == 'Disable User Switching':
            policy_files = self.__lockdown(local_db_dir, locks_dir, 'logout',
                                           'disable-user-switching', 'true')
        elif k == 'Disable Repartitioning':
            actions = '/usr/share/polkit-1/actions'
            # NOTE(review): the bracket classes below also match a
            # literal '|' in the filename; presumably only the u/U and
            # d/D case variants were intended -- kept as-is.
            udisk2 = glob(os.path.join(actions,
                          'org.freedesktop.[u|U][d|D]isks2.policy'))
            if len(udisk2) == 1:
                udisk2 = udisk2[0]
            else:
                udisk2 = os.path.join(actions,
                                      'org.freedesktop.UDisks2.policy')
            udisk2_etc = os.path.join('/etc/share/polkit-1/actions',
                                      os.path.basename(udisk2))
            if self.test_dir is not None:
                udisk2_etc = os.path.join(self.test_dir, udisk2_etc[1:])
            os.makedirs(os.path.dirname(udisk2_etc), exist_ok=True)
            # Build an override policy that keeps the vendor metadata
            # from the distro file but requires auth to modify drives.
            xml_data = etree.ElementTree(etree.Element('policyconfig'))
            if os.path.exists(udisk2):
                with open(udisk2, 'rb') as f:
                    data = f.read()
                existing_xml = etree.ElementTree(etree.fromstring(data))
                root = xml_data.getroot()
                root.append(existing_xml.find('vendor'))
                root.append(existing_xml.find('vendor_url'))
                root.append(existing_xml.find('icon_name'))
            else:
                vendor = etree.SubElement(xml_data.getroot(), 'vendor')
                vendor.text = 'The Udisks Project'
                vendor_url = etree.SubElement(xml_data.getroot(), 'vendor_url')
                vendor_url.text = 'https://github.com/storaged-project/udisks'
                icon_name = etree.SubElement(xml_data.getroot(), 'icon_name')
                icon_name.text = 'drive-removable-media'
            action = etree.SubElement(xml_data.getroot(), 'action')
            action.attrib['id'] = 'org.freedesktop.udisks2.modify-device'
            description = etree.SubElement(action, 'description')
            description.text = 'Modify the drive settings'
            message = etree.SubElement(action, 'message')
            message.text = 'Authentication is required to modify drive settings'
            defaults = etree.SubElement(action, 'defaults')
            allow_any = etree.SubElement(defaults, 'allow_any')
            allow_any.text = 'no'
            allow_inactive = etree.SubElement(defaults, 'allow_inactive')
            allow_inactive.text = 'no'
            allow_active = etree.SubElement(defaults, 'allow_active')
            allow_active.text = 'yes'
            with open(udisk2_etc, 'wb') as w:
                xml_data.write(w, encoding='UTF-8', xml_declaration=True)
            policy_files.append(udisk2_etc)
        else:
            log.error('Unable to apply', k)
            return
        dconf_update(self.test_dir)
        return policy_files

    def __clean_data(self, k):
        # Structured policy data without the 'Enabled' toggle entry.
        data = self.lock_down_settings[k]
        return {i: data[i] for i in data.keys() if i != 'Enabled'}

    def process_group_policy(self, deleted_gpo_list, changed_gpo_list,
                             test_dir=None):
        """Unapply settings from deleted GPOs, then apply the lockdown
        settings collected from changed GPOs.

        :param test_dir: roots all generated paths under this directory
            (used by tests).
        """
        if test_dir is not None:
            self.test_dir = test_dir
        for guid, settings in deleted_gpo_list:
            if str(self) in settings:
                for attribute, value in settings[str(self)].items():
                    self.unapply(guid, attribute, value, sep=';')
                dconf_update(test_dir)

        for gpo in changed_gpo_list:
            if gpo.file_sys_path:
                section_name = 'GNOME Settings\\Lock Down Settings'
                pol_file = 'MACHINE/Registry.pol'
                path = os.path.join(gpo.file_sys_path, pol_file)
                pol_conf = self.parse(path)
                if not pol_conf:
                    continue
                for e in pol_conf.entries:
                    if e.keyname.startswith(section_name) and e.data and \
                        '**delvals.' not in e.valuename:
                        # for/else: entries under a structured key are
                        # collected as data; everything else is a toggle.
                        for k in self.keys:
                            if e.keyname.endswith(k):
                                self.__add_lockdown_data(k, e)
                                break
                        else:
                            self.__enable_lockdown_data(e)
                for k in self.lock_down_settings.keys():
                    # Ignore disabled preferences
                    if not self.lock_down_settings[k]['Enabled']:
                        # Unapply the disabled preference if previously applied
                        self.clean(gpo.name, remove=k)
                        continue

                    # Apply using the appropriate applier
                    data = str(self.lock_down_settings[k])
                    value_hash = self.generate_value_hash(data)
                    if k == self.keys[0]:
                        self.apply(gpo.name, k, value_hash,
                                   self.__apply_compose_key,
                                   self.__clean_data(k), sep=';')
                    elif k == self.keys[1]:
                        self.apply(gpo.name, k, value_hash,
                                   self.__apply_dim_idle,
                                   self.__clean_data(k), sep=';')
                    elif k == self.keys[2]:
                        self.apply(gpo.name, k, value_hash,
                                   self.__apply_specific_settings,
                                   self.__clean_data(k), sep=';')
                    elif k == self.keys[3]:
                        self.apply(gpo.name, k, value_hash,
                                   self.__apply_whitelisted_account,
                                   self.__clean_data(k), sep=';')
                    elif k == self.keys[4]:
                        self.apply(gpo.name, k, value_hash,
                                   self.__apply_enabled_extensions,
                                   self.__clean_data(k), sep=';')
                    else:
                        self.apply(gpo.name, k, value_hash,
                                   self.__apply_enabled,
                                   k, sep=';')

                # Unapply any policy that has been removed
                self.clean(gpo.name, keep=self.lock_down_settings.keys())

    def rsop(self, gpo):
        """Report the lockdown settings this GPO would apply, keyed by
        policy name."""
        output = {}
        if gpo.file_sys_path:
            section_name = 'GNOME Settings\\Lock Down Settings'
            pol_file = 'MACHINE/Registry.pol'
            path = os.path.join(gpo.file_sys_path, pol_file)
            pol_conf = self.parse(path)
            if not pol_conf:
                return output
            for e in pol_conf.entries:
                if e.keyname.startswith(section_name) and e.data and \
                    '**delvals.' not in e.valuename:
                    for k in self.keys:
                        if e.keyname.endswith(k):
                            self.__add_lockdown_data(k, e)
                            break
                    else:
                        self.__enable_lockdown_data(e)
        for k in self.lock_down_settings.keys():
            if self.lock_down_settings[k]['Enabled']:
                if len(self.lock_down_settings[k]) > 1:
                    data = self.__clean_data(k)
                    if all([i == data[i] for i in data.keys()]):
                        output[k] = list(data.keys())
                    else:
                        output[k] = data
                else:
                    output[k] = self.lock_down_settings[k]
        return output
diff --git a/python/samba/gp/gp_msgs_ext.py b/python/samba/gp/gp_msgs_ext.py
new file mode 100644
index 0000000..9aadddf
--- /dev/null
+++ b/python/samba/gp/gp_msgs_ext.py
@@ -0,0 +1,96 @@
+# gp_msgs_ext samba gpo policy
+# Copyright (C) David Mulder <dmulder@suse.com> 2020
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+from samba.gp.gpclass import gp_pol_ext, gp_misc_applier
+
class gp_msgs_ext(gp_pol_ext, gp_misc_applier):
    """Group Policy extension managing /etc/motd and /etc/issue from
    Registry.pol message policies."""

    def unapply(self, guid, cdir, attribute, value):
        """Restore the previous contents of a message file.

        :param cdir: directory holding the message files (normally /etc).
        :param attribute: 'motd' or 'issue'.
        :param value: cached old/new pair produced by generate_value().
        :raises ValueError: for any other attribute name.
        """
        if attribute not in ['motd', 'issue']:
            raise ValueError('"%s" is not a message attribute' % attribute)
        data = self.parse_value(value)
        mfile = os.path.join(cdir, attribute)
        if os.path.exists(mfile):
            with open(mfile, 'r') as f:
                current = f.read()
        else:
            current = ''
        # Only overwrite the msg if it hasn't been modified. It may have been
        # modified by another GPO.
        if 'new_val' not in data or current.strip() == data['new_val'].strip():
            msg = data['old_val']
            with open(mfile, 'w') as w:
                if msg:
                    w.write(msg)
                else:
                    w.truncate()
        self.cache_remove_attribute(guid, attribute)

    def apply(self, guid, cdir, entries):
        """Write message files from the policy entries, caching the
        previous contents so they can be restored on unapply."""
        section_name = 'Software\\Policies\\Samba\\Unix Settings\\Messages'
        for e in entries:
            if e.keyname == section_name and e.data.strip():
                if e.valuename not in ['motd', 'issue']:
                    raise ValueError('"%s" is not a message attribute' %
                                     e.valuename)
                mfile = os.path.join(cdir, e.valuename)
                if os.path.exists(mfile):
                    with open(mfile, 'r') as f:
                        old_val = f.read()
                else:
                    old_val = ''
                # If this policy is already applied, skip it. BUG FIX:
                # this was 'return', which aborted processing of all
                # remaining message entries (e.g. an up-to-date motd
                # prevented issue from ever being applied).
                if old_val.strip() == e.data.strip():
                    continue
                with open(mfile, 'w') as w:
                    w.write(e.data)
                data = self.generate_value(old_val=old_val, new_val=e.data)
                self.cache_add_attribute(guid, e.valuename, data)

    def __str__(self):
        return 'Unix Settings/Messages'

    def process_group_policy(self, deleted_gpo_list, changed_gpo_list,
                             cdir='/etc'):
        """Unapply messages from deleted GPOs, then apply messages from
        changed GPOs. cdir overrides the target directory (tests)."""
        for guid, settings in deleted_gpo_list:
            if str(self) in settings:
                for attribute, msg in settings[str(self)].items():
                    self.unapply(guid, cdir, attribute, msg)

        for gpo in changed_gpo_list:
            if gpo.file_sys_path:
                section_name = 'Software\\Policies\\Samba\\Unix Settings\\Messages'
                pol_file = 'MACHINE/Registry.pol'
                path = os.path.join(gpo.file_sys_path, pol_file)
                pol_conf = self.parse(path)
                if not pol_conf:
                    continue
                self.apply(gpo.name, cdir, pol_conf.entries)

    def rsop(self, gpo):
        """Report the message files this GPO would write, keyed by
        their /etc destination path."""
        output = {}
        if gpo.file_sys_path:
            section_name = 'Software\\Policies\\Samba\\Unix Settings\\Messages'
            pol_file = 'MACHINE/Registry.pol'
            path = os.path.join(gpo.file_sys_path, pol_file)
            pol_conf = self.parse(path)
            if not pol_conf:
                return output
            for e in pol_conf.entries:
                if e.keyname == section_name and e.data.strip():
                    mfile = os.path.join('/etc', e.valuename)
                    output[mfile] = e.data
        return output
diff --git a/python/samba/gp/gp_scripts_ext.py b/python/samba/gp/gp_scripts_ext.py
new file mode 100644
index 0000000..998b9cd
--- /dev/null
+++ b/python/samba/gp/gp_scripts_ext.py
@@ -0,0 +1,187 @@
+# gp_scripts_ext samba gpo policy
+# Copyright (C) David Mulder <dmulder@suse.com> 2020
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os, re
+from subprocess import Popen, PIPE
+from samba.gp.gpclass import gp_pol_ext, drop_privileges, gp_file_applier, \
+ gp_misc_applier
+from tempfile import NamedTemporaryFile
+from samba.gp.util.logging import log
+
# Markers wrapped around samba-managed content in generated script
# files and crontabs; fetch_crontab() locates managed entries by
# matching these strings, so their contents must not be altered.
intro = '''
### autogenerated by samba
#
# This file is generated by the gp_scripts_ext Group Policy
# Client Side Extension. To modify the contents of this file,
# modify the appropriate Group Policy objects which apply
# to this machine. DO NOT MODIFY THIS FILE DIRECTLY.
#

'''
end = '''
### autogenerated by samba ###
'''
+
class gp_scripts_ext(gp_pol_ext, gp_file_applier):
    """Group Policy extension installing machine scripts from
    Registry.pol into the /etc/cron.{hourly,daily,weekly,monthly}
    directories."""

    def __str__(self):
        return 'Unix Settings/Scripts'

    def process_group_policy(self, deleted_gpo_list, changed_gpo_list, cdir=None):
        """Unapply scripts from deleted GPOs and (re)install scripts
        from changed GPOs.

        :param cdir: overrides the cron target directory for every
            section (used by tests).
        """
        for guid, settings in deleted_gpo_list:
            if str(self) in settings:
                for attribute, script in settings[str(self)].items():
                    self.unapply(guid, attribute, script)

        for gpo in changed_gpo_list:
            if gpo.file_sys_path:
                reg_key = 'Software\\Policies\\Samba\\Unix Settings'
                sections = { '%s\\Daily Scripts' % reg_key : '/etc/cron.daily',
                             '%s\\Monthly Scripts' % reg_key : '/etc/cron.monthly',
                             '%s\\Weekly Scripts' % reg_key : '/etc/cron.weekly',
                             '%s\\Hourly Scripts' % reg_key : '/etc/cron.hourly' }
                pol_file = 'MACHINE/Registry.pol'
                path = os.path.join(gpo.file_sys_path, pol_file)
                pol_conf = self.parse(path)
                if not pol_conf:
                    continue
                policies = {}
                for e in pol_conf.entries:
                    if e.keyname in sections.keys() and e.data.strip():
                        if e.keyname not in policies:
                            policies[e.keyname] = []
                        policies[e.keyname].append(e.data)
                # Writes one executable script file per policy entry
                # into the matching cron directory; returns the paths
                # for the applier cache.
                def applier_func(keyname, entries):
                    ret = []
                    cron_dir = sections[keyname] if not cdir else cdir
                    for data in entries:
                        with NamedTemporaryFile(prefix='gp_', mode="w+",
                                                delete=False, dir=cron_dir) as f:
                            contents = '#!/bin/sh\n%s' % intro
                            contents += '%s\n' % data
                            f.write(contents)
                            os.chmod(f.name, 0o700)
                            ret.append(f.name)
                    return ret
                for keyname, entries in policies.items():
                    # Each GPO applies only one set of each type of script,
                    # so the attribute matches the keyname.
                    attribute = keyname
                    # The value hash is generated from the script entries,
                    # ensuring any changes to this GPO will cause the scripts
                    # to be rewritten.
                    value_hash = self.generate_value_hash(*entries)
                    self.apply(gpo.name, attribute, value_hash, applier_func,
                               keyname, entries)

                # Cleanup any old scripts that are no longer part of the policy
                self.clean(gpo.name, keep=policies.keys())

    def rsop(self, gpo, target='MACHINE'):
        """Report the script entries this GPO would install, grouped by
        script frequency key (e.g. 'Daily Scripts')."""
        output = {}
        pol_file = '%s/Registry.pol' % target
        if gpo.file_sys_path:
            path = os.path.join(gpo.file_sys_path, pol_file)
            pol_conf = self.parse(path)
            if not pol_conf:
                return output
            for e in pol_conf.entries:
                key = e.keyname.split('\\')[-1]
                if key.endswith('Scripts') and e.data.strip():
                    if key not in output.keys():
                        output[key] = []
                    output[key].append(e.data)
        return output
+
def fetch_crontab(username):
    """Read a user's crontab and split it into samba-managed entries
    and everything else.

    Returns (others, entries): `others` is the text outside the
    autogenerated markers, `entries` the list of managed crontab lines.
    """
    p = Popen(['crontab', '-l', '-u', username], stdout=PIPE, stderr=PIPE)
    out, err = p.communicate()
    if p.returncode != 0:
        log.warning('Failed to read the crontab: %s' % err)
    text = out.decode()
    managed = re.findall('%s(.*)%s' % (intro, end), text, re.DOTALL)
    if len(managed) == 1:
        entries = managed[0].strip().split('\n')
    else:
        entries = []
    remainder = re.findall('(.*)%s.*%s(.*)' % (intro, end), text, re.DOTALL)
    if len(remainder) == 1:
        others = '\n'.join([chunk.strip() for chunk in remainder[0]])
    else:
        others = text
    return others, entries
+
def install_crontab(fname, username):
    """Install the crontab file *fname* for *username*.

    :raises RuntimeError: if the crontab command fails.
    """
    proc = Popen(['crontab', fname, '-u', username], stdout=PIPE, stderr=PIPE)
    _, err = proc.communicate()
    if proc.returncode != 0:
        raise RuntimeError('Failed to install crontab: %s' % err)
+
def install_user_crontab(username, others, entries):
    """Rebuild and install a user's crontab from the unmanaged text
    plus the samba-managed entries (wrapped in the autogenerated
    markers)."""
    with NamedTemporaryFile() as f:
        if entries:
            content = '\n'.join([others, intro, '\n'.join(entries), end])
        else:
            content = others
        f.write(content.encode())
        f.flush()
        install_crontab(f.name, username)
+
class gp_user_scripts_ext(gp_scripts_ext, gp_misc_applier):
    """Per-user variant of gp_scripts_ext: installs policy scripts as
    entries in the user's crontab instead of /etc/cron.* files."""

    def unapply(self, guid, attribute, entry):
        """Remove a managed crontab entry and its cache record."""
        others, entries = fetch_crontab(self.username)
        if entry in entries:
            entries.remove(entry)
            install_user_crontab(self.username, others, entries)
        self.cache_remove_attribute(guid, attribute)

    def apply(self, guid, attribute, entry):
        """Add a crontab entry (if not already present) and cache it."""
        old_val = self.cache_get_attribute_value(guid, attribute)
        others, entries = fetch_crontab(self.username)
        if not old_val or entry not in entries:
            entries.append(entry)
            install_user_crontab(self.username, others, entries)
            self.cache_add_attribute(guid, attribute, entry)

    def process_group_policy(self, deleted_gpo_list, changed_gpo_list):
        """Apply/unapply user crontab entries from USER/Registry.pol."""
        for guid, settings in deleted_gpo_list:
            if str(self) in settings:
                for attribute, entry in settings[str(self)].items():
                    self.unapply(guid, attribute, entry)

        for gpo in changed_gpo_list:
            if gpo.file_sys_path:
                reg_key = 'Software\\Policies\\Samba\\Unix Settings'
                # Maps the policy section to the cron frequency prefix.
                sections = { '%s\\Daily Scripts' % reg_key : '@daily',
                             '%s\\Monthly Scripts' % reg_key : '@monthly',
                             '%s\\Weekly Scripts' % reg_key : '@weekly',
                             '%s\\Hourly Scripts' % reg_key : '@hourly' }
                pol_file = 'USER/Registry.pol'
                path = os.path.join(gpo.file_sys_path, pol_file)
                # Read the policy as root; the sysvol file may not be
                # readable as the target user.
                pol_conf = drop_privileges('root', self.parse, path)
                if not pol_conf:
                    continue
                attrs = []
                for e in pol_conf.entries:
                    if e.keyname in sections.keys() and e.data.strip():
                        cron_freq = sections[e.keyname]
                        attribute = '%s:%s' % (e.keyname,
                                               self.generate_attribute(e.data))
                        attrs.append(attribute)
                        entry = '%s %s' % (cron_freq, e.data)
                        self.apply(gpo.name, attribute, entry)
                self.clean(gpo.name, keep=attrs)

    def rsop(self, gpo):
        """RSoP for user scripts, read from USER/Registry.pol."""
        return super().rsop(gpo, target='USER')
diff --git a/python/samba/gp/gp_sec_ext.py b/python/samba/gp/gp_sec_ext.py
new file mode 100644
index 0000000..39b9cdc
--- /dev/null
+++ b/python/samba/gp/gp_sec_ext.py
@@ -0,0 +1,221 @@
+# gp_sec_ext kdc gpo policy
+# Copyright (C) Luke Morrison <luc785@.hotmail.com> 2013
+# Copyright (C) David Mulder <dmulder@suse.com> 2018
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os.path
+from samba.gp.gpclass import gp_inf_ext
+from samba.auth import system_session
+from samba.common import get_string
+try:
+ from ldb import LdbError
+ from samba.samdb import SamDB
+except ImportError:
+ pass
+from samba.gp.util.logging import log
+
+def mins_to_hours(val):
+ return '%d' % (int(val) / 60)
+
+def days_to_hours(val):
+ return '%d' % (int(val) * 24)
+
+def days2rel_nttime(val):
+ seconds = 60
+ minutes = 60
+ hours = 24
+ sam_add = 10000000
+ val = int(val)
+ return str(-(val * seconds * minutes * hours * sam_add))
+
+class gp_krb_ext(gp_inf_ext):
+ apply_map = { 'MaxTicketAge': 'kdc:user_ticket_lifetime',
+ 'MaxServiceAge': 'kdc:service_ticket_lifetime',
+ 'MaxRenewAge': 'kdc:renewal_lifetime' }
+ def process_group_policy(self, deleted_gpo_list, changed_gpo_list):
+ if self.lp.get('server role') != 'active directory domain controller':
+ return
+ inf_file = 'MACHINE/Microsoft/Windows NT/SecEdit/GptTmpl.inf'
+ for guid, settings in deleted_gpo_list:
+ self.gp_db.set_guid(guid)
+ for section in settings.keys():
+ if section == str(self):
+ for att, value in settings[section].items():
+ self.set_kdc_tdb(att, value)
+ self.gp_db.delete(section, att)
+ self.gp_db.commit()
+
+ for gpo in changed_gpo_list:
+ if gpo.file_sys_path:
+ self.gp_db.set_guid(gpo.name)
+ path = os.path.join(gpo.file_sys_path, inf_file)
+ inf_conf = self.parse(path)
+ if not inf_conf:
+ continue
+ for section in inf_conf.sections():
+ if section == str(self):
+ for key, value in inf_conf.items(section):
+ if key not in gp_krb_ext.apply_map:
+ continue
+ att = gp_krb_ext.apply_map[key]
+ value_func = self.mapper().get(att)
+ self.set_kdc_tdb(att, value_func(value))
+ self.gp_db.commit()
+
+ def set_kdc_tdb(self, attribute, val):
+ old_val = self.gp_db.gpostore.get(attribute)
+ log.info('%s was changed from %s to %s' % (attribute, old_val, val))
+ if val is not None:
+ self.gp_db.gpostore.store(attribute, get_string(val))
+ self.gp_db.store(str(self), attribute, get_string(old_val)
+ if old_val else None)
+ else:
+ self.gp_db.gpostore.delete(attribute)
+ self.gp_db.delete(str(self), attribute)
+
+ def mapper(self):
+ return {'kdc:user_ticket_lifetime': lambda val: val,
+ 'kdc:service_ticket_lifetime': mins_to_hours,
+ 'kdc:renewal_lifetime': days_to_hours,
+ }
+
+ def __str__(self):
+ return 'Kerberos Policy'
+
+ def rsop(self, gpo):
+ output = {}
+ if self.lp.get('server role') != 'active directory domain controller':
+ return output
+ inf_file = 'MACHINE/Microsoft/Windows NT/SecEdit/GptTmpl.inf'
+ if gpo.file_sys_path:
+ path = os.path.join(gpo.file_sys_path, inf_file)
+ inf_conf = self.parse(path)
+ if not inf_conf:
+ return output
+ if str(self) in inf_conf.sections():
+ section = str(self)
+ output[section] = {k: v for k, v in inf_conf.items(section)
+ if gp_krb_ext.apply_map.get(k)}
+ return output
+
+
+class gp_access_ext(gp_inf_ext):
+    """This class takes the .inf file parameter (essentially a GPO file mapped
+    to a GUID), maps its entries to the corresponding Samba parameters, then
+    uses an ldb object to update each parameter in Samba4. Not registry
+ """
+
+ def load_ldb(self):
+ try:
+ self.ldb = SamDB(self.lp.samdb_url(),
+ session_info=system_session(),
+ credentials=self.creds,
+ lp=self.lp)
+ except (NameError, LdbError):
+ raise Exception('Failed to load SamDB for assigning Group Policy')
+
+ apply_map = { 'MinimumPasswordAge': 'minPwdAge',
+ 'MaximumPasswordAge': 'maxPwdAge',
+ 'MinimumPasswordLength': 'minPwdLength',
+ 'PasswordComplexity': 'pwdProperties' }
+ def process_group_policy(self, deleted_gpo_list, changed_gpo_list):
+ if self.lp.get('server role') != 'active directory domain controller':
+ return
+ self.load_ldb()
+ inf_file = 'MACHINE/Microsoft/Windows NT/SecEdit/GptTmpl.inf'
+ for guid, settings in deleted_gpo_list:
+ self.gp_db.set_guid(guid)
+ for section in settings.keys():
+ if section == str(self):
+ for att, value in settings[section].items():
+ update_samba, _ = self.mapper().get(att)
+ update_samba(att, value)
+ self.gp_db.delete(section, att)
+ self.gp_db.commit()
+
+ for gpo in changed_gpo_list:
+ if gpo.file_sys_path:
+ self.gp_db.set_guid(gpo.name)
+ path = os.path.join(gpo.file_sys_path, inf_file)
+ inf_conf = self.parse(path)
+ if not inf_conf:
+ continue
+ for section in inf_conf.sections():
+ if section == str(self):
+ for key, value in inf_conf.items(section):
+ if key not in gp_access_ext.apply_map:
+ continue
+ att = gp_access_ext.apply_map[key]
+ (update_samba, value_func) = self.mapper().get(att)
+ update_samba(att, value_func(value))
+ self.gp_db.commit()
+
+ def ch_minPwdAge(self, attribute, val):
+ old_val = self.ldb.get_minPwdAge()
+ log.info('KDC Minimum Password age was changed from %s to %s'
+ % (old_val, val))
+ self.gp_db.store(str(self), attribute, str(old_val))
+ self.ldb.set_minPwdAge(val)
+
+ def ch_maxPwdAge(self, attribute, val):
+ old_val = self.ldb.get_maxPwdAge()
+ log.info('KDC Maximum Password age was changed from %s to %s'
+ % (old_val, val))
+ self.gp_db.store(str(self), attribute, str(old_val))
+ self.ldb.set_maxPwdAge(val)
+
+ def ch_minPwdLength(self, attribute, val):
+ old_val = self.ldb.get_minPwdLength()
+ log.info('KDC Minimum Password length was changed from %s to %s'
+ % (old_val, val))
+ self.gp_db.store(str(self), attribute, str(old_val))
+ self.ldb.set_minPwdLength(val)
+
+ def ch_pwdProperties(self, attribute, val):
+ old_val = self.ldb.get_pwdProperties()
+ log.info('KDC Password Properties were changed from %s to %s'
+ % (old_val, val))
+ self.gp_db.store(str(self), attribute, str(old_val))
+ self.ldb.set_pwdProperties(val)
+
+ def mapper(self):
+ """ldap value : samba setter"""
+ return {"minPwdAge": (self.ch_minPwdAge, days2rel_nttime),
+ "maxPwdAge": (self.ch_maxPwdAge, days2rel_nttime),
+ # Could be none, but I like the method assignment in
+ # update_samba
+ "minPwdLength": (self.ch_minPwdLength, lambda val: val),
+ "pwdProperties": (self.ch_pwdProperties, lambda val: val),
+
+ }
+
+ def __str__(self):
+ return 'System Access'
+
+ def rsop(self, gpo):
+ output = {}
+ if self.lp.get('server role') != 'active directory domain controller':
+ return output
+ inf_file = 'MACHINE/Microsoft/Windows NT/SecEdit/GptTmpl.inf'
+ if gpo.file_sys_path:
+ path = os.path.join(gpo.file_sys_path, inf_file)
+ inf_conf = self.parse(path)
+ if not inf_conf:
+ return output
+ if str(self) in inf_conf.sections():
+ section = str(self)
+ output[section] = {k: v for k, v in inf_conf.items(section)
+ if gp_access_ext.apply_map.get(k)}
+ return output
diff --git a/python/samba/gp/gp_smb_conf_ext.py b/python/samba/gp/gp_smb_conf_ext.py
new file mode 100644
index 0000000..3ef9cfd
--- /dev/null
+++ b/python/samba/gp/gp_smb_conf_ext.py
@@ -0,0 +1,127 @@
+# gp_smb_conf_ext smb.conf gpo policy
+# Copyright (C) David Mulder <dmulder@suse.com> 2018
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os, numbers
+from samba.gp.gpclass import gp_pol_ext, gp_misc_applier
+from tempfile import NamedTemporaryFile
+from samba.gp.util.logging import log
+
+def is_number(x):
+ return isinstance(x, numbers.Number) and \
+ type(x) != bool
+
+class gp_smb_conf_ext(gp_pol_ext, gp_misc_applier):
+ def unapply(self, guid, attribute, val):
+ current = self.lp.get(attribute)
+ data = self.parse_value(val)
+
+ # Only overwrite the smb.conf setting if it hasn't been modified. It
+ # may have been modified by another GPO.
+ if 'new_val' not in data or \
+ self.lptype_to_string(current) == data['new_val']:
+ self.lp.set(attribute, self.regtype_to_lptype(data['old_val'],
+ current))
+ self.store_lp_smb_conf(self.lp)
+ log.info('smb.conf [global] was changed',
+ { attribute : str(data['old_val']) })
+
+ self.cache_remove_attribute(guid, attribute)
+
+ def apply(self, guid, attribute, val):
+ old_val = self.lp.get(attribute)
+ val = self.regtype_to_lptype(val, old_val)
+
+ self.lp.set(attribute, val)
+ self.store_lp_smb_conf(self.lp)
+ log.info('smb.conf [global] was changed', { attribute : str(val) })
+
+ data = self.generate_value(old_val=self.lptype_to_string(old_val),
+ new_val=self.lptype_to_string(val))
+ self.cache_add_attribute(guid, attribute, data)
+
+ def process_group_policy(self, deleted_gpo_list, changed_gpo_list):
+ pol_file = 'MACHINE/Registry.pol'
+ for guid, settings in deleted_gpo_list:
+ smb_conf = settings.get('smb.conf')
+ if smb_conf is None:
+ continue
+ for key, value in smb_conf.items():
+ self.unapply(guid, key, value)
+
+ for gpo in changed_gpo_list:
+ if gpo.file_sys_path:
+ section_name = 'Software\\Policies\\Samba\\smb_conf'
+ path = os.path.join(gpo.file_sys_path, pol_file)
+ pol_conf = self.parse(path)
+ if not pol_conf:
+ continue
+ attrs = []
+ for e in pol_conf.entries:
+ if not e.keyname.startswith(section_name):
+ continue
+ attrs.append(e.valuename)
+ self.apply(gpo.name, e.valuename, e.data)
+
+ # Cleanup settings which were removed from the policy
+ self.clean(gpo.name, keep=attrs)
+
+ def regtype_to_lptype(self, val, old_val):
+ if type(val) == bytes:
+ val = val.decode()
+ if is_number(val) and is_number(old_val):
+ val = str(val)
+ elif is_number(val) and type(old_val) == bool:
+ val = bool(val)
+ if type(val) == bool:
+ val = 'yes' if val else 'no'
+ return val
+
+ def store_lp_smb_conf(self, lp):
+ with NamedTemporaryFile(delete=False,
+ dir=os.path.dirname(lp.configfile)) as f:
+ lp.dump(False, f.name)
+ mode = os.stat(lp.configfile).st_mode
+ os.chmod(f.name, mode)
+ os.rename(f.name, lp.configfile)
+
+ def lptype_to_string(self, val):
+ if is_number(val):
+ val = str(val)
+ elif type(val) == bool:
+ val = 'yes' if val else 'no'
+ elif type(val) == list:
+ val = ' '.join(val)
+ return val
+
+ def __str__(self):
+ return "smb.conf"
+
+ def rsop(self, gpo):
+ output = {}
+ if gpo.file_sys_path:
+ section_name = 'Software\\Policies\\Samba\\smb_conf'
+ pol_file = 'MACHINE/Registry.pol'
+ path = os.path.join(gpo.file_sys_path, pol_file)
+ pol_conf = self.parse(path)
+ if not pol_conf:
+ return output
+ for e in pol_conf.entries:
+ if not e.keyname.startswith(section_name):
+ continue
+ if 'smb.conf' not in output.keys():
+ output['smb.conf'] = {}
+ output['smb.conf'][e.valuename] = e.data
+ return output
diff --git a/python/samba/gp/gp_sudoers_ext.py b/python/samba/gp/gp_sudoers_ext.py
new file mode 100644
index 0000000..026aeba
--- /dev/null
+++ b/python/samba/gp/gp_sudoers_ext.py
@@ -0,0 +1,116 @@
+# gp_sudoers_ext samba gpo policy
+# Copyright (C) David Mulder <dmulder@suse.com> 2020
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+from samba.gp.gpclass import gp_pol_ext, gp_file_applier
+from tempfile import NamedTemporaryFile
+from subprocess import Popen, PIPE
+from samba.gp.util.logging import log
+
+def find_executable(executable, path):
+ paths = path.split(os.pathsep)
+ for p in paths:
+ f = os.path.join(p, executable)
+ if os.path.isfile(f):
+ return f
+ return None
+
+intro = '''
+### autogenerated by samba
+#
+# This file is generated by the gp_sudoers_ext Group Policy
+# Client Side Extension. To modify the contents of this file,
+# modify the appropriate Group Policy objects which apply
+# to this machine. DO NOT MODIFY THIS FILE DIRECTLY.
+#
+
+'''
+visudo = find_executable('visudo',
+ path='%s:%s' % (os.environ['PATH'], '/usr/sbin'))
+
+def sudo_applier_func(sudo_dir, sudo_entries):
+ ret = []
+ for p in sudo_entries:
+ contents = intro
+ contents += '%s\n' % p
+ with NamedTemporaryFile() as f:
+ with open(f.name, 'w') as w:
+ w.write(contents)
+ if visudo is None:
+ raise FileNotFoundError('visudo not found, please install it')
+ with Popen([visudo, '-c', '-f', f.name],
+ stdout=PIPE, stderr=PIPE) as proc:
+ sudo_validation = proc.wait()
+ if sudo_validation == 0:
+ with NamedTemporaryFile(prefix='gp_',
+ delete=False,
+ dir=sudo_dir) as f:
+ with open(f.name, 'w') as w:
+ w.write(contents)
+ ret.append(f.name)
+ else:
+ log.error('Sudoers apply failed', p)
+ return ret
+
+class gp_sudoers_ext(gp_pol_ext, gp_file_applier):
+ def __str__(self):
+ return 'Unix Settings/Sudo Rights'
+
+ def process_group_policy(self, deleted_gpo_list, changed_gpo_list,
+ sdir='/etc/sudoers.d'):
+ for guid, settings in deleted_gpo_list:
+ if str(self) in settings:
+ for attribute, sudoers in settings[str(self)].items():
+ self.unapply(guid, attribute, sudoers)
+
+ for gpo in changed_gpo_list:
+ if gpo.file_sys_path:
+ section = 'Software\\Policies\\Samba\\Unix Settings\\Sudo Rights'
+ pol_file = 'MACHINE/Registry.pol'
+ path = os.path.join(gpo.file_sys_path, pol_file)
+ pol_conf = self.parse(path)
+ if not pol_conf:
+ continue
+ sudo_entries = []
+ for e in pol_conf.entries:
+ if e.keyname == section and e.data.strip():
+ sudo_entries.append(e.data)
+ # Each GPO applies only one set of sudoers, in a
+ # set of files, so the attribute does not need uniqueness.
+ attribute = self.generate_attribute(gpo.name)
+ # The value hash is generated from the sudo_entries, ensuring
+ # any changes to this GPO will cause the files to be rewritten.
+ value_hash = self.generate_value_hash(*sudo_entries)
+ self.apply(gpo.name, attribute, value_hash, sudo_applier_func,
+ sdir, sudo_entries)
+ # Cleanup any old entries that are no longer part of the policy
+ self.clean(gpo.name, keep=[attribute])
+
+ def rsop(self, gpo):
+ output = {}
+ pol_file = 'MACHINE/Registry.pol'
+ if gpo.file_sys_path:
+ path = os.path.join(gpo.file_sys_path, pol_file)
+ pol_conf = self.parse(path)
+ if not pol_conf:
+ return output
+ for e in pol_conf.entries:
+ key = e.keyname.split('\\')[-1]
+ if key.endswith('Sudo Rights') and e.data.strip():
+ if key not in output.keys():
+ output[key] = []
+ output[key].append(e.data)
+ return output
diff --git a/python/samba/gp/gpclass.py b/python/samba/gp/gpclass.py
new file mode 100644
index 0000000..08be472
--- /dev/null
+++ b/python/samba/gp/gpclass.py
@@ -0,0 +1,1312 @@
+# Reads important GPO parameters and updates Samba
+# Copyright (C) Luke Morrison <luc785@.hotmail.com> 2013
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+import sys
+import os, shutil
+import errno
+import tdb
+import pwd
+sys.path.insert(0, "bin/python")
+from samba import WERRORError
+from configparser import ConfigParser
+from io import StringIO
+import traceback
+from samba.common import get_bytes
+from abc import ABCMeta, abstractmethod
+import xml.etree.ElementTree as etree
+import re
+from samba.net import Net
+from samba.dcerpc import nbt
+from samba.samba3 import libsmb_samba_internal as libsmb
+import samba.gpo as gpo
+from uuid import UUID
+from tempfile import NamedTemporaryFile
+from samba.dcerpc import preg
+from samba.ndr import ndr_unpack
+from samba.credentials import SMB_SIGNING_REQUIRED
+from samba.gp.util.logging import log
+from hashlib import blake2b
+import numbers
+from samba.common import get_string
+from samba.samdb import SamDB
+from samba.auth import system_session
+import ldb
+from samba.dsdb import UF_WORKSTATION_TRUST_ACCOUNT, UF_SERVER_TRUST_ACCOUNT, GPLINK_OPT_ENFORCE, GPLINK_OPT_DISABLE, GPO_BLOCK_INHERITANCE
+from samba.auth import AUTH_SESSION_INFO_DEFAULT_GROUPS, AUTH_SESSION_INFO_AUTHENTICATED, AUTH_SESSION_INFO_SIMPLE_PRIVILEGES
+from samba.dcerpc import security
+import samba.security
+from samba.dcerpc import nbt
+from datetime import datetime
+
+
+try:
+ from enum import Enum
+ GPOSTATE = Enum('GPOSTATE', 'APPLY ENFORCE UNAPPLY')
+except ImportError:
+ class GPOSTATE:
+ APPLY = 1
+ ENFORCE = 2
+ UNAPPLY = 3
+
+
+class gp_log:
+ """ Log settings overwritten by gpo apply
+ The gp_log is an xml file that stores a history of gpo changes (and the
+ original setting value).
+
+ The log is organized like so:
+
+<gp>
+ <user name="KDC-1$">
+ <applylog>
+ <guid count="0" value="{31B2F340-016D-11D2-945F-00C04FB984F9}" />
+ </applylog>
+ <guid value="{31B2F340-016D-11D2-945F-00C04FB984F9}">
+ <gp_ext name="System Access">
+ <attribute name="minPwdAge">-864000000000</attribute>
+ <attribute name="maxPwdAge">-36288000000000</attribute>
+ <attribute name="minPwdLength">7</attribute>
+ <attribute name="pwdProperties">1</attribute>
+ </gp_ext>
+ <gp_ext name="Kerberos Policy">
+ <attribute name="ticket_lifetime">1d</attribute>
+ <attribute name="renew_lifetime" />
+ <attribute name="clockskew">300</attribute>
+ </gp_ext>
+ </guid>
+ </user>
+</gp>
+
+ Each guid value contains a list of extensions, which contain a list of
+ attributes. The guid value represents a GPO. The attributes are the values
+ of those settings prior to the application of the GPO.
+ The list of guids is enclosed within a user name, which represents the user
+ the settings were applied to. This user may be the samaccountname of the
+ local computer, which implies that these are machine policies.
+ The applylog keeps track of the order in which the GPOs were applied, so
+ that they can be rolled back in reverse, returning the machine to the state
+ prior to policy application.
+ """
+ def __init__(self, user, gpostore, db_log=None):
+ """ Initialize the gp_log
+ param user - the username (or machine name) that policies are
+ being applied to
+ param gpostore - the GPOStorage obj which references the tdb which
+ contains gp_logs
+ param db_log - (optional) a string to initialize the gp_log
+ """
+ self._state = GPOSTATE.APPLY
+ self.gpostore = gpostore
+ self.username = user
+ if db_log:
+ self.gpdb = etree.fromstring(db_log)
+ else:
+ self.gpdb = etree.Element('gp')
+ self.user = user
+ user_obj = self.gpdb.find('user[@name="%s"]' % user)
+ if user_obj is None:
+ user_obj = etree.SubElement(self.gpdb, 'user')
+ user_obj.attrib['name'] = user
+
+ def state(self, value):
+ """ Policy application state
+ param value - APPLY, ENFORCE, or UNAPPLY
+
+ The behavior of the gp_log depends on whether we are applying policy,
+ enforcing policy, or unapplying policy. During an apply, old settings
+ are recorded in the log. During an enforce, settings are being applied
+ but the gp_log does not change. During an unapply, additions to the log
+ should be ignored (since function calls to apply settings are actually
+ reverting policy), but removals from the log are allowed.
+ """
+ # If we're enforcing, but we've unapplied, apply instead
+ if value == GPOSTATE.ENFORCE:
+ user_obj = self.gpdb.find('user[@name="%s"]' % self.user)
+ apply_log = user_obj.find('applylog')
+ if apply_log is None or len(apply_log) == 0:
+ self._state = GPOSTATE.APPLY
+ else:
+ self._state = value
+ else:
+ self._state = value
+
+ def get_state(self):
+ """Check the GPOSTATE
+ """
+ return self._state
+
+ def set_guid(self, guid):
+ """ Log to a different GPO guid
+ param guid - guid value of the GPO from which we're applying
+ policy
+ """
+ self.guid = guid
+ user_obj = self.gpdb.find('user[@name="%s"]' % self.user)
+ obj = user_obj.find('guid[@value="%s"]' % guid)
+ if obj is None:
+ obj = etree.SubElement(user_obj, 'guid')
+ obj.attrib['value'] = guid
+ if self._state == GPOSTATE.APPLY:
+ apply_log = user_obj.find('applylog')
+ if apply_log is None:
+ apply_log = etree.SubElement(user_obj, 'applylog')
+ prev = apply_log.find('guid[@value="%s"]' % guid)
+ if prev is None:
+ item = etree.SubElement(apply_log, 'guid')
+ item.attrib['count'] = '%d' % (len(apply_log) - 1)
+ item.attrib['value'] = guid
+
+ def store(self, gp_ext_name, attribute, old_val):
+ """ Store an attribute in the gp_log
+ param gp_ext_name - Name of the extension applying policy
+ param attribute - The attribute being modified
+ param old_val - The value of the attribute prior to policy
+ application
+ """
+ if self._state == GPOSTATE.UNAPPLY or self._state == GPOSTATE.ENFORCE:
+ return None
+ user_obj = self.gpdb.find('user[@name="%s"]' % self.user)
+ guid_obj = user_obj.find('guid[@value="%s"]' % self.guid)
+ assert guid_obj is not None, "gpo guid was not set"
+ ext = guid_obj.find('gp_ext[@name="%s"]' % gp_ext_name)
+ if ext is None:
+ ext = etree.SubElement(guid_obj, 'gp_ext')
+ ext.attrib['name'] = gp_ext_name
+ attr = ext.find('attribute[@name="%s"]' % attribute)
+ if attr is None:
+ attr = etree.SubElement(ext, 'attribute')
+ attr.attrib['name'] = attribute
+ attr.text = old_val
+
+ def retrieve(self, gp_ext_name, attribute):
+ """ Retrieve a stored attribute from the gp_log
+ param gp_ext_name - Name of the extension which applied policy
+ param attribute - The attribute being retrieved
+ return - The value of the attribute prior to policy
+ application
+ """
+ user_obj = self.gpdb.find('user[@name="%s"]' % self.user)
+ guid_obj = user_obj.find('guid[@value="%s"]' % self.guid)
+ assert guid_obj is not None, "gpo guid was not set"
+ ext = guid_obj.find('gp_ext[@name="%s"]' % gp_ext_name)
+ if ext is not None:
+ attr = ext.find('attribute[@name="%s"]' % attribute)
+ if attr is not None:
+ return attr.text
+ return None
+
+ def retrieve_all(self, gp_ext_name):
+ """ Retrieve all stored attributes for this user, GPO guid, and CSE
+ param gp_ext_name - Name of the extension which applied policy
+ return - The values of the attributes prior to policy
+ application
+ """
+ user_obj = self.gpdb.find('user[@name="%s"]' % self.user)
+ guid_obj = user_obj.find('guid[@value="%s"]' % self.guid)
+ assert guid_obj is not None, "gpo guid was not set"
+ ext = guid_obj.find('gp_ext[@name="%s"]' % gp_ext_name)
+ if ext is not None:
+ attrs = ext.findall('attribute')
+ return {attr.attrib['name']: attr.text for attr in attrs}
+ return {}
+
+ def get_applied_guids(self):
+ """ Return a list of applied ext guids
+ return - List of guids for gpos that have applied settings
+ to the system.
+ """
+ guids = []
+ user_obj = self.gpdb.find('user[@name="%s"]' % self.user)
+ if user_obj is not None:
+ apply_log = user_obj.find('applylog')
+ if apply_log is not None:
+ guid_objs = apply_log.findall('guid[@count]')
+ guids_by_count = [(g.get('count'), g.get('value'))
+ for g in guid_objs]
+ guids_by_count.sort(reverse=True)
+ guids.extend(guid for count, guid in guids_by_count)
+ return guids
+
+ def get_applied_settings(self, guids):
+        """ Return the applied settings for the given gpo guids
+ return - List of tuples containing the guid of a gpo, then
+ a dictionary of policies and their values prior
+ policy application. These are sorted so that the
+ most recently applied settings are removed first.
+ """
+ ret = []
+ user_obj = self.gpdb.find('user[@name="%s"]' % self.user)
+ for guid in guids:
+ guid_settings = user_obj.find('guid[@value="%s"]' % guid)
+ exts = guid_settings.findall('gp_ext')
+ settings = {}
+ for ext in exts:
+ attr_dict = {}
+ attrs = ext.findall('attribute')
+ for attr in attrs:
+ attr_dict[attr.attrib['name']] = attr.text
+ settings[ext.attrib['name']] = attr_dict
+ ret.append((guid, settings))
+ return ret
+
+ def delete(self, gp_ext_name, attribute):
+ """ Remove an attribute from the gp_log
+ param gp_ext_name - name of extension from which to remove the
+ attribute
+ param attribute - attribute to remove
+ """
+ user_obj = self.gpdb.find('user[@name="%s"]' % self.user)
+ guid_obj = user_obj.find('guid[@value="%s"]' % self.guid)
+ assert guid_obj is not None, "gpo guid was not set"
+ ext = guid_obj.find('gp_ext[@name="%s"]' % gp_ext_name)
+ if ext is not None:
+ attr = ext.find('attribute[@name="%s"]' % attribute)
+ if attr is not None:
+ ext.remove(attr)
+ if len(ext) == 0:
+ guid_obj.remove(ext)
+
+ def commit(self):
+ """ Write gp_log changes to disk """
+ self.gpostore.store(self.username, etree.tostring(self.gpdb, 'utf-8'))
+
+
+class GPOStorage:
+ def __init__(self, log_file):
+ if os.path.isfile(log_file):
+ self.log = tdb.open(log_file)
+ else:
+ self.log = tdb.Tdb(log_file, 0, tdb.DEFAULT, os.O_CREAT | os.O_RDWR)
+
+ def start(self):
+ self.log.transaction_start()
+
+ def get_int(self, key):
+ try:
+ return int(self.log.get(get_bytes(key)))
+ except TypeError:
+ return None
+
+ def get(self, key):
+ return self.log.get(get_bytes(key))
+
+ def get_gplog(self, user):
+ return gp_log(user, self, self.log.get(get_bytes(user)))
+
+ def store(self, key, val):
+ self.log.store(get_bytes(key), get_bytes(val))
+
+ def cancel(self):
+ self.log.transaction_cancel()
+
+ def delete(self, key):
+ self.log.delete(get_bytes(key))
+
+ def commit(self):
+ self.log.transaction_commit()
+
+ def __del__(self):
+ self.log.close()
+
+
+class gp_ext(object):
+ __metaclass__ = ABCMeta
+
+ def __init__(self, lp, creds, username, store):
+ self.lp = lp
+ self.creds = creds
+ self.username = username
+ self.gp_db = store.get_gplog(username)
+
+ @abstractmethod
+ def process_group_policy(self, deleted_gpo_list, changed_gpo_list):
+ pass
+
+ @abstractmethod
+ def read(self, policy):
+ pass
+
+ def parse(self, afile):
+ local_path = self.lp.cache_path('gpo_cache')
+ data_file = os.path.join(local_path, check_safe_path(afile).upper())
+ if os.path.exists(data_file):
+ return self.read(data_file)
+ return None
+
+ @abstractmethod
+ def __str__(self):
+ pass
+
+ @abstractmethod
+ def rsop(self, gpo):
+ return {}
+
+
+class gp_inf_ext(gp_ext):
+ def read(self, data_file):
+ with open(data_file, 'rb') as f:
+ policy = f.read()
+ inf_conf = ConfigParser(interpolation=None)
+ inf_conf.optionxform = str
+ try:
+ inf_conf.read_file(StringIO(policy.decode()))
+ except UnicodeDecodeError:
+ inf_conf.read_file(StringIO(policy.decode('utf-16')))
+ return inf_conf
+
+
+class gp_pol_ext(gp_ext):
+ def read(self, data_file):
+ with open(data_file, 'rb') as f:
+ raw = f.read()
+ return ndr_unpack(preg.file, raw)
+
+
+class gp_xml_ext(gp_ext):
+ def read(self, data_file):
+ with open(data_file, 'rb') as f:
+ raw = f.read()
+ try:
+ return etree.fromstring(raw.decode())
+ except UnicodeDecodeError:
+ return etree.fromstring(raw.decode('utf-16'))
+
+
+class gp_applier(object):
+ """Group Policy Applier/Unapplier/Modifier
+ The applier defines functions for monitoring policy application,
+ removal, and modification. It must be a multi-derived class paired
+ with a subclass of gp_ext.
+ """
+ __metaclass__ = ABCMeta
+
+ def cache_add_attribute(self, guid, attribute, value):
+ """Add an attribute and value to the Group Policy cache
+ guid - The GPO guid which applies this policy
+ attribute - The attribute name of the policy being applied
+ value - The value of the policy being applied
+
+ Normally called by the subclass apply() function after applying policy.
+ """
+ self.gp_db.set_guid(guid)
+ self.gp_db.store(str(self), attribute, value)
+ self.gp_db.commit()
+
+ def cache_remove_attribute(self, guid, attribute):
+ """Remove an attribute from the Group Policy cache
+ guid - The GPO guid which applies this policy
+ attribute - The attribute name of the policy being unapplied
+
+ Normally called by the subclass unapply() function when removing old
+ policy.
+ """
+ self.gp_db.set_guid(guid)
+ self.gp_db.delete(str(self), attribute)
+ self.gp_db.commit()
+
+ def cache_get_attribute_value(self, guid, attribute):
+ """Retrieve the value stored in the cache for the given attribute
+ guid - The GPO guid which applies this policy
+ attribute - The attribute name of the policy
+ """
+ self.gp_db.set_guid(guid)
+ return self.gp_db.retrieve(str(self), attribute)
+
+ def cache_get_all_attribute_values(self, guid):
+ """Retrieve all attribute/values currently stored for this gpo+policy
+ guid - The GPO guid which applies this policy
+ """
+ self.gp_db.set_guid(guid)
+ return self.gp_db.retrieve_all(str(self))
+
+ def cache_get_apply_state(self):
+ """Return the current apply state
+ return - APPLY|ENFORCE|UNAPPLY
+ """
+ return self.gp_db.get_state()
+
+ def generate_attribute(self, name, *args):
+ """Generate an attribute name from arbitrary data
+ name - A name to ensure uniqueness
+ args - Any arbitrary set of args, str or bytes
+ return - A blake2b digest of the data, the attribute
+
+ The importance here is the digest of the data makes the attribute
+ reproducible and uniquely identifies it. Hashing the name with
+ the data ensures we don't falsely identify a match which is the same
+ text in a different file. Using this attribute generator is optional.
+ """
+ data = b''.join([get_bytes(arg) for arg in [*args]])
+ return blake2b(get_bytes(name)+data).hexdigest()
+
+ def generate_value_hash(self, *args):
+ """Generate a unique value which identifies value changes
+ args - Any arbitrary set of args, str or bytes
+ return - A blake2b digest of the data, the value represented
+ """
+ data = b''.join([get_bytes(arg) for arg in [*args]])
+ return blake2b(data).hexdigest()
+
+ @abstractmethod
+ def unapply(self, guid, attribute, value):
+ """Group Policy Unapply
+ guid - The GPO guid which applies this policy
+ attribute - The attribute name of the policy being unapplied
+ value - The value of the policy being unapplied
+ """
+ pass
+
+ @abstractmethod
+ def apply(self, guid, attribute, applier_func, *args):
+ """Group Policy Apply
+ guid - The GPO guid which applies this policy
+ attribute - The attribute name of the policy being applied
+ applier_func - An applier function which takes variable args
+ args - The variable arguments to pass to applier_func
+
+ The applier_func function MUST return the value of the policy being
+ applied. It's important that implementations of `apply` check for and
+ first unapply any changed policy. See for example calls to
+ `cache_get_all_attribute_values()` which searches for all policies
+ applied by this GPO for this Client Side Extension (CSE).
+ """
+ pass
+
+ def clean(self, guid, keep=None, remove=None, **kwargs):
+ """Cleanup old removed attributes
+ keep - A list of attributes to keep
+ remove - A single attribute to remove, or a list of attributes to
+ remove
+ kwargs - Additional keyword args required by the subclass unapply
+ function
+
+ This is only necessary for CSEs which provide multiple attributes.
+ """
+ # Clean syntax is, either provide a single remove attribute,
+ # or a list of either removal attributes or keep attributes.
+ if keep is None:
+ keep = []
+ if remove is None:
+ remove = []
+
+ if type(remove) != list:
+ value = self.cache_get_attribute_value(guid, remove)
+ if value is not None:
+ self.unapply(guid, remove, value, **kwargs)
+ else:
+ old_vals = self.cache_get_all_attribute_values(guid)
+ for attribute, value in old_vals.items():
+ if (len(remove) > 0 and attribute in remove) or \
+ (len(keep) > 0 and attribute not in keep):
+ self.unapply(guid, attribute, value, **kwargs)
+
+
class gp_misc_applier(gp_applier):
    """Group Policy Miscellaneous Applier/Unapplier/Modifier

    Serializes arbitrary keyword values to/from a small XML document so
    subclasses can store several named fields in one cache attribute value.
    """

    def generate_value(self, **kwargs):
        """Pack keyword arguments into an XML string.

        Each keyword becomes a child element of <data/>; the result
        round-trips through parse_value() below.
        """
        data = etree.Element('data')
        for k, v in kwargs.items():
            arg = etree.SubElement(data, k)
            arg.text = get_string(v)
        return get_string(etree.tostring(data, 'utf-8'))

    def parse_value(self, value):
        """Parse a value produced by generate_value().

        return - dict mapping element tag -> text. A value which predates
        the XML format parses as {'old_val': value}; a non-string value
        (e.g. None) yields {}.
        """
        vals = {}
        try:
            data = etree.fromstring(value)
        except etree.ParseError:
            # If parsing fails, then it's an old cache value
            return {'old_val': value}
        except TypeError:
            return {}
        itr = data.iter()
        next(itr)  # Skip the top element
        for item in itr:
            vals[item.tag] = item.text
        return vals
+
+
class gp_file_applier(gp_applier):
    """Group Policy File Applier/Unapplier/Modifier
    Subclass of abstract class gp_applier for monitoring policy applied
    via a file.
    """

    def __generate_value(self, value_hash, files, sep):
        # Store the policy hash and the files it created as one
        # sep-delimited string: "<hash><sep><file1><sep><file2>..."
        data = [value_hash]
        data.extend(files)
        return sep.join(data)

    def __parse_value(self, value, sep):
        """Parse a value
        return - A unique HASH, followed by the file list
        """
        if value is None:
            return None, []
        data = value.split(sep)
        if '/' in data[0]:
            # The first element is not a hash, but a filename. This is a
            # legacy value.
            return None, data
        else:
            return data[0], data[1:] if len(data) > 1 else []

    def unapply(self, guid, attribute, files, sep=':'):
        """Delete the files created by this policy and drop the log entry.

        files - either an already-parsed list of paths, or the raw cached
        value string (which is then parsed with __parse_value)
        """
        # If the value isn't a list of files, parse value from the log
        if type(files) != list:
            _, files = self.__parse_value(files, sep)
        for file in files:
            if os.path.exists(file):
                os.unlink(file)
        self.cache_remove_attribute(guid, attribute)

    def apply(self, guid, attribute, value_hash, applier_func, *args, sep=':'):
        """
        applier_func MUST return a list of files created by the applier.

        This applier is for policies which only apply to a single file (with
        a couple small exceptions). This applier will remove any policy applied
        by this GPO which doesn't match the new policy.
        """
        # If the policy has changed, unapply, then apply new policy
        old_val = self.cache_get_attribute_value(guid, attribute)
        # Ignore removal if this policy is applied and hasn't changed
        old_val_hash, old_val_files = self.__parse_value(old_val, sep)
        # Reapply if the hash changed, enforcement was requested, or any
        # previously written file has gone missing.
        if (old_val_hash != value_hash or
            self.cache_get_apply_state() == GPOSTATE.ENFORCE) or \
           not all([os.path.exists(f) for f in old_val_files]):
            self.unapply(guid, attribute, old_val_files)
        else:
            # If policy is already applied, skip application
            return

        # Apply the policy and log the changes
        files = applier_func(*args)
        new_value = self.__generate_value(value_hash, files, sep)
        self.cache_add_attribute(guid, attribute, new_value)
+
+
+""" Fetch the hostname of a writable DC """
+
+
def get_dc_hostname(creds, lp):
    """Fetch the DNS hostname of a writable DC via a CLDAP find-DC call.

    creds - Credentials for the domain
    lp - LoadParm; the 'realm' setting selects the domain to search
    return - The PDC DNS name reported by the located DC
    """
    net = Net(creds=creds, lp=lp)
    cldap_ret = net.finddc(domain=lp.get('realm'), flags=(nbt.NBT_SERVER_LDAP |
                                                          nbt.NBT_SERVER_DS))
    return cldap_ret.pdc_dns_name
+
+
+""" Fetch a single GPO object, including its security descriptor """
+
+
def get_gpo(samdb, gpo_dn):
    """Fetch a single GROUP_POLICY_OBJECT, including its security descriptor.

    samdb - A SamDB connection to the domain controller
    gpo_dn - The DN of the GPO, optionally prefixed with 'LDAP://'
    return - A populated gpo.GROUP_POLICY_OBJECT
    raises ldb.LdbError if the search fails or does not return exactly one
    entry
    """
    g = gpo.GROUP_POLICY_OBJECT()
    attrs = [
        "cn",
        "displayName",
        "flags",
        "gPCFileSysPath",
        "gPCFunctionalityVersion",
        "gPCMachineExtensionNames",
        "gPCUserExtensionNames",
        "gPCWQLFilter",
        "name",
        "nTSecurityDescriptor",
        "versionNumber"
    ]
    if gpo_dn.startswith("LDAP://"):
        # str.lstrip() strips a *set of characters*, not a prefix, so
        # lstrip("LDAP://") would also eat leading 'L', 'D', 'A', 'P', ':'
        # or '/' characters belonging to the DN itself (e.g. a DN starting
        # with 'DC='). Slice the prefix off instead.
        gpo_dn = gpo_dn[len("LDAP://"):]

    # Request owner, group and DACL only (no SACL, which would require
    # additional privileges).
    sd_flags = (security.SECINFO_OWNER |
                security.SECINFO_GROUP |
                security.SECINFO_DACL)
    try:
        res = samdb.search(gpo_dn, ldb.SCOPE_BASE, "(objectclass=*)", attrs,
                           controls=['sd_flags:1:%d' % sd_flags])
    except Exception:
        log.error('Failed to fetch gpo object with nTSecurityDescriptor')
        raise
    if res.count != 1:
        raise ldb.LdbError(ldb.ERR_NO_SUCH_OBJECT,
                           'get_gpo: search failed')

    # Copy each attribute that was returned; all are optional.
    g.ds_path = gpo_dn
    if 'versionNumber' in res.msgs[0].keys():
        g.version = int(res.msgs[0]['versionNumber'][0])
    if 'flags' in res.msgs[0].keys():
        g.options = int(res.msgs[0]['flags'][0])
    if 'gPCFileSysPath' in res.msgs[0].keys():
        g.file_sys_path = res.msgs[0]['gPCFileSysPath'][0].decode()
    if 'displayName' in res.msgs[0].keys():
        g.display_name = res.msgs[0]['displayName'][0].decode()
    if 'name' in res.msgs[0].keys():
        g.name = res.msgs[0]['name'][0].decode()
    if 'gPCMachineExtensionNames' in res.msgs[0].keys():
        g.machine_extensions = str(res.msgs[0]['gPCMachineExtensionNames'][0])
    if 'gPCUserExtensionNames' in res.msgs[0].keys():
        g.user_extensions = str(res.msgs[0]['gPCUserExtensionNames'][0])
    if 'nTSecurityDescriptor' in res.msgs[0].keys():
        g.set_sec_desc(bytes(res.msgs[0]['nTSecurityDescriptor'][0]))
    return g
+
class GP_LINK:
    """Parsed representation of a container's gPLink/gPOptions pair.

    link_names/link_opts hold the linked GPO DNs and their per-link option
    bits (enforce/disable), in gPLink order; gp_opts holds the container's
    own gPOptions value (inheritance blocking).
    """
    def __init__(self, gPLink, gPOptions):
        self.link_names = []
        self.link_opts = []
        self.gpo_parse_gplink(gPLink)
        self.gp_opts = int(gPOptions)

    def gpo_parse_gplink(self, gPLink):
        """Split a raw gPLink value '[<dn>;<opts>][<dn>;<opts>]...' into
        parallel link_names/link_opts lists."""
        for p in gPLink.decode().split(']'):
            if not p:
                continue
            log.debug('gpo_parse_gplink: processing link')
            p = p.lstrip('[')
            link_name, link_opt = p.split(';')
            log.debug('gpo_parse_gplink: link: {}'.format(link_name))
            log.debug('gpo_parse_gplink: opt: {}'.format(link_opt))
            self.link_names.append(link_name)
            self.link_opts.append(int(link_opt))

    def num_links(self):
        """Return the number of links, sanity-checking list consistency."""
        if len(self.link_names) != len(self.link_opts):
            raise RuntimeError('Link names and opts mismatch')
        return len(self.link_names)
+
def find_samaccount(samdb, samaccountname):
    """Look up the DN and userAccountControl of a sAMAccountName.

    samdb - A SamDB connection
    samaccountname - SAM name of the user or computer account
    return - (userAccountControl as int, dn)
    raises ldb.LdbError unless exactly one match is found
    """
    attrs = ['dn', 'userAccountControl']
    res = samdb.search(samdb.get_default_basedn(), ldb.SCOPE_SUBTREE,
                       '(sAMAccountName={})'.format(samaccountname), attrs)
    if res.count != 1:
        raise ldb.LdbError(ldb.ERR_NO_SUCH_OBJECT,
            "Failed to find samAccountName '{}'".format(samaccountname)
        )
    uac = int(res.msgs[0]['userAccountControl'][0])
    dn = res.msgs[0]['dn']
    log.info('Found dn {} for samaccountname {}'.format(dn, samaccountname))
    return uac, dn
+
def get_gpo_link(samdb, link_dn):
    """Fetch the gPLink and gPOptions attributes of a container.

    link_dn - DN of the OU/domain/site object to query
    return - A GP_LINK describing the GPOs linked to link_dn
    raises ldb.LdbError if the object or its gPLink attribute is missing
    """
    res = samdb.search(link_dn, ldb.SCOPE_BASE,
                       '(objectclass=*)', ['gPLink', 'gPOptions'])
    if res.count != 1:
        raise ldb.LdbError(ldb.ERR_NO_SUCH_OBJECT, 'get_gpo_link: no result')
    if 'gPLink' not in res.msgs[0]:
        raise ldb.LdbError(ldb.ERR_NO_SUCH_ATTRIBUTE,
            "get_gpo_link: no 'gPLink' attribute found for '{}'".format(link_dn)
        )
    gPLink = res.msgs[0]['gPLink'][0]
    gPOptions = 0
    # gPOptions is optional; absence means no special link options
    if 'gPOptions' in res.msgs[0]:
        gPOptions = res.msgs[0]['gPOptions'][0]
    else:
        log.debug("get_gpo_link: no 'gPOptions' attribute found")
    return GP_LINK(gPLink, gPOptions)
+
def add_gplink_to_gpo_list(samdb, gpo_list, forced_gpo_list, link_dn, gp_link,
                           link_type, only_add_forced_gpos, token):
    """Insert the GPOs of one GP_LINK into the accumulating GPO lists.

    Links are walked in reverse so that inserting at the front of the list
    preserves link order. Disabled links are skipped; enforced links go to
    forced_gpo_list. GPOs the token cannot read are skipped, and any LDAP
    error other than "no such object" aborts the remaining links.
    """
    for i in range(gp_link.num_links()-1, -1, -1):
        is_forced = (gp_link.link_opts[i] & GPLINK_OPT_ENFORCE) != 0
        if gp_link.link_opts[i] & GPLINK_OPT_DISABLE:
            log.debug('skipping disabled GPO')
            continue

        if only_add_forced_gpos:
            if not is_forced:
                log.debug("skipping nonenforced GPO link "
                          "because GPOPTIONS_BLOCK_INHERITANCE "
                          "has been set")
                continue
            else:
                log.debug("adding enforced GPO link although "
                          "the GPOPTIONS_BLOCK_INHERITANCE "
                          "has been set")

        try:
            new_gpo = get_gpo(samdb, gp_link.link_names[i])
        except ldb.LdbError as e:
            (enum, estr) = e.args
            log.debug("failed to get gpo: %s" % gp_link.link_names[i])
            if enum == ldb.ERR_NO_SUCH_OBJECT:
                log.debug("skipping empty gpo: %s" % gp_link.link_names[i])
                continue
            # Any other LDAP error aborts processing of this gPLink
            return
        else:
            try:
                # Check the token may actually read this GPO before adding
                sec_desc = ndr_unpack(security.descriptor,
                                      new_gpo.get_sec_desc_buf())
                samba.security.access_check(sec_desc, token,
                                            security.SEC_STD_READ_CONTROL |
                                            security.SEC_ADS_LIST |
                                            security.SEC_ADS_READ_PROP)
            except Exception as e:
                log.debug("skipping GPO \"%s\" as object "
                          "has no access to it" % new_gpo.display_name)
                continue

            new_gpo.link = str(link_dn)
            new_gpo.link_type = link_type

            if is_forced:
                forced_gpo_list.insert(0, new_gpo)
            else:
                gpo_list.insert(0, new_gpo)

            log.debug("add_gplink_to_gpo_list: added GPLINK #%d %s "
                      "to GPO list" % (i, gp_link.link_names[i]))
+
def merge_with_system_token(token_1):
    """Merge the local system token's rights into a security token.

    The system session's SIDs are appended to token_1's, and its rights
    and privilege masks are OR'd in. token_1 is modified in place and
    returned (used for machine accounts in get_gpo_list()).
    """
    sids = token_1.sids
    system_token = system_session().security_token
    sids.extend(system_token.sids)
    token_1.sids = sids
    token_1.rights_mask |= system_token.rights_mask
    token_1.privilege_mask |= system_token.privilege_mask
    # There are no claims in the system token, so it is safe not to merge the claims
    return token_1
+
+
def site_dn_for_machine(samdb, dc_hostname, lp, creds, hostname):
    """Return the DN of the AD site object a machine belongs to.

    Implements [MS-GPOL] 3.2.5.1.4 Site Search via an LDAP rootDSE
    Netlogon query rather than netr_DsRGetSiteName() (see comment below).
    hostname - the machine account to look up
    raises RuntimeError if no (or an invalid) Netlogon response is returned
    """
    # [MS-GPOL] 3.2.5.1.4 Site Search

    # The netr_DsRGetSiteName() needs to run over local rpc, however we do not
    # have the call implemented in our rpc_server.
    # What netr_DsRGetSiteName() actually does is an ldap query to get
    # the sitename, we can do the same.

    # NtVer=(NETLOGON_NT_VERSION_IP|NETLOGON_NT_VERSION_WITH_CLOSEST_SITE|
    # NETLOGON_NT_VERSION_5EX) [0x20000014]
    expr = "(&(DnsDomain=%s.)(User=%s)(NtVer=\\14\\00\\00\\20))" % (
        samdb.domain_dns_name(),
        hostname)
    res = samdb.search(
        base='',
        scope=ldb.SCOPE_BASE,
        expression=expr,
        attrs=["Netlogon"])
    if res.count != 1:
        raise RuntimeError('site_dn_for_machine: No result')

    samlogon_response = ndr_unpack(nbt.netlogon_samlogon_response,
                                   bytes(res.msgs[0]['Netlogon'][0]))
    if samlogon_response.ntver not in [nbt.NETLOGON_NT_VERSION_5EX,
                                       (nbt.NETLOGON_NT_VERSION_1
                                        | nbt.NETLOGON_NT_VERSION_5EX)]:
        raise RuntimeError('site_dn_for_machine: Invalid NtVer in '
                           + 'netlogon_samlogon_response')

    # We want NETLOGON_NT_VERSION_5EX out of the union!
    samlogon_response.ntver = nbt.NETLOGON_NT_VERSION_5EX
    samlogon_response_ex = samlogon_response.data

    # Fall back to the default site if the response carries no usable one
    client_site = "Default-First-Site-Name"
    if (samlogon_response_ex.client_site
            and len(samlogon_response_ex.client_site) > 1):
        client_site = samlogon_response_ex.client_site

    site_dn = samdb.get_config_basedn()
    site_dn.add_child("CN=Sites")
    site_dn.add_child("CN=%s" % (client_site))

    return site_dn
+
+
+
def get_gpo_list(dc_hostname, creds, lp, username):
    """Get the full list of GROUP_POLICY_OBJECTs for a given username.
    Push GPOs to gpo_list so that the traversal order of the list matches
    the order of application:
    (L)ocal (S)ite (D)omain (O)rganizational(U)nit
    For different domains and OUs: parent-to-child.
    Within same level of domains and OUs: Link order.
    Since GPOs are pushed to the front of gpo_list, GPOs have to be
    pushed in the opposite order of application (OUs first, local last,
    child-to-parent).
    Forced GPOs are appended in the end since they override all others.

    dc_hostname - DC to query over LDAP
    creds - Credentials for the connection
    lp - LoadParm
    username - 'DOM\\SAM' or plain SAM name of the user/computer
    """
    gpo_list = []
    forced_gpo_list = []
    url = 'ldap://' + dc_hostname
    samdb = SamDB(url=url,
                  session_info=system_session(),
                  credentials=creds, lp=lp)
    # username is DOM\\SAM, but get_gpo_list expects SAM
    uac, dn = find_samaccount(samdb, username.split('\\')[-1])
    add_only_forced_gpos = False

    # Fetch the security token
    session_info_flags = (AUTH_SESSION_INFO_DEFAULT_GROUPS |
                          AUTH_SESSION_INFO_AUTHENTICATED)
    if url.startswith('ldap'):
        session_info_flags |= AUTH_SESSION_INFO_SIMPLE_PRIVILEGES
    session = samba.auth.user_session(samdb, lp_ctx=lp, dn=dn,
                                      session_info_flags=session_info_flags)
    gpo_list_machine = False
    # Machine (trust) accounts get the system token's rights merged in and
    # additionally pick up site-linked GPOs below.
    if uac & UF_WORKSTATION_TRUST_ACCOUNT or uac & UF_SERVER_TRUST_ACCOUNT:
        gpo_list_machine = True
        token = merge_with_system_token(session.security_token)
    else:
        token = session.security_token

    # (O)rganizational(U)nit
    parent_dn = dn.parent()
    while True:
        if str(parent_dn) == str(samdb.get_default_basedn().parent()):
            break

        # An account can be a member of more OUs
        if parent_dn.get_component_name(0) == 'OU':
            try:
                log.debug("get_gpo_list: query OU: [%s] for GPOs" % parent_dn)
                gp_link = get_gpo_link(samdb, parent_dn)
            except ldb.LdbError as e:
                (enum, estr) = e.args
                log.debug(estr)
            else:
                add_gplink_to_gpo_list(samdb, gpo_list, forced_gpo_list,
                                       parent_dn, gp_link,
                                       gpo.GP_LINK_OU,
                                       add_only_forced_gpos, token)

                # block inheritance from now on
                if gp_link.gp_opts & GPO_BLOCK_INHERITANCE:
                    add_only_forced_gpos = True

        parent_dn = parent_dn.parent()

    # (D)omain
    parent_dn = dn.parent()
    while True:
        if str(parent_dn) == str(samdb.get_default_basedn().parent()):
            break

        # An account can just be a member of one domain
        if parent_dn.get_component_name(0) == 'DC':
            try:
                log.debug("get_gpo_list: query DC: [%s] for GPOs" % parent_dn)
                gp_link = get_gpo_link(samdb, parent_dn)
            except ldb.LdbError as e:
                (enum, estr) = e.args
                log.debug(estr)
            else:
                add_gplink_to_gpo_list(samdb, gpo_list, forced_gpo_list,
                                       parent_dn, gp_link,
                                       gpo.GP_LINK_DOMAIN,
                                       add_only_forced_gpos, token)

                # block inheritance from now on
                if gp_link.gp_opts & GPO_BLOCK_INHERITANCE:
                    add_only_forced_gpos = True

        parent_dn = parent_dn.parent()

    # (S)ite
    if gpo_list_machine:
        try:
            site_dn = site_dn_for_machine(samdb, dc_hostname, lp, creds, username)

            try:
                log.debug("get_gpo_list: query SITE: [%s] for GPOs" % site_dn)
                gp_link = get_gpo_link(samdb, site_dn)
            except ldb.LdbError as e:
                (enum, estr) = e.args
                log.debug(estr)
            else:
                add_gplink_to_gpo_list(samdb, gpo_list, forced_gpo_list,
                                       site_dn, gp_link,
                                       gpo.GP_LINK_SITE,
                                       add_only_forced_gpos, token)
        except ldb.LdbError:
            # [MS-GPOL] 3.2.5.1.4 Site Search: If the method returns
            # ERROR_NO_SITENAME, the remainder of this message MUST be skipped
            # and the protocol sequence MUST continue at GPO Search
            pass

    # (L)ocal
    gpo_list.insert(0, gpo.GROUP_POLICY_OBJECT("Local Policy",
                                               "Local Policy",
                                               gpo.GP_LINK_LOCAL))

    # Append |forced_gpo_list| at the end of |gpo_list|,
    # so that forced GPOs are applied on top of non enforced GPOs.
    return gpo_list+forced_gpo_list
+
+
def cache_gpo_dir(conn, cache, sub_dir):
    """Recursively download a sysvol directory into the local gpo_cache.

    conn - An SMB connection to the sysvol share
    cache - Local top-level gpo_cache directory
    sub_dir - Remote path beneath the share to mirror

    Names are upper-cased locally so later lookups are case-insensitive;
    files are written to a temp file and renamed into place so readers
    never observe partial content.
    """
    loc_sub_dir = sub_dir.upper()
    local_dir = os.path.join(cache, loc_sub_dir)
    try:
        os.makedirs(local_dir, mode=0o755)
    except OSError as e:
        # An already-existing directory is fine
        if e.errno != errno.EEXIST:
            raise
    for fdata in conn.list(sub_dir):
        if fdata['attrib'] & libsmb.FILE_ATTRIBUTE_DIRECTORY:
            cache_gpo_dir(conn, cache, os.path.join(sub_dir, fdata['name']))
        else:
            local_name = fdata['name'].upper()
            f = NamedTemporaryFile(delete=False, dir=local_dir)
            # SMB paths use backslash separators
            fname = os.path.join(sub_dir, fdata['name']).replace('/', '\\')
            f.write(conn.loadfile(fname))
            f.close()
            os.rename(f.name, os.path.join(local_dir, local_name))
+
+
def check_safe_path(path):
    """Return the path below 'sysvol' as a safe relative local path.

    path - A sysvol path, with either '/' or '\\' separators
    return - The components after 'sysvol' joined with the local separator
    raises OSError if the path contains a '..' traversal component
    """
    components = re.split(r'/|\\', path)
    lowered = path.lower()
    if 'sysvol' in lowered:
        # Keep only the components below the (case-insensitive) sysvol root
        lowered_components = re.split(r'/|\\', lowered)
        components = components[lowered_components.index('sysvol') + 1:]
    if '..' in components:
        raise OSError(path)
    return os.path.join(*components)
+
+
def check_refresh_gpo_list(dc_hostname, lp, creds, gpos):
    """Download the sysvol policy files of each GPO into the local cache.

    SMB signing is required for the connection so policy content is
    protected in transit; the caller's signing setting is restored after
    the connection is established.
    """
    # Force signing for the connection
    saved_signing_state = creds.get_smb_signing()
    creds.set_smb_signing(SMB_SIGNING_REQUIRED)
    conn = libsmb.Conn(dc_hostname, 'sysvol', lp=lp, creds=creds)
    # Reset signing state
    creds.set_smb_signing(saved_signing_state)
    cache_path = lp.cache_path('gpo_cache')
    for gpo_obj in gpos:
        # e.g. the synthetic 'Local Policy' entry has no sysvol path
        if not gpo_obj.file_sys_path:
            continue
        cache_gpo_dir(conn, cache_path, check_safe_path(gpo_obj.file_sys_path))
+
+
def get_deleted_gpos_list(gp_db, gpos):
    """Return cached settings of GPOs that no longer apply to this user.

    gp_db - The gp_log storage for this user
    gpos - The GPOs currently applicable
    return - Applied settings for every logged guid not in gpos
    """
    current_guids = set(p.name for p in gpos)
    deleted_gpos = [guid for guid in gp_db.get_applied_guids()
                    if guid not in current_guids]
    return gp_db.get_applied_settings(deleted_gpos)
+
def gpo_version(lp, path):
    """Return the version number stored in a GPO's cached GPT.INI.

    lp - LoadParm used to locate the gpo_cache directory
    path - Path of the GPO directory relative to gpo_cache
    """
    # gpo.gpo_get_sysvol_gpt_version() reads the GPT.INI from a local file,
    # read from the gpo client cache.
    gpt_path = lp.cache_path(os.path.join('gpo_cache', path))
    return int(gpo.gpo_get_sysvol_gpt_version(gpt_path)[1])
+
+
def apply_gp(lp, creds, store, gp_extensions, username, target, force=False):
    """Apply group policy for a user or computer.

    lp - LoadParm for the client
    creds - Credentials for the DC and the sysvol share
    store - Storage backend holding applied-policy state
    gp_extensions - Client Side Extension classes to execute
    username - SAM account name policy applies to
    target - 'Computer' or 'User'
    force - Reapply every GPO even if its version is unchanged

    Extensions run independently: a failure in one is logged and does not
    prevent the others from applying.
    """
    gp_db = store.get_gplog(username)
    dc_hostname = get_dc_hostname(creds, lp)
    gpos = get_gpo_list(dc_hostname, creds, lp, username)
    del_gpos = get_deleted_gpos_list(gp_db, gpos)
    try:
        check_refresh_gpo_list(dc_hostname, lp, creds, gpos)
    except Exception:
        # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt
        # are no longer swallowed; application remains best-effort.
        log.error('Failed downloading gpt cache from \'%s\' using SMB'
                  % dc_hostname)
        return

    if force:
        changed_gpos = gpos
        gp_db.state(GPOSTATE.ENFORCE)
    else:
        # Only GPOs whose sysvol version differs from the stored version
        # need to be (re)applied.
        changed_gpos = []
        for gpo_obj in gpos:
            if not gpo_obj.file_sys_path:
                continue
            guid = gpo_obj.name
            path = check_safe_path(gpo_obj.file_sys_path).upper()
            version = gpo_version(lp, path)
            if version != store.get_int(guid):
                log.info('GPO %s has changed' % guid)
                changed_gpos.append(gpo_obj)
        gp_db.state(GPOSTATE.APPLY)

    store.start()
    for ext in gp_extensions:
        try:
            ext = ext(lp, creds, username, store)
            if target == 'Computer':
                ext.process_group_policy(del_gpos, changed_gpos)
            else:
                # User policy must run with the user's own privileges
                drop_privileges(username, ext.process_group_policy,
                                del_gpos, changed_gpos)
        except Exception as e:
            log.error('Failed to apply extension %s' % str(ext))
            _, _, tb = sys.exc_info()
            filename, line_number, _, _ = traceback.extract_tb(tb)[-1]
            log.error('%s:%d: %s: %s' % (filename, line_number,
                                         type(e).__name__, str(e)))
            continue
    # Record the applied version of every GPO so unchanged ones are
    # skipped on the next run.
    for gpo_obj in gpos:
        if not gpo_obj.file_sys_path:
            continue
        guid = gpo_obj.name
        path = check_safe_path(gpo_obj.file_sys_path).upper()
        version = gpo_version(lp, path)
        store.store(guid, '%i' % version)
    store.commit()
+
+
def unapply_gp(lp, creds, store, gp_extensions, username, target):
    """Remove all previously applied group policy for a user or computer.

    Every logged applied GPO is treated as deleted and handed to each
    extension's process_group_policy(); a failing extension is logged and
    the remaining extensions still run.
    """
    gp_db = store.get_gplog(username)
    gp_db.state(GPOSTATE.UNAPPLY)
    # Treat all applied gpos as deleted
    del_gpos = gp_db.get_applied_settings(gp_db.get_applied_guids())
    store.start()
    for ext in gp_extensions:
        try:
            ext = ext(lp, creds, username, store)
            if target == 'Computer':
                ext.process_group_policy(del_gpos, [])
            else:
                # User policy must run with the user's own privileges
                drop_privileges(username, ext.process_group_policy,
                                del_gpos, [])
        except Exception as e:
            log.error('Failed to unapply extension %s' % str(ext))
            log.error('Message was: ' + str(e))
            continue
    store.commit()
+
+
def __rsop_vals(vals, level=4):
    """Recursively pretty-print policy values with growing indentation.

    NOTE(review): the exact type(...) == dict/list checks mean subclasses
    of dict/list fall through to the scalar branch — presumably
    intentional; confirm before changing to isinstance().
    """
    if type(vals) == dict:
        ret = [' '*level + '[ %s ] = %s' % (k, __rsop_vals(v, level+2))
               for k, v in vals.items()]
        return '\n' + '\n'.join(ret)
    elif type(vals) == list:
        ret = [' '*level + '[ %s ]' % __rsop_vals(v, level+2) for v in vals]
        return '\n' + '\n'.join(ret)
    else:
        if isinstance(vals, numbers.Number):
            return ' '*(level+2) + str(vals)
        else:
            return ' '*(level+2) + get_string(vals)
+
def rsop(lp, creds, store, gp_extensions, username, target):
    """Print the Resultant Set of Policy for a user or computer.

    Fetches and caches the applicable GPOs, then asks every extension to
    report its settings per GPO, printing a formatted report to stdout.
    """
    dc_hostname = get_dc_hostname(creds, lp)
    gpos = get_gpo_list(dc_hostname, creds, lp, username)
    check_refresh_gpo_list(dc_hostname, lp, creds, gpos)

    print('Resultant Set of Policy')
    print('%s Policy\n' % target)
    term_width = shutil.get_terminal_size(fallback=(120, 50))[0]
    for gpo_obj in gpos:
        if gpo_obj.display_name.strip() == 'Local Policy':
            continue  # We never apply local policy
        print('GPO: %s' % gpo_obj.display_name)
        print('='*term_width)
        for ext in gp_extensions:
            ext = ext(lp, creds, username, store)
            # Derive a short CSE name from the extension's class path
            cse_name_m = re.findall(r"'([\w\.]+)'", str(type(ext)))
            if len(cse_name_m) > 0:
                cse_name = cse_name_m[-1].split('.')[-1]
            else:
                cse_name = ext.__module__.split('.')[-1]
            print(' CSE: %s' % cse_name)
            print(' ' + ('-'*int(term_width/2)))
            for section, settings in ext.rsop(gpo_obj).items():
                print('  Policy Type: %s' % section)
                print(' ' + ('-'*int(term_width/2)))
                print(__rsop_vals(settings).lstrip('\n'))
                print(' ' + ('-'*int(term_width/2)))
            print(' ' + ('-'*int(term_width/2)))
        print('%s\n' % ('='*term_width))
+
+
def parse_gpext_conf(smb_conf):
    """Load smb.conf (or the default config) and the gpext.conf registry.

    smb_conf - Optional path to an smb.conf; None loads the default
    return - (s3 LoadParm, ConfigParser over state_path('gpext.conf'))
    """
    from samba.samba3 import param as s3param
    lp = s3param.get_context()
    if smb_conf is not None:
        lp.load(smb_conf)
    else:
        lp.load_default()
    ext_conf = lp.state_path('gpext.conf')
    # Interpolation disabled so '%' characters in stored values stay literal
    parser = ConfigParser(interpolation=None)
    parser.read(ext_conf)
    return lp, parser
+
+
def atomic_write_conf(lp, parser):
    """Atomically rewrite gpext.conf from the given ConfigParser.

    The config is written to a temp file in the same directory and then
    renamed over the old file, so readers never see a partial write.
    """
    ext_conf = lp.state_path('gpext.conf')
    tmp = NamedTemporaryFile(mode="w+", delete=False,
                             dir=os.path.dirname(ext_conf))
    with tmp as f:
        parser.write(f)
    os.rename(tmp.name, ext_conf)
+
+
def check_guid(guid):
    """Validate a curly-braced GUID string, e.g. '{01234567-89ab-...}'.

    guid - The candidate string
    return - True for a 38 character, brace-enclosed, parseable UUID,
    False otherwise. The length is checked first so empty/short strings
    return False instead of raising IndexError on guid[0]/guid[-1].
    """
    # Check for valid guid with curly braces
    if len(guid) != 38 or guid[0] != '{' or guid[-1] != '}':
        return False
    try:
        # NOTE: passing version=4 does not reject other UUID versions;
        # UUID() only raises ValueError for malformed hex. Kept as-is
        # for compatibility.
        UUID(guid, version=4)
    except ValueError:
        return False
    return True
+
+
def register_gp_extension(guid, name, path,
                          smb_conf=None, machine=True, user=True):
    """Register a Group Policy Client Side Extension in gpext.conf.

    guid - Curly-braced GUID identifying the extension
    name - The ProcessGroupPolicy entry point name
    path - Path to the extension module; must exist
    smb_conf - Optional smb.conf path (None selects the default config)
    machine/user - Whether the extension handles machine/user policy
    return - True on success, False for a bad guid or missing module
    """
    # Check that the module exists
    if not os.path.exists(path):
        return False
    if not check_guid(guid):
        return False

    lp, parser = parse_gpext_conf(smb_conf)
    if guid not in parser.sections():
        parser.add_section(guid)
    parser.set(guid, 'DllName', path)
    parser.set(guid, 'ProcessGroupPolicy', name)
    # Stored negated, mirroring the Windows NoMachinePolicy/NoUserPolicy keys
    parser.set(guid, 'NoMachinePolicy', "0" if machine else "1")
    parser.set(guid, 'NoUserPolicy', "0" if user else "1")

    atomic_write_conf(lp, parser)

    return True
+
+
def list_gp_extensions(smb_conf=None):
    """Return the registered Client Side Extensions from gpext.conf.

    return - dict keyed by extension guid; each entry carries 'DllName',
    'ProcessGroupPolicy', and boolean 'MachinePolicy'/'UserPolicy' flags
    (the negation of the stored NoMachinePolicy/NoUserPolicy values).
    """
    _, parser = parse_gpext_conf(smb_conf)
    results = {}
    for guid in parser.sections():
        results[guid] = {
            'DllName': parser.get(guid, 'DllName'),
            'ProcessGroupPolicy': parser.get(guid, 'ProcessGroupPolicy'),
            'MachinePolicy': not int(parser.get(guid, 'NoMachinePolicy')),
            'UserPolicy': not int(parser.get(guid, 'NoUserPolicy')),
        }
    return results
+
+
def unregister_gp_extension(guid, smb_conf=None):
    """Remove a Client Side Extension entry from gpext.conf.

    guid - Curly-braced GUID of the extension to remove
    return - False for an invalid guid, True otherwise (removing a
    non-existent section is not an error)
    """
    if not check_guid(guid):
        return False

    lp, parser = parse_gpext_conf(smb_conf)
    if guid in parser.sections():
        parser.remove_section(guid)

    atomic_write_conf(lp, parser)

    return True
+
+
def set_privileges(username, uid, gid):
    """
    Set current process privileges

    Only the *effective* uid/gid are switched, so root privileges can be
    regained later. The gid is set first, while we still have the rights
    to change it. The username parameter is currently unused.
    """

    os.setegid(gid)
    os.seteuid(uid)
+
+
def drop_privileges(username, func, *args):
    """Run func(*args) with the privileges of username, then restore root.

    username - User whose effective uid/gid to adopt
    func - Callable to execute with dropped privileges
    args - Positional arguments for func
    return - Whatever func returns
    raises Exception when not running as root; re-raises anything func
    raises

    Restoration happens in a finally block, so privileges are recovered
    even when func raises a non-Exception error (e.g. KeyboardInterrupt),
    and the original traceback is preserved on re-raise.
    """
    current_uid = os.getuid()

    if not current_uid == 0:
        raise Exception('Not enough permissions to drop privileges')

    pw = pwd.getpwnam(username)

    # Drop privileges
    set_privileges(username, pw.pw_uid, pw.pw_gid)

    try:
        return func(*args)
    finally:
        # Restore privileges
        set_privileges('root', current_uid, 0)
+
def expand_pref_variables(text, gpt_path, lp, username=None):
    """Expand MS preference variables (e.g. %DomainName%) inside text.

    text - The preference string to expand
    gpt_path - sysvol path of the GPO, used for %GptPath%
    lp - LoadParm supplying host-specific values
    username - Substituted for %LogonUser% when provided
    return - text with every supported %Variable% replaced
    raises NameError if text references an unsupported (None) variable
    """
    utc_dt = datetime.utcnow()
    dt = datetime.now()
    cache_path = lp.cache_path(os.path.join('gpo_cache'))
    # These are all the possible preference variables that MS supports. The
    # variables set to 'None' here are currently unsupported by Samba, and will
    # prevent the individual policy from applying.
    variables = { 'AppDataDir': os.path.expanduser('~/.config'),
                  'BinaryComputerSid': None,
                  'BinaryUserSid': None,
                  'CommonAppdataDir': None,
                  'CommonDesktopDir': None,
                  'CommonFavoritesDir': None,
                  'CommonProgramsDir': None,
                  'CommonStartUpDir': None,
                  'ComputerName': lp.get('netbios name'),
                  'CurrentProccessId': None,
                  'CurrentThreadId': None,
                  'DateTime': utc_dt.strftime('%Y-%m-%d %H:%M:%S UTC'),
                  'DateTimeEx': str(utc_dt),
                  'DesktopDir': os.path.expanduser('~/Desktop'),
                  'DomainName': lp.get('realm'),
                  'FavoritesDir': None,
                  'GphPath': None,
                  'GptPath': os.path.join(cache_path,
                                          check_safe_path(gpt_path).upper()),
                  'GroupPolicyVersion': None,
                  'LastDriveMapped': None,
                  'LastError': None,
                  'LastErrorText': None,
                  'LdapComputerSid': None,
                  'LdapUserSid': None,
                  'LocalTime': dt.strftime('%H:%M:%S'),
                  'LocalTimeEx': dt.strftime('%H:%M:%S.%f'),
                  'LogonDomain': lp.get('realm'),
                  'LogonServer': None,
                  'LogonUser': username,
                  'LogonUserSid': None,
                  'MacAddress': None,
                  'NetPlacesDir': None,
                  'OsVersion': None,
                  'ProgramFilesDir': None,
                  'ProgramsDir': None,
                  'RecentDocumentsDir': None,
                  'ResultCode': None,
                  'ResultText': None,
                  'ReversedComputerSid': None,
                  'ReversedUserSid': None,
                  'SendToDir': None,
                  'StartMenuDir': None,
                  'StartUpDir': None,
                  'SystemDir': None,
                  'SystemDrive': '/',
                  'TempDir': '/tmp',
                  'TimeStamp': str(datetime.timestamp(dt)),
                  'TraceFile': None,
                  'WindowsDir': None
                }
    for exp_var, val in variables.items():
        exp_var_fmt = '%%%s%%' % exp_var
        if exp_var_fmt in text:
            if val is None:
                raise NameError('Expansion variable %s is undefined' % exp_var)
            text = text.replace(exp_var_fmt, val)
    return text
diff --git a/python/samba/gp/util/logging.py b/python/samba/gp/util/logging.py
new file mode 100644
index 0000000..da085d8
--- /dev/null
+++ b/python/samba/gp/util/logging.py
@@ -0,0 +1,112 @@
+#
+# samba-gpupdate enhanced logging
+#
+# Copyright (C) 2019-2020 BaseALT Ltd.
+# Copyright (C) David Mulder <dmulder@samba.org> 2022
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import datetime
+import logging
+import gettext
+import random
+import sys
+
+logger = logging.getLogger("gp")
+
+
def logger_init(name, log_level):
    """Attach a stdout handler and map samba-gpupdate's numeric debug
    level to a python logging level.

    name - Unused; kept for interface compatibility
    log_level - 0: CRITICAL, 1: ERROR, 2: WARNING, 3: INFO, >=4: DEBUG
    """
    logger.addHandler(logging.StreamHandler(sys.stdout))
    named_levels = {1: logging.ERROR, 2: logging.WARNING, 3: logging.INFO}
    if log_level >= 4:
        logger.setLevel(logging.DEBUG)
    else:
        logger.setLevel(named_levels.get(log_level, logging.CRITICAL))
+
class slogm(object):
    """
    Structured log message class

    Renders as '<timestamp>|<message> | <kwargs dict>'. A non-dict kwargs
    value is wrapped as {'val': value}.
    """
    def __init__(self, message, kwargs=None):
        self.message = message
        self.kwargs = {} if kwargs is None else kwargs
        if not isinstance(self.kwargs, dict):
            self.kwargs = {'val': self.kwargs}

    def __str__(self):
        stamp = datetime.datetime.now().isoformat(sep=' ',
                                                  timespec='milliseconds')
        extra = dict(self.kwargs)
        return '{}|{} | {}'.format(stamp, self.message, extra)
+
def message_with_code(mtype, message):
    """Prefix message with a deterministic 5 digit code, e.g. '[E01234]| '.

    mtype - Single-letter message type ('I', 'W', 'E', 'F', 'D')
    message - The message text; it alone determines the code
    return - '[<mtype><code>]| ' + gettext-translated message

    A private Random instance seeded from the message keeps the code
    reproducible without clobbering the global random module state for
    other consumers (random.seed() reseeds the shared RNG).
    """
    code = random.Random(message).randint(0, 99999)
    return '[' + mtype + str(code).rjust(5, '0') + ']| ' + \
        gettext.gettext(message)
+
class log(object):
    """Static logging facade.

    Each method tags the message with a deterministic code via
    message_with_code(), emits a structured slogm record on the module
    logger, and returns the coded message so callers can reuse it
    (e.g. in a raised exception).
    """

    @staticmethod
    def info(message, data=None):
        msg = message_with_code('I', message)
        logger.info(slogm(msg, {} if data is None else data))
        return msg

    @staticmethod
    def warning(message, data=None):
        msg = message_with_code('W', message)
        logger.warning(slogm(msg, {} if data is None else data))
        return msg

    @staticmethod
    def warn(message, data=None):
        # Alias for warning(), kept for backward compatibility
        return log.warning(message, {} if data is None else data)

    @staticmethod
    def error(message, data=None):
        msg = message_with_code('E', message)
        logger.error(slogm(msg, {} if data is None else data))
        return msg

    @staticmethod
    def fatal(message, data=None):
        msg = message_with_code('F', message)
        logger.fatal(slogm(msg, {} if data is None else data))
        return msg

    @staticmethod
    def debug(message, data=None):
        msg = message_with_code('D', message)
        logger.debug(slogm(msg, {} if data is None else data))
        return msg
diff --git a/python/samba/gp/vgp_access_ext.py b/python/samba/gp/vgp_access_ext.py
new file mode 100644
index 0000000..7efb3bb
--- /dev/null
+++ b/python/samba/gp/vgp_access_ext.py
@@ -0,0 +1,178 @@
+# vgp_access_ext samba group policy
+# Copyright (C) David Mulder <dmulder@suse.com> 2020
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os, re
+from samba.gp.gpclass import gp_xml_ext, gp_file_applier
+from tempfile import NamedTemporaryFile
+from samba.gp.util.logging import log
+
intro = '''
### autogenerated by samba
#
# This file is generated by the vgp_access_ext Group Policy
# Client Side Extension. To modify the contents of this file,
# modify the appropriate Group Policy objects which apply
# to this machine. DO NOT MODIFY THIS FILE DIRECTLY.
#

'''

# The deny all file is implicit any time an allow entry is used
DENY_BOUND = 9000000000
DENY_FILE = '_gp_DENY_ALL.conf'

# Each policy MUST create its own DENY_ALL file if an allow entry exists,
# otherwise policies will conflict and one could remove a DENY_ALL when
# another one still requires it.
def deny_file(access):
    """Atomically create the next numbered implicit deny-ALL conf file.

    Returns the path of the file created inside *access*.
    """
    target = os.path.join(access,
                          '%d%s' % (select_next_deny(access), DENY_FILE))
    # Write to a temp file in the same directory, then rename, so a
    # partially written file is never visible under the final name.
    with NamedTemporaryFile(delete=False, dir=access) as tmp:
        with open(tmp.name, 'w') as out:
            out.write(intro)
            out.write('-:ALL:ALL')
        os.chmod(tmp.name, 0o644)
        os.rename(tmp.name, target)
    return target

def select_next_deny(directory):
    """Return the next sequence number for a deny-ALL file.

    Deny files are numbered above DENY_BOUND so they always sort after
    the ordinary (allow) conf files.
    """
    highest = DENY_BOUND
    for entry in os.listdir(directory):
        if DENY_FILE not in entry:
            continue
        match = re.match(r'(\d+)', entry)
        if match:
            highest = max(highest, int(match.group(1)))
    return highest + 1
+
# Access files in /etc/security/access.d are read in the order of the system
# locale. Here we number the conf files to ensure they are read in the correct
# order. Ignore the deny file, since allow entries should always come before
# the implicit deny ALL.
def select_next_conf(directory):
    """Return the next numeric prefix for an ordinary access conf file."""
    used = [0]
    for entry in os.listdir(directory):
        if DENY_FILE in entry:
            continue
        match = re.match(r'(\d+)', entry)
        if match:
            used.append(int(match.group(1)))
    return max(used) + 1
+
class vgp_access_ext(gp_xml_ext, gp_file_applier):
    """Apply VGP 'Host Access' policies as pam_access style conf files.

    Allow/Deny entries from the GPO's manifest.xml files are rendered
    into numbered files under /etc/security/access.d.  Whenever at
    least one allow entry exists, an implicit deny-ALL file is also
    emitted (see deny_file()).
    """
    def __str__(self):
        return 'VGP/Unix Settings/Host Access'

    def process_group_policy(self, deleted_gpo_list, changed_gpo_list,
                             access='/etc/security/access.d'):
        """Unapply entries of deleted GPOs, then (re)apply changed ones.

        :param access: target directory for the generated conf files
            (overridable for testing).
        """
        for guid, settings in deleted_gpo_list:
            if str(self) in settings:
                for attribute, policy_files in settings[str(self)].items():
                    self.unapply(guid, attribute, policy_files)

        for gpo in changed_gpo_list:
            if gpo.file_sys_path:
                allow = 'MACHINE/VGP/VTLA/VAS/HostAccessControl/Allow/manifest.xml'
                path = os.path.join(gpo.file_sys_path, allow)
                allow_conf = self.parse(path)
                deny = 'MACHINE/VGP/VTLA/VAS/HostAccessControl/Deny/manifest.xml'
                path = os.path.join(gpo.file_sys_path, deny)
                deny_conf = self.parse(path)
                entries = []
                policy_files = []
                # Initialized up front: the deny branch below inspects this
                # even when no Allow manifest was parsed (previously a
                # NameError when only a Deny manifest was present).
                allow_listelements = []
                winbind_sep = self.lp.get('winbind separator')
                if allow_conf:
                    policy = allow_conf.find('policysetting')
                    data = policy.find('data')
                    allow_listelements = data.findall('listelement')
                    for listelement in allow_listelements:
                        adobject = listelement.find('adobject')
                        name = adobject.find('name').text
                        domain = adobject.find('domain').text
                        entries.append('+:%s%s%s:ALL' % (domain,
                                                         winbind_sep,
                                                         name))
                    if len(allow_listelements) > 0:
                        log.info('Adding an implicit deny ALL because an allow'
                                 ' entry is present')
                        policy_files.append(deny_file(access))
                if deny_conf:
                    policy = deny_conf.find('policysetting')
                    data = policy.find('data')
                    for listelement in data.findall('listelement'):
                        adobject = listelement.find('adobject')
                        name = adobject.find('name').text
                        domain = adobject.find('domain').text
                        entries.append('-:%s%s%s:ALL' % (domain,
                                                         winbind_sep,
                                                         name))
                        if len(allow_listelements) > 0:
                            log.warn("Deny entry '%s' is meaningless with "
                                     "allow present" % entries[-1])
                if len(entries) == 0:
                    continue
                conf_id = select_next_conf(access)
                access_file = os.path.join(access, '%010d_gp.conf' % conf_id)
                policy_files.append(access_file)
                access_contents = '\n'.join(entries)
                # Each GPO applies only one set of access policies, so the
                # attribute does not need uniqueness.
                attribute = self.generate_attribute(gpo.name)
                # The value hash is generated from the access policy, ensuring
                # any changes to this GPO will cause the files to be rewritten.
                value_hash = self.generate_value_hash(access_contents)
                def applier_func(access, access_file, policy_files):
                    if not os.path.isdir(access):
                        # 0o755 (not 0o644): a directory requires the
                        # execute bit to be traversable.
                        os.mkdir(access, 0o755)
                    # Temp file + rename makes the install atomic.
                    with NamedTemporaryFile(delete=False, dir=access) as f:
                        with open(f.name, 'w') as w:
                            w.write(intro)
                            w.write(access_contents)
                        os.chmod(f.name, 0o644)
                        os.rename(f.name, access_file)
                    return policy_files
                self.apply(gpo.name, attribute, value_hash, applier_func,
                           access, access_file, policy_files)
                # Cleanup any old entries that are no longer part of the policy
                self.clean(gpo.name, keep=[attribute])

    def rsop(self, gpo):
        """Report the access entries this GPO carries (no changes made)."""
        output = {}
        if gpo.file_sys_path:
            allow = 'MACHINE/VGP/VTLA/VAS/HostAccessControl/Allow/manifest.xml'
            path = os.path.join(gpo.file_sys_path, allow)
            allow_conf = self.parse(path)
            deny = 'MACHINE/VGP/VTLA/VAS/HostAccessControl/Deny/manifest.xml'
            path = os.path.join(gpo.file_sys_path, deny)
            deny_conf = self.parse(path)
            entries = []
            winbind_sep = self.lp.get('winbind separator')
            if allow_conf:
                policy = allow_conf.find('policysetting')
                data = policy.find('data')
                allow_listelements = data.findall('listelement')
                for listelement in allow_listelements:
                    adobject = listelement.find('adobject')
                    name = adobject.find('name').text
                    domain = adobject.find('domain').text
                    # NOTE(review): order here is name<sep>domain, while
                    # process_group_policy writes domain<sep>name —
                    # confirm which ordering is intended.
                    if str(self) not in output.keys():
                        output[str(self)] = []
                    output[str(self)].append('+:%s%s%s:ALL' % (name,
                                                               winbind_sep,
                                                               domain))
                if len(allow_listelements) > 0:
                    output[str(self)].append('-:ALL:ALL')
            if deny_conf:
                policy = deny_conf.find('policysetting')
                data = policy.find('data')
                for listelement in data.findall('listelement'):
                    adobject = listelement.find('adobject')
                    name = adobject.find('name').text
                    domain = adobject.find('domain').text
                    if str(self) not in output.keys():
                        output[str(self)] = []
                    output[str(self)].append('-:%s%s%s:ALL' % (name,
                                                               winbind_sep,
                                                               domain))
        return output
diff --git a/python/samba/gp/vgp_files_ext.py b/python/samba/gp/vgp_files_ext.py
new file mode 100644
index 0000000..78bfc28
--- /dev/null
+++ b/python/samba/gp/vgp_files_ext.py
@@ -0,0 +1,140 @@
+# vgp_files_ext samba gpo policy
+# Copyright (C) David Mulder <dmulder@suse.com> 2020
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os, pwd, grp
+from samba.gp.gpclass import gp_xml_ext, check_safe_path, gp_file_applier
+from tempfile import NamedTemporaryFile
+from shutil import copyfile, move
+from samba.gp.util.logging import log
+
def calc_mode(entry):
    """Compute a POSIX permission mode from <permissions> child elements.

    Each <permissions type="user|group|other"> element may contain
    <read/>, <write/> and <execute/> children; other types are ignored.
    """
    shifts = {'user': 6, 'group': 3, 'other': 0}
    bits = (('read', 0o4), ('write', 0o2), ('execute', 0o1))
    mode = 0o000
    for permissions in entry.findall('permissions'):
        shift = shifts.get(permissions.get('type'))
        if shift is None:
            continue
        for tag, bit in bits:
            if permissions.find(tag) is not None:
                mode |= bit << shift
    return mode
+
def stat_from_mode(mode):
    """Render *mode* as an ls-style permission string, e.g. '-rw-r--r--'.

    The leading character is always '-' (regular file).
    """
    symbols = ((0o4, 'r'), (0o2, 'w'), (0o1, 'x'))
    chars = ['-']
    for shift in (6, 3, 0):  # user, group, other triplets
        for bit, ch in symbols:
            chars.append(ch if mode & (bit << shift) else '-')
    return ''.join(chars)
+
def source_file_change(fname):
    """Return the file's ctime as bytes for change detection.

    Returns None when the file does not exist, so callers can feed the
    result straight into a value-hash computation.
    """
    if not os.path.exists(fname):
        return None
    return b'%d' % os.stat(fname).st_ctime
+
class vgp_files_ext(gp_xml_ext, gp_file_applier):
    """Install files from the SYSVOL GPO cache onto the local system.

    Each <file_properties> entry in the manifest names a cached source
    file, a local target path, an owner, a group and a permission set.
    """

    def __str__(self):
        return 'VGP/Unix Settings/Files'

    def process_group_policy(self, deleted_gpo_list, changed_gpo_list):
        """Unapply files of deleted GPOs, then install changed ones."""
        for guid, settings in deleted_gpo_list:
            if str(self) in settings:
                for attribute, _ in settings[str(self)].items():
                    # The attribute is the target path itself, so it also
                    # serves as the unapply token.
                    self.unapply(guid, attribute, attribute)

        for gpo in changed_gpo_list:
            if gpo.file_sys_path:
                xml = 'MACHINE/VGP/VTLA/Unix/Files/manifest.xml'
                path = os.path.join(gpo.file_sys_path, xml)
                xml_conf = self.parse(path)
                if not xml_conf:
                    continue
                policy = xml_conf.find('policysetting')
                data = policy.find('data')
                for entry in data.findall('file_properties'):
                    local_path = self.lp.cache_path('gpo_cache')
                    source = entry.find('source').text
                    # Cached SYSVOL paths are stored upper-cased on disk.
                    source_file = os.path.join(local_path,
                        os.path.dirname(check_safe_path(path)).upper(),
                        source.upper())
                    if not os.path.exists(source_file):
                        # Best-effort: skip missing sources rather than fail
                        # the whole policy application.
                        log.warn('Source file does not exist', source_file)
                        continue
                    target = entry.find('target').text
                    user = entry.find('user').text
                    group = entry.find('group').text
                    mode = calc_mode(entry)

                    # The attribute is simply the target file.
                    attribute = target
                    # The value hash is generated from the source file last
                    # change stamp, the user, the group, and the mode, ensuring
                    # any changes to this GPO will cause the file to be
                    # rewritten.
                    value_hash = self.generate_value_hash(
                        source_file_change(source_file),
                        user, group, b'%d' % mode)
                    def applier_func(source_file, target, user, group, mode):
                        # Copy into a temp file in the target directory, set
                        # ownership/mode, then move, so the install is atomic.
                        with NamedTemporaryFile(dir=os.path.dirname(target),
                                                delete=False) as f:
                            copyfile(source_file, f.name)
                            os.chown(f.name, pwd.getpwnam(user).pw_uid,
                                     grp.getgrnam(group).gr_gid)
                            os.chmod(f.name, mode)
                            move(f.name, target)
                        return [target]
                    self.apply(gpo.name, attribute, value_hash, applier_func,
                               source_file, target, user, group, mode)

    def rsop(self, gpo):
        """Return an ls-style description of each file this GPO manages."""
        output = {}
        xml = 'MACHINE/VGP/VTLA/Unix/Files/manifest.xml'
        if gpo.file_sys_path:
            path = os.path.join(gpo.file_sys_path, xml)
            xml_conf = self.parse(path)
            if not xml_conf:
                return output
            policy = xml_conf.find('policysetting')
            data = policy.find('data')
            for entry in data.findall('file_properties'):
                source = entry.find('source').text
                target = entry.find('target').text
                user = entry.find('user').text
                group = entry.find('group').text
                mode = calc_mode(entry)
                p = '%s\t%s\t%s\t%s -> %s' % \
                        (stat_from_mode(mode), user, group, target, source)
                if str(self) not in output.keys():
                    output[str(self)] = []
                output[str(self)].append(p)
        return output
diff --git a/python/samba/gp/vgp_issue_ext.py b/python/samba/gp/vgp_issue_ext.py
new file mode 100644
index 0000000..266e92b
--- /dev/null
+++ b/python/samba/gp/vgp_issue_ext.py
@@ -0,0 +1,90 @@
+# vgp_issue_ext samba gpo policy
+# Copyright (C) David Mulder <dmulder@suse.com> 2020
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+from samba.gp.gpclass import gp_xml_ext, gp_misc_applier
+
class vgp_issue_ext(gp_xml_ext, gp_misc_applier):
    """Manage the /etc/issue pre-login banner from a VGP manifest."""

    def unapply(self, guid, issue, attribute, value):
        """Restore the banner recorded in the cache for this GPO.

        :param guid: GPO guid the cached value belongs to
        :param issue: path of the banner file (normally /etc/issue)
        :param attribute: must be 'issue'
        :param value: cached value holding the old/new banner pair
        :raises ValueError: if attribute is not 'issue'
        """
        if attribute != 'issue':
            raise ValueError('"%s" is not a message attribute' % attribute)
        msg = value
        data = self.parse_value(value)
        if os.path.exists(issue):
            with open(issue, 'r') as f:
                current = f.read()
        else:
            current = ''
        # Only overwrite the msg if it hasn't been modified. It may have been
        # modified by another GPO.
        if 'new_val' not in data or current.strip() == data['new_val'].strip():
            msg = data['old_val']
        with open(issue, 'w') as w:
            if msg:
                w.write(msg)
            else:
                # No previous contents to restore: leave the file empty.
                w.truncate()
        self.cache_remove_attribute(guid, attribute)

    def apply(self, guid, issue, text):
        """Write the policy banner, caching the previous file contents.

        text is the manifest <text> element; the file is only rewritten
        when its contents differ from the policy value.
        """
        if os.path.exists(issue):
            with open(issue, 'r') as f:
                current = f.read()
        else:
            current = ''
        if current != text.text:
            with open(issue, 'w') as w:
                w.write(text.text)
        data = self.generate_value(old_val=current, new_val=text.text)
        self.cache_add_attribute(guid, 'issue', data)

    def __str__(self):
        return 'Unix Settings/Issue'

    def process_group_policy(self, deleted_gpo_list, changed_gpo_list,
                             issue='/etc/issue'):
        """Unapply banners of deleted GPOs, then apply changed ones."""
        for guid, settings in deleted_gpo_list:
            if str(self) in settings:
                for attribute, msg in settings[str(self)].items():
                    self.unapply(guid, issue, attribute, msg)

        for gpo in changed_gpo_list:
            if gpo.file_sys_path:
                xml = 'MACHINE/VGP/VTLA/Unix/Issue/manifest.xml'
                path = os.path.join(gpo.file_sys_path, xml)
                xml_conf = self.parse(path)
                if not xml_conf:
                    continue
                policy = xml_conf.find('policysetting')
                data = policy.find('data')
                text = data.find('text')
                self.apply(gpo.name, issue, text)

    def rsop(self, gpo):
        """Report the banner this GPO would install, keyed by file path."""
        output = {}
        if gpo.file_sys_path:
            xml = 'MACHINE/VGP/VTLA/Unix/Issue/manifest.xml'
            path = os.path.join(gpo.file_sys_path, xml)
            xml_conf = self.parse(path)
            if not xml_conf:
                return output
            policy = xml_conf.find('policysetting')
            data = policy.find('data')
            filename = data.find('filename')
            text = data.find('text')
            # The manifest names the target file relative to /etc.
            mfile = os.path.join('/etc', filename.text)
            output[mfile] = text.text
        return output
diff --git a/python/samba/gp/vgp_motd_ext.py b/python/samba/gp/vgp_motd_ext.py
new file mode 100644
index 0000000..845a5c4
--- /dev/null
+++ b/python/samba/gp/vgp_motd_ext.py
@@ -0,0 +1,90 @@
+# vgp_motd_ext samba gpo policy
+# Copyright (C) David Mulder <dmulder@suse.com> 2020
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+from samba.gp.gpclass import gp_xml_ext, gp_misc_applier
+
class vgp_motd_ext(gp_xml_ext, gp_misc_applier):
    """Manage the /etc/motd message of the day from a VGP manifest."""

    def unapply(self, guid, motd, attribute, value):
        """Restore the message recorded in the cache for this GPO.

        :param guid: GPO guid the cached value belongs to
        :param motd: path of the message file (normally /etc/motd)
        :param attribute: must be 'motd'
        :param value: cached value holding the old/new message pair
        :raises ValueError: if attribute is not 'motd'
        """
        if attribute != 'motd':
            raise ValueError('"%s" is not a message attribute' % attribute)
        msg = value
        data = self.parse_value(value)
        if os.path.exists(motd):
            with open(motd, 'r') as f:
                current = f.read()
        else:
            current = ''
        # Only overwrite the msg if it hasn't been modified. It may have been
        # modified by another GPO.
        if 'new_val' not in data or current.strip() == data['new_val'].strip():
            msg = data['old_val']
        with open(motd, 'w') as w:
            if msg:
                w.write(msg)
            else:
                # No previous contents to restore: leave the file empty.
                w.truncate()
        self.cache_remove_attribute(guid, attribute)

    def apply(self, guid, motd, text):
        """Write the policy message, caching the previous file contents.

        text is the manifest <text> element; the file is only rewritten
        when its contents differ from the policy value.
        """
        if os.path.exists(motd):
            with open(motd, 'r') as f:
                current = f.read()
        else:
            current = ''
        if current != text.text:
            with open(motd, 'w') as w:
                w.write(text.text)
        data = self.generate_value(old_val=current, new_val=text.text)
        self.cache_add_attribute(guid, 'motd', data)

    def __str__(self):
        return 'Unix Settings/Message of the Day'

    def process_group_policy(self, deleted_gpo_list, changed_gpo_list,
                             motd='/etc/motd'):
        """Unapply messages of deleted GPOs, then apply changed ones."""
        for guid, settings in deleted_gpo_list:
            if str(self) in settings:
                for attribute, msg in settings[str(self)].items():
                    self.unapply(guid, motd, attribute, msg)

        for gpo in changed_gpo_list:
            if gpo.file_sys_path:
                xml = 'MACHINE/VGP/VTLA/Unix/MOTD/manifest.xml'
                path = os.path.join(gpo.file_sys_path, xml)
                xml_conf = self.parse(path)
                if not xml_conf:
                    continue
                policy = xml_conf.find('policysetting')
                data = policy.find('data')
                text = data.find('text')
                self.apply(gpo.name, motd, text)

    def rsop(self, gpo):
        """Report the message this GPO would install, keyed by file path."""
        output = {}
        if gpo.file_sys_path:
            xml = 'MACHINE/VGP/VTLA/Unix/MOTD/manifest.xml'
            path = os.path.join(gpo.file_sys_path, xml)
            xml_conf = self.parse(path)
            if not xml_conf:
                return output
            policy = xml_conf.find('policysetting')
            data = policy.find('data')
            filename = data.find('filename')
            text = data.find('text')
            # The manifest names the target file relative to /etc.
            mfile = os.path.join('/etc', filename.text)
            output[mfile] = text.text
        return output
diff --git a/python/samba/gp/vgp_openssh_ext.py b/python/samba/gp/vgp_openssh_ext.py
new file mode 100644
index 0000000..6e0ab77
--- /dev/null
+++ b/python/samba/gp/vgp_openssh_ext.py
@@ -0,0 +1,115 @@
+# vgp_openssh_ext samba group policy
+# Copyright (C) David Mulder <dmulder@suse.com> 2020
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import re
+from io import BytesIO
+from samba.gp.gpclass import gp_xml_ext, gp_file_applier
+from samba.common import get_bytes
+
+intro = b'''
+### autogenerated by samba
+#
+# This file is generated by the vgp_openssh_ext Group Policy
+# Client Side Extension. To modify the contents of this file,
+# modify the appropriate Group Policy objects which apply
+# to this machine. DO NOT MODIFY THIS FILE DIRECTLY.
+#
+
+'''
+
# For each key value pair in sshd_config, the first obtained value will be
# used. We must insert config files in reverse, so that the last applied policy
# takes precedence.
def select_next_conf(directory):
    """Pick the next (lower-numbered) snippet filename in *directory*."""
    lowest = 9000000000  # starting node when the directory is empty
    for entry in os.listdir(directory):
        match = re.match(r'(\d+)', entry)
        if match:
            lowest = min(lowest, int(match.group(1)))
    return os.path.join(directory, '%010d_gp.conf' % (lowest - 1))
+
class vgp_openssh_ext(gp_xml_ext, gp_file_applier):
    """Render VGP OpenSSH settings into sshd_config.d drop-in snippets."""

    def __str__(self):
        return 'VGP/Unix Settings/OpenSSH'

    def process_group_policy(self, deleted_gpo_list, changed_gpo_list,
                             cfg_dir='/etc/ssh/sshd_config.d'):
        """Unapply settings of deleted GPOs, then apply changed ones.

        :param cfg_dir: drop-in directory for sshd config snippets
            (overridable for testing).
        """
        for guid, settings in deleted_gpo_list:
            if str(self) in settings:
                for attribute, sshd_config in settings[str(self)].items():
                    self.unapply(guid, attribute, sshd_config)

        for gpo in changed_gpo_list:
            if gpo.file_sys_path:
                xml = 'MACHINE/VGP/VTLA/SshCfg/SshD/manifest.xml'
                path = os.path.join(gpo.file_sys_path, xml)
                xml_conf = self.parse(path)
                if not xml_conf:
                    continue
                policy = xml_conf.find('policysetting')
                data = policy.find('data')
                configfile = data.find('configfile')
                for configsection in configfile.findall('configsection'):
                    # Only the unnamed (global) section is supported.
                    if configsection.find('sectionname').text:
                        continue
                    settings = {}
                    for kv in configsection.findall('keyvaluepair'):
                        settings[kv.find('key')] = kv.find('value')
                    raw = BytesIO()
                    for k, v in settings.items():
                        raw.write(b'%s %s\n' %
                                  (get_bytes(k.text), get_bytes(v.text)))
                    # Each GPO applies only one set of OpenSSH settings, in a
                    # single file, so the attribute does not need uniqueness.
                    attribute = self.generate_attribute(gpo.name)
                    # The value hash is generated from the raw data we will
                    # write to the OpenSSH settings file, ensuring any changes
                    # to this GPO will cause the file to be rewritten.
                    value_hash = self.generate_value_hash(raw.getvalue())
                    if not os.path.isdir(cfg_dir):
                        # 0o750 rather than 0o640: a directory needs the
                        # execute bit to be traversable; the conf files
                        # inside remain 0o640.
                        os.mkdir(cfg_dir, 0o750)
                    def applier_func(cfg_dir, raw):
                        filename = select_next_conf(cfg_dir)
                        # Context manager guarantees the handle is closed
                        # even if a write fails.
                        with open(filename, 'wb') as f:
                            f.write(intro)
                            f.write(raw.getvalue())
                        os.chmod(filename, 0o640)
                        return [filename]
                    self.apply(gpo.name, attribute, value_hash, applier_func,
                               cfg_dir, raw)
                    raw.close()

    def rsop(self, gpo):
        """Report the sshd settings this GPO carries (no changes made)."""
        output = {}
        if gpo.file_sys_path:
            xml = 'MACHINE/VGP/VTLA/SshCfg/SshD/manifest.xml'
            path = os.path.join(gpo.file_sys_path, xml)
            xml_conf = self.parse(path)
            if not xml_conf:
                return output
            policy = xml_conf.find('policysetting')
            data = policy.find('data')
            configfile = data.find('configfile')
            for configsection in configfile.findall('configsection'):
                if configsection.find('sectionname').text:
                    continue
                for kv in configsection.findall('keyvaluepair'):
                    if str(self) not in output.keys():
                        output[str(self)] = {}
                    output[str(self)][kv.find('key').text] = \
                        kv.find('value').text
        return output
diff --git a/python/samba/gp/vgp_startup_scripts_ext.py b/python/samba/gp/vgp_startup_scripts_ext.py
new file mode 100644
index 0000000..c0edb16
--- /dev/null
+++ b/python/samba/gp/vgp_startup_scripts_ext.py
@@ -0,0 +1,136 @@
+# vgp_startup_scripts_ext samba gpo policy
+# Copyright (C) David Mulder <dmulder@suse.com> 2021
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+from samba.gp.gpclass import gp_xml_ext, check_safe_path, gp_file_applier
+from tempfile import NamedTemporaryFile
+from samba.common import get_bytes
+from subprocess import Popen
+
+intro = b'''
+### autogenerated by samba
+#
+# This file is generated by the vgp_startup_scripts_ext Group Policy
+# Client Side Extension. To modify the contents of this file,
+# modify the appropriate Group Policy objects which apply
+# to this machine. DO NOT MODIFY THIS FILE DIRECTLY.
+#
+
+'''
+
class vgp_startup_scripts_ext(gp_xml_ext, gp_file_applier):
    """Install VGP startup scripts: run-once, or via a cron @reboot entry."""

    def __str__(self):
        return 'VGP/Unix Settings/Startup Scripts'

    def process_group_policy(self, deleted_gpo_list, changed_gpo_list,
                             cdir='/etc/cron.d'):
        """Unapply scripts of deleted GPOs, then apply changed ones.

        :param cdir: cron.d directory used for @reboot entries
            (overridable for testing).
        """
        for guid, settings in deleted_gpo_list:
            if str(self) in settings:
                for attribute, script in settings[str(self)].items():
                    self.unapply(guid, attribute, script)

        for gpo in changed_gpo_list:
            if gpo.file_sys_path:
                xml = 'MACHINE/VGP/VTLA/Unix/Scripts/Startup/manifest.xml'
                path = os.path.join(gpo.file_sys_path, xml)
                xml_conf = self.parse(path)
                if not xml_conf:
                    continue
                policy = xml_conf.find('policysetting')
                data = policy.find('data')
                attributes = []
                for listelement in data.findall('listelement'):
                    local_path = self.lp.cache_path('gpo_cache')
                    script = listelement.find('script').text
                    # Cached SYSVOL paths are stored upper-cased on disk.
                    script_file = os.path.join(local_path,
                        os.path.dirname(check_safe_path(path)).upper(),
                        script.upper())
                    parameters = listelement.find('parameters')
                    if parameters is not None:
                        parameters = parameters.text
                    else:
                        parameters = ''
                    # The manifest supplies the hash, so a changed script or
                    # parameters triggers a re-apply.
                    value_hash = listelement.find('hash').text
                    attribute = self.generate_attribute(script_file,
                                                        parameters)
                    attributes.append(attribute)
                    run_as = listelement.find('run_as')
                    if run_as is not None:
                        run_as = run_as.text
                    else:
                        run_as = 'root'
                    run_once = listelement.find('run_once') is not None
                    if run_once:
                        def applier_func(script_file, parameters):
                            # NOTE(review): shell=True with interpolated
                            # path/parameters; these come from the GPO
                            # (admin-controlled), not arbitrary users.
                            Popen(['/bin/sh %s %s' % (script_file, parameters)],
                                  shell=True).wait()
                            # Run once scripts don't create a file to unapply,
                            # so there is nothing to return.
                            return []
                        self.apply(gpo.name, attribute, value_hash, applier_func,
                                   script_file, parameters)
                    else:
                        def applier_func(run_as, script_file, parameters):
                            entry = '@reboot %s %s %s' % (run_as, script_file,
                                                          parameters)
                            with NamedTemporaryFile(prefix='gp_', dir=cdir,
                                                    delete=False) as f:
                                f.write(intro)
                                f.write(get_bytes(entry))
                                os.chmod(f.name, 0o700)
                            return [f.name]
                        self.apply(gpo.name, attribute, value_hash, applier_func,
                                   run_as, script_file, parameters)

                # Drop entries cached from prior runs that are no longer
                # present in this policy.
                self.clean(gpo.name, keep=attributes)

    def rsop(self, gpo):
        """Describe the scripts this GPO would install (no changes made)."""
        output = {}
        xml = 'MACHINE/VGP/VTLA/Unix/Scripts/Startup/manifest.xml'
        if gpo.file_sys_path:
            path = os.path.join(gpo.file_sys_path, xml)
            xml_conf = self.parse(path)
            if not xml_conf:
                return output
            policy = xml_conf.find('policysetting')
            data = policy.find('data')
            for listelement in data.findall('listelement'):
                local_path = self.lp.cache_path('gpo_cache')
                script = listelement.find('script').text
                script_file = os.path.join(local_path,
                    os.path.dirname(check_safe_path(path)).upper(),
                    script.upper())
                parameters = listelement.find('parameters')
                if parameters is not None:
                    parameters = parameters.text
                else:
                    parameters = ''
                run_as = listelement.find('run_as')
                if run_as is not None:
                    run_as = run_as.text
                else:
                    run_as = 'root'
                run_once = listelement.find('run_once') is not None
                if run_once:
                    entry = 'Run once as: %s `%s %s`' % (run_as, script_file,
                                                         parameters)
                else:
                    entry = '@reboot %s %s %s' % (run_as, script_file,
                                                  parameters)
                if str(self) not in output.keys():
                    output[str(self)] = []
                output[str(self)].append(entry)
        return output
diff --git a/python/samba/gp/vgp_sudoers_ext.py b/python/samba/gp/vgp_sudoers_ext.py
new file mode 100644
index 0000000..b388d8b
--- /dev/null
+++ b/python/samba/gp/vgp_sudoers_ext.py
@@ -0,0 +1,97 @@
+# vgp_sudoers_ext samba gpo policy
+# Copyright (C) David Mulder <dmulder@suse.com> 2020
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+from samba.gp.gpclass import gp_xml_ext, gp_file_applier
+from samba.gp.gp_sudoers_ext import sudo_applier_func
+
class vgp_sudoers_ext(gp_xml_ext, gp_file_applier):
    """Install sudoers entries defined by a VGP manifest."""

    def __str__(self):
        return 'VGP/Unix Settings/Sudo Rights'

    def process_group_policy(self, deleted_gpo_list, changed_gpo_list,
                             sdir='/etc/sudoers.d'):
        """Unapply sudoers of deleted GPOs, then apply changed ones.

        :param sdir: sudoers.d drop-in directory (overridable for testing).
        """
        for guid, settings in deleted_gpo_list:
            if str(self) in settings:
                for attribute, sudoers in settings[str(self)].items():
                    self.unapply(guid, attribute, sudoers)

        for gpo in changed_gpo_list:
            if gpo.file_sys_path:
                xml = 'MACHINE/VGP/VTLA/Sudo/SudoersConfiguration/manifest.xml'
                path = os.path.join(gpo.file_sys_path, xml)
                xml_conf = self.parse(path)
                if not xml_conf:
                    continue
                policy = xml_conf.find('policysetting')
                data = policy.find('data')
                sudo_entries = []
                for entry in data.findall('sudoers_entry'):
                    command = entry.find('command').text
                    user = entry.find('user').text
                    listelements = entry.findall('listelement')
                    principals = []
                    for listelement in listelements:
                        principals.extend(listelement.findall('principal'))
                    if len(principals) > 0:
                        # NOTE(review): non-user principals are rendered as
                        # 'name%' here; sudo's group syntax is '%name' —
                        # confirm which form is intended.
                        uname = ','.join([u.text if u.attrib['type'] == 'user'
                                          else '%s%%' % u.text for u in principals])
                    else:
                        uname = 'ALL'
                    # A missing <password> element means NOPASSWD.
                    nopassword = entry.find('password') is None
                    np_entry = ' NOPASSWD:' if nopassword else ''
                    p = '%s ALL=(%s)%s %s' % (uname, user, np_entry, command)
                    sudo_entries.append(p)
                # Each GPO applies only one set of sudoers, in a
                # set of files, so the attribute does not need uniqueness.
                attribute = self.generate_attribute(gpo.name)
                # The value hash is generated from the sudo_entries, ensuring
                # any changes to this GPO will cause the files to be rewritten.
                value_hash = self.generate_value_hash(*sudo_entries)
                self.apply(gpo.name, attribute, value_hash, sudo_applier_func,
                           sdir, sudo_entries)
                # Cleanup any old entries that are no longer part of the policy
                self.clean(gpo.name, keep=[attribute])

    def rsop(self, gpo):
        """Report the sudoers lines this GPO carries (no changes made)."""
        output = {}
        xml = 'MACHINE/VGP/VTLA/Sudo/SudoersConfiguration/manifest.xml'
        if gpo.file_sys_path:
            path = os.path.join(gpo.file_sys_path, xml)
            xml_conf = self.parse(path)
            if not xml_conf:
                return output
            policy = xml_conf.find('policysetting')
            data = policy.find('data')
            for entry in data.findall('sudoers_entry'):
                command = entry.find('command').text
                user = entry.find('user').text
                listelements = entry.findall('listelement')
                principals = []
                for listelement in listelements:
                    principals.extend(listelement.findall('principal'))
                if len(principals) > 0:
                    uname = ','.join([u.text if u.attrib['type'] == 'user'
                                      else '%s%%' % u.text for u in principals])
                else:
                    uname = 'ALL'
                nopassword = entry.find('password') is None
                np_entry = ' NOPASSWD:' if nopassword else ''
                p = '%s ALL=(%s)%s %s' % (uname, user, np_entry, command)
                if str(self) not in output.keys():
                    output[str(self)] = []
                output[str(self)].append(p)
        return output
diff --git a/python/samba/gp/vgp_symlink_ext.py b/python/samba/gp/vgp_symlink_ext.py
new file mode 100644
index 0000000..4f85264
--- /dev/null
+++ b/python/samba/gp/vgp_symlink_ext.py
@@ -0,0 +1,76 @@
+# vgp_symlink_ext samba gpo policy
+# Copyright (C) David Mulder <dmulder@suse.com> 2020
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+from samba.gp.gpclass import gp_xml_ext, gp_file_applier
+from samba.gp.util.logging import log
+
class vgp_symlink_ext(gp_xml_ext, gp_file_applier):
    """Creates symbolic links described by a VGP Symlink manifest."""

    def __str__(self):
        return 'VGP/Unix Settings/Symbolic Links'

    def process_group_policy(self, deleted_gpo_list, changed_gpo_list):
        """Unapply links of deleted GPOs, then apply links of changed GPOs."""
        # Remove links previously recorded for GPOs that no longer apply.
        for guid, settings in deleted_gpo_list:
            if str(self) not in settings:
                continue
            for attribute, symlink in settings[str(self)].items():
                self.unapply(guid, attribute, symlink)

        for gpo in changed_gpo_list:
            if not gpo.file_sys_path:
                continue
            manifest = os.path.join(
                gpo.file_sys_path,
                'MACHINE/VGP/VTLA/Unix/Symlink/manifest.xml')
            xml_conf = self.parse(manifest)
            if not xml_conf:
                continue
            data = xml_conf.find('policysetting').find('data')
            for entry in data.findall('file_properties'):
                source = entry.find('source').text
                target = entry.find('target').text

                def applier_func(source, target):
                    # Only one link can occupy the target path; warn and
                    # skip rather than clobber an existing file.
                    if os.path.exists(target):
                        log.warn('Symlink destination exists', target)
                        return []
                    os.symlink(source, target)
                    return [target]

                # The target path is the stable attribute name; the
                # source (what the link points at) is the part that may
                # change, so it drives the value hash.
                self.apply(gpo.name, target,
                           self.generate_value_hash(source),
                           applier_func, source, target)

    def rsop(self, gpo):
        """Report the 'ln -s' operations this GPO would perform."""
        output = {}
        if not gpo.file_sys_path:
            return output
        manifest = os.path.join(
            gpo.file_sys_path,
            'MACHINE/VGP/VTLA/Unix/Symlink/manifest.xml')
        xml_conf = self.parse(manifest)
        if not xml_conf:
            return output
        data = xml_conf.find('policysetting').find('data')
        for entry in data.findall('file_properties'):
            source = entry.find('source').text
            target = entry.find('target').text
            output.setdefault(str(self), []).append(
                'ln -s %s %s' % (source, target))
        return output
diff --git a/python/samba/gp_parse/__init__.py b/python/samba/gp_parse/__init__.py
new file mode 100644
index 0000000..d45b9c5
--- /dev/null
+++ b/python/samba/gp_parse/__init__.py
@@ -0,0 +1,185 @@
+# GPO Parser for generic extensions
+#
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2018
+# Written by Garming Sam <garming@catalyst.net.nz>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from xml.dom import minidom
+from io import BytesIO
+from xml.etree.ElementTree import ElementTree, fromstring, tostring
+from hashlib import md5
+from samba.common import get_bytes
+
+
# Kinds of value that generalize_xml() can replace with an XML entity
# (see entity_type_to_string() below for their symbolic names).
ENTITY_USER_ID = 0
ENTITY_SDDL_ACL = 1
ENTITY_NETWORK_PATH = 2
+
+
class GPNoParserException(Exception):
    """Raised when a GPO file has no parser able to handle it."""
+
class GPGeneralizeException(Exception):
    """Raised when generalizing a GPO value to an XML entity fails."""
+
+
def entity_type_to_string(ent_type):
    """Return the symbolic name for an ENTITY_* constant, or None for an
    unknown value."""
    names = {
        ENTITY_USER_ID: "USER_ID",
        ENTITY_SDDL_ACL: "SDDL_ACL",
        ENTITY_NETWORK_PATH: "NETWORK_PATH",
    }
    return names.get(ent_type)
+
+
+# [MS-GPIPSEC] (LDAP)
+# [MS-GPDPC] Deployed Printer Connections (LDAP)
+# [MS-GPPREF] Preferences Extension (XML)
+# [MS-GPWL] Wireless/Wired Protocol Extension (LDAP)
class GPParser(object):
    """Base class for GPO file parsers.

    Subclasses convert a GPO's native on-disk format to and from a
    generalized XML representation in which user IDs, SDDL ACLs and
    network paths are replaced by stable XML entities.
    """
    # Default encodings; subclasses override as their format requires.
    encoding = 'utf-16'
    output_encoding = 'utf-8'

    def parse(self, contents):
        """Parse the raw file contents (bytes).  No-op in the base class."""
        pass

    def write_xml(self, filename):
        """Write a placeholder XML document for files with no parser."""
        with open(filename, 'w') as f:
            f.write('<?xml version="1.0" encoding="utf-8"?><UnknownFile/>')

    def load_xml(self, filename):
        """Load state back from XML.  No-op in the base class."""
        pass

    def write_binary(self, filename):
        """Write the native on-disk form; unavailable for unknown files.

        :raises GPNoParserException: always, in the base class
        """
        raise GPNoParserException("This file has no parser available.")

    def write_pretty_xml(self, xml_element, handle):
        """Serialize xml_element to handle with an XML declaration and
        indentation.

        ElementTree has no pretty-printer, so the tree is serialized to
        a temporary buffer and re-rendered through minidom.
        """
        et = ElementTree(xml_element)
        temporary_bytes = BytesIO()
        et.write(temporary_bytes, encoding=self.output_encoding,
                 xml_declaration=True)
        minidom_parsed = minidom.parseString(temporary_bytes.getvalue())
        handle.write(minidom_parsed.toprettyxml(encoding=self.output_encoding))

    def new_xml_entity(self, name, ent_type):
        """Return a new generalized entity reference for *name*.

        The identifier is an MD5 digest of the value, used only as an
        opaque stable token (not for security).

        :raises GPGeneralizeException: if ent_type is not a known
            ENTITY_* constant
        """
        identifier = md5(get_bytes(name)).hexdigest()

        type_str = entity_type_to_string(ent_type)

        if type_str is None:
            raise GPGeneralizeException("No such entity type")

        # For formatting reasons, align the length of the entities
        longest = entity_type_to_string(ENTITY_NETWORK_PATH)
        type_str = type_str.center(len(longest), '_')

        return "&SAMBA__{}__{}__;".format(type_str, identifier)

    def _generalize_marked_elements(self, root, attr_name, ent_type,
                                    entities, global_entities):
        """Replace the text of every element marked attr_name="TRUE" with
        a generalized entity.

        Records (entity, original) pairs in *entities* and reuses or
        extends the cross-file mapping in *global_entities*.  Shared by
        the user_id and acl passes of generalize_xml(), which were
        previously duplicated.
        """
        marked = root.findall('.//*[@%s="TRUE"]' % attr_name)
        # Sort for a deterministic replacement order.
        marked.sort(key=lambda x: x.tag)

        for elem in marked:
            old_text = elem.text
            if old_text is None or old_text == '':
                continue

            if old_text in global_entities:
                elem.text = global_entities[old_text]
                entities.append((elem.text, old_text))
            else:
                elem.text = self.new_xml_entity(old_text, ent_type)
                entities.append((elem.text, old_text))
                global_entities.update([(old_text, elem.text)])

    def generalize_xml(self, root, out_file, global_entities):
        """Generalize user IDs, ACLs and network paths in *root*, write
        the result to *out_file* and return the (entity, original) pairs.
        """
        entities = []

        # User IDs and ACLs are whole-value replacements.
        self._generalize_marked_elements(root, 'user_id', ENTITY_USER_ID,
                                         entities, global_entities)
        self._generalize_marked_elements(root, 'acl', ENTITY_SDDL_ACL,
                                         entities, global_entities)

        # Network paths keep their share suffix: only the leading
        # \\server portion is generalized, the rest is appended verbatim.
        share_paths = root.findall('.//*[@network_path="TRUE"]')
        share_paths.sort(key=lambda x: x.tag)

        for elem in share_paths:
            old_text = elem.text

            if old_text is None or old_text == '':
                continue

            stripped = old_text.lstrip('\\')
            file_server = stripped.split('\\')[0]

            server_index = old_text.find(file_server)

            remaining = old_text[server_index + len(file_server):]
            old_text = old_text[:server_index] + file_server

            if old_text in global_entities:
                elem.text = global_entities[old_text] + remaining
                to_put = global_entities[old_text]
                entities.append((to_put, old_text))
            else:
                to_put = self.new_xml_entity(old_text,
                                             ENTITY_NETWORK_PATH)
                elem.text = to_put + remaining

                entities.append((to_put, old_text))
                global_entities.update([(old_text, to_put)])

        # Call any file specific customization of entities
        # (which appear in any subclasses).
        entities.extend(self.custom_entities(root, global_entities))

        output_xml = tostring(root)

        # ElementTree escapes '&' in text; un-escape our entity
        # references so they survive as real entities in the output.
        for ent in entities:
            entb = get_bytes(ent[0])
            output_xml = output_xml.replace(entb.replace(b'&', b'&amp;'), entb)

        with open(out_file, 'wb') as f:
            f.write(output_xml)

        return entities

    def custom_entities(self, root, global_entities):
        """Hook for subclasses to return extra (entity, original) pairs."""
        return []
diff --git a/python/samba/gp_parse/gp_aas.py b/python/samba/gp_parse/gp_aas.py
new file mode 100644
index 0000000..7aa19c0
--- /dev/null
+++ b/python/samba/gp_parse/gp_aas.py
@@ -0,0 +1,25 @@
+# GPO Parser for application advertise extensions
+#
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2018
+# Written by Garming Sam <garming@catalyst.net.nz>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from samba.gp_parse import GPParser
+
+# [MS-GPSI] Software Installation Protocol (+LDAP)
class GPAasParser(GPParser):
    """Parser stub for application advertise (.aas) files ([MS-GPSI])."""
    # TODO More work needed to deconstruct format
diff --git a/python/samba/gp_parse/gp_csv.py b/python/samba/gp_parse/gp_csv.py
new file mode 100644
index 0000000..ebe9c4b
--- /dev/null
+++ b/python/samba/gp_parse/gp_csv.py
@@ -0,0 +1,102 @@
+# GPO Parser for audit extensions
+#
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2018
+# Written by Garming Sam <garming@catalyst.net.nz>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import codecs
+import csv
+import io
+
+from io import BytesIO
+from xml.etree.ElementTree import Element, SubElement
+from samba.gp_parse import GPParser
+# [MS-GPAC] Group Policy Audit Configuration
class GPAuditCsvParser(GPParser):
    """Parse audit policy CSV files ([MS-GPAC]) to and from XML."""
    encoding = 'utf-8'
    # Kept as class attributes for backward compatibility, but __init__
    # shadows them per instance so the mutable list is never shared
    # between instances.
    header = None
    lines = []

    def __init__(self):
        super().__init__()
        self.header = None
        self.lines = []

    def parse(self, contents):
        """Parse raw CSV bytes into self.header and self.lines.

        The first row becomes the column header; every following row is
        stored as a dict keyed by those column names.
        """
        self.lines = []
        reader = csv.reader(codecs.getreader(self.encoding)(BytesIO(contents)))

        self.header = next(reader)
        for row in reader:
            line = {}
            for i, x in enumerate(row):
                line[self.header[i]] = x

            self.lines.append(line)

    def write_xml(self, filename):
        """Write the parsed CSV as generalizable XML.

        The first <Row> holds the header.  'Policy Target' cells are
        tagged user_id="TRUE" and global SACL setting values acl="TRUE"
        so GPParser.generalize_xml() can replace them with entities.
        """
        with open(filename, 'wb') as f:
            root = Element('CsvFile')
            child = SubElement(root, 'Row')
            for e in self.header:
                value = SubElement(child, 'Value')
                value.text = e

            for line in self.lines:
                child = SubElement(root, 'Row')
                for e, title in [(line[x], x) for x in self.header]:
                    value = SubElement(child, 'Value')
                    value.text = e

                    # Metadata for generalization
                    if title == 'Policy Target' and e != '':
                        value.attrib['user_id'] = 'TRUE'
                    if (title == 'Setting Value' and e != '' and
                        (line['Subcategory'] == 'RegistryGlobalSacl' or
                         line['Subcategory'] == 'FileGlobalSacl')):
                        value.attrib['acl'] = 'TRUE'

            self.write_pretty_xml(root, f)

    def load_xml(self, root):
        """Load header and lines back from XML written by write_xml.

        ElementTree yields str (or None) in Python 3, so the former
        bytes-decoding branches are gone; missing text nodes are
        normalized to '' (previously a None header cell would crash).
        """
        header = True
        self.lines = []

        for r in root.findall('Row'):
            if header:
                header = False
                self.header = [v.text if v.text is not None else ''
                               for v in r.findall('Value')]
            else:
                line = {}
                for i, v in enumerate(r.findall('Value')):
                    line[self.header[i]] = v.text if v.text is not None else ''

                self.lines.append(line)

    def write_binary(self, filename):
        """Write back the native form ("binary" here means utf-8 CSV)."""
        with open(filename, 'w', encoding=self.encoding) as f:
            writer = csv.writer(f, quoting=csv.QUOTE_MINIMAL)
            writer.writerow(self.header)
            for line in self.lines:
                writer.writerow([line[x] for x in self.header])
diff --git a/python/samba/gp_parse/gp_inf.py b/python/samba/gp_parse/gp_inf.py
new file mode 100644
index 0000000..51035e6
--- /dev/null
+++ b/python/samba/gp_parse/gp_inf.py
@@ -0,0 +1,378 @@
+# GPO Parser for security extensions
+#
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2018
+# Written by Garming Sam <garming@catalyst.net.nz>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import codecs
+import collections
+import re
+
+from abc import ABCMeta, abstractmethod
+from xml.etree.ElementTree import Element, SubElement
+
+from samba.gp_parse import GPParser
+
+# [MS-GPSB] Security Protocol Extension
class GptTmplInfParser(GPParser):
    """Parse GptTmpl.inf security templates ([MS-GPSB]) to and from XML.

    The file is a utf-16 ini-style document; each known section name is
    handled by a dedicated parameter parser defined below.
    """
    sections = None
    encoding = 'utf-16'
    output_encoding = 'utf-16le'

    class AbstractParam(metaclass=ABCMeta):
        """Base class for one section's parameter list.

        (Was a Python-2 ``__metaclass__`` assignment, which has no
        effect in Python 3; all concrete subclasses implement every
        abstract method.)
        """

        def __init__(self):
            self.param_list = []

        @abstractmethod
        def parse(self, line):
            pass

        @abstractmethod
        def write_section(self, header, fp):
            pass

        @abstractmethod
        def build_xml(self, xml_parent):
            pass

        @abstractmethod
        def from_xml(self, section):
            pass

    class IniParam(AbstractParam):
        """Plain 'Key = Value' section; param_list = [(Key, Value), ...]"""

        def parse(self, line):
            key, val = line.split('=')

            self.param_list.append((key.strip(),
                                    val.strip()))

        def write_section(self, header, fp):
            # Sections with no parameters are omitted entirely.
            if len(self.param_list) == 0:
                return
            fp.write(u'[%s]\r\n' % header)
            for key_out, val_out in self.param_list:
                fp.write(u'%s = %s\r\n' % (key_out,
                                           val_out))

        def build_xml(self, xml_parent):
            for key_ini, val_ini in self.param_list:
                child = SubElement(xml_parent, 'Parameter')
                key = SubElement(child, 'Key')
                value = SubElement(child, 'Value')
                key.text = key_ini
                value.text = val_ini

        def from_xml(self, section):
            for param in section.findall('Parameter'):
                key = param.find('Key').text
                value = param.find('Value').text
                if value is None:
                    value = ''

                self.param_list.append((key, value))

    class RegParam(AbstractParam):
        """Registry lines kept verbatim ('=' may occur inside a key);
        param_list = [Value, Value, ...]"""

        def parse(self, line):
            # = can occur in a registry key, so don't parse these
            self.param_list.append(line)

        def write_section(self, header, fp):
            if len(self.param_list) == 0:
                return
            fp.write(u'[%s]\r\n' % header)
            for param in self.param_list:
                fp.write(u'%s\r\n' % param)

        def build_xml(self, xml_parent):
            for val_ini in self.param_list:
                child = SubElement(xml_parent, 'Parameter')
                value = SubElement(child, 'Value')
                value.text = val_ini

        def from_xml(self, section):
            for param in section.findall('Parameter'):
                value = param.find('Value').text
                if value is None:
                    value = ''

                self.param_list.append(value)

    class PrivSIDListParam(AbstractParam):
        """Privilege rights; param_list = [(Key, [SID, SID, ...]), ...]"""

        def parse(self, line):
            key, val = line.split('=')

            self.param_list.append((key.strip(),
                                    [x.strip() for x in val.split(',')]))

        def write_section(self, header, fp):
            if len(self.param_list) == 0:
                return
            fp.write(u'[%s]\r\n' % header)
            for key_out, val in self.param_list:
                val_out = u','.join(val)
                fp.write(u'%s = %s\r\n' % (key_out, val_out))

        def build_xml(self, xml_parent):
            for key_ini, sid_list in self.param_list:
                child = SubElement(xml_parent, 'Parameter')
                key = SubElement(child, 'Key')
                key.text = key_ini
                for val_ini in sid_list:
                    value = SubElement(child, 'Value')
                    # SIDs are user identities; mark for generalization.
                    value.attrib['user_id'] = 'TRUE'
                    value.text = val_ini

        def from_xml(self, section):
            for param in section.findall('Parameter'):
                key = param.find('Key').text

                sid_list = []
                for val in param.findall('Value'):
                    value = val.text
                    if value is None:
                        value = ''

                    sid_list.append(value)

                self.param_list.append((key, sid_list))

    class NameModeACLParam(AbstractParam):
        """Quoted CSV of name, mode, ACL; param_list = [[Name, Mode, ACL],]"""

        def parse(self, line):
            parameters = [None, None, None]
            current_arg = 0

            while line != '':
                # Read quoted string
                if line[:1] == '"':
                    line = line[1:]
                    findex = line.find('"')
                    parameters[current_arg] = line[:findex]
                    line = line[findex + 1:]
                # Skip past delimiter
                elif line[:1] == ',':
                    line = line[1:]
                    current_arg += 1
                # Read unquoted string
                else:
                    findex = line.find(',')
                    if findex == -1:
                        # Final unquoted field: consume the rest of the
                        # line.  (find() returning -1 previously
                        # truncated the value and looped forever.)
                        parameters[current_arg] = line
                        line = ''
                    else:
                        parameters[current_arg] = line[:findex]
                        line = line[findex:]

            self.param_list.append(parameters)

        def write_section(self, header, fp):
            if len(self.param_list) == 0:
                return
            fp.write(u'[%s]\r\n' % header)
            for param in self.param_list:
                fp.write(u'"%s",%s,"%s"\r\n' % tuple(param))

        def build_xml(self, xml_parent):
            for name_mode_acl in self.param_list:
                child = SubElement(xml_parent, 'Parameter')

                value = SubElement(child, 'Value')
                value.text = name_mode_acl[0]

                value = SubElement(child, 'Value')
                value.text = name_mode_acl[1]

                value = SubElement(child, 'Value')
                # The third field is an SDDL ACL; mark for generalization.
                value.attrib['acl'] = 'TRUE'
                value.text = name_mode_acl[2]

        def from_xml(self, section):
            for param in section.findall('Parameter'):
                name_mode_acl = [x.text if x.text else '' for x in param.findall('Value')]
                self.param_list.append(name_mode_acl)

    class MemberSIDListParam(AbstractParam):
        """Group membership;
        param_list = [([XXXX, Memberof|Members], [SID, SID...]), ...]"""

        def parse(self, line):
            key, val = line.split('=')

            key = key.strip()

            self.param_list.append((key.split('__'),
                                    [x.strip() for x in val.split(',')]))

        def write_section(self, header, fp):
            if len(self.param_list) == 0:
                return
            fp.write(u'[%s]\r\n' % header)

            for key, val in self.param_list:
                key_out = u'__'.join(key)
                val_out = u','.join(val)
                fp.write(u'%s = %s\r\n' % (key_out, val_out))

        def build_xml(self, xml_parent):
            for key_ini, sid_list in self.param_list:
                child = SubElement(xml_parent, 'Parameter')
                key = SubElement(child, 'Key')
                key.text = key_ini[0]
                key.attrib['member_type'] = key_ini[1]
                key.attrib['user_id'] = 'TRUE'

                for val_ini in sid_list:
                    value = SubElement(child, 'Value')
                    value.attrib['user_id'] = 'TRUE'
                    value.text = val_ini

        def from_xml(self, section):
            for param in section.findall('Parameter'):
                key = param.find('Key')
                member_type = key.attrib['member_type']

                sid_list = []
                for val in param.findall('Value'):
                    value = val.text
                    if value is None:
                        value = ''

                    sid_list.append(value)

                self.param_list.append(([key.text, member_type], sid_list))

    class UnicodeParam(AbstractParam):
        """Fixed [Unicode] section; contents are not stored."""

        def parse(self, line):
            pass

        def write_section(self, header, fp):
            fp.write(u'[Unicode]\r\nUnicode=yes\r\n')

        def build_xml(self, xml_parent):
            # We do not bother storing this field
            pass

        def from_xml(self, section):
            # We do not bother storing this field
            pass

    class VersionParam(AbstractParam):
        """Fixed [Version] section; contents are not stored."""

        def parse(self, line):
            pass

        def write_section(self, header, fp):
            out = u'[Version]\r\nsignature="$CHICAGO$"\r\nRevision=1\r\n'
            fp.write(out)

        def build_xml(self, xml_parent):
            # We do not bother storing this field
            pass

        def from_xml(self, section):
            # We do not bother storing this field
            pass

    def _default_sections(self):
        """Return the known section names mapped to fresh parsers, in
        output order (shared by parse() and load_xml(), which previously
        duplicated this table)."""
        return collections.OrderedDict([
            (u'Unicode', self.UnicodeParam()),
            (u'Version', self.VersionParam()),

            (u'System Access', self.IniParam()),
            (u'Kerberos Policy', self.IniParam()),
            (u'System Log', self.IniParam()),
            (u'Security Log', self.IniParam()),
            (u'Application Log', self.IniParam()),
            (u'Event Audit', self.IniParam()),
            (u'Registry Values', self.RegParam()),
            (u'Privilege Rights', self.PrivSIDListParam()),
            (u'Service General Setting', self.NameModeACLParam()),
            (u'Registry Keys', self.NameModeACLParam()),
            (u'File Security', self.NameModeACLParam()),
            (u'Group Membership', self.MemberSIDListParam()),
        ])

    def parse(self, contents):
        """Decode the utf-16 .inf contents and feed each line to the
        parser of its enclosing section."""
        inf_file = contents.decode(self.encoding)

        self.sections = self._default_sections()

        current_param_parser = None

        for line in inf_file.splitlines():
            match = re.match(r'\[(.*)\]', line)
            if match:
                header_name = match.group(1)
                if header_name in self.sections:
                    current_param_parser = self.sections[header_name]
                    continue
                # NOTE(review): an unknown section header falls through
                # to the current section's parser — confirm intended.

            current_param_parser.parse(line)

    def write_binary(self, filename):
        """Write the .inf back out as utf-16le, BOM first."""
        with codecs.open(filename, 'wb+',
                         self.output_encoding) as f:
            # Write the byte-order mark
            f.write(u'\ufeff')

            for s in self.sections:
                self.sections[s].write_section(s, f)

    def write_xml(self, filename):
        """Write every section's parameters as generalizable XML."""
        with open(filename, 'wb') as f:
            root = Element('GptTmplInfFile')

            for sec_inf in self.sections:
                section = SubElement(root, 'Section')
                section.attrib['name'] = sec_inf

                self.sections[sec_inf].build_xml(section)

            self.write_pretty_xml(root, f)

    def load_xml(self, root):
        """Rebuild the section parsers from XML produced by write_xml."""
        self.sections = self._default_sections()

        for s in root.findall('Section'):
            self.sections[s.attrib['name']].from_xml(s)
diff --git a/python/samba/gp_parse/gp_ini.py b/python/samba/gp_parse/gp_ini.py
new file mode 100644
index 0000000..e9b7ad2
--- /dev/null
+++ b/python/samba/gp_parse/gp_ini.py
@@ -0,0 +1,228 @@
+# GPO Parser for extensions with ini files
+#
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2018
+# Written by Garming Sam <garming@catalyst.net.nz>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import codecs
+import collections
+import re
+
+from xml.etree.ElementTree import Element, SubElement
+from configparser import ConfigParser
+from io import StringIO
+
+from samba.gp_parse import GPParser, ENTITY_USER_ID
+
+# [MS-GPFR] Group Policy Folder Redirection
+# [MS-GPSCR] Scripts Extension
class GPIniParser(GPParser):
    """Parse ini-based GPO files ([MS-GPFR], [MS-GPSCR]) to and from XML."""
    ini_conf = None

    @staticmethod
    def _make_ini_conf():
        """Return a ConfigParser configured for GPO ini files: ordered
        sections, no '%' interpolation, case-preserving option names.
        (Shared by parse() and load_xml(), which previously duplicated
        this setup.)"""
        ini_conf = ConfigParser(dict_type=collections.OrderedDict,
                                interpolation=None)
        ini_conf.optionxform = str
        return ini_conf

    def parse(self, contents):
        """Parse raw ini bytes (self.encoding) into self.ini_conf."""
        self.ini_conf = self._make_ini_conf()

        self.ini_conf.read_file(StringIO(contents.decode(self.encoding)))

    def build_xml_parameter(self, section_xml, section, key_ini, val_ini):
        """Append a <Parameter><Key/><Value/></Parameter> child and
        return it, so subclasses can attach generalization metadata."""
        child = SubElement(section_xml, 'Parameter')
        key = SubElement(child, 'Key')
        value = SubElement(child, 'Value')
        key.text = key_ini
        value.text = val_ini

        return child

    def load_xml_parameter(self, param_xml, section):
        """Store one XML <Parameter> back into the ini; returns (key, value).

        A missing text node is normalized to ''.
        """
        key = param_xml.find('Key').text
        value = param_xml.find('Value').text
        if value is None:
            value = ''
        self.ini_conf.set(section, key, value)

        return (key, value)

    def build_xml_section(self, root_xml, sec_ini):
        """Append and return a <Section name=.../> element."""
        section = SubElement(root_xml, 'Section')
        section.attrib['name'] = sec_ini

        return section

    def load_xml_section(self, section_xml):
        """Create the ini section for an XML <Section>; returns its name."""
        section_name = section_xml.attrib['name']
        self.ini_conf.add_section(section_name)

        return section_name

    def write_xml(self, filename):
        """Write the ini contents as generalizable XML."""
        with open(filename, 'wb') as f:
            root = Element('IniFile')

            for sec_ini in self.ini_conf.sections():
                section = self.build_xml_section(root, sec_ini)

                # raw=True: values were stored un-interpolated.
                for key_ini, val_ini in self.ini_conf.items(sec_ini, raw=True):
                    self.build_xml_parameter(section, sec_ini, key_ini,
                                             val_ini)

            self.write_pretty_xml(root, f)

    def load_xml(self, root):
        """Rebuild self.ini_conf from XML produced by write_xml."""
        self.ini_conf = self._make_ini_conf()

        for s in root.findall('Section'):
            section_name = self.load_xml_section(s)

            for param in s.findall('Parameter'):
                self.load_xml_parameter(param, section_name)

    def write_binary(self, filename):
        """Write the ini back out in its native encoding."""
        with codecs.open(filename, 'wb+', self.encoding) as f:
            self.ini_conf.write(f)
+
+
class GPTIniParser(GPIniParser):
    """GPT.ini parser; tolerates RSAT's Latin-1 encoded files."""

    encoding = 'utf-8'

    def parse(self, contents):
        try:
            # First attempt the declared utf-8 encoding.
            super().parse(contents)
        except UnicodeDecodeError:
            # Fallback to Latin-1 which RSAT appears to use
            fallback = StringIO(contents.decode('iso-8859-1'))
            self.ini_conf = ConfigParser(dict_type=collections.OrderedDict,
                                         interpolation=None)
            self.ini_conf.optionxform = str
            self.ini_conf.read_file(fallback)
+
+
class GPScriptsIniParser(GPIniParser):
    """Scripts.ini parser; script command lines are network paths."""

    def build_xml_parameter(self, section_xml, section, key_ini, val_ini):
        child = super().build_xml_parameter(section_xml, section,
                                            key_ini, val_ini)

        # Keys such as '0CmdLine' hold script locations, which may be
        # network shares; tag their values for generalization.
        if re.match(r'\d+CmdLine$', key_ini):
            child.find('Value').attrib['network_path'] = 'TRUE'

        return child
+
+
class GPFDeploy1IniParser(GPIniParser):
    """fdeploy1.ini parser ([MS-GPFR] folder redirection).

    The [folder_redirection] header section maps {GUID} keys to
    semicolon-separated SID lists; other sections describe individual
    redirections whose FullPath values are network paths.
    """

    def build_xml_parameter(self, section_xml, section, key_ini, val_ini):
        """Build the XML parameter, splitting SID lists into one
        <Value user_id="TRUE"> element per SID so each can be
        generalized independently."""
        parent_return = super().build_xml_parameter(section_xml, section,
                                                    key_ini, val_ini)
        # Add generalization metadata and parse out SID list
        if section.lower() == 'folder_redirection':
            # Process the header section
            # {GUID} = S-1-1-0;S-1-1-0

            # Remove the un-split SID values
            key = parent_return.find('Value')
            parent_return.remove(key)

            sid_list = val_ini.strip().strip(';').split(';')

            for sid in sid_list:
                value = SubElement(parent_return, 'Value')
                value.text = sid
                value.attrib['user_id'] = 'TRUE'

        else:
            # Process redirection sections
            # Only FullPath should be a network path
            if key_ini == 'FullPath':
                key = parent_return.find('Value')
                key.attrib['network_path'] = 'TRUE'

        return parent_return

    def load_xml_parameter(self, param_xml, section):
        """Inverse of build_xml_parameter: re-join split SID values into
        one semicolon-separated string before storing in the ini."""
        # Re-join the SID list before entering ConfigParser
        if section.lower() == 'folder_redirection':
            key = param_xml.find('Key').text
            values = param_xml.findall('Value')

            if len(values) == 1:
                # There appears to be a convention of a trailing semi-colon
                # with only one value in the SID list.
                value = values[0].text + ';'
            else:
                value = ';'.join([x.text for x in values])

            self.ini_conf.set(section, key, value)

            return (key, value)

        # Do the normal ini code for other sections
        return super().load_xml_parameter(param_xml, section)

    def build_xml_section(self, root_xml, sec_ini):
        """Split 'GUID_SID' section names into separate XML attributes
        so the SID part can be generalized by custom_entities()."""
        section = SubElement(root_xml, 'Section')

        if (sec_ini.lower() != 'folder_redirection' and
            sec_ini.lower() != 'version'):
            guid, sid = sec_ini.split('_')
            section.attrib['fdeploy_GUID'] = guid
            section.attrib['fdeploy_SID'] = sid
        else:
            section.attrib['name'] = sec_ini

        return section

    def load_xml_section(self, section_xml):
        """Inverse of build_xml_section: rebuild 'GUID_SID' names."""
        # Construct the name from GUID + SID if no name exists
        if 'name' in section_xml.attrib:
            section_name = section_xml.attrib['name']
        else:
            guid = section_xml.attrib['fdeploy_GUID']
            sid = section_xml.attrib['fdeploy_SID']
            section_name = guid + '_' + sid

        self.ini_conf.add_section(section_name)
        return section_name

    def custom_entities(self, root, global_entities):
        """Generalize the SIDs embedded in fdeploy_SID attributes, which
        the attribute-text pass of generalize_xml() cannot reach.
        Returns the new (entity, original) pairs."""
        entities = []
        fdeploy_sids = root.findall('.//Section[@fdeploy_SID]')
        fdeploy_sids.sort(key = lambda x: x.tag)

        for sid in fdeploy_sids:
            old_attrib = sid.attrib['fdeploy_SID']

            if old_attrib in global_entities:
                new_attrib = global_entities[old_attrib]
            else:
                new_attrib = self.new_xml_entity(old_attrib, ENTITY_USER_ID)
                entities.append((new_attrib, old_attrib))

                global_entities.update([(old_attrib, new_attrib)])

            sid.attrib['fdeploy_SID'] = new_attrib

        return entities
diff --git a/python/samba/gp_parse/gp_pol.py b/python/samba/gp_parse/gp_pol.py
new file mode 100644
index 0000000..1d5f348
--- /dev/null
+++ b/python/samba/gp_parse/gp_pol.py
@@ -0,0 +1,151 @@
+# GPO Parser for registry extension
+#
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2018
+# Written by Garming Sam <garming@catalyst.net.nz>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import base64
+
+from xml.etree.ElementTree import Element, SubElement
+
+from samba.dcerpc import preg
+from samba.dcerpc import misc
+from samba.ndr import ndr_pack, ndr_unpack
+
+from samba.gp_parse import GPParser
+
+# [MS-GPREG]
+# [MS-GPFAS] Firewall and Advanced Security
+# [MS-GPEF] Encrypting File System
+# [MS-GPNRPT] Name Resolution Table
class GPPolParser(GPParser):
    """Parser for Registry.pol registry policy files ([MS-GPREG]).

    Converts between the binary NDR representation (preg.file) and an
    XML document, in both directions.
    """

    # Parsed preg.file structure; set by parse() or load_xml().
    pol_file = None

    # Symbolic names for the registry value type constants, used for
    # the informational type_name attribute in the XML output.
    reg_type = {
        misc.REG_NONE: "REG_NONE",
        misc.REG_SZ: "REG_SZ",
        misc.REG_DWORD: "REG_DWORD",
        misc.REG_DWORD_BIG_ENDIAN: "REG_DWORD_BIG_ENDIAN",
        misc.REG_QWORD: "REG_QWORD",
        misc.REG_EXPAND_SZ: "REG_EXPAND_SZ",
        misc.REG_MULTI_SZ: "REG_MULTI_SZ",
        misc.REG_BINARY: "REG_BINARY"
    }

    def map_reg_type(self, val):
        """Return the symbolic name for a misc.REG_* constant.

        :param val: registry value type constant
        :return: name string, or "REG_UNKNOWN" for unrecognised values
        """
        # dict.get with a default replaces the manual None check.
        return self.reg_type.get(val, "REG_UNKNOWN")

    def parse(self, contents):
        """Parse raw binary Registry.pol contents into self.pol_file.

        :param contents: bytes of the binary policy file
        """
        self.pol_file = ndr_unpack(preg.file, contents)

    def load_xml(self, root):
        """Populate self.pol_file from an XML tree made by write_xml().

        :param root: the <PolFile> root element
        """
        self.pol_file = preg.file()
        self.pol_file.header.signature = root.attrib['signature']
        self.pol_file.header.version = int(root.attrib['version'])
        self.pol_file.num_entries = int(root.attrib['num_entries'])

        entries = []
        for e in root.findall('Entry'):
            entry = preg.entry()
            entry_type = int(e.attrib['type'])

            entry.type = entry_type

            entry.keyname = e.find('Key').text
            # An empty <ValueName/> element yields None; normalise to ''.
            value_name = e.find('ValueName').text
            if value_name is None:
                value_name = ''

            entry.valuename = value_name
            # entry.size is recomputed by the NDR layer, so any size
            # attribute in the XML is deliberately ignored here.

            if misc.REG_MULTI_SZ == entry_type:
                values = [x.text for x in e.findall('Value')]
                if values == [None]:
                    # A lone empty <Value/> represents an empty list.
                    data = u'\x00'
                else:
                    # Map empty <Value/> elements (text None) to '' so
                    # that the join cannot fail with a TypeError.
                    data = u'\x00'.join(x if x is not None else u''
                                        for x in values) + u'\x00\x00'
                entry.data = data.encode('utf-16le')
            elif misc.REG_NONE == entry_type:
                # REG_NONE carries no data.
                pass
            elif (misc.REG_SZ == entry_type or
                  misc.REG_EXPAND_SZ == entry_type):
                string_val = e.find('Value').text
                if string_val is None:
                    string_val = ''
                entry.data = string_val
            elif (misc.REG_DWORD == entry_type or
                  misc.REG_DWORD_BIG_ENDIAN == entry_type or
                  misc.REG_QWORD == entry_type):
                entry.data = int(e.find('Value').text)
            else:  # REG_UNKNOWN or REG_BINARY
                entry.data = base64.b64decode(e.find('Value').text)

            entries.append(entry)

        self.pol_file.entries = entries

    def write_xml(self, filename):
        """Write self.pol_file out as an XML document.

        :param filename: path of the XML file to create
        """
        with open(filename, 'wb') as f:
            root = Element('PolFile')
            root.attrib['num_entries'] = str(self.pol_file.num_entries)
            root.attrib['signature'] = self.pol_file.header.signature
            root.attrib['version'] = str(self.pol_file.header.version)
            for entry in self.pol_file.entries:
                child = SubElement(root, 'Entry')
                # entry.size is not stored; see load_xml().
                child.attrib['type'] = str(entry.type)
                child.attrib['type_name'] = self.map_reg_type(entry.type)
                key = SubElement(child, 'Key')
                key.text = entry.keyname
                valuename = SubElement(child, 'ValueName')
                valuename.text = entry.valuename
                if misc.REG_MULTI_SZ == entry.type:
                    # UTF-16 multi-string: NUL-separated strings with a
                    # double-NUL terminator.
                    multi = entry.data.decode('utf-16').rstrip(u'\x00').split(u'\x00')
                    for m in multi:
                        value = SubElement(child, 'Value')
                        value.text = m
                elif (misc.REG_NONE == entry.type or
                      misc.REG_SZ == entry.type or
                      misc.REG_DWORD == entry.type or
                      misc.REG_DWORD_BIG_ENDIAN == entry.type or
                      misc.REG_QWORD == entry.type or
                      misc.REG_EXPAND_SZ == entry.type):
                    value = SubElement(child, 'Value')
                    value.text = str(entry.data)
                else:  # REG_UNKNOWN or REG_BINARY
                    value = SubElement(child, 'Value')
                    value.text = base64.b64encode(entry.data).decode('utf8')

            self.write_pretty_xml(root, f)

    def write_binary(self, filename):
        """Write self.pol_file in the binary Registry.pol format.

        :param filename: path of the binary file to create
        """
        with open(filename, 'wb') as f:
            f.write(ndr_pack(self.pol_file))
diff --git a/python/samba/graph.py b/python/samba/graph.py
new file mode 100644
index 0000000..4c4a07f
--- /dev/null
+++ b/python/samba/graph.py
@@ -0,0 +1,820 @@
+# -*- coding: utf-8 -*-
+# Graph topology utilities and dot file generation
+#
+# Copyright (C) Andrew Bartlett 2018.
+#
+# Written by Douglas Bagnall <douglas.bagnall@catalyst.net.nz>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from samba import colour
+import sys
+from itertools import cycle, groupby
+
FONT_SIZE = 10  # point size used for node and graph labels in dot output
+
+
def reformat_graph_label(s):
    """Break DNs over multiple lines, for better shaped and arguably more
    readable nodes. We try to split after commas, and if necessary
    after hyphens or failing that in arbitrary places."""
    if len(s) < 12:
        # too short to be worth wrapping
        return s

    pieces = []
    for chunk in s.replace(',', ',\n').split('\n'):
        while len(chunk) > 20:
            if '-' in chunk[2:20]:
                # split at the first hyphen (re-adding it below)
                head, chunk = chunk.split('-', 1)
            else:
                # no convenient hyphen: cut into roughly even pieces
                n_parts = len(chunk) // 12
                cut = len(chunk) // n_parts
                head, chunk = chunk[:cut], chunk[cut:]
            pieces.append(head + '-')
        if chunk:
            pieces.append(chunk)

    # '\\n' is a literal line break in dot label syntax
    return '\\n'.join(pieces)
+
+
def quote_graph_label(s, reformat=False):
    """Escape a string as graphviz requires.

    :param s: the label text
    :param reformat: if True, also wrap long labels over multiple lines
        (see reformat_graph_label)
    :return: the escaped (and possibly reformatted) label
    """
    # escaping inside quotes is simple in dot, because only " is escaped.
    # there is no need to count backslashes in sequences like \\\\"
    # (The previous code used '\"', which is identical to '"' and so
    # escaped nothing at all.)
    s = s.replace('"', '\\"')
    if reformat:
        s = reformat_graph_label(s)
    return s
+
+
def shorten_vertex_names(vertices, suffix=',...', aggressive=False):
    """Replace the common suffix (in practice, the base DN) of a number of
    vertices with a short string (default ",..."). If this seems
    pointless because the replaced string is very short or the results
    seem strange, the original vertices are retained.

    :param vertices: a sequence of vertices to shorten
    :param suffix: the replacement string [",..."]
    :param aggressive: replace certain common non-suffix strings

    :return: tuple of (rename map, replacements)

    The rename map is a dictionary mapping the old vertex names to
    their shortened versions. If no changes are made, replacements
    will be empty.
    """
    vmap = dict((v, v) for v in vertices)
    replacements = []

    if len(vmap) > 1:
        # walk backwards along all the strings until we meet a character
        # that is not shared by all.
        # NOTE: i stays a *negative* index throughout this section.
        i = -1
        vlist = list(vmap.values())
        try:
            while True:
                c = set(x[i] for x in vlist)
                # stop on divergence, or on '*' (used below as an
                # elision marker) so it is never absorbed into a suffix
                if len(c) > 1 or '*' in c:
                    break
                i -= 1
        except IndexError:
            # We have indexed beyond the start of a string, which should
            # only happen if one node is a strict suffix of all others.
            return vmap, replacements

        # add one to get to the last unanimous character.
        i += 1

        # now, we actually really want to split on a comma. So we walk
        # back to a comma.
        # (stepping a negative i towards zero moves right in the string)
        x = vlist[0]
        while i < len(x) and x[i] != ',':
            i += 1

        if i >= -len(suffix):
            # there is nothing to gain here: the tail we would replace
            # is no longer than the replacement suffix itself
            return vmap, replacements

        replacements.append((suffix, x[i:]))

        for k, v in vmap.items():
            vmap[k] = v[:i] + suffix

        if aggressive:
            # Remove known common annoying strings
            # (only when *every* shortened name contains them)
            for v in vmap.values():
                if ',CN=Servers,' not in v:
                    break
            else:
                vmap = dict((k, v.replace(',CN=Servers,', ',**,', 1))
                            for k, v in vmap.items())
                replacements.append(('**', 'CN=Servers'))

            for v in vmap.values():
                if not v.startswith('CN=NTDS Settings,'):
                    break
            else:
                vmap = dict((k, v.replace('CN=NTDS Settings,', '*,', 1))
                            for k, v in vmap.items())
                replacements.append(('*', 'CN=NTDS Settings'))

    return vmap, replacements
+
+
def compile_graph_key(key_items, nodes_above=None, elisions=None,
                      prefix='key_', width=2):
    """Generate a dot file snippet that acts as a legend for a graph.

    :param key_items: sequence of (is_vertex, style, label) tuples
    :param nodes_above: list of vertices (pushes key into right position)
    :param elisions: list of (short, full) suffix replacement tuples
    :param prefix: string used to generate key node names ["key_"]
    :param width: default width of node lines

    For each key item, is_vertex says whether it describes a vertex
    (True) or an edge (False), style is a dot style string for that
    item, and label is the text shown beside it.
    """
    if nodes_above is None:
        nodes_above = []

    edge_lines = []
    edge_names = []
    vertex_lines = []
    vertex_names = []
    order_lines = []

    for idx, (is_vertex, style, label) in enumerate(key_items):
        tag = '%s%d_' % (prefix, idx)
        label = quote_graph_label(label)
        name = '%s_label' % tag
        order_lines.append(name)

        if is_vertex:
            vertex_names.append(name)
            vertex_lines.append('%s[label="%s"; %s]' %
                                (name, label, style))
        else:
            edge_names.append(name)
            e1 = '%se1' % tag
            e2 = '%se2' % tag
            # a small cluster showing a single styled src -> dest edge
            edge_lines.extend([
                'subgraph cluster_%s {' % tag,
                '%s[label=src; color="#000000"; group="%s_g"]' % (e1, tag),
                '%s[label=dest; color="#000000"; group="%s_g"]' % (e2, tag),
                '%s -> %s [constraint = false; %s]' % (e1, e2, style),
                ('%s[shape=plaintext; style=solid; width=%f; '
                 'label="%s\\r"]') % (name, width, label),
                '}',
            ])

    elision_str = ''
    if elisions:
        for idx, (abbrev, full_form) in enumerate(reversed(elisions)):
            order_lines.append('elision%d' % idx)
            if abbrev[0] == ',' and full_form[0] == ',':
                abbrev = abbrev[1:]
                full_form = full_form[1:]
            elision_str += ('\nelision%d[shape=plaintext; style=solid; '
                            'label="\\“%s” means “%s”\\r"]\n'
                            % (idx, abbrev, full_form))

    above_lines = []
    if order_lines:
        # invisible edges pull the key below the real graph nodes
        above_lines = ['"%s" -> %s [style=invis]' % (node, order_lines[0])
                       for node in nodes_above]

    template = ('subgraph cluster_key {\n'
                'label="Key";\n'
                'subgraph cluster_key_nodes {\n'
                'label="";\n'
                'color = "invis";\n'
                '%s\n'
                '}\n'
                'subgraph cluster_key_edges {\n'
                'label="";\n'
                'color = "invis";\n'
                '%s\n'
                '{%s}\n'
                '}\n'
                '%s\n'
                '}\n'
                '%s\n'
                '%s [style=invis; weight=9]'
                '\n')

    return template % (';\n'.join(vertex_lines),
                       '\n'.join(edge_lines),
                       ' '.join(edge_names),
                       elision_str,
                       ';\n'.join(above_lines),
                       ' -> '.join(order_lines))
+
+
def dot_graph(vertices, edges,
              directed=False,
              title=None,
              reformat_labels=True,
              vertex_colors=None,
              edge_colors=None,
              edge_labels=None,
              vertex_styles=None,
              edge_styles=None,
              graph_name=None,
              shorten_names=False,
              key_items=None,
              vertex_clusters=None):
    """Generate a Graphviz representation of a list of vertices and edges.

    :param vertices: list of vertex names (optional).
    :param edges: list of (vertex, vertex) pairs
    :param directed: bool: whether the graph is directed
    :param title: optional title for the graph
    :param reformat_labels: whether to wrap long vertex labels
    :param vertex_colors: if not None, a sequence of colours for the vertices
    :param edge_colors: if not None, colours for the edges
    :param edge_labels: if not None, labels for the edges
    :param vertex_styles: if not None, DOT style strings for vertices
    :param edge_styles: if not None, DOT style strings for edges
    :param graph_name: if not None, name of graph
    :param shorten_names: if True, remove common DN suffixes
    :param key_items: (is_vertex, style, description) tuples
    :param vertex_clusters: list of subgraph cluster names

    Colour, style, and label lists must be the same length as the
    corresponding list of edges or vertices (or None).

    Colours can be HTML RGB strings ("#FF0000") or common names
    ("red"), or some other formats you don't want to think about.

    If `vertices` is None, only the vertices mentioned in the edges
    are shown, and their appearance can be modified using the
    vertex_colors and vertex_styles arguments. Vertices appearing in
    the edges but not in the `vertices` list will be shown but their
    styles can not be modified.

    :return: the complete dot document as a single string
    """
    out = []
    write = out.append

    if vertices is None:
        # derive the vertex set from the edges
        vertices = set(x[0] for x in edges) | set(x[1] for x in edges)

    if shorten_names:
        # build the rename map over *all* names seen, so vertices and
        # edges stay consistent with each other
        vlist = list(set(x[0] for x in edges) |
                     set(x[1] for x in edges) |
                     set(vertices))
        vmap, elisions = shorten_vertex_names(vlist)
        vertices = [vmap[x] for x in vertices]
        edges = [(vmap[a], vmap[b]) for a, b in edges]

    else:
        elisions = None

    if graph_name is None:
        graph_name = 'A_samba_tool_production'

    if directed:
        graph_type = 'digraph'
        connector = '->'
    else:
        graph_type = 'graph'
        connector = '--'

    write('/* generated by samba */')
    write('%s %s {' % (graph_type, graph_name))
    if title is not None:
        write('label="%s";' % (title,))
    write('fontsize=%s;\n' % (FONT_SIZE))
    write('node[fontname=Helvetica; fontsize=%s];\n' % (FONT_SIZE))

    # Consecutive vertices sharing a cluster name are wrapped in a dot
    # subgraph; a change of cluster closes the previous one.
    # NOTE(review): a vertex whose vertex_clusters entry is falsy,
    # appearing immediately after a clustered vertex, is emitted inside
    # the still-open cluster — confirm this is intended.
    prev_cluster = None
    cluster_n = 0
    quoted_vertices = []
    for i, v in enumerate(vertices):
        v = quote_graph_label(v, reformat_labels)
        quoted_vertices.append(v)
        attrs = []
        if vertex_clusters and vertex_clusters[i]:
            cluster = vertex_clusters[i]
            if cluster != prev_cluster:
                if prev_cluster is not None:
                    write("}")
                prev_cluster = cluster
                n = quote_graph_label(cluster)
                if cluster:
                    write('subgraph cluster_%d {' % cluster_n)
                    cluster_n += 1
                    write('style = "rounded,dotted";')
                    write('node [style="filled"; fillcolor=white];')
                    write('label = "%s";' % n)

        if vertex_styles and vertex_styles[i]:
            attrs.append(vertex_styles[i])
        if vertex_colors and vertex_colors[i]:
            attrs.append('color="%s"' % quote_graph_label(vertex_colors[i]))
        if attrs:
            write('"%s" [%s];' % (v, ', '.join(attrs)))
        else:
            write('"%s";' % (v,))

    if prev_cluster:
        # close the last open cluster
        write("}")

    for i, edge in enumerate(edges):
        a, b = edge
        if a is None:
            a = "Missing source value"
        if b is None:
            b = "Missing destination value"

        a = quote_graph_label(a, reformat_labels)
        b = quote_graph_label(b, reformat_labels)

        attrs = []
        if edge_labels:
            label = quote_graph_label(edge_labels[i])
            attrs.append('label="%s"' % label)
        if edge_colors:
            attrs.append('color="%s"' % quote_graph_label(edge_colors[i]))
        if edge_styles:
            attrs.append(edge_styles[i])  # no quoting
        if attrs:
            write('"%s" %s "%s" [%s];' % (a, connector, b, ', '.join(attrs)))
        else:
            write('"%s" %s "%s";' % (a, connector, b))

    if key_items:
        key = compile_graph_key(key_items, nodes_above=quoted_vertices,
                                elisions=elisions)
        write(key)

    write('}\n')
    return '\n'.join(out)
+
+
# Colour palettes for the text table renderers below, keyed by the
# value passed as the `colour` argument (None means no colouring).
# 'transitive scale', when present, supersedes 'transitive' and lets
# the colour deepen with distance (see get_transitive_colourer).
COLOUR_SETS = {
    'ansi': {
        'alternate rows': (colour.DARK_WHITE, colour.BLACK),
        'disconnected': colour.RED,
        'connected': colour.GREEN,
        'transitive': colour.DARK_YELLOW,
        'header': colour.UNDERLINE,
        'reset': colour.C_NORMAL,
    },
    'ansi-heatmap': {
        'alternate rows': (colour.DARK_WHITE, colour.BLACK),
        'disconnected': colour.REV_RED,
        'connected': colour.REV_GREEN,
        'transitive': colour.REV_DARK_YELLOW,
        'header': colour.UNDERLINE,
        'reset': colour.C_NORMAL,
    },
    'xterm-256color': {
        'alternate rows': (colour.xterm_256_colour(39),
                           colour.xterm_256_colour(45)),
        # 'alternate rows': (colour.xterm_256_colour(246),
        #                   colour.xterm_256_colour(247)),
        'disconnected': colour.xterm_256_colour(124, bg=True),
        'connected': colour.xterm_256_colour(112),
        'transitive': colour.xterm_256_colour(214),
        'transitive scale': (colour.xterm_256_colour(190),
                             colour.xterm_256_colour(184),
                             colour.xterm_256_colour(220),
                             colour.xterm_256_colour(214),
                             colour.xterm_256_colour(208),
                             ),
        'header': colour.UNDERLINE,
        'reset': colour.C_NORMAL,
    },
    'xterm-256color-heatmap': {
        'alternate rows': (colour.xterm_256_colour(171),
                           colour.xterm_256_colour(207)),
        # 'alternate rows': (colour.xterm_256_colour(246),
        #                    colour.xterm_256_colour(247)),
        'disconnected': colour.xterm_256_colour(124, bg=True),
        'connected': colour.xterm_256_colour(112, bg=True),
        'transitive': colour.xterm_256_colour(214, bg=True),
        'transitive scale': (colour.xterm_256_colour(190, bg=True),
                             colour.xterm_256_colour(184, bg=True),
                             colour.xterm_256_colour(220, bg=True),
                             colour.xterm_256_colour(214, bg=True),
                             colour.xterm_256_colour(208, bg=True),
                             ),
        'header': colour.UNDERLINE,
        'reset': colour.C_NORMAL,
    },
    # the no-colour palette: every escape collapses to ''
    None: {
        'alternate rows': ('',),
        'disconnected': '',
        'connected': '',
        'transitive': '',
        'header': '',
        'reset': '',
    }
}

# Box-drawing and marker characters for the table renderers, in
# fancy (UTF-8) and plain (ASCII) variants.
CHARSETS = {
    'utf8': {
        'vertical': '│',
        'horizontal': '─',
        'corner': '╭',
        # 'diagonal': '╲',
        'diagonal': '·',
        # 'missing': '🕱',
        'missing': '-',
        'right_arrow': '←',
    },
    'ascii': {
        'vertical': '|',
        'horizontal': '-',
        'corner': ',',
        'diagonal': '0',
        'missing': '-',
        'right_arrow': '<-',
    }
}
+
+
def find_transitive_distance(vertices, edges):
    """Return a matrix of shortest hop-counts between vertices.

    :param vertices: the vertices of interest
    :param edges: directed (source, dest) pairs
    :return: dict mapping each vertex in vertices to a {vertex:
        distance} dict; unreachable pairs are omitted.
    """
    all_vertices = set(vertices)
    for src, dest in edges:
        all_vertices.add(src)
        all_vertices.add(dest)

    if all_vertices != set(vertices):
        print("there are unknown vertices: %s" %
              (all_vertices - set(vertices)),
              file=sys.stderr)

    # with n vertices, we are always less than n hops away from
    # anywhere else, so n serves as infinity.
    inf = len(all_vertices)
    distances = {v: {v: 0} for v in all_vertices}

    for src, dest in edges:
        distances[src].setdefault(dest, 1)

    # Repeatedly relax two-leg paths until nothing improves.
    # This seems very suboptimal -- potentially O(n^4) -- but n is
    # smallish.
    for _ in range(inf):
        changed = False
        next_round = {}
        for v, row in distances.items():
            new_row = dict(row)
            next_round[v] = new_row
            for mid, cost in row.items():
                for leaf, extra in distances[mid].items():
                    total = cost + extra
                    if total < row.get(leaf, inf):
                        new_row[leaf] = total
                        changed = True

        distances = next_round
        if not changed:
            break

    # keep only the requested vertices, dropping unreachable entries
    answer = {}
    for v in vertices:
        row = distances[v]
        answer[v] = {v2: row[v2] for v2 in vertices if v2 in row}

    return answer
+
+
def get_transitive_colourer(colours, n_vertices):
    """Return a function mapping a transitive distance to a colour.

    If the palette has a 'transitive scale', the colour deepens as the
    distance grows (non-integers get no colour); otherwise the single
    'transitive' colour is always used.
    """
    if 'transitive scale' in colours:
        scale = colours['transitive scale']
        steps = len(scale)
        # scale the distance relative to sqrt(number of vertices)
        divisor = 1 + int(n_vertices ** 0.5)

        def colourer(link):
            if not isinstance(link, int):
                return ''
            return scale[min(link * steps // divisor, steps - 1)]

    else:
        def colourer(link):
            return colours['transitive']

    return colourer
+
+
def distance_matrix(vertices, edges,
                    utf8=False,
                    colour=None,
                    shorten_names=False,
                    generate_key=False,
                    grouping_function=None,
                    row_comments=None):
    """Render a text table of transitive hop-counts between vertices.

    :param vertices: the vertices to show (None derives them from edges)
    :param edges: directed (source, dest) pairs
    :param utf8: use UTF-8 box-drawing characters instead of ASCII
    :param colour: a COLOUR_SETS key (NB: this parameter shadows the
        samba.colour module within this function)
    :param shorten_names: replace the common DN suffix with '+'
    :param generate_key: append an explanatory key to the output
    :param grouping_function: optional key function used to sort the
        vertices and alternate row colours per group (e.g. by site)
    :param row_comments: optional per-row annotations
    :return: the rendered table as a single string
    """
    lines = []
    write = lines.append

    charset = CHARSETS['utf8' if utf8 else 'ascii']
    vertical = charset['vertical']
    horizontal = charset['horizontal']
    corner = charset['corner']
    diagonal = charset['diagonal']
    missing = charset['missing']
    right_arrow = charset['right_arrow']

    colours = COLOUR_SETS[colour]

    colour_cycle = cycle(colours.get('alternate rows', ('',)))

    if vertices is None:
        vertices = sorted(set(x[0] for x in edges) | set(x[1] for x in edges))

    if grouping_function is not None:
        # we sort and colour according to the grouping function
        # which can be used to e.g. alternate colours by site.
        vertices = sorted(vertices, key=grouping_function)
        colour_list = []
        for k, v in groupby(vertices, key=grouping_function):
            c = next(colour_cycle)
            colour_list.extend(c for x in v)
    else:
        colour_list = [next(colour_cycle) for v in vertices]

    if shorten_names:
        vlist = list(set(x[0] for x in edges) |
                     set(x[1] for x in edges) |
                     set(vertices))
        vmap, replacements = shorten_vertex_names(vlist, '+',
                                                  aggressive=True)
        vertices = [vmap[x] for x in vertices]
        edges = [(vmap[a], vmap[b]) for a, b in edges]

    # width of the row-label column
    vlen = max(6, max(len(v) for v in vertices))

    # first, the key for the columns
    c_header = colours.get('header', '')
    c_disconn = colours.get('disconnected', '')
    c_conn = colours.get('connected', '')
    c_reset = colours.get('reset', '')

    colour_transitive = get_transitive_colourer(colours, len(vertices))

    # slanted column headers: one line per destination vertex
    vspace = ' ' * vlen
    verticals = ''
    write("%*s %s %sdestination%s" % (vlen, '',
                                      ' ' * len(vertices),
                                      c_header,
                                      c_reset))
    for i, v in enumerate(vertices):
        j = len(vertices) - i
        c = colour_list[i]
        if j == 1:
            start = '%s%ssource%s' % (vspace[:-6], c_header, c_reset)
        else:
            start = vspace
        write('%s %s%s%s%s%s %s%s' % (start,
                                      verticals,
                                      c_reset,
                                      c,
                                      corner,
                                      horizontal * j,
                                      v,
                                      c_reset
                                      ))
        verticals += c + vertical

    connections = find_transitive_distance(vertices, edges)

    for i, v in enumerate(vertices):
        c = colour_list[i]
        links = connections[v]
        row = []
        for v2 in vertices:
            link = links.get(v2)
            if link is None:
                row.append('%s%s' % (c_disconn, missing))
                continue
            if link == 0:
                row.append('%s%s%s%s' % (c_reset, c, diagonal, c_reset))
            elif link == 1:
                row.append('%s1%s' % (c_conn, c_reset))
            else:
                ct = colour_transitive(link)
                if link > 9:
                    # cells are one character wide
                    link = '>'
                row.append('%s%s%s' % (ct, link, c_reset))

        if row_comments is not None and row_comments[i]:
            row.append('%s %s %s' % (c_reset, right_arrow, row_comments[i]))

        write('%s%*s%s %s%s' % (c, vlen, v, c_reset,
                                ''.join(row), c_reset))

    example_c = next(colour_cycle)
    if shorten_names:
        write('')
        for substitute, original in reversed(replacements):
            write("'%s%s%s' stands for '%s%s%s'" % (example_c,
                                                    substitute,
                                                    c_reset,
                                                    example_c,
                                                    original,
                                                    c_reset))
    if generate_key:
        write('')
        write("Data can get from %ssource%s to %sdestination%s in the "
              "indicated number of steps." % (c_header, c_reset,
                                              c_header, c_reset))
        write("%s%s%s means zero steps (it is the same DC)" %
              (example_c, diagonal, c_reset))
        write("%s1%s means a direct link" % (c_conn, c_reset))
        write("%s2%s means a transitive link involving two steps "
              "(i.e. one intermediate DC)" %
              (colour_transitive(2), c_reset))
        write("%s%s%s means there is no connection, even through other DCs" %
              (c_disconn, missing, c_reset))

    return '\n'.join(lines)
+
+
def pad_char(char, digits, padding=' '):
    """Right-align char in a digits-wide field.

    The padding string is appended afterwards, except in the
    single-digit case where no padding is added at all.
    """
    if digits == 1:
        return char
    return ' ' * (digits - 1) + char + padding
+
+
def transpose_dict_matrix(m):
    """Swap the two key levels of a dict-of-dicts matrix."""
    transposed = {}
    for outer, inner_map in m.items():
        for inner, value in inner_map.items():
            if inner not in transposed:
                transposed[inner] = {}
            transposed[inner][outer] = value
    return transposed
+
+
def full_matrix(rows,
                utf8=False,
                colour=None,
                shorten_names=False,
                generate_key=False,
                grouping_function=None,
                row_comments=None,
                colour_scale=None,
                digits=1,
                ylabel='source',
                xlabel='destination',
                transpose=True):
    """Render a dict-of-dicts matrix as a text table with fixed-width
    numeric cells.

    :param rows: {row_key: {column_key: value}} matrix
    :param utf8: use UTF-8 box-drawing characters instead of ASCII
    :param colour: a COLOUR_SETS key (NB: shadows the samba.colour
        module within this function)
    :param shorten_names: replace the common DN suffix with '+'
    :param generate_key: unused here; kept for interface symmetry with
        distance_matrix
    :param grouping_function: optional key function used to sort rows
        and alternate colours per group
    :param row_comments: optional per-row annotations
    :param colour_scale: scaling reference for the transitive colourer
        (defaults to the number of rows)
    :param digits: width of each numeric cell
    :param ylabel: label for the row axis
    :param xlabel: label for the column axis
    :param transpose: swap rows and columns first
    :return: the rendered table as a single string
    """
    lines = []
    write = lines.append

    if transpose:
        rows = transpose_dict_matrix(rows)

    # bool used as 0/1 below for widths and padding
    use_padding = digits > 1

    charset = CHARSETS['utf8' if utf8 else 'ascii']
    vertical = pad_char(charset['vertical'], digits)
    horizontal = charset['horizontal'] * (digits + use_padding)
    corner = pad_char(charset['corner'], digits,
                      charset['horizontal'])
    diagonal = pad_char(charset['diagonal'], digits)
    missing = pad_char(charset['missing'], digits)
    toobig = pad_char('>', digits)
    right_arrow = charset['right_arrow']
    empty = pad_char(' ', digits)

    colours = COLOUR_SETS[colour]

    colour_cycle = cycle(colours.get('alternate rows', ('',)))
    vertices = list(rows.keys())
    if grouping_function is not None:
        # we sort and colour according to the grouping function
        # which can be used to e.g. alternate colours by site.
        vertices.sort(key=grouping_function)
        colour_list = []
        for k, v in groupby(vertices, key=grouping_function):
            c = next(colour_cycle)
            colour_list.extend(c for x in v)
    else:
        colour_list = [next(colour_cycle) for v in vertices]

    if shorten_names:
        vmap, replacements = shorten_vertex_names(vertices, '+',
                                                  aggressive=True)
        rows2 = {}
        for vert, r in rows.items():
            rows2[vmap[vert]] = dict((vmap[k], v) for k, v in r.items())

        rows = rows2
        vertices = list(rows.keys())

    # width of the row-label column
    vlen = max(6, len(xlabel), max(len(v) for v in vertices))

    # first, the key for the columns
    c_header = colours.get('header', '')
    c_disconn = colours.get('disconnected', '')
    c_conn = colours.get('connected', '')
    c_reset = colours.get('reset', '')

    if colour_scale is None:
        colour_scale = len(rows)
    colour_transitive = get_transitive_colourer(colours, colour_scale)

    # slanted column headers: one line per column vertex
    vspace = ' ' * vlen
    verticals = ''
    write("%s %s %s%s%s" % (vspace,
                            empty * (len(rows) + 1),
                            c_header,
                            xlabel,
                            c_reset))
    for i, v in enumerate(vertices):
        j = len(rows) - i
        c = colour_list[i]
        if j == 1:
            start = '%s%s%s%s' % (vspace[:-len(ylabel)],
                                  c_header,
                                  ylabel,
                                  c_reset)
        else:
            start = vspace
        write('%s %s%s%s%s%s %s%s' % (start,
                                      verticals,
                                      c_reset,
                                      c,
                                      corner,
                                      horizontal * j,
                                      v,
                                      c_reset
                                      ))
        verticals += '%s%s' % (c, vertical)

    end_cell = '%s%s' % (' ' * use_padding, c_reset)
    overflow = False
    for i, v in enumerate(vertices):
        links = rows[v]
        c = colour_list[i]
        row = []
        for v2 in vertices:
            if v2 not in links:
                row.append('%s%s%s' % (c_disconn, missing, c_reset))
            elif v == v2:
                row.append('%s%s%s%s' % (c_reset, c, diagonal, c_reset))
            else:
                link = links[v2]
                if link >= 10 ** digits:
                    # value does not fit the cell width
                    ct = colour_transitive(link)
                    row.append('%s%s%s' % (ct, toobig, c_reset))
                    overflow = True
                    continue
                if link == 0:
                    ct = c_conn
                else:
                    ct = colour_transitive(link)
                row.append('%s%*s%s' % (ct, digits, link, end_cell))

        if row_comments is not None and row_comments[i]:
            row.append('%s %s %s' % (c_reset, right_arrow, row_comments[i]))

        write('%s%*s%s %s%s' % (c, vlen, v, c_reset,
                                ''.join(row), c_reset))

    if overflow or shorten_names:
        write('')

    if overflow:
        write("'%s%s%s' means greater than %d " %
              (colour_transitive(10 ** digits),
               toobig,
               c_reset,
               10 ** digits - 1))

    if shorten_names:
        example_c = next(colour_cycle)
        for substitute, original in reversed(replacements):
            write("'%s%s%s' stands for '%s%s%s'" % (example_c,
                                                    substitute,
                                                    c_reset,
                                                    example_c,
                                                    original,
                                                    c_reset))

    return '\n'.join(lines)
diff --git a/python/samba/hostconfig.py b/python/samba/hostconfig.py
new file mode 100644
index 0000000..f3c9aad
--- /dev/null
+++ b/python/samba/hostconfig.py
@@ -0,0 +1,81 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2008
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Local host configuration."""
+from .samdb import SamDB
+
+
class Hostconfig(object):
    """Aggregate object holding all the configuration of a Samba host."""

    def __init__(self, lp):
        self.lp = lp

    def get_shares(self):
        """Return a SharesContainer over this host's shares."""
        return SharesContainer(self.lp)

    def get_samdb(self, session_info, credentials):
        """Access the SamDB host.

        :param session_info: Session info to use
        :param credentials: Credentials to access the SamDB with
        """
        return SamDB(url=self.lp.samdb_url(),
                     session_info=session_info,
                     credentials=credentials,
                     lp=self.lp)
+
+
+# TODO: Rather than accessing Loadparm directly here, we should really
+# have bindings to the param/shares.c and use those.
+
+
class SharesContainer(object):
    """A mapping-like view of the shares in a loadparm configuration.

    The [global] section is excluded, since it is configuration rather
    than a share.
    """

    def __init__(self, lp):
        self._lp = lp

    def __getitem__(self, name):
        if name == "global":
            # [global] is not a share
            raise KeyError
        return Share(self._lp[name])

    def __len__(self):
        total = len(self._lp)
        if "global" in self._lp.services():
            total -= 1
        return total

    def keys(self):
        return [service for service in self._lp.services()
                if service != "global"]

    def __iter__(self):
        return iter(self.keys())
+
+
class Share(object):
    """A file share, backed by a loadparm service.

    Parameter access is delegated straight to the underlying service.
    """

    def __init__(self, service):
        self._service = service

    def __getitem__(self, name):
        """Look up a share parameter by name."""
        return self._service[name]

    def __setitem__(self, name, value):
        """Set a share parameter."""
        self._service[name] = value
diff --git a/python/samba/idmap.py b/python/samba/idmap.py
new file mode 100644
index 0000000..321ae8b
--- /dev/null
+++ b/python/samba/idmap.py
@@ -0,0 +1,99 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) 2008 Kai Blin <kai@samba.org>
+#
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Convenience functions for using the idmap database."""
+
+__docformat__ = "restructuredText"
+
+import ldb
+import samba
+
+
class IDmapDB(samba.Ldb):
    """The IDmap database."""

    # Mappings for ID_TYPE_UID, ID_TYPE_GID and ID_TYPE_BOTH
    TYPE_UID = 1
    TYPE_GID = 2
    TYPE_BOTH = 3

    def __init__(self, url=None, lp=None, modules_dir=None, session_info=None,
                 credentials=None, flags=0, options=None):
        """Opens the IDMap Database.

        For parameter meanings see the super class (samba.Ldb).
        If url is None, the default idmap.ldb in the private
        directory is used.
        """
        self.lp = lp
        if url is None:
            url = lp.private_path("idmap.ldb")

        super().__init__(url=url, lp=lp, modules_dir=modules_dir,
                         session_info=session_info, credentials=credentials,
                         flags=flags, options=options)

    def connect(self, url=None, flags=0, options=None):
        # Resolve the url relative to the private directory before
        # connecting.
        super().connect(url=self.lp.private_path(url), flags=flags,
                        options=options)

    def increment_xid(self):
        """Increment the stored xidNumber counter.

        If xidNumber is not yet present it is initialised from the
        configured lowerBound.

        :return: an xid (the value before incrementing the stored
            counter) that can be used for a SID/unixid mapping
        """
        res = self.search(expression="distinguishedName=CN=CONFIG", base="",
                          scope=ldb.SCOPE_SUBTREE)
        # 'xid' instead of the previous 'id', which shadowed the builtin
        xid = res[0].get("xidNumber")
        flag = ldb.FLAG_MOD_REPLACE
        if xid is None:
            # First allocation: start from the configured lowerBound.
            xid = res[0].get("lowerBound")
            flag = ldb.FLAG_MOD_ADD
        newxid = int(str(xid)) + 1
        msg = ldb.Message()
        msg.dn = ldb.Dn(self, "CN=CONFIG")
        msg["xidNumber"] = ldb.MessageElement(str(newxid), flag, "xidNumber")
        self.modify(msg)
        return xid

    def setup_name_mapping(self, sid, type, unixid=None):
        """Setup a mapping between a SID and a unix id.

        :param sid: SID of the NT-side of the mapping.
        :param type: one of TYPE_UID, TYPE_GID or TYPE_BOTH
        :param unixid: Unix id to map to; if None the next one is
            allocated via increment_xid()
        """
        # Allocate first (even for an unknown type) to preserve the
        # historical ordering of side effects.
        if unixid is None:
            unixid = self.increment_xid()

        type_strings = {
            self.TYPE_UID: "ID_TYPE_UID",
            self.TYPE_GID: "ID_TYPE_GID",
            self.TYPE_BOTH: "ID_TYPE_BOTH",
        }
        type_string = type_strings.get(type)
        if type_string is None:
            # Unknown type: silently skip, matching historical behaviour.
            return

        mod = """
dn: CN=%s
xidNumber: %s
objectSid: %s
objectClass: sidMap
type: %s
cn: %s

""" % (sid, unixid, sid, type_string, sid)
        self.add(next(self.parse_ldif(mod))[1])
diff --git a/python/samba/join.py b/python/samba/join.py
new file mode 100644
index 0000000..8b7e882
--- /dev/null
+++ b/python/samba/join.py
@@ -0,0 +1,1786 @@
+# python join code
+# Copyright Andrew Tridgell 2010
+# Copyright Andrew Bartlett 2010
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Joining a domain."""
+
+from samba.auth import system_session
+from samba.samdb import SamDB
+from samba import gensec, Ldb, drs_utils, arcfour_encrypt, string_to_byte_array
+import ldb
+import samba
+import uuid
+from samba.ndr import ndr_pack, ndr_unpack
+from samba.dcerpc import security, drsuapi, misc, nbt, lsa, drsblobs, dnsserver, dnsp
+from samba.credentials import Credentials, DONT_USE_KERBEROS
+from samba.provision import (secretsdb_self_join, provision, provision_fill,
+ FILL_DRS, FILL_SUBDOMAIN, DEFAULTSITE)
+from samba.provision.common import setup_path
+from samba.schema import Schema
+from samba import descriptor
+from samba.net import Net
+from samba.provision.sambadns import setup_bind9_dns
+from samba import read_and_sub_file
+from samba import werror
+from base64 import b64encode
+from samba import WERRORError, NTSTATUSError
+from samba import sd_utils
+from samba.dnsserver import ARecord, AAAARecord, CNAMERecord
+import random
+import time
+import re
+import os
+import tempfile
+from collections import OrderedDict
+from samba.common import get_string
+from samba.netcmd import CommandError
+from samba import dsdb, functional_level
+
+
class DCJoinException(Exception):
    """Raised when a domain-controller join cannot proceed."""

    def __init__(self, msg):
        # Give callers a uniform, prefixed error message.
        message = "Can't join, error: %s" % msg
        super().__init__(message)
+
+
+class DCJoinContext(object):
+ """Perform a DC join."""
+
    def __init__(ctx, logger=None, server=None, creds=None, lp=None, site=None,
                 netbios_name=None, targetdir=None, domain=None,
                 machinepass=None, use_ntvfs=False, dns_backend=None,
                 promote_existing=False, plaintext_secrets=False,
                 backend_store=None,
                 backend_store_size=None,
                 forced_local_samdb=None):
        """Gather all state needed for the join from the source DC.

        Connects to a DC (or uses forced_local_samdb), reads the naming
        contexts, domain/forest names and functional level, and derives
        the DNs, SPNs and machine password the join will create.  No
        directory changes are made here.
        """
        ctx.logger = logger
        ctx.creds = creds
        ctx.lp = lp
        ctx.site = site
        ctx.targetdir = targetdir
        ctx.use_ntvfs = use_ntvfs
        ctx.plaintext_secrets = plaintext_secrets
        ctx.backend_store = backend_store
        ctx.backend_store_size = backend_store_size

        ctx.promote_existing = promote_existing
        ctx.promote_from_dn = None

        ctx.nc_list = []
        ctx.full_nc_list = []

        # Require sealing on all connections made with these credentials.
        ctx.creds.set_gensec_features(creds.get_gensec_features() | gensec.FEATURE_SEAL)
        ctx.net = Net(creds=ctx.creds, lp=ctx.lp)

        ctx.server = server
        ctx.forced_local_samdb = forced_local_samdb

        if forced_local_samdb:
            ctx.samdb = forced_local_samdb
            ctx.server = ctx.samdb.url
        else:
            if ctx.server:
                # work out the DC's site (if not already specified)
                if site is None:
                    ctx.site = ctx.find_dc_site(ctx.server)
            else:
                # work out the Primary DC for the domain (as well as an
                # appropriate site for the new DC)
                ctx.logger.info("Finding a writeable DC for domain '%s'" % domain)
                ctx.server = ctx.find_dc(domain)
                ctx.logger.info("Found DC %s" % ctx.server)
            ctx.samdb = SamDB(url="ldap://%s" % ctx.server,
                              session_info=system_session(),
                              credentials=ctx.creds, lp=ctx.lp)

        if ctx.site is None:
            ctx.site = DEFAULTSITE

        # Probe the connection; convert LDB errors into a join error.
        try:
            ctx.samdb.search(scope=ldb.SCOPE_BASE, attrs=[])
        except ldb.LdbError as e:
            (enum, estr) = e.args
            raise DCJoinException(estr)

        # Cache the naming-context DNs and domain identity of the source DC.
        ctx.base_dn = str(ctx.samdb.get_default_basedn())
        ctx.root_dn = str(ctx.samdb.get_root_basedn())
        ctx.schema_dn = str(ctx.samdb.get_schema_basedn())
        ctx.config_dn = str(ctx.samdb.get_config_basedn())
        ctx.domsid = security.dom_sid(ctx.samdb.get_domain_sid())
        ctx.forestsid = ctx.domsid
        ctx.domain_name = ctx.get_domain_name()
        ctx.forest_domain_name = ctx.get_forest_domain_name()
        ctx.invocation_id = misc.GUID(str(uuid.uuid4()))

        ctx.dc_ntds_dn = ctx.samdb.get_dsServiceName()
        ctx.dc_dnsHostName = ctx.get_dnsHostName()
        ctx.behavior_version = ctx.get_behavior_version()

        if machinepass is not None:
            ctx.acct_pass = machinepass
        else:
            ctx.acct_pass = samba.generate_random_machine_password(120, 120)

        ctx.dnsdomain = ctx.samdb.domain_dns_name()

        # the following are all dependent on the new DC's netbios_name (which
        # we expect to always be specified, except when cloning a DC)
        if netbios_name:
            # work out the DNs of all the objects we will be adding
            ctx.myname = netbios_name
            ctx.samname = "%s$" % ctx.myname
            ctx.server_dn = "CN=%s,CN=Servers,CN=%s,CN=Sites,%s" % (ctx.myname, ctx.site, ctx.config_dn)
            ctx.ntds_dn = "CN=NTDS Settings,%s" % ctx.server_dn
            ctx.acct_dn = "CN=%s,OU=Domain Controllers,%s" % (ctx.myname, ctx.base_dn)
            ctx.dnshostname = "%s.%s" % (ctx.myname.lower(), ctx.dnsdomain)
            ctx.dnsforest = ctx.samdb.forest_dns_name()

            # Only domains using DFSR (not old FRS) have this container.
            topology_base = "CN=Topology,CN=Domain System Volume,CN=DFSR-GlobalSettings,CN=System,%s" % ctx.base_dn
            if ctx.dn_exists(topology_base):
                ctx.topology_dn = "CN=%s,%s" % (ctx.myname, topology_base)
            else:
                ctx.topology_dn = None

            ctx.SPNs = ["HOST/%s" % ctx.myname,
                        "HOST/%s" % ctx.dnshostname,
                        "GC/%s/%s" % (ctx.dnshostname, ctx.dnsforest)]

            res_rid_manager = ctx.samdb.search(scope=ldb.SCOPE_BASE,
                                               attrs=["rIDManagerReference"],
                                               base=ctx.base_dn)

            ctx.rid_manager_dn = res_rid_manager[0]["rIDManagerReference"][0]

        ctx.domaindns_zone = 'DC=DomainDnsZones,%s' % ctx.base_dn
        ctx.forestdns_zone = 'DC=ForestDnsZones,%s' % ctx.root_dn

        # Check whether the source domain publishes a DomainDnsZones
        # partition; without one we cannot replicate DNS data.
        expr = "(&(objectClass=crossRef)(ncName=%s))" % ldb.binary_encode(ctx.domaindns_zone)
        res_domaindns = ctx.samdb.search(scope=ldb.SCOPE_ONELEVEL,
                                         attrs=[],
                                         base=ctx.samdb.get_partitions_dn(),
                                         expression=expr)
        if dns_backend is None:
            ctx.dns_backend = "NONE"
        else:
            if len(res_domaindns) == 0:
                ctx.dns_backend = "NONE"
                print("NO DNS zone information found in source domain, not replicating DNS")
            else:
                ctx.dns_backend = dns_backend

        ctx.realm = ctx.dnsdomain

        ctx.tmp_samdb = None

        ctx.replica_flags = (drsuapi.DRSUAPI_DRS_INIT_SYNC |
                             drsuapi.DRSUAPI_DRS_PER_SYNC |
                             drsuapi.DRSUAPI_DRS_GET_ANC |
                             drsuapi.DRSUAPI_DRS_GET_NC_SIZE |
                             drsuapi.DRSUAPI_DRS_NEVER_SYNCED)

        # these elements are optional
        ctx.never_reveal_sid = None
        ctx.reveal_sid = None
        ctx.connection_dn = None
        ctx.RODC = False
        ctx.krbtgt_dn = None
        ctx.drsuapi = None
        ctx.managedby = None
        ctx.subdomain = False
        ctx.adminpass = None
        ctx.partition_dn = None

        ctx.dns_a_dn = None
        ctx.dns_cname_dn = None

        # Do not normally register 127. addresses but allow override for selftest
        ctx.force_all_ips = False
+
+ def del_noerror(ctx, dn, recursive=False):
+ if recursive:
+ try:
+ res = ctx.samdb.search(base=dn, scope=ldb.SCOPE_ONELEVEL, attrs=["dn"])
+ except Exception:
+ return
+ for r in res:
+ ctx.del_noerror(r.dn, recursive=True)
+ try:
+ ctx.samdb.delete(dn)
+ print("Deleted %s" % dn)
+ except Exception:
+ pass
+
+ def cleanup_old_accounts(ctx, force=False):
+ res = ctx.samdb.search(base=ctx.samdb.get_default_basedn(),
+ expression='sAMAccountName=%s' % ldb.binary_encode(ctx.samname),
+ attrs=["msDS-krbTgtLink", "objectSID"])
+ if len(res) == 0:
+ return
+
+ if not force:
+ creds = Credentials()
+ creds.guess(ctx.lp)
+ try:
+ creds.set_machine_account(ctx.lp)
+ creds.set_kerberos_state(ctx.creds.get_kerberos_state())
+ machine_samdb = SamDB(url="ldap://%s" % ctx.server,
+ session_info=system_session(),
+ credentials=creds, lp=ctx.lp)
+ except:
+ pass
+ else:
+ token_res = machine_samdb.search(scope=ldb.SCOPE_BASE, base="", attrs=["tokenGroups"])
+ if token_res[0]["tokenGroups"][0] \
+ == res[0]["objectSID"][0]:
+ raise DCJoinException("Not removing account %s which "
+ "looks like a Samba DC account "
+ "matching the password we already have. "
+ "To override, remove secrets.ldb and secrets.tdb"
+ % ctx.samname)
+
+ ctx.del_noerror(res[0].dn, recursive=True)
+
+ krbtgt_dn = res[0].get('msDS-KrbTgtLink', idx=0)
+ if krbtgt_dn is not None:
+ ctx.new_krbtgt_dn = krbtgt_dn
+ ctx.del_noerror(ctx.new_krbtgt_dn)
+
+ res = ctx.samdb.search(base=ctx.samdb.get_default_basedn(),
+ expression='(&(sAMAccountName=%s)(servicePrincipalName=%s))' %
+ (ldb.binary_encode("dns-%s" % ctx.myname),
+ ldb.binary_encode("dns/%s" % ctx.dnshostname)),
+ attrs=[])
+ if res:
+ ctx.del_noerror(res[0].dn, recursive=True)
+
+ res = ctx.samdb.search(base=ctx.samdb.get_default_basedn(),
+ expression='(sAMAccountName=%s)' % ldb.binary_encode("dns-%s" % ctx.myname),
+ attrs=[])
+ if res:
+ raise DCJoinException("Not removing account %s which looks like "
+ "a Samba DNS service account but does not "
+ "have servicePrincipalName=%s" %
+ (ldb.binary_encode("dns-%s" % ctx.myname),
+ ldb.binary_encode("dns/%s" % ctx.dnshostname)))
+
    def cleanup_old_join(ctx, force=False):
        """Remove any DNs from a previous join."""
        # find the krbtgt link
        if not ctx.subdomain:
            ctx.cleanup_old_accounts(force=force)

        # Remove the optional objects a previous join may have created.
        if ctx.connection_dn is not None:
            ctx.del_noerror(ctx.connection_dn)
        if ctx.krbtgt_dn is not None:
            ctx.del_noerror(ctx.krbtgt_dn)
        ctx.del_noerror(ctx.ntds_dn)
        ctx.del_noerror(ctx.server_dn, recursive=True)
        if ctx.topology_dn:
            ctx.del_noerror(ctx.topology_dn)
        if ctx.partition_dn:
            ctx.del_noerror(ctx.partition_dn)

        if ctx.subdomain:
            # A subdomain join also leaves trust objects behind: delete the
            # trusted-domain entries in both directions via LSA.
            binding_options = "sign"
            lsaconn = lsa.lsarpc("ncacn_ip_tcp:%s[%s]" % (ctx.server, binding_options),
                                 ctx.lp, ctx.creds)

            objectAttr = lsa.ObjectAttribute()
            objectAttr.sec_qos = lsa.QosInfo()

            pol_handle = lsaconn.OpenPolicy2('',
                                             objectAttr,
                                             security.SEC_FLAG_MAXIMUM_ALLOWED)

            # Delete the trust for our realm...
            name = lsa.String()
            name.string = ctx.realm
            info = lsaconn.QueryTrustedDomainInfoByName(pol_handle, name, lsa.LSA_TRUSTED_DOMAIN_INFO_FULL_INFO)

            lsaconn.DeleteTrustedDomain(pol_handle, info.info_ex.sid)

            # ...and for the forest root domain.
            name = lsa.String()
            name.string = ctx.forest_domain_name
            info = lsaconn.QueryTrustedDomainInfoByName(pol_handle, name, lsa.LSA_TRUSTED_DOMAIN_INFO_FULL_INFO)

            lsaconn.DeleteTrustedDomain(pol_handle, info.info_ex.sid)

        # Remove DNS records registered by a previous join, if recorded.
        if ctx.dns_a_dn:
            ctx.del_noerror(ctx.dns_a_dn)

        if ctx.dns_cname_dn:
            ctx.del_noerror(ctx.dns_cname_dn)
+
    def promote_possible(ctx):
        """confirm that the account is just a bare NT4 BDC or a member server, so can be safely promoted"""
        if ctx.subdomain:
            # This shouldn't happen
            raise Exception("Can not promote into a subdomain")

        res = ctx.samdb.search(base=ctx.samdb.get_default_basedn(),
                               expression='sAMAccountName=%s' % ldb.binary_encode(ctx.samname),
                               attrs=["msDS-krbTgtLink", "userAccountControl", "serverReferenceBL", "rIDSetReferences"])
        if len(res) == 0:
            raise Exception("Could not find domain member account '%s' to promote to a DC, use 'samba-tool domain join' instead'" % ctx.samname)
        # Any of these attributes indicates the account already backs an
        # active DC, which must not be overwritten by a promote.
        if "msDS-KrbTgtLink" in res[0] or "serverReferenceBL" in res[0] or "rIDSetReferences" in res[0]:
            raise Exception("Account '%s' appears to be an active DC, use 'samba-tool domain join' if you must re-create this account" % ctx.samname)
        # The account must be a workstation or (NT4 BDC) server trust account.
        if (int(res[0]["userAccountControl"][0]) & (samba.dsdb.UF_WORKSTATION_TRUST_ACCOUNT |
                                                    samba.dsdb.UF_SERVER_TRUST_ACCOUNT) == 0):
            raise Exception("Account %s is not a domain member or a bare NT4 BDC, use 'samba-tool domain join' instead'" % ctx.samname)

        # Remember the DN so join_add_objects() can rename/convert it.
        ctx.promote_from_dn = res[0].dn
+
+ def find_dc(ctx, domain):
+ """find a writeable DC for the given domain"""
+ try:
+ ctx.cldap_ret = ctx.net.finddc(domain=domain, flags=nbt.NBT_SERVER_LDAP | nbt.NBT_SERVER_DS | nbt.NBT_SERVER_WRITABLE)
+ except NTSTATUSError as error:
+ raise CommandError("Failed to find a writeable DC for domain '%s': %s" %
+ (domain, error.args[1]))
+ except Exception:
+ raise CommandError("Failed to find a writeable DC for domain '%s'" % domain)
+ if ctx.cldap_ret.client_site is not None and ctx.cldap_ret.client_site != "":
+ ctx.site = ctx.cldap_ret.client_site
+ return ctx.cldap_ret.pdc_dns_name
+
+ def find_dc_site(ctx, server):
+ site = None
+ cldap_ret = ctx.net.finddc(address=server,
+ flags=nbt.NBT_SERVER_LDAP | nbt.NBT_SERVER_DS)
+ if cldap_ret.client_site is not None and cldap_ret.client_site != "":
+ site = cldap_ret.client_site
+ return site
+
+ def get_behavior_version(ctx):
+ res = ctx.samdb.search(base=ctx.base_dn, scope=ldb.SCOPE_BASE, attrs=["msDS-Behavior-Version"])
+ if "msDS-Behavior-Version" in res[0]:
+ return int(res[0]["msDS-Behavior-Version"][0])
+ else:
+ return samba.dsdb.DS_DOMAIN_FUNCTION_2000
+
+ def get_dnsHostName(ctx):
+ res = ctx.samdb.search(base="", scope=ldb.SCOPE_BASE, attrs=["dnsHostName"])
+ return str(res[0]["dnsHostName"][0])
+
+ def get_domain_name(ctx):
+ """get netbios name of the domain from the partitions record"""
+ partitions_dn = ctx.samdb.get_partitions_dn()
+ res = ctx.samdb.search(base=partitions_dn, scope=ldb.SCOPE_ONELEVEL, attrs=["nETBIOSName"],
+ expression='ncName=%s' % ldb.binary_encode(str(ctx.samdb.get_default_basedn())))
+ return str(res[0]["nETBIOSName"][0])
+
+ def get_forest_domain_name(ctx):
+ """get netbios name of the domain from the partitions record"""
+ partitions_dn = ctx.samdb.get_partitions_dn()
+ res = ctx.samdb.search(base=partitions_dn, scope=ldb.SCOPE_ONELEVEL, attrs=["nETBIOSName"],
+ expression='ncName=%s' % ldb.binary_encode(str(ctx.samdb.get_root_basedn())))
+ return str(res[0]["nETBIOSName"][0])
+
+ def get_parent_partition_dn(ctx):
+ """get the parent domain partition DN from parent DNS name"""
+ res = ctx.samdb.search(base=ctx.config_dn, attrs=[],
+ expression='(&(objectclass=crossRef)(dnsRoot=%s)(systemFlags:%s:=%u))' %
+ (ldb.binary_encode(ctx.parent_dnsdomain),
+ ldb.OID_COMPARATOR_AND, samba.dsdb.SYSTEM_FLAG_CR_NTDS_DOMAIN))
+ return str(res[0].dn)
+
+ def get_mysid(ctx):
+ """get the SID of the connected user. Only works with w2k8 and later,
+ so only used for RODC join"""
+ res = ctx.samdb.search(base="", scope=ldb.SCOPE_BASE, attrs=["tokenGroups"])
+ binsid = res[0]["tokenGroups"][0]
+ return get_string(ctx.samdb.schema_format_value("objectSID", binsid))
+
+ def dn_exists(ctx, dn):
+ """check if a DN exists"""
+ try:
+ res = ctx.samdb.search(base=dn, scope=ldb.SCOPE_BASE, attrs=[])
+ except ldb.LdbError as e5:
+ (enum, estr) = e5.args
+ if enum == ldb.ERR_NO_SUCH_OBJECT:
+ return False
+ raise
+ return True
+
    def add_krbtgt_account(ctx):
        """RODCs need a special krbtgt account"""
        print("Adding %s" % ctx.krbtgt_dn)
        rec = {
            "dn": ctx.krbtgt_dn,
            "objectclass": "user",
            # Created disabled; it is activated by the server side logic.
            "useraccountcontrol": str(samba.dsdb.UF_NORMAL_ACCOUNT |
                                      samba.dsdb.UF_ACCOUNTDISABLE),
            "showinadvancedviewonly": "TRUE",
            "description": "krbtgt for %s" % ctx.samname}
        # The rodc_join control tells the server to apply the RODC-specific
        # krbtgt handling (assigning the krbtgt number).
        ctx.samdb.add(rec, ["rodc_join:1:1"])

        # now we need to search for the samAccountName attribute on the krbtgt DN,
        # as this will have been magically set to the krbtgt number
        res = ctx.samdb.search(base=ctx.krbtgt_dn, scope=ldb.SCOPE_BASE, attrs=["samAccountName"])
        ctx.krbtgt_name = res[0]["samAccountName"][0]

        print("Got krbtgt_name=%s" % ctx.krbtgt_name)

        # Link our machine account to its krbtgt account.
        m = ldb.Message()
        m.dn = ldb.Dn(ctx.samdb, ctx.acct_dn)
        m["msDS-krbTgtLink"] = ldb.MessageElement(ctx.krbtgt_dn,
                                                  ldb.FLAG_MOD_REPLACE, "msDS-krbTgtLink")
        ctx.samdb.modify(m)

        # Rename the account to carry the assigned krbtgt number.
        ctx.new_krbtgt_dn = "CN=%s,CN=Users,%s" % (ctx.krbtgt_name, ctx.base_dn)
        print("Renaming %s to %s" % (ctx.krbtgt_dn, ctx.new_krbtgt_dn))
        ctx.samdb.rename(ctx.krbtgt_dn, ctx.new_krbtgt_dn)
+
+ def drsuapi_connect(ctx):
+ """make a DRSUAPI connection to the naming master"""
+ binding_options = "seal"
+ if ctx.lp.log_level() >= 9:
+ binding_options += ",print"
+ binding_string = "ncacn_ip_tcp:%s[%s]" % (ctx.server, binding_options)
+ ctx.drsuapi = drsuapi.drsuapi(binding_string, ctx.lp, ctx.creds)
+ (ctx.drsuapi_handle, ctx.bind_supported_extensions) = drs_utils.drs_DsBind(ctx.drsuapi)
+
+ def create_tmp_samdb(ctx):
+ """create a temporary samdb object for schema queries"""
+ ctx.tmp_schema = Schema(ctx.domsid,
+ schemadn=ctx.schema_dn)
+ ctx.tmp_samdb = SamDB(session_info=system_session(), url=None, auto_connect=False,
+ credentials=ctx.creds, lp=ctx.lp, global_schema=False,
+ am_rodc=False)
+ ctx.tmp_samdb.set_schema(ctx.tmp_schema)
+
+ def DsAddEntry(ctx, recs):
+ """add a record via the DRSUAPI DsAddEntry call"""
+ if ctx.drsuapi is None:
+ ctx.drsuapi_connect()
+ if ctx.tmp_samdb is None:
+ ctx.create_tmp_samdb()
+
+ objects = []
+ for rec in recs:
+ id = drsuapi.DsReplicaObjectIdentifier()
+ id.dn = rec['dn']
+
+ attrs = []
+ for a in rec:
+ if a == 'dn':
+ continue
+ if not isinstance(rec[a], list):
+ v = [rec[a]]
+ else:
+ v = rec[a]
+ v = [x.encode('utf8') if isinstance(x, str) else x for x in v]
+ rattr = ctx.tmp_samdb.dsdb_DsReplicaAttribute(ctx.tmp_samdb, a, v)
+ attrs.append(rattr)
+
+ attribute_ctr = drsuapi.DsReplicaAttributeCtr()
+ attribute_ctr.num_attributes = len(attrs)
+ attribute_ctr.attributes = attrs
+
+ object = drsuapi.DsReplicaObject()
+ object.identifier = id
+ object.attribute_ctr = attribute_ctr
+
+ list_object = drsuapi.DsReplicaObjectListItem()
+ list_object.object = object
+ objects.append(list_object)
+
+ req2 = drsuapi.DsAddEntryRequest2()
+ req2.first_object = objects[0]
+ prev = req2.first_object
+ for o in objects[1:]:
+ prev.next_object = o
+ prev = o
+
+ (level, ctr) = ctx.drsuapi.DsAddEntry(ctx.drsuapi_handle, 2, req2)
+ if level == 2:
+ if ctr.dir_err != drsuapi.DRSUAPI_DIRERR_OK:
+ print("DsAddEntry failed with dir_err %u" % ctr.dir_err)
+ raise RuntimeError("DsAddEntry failed")
+ if ctr.extended_err[0] != werror.WERR_SUCCESS:
+ print("DsAddEntry failed with status %s info %s" % (ctr.extended_err))
+ raise RuntimeError("DsAddEntry failed")
+ if level == 3:
+ if ctr.err_ver != 1:
+ raise RuntimeError("expected err_ver 1, got %u" % ctr.err_ver)
+ if ctr.err_data.status[0] != werror.WERR_SUCCESS:
+ if ctr.err_data.info is None:
+ print("DsAddEntry failed with status %s, info omitted" % (ctr.err_data.status[1]))
+ else:
+ print("DsAddEntry failed with status %s info %s" % (ctr.err_data.status[1],
+ ctr.err_data.info.extended_err))
+ raise RuntimeError("DsAddEntry failed")
+ if ctr.err_data.dir_err != drsuapi.DRSUAPI_DIRERR_OK:
+ print("DsAddEntry failed with dir_err %u" % ctr.err_data.dir_err)
+ raise RuntimeError("DsAddEntry failed")
+
+ return ctr.objects
+
    def join_ntdsdsa_obj(ctx):
        """return the ntdsdsa object to add"""

        print("Adding %s" % ctx.ntds_dn)

        # When joining Windows, the order of certain attributes (mostly only
        # msDS-HasMasterNCs and HasMasterNCs) seems to matter
        rec = OrderedDict([
            ("dn", ctx.ntds_dn),
            ("objectclass", "nTDSDSA"),
            ("systemFlags", str(samba.dsdb.SYSTEM_FLAG_DISALLOW_MOVE_ON_DELETE)),
            ("dMDLocation", ctx.schema_dn)])

        nc_list = [ctx.base_dn, ctx.config_dn, ctx.schema_dn]

        if ctx.behavior_version >= samba.dsdb.DS_DOMAIN_FUNCTION_2003:
            # This allows an override via smb.conf or --option using
            # "ad dc functional level" to make us seem like 2016 to
            # join such a domain for (say) a migration, or to test the
            # partially implemented 2016 support.
            domainControllerFunctionality = functional_level.dc_level_from_lp(ctx.lp)
            rec["msDS-Behavior-Version"] = str(domainControllerFunctionality)

        if ctx.behavior_version >= samba.dsdb.DS_DOMAIN_FUNCTION_2003:
            rec["msDS-HasDomainNCs"] = ctx.base_dn

        if ctx.RODC:
            rec["objectCategory"] = "CN=NTDS-DSA-RO,%s" % ctx.schema_dn
            rec["msDS-HasFullReplicaNCs"] = ctx.full_nc_list
            rec["options"] = "37"
        else:
            rec["objectCategory"] = "CN=NTDS-DSA,%s" % ctx.schema_dn

        # Note that Windows seems to have an undocumented requirement that
        # the msDS-HasMasterNCs attribute occurs before HasMasterNCs
        if ctx.behavior_version >= samba.dsdb.DS_DOMAIN_FUNCTION_2003:
            rec["msDS-HasMasterNCs"] = ctx.full_nc_list

        rec["HasMasterNCs"] = []
        for nc in nc_list:
            if nc in ctx.full_nc_list:
                rec["HasMasterNCs"].append(nc)

        # NOTE(review): this unconditionally overwrites the RODC
        # "options" of "37" set above — verify against upstream whether
        # these trailing assignments belong inside the else branch.
        rec["options"] = "1"
        rec["invocationId"] = ndr_pack(ctx.invocation_id)

        return rec
+
+ def join_add_ntdsdsa(ctx):
+ """add the ntdsdsa object"""
+
+ rec = ctx.join_ntdsdsa_obj()
+ if ctx.forced_local_samdb:
+ ctx.samdb.add(rec, controls=["relax:0"])
+ elif ctx.RODC:
+ ctx.samdb.add(rec, ["rodc_join:1:1"])
+ else:
+ ctx.DsAddEntry([rec])
+
+ # find the GUID of our NTDS DN
+ res = ctx.samdb.search(base=ctx.ntds_dn, scope=ldb.SCOPE_BASE, attrs=["objectGUID"])
+ ctx.ntds_guid = misc.GUID(ctx.samdb.schema_format_value("objectGUID", res[0]["objectGUID"][0]))
+
    def join_add_objects(ctx, specified_sid=None):
        """add the various objects needed for the join

        Creates (in order): the machine account, the optional RODC krbtgt
        account, the server object, the nTDSDSA object, replica-location
        attributes, the nTDSConnection, SPNs, the machine password, and the
        optional BIND9 DNS account.  The order of these operations matters.
        """
        if ctx.acct_dn:
            print("Adding %s" % ctx.acct_dn)
            rec = {
                "dn": ctx.acct_dn,
                "objectClass": "computer",
                "displayname": ctx.samname,
                "samaccountname": ctx.samname,
                # Disabled until the password has been set below.
                "userAccountControl": str(ctx.userAccountControl | samba.dsdb.UF_ACCOUNTDISABLE),
                "dnshostname": ctx.dnshostname}
            if ctx.behavior_version >= samba.dsdb.DS_DOMAIN_FUNCTION_2008:
                rec['msDS-SupportedEncryptionTypes'] = str(samba.dsdb.ENC_ALL_TYPES)
            elif ctx.promote_existing:
                # Empty list clears the attribute on the existing account.
                rec['msDS-SupportedEncryptionTypes'] = []
            if ctx.managedby:
                rec["managedby"] = ctx.managedby
            elif ctx.promote_existing:
                rec["managedby"] = []

            if ctx.never_reveal_sid:
                rec["msDS-NeverRevealGroup"] = ctx.never_reveal_sid
            elif ctx.promote_existing:
                rec["msDS-NeverRevealGroup"] = []

            if ctx.reveal_sid:
                rec["msDS-RevealOnDemandGroup"] = ctx.reveal_sid
            elif ctx.promote_existing:
                rec["msDS-RevealOnDemandGroup"] = []

            if specified_sid:
                rec["objectSid"] = ndr_pack(specified_sid)

            if ctx.promote_existing:
                # Reuse the existing member account: rename it into the
                # Domain Controllers OU and replace its attributes.
                if ctx.promote_from_dn != ctx.acct_dn:
                    ctx.samdb.rename(ctx.promote_from_dn, ctx.acct_dn)
                ctx.samdb.modify(ldb.Message.from_dict(ctx.samdb, rec, ldb.FLAG_MOD_REPLACE))
            else:
                controls = None
                if specified_sid is not None:
                    # Setting an explicit SID needs the relax control.
                    controls = ["relax:0"]
                ctx.samdb.add(rec, controls=controls)

        if ctx.krbtgt_dn:
            ctx.add_krbtgt_account()

        if ctx.server_dn:
            print("Adding %s" % ctx.server_dn)
            rec = {
                "dn": ctx.server_dn,
                "objectclass": "server",
                # windows uses 50000000 decimal for systemFlags. A windows hex/decimal mixup bug?
                "systemFlags": str(samba.dsdb.SYSTEM_FLAG_CONFIG_ALLOW_RENAME |
                                   samba.dsdb.SYSTEM_FLAG_CONFIG_ALLOW_LIMITED_MOVE |
                                   samba.dsdb.SYSTEM_FLAG_DISALLOW_MOVE_ON_DELETE),
                # windows seems to add the dnsHostName later
                "dnsHostName": ctx.dnshostname}

            if ctx.acct_dn:
                rec["serverReference"] = ctx.acct_dn

            ctx.samdb.add(rec)

        if ctx.subdomain:
            # the rest is done after replication
            ctx.ntds_guid = None
            return

        if ctx.ntds_dn:
            ctx.join_add_ntdsdsa()

        # Add the Replica-Locations or RO-Replica-Locations attributes
        # TODO Is this supposed to be for the schema partition too?
        expr = "(&(objectClass=crossRef)(ncName=%s))" % ldb.binary_encode(ctx.domaindns_zone)
        domain = (ctx.samdb.search(scope=ldb.SCOPE_ONELEVEL,
                                   attrs=[],
                                   base=ctx.samdb.get_partitions_dn(),
                                   expression=expr), ctx.domaindns_zone)

        expr = "(&(objectClass=crossRef)(ncName=%s))" % ldb.binary_encode(ctx.forestdns_zone)
        forest = (ctx.samdb.search(scope=ldb.SCOPE_ONELEVEL,
                                   attrs=[],
                                   base=ctx.samdb.get_partitions_dn(),
                                   expression=expr), ctx.forestdns_zone)

        for part, zone in (domain, forest):
            if zone not in ctx.nc_list:
                continue

            if len(part) == 1:
                m = ldb.Message()
                m.dn = part[0].dn
                attr = "msDS-NC-Replica-Locations"
                if ctx.RODC:
                    attr = "msDS-NC-RO-Replica-Locations"

                m[attr] = ldb.MessageElement(ctx.ntds_dn,
                                             ldb.FLAG_MOD_ADD, attr)
                ctx.samdb.modify(m)

        if ctx.connection_dn is not None:
            print("Adding %s" % ctx.connection_dn)
            rec = {
                "dn": ctx.connection_dn,
                "objectclass": "nTDSConnection",
                "enabledconnection": "TRUE",
                "options": "65",
                "fromServer": ctx.dc_ntds_dn}
            ctx.samdb.add(rec)

        if ctx.acct_dn:
            print("Adding SPNs to %s" % ctx.acct_dn)
            m = ldb.Message()
            m.dn = ldb.Dn(ctx.samdb, ctx.acct_dn)
            # Substitute the NTDS GUID (known only now) into the SPN list.
            for i in range(len(ctx.SPNs)):
                ctx.SPNs[i] = ctx.SPNs[i].replace("$NTDSGUID", str(ctx.ntds_guid))
            m["servicePrincipalName"] = ldb.MessageElement(ctx.SPNs,
                                                           ldb.FLAG_MOD_REPLACE,
                                                           "servicePrincipalName")
            ctx.samdb.modify(m)

            # The account password set operation should normally be done over
            # LDAP. Windows 2000 DCs however allow this only with SSL
            # connections which are hard to set up and otherwise refuse with
            # ERR_UNWILLING_TO_PERFORM. In this case we fall back to libnet
            # over SAMR.
            print("Setting account password for %s" % ctx.samname)
            try:
                ctx.samdb.setpassword("(&(objectClass=user)(sAMAccountName=%s))"
                                      % ldb.binary_encode(ctx.samname),
                                      ctx.acct_pass,
                                      force_change_at_next_login=False,
                                      username=ctx.samname)
            except ldb.LdbError as e2:
                (num, _) = e2.args
                if num != ldb.ERR_UNWILLING_TO_PERFORM:
                    raise
                ctx.net.set_password(account_name=ctx.samname,
                                     domain_name=ctx.domain_name,
                                     newpassword=ctx.acct_pass)

            res = ctx.samdb.search(base=ctx.acct_dn, scope=ldb.SCOPE_BASE,
                                   attrs=["msDS-KeyVersionNumber",
                                          "objectSID"])
            if "msDS-KeyVersionNumber" in res[0]:
                ctx.key_version_number = int(res[0]["msDS-KeyVersionNumber"][0])
            else:
                ctx.key_version_number = None

            ctx.new_dc_account_sid = ndr_unpack(security.dom_sid,
                                                res[0]["objectSid"][0])

            # Password is set; the account can now be enabled.
            print("Enabling account")
            m = ldb.Message()
            m.dn = ldb.Dn(ctx.samdb, ctx.acct_dn)
            m["userAccountControl"] = ldb.MessageElement(str(ctx.userAccountControl),
                                                         ldb.FLAG_MOD_REPLACE,
                                                         "userAccountControl")
            ctx.samdb.modify(m)

        if ctx.dns_backend.startswith("BIND9_"):
            ctx.dnspass = samba.generate_random_password(128, 255)

            recs = ctx.samdb.parse_ldif(read_and_sub_file(setup_path("provision_dns_add_samba.ldif"),
                                                          {"DNSDOMAIN": ctx.dnsdomain,
                                                           "DOMAINDN": ctx.base_dn,
                                                           "HOSTNAME": ctx.myname,
                                                           "DNSPASS_B64": b64encode(ctx.dnspass.encode('utf-16-le')).decode('utf8'),
                                                           "DNSNAME": ctx.dnshostname}))
            for changetype, msg in recs:
                assert changetype == ldb.CHANGETYPE_NONE
                dns_acct_dn = msg["dn"]
                print("Adding DNS account %s with dns/ SPN" % msg["dn"])

                # Remove dns password (we will set it as a modify, as we can't do clearTextPassword over LDAP)
                del msg["clearTextPassword"]
                # Remove isCriticalSystemObject for similar reasons, it cannot be set over LDAP
                del msg["isCriticalSystemObject"]
                # Disable account until password is set
                msg["userAccountControl"] = str(samba.dsdb.UF_NORMAL_ACCOUNT |
                                                samba.dsdb.UF_ACCOUNTDISABLE)
                try:
                    ctx.samdb.add(msg)
                except ldb.LdbError as e:
                    (num, _) = e.args
                    if num != ldb.ERR_ENTRY_ALREADY_EXISTS:
                        raise

            # The account password set operation should normally be done over
            # LDAP. Windows 2000 DCs however allow this only with SSL
            # connections which are hard to set up and otherwise refuse with
            # ERR_UNWILLING_TO_PERFORM. In this case we fall back to libnet
            # over SAMR.
            print("Setting account password for dns-%s" % ctx.myname)
            try:
                # NOTE(review): username here is the machine account
                # (ctx.samname), not the dns-* account being changed —
                # presumably the identity doing the set; verify.
                ctx.samdb.setpassword("(&(objectClass=user)(samAccountName=dns-%s))"
                                      % ldb.binary_encode(ctx.myname),
                                      ctx.dnspass,
                                      force_change_at_next_login=False,
                                      username=ctx.samname)
            except ldb.LdbError as e3:
                (num, _) = e3.args
                if num != ldb.ERR_UNWILLING_TO_PERFORM:
                    raise
                ctx.net.set_password(account_name="dns-%s" % ctx.myname,
                                     domain_name=ctx.domain_name,
                                     newpassword=ctx.dnspass)

            res = ctx.samdb.search(base=dns_acct_dn, scope=ldb.SCOPE_BASE,
                                   attrs=["msDS-KeyVersionNumber"])
            if "msDS-KeyVersionNumber" in res[0]:
                ctx.dns_key_version_number = int(res[0]["msDS-KeyVersionNumber"][0])
            else:
                ctx.dns_key_version_number = None
+
    def join_add_objects2(ctx):
        """add the various objects needed for the join, for subdomains post replication"""

        print("Adding %s" % ctx.partition_dn)
        name_map = {'SubdomainAdmins': "%s-%s" % (str(ctx.domsid), security.DOMAIN_RID_ADMINS)}
        sd_binary = descriptor.get_paritions_crossref_subdomain_descriptor(ctx.forestsid, name_map=name_map)
        # The crossRef that introduces the new domain partition into the
        # forest's partitions container.
        rec = {
            "dn": ctx.partition_dn,
            "objectclass": "crossRef",
            "objectCategory": "CN=Cross-Ref,%s" % ctx.schema_dn,
            "nCName": ctx.base_dn,
            "nETBIOSName": ctx.domain_name,
            "dnsRoot": ctx.dnsdomain,
            "trustParent": ctx.parent_partition_dn,
            "systemFlags": str(samba.dsdb.SYSTEM_FLAG_CR_NTDS_NC |samba.dsdb.SYSTEM_FLAG_CR_NTDS_DOMAIN),
            "ntSecurityDescriptor": sd_binary,
        }

        if ctx.behavior_version >= samba.dsdb.DS_DOMAIN_FUNCTION_2003:
            rec["msDS-Behavior-Version"] = str(ctx.behavior_version)

        rec2 = ctx.join_ntdsdsa_obj()

        # Both objects must be created server-side via DRSUAPI DsAddEntry.
        objects = ctx.DsAddEntry([rec, rec2])
        if len(objects) != 2:
            raise DCJoinException("Expected 2 objects from DsAddEntry")

        ctx.ntds_guid = objects[1].guid

        # Pull the freshly created objects down to the local database.
        print("Replicating partition DN")
        ctx.repl.replicate(ctx.partition_dn,
                           misc.GUID("00000000-0000-0000-0000-000000000000"),
                           ctx.ntds_guid,
                           exop=drsuapi.DRSUAPI_EXOP_REPL_OBJ,
                           replica_flags=drsuapi.DRSUAPI_DRS_WRIT_REP)

        print("Replicating NTDS DN")
        ctx.repl.replicate(ctx.ntds_dn,
                           misc.GUID("00000000-0000-0000-0000-000000000000"),
                           ctx.ntds_guid,
                           exop=drsuapi.DRSUAPI_EXOP_REPL_OBJ,
                           replica_flags=drsuapi.DRSUAPI_DRS_WRIT_REP)
+
    def join_provision(ctx):
        """Provision the local SAM.

        Runs a bare (FILL_DRS) provision into targetdir using the names and
        SIDs gathered from the source DC, then records the resulting samdb,
        loadparm, paths and names on the context for the replication step.
        """

        print("Calling bare provision")

        smbconf = ctx.lp.configfile

        presult = provision(ctx.logger, system_session(), smbconf=smbconf,
                            targetdir=ctx.targetdir, samdb_fill=FILL_DRS, realm=ctx.realm,
                            rootdn=ctx.root_dn, domaindn=ctx.base_dn,
                            schemadn=ctx.schema_dn, configdn=ctx.config_dn,
                            serverdn=ctx.server_dn, domain=ctx.domain_name,
                            hostname=ctx.myname, domainsid=ctx.domsid,
                            machinepass=ctx.acct_pass, serverrole="active directory domain controller",
                            sitename=ctx.site, lp=ctx.lp, ntdsguid=ctx.ntds_guid,
                            use_ntvfs=ctx.use_ntvfs, dns_backend=ctx.dns_backend,
                            plaintext_secrets=ctx.plaintext_secrets,
                            backend_store=ctx.backend_store,
                            backend_store_size=ctx.backend_store_size,
                            batch_mode=True)
        print("Provision OK for domain DN %s" % presult.domaindn)
        ctx.local_samdb = presult.samdb
        ctx.lp = presult.lp
        ctx.paths = presult.paths
        ctx.names = presult.names

        # Fix up the forestsid, it may be different if we are joining as a subdomain
        ctx.names.forestsid = ctx.forestsid
+
    def join_provision_own_domain(ctx):
        """Provision the local SAM.

        Used for subdomain joins: re-opens the local database with the
        replicated schema, discovers the domain GUID, fills in the
        subdomain-specific provision data and runs the domain updates.
        """

        # we now operate exclusively on the local database, which
        # we need to reopen in order to get the newly created schema
        # we set the transaction_index_cache_size to 200,000 to ensure it is
        # not too small, if it's too small the performance of the join will
        # be negatively impacted.
        print("Reconnecting to local samdb")
        ctx.samdb = SamDB(url=ctx.local_samdb.url,
                          options=[
                              "transaction_index_cache_size:200000"],
                          session_info=system_session(),
                          lp=ctx.local_samdb.lp,
                          global_schema=False)
        ctx.samdb.set_invocation_id(str(ctx.invocation_id))
        ctx.local_samdb = ctx.samdb

        ctx.logger.info("Finding domain GUID from ncName")
        # extended_dn control exposes the GUID component of the ncName link.
        res = ctx.local_samdb.search(base=ctx.partition_dn, scope=ldb.SCOPE_BASE, attrs=['ncName'],
                                     controls=["extended_dn:1:1", "reveal_internals:0"])

        if 'nCName' not in res[0]:
            raise DCJoinException("Can't find naming context on partition DN %s in %s" % (ctx.partition_dn, ctx.samdb.url))

        try:
            ctx.names.domainguid = str(misc.GUID(ldb.Dn(ctx.samdb, res[0]['ncName'][0].decode('utf8')).get_extended_component('GUID')))
        except KeyError:
            raise DCJoinException("Can't find GUID in naming master on partition DN %s" % res[0]['ncName'][0])

        ctx.logger.info("Got domain GUID %s" % ctx.names.domainguid)

        ctx.logger.info("Calling own domain provision")

        secrets_ldb = Ldb(ctx.paths.secrets, session_info=system_session(), lp=ctx.lp)

        provision_fill(ctx.local_samdb, secrets_ldb,
                       ctx.logger, ctx.names, ctx.paths,
                       dom_for_fun_level=ctx.behavior_version,
                       samdb_fill=FILL_SUBDOMAIN,
                       machinepass=ctx.acct_pass, serverrole="active directory domain controller",
                       lp=ctx.lp, hostip=ctx.names.hostip, hostip6=ctx.names.hostip6,
                       dns_backend=ctx.dns_backend, adminpass=ctx.adminpass)

        if ctx.behavior_version >= samba.dsdb.DS_DOMAIN_FUNCTION_2012:
            adprep_level = ctx.behavior_version

            # Domain updates need schema updates enabled; override the
            # setting temporarily if the admin has not configured it.
            updates_allowed_overridden = False
            if ctx.lp.get("dsdb:schema update allowed") is None:
                ctx.lp.set("dsdb:schema update allowed", "yes")
                print("Temporarily overriding 'dsdb:schema update allowed' setting")
                updates_allowed_overridden = True

            ctx.samdb.transaction_start()
            try:
                from samba.domain_update import DomainUpdate

                domain = DomainUpdate(ctx.local_samdb, fix=True)
                domain.check_updates_functional_level(adprep_level,
                                                      samba.dsdb.DS_DOMAIN_FUNCTION_2008,
                                                      update_revision=True)

                ctx.samdb.transaction_commit()
            except Exception as e:
                ctx.samdb.transaction_cancel()
                raise DCJoinException("DomainUpdate() failed: %s" % e)

            if updates_allowed_overridden:
                ctx.lp.set("dsdb:schema update allowed", "no")

        print("Provision OK for domain %s" % ctx.names.dnsdomain)
+
+ def create_replicator(ctx, repl_creds, binding_options):
+ """Creates a new DRS object for managing replications"""
+ return drs_utils.drs_Replicate(
+ "ncacn_ip_tcp:%s[%s]" % (ctx.server, binding_options),
+ ctx.lp, repl_creds, ctx.local_samdb, ctx.invocation_id)
+
+    def join_replicate(ctx):
+        """Replicate the SAM from the DC we are joining.
+
+        Pulls the schema, configuration and (unless creating a subdomain)
+        domain partitions over DRS, plus any DNS application partitions we
+        host, RODC secrets, and a RID Set -- all inside one local
+        transaction so linked attributes are applied at the very end.
+        """
+
+        ctx.logger.info("Starting replication")
+
+        # A global transaction is started so that linked attributes
+        # are applied at the very end, once all partitions are
+        # replicated. This helps get all cross-partition links.
+        ctx.local_samdb.transaction_start()
+        try:
+            source_dsa_invocation_id = misc.GUID(ctx.samdb.get_invocation_id())
+            if ctx.ntds_guid is None:
+                # We have no NTDS object of our own (e.g. when cloning), so
+                # use the well-known W2K3 bind GUID as the destination DSA
+                print("Using DS_BIND_GUID_W2K3")
+                destination_dsa_guid = misc.GUID(drsuapi.DRSUAPI_DS_BIND_GUID_W2K3)
+            else:
+                destination_dsa_guid = ctx.ntds_guid
+
+            if ctx.RODC:
+                # RODCs replicate with their own freshly-set machine account;
+                # Kerberos is avoided as the KDC may not yet know the new
+                # password.
+                repl_creds = Credentials()
+                repl_creds.guess(ctx.lp)
+                repl_creds.set_kerberos_state(DONT_USE_KERBEROS)
+                repl_creds.set_username(ctx.samname)
+                repl_creds.set_password(ctx.acct_pass)
+            else:
+                repl_creds = ctx.creds
+
+            binding_options = "seal"
+            if ctx.lp.log_level() >= 9:
+                binding_options += ",print"
+
+            repl = ctx.create_replicator(repl_creds, binding_options)
+
+            repl.replicate(ctx.schema_dn, source_dsa_invocation_id,
+                           destination_dsa_guid, schema=True, rodc=ctx.RODC,
+                           replica_flags=ctx.replica_flags)
+            repl.replicate(ctx.config_dn, source_dsa_invocation_id,
+                           destination_dsa_guid, rodc=ctx.RODC,
+                           replica_flags=ctx.replica_flags)
+            if not ctx.subdomain:
+                # Replicate first the critical objects for the basedn
+
+                # We do this to match Windows. The default case is to
+                # do a critical objects replication, then a second
+                # with all objects.
+
+                print("Replicating critical objects from the base DN of the domain")
+                try:
+                    repl.replicate(ctx.base_dn, source_dsa_invocation_id,
+                                   destination_dsa_guid, rodc=ctx.RODC,
+                                   replica_flags=ctx.domain_replica_flags | drsuapi.DRSUAPI_DRS_CRITICAL_ONLY)
+                except WERRORError as e:
+
+                    if e.args[0] == werror.WERR_DS_DRA_MISSING_PARENT:
+                        ctx.logger.warning("First pass of replication with "
+                                           "DRSUAPI_DRS_CRITICAL_ONLY "
+                                           "not possible due to a missing parent object. "
+                                           "This is typical of a Samba "
+                                           "4.5 or earlier server. "
+                                           "We will replicate all the objects instead.")
+                    else:
+                        raise
+
+                # Now replicate all the objects in the domain (unless
+                # we were run with --critical-only).
+                #
+                # Doing the replication of users as a second pass
+                # matches more closely the Windows behaviour, which is
+                # actually to do this on first startup.
+                #
+                # Use --critical-only if you want that (but you don't
+                # really, it is better to see any errors here).
+                if not ctx.domain_replica_flags & drsuapi.DRSUAPI_DRS_CRITICAL_ONLY:
+                    try:
+                        repl.replicate(ctx.base_dn, source_dsa_invocation_id,
+                                       destination_dsa_guid, rodc=ctx.RODC,
+                                       replica_flags=ctx.domain_replica_flags)
+                    except WERRORError as e:
+
+                        # NOTE(review): this guard can never be true -- we only
+                        # reach this branch when CRITICAL_ONLY is *not* set in
+                        # ctx.domain_replica_flags, so the warning is dead code
+                        # and the exception is always re-raised below.
+                        if e.args[0] == werror.WERR_DS_DRA_MISSING_PARENT and \
+                           ctx.domain_replica_flags & drsuapi.DRSUAPI_DRS_CRITICAL_ONLY:
+                            ctx.logger.warning("Replication with DRSUAPI_DRS_CRITICAL_ONLY "
+                                               "failed due to a missing parent object. "
+                                               "This may be a Samba 4.5 or earlier server "
+                                               "and is not compatible with --critical-only")
+                        raise
+
+            print("Done with always replicated NC (base, config, schema)")
+
+            # At this point we should already have an entry in the ForestDNS
+            # and DomainDNS NC (those under CN=Partitions,DC=...) in order to
+            # indicate that we hold a replica for this NC.
+            for nc in (ctx.domaindns_zone, ctx.forestdns_zone):
+                if nc in ctx.nc_list:
+                    print("Replicating %s" % (str(nc)))
+                    repl.replicate(nc, source_dsa_invocation_id,
+                                   destination_dsa_guid, rodc=ctx.RODC,
+                                   replica_flags=ctx.replica_flags)
+
+            if ctx.RODC:
+                # RODCs pull their own machine and per-RODC krbtgt secrets
+                # via the REPL_SECRET extended operation
+                repl.replicate(ctx.acct_dn, source_dsa_invocation_id,
+                               destination_dsa_guid,
+                               exop=drsuapi.DRSUAPI_EXOP_REPL_SECRET, rodc=True)
+                repl.replicate(ctx.new_krbtgt_dn, source_dsa_invocation_id,
+                               destination_dsa_guid,
+                               exop=drsuapi.DRSUAPI_EXOP_REPL_SECRET, rodc=True)
+            elif ctx.rid_manager_dn is not None:
+                # Try and get a RID Set if we can. This is only possible against the RID Master. Warn otherwise.
+                try:
+                    repl.replicate(ctx.rid_manager_dn, source_dsa_invocation_id,
+                                   destination_dsa_guid,
+                                   exop=drsuapi.DRSUAPI_EXOP_FSMO_RID_ALLOC)
+                except samba.DsExtendedError as e1:
+                    (enum, estr) = e1.args
+                    if enum == drsuapi.DRSUAPI_EXOP_ERR_FSMO_NOT_OWNER:
+                        print("WARNING: Unable to replicate own RID Set, as server %s (the server we joined) is not the RID Master." % ctx.server)
+                        print("NOTE: This is normal and expected, Samba will be able to create users after it contacts the RID Master at first startup.")
+                    else:
+                        raise
+
+            # keep these around for join_replicate_new_dns_records()
+            ctx.repl = repl
+            ctx.source_dsa_invocation_id = source_dsa_invocation_id
+            ctx.destination_dsa_guid = destination_dsa_guid
+
+            ctx.logger.info("Committing SAM database - this may take some time")
+        except:
+            ctx.local_samdb.transaction_cancel()
+            raise
+        else:
+
+            # This is a special case, we have completed a full
+            # replication so if a link comes to us that points to a
+            # deleted object, and we asked for all objects already, we
+            # just have to ignore it, the chance to re-try the
+            # replication with GET_TGT has long gone. This can happen
+            # if the object is deleted and sent to us after the link
+            # was sent, as we are processing all links in the
+            # transaction_commit().
+            if not ctx.domain_replica_flags & drsuapi.DRSUAPI_DRS_CRITICAL_ONLY:
+                ctx.local_samdb.set_opaque_integer(dsdb.DSDB_FULL_JOIN_REPLICATION_COMPLETED_OPAQUE_NAME,
+                                                   1)
+            ctx.local_samdb.transaction_commit()
+            ctx.local_samdb.set_opaque_integer(dsdb.DSDB_FULL_JOIN_REPLICATION_COMPLETED_OPAQUE_NAME,
+                                               0)
+            ctx.logger.info("Committed SAM database")
+
+        # A large replication may have caused our LDB connection to the
+        # remote DC to timeout, so check the connection is still alive
+        ctx.refresh_ldb_connection()
+
+ def refresh_ldb_connection(ctx):
+ try:
+ # query the rootDSE to check the connection
+ ctx.samdb.search(scope=ldb.SCOPE_BASE, attrs=[])
+ except ldb.LdbError as e:
+ (enum, estr) = e.args
+
+ # if the connection was disconnected, then reconnect
+ if (enum == ldb.ERR_OPERATIONS_ERROR and
+ ('NT_STATUS_CONNECTION_DISCONNECTED' in estr or
+ 'NT_STATUS_CONNECTION_RESET' in estr)):
+ ctx.logger.warning("LDB connection disconnected. Reconnecting")
+ ctx.samdb = SamDB(url="ldap://%s" % ctx.server,
+ session_info=system_session(),
+ credentials=ctx.creds, lp=ctx.lp)
+ else:
+ raise DCJoinException(estr)
+
+    def send_DsReplicaUpdateRefs(ctx, dn):
+        """Ask the DC we joined to update its replication references for *dn*.
+
+        Sends a level-1 DsReplicaUpdateRefs request identifying ourselves
+        by NTDS GUID, replacing any stale reference in the same call.
+        """
+        r = drsuapi.DsReplicaUpdateRefsRequest1()
+        r.naming_context = drsuapi.DsReplicaObjectIdentifier()
+        r.naming_context.dn = str(dn)
+        # the NC is identified by DN only; GUID/SID are left as null values
+        r.naming_context.guid = misc.GUID("00000000-0000-0000-0000-000000000000")
+        r.naming_context.sid = security.dom_sid("S-0-0")
+        r.dest_dsa_guid = ctx.ntds_guid
+        r.dest_dsa_dns_name = "%s._msdcs.%s" % (str(ctx.ntds_guid), ctx.dnsforest)
+        # DEL_REF + ADD_REF together replace any existing reference for us
+        r.options = drsuapi.DRSUAPI_DRS_ADD_REF | drsuapi.DRSUAPI_DRS_DEL_REF
+        if not ctx.RODC:
+            r.options |= drsuapi.DRSUAPI_DRS_WRIT_REP
+
+        # connect lazily -- the join may not have needed drsuapi yet
+        if ctx.drsuapi is None:
+            ctx.drsuapi_connect()
+
+        ctx.drsuapi.DsReplicaUpdateRefs(ctx.drsuapi_handle, 1, r)
+
+    def join_add_dns_records(ctx):
+        """Remotely Add a DNS record to the target DC. We assume that if we
+        replicate DNS that the server holds the DNS roles and can accept
+        updates.
+
+        This avoids issues getting replication going after the DC
+        first starts as the rest of the domain does not have to
+        wait for samba_dnsupdate to run successfully.
+
+        Specifically, we add the records implied by the DsReplicaUpdateRefs
+        call above.
+
+        We do not just run samba_dnsupdate as we want to strictly
+        operate against the DC we just joined:
+         - We do not want to query another DNS server
+         - We do not want to obtain a Kerberos ticket
+           (as the KDC we select may not be the DC we just joined,
+           and so may not be in sync with the password we just set)
+         - We do not wish to set the _ldap records until we have started
+         - We do not wish to use NTLM (the --use-samba-tool mode forces
+           NTLM)
+
+        """
+
+        client_version = dnsserver.DNS_CLIENT_VERSION_LONGHORN
+        select_flags = dnsserver.DNS_RPC_VIEW_AUTHORITY_DATA |\
+            dnsserver.DNS_RPC_VIEW_NO_CHILDREN
+
+        zone = ctx.dnsdomain
+        msdcs_zone = "_msdcs.%s" % ctx.dnsforest
+        name = ctx.myname
+        # the CNAME used by replication partners: <ntds-guid>._msdcs.<forest>
+        msdcs_cname = str(ctx.ntds_guid)
+        cname_target = "%s.%s" % (name, zone)
+        IPs = samba.interface_ips(ctx.lp, ctx.force_all_ips)
+
+        ctx.logger.info("Adding %d remote DNS records for %s.%s" %
+                        (len(IPs), name, zone))
+
+        binding_options = "sign"
+        dns_conn = dnsserver.dnsserver("ncacn_ip_tcp:%s[%s]" % (ctx.server, binding_options),
+                                       ctx.lp, ctx.creds)
+
+        name_found = True
+
+        sd_helper = sd_utils.SDUtils(ctx.samdb)
+
+        # descriptor used below to hand ownership of the new records to our
+        # machine account, with Domain Controllers as the owning group
+        change_owner_sd = security.descriptor()
+        change_owner_sd.owner_sid = ctx.new_dc_account_sid
+        change_owner_sd.group_sid = security.dom_sid("%s-%d" %
+                                                     (str(ctx.domsid),
+                                                      security.DOMAIN_RID_DCS))
+
+        # TODO: Remove any old records from the primary DNS name
+        try:
+            (buflen, res) \
+                = dns_conn.DnssrvEnumRecords2(client_version,
+                                              0,
+                                              ctx.server,
+                                              zone,
+                                              name,
+                                              None,
+                                              dnsp.DNS_TYPE_ALL,
+                                              select_flags,
+                                              None,
+                                              None)
+        except WERRORError as e:
+            if e.args[0] == werror.WERR_DNS_ERROR_NAME_DOES_NOT_EXIST:
+                name_found = False
+            # NOTE(review): any other WERROR code is silently swallowed here
+            # while name_found stays True, which would leave 'res' unbound
+            # below -- confirm whether those errors should be re-raised
+
+        if name_found:
+            for rec in res.rec:
+                for record in rec.records:
+                    if record.wType == dnsp.DNS_TYPE_A or \
+                       record.wType == dnsp.DNS_TYPE_AAAA:
+                        # delete record
+                        del_rec_buf = dnsserver.DNS_RPC_RECORD_BUF()
+                        del_rec_buf.rec = record
+                        try:
+                            dns_conn.DnssrvUpdateRecord2(client_version,
+                                                         0,
+                                                         ctx.server,
+                                                         zone,
+                                                         name,
+                                                         None,
+                                                         del_rec_buf)
+                        except WERRORError as e:
+                            if e.args[0] == werror.WERR_DNS_ERROR_NAME_DOES_NOT_EXIST:
+                                # record already gone; nothing to delete
+                                pass
+                            else:
+                                raise
+
+        for IP in IPs:
+            # a ':' in the address string means IPv6
+            if IP.find(':') != -1:
+                ctx.logger.info("Adding DNS AAAA record %s.%s for IPv6 IP: %s"
+                                % (name, zone, IP))
+                rec = AAAARecord(IP)
+            else:
+                ctx.logger.info("Adding DNS A record %s.%s for IPv4 IP: %s"
+                                % (name, zone, IP))
+                rec = ARecord(IP)
+
+            # Add record
+            add_rec_buf = dnsserver.DNS_RPC_RECORD_BUF()
+            add_rec_buf.rec = rec
+            dns_conn.DnssrvUpdateRecord2(client_version,
+                                         0,
+                                         ctx.server,
+                                         zone,
+                                         name,
+                                         add_rec_buf,
+                                         None)
+
+        if (len(IPs) > 0):
+            domaindns_zone_dn = ldb.Dn(ctx.samdb, ctx.domaindns_zone)
+            (ctx.dns_a_dn, ldap_record) \
+                = ctx.samdb.dns_lookup("%s.%s" % (name, zone),
+                                       dns_partition=domaindns_zone_dn)
+
+            # Make the DC own the DNS record, not the administrator
+            sd_helper.modify_sd_on_dn(ctx.dns_a_dn, change_owner_sd,
+                                      controls=["sd_flags:1:%d"
+                                                % (security.SECINFO_OWNER
+                                                   | security.SECINFO_GROUP)])
+
+        # Add record
+        ctx.logger.info("Adding DNS CNAME record %s.%s for %s"
+                        % (msdcs_cname, msdcs_zone, cname_target))
+
+        add_rec_buf = dnsserver.DNS_RPC_RECORD_BUF()
+        rec = CNAMERecord(cname_target)
+        add_rec_buf.rec = rec
+        dns_conn.DnssrvUpdateRecord2(client_version,
+                                     0,
+                                     ctx.server,
+                                     msdcs_zone,
+                                     msdcs_cname,
+                                     add_rec_buf,
+                                     None)
+
+        forestdns_zone_dn = ldb.Dn(ctx.samdb, ctx.forestdns_zone)
+        (ctx.dns_cname_dn, ldap_record) \
+            = ctx.samdb.dns_lookup("%s.%s" % (msdcs_cname, msdcs_zone),
+                                   dns_partition=forestdns_zone_dn)
+
+        # Make the DC own the DNS record, not the administrator
+        sd_helper.modify_sd_on_dn(ctx.dns_cname_dn, change_owner_sd,
+                                  controls=["sd_flags:1:%d"
+                                            % (security.SECINFO_OWNER
+                                               | security.SECINFO_GROUP)])
+
+        ctx.logger.info("All other DNS records (like _ldap SRV records) " +
+                        "will be created samba_dnsupdate on first startup")
+
+ def join_replicate_new_dns_records(ctx):
+ for nc in (ctx.domaindns_zone, ctx.forestdns_zone):
+ if nc in ctx.nc_list:
+ ctx.logger.info("Replicating new DNS records in %s" % (str(nc)))
+ ctx.repl.replicate(nc, ctx.source_dsa_invocation_id,
+ ctx.ntds_guid, rodc=ctx.RODC,
+ replica_flags=ctx.replica_flags,
+ full_sync=False)
+
+    def join_finalise(ctx):
+        """Finalise the join, mark us synchronised and setup secrets db."""
+
+        # FIXME we shouldn't do this in all cases
+
+        # If for some reasons we joined in another site than the one of
+        # DC we just replicated from then we don't need to send the updatereplicateref
+        # as replication between sites is time based and on the initiative of the
+        # requesting DC
+        ctx.logger.info("Sending DsReplicaUpdateRefs for all the replicated partitions")
+        for nc in ctx.nc_list:
+            ctx.send_DsReplicaUpdateRefs(nc)
+
+        if ctx.RODC:
+            print("Setting RODC invocationId")
+            ctx.local_samdb.set_invocation_id(str(ctx.invocation_id))
+            ctx.local_samdb.set_opaque_integer("domainFunctionality",
+                                               ctx.behavior_version)
+            m = ldb.Message()
+            m.dn = ldb.Dn(ctx.local_samdb, "%s" % ctx.ntds_dn)
+            m["invocationId"] = ldb.MessageElement(ndr_pack(ctx.invocation_id),
+                                                   ldb.FLAG_MOD_REPLACE,
+                                                   "invocationId")
+            ctx.local_samdb.modify(m)
+
+            # Note: as RODC the invocationId is only stored
+            # on the RODC itself, the other DCs never see it.
+            #
+            # That's is why we fix up the replPropertyMetaData stamp
+            # for the 'invocationId' attribute, we need to change
+            # the 'version' to '0', this is what windows 2008r2 does as RODC
+            #
+            # This means if the object on a RWDC ever gets a invocationId
+            # attribute, it will have version '1' (or higher), which will
+            # will overwrite the RODC local value.
+            ctx.local_samdb.set_attribute_replmetadata_version(m.dn,
+                                                               "invocationId",
+                                                               0)
+
+        ctx.logger.info("Setting isSynchronized and dsServiceName")
+        m = ldb.Message()
+        m.dn = ldb.Dn(ctx.local_samdb, '@ROOTDSE')
+        m["isSynchronized"] = ldb.MessageElement("TRUE", ldb.FLAG_MOD_REPLACE, "isSynchronized")
+
+        # dsServiceName points the server at its own NTDS settings object
+        guid = ctx.ntds_guid
+        m["dsServiceName"] = ldb.MessageElement("<GUID=%s>" % str(guid),
+                                                ldb.FLAG_MOD_REPLACE, "dsServiceName")
+        ctx.local_samdb.modify(m)
+
+        if ctx.subdomain:
+            # subdomain joins run their own provision (with its own secrets
+            # setup) in join_provision_own_domain(), so stop here
+            return
+
+        secrets_ldb = Ldb(ctx.paths.secrets, session_info=system_session(), lp=ctx.lp)
+
+        ctx.logger.info("Setting up secrets database")
+        secretsdb_self_join(secrets_ldb, domain=ctx.domain_name,
+                            realm=ctx.realm,
+                            dnsdomain=ctx.dnsdomain,
+                            netbiosname=ctx.myname,
+                            domainsid=ctx.domsid,
+                            machinepass=ctx.acct_pass,
+                            secure_channel_type=ctx.secure_channel_type,
+                            key_version_number=ctx.key_version_number)
+
+        if ctx.dns_backend.startswith("BIND9_"):
+            setup_bind9_dns(ctx.local_samdb, secrets_ldb,
+                            ctx.names, ctx.paths, ctx.logger,
+                            dns_backend=ctx.dns_backend,
+                            dnspass=ctx.dnspass, os_level=ctx.behavior_version,
+                            key_version_number=ctx.dns_key_version_number)
+
+ def join_setup_trusts(ctx):
+ """provision the local SAM."""
+
+ print("Setup domain trusts with server %s" % ctx.server)
+ binding_options = "" # why doesn't signing work here? w2k8r2 claims no session key
+ lsaconn = lsa.lsarpc("ncacn_np:%s[%s]" % (ctx.server, binding_options),
+ ctx.lp, ctx.creds)
+
+ objectAttr = lsa.ObjectAttribute()
+ objectAttr.sec_qos = lsa.QosInfo()
+
+ pol_handle = lsaconn.OpenPolicy2(''.decode('utf-8'),
+ objectAttr, security.SEC_FLAG_MAXIMUM_ALLOWED)
+
+ info = lsa.TrustDomainInfoInfoEx()
+ info.domain_name.string = ctx.dnsdomain
+ info.netbios_name.string = ctx.domain_name
+ info.sid = ctx.domsid
+ info.trust_direction = lsa.LSA_TRUST_DIRECTION_INBOUND | lsa.LSA_TRUST_DIRECTION_OUTBOUND
+ info.trust_type = lsa.LSA_TRUST_TYPE_UPLEVEL
+ info.trust_attributes = lsa.LSA_TRUST_ATTRIBUTE_WITHIN_FOREST
+
+ try:
+ oldname = lsa.String()
+ oldname.string = ctx.dnsdomain
+ oldinfo = lsaconn.QueryTrustedDomainInfoByName(pol_handle, oldname,
+ lsa.LSA_TRUSTED_DOMAIN_INFO_FULL_INFO)
+ print("Removing old trust record for %s (SID %s)" % (ctx.dnsdomain, oldinfo.info_ex.sid))
+ lsaconn.DeleteTrustedDomain(pol_handle, oldinfo.info_ex.sid)
+ except RuntimeError:
+ pass
+
+ password_blob = string_to_byte_array(ctx.trustdom_pass.encode('utf-16-le'))
+
+ clear_value = drsblobs.AuthInfoClear()
+ clear_value.size = len(password_blob)
+ clear_value.password = password_blob
+
+ clear_authentication_information = drsblobs.AuthenticationInformation()
+ clear_authentication_information.LastUpdateTime = samba.unix2nttime(int(time.time()))
+ clear_authentication_information.AuthType = lsa.TRUST_AUTH_TYPE_CLEAR
+ clear_authentication_information.AuthInfo = clear_value
+
+ authentication_information_array = drsblobs.AuthenticationInformationArray()
+ authentication_information_array.count = 1
+ authentication_information_array.array = [clear_authentication_information]
+
+ outgoing = drsblobs.trustAuthInOutBlob()
+ outgoing.count = 1
+ outgoing.current = authentication_information_array
+
+ trustpass = drsblobs.trustDomainPasswords()
+ confounder = [3] * 512
+
+ for i in range(512):
+ confounder[i] = random.randint(0, 255)
+
+ trustpass.confounder = confounder
+
+ trustpass.outgoing = outgoing
+ trustpass.incoming = outgoing
+
+ trustpass_blob = ndr_pack(trustpass)
+
+ encrypted_trustpass = arcfour_encrypt(lsaconn.session_key, trustpass_blob)
+
+ auth_blob = lsa.DATA_BUF2()
+ auth_blob.size = len(encrypted_trustpass)
+ auth_blob.data = string_to_byte_array(encrypted_trustpass)
+
+ auth_info = lsa.TrustDomainInfoAuthInfoInternal()
+ auth_info.auth_blob = auth_blob
+
+ trustdom_handle = lsaconn.CreateTrustedDomainEx2(pol_handle,
+ info,
+ auth_info,
+ security.SEC_STD_DELETE)
+
+ rec = {
+ "dn": "cn=%s,cn=system,%s" % (ctx.dnsforest, ctx.base_dn),
+ "objectclass": "trustedDomain",
+ "trustType": str(info.trust_type),
+ "trustAttributes": str(info.trust_attributes),
+ "trustDirection": str(info.trust_direction),
+ "flatname": ctx.forest_domain_name,
+ "trustPartner": ctx.dnsforest,
+ "trustAuthIncoming": ndr_pack(outgoing),
+ "trustAuthOutgoing": ndr_pack(outgoing),
+ "securityIdentifier": ndr_pack(ctx.forestsid)
+ }
+ ctx.local_samdb.add(rec)
+
+ rec = {
+ "dn": "cn=%s$,cn=users,%s" % (ctx.forest_domain_name, ctx.base_dn),
+ "objectclass": "user",
+ "userAccountControl": str(samba.dsdb.UF_INTERDOMAIN_TRUST_ACCOUNT),
+ "clearTextPassword": ctx.trustdom_pass.encode('utf-16-le'),
+ "samAccountName": "%s$" % ctx.forest_domain_name
+ }
+ ctx.local_samdb.add(rec)
+
+ def build_nc_lists(ctx):
+ # nc_list is the list of naming context (NC) for which we will
+ # replicate in and send a updateRef command to the partner DC
+
+ # full_nc_list is the list of naming context (NC) we hold
+ # read/write copies of. These are not subsets of each other.
+ ctx.nc_list = [ctx.config_dn, ctx.schema_dn]
+ ctx.full_nc_list = [ctx.base_dn, ctx.config_dn, ctx.schema_dn]
+
+ if ctx.subdomain and ctx.dns_backend != "NONE":
+ ctx.full_nc_list += [ctx.domaindns_zone]
+
+ elif not ctx.subdomain:
+ ctx.nc_list += [ctx.base_dn]
+
+ if ctx.dns_backend != "NONE":
+ ctx.nc_list += [ctx.domaindns_zone]
+ ctx.nc_list += [ctx.forestdns_zone]
+ ctx.full_nc_list += [ctx.domaindns_zone]
+ ctx.full_nc_list += [ctx.forestdns_zone]
+
+    def do_join(ctx):
+        """Run the full join: add objects, provision, replicate, finalise.
+
+        On any failure the partially-created join objects are cleaned up
+        on the remote DC before the exception is re-raised.
+        """
+        ctx.build_nc_lists()
+
+        if ctx.promote_existing:
+            ctx.promote_possible()
+        else:
+            # remove leftovers of any previous join attempt with this name
+            ctx.cleanup_old_join()
+
+        try:
+            ctx.join_add_objects()
+            ctx.join_provision()
+            ctx.join_replicate()
+            if ctx.subdomain:
+                ctx.join_add_objects2()
+                ctx.join_provision_own_domain()
+                ctx.join_setup_trusts()
+
+            if ctx.dns_backend != "NONE":
+                ctx.join_add_dns_records()
+                ctx.join_replicate_new_dns_records()
+
+            ctx.join_finalise()
+        except:
+            try:
+                print("Join failed - cleaning up")
+            except IOError:
+                # stdout may be unusable (e.g. broken pipe); the cleanup
+                # below matters more than the message
+                pass
+
+            # cleanup the failed join (checking we still have a live LDB
+            # connection to the remote DC first)
+            ctx.refresh_ldb_connection()
+            ctx.cleanup_old_join()
+            raise
+
+
+def join_RODC(logger=None, server=None, creds=None, lp=None, site=None, netbios_name=None,
+              targetdir=None, domain=None, domain_critical_only=False,
+              machinepass=None, use_ntvfs=False, dns_backend=None,
+              promote_existing=False, plaintext_secrets=False,
+              backend_store=None,
+              backend_store_size=None):
+    """Join as a RODC.
+
+    Configures a DCJoinContext for a read-only DC (partial-secrets
+    machine account, its own krbtgt account, reveal/deny groups and
+    RODC replication flags) and then runs the join against *server*.
+    """
+
+    ctx = DCJoinContext(logger, server, creds, lp, site, netbios_name,
+                        targetdir, domain, machinepass, use_ntvfs, dns_backend,
+                        promote_existing, plaintext_secrets,
+                        backend_store=backend_store,
+                        backend_store_size=backend_store_size)
+
+    # make the global smb.conf settings match the domain we are joining
+    lp.set("workgroup", ctx.domain_name)
+    logger.info("workgroup is %s" % ctx.domain_name)
+
+    lp.set("realm", ctx.realm)
+    logger.info("realm is %s" % ctx.realm)
+
+    # RODCs get a per-RODC krbtgt account named after the host
+    ctx.krbtgt_dn = "CN=krbtgt_%s,CN=Users,%s" % (ctx.myname, ctx.base_dn)
+
+    # setup some defaults for accounts that should be replicated to this RODC
+    ctx.never_reveal_sid = [
+        "<SID=%s-%s>" % (ctx.domsid, security.DOMAIN_RID_RODC_DENY),
+        "<SID=%s>" % security.SID_BUILTIN_ADMINISTRATORS,
+        "<SID=%s>" % security.SID_BUILTIN_SERVER_OPERATORS,
+        "<SID=%s>" % security.SID_BUILTIN_BACKUP_OPERATORS,
+        "<SID=%s>" % security.SID_BUILTIN_ACCOUNT_OPERATORS]
+    ctx.reveal_sid = "<SID=%s-%s>" % (ctx.domsid, security.DOMAIN_RID_RODC_ALLOW)
+
+    # the joining (admin) user becomes managedBy of the RODC account
+    mysid = ctx.get_mysid()
+    admin_dn = "<SID=%s>" % mysid
+    ctx.managedby = admin_dn
+
+    ctx.userAccountControl = (samba.dsdb.UF_WORKSTATION_TRUST_ACCOUNT |
+                              samba.dsdb.UF_TRUSTED_TO_AUTHENTICATE_FOR_DELEGATION |
+                              samba.dsdb.UF_PARTIAL_SECRETS_ACCOUNT)
+
+    ctx.SPNs.extend(["RestrictedKrbHost/%s" % ctx.myname,
+                     "RestrictedKrbHost/%s" % ctx.dnshostname])
+
+    ctx.connection_dn = "CN=RODC Connection (FRS),%s" % ctx.ntds_dn
+    ctx.secure_channel_type = misc.SEC_CHAN_RODC
+    ctx.RODC = True
+    ctx.replica_flags |= (drsuapi.DRSUAPI_DRS_SPECIAL_SECRET_PROCESSING |
+                          drsuapi.DRSUAPI_DRS_GET_ALL_GROUP_MEMBERSHIP)
+    ctx.domain_replica_flags = ctx.replica_flags
+    if domain_critical_only:
+        # only pull critical domain objects; schema/config are still full
+        ctx.domain_replica_flags |= drsuapi.DRSUAPI_DRS_CRITICAL_ONLY
+
+    ctx.do_join()
+
+    logger.info("Joined domain %s (SID %s) as an RODC" % (ctx.domain_name, ctx.domsid))
+
+
+def join_DC(logger=None, server=None, creds=None, lp=None, site=None, netbios_name=None,
+            targetdir=None, domain=None, domain_critical_only=False,
+            machinepass=None, use_ntvfs=False, dns_backend=None,
+            promote_existing=False, plaintext_secrets=False,
+            backend_store=None,
+            backend_store_size=None):
+    """Join as a DC.
+
+    Configures a DCJoinContext for a writable domain controller (server
+    trust account, writable-replica flags) and runs the join against
+    *server*.
+    """
+    ctx = DCJoinContext(logger, server, creds, lp, site, netbios_name,
+                        targetdir, domain, machinepass, use_ntvfs, dns_backend,
+                        promote_existing, plaintext_secrets,
+                        backend_store=backend_store,
+                        backend_store_size=backend_store_size)
+
+    # make the global smb.conf settings match the domain we are joining
+    lp.set("workgroup", ctx.domain_name)
+    logger.info("workgroup is %s" % ctx.domain_name)
+
+    lp.set("realm", ctx.realm)
+    logger.info("realm is %s" % ctx.realm)
+
+    ctx.userAccountControl = samba.dsdb.UF_SERVER_TRUST_ACCOUNT | samba.dsdb.UF_TRUSTED_FOR_DELEGATION
+
+    # SPN with the well-known DRS service-class GUID plus our NTDS GUID
+    ctx.SPNs.append('E3514235-4B06-11D1-AB04-00C04FC2DCD2/$NTDSGUID/%s' % ctx.dnsdomain)
+    ctx.secure_channel_type = misc.SEC_CHAN_BDC
+
+    ctx.replica_flags |= (drsuapi.DRSUAPI_DRS_WRIT_REP |
+                          drsuapi.DRSUAPI_DRS_FULL_SYNC_IN_PROGRESS)
+    ctx.domain_replica_flags = ctx.replica_flags
+    if domain_critical_only:
+        # only pull critical domain objects; schema/config are still full
+        ctx.domain_replica_flags |= drsuapi.DRSUAPI_DRS_CRITICAL_ONLY
+
+    ctx.do_join()
+    logger.info("Joined domain %s (SID %s) as a DC" % (ctx.domain_name, ctx.domsid))
+
+
+def join_clone(logger=None, server=None, creds=None, lp=None,
+               targetdir=None, domain=None, include_secrets=False,
+               dns_backend="NONE", backend_store=None,
+               backend_store_size=None):
+    """Creates a local clone of a remote DC.
+
+    :param include_secrets: if False, secret attributes are stripped
+        during replication (see DCCloneContext)
+    :return: the DCCloneContext used for the clone
+    """
+    ctx = DCCloneContext(logger, server, creds, lp, targetdir=targetdir,
+                         domain=domain, dns_backend=dns_backend,
+                         include_secrets=include_secrets,
+                         backend_store=backend_store,
+                         backend_store_size=backend_store_size)
+
+    # make the global smb.conf settings match the domain being cloned
+    lp.set("workgroup", ctx.domain_name)
+    logger.info("workgroup is %s" % ctx.domain_name)
+
+    lp.set("realm", ctx.realm)
+    logger.info("realm is %s" % ctx.realm)
+
+    ctx.do_join()
+    logger.info("Cloned domain %s (SID %s)" % (ctx.domain_name, ctx.domsid))
+    return ctx
+
+
+class DCCloneContext(DCJoinContext):
+    """Clones a remote DC.
+
+    Unlike a real join, no server/NTDS/machine-account objects are
+    created on the remote DC (see do_join below); we just provision
+    locally and pull the whole database over DRS replication.
+    """
+
+    def __init__(ctx, logger=None, server=None, creds=None, lp=None,
+                 targetdir=None, domain=None, dns_backend=None,
+                 include_secrets=False, backend_store=None,
+                 backend_store_size=None):
+        """
+        :param include_secrets: if False, the SPECIAL_SECRET_PROCESSING
+            replica flag is set so secret attributes are not replicated
+        """
+        super().__init__(logger, server, creds, lp,
+                         targetdir=targetdir, domain=domain,
+                         dns_backend=dns_backend,
+                         backend_store=backend_store,
+                         backend_store_size=backend_store_size)
+
+        # As we don't want to create or delete these DNs, we set them to None
+        ctx.server_dn = None
+        ctx.ntds_dn = None
+        ctx.acct_dn = None
+        # short host name of the DC we are cloning
+        ctx.myname = ctx.server.split('.')[0]
+        ctx.ntds_guid = None
+        ctx.rid_manager_dn = None
+
+        # Save this early
+        ctx.remote_dc_ntds_guid = ctx.samdb.get_ntds_GUID()
+
+        ctx.replica_flags |= (drsuapi.DRSUAPI_DRS_WRIT_REP |
+                              drsuapi.DRSUAPI_DRS_FULL_SYNC_IN_PROGRESS)
+        if not include_secrets:
+            ctx.replica_flags |= drsuapi.DRSUAPI_DRS_SPECIAL_SECRET_PROCESSING
+        ctx.domain_replica_flags = ctx.replica_flags
+
+    def join_finalise(ctx):
+        """Mark the cloned DB synchronised, impersonating the source DSA."""
+        ctx.logger.info("Setting isSynchronized and dsServiceName")
+        m = ldb.Message()
+        m.dn = ldb.Dn(ctx.local_samdb, '@ROOTDSE')
+        m["isSynchronized"] = ldb.MessageElement("TRUE", ldb.FLAG_MOD_REPLACE,
+                                                 "isSynchronized")
+
+        # We want to appear to be the server we just cloned
+        guid = ctx.remote_dc_ntds_guid
+        m["dsServiceName"] = ldb.MessageElement("<GUID=%s>" % str(guid),
+                                                ldb.FLAG_MOD_REPLACE,
+                                                "dsServiceName")
+        ctx.local_samdb.modify(m)
+
+    def do_join(ctx):
+        """Provision locally, then replicate everything from the remote DC."""
+        ctx.build_nc_lists()
+
+        # When cloning a DC, we just want to provision a DC locally, then
+        # grab the remote DC's entire DB via DRS replication
+        ctx.join_provision()
+        ctx.join_replicate()
+        ctx.join_finalise()
+
+
+# Used to create a renamed backup of a DC. Renaming the domain means that the
+# cloned/backup DC can be started without interfering with the production DC.
+class DCCloneAndRenameContext(DCCloneContext):
+ """Clones a remote DC, renaming the domain along the way."""
+
+ def __init__(ctx, new_base_dn, new_domain_name, new_realm, logger=None,
+ server=None, creds=None, lp=None, targetdir=None, domain=None,
+ dns_backend=None, include_secrets=True, backend_store=None):
+ super().__init__(logger, server, creds, lp,
+ targetdir=targetdir,
+ domain=domain,
+ dns_backend=dns_backend,
+ include_secrets=include_secrets,
+ backend_store=backend_store)
+ # store the new DN (etc) that we want the cloned DB to use
+ ctx.new_base_dn = new_base_dn
+ ctx.new_domain_name = new_domain_name
+ ctx.new_realm = new_realm
+
+ def create_replicator(ctx, repl_creds, binding_options):
+ """Creates a new DRS object for managing replications"""
+
+ # We want to rename all the domain objects, and the simplest way to do
+ # this is during replication. This is because the base DN of the top-
+ # level replicated object will flow through to all the objects below it
+ binding_str = "ncacn_ip_tcp:%s[%s]" % (ctx.server, binding_options)
+ return drs_utils.drs_ReplicateRenamer(binding_str, ctx.lp, repl_creds,
+ ctx.local_samdb,
+ ctx.invocation_id,
+ ctx.base_dn, ctx.new_base_dn)
+
+ def create_non_global_lp(ctx, global_lp):
+ """Creates a non-global LoadParm based on the global LP's settings"""
+
+ # the samba code shares a global LoadParm by default. Here we create a
+ # new LoadParm that retains the global settings, but any changes we
+ # make to it won't automatically affect the rest of the samba code.
+ # The easiest way to do this is to dump the global settings to a
+ # temporary smb.conf file, and then load the temp file into a new
+ # non-global LoadParm
+ fd, tmp_file = tempfile.mkstemp()
+ global_lp.dump(False, tmp_file)
+ local_lp = samba.param.LoadParm(filename_for_non_global_lp=tmp_file)
+ os.remove(tmp_file)
+ return local_lp
+
+ def rename_dn(ctx, dn_str):
+ """Uses string substitution to replace the base DN"""
+ old_base_dn = ctx.base_dn
+ return re.sub('%s$' % old_base_dn, ctx.new_base_dn, dn_str)
+
+ # we want to override the normal DCCloneContext's join_provision() so that
+ # use the new domain DNs during the provision. We do this because:
+ # - it sets up smb.conf/secrets.ldb with the new realm/workgroup values
+ # - it sets up a default SAM DB that uses the new Schema DNs (without which
+ # we couldn't apply the renamed DRS objects during replication)
+ def join_provision(ctx):
+ """Provision the local (renamed) SAM."""
+
+ print("Provisioning the new (renamed) domain...")
+
+ # the provision() calls make_smbconf() which uses lp.dump()/lp.load()
+ # to create a new smb.conf. By default, it uses the global LoadParm to
+ # do this, and so it would overwrite the realm/domain values globally.
+ # We still need the global LoadParm to retain the old domain's details,
+ # so we can connect to (and clone) the existing DC.
+ # So, copy the global settings into a non-global LoadParm, which we can
+ # then pass into provision(). This generates a new smb.conf correctly,
+ # without overwriting the global realm/domain values just yet.
+ non_global_lp = ctx.create_non_global_lp(ctx.lp)
+
+ # do the provision with the new/renamed domain DN values
+ presult = provision(ctx.logger, system_session(),
+ targetdir=ctx.targetdir, samdb_fill=FILL_DRS,
+ realm=ctx.new_realm, lp=non_global_lp,
+ rootdn=ctx.rename_dn(ctx.root_dn), domaindn=ctx.new_base_dn,
+ schemadn=ctx.rename_dn(ctx.schema_dn),
+ configdn=ctx.rename_dn(ctx.config_dn),
+ domain=ctx.new_domain_name, domainsid=ctx.domsid,
+ serverrole="active directory domain controller",
+ dns_backend=ctx.dns_backend,
+ backend_store=ctx.backend_store)
+
+ print("Provision OK for renamed domain DN %s" % presult.domaindn)
+ ctx.local_samdb = presult.samdb
+ ctx.paths = presult.paths
diff --git a/python/samba/kcc/__init__.py b/python/samba/kcc/__init__.py
new file mode 100644
index 0000000..22590d0
--- /dev/null
+++ b/python/samba/kcc/__init__.py
@@ -0,0 +1,2754 @@
+# define the KCC object
+#
+# Copyright (C) Dave Craft 2011
+# Copyright (C) Andrew Bartlett 2015
+#
+# Andrew Bartlett's alleged work performed by his underlings Douglas
+# Bagnall and Garming Sam.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import random
+import uuid
+from functools import cmp_to_key
+import itertools
+from samba import unix2nttime, nttime2unix
+from samba import ldb, dsdb, drs_utils
+from samba.auth import system_session
+from samba.samdb import SamDB
+from samba.dcerpc import drsuapi, misc
+
+from samba.kcc.kcc_utils import Site, Partition, Transport, SiteLink
+from samba.kcc.kcc_utils import NCReplica, NCType, nctype_lut, GraphNode
+from samba.kcc.kcc_utils import RepsFromTo, KCCError, KCCFailedObject
+from samba.kcc.graph import convert_schedule_to_repltimes
+
+from samba.ndr import ndr_pack
+
+from samba.kcc.graph_utils import verify_and_dot
+
+from samba.kcc import ldif_import_export
+from samba.kcc.graph import setup_graph, get_spanning_tree_edges
+from samba.kcc.graph import Vertex
+
+from samba.kcc.debug import DEBUG, DEBUG_FN, logger
+from samba.kcc import debug
+from samba.common import cmp
+
+
+def sort_dsa_by_gc_and_guid(dsa1, dsa2):
+ """Helper to sort DSAs by guid global catalog status
+
+ GC DSAs come before non-GC DSAs, other than that, the guids are
+ sorted in NDR form.
+
+ :param dsa1: A DSA object
+ :param dsa2: Another DSA
+ :return: -1, 0, or 1, indicating sort order.
+ """
+ if dsa1.is_gc() and not dsa2.is_gc():
+ return -1
+ if not dsa1.is_gc() and dsa2.is_gc():
+ return +1
+ return cmp(ndr_pack(dsa1.dsa_guid), ndr_pack(dsa2.dsa_guid))
+
+
+def is_smtp_replication_available():
+ """Can the KCC use SMTP replication?
+
+ Currently always returns false because Samba doesn't implement
+ SMTP transfer for NC changes between DCs.
+
+ :return: Boolean (always False)
+ """
+ return False
+
+
+class KCC(object):
+ """The Knowledge Consistency Checker class.
+
+ A container for objects and methods allowing a run of the KCC. Produces a
+ set of connections in the samdb for which the Distributed Replication
+ Service can then utilize to replicate naming contexts
+
+ :param unix_now: The putative current time in seconds since 1970.
+ :param readonly: Don't write to the database.
+ :param verify: Check topological invariants for the generated graphs
+ :param debug: Write verbosely to stderr.
+ :param dot_file_dir: write diagnostic Graphviz files in this directory
+ """
+    def __init__(self, unix_now, readonly=False, verify=False, debug=False,
+                 dot_file_dir=None):
+        """Initializes the partitions class which can hold
+        our local DCs partitions or all the partitions in
+        the forest
+
+        :param unix_now: the putative current time in seconds since 1970
+        :param readonly: don't write to the database
+        :param verify: check topological invariants for generated graphs
+        :param debug: write verbosely to stderr
+        :param dot_file_dir: write diagnostic Graphviz files here, if set
+        """
+        self.part_table = {}  # partition objects, keyed by partition dnstr
+        self.site_table = {}  # Site objects, keyed by site GUID string
+        self.ip_transport = None  # the 'IP' Transport; set by load_ip_transport()
+        self.sitelink_table = {}  # SiteLink objects, keyed by dnstr
+        # DSA lookup indices, populated as sites are loaded
+        self.dsa_by_dnstr = {}
+        self.dsa_by_guid = {}
+
+        # convenience accessors bound to the dict .get methods
+        self.get_dsa_by_guidstr = self.dsa_by_guid.get
+        self.get_dsa = self.dsa_by_dnstr.get
+
+        # TODO: These should be backed by a 'permanent' store so that when
+        # calling DRSGetReplInfo with DS_REPL_INFO_KCC_DSA_CONNECT_FAILURES,
+        # the failure information can be returned
+        self.kcc_failed_links = {}
+        self.kcc_failed_connections = set()
+
+        # Used in inter-site topology computation. A list
+        # of connections (by NTDSConnection object) that are
+        # to be kept when pruning un-needed NTDS Connections
+        self.kept_connections = set()
+
+        self.my_dsa_dnstr = None  # My dsa DN
+        self.my_dsa = None  # My dsa object
+
+        self.my_site_dnstr = None
+        self.my_site = None
+
+        # SamDB connection; presumably assigned by the run/import entry
+        # points before the load_* methods are called -- TODO confirm
+        self.samdb = None
+
+        self.unix_now = unix_now
+        self.nt_now = unix2nttime(unix_now)  # same instant, NT time format
+        self.readonly = readonly
+        self.verify = verify
+        self.debug = debug
+        self.dot_file_dir = dot_file_dir
+
+ def load_ip_transport(self):
+ """Loads the inter-site transport objects for Sites
+
+ :return: None
+ :raise KCCError: if no IP transport is found
+ """
+ try:
+ res = self.samdb.search("CN=Inter-Site Transports,CN=Sites,%s" %
+ self.samdb.get_config_basedn(),
+ scope=ldb.SCOPE_SUBTREE,
+ expression="(objectClass=interSiteTransport)")
+ except ldb.LdbError as e2:
+ (enum, estr) = e2.args
+ raise KCCError("Unable to find inter-site transports - (%s)" %
+ estr)
+
+ for msg in res:
+ dnstr = str(msg.dn)
+
+ transport = Transport(dnstr)
+
+ transport.load_transport(self.samdb)
+ if transport.name == 'IP':
+ self.ip_transport = transport
+ elif transport.name == 'SMTP':
+ logger.debug("Samba KCC is ignoring the obsolete "
+ "SMTP transport.")
+
+ else:
+ logger.warning("Samba KCC does not support the transport "
+ "called %r." % (transport.name,))
+
+ if self.ip_transport is None:
+ raise KCCError("there doesn't seem to be an IP transport")
+
+ def load_all_sitelinks(self):
+ """Loads the inter-site siteLink objects
+
+ :return: None
+ :raise KCCError: if site-links aren't found
+ """
+ try:
+ res = self.samdb.search("CN=Inter-Site Transports,CN=Sites,%s" %
+ self.samdb.get_config_basedn(),
+ scope=ldb.SCOPE_SUBTREE,
+ expression="(objectClass=siteLink)")
+ except ldb.LdbError as e3:
+ (enum, estr) = e3.args
+ raise KCCError("Unable to find inter-site siteLinks - (%s)" % estr)
+
+ for msg in res:
+ dnstr = str(msg.dn)
+
+ # already loaded
+ if dnstr in self.sitelink_table:
+ continue
+
+ sitelink = SiteLink(dnstr)
+
+ sitelink.load_sitelink(self.samdb)
+
+ # Assign this siteLink to table
+ # and index by dn
+ self.sitelink_table[dnstr] = sitelink
+
+ def load_site(self, dn_str):
+ """Helper for load_my_site and load_all_sites.
+
+ Put all the site's DSAs into the KCC indices.
+
+ :param dn_str: a site dn_str
+ :return: the Site object pertaining to the dn_str
+ """
+ site = Site(dn_str, self.unix_now)
+ site.load_site(self.samdb)
+
+ # We avoid replacing the site with an identical copy in case
+ # somewhere else has a reference to the old one, which would
+ # lead to all manner of confusion and chaos.
+ guid = str(site.site_guid)
+ if guid not in self.site_table:
+ self.site_table[guid] = site
+ self.dsa_by_dnstr.update(site.dsa_table)
+ self.dsa_by_guid.update((str(x.dsa_guid), x)
+ for x in site.dsa_table.values())
+
+ return self.site_table[guid]
+
+ def load_my_site(self):
+ """Load the Site object for the local DSA.
+
+ :return: None
+ """
+ self.my_site_dnstr = ("CN=%s,CN=Sites,%s" % (
+ self.samdb.server_site_name(),
+ self.samdb.get_config_basedn()))
+
+ self.my_site = self.load_site(self.my_site_dnstr)
+
+ def load_all_sites(self):
+ """Discover all sites and create Site objects.
+
+ :return: None
+ :raise: KCCError if sites can't be found
+ """
+ try:
+ res = self.samdb.search("CN=Sites,%s" %
+ self.samdb.get_config_basedn(),
+ scope=ldb.SCOPE_SUBTREE,
+ expression="(objectClass=site)")
+ except ldb.LdbError as e4:
+ (enum, estr) = e4.args
+ raise KCCError("Unable to find sites - (%s)" % estr)
+
+ for msg in res:
+ sitestr = str(msg.dn)
+ self.load_site(sitestr)
+
+    def load_my_dsa(self):
+        """Discover my nTDSDSA dn thru the rootDSE entry
+
+        Requires load_my_site() to have run first (reads self.my_site).
+
+        :return: None
+        :raise: KCCError if DSA can't be found
+        """
+        # first try a direct GUID-based lookup of our NTDS settings object
+        dn_query = "<GUID=%s>" % self.samdb.get_ntds_GUID()
+        dn = ldb.Dn(self.samdb, dn_query)
+        try:
+            res = self.samdb.search(base=dn, scope=ldb.SCOPE_BASE,
+                                    attrs=["objectGUID"])
+        except ldb.LdbError as e5:
+            (enum, estr) = e5.args
+            DEBUG_FN("Search for dn '%s' [from %s] failed: %s. "
+                     "This typically happens in --importldif mode due "
+                     "to lack of module support." % (dn, dn_query, estr))
+            try:
+                # We work around the failure above by looking at the
+                # dsServiceName that was put in the fake rootdse by
+                # the --exportldif, rather than the
+                # samdb.get_ntds_GUID(). The disadvantage is that this
+                # mode requires we modify the @ROOTDSE dnq to support
+                # --forced-local-dsa
+                service_name_res = self.samdb.search(base="",
+                                                     scope=ldb.SCOPE_BASE,
+                                                     attrs=["dsServiceName"])
+                dn = ldb.Dn(self.samdb,
+                            service_name_res[0]["dsServiceName"][0].decode('utf8'))
+
+                res = self.samdb.search(base=dn, scope=ldb.SCOPE_BASE,
+                                        attrs=["objectGUID"])
+            except ldb.LdbError as e:
+                (enum, estr) = e.args
+                raise KCCError("Unable to find my nTDSDSA - (%s)" % estr)
+
+        # exactly one object must match our NTDS settings DN
+        if len(res) != 1:
+            raise KCCError("Unable to find my nTDSDSA at %s" %
+                           dn.extended_str())
+
+        # sanity check: the object found must carry our own NTDS GUID
+        ntds_guid = misc.GUID(self.samdb.get_ntds_GUID())
+        if misc.GUID(res[0]["objectGUID"][0]) != ntds_guid:
+            raise KCCError("Did not find the GUID we expected,"
+                           " perhaps due to --importldif")
+
+        self.my_dsa_dnstr = str(res[0].dn)
+
+        self.my_dsa = self.my_site.get_dsa(self.my_dsa_dnstr)
+
+        # RODCs are not in the site's DSA table; register my_dsa in the
+        # global indices anyway so later lookups can find it
+        if self.my_dsa_dnstr not in self.dsa_by_dnstr:
+            debug.DEBUG_DARK_YELLOW("my_dsa %s isn't in self.dsas_by_dnstr:"
+                                    " it must be RODC.\n"
+                                    "Let's add it, because my_dsa is special!"
+                                    "\n(likewise for self.dsa_by_guid)" %
+                                    self.my_dsa_dnstr)
+
+            self.dsa_by_dnstr[self.my_dsa_dnstr] = self.my_dsa
+            self.dsa_by_guid[str(self.my_dsa.dsa_guid)] = self.my_dsa
+
+ def load_all_partitions(self):
+ """Discover and load all partitions.
+
+ Each NC is inserted into the part_table by partition
+ dn string (not the nCName dn string)
+
+ :return: None
+ :raise: KCCError if partitions can't be found
+ """
+ try:
+ res = self.samdb.search("CN=Partitions,%s" %
+ self.samdb.get_config_basedn(),
+ scope=ldb.SCOPE_SUBTREE,
+ expression="(objectClass=crossRef)")
+ except ldb.LdbError as e6:
+ (enum, estr) = e6.args
+ raise KCCError("Unable to find partitions - (%s)" % estr)
+
+ for msg in res:
+ partstr = str(msg.dn)
+
+ # already loaded
+ if partstr in self.part_table:
+ continue
+
+ part = Partition(partstr)
+
+ part.load_partition(self.samdb)
+ self.part_table[partstr] = part
+
+    def refresh_failed_links_connections(self, ping=None):
+        """Ensure the failed links list is up to date
+
+        Based on MS-ADTS 6.2.2.1
+
+        Rebuilds self.kcc_failed_links from the repsFrom failure
+        counters of our current replicas, aggregating per source DSA
+        GUID (worst failure count, earliest first-failure time).  If a
+        ping oracle is given, failed connections that respond again are
+        dropped from self.kcc_failed_connections.
+
+        :param ping: An oracle function of remote site availability
+        :return: None
+        """
+        # LINKS: Refresh failed links
+        self.kcc_failed_links = {}
+        current, needed = self.my_dsa.get_rep_tables()
+        for replica in current.values():
+            # For every possible connection to replicate
+            for reps_from in replica.rep_repsFrom:
+                failure_count = reps_from.consecutive_sync_failures
+                if failure_count <= 0:
+                    continue
+
+                dsa_guid = str(reps_from.source_dsa_obj_guid)
+                time_first_failure = reps_from.last_success
+                last_result = reps_from.last_attempt
+                dns_name = reps_from.dns_name1
+
+                f = self.kcc_failed_links.get(dsa_guid)
+                if f is None:
+                    f = KCCFailedObject(dsa_guid, failure_count,
+                                        time_first_failure, last_result,
+                                        dns_name)
+                    self.kcc_failed_links[dsa_guid] = f
+                else:
+                    # several replicas may share a source DSA: keep the
+                    # worst failure count and the earliest failure time
+                    f.failure_count = max(f.failure_count, failure_count)
+                    f.time_first_failure = min(f.time_first_failure,
+                                               time_first_failure)
+                    f.last_result = last_result
+
+        # CONNECTIONS: Refresh failed connections
+        restore_connections = set()
+        if ping is not None:
+            DEBUG("refresh_failed_links: checking if links are still down")
+            for connection in self.kcc_failed_connections:
+                if ping(connection.dns_name):
+                    # Failed connection is no longer failing
+                    restore_connections.add(connection)
+                else:
+                    connection.failure_count += 1
+        else:
+            DEBUG("refresh_failed_links: not checking live links because we\n"
+                  "weren't asked to --attempt-live-connections")
+
+        # Remove the restored connections from the failed connections
+        self.kcc_failed_connections.difference_update(restore_connections)
+
+ def is_stale_link_connection(self, target_dsa):
+ """Check whether a link to a remote DSA is stale
+
+ Used in MS-ADTS 6.2.2.2 Intrasite Connection Creation
+
+ Returns True if the remote seems to have been down for at
+ least two hours, otherwise False.
+
+ :param target_dsa: the remote DSA object
+ :return: True if link is stale, otherwise False
+ """
+ failed_link = self.kcc_failed_links.get(str(target_dsa.dsa_guid))
+ if failed_link:
+ # failure_count should be > 0, but check anyways
+ if failed_link.failure_count > 0:
+ unix_first_failure = \
+ nttime2unix(failed_link.time_first_failure)
+ # TODO guard against future
+ if unix_first_failure > self.unix_now:
+ logger.error("The last success time attribute for "
+ "repsFrom is in the future!")
+
+ # Perform calculation in seconds
+ if (self.unix_now - unix_first_failure) > 60 * 60 * 2:
+ return True
+
+ # TODO connections.
+ # We have checked failed *links*, but we also need to check
+ # *connections*
+
+ return False
+
+ # TODO: This should be backed by some form of local database
+    def remove_unneeded_failed_links_connections(self):
+        """Prune stale entries from the failure-tracking tables (no-op).
+
+        :return: None
+        """
+        # Remove all tuples in kcc_failed_links where failure count = 0
+        # In this implementation, this should never happen.
+
+        # Remove all connections which were not used this run or connections
+        # that became active during this run.
+        pass
+
+ def _ensure_connections_are_loaded(self, connections):
+ """Load or fake-load NTDSConnections lacking GUIDs
+
+ New connections don't have GUIDs and created times which are
+ needed for sorting. If we're in read-only mode, we make fake
+ GUIDs, otherwise we ask SamDB to do it for us.
+
+ :param connections: an iterable of NTDSConnection objects.
+ :return: None
+ """
+ for cn_conn in connections:
+ if cn_conn.guid is None:
+ if self.readonly:
+ cn_conn.guid = misc.GUID(str(uuid.uuid4()))
+ cn_conn.whenCreated = self.nt_now
+ else:
+ cn_conn.load_connection(self.samdb)
+
+ def _mark_broken_ntdsconn(self):
+ """Find NTDS Connections that lack a remote
+
+ I'm not sure how they appear. Let's be rid of them by marking
+ them with the to_be_deleted attribute.
+
+ :return: None
+ """
+ for cn_conn in self.my_dsa.connect_table.values():
+ s_dnstr = cn_conn.get_from_dnstr()
+ if s_dnstr is None:
+ DEBUG_FN("%s has phantom connection %s" % (self.my_dsa,
+ cn_conn))
+ cn_conn.to_be_deleted = True
+
+ def _mark_unneeded_local_ntdsconn(self):
+ """Find unneeded intrasite NTDS Connections for removal
+
+ Based on MS-ADTS 6.2.2.4 Removing Unnecessary Connections.
+ Every DC removes its own unnecessary intrasite connections.
+ This function tags them with the to_be_deleted attribute.
+
+ :return: None
+ """
+ # XXX should an RODC be regarded as same site? It isn't part
+ # of the intrasite ring.
+
+ if self.my_site.is_cleanup_ntdsconn_disabled():
+ DEBUG_FN("not doing ntdsconn cleanup for site %s, "
+ "because it is disabled" % self.my_site)
+ return
+
+ mydsa = self.my_dsa
+
+ try:
+ self._ensure_connections_are_loaded(mydsa.connect_table.values())
+ except KCCError:
+ # RODC never actually added any connections to begin with
+ if mydsa.is_ro():
+ return
+
+ local_connections = []
+
+ for cn_conn in mydsa.connect_table.values():
+ s_dnstr = cn_conn.get_from_dnstr()
+ if s_dnstr in self.my_site.dsa_table:
+ removable = not (cn_conn.is_generated() or
+ cn_conn.is_rodc_topology())
+ packed_guid = ndr_pack(cn_conn.guid)
+ local_connections.append((cn_conn, s_dnstr,
+ packed_guid, removable))
+
+ # Avoid "ValueError: r cannot be bigger than the iterable" in
+ # for a, b in itertools.permutations(local_connections, 2):
+ if (len(local_connections) < 2):
+ return
+
+ for a, b in itertools.permutations(local_connections, 2):
+ cn_conn, s_dnstr, packed_guid, removable = a
+ cn_conn2, s_dnstr2, packed_guid2, removable2 = b
+ if (removable and
+ s_dnstr == s_dnstr2 and
+ cn_conn.whenCreated < cn_conn2.whenCreated or
+ (cn_conn.whenCreated == cn_conn2.whenCreated and
+ packed_guid < packed_guid2)):
+ cn_conn.to_be_deleted = True
+
+    def _mark_unneeded_intersite_ntdsconn(self):
+        """find unneeded intersite NTDS Connections for removal
+
+        Based on MS-ADTS 6.2.2.4 Removing Unnecessary Connections. The
+        intersite topology generator removes links for all DCs in its
+        site. Here we just tag them with the to_be_deleted attribute.
+
+        :return: None
+        """
+        # TODO Figure out how best to handle the RODC case
+        # The RODC is ISTG, but shouldn't act on anyone's behalf.
+        if self.my_dsa.is_ro():
+            return
+
+        # Find the intersite connections
+        local_dsas = self.my_site.dsa_table
+        connections_and_dsas = []
+        for dsa in local_dsas.values():
+            for cn in dsa.connect_table.values():
+                if cn.to_be_deleted:
+                    continue
+                s_dnstr = cn.get_from_dnstr()
+                if s_dnstr is None:
+                    continue
+                if s_dnstr not in local_dsas:
+                    from_dsa = self.get_dsa(s_dnstr)
+                    # Samba ONLY: ISTG removes connections to dead DCs
+                    if from_dsa is None or '\\0ADEL' in s_dnstr:
+                        logger.info("DSA appears deleted, removing connection %s"
+                                    % s_dnstr)
+                        cn.to_be_deleted = True
+                        continue
+                    connections_and_dsas.append((cn, dsa, from_dsa))
+
+        self._ensure_connections_are_loaded(x[0] for x in connections_and_dsas)
+        for cn, to_dsa, from_dsa in connections_and_dsas:
+            # only auto-generated, non-RODC-topology connections are
+            # candidates for removal
+            if not cn.is_generated() or cn.is_rodc_topology():
+                continue
+
+            # If the connection is in the kept_connections list, we
+            # only remove it if an endpoint seems down.
+            if (cn in self.kept_connections and
+                not (self.is_bridgehead_failed(to_dsa, True) or
+                     self.is_bridgehead_failed(from_dsa, True))):
+                continue
+
+            # this one is broken and might be superseded by another.
+            # But which other? Let's just say another link to the same
+            # site can supersede.
+            from_dnstr = from_dsa.dsa_dnstr
+            for site in self.site_table.values():
+                if from_dnstr in site.rw_dsa_table:
+                    for cn2, to_dsa2, from_dsa2 in connections_and_dsas:
+                        # NOTE(review): `from_dsa2` is a DSA object while
+                        # `from_dnstr in site.rw_dsa_table` above suggests the
+                        # table is keyed by dn strings; if so, this membership
+                        # test can never be true -- confirm against
+                        # kcc_utils.Site.rw_dsa_table (should this be
+                        # from_dsa2.dsa_dnstr?)
+                        if (cn is not cn2 and
+                            from_dsa2 in site.rw_dsa_table):
+                            cn.to_be_deleted = True
+
+ def _commit_changes(self, dsa):
+ if dsa.is_ro() or self.readonly:
+ for connect in dsa.connect_table.values():
+ if connect.to_be_deleted:
+ logger.info("TO BE DELETED:\n%s" % connect)
+ if connect.to_be_added:
+ logger.info("TO BE ADDED:\n%s" % connect)
+ if connect.to_be_modified:
+ logger.info("TO BE MODIFIED:\n%s" % connect)
+
+ # Perform deletion from our tables but perform
+ # no database modification
+ dsa.commit_connections(self.samdb, ro=True)
+ else:
+ # Commit any modified connections
+ dsa.commit_connections(self.samdb)
+
+ def remove_unneeded_ntdsconn(self, all_connected):
+ """Remove unneeded NTDS Connections once topology is calculated
+
+ Based on MS-ADTS 6.2.2.4 Removing Unnecessary Connections
+
+ :param all_connected: indicates whether all sites are connected
+ :return: None
+ """
+ self._mark_broken_ntdsconn()
+ self._mark_unneeded_local_ntdsconn()
+ # if we are not the istg, we're done!
+ # if we are the istg, but all_connected is False, we also do nothing.
+ if self.my_dsa.is_istg() and all_connected:
+ self._mark_unneeded_intersite_ntdsconn()
+
+ for dsa in self.my_site.dsa_table.values():
+ self._commit_changes(dsa)
+
+    def modify_repsFrom(self, n_rep, t_repsFrom, s_rep, s_dsa, cn_conn):
+        """Update an repsFrom object if required.
+
+        Part of MS-ADTS 6.2.2.5.
+
+        Update t_repsFrom if necessary to satisfy requirements. Such
+        updates are typically required when the IDL_DRSGetNCChanges
+        server has moved from one site to another--for example, to
+        enable compression when the server is moved from the
+        client's site to another site.
+
+        The repsFrom.update_flags bit field may be modified
+        auto-magically if any changes are made here. See
+        kcc_utils.RepsFromTo for gory details.
+
+        Note that every flag change below is guarded by a "is it
+        already set?" test: assigning to t_repsFrom attributes has
+        side-effects, so we avoid redundant assignments.
+
+        :param n_rep: NC replica we need
+        :param t_repsFrom: repsFrom tuple to modify
+        :param s_rep: NC replica at source DSA
+        :param s_dsa: source DSA
+        :param cn_conn: Local DSA NTDSConnection child
+
+        :return: None
+        """
+        s_dnstr = s_dsa.dsa_dnstr
+        same_site = s_dnstr in self.my_site.dsa_table
+
+        # if schedule doesn't match then update and modify
+        times = convert_schedule_to_repltimes(cn_conn.schedule)
+        if times != t_repsFrom.schedule:
+            t_repsFrom.schedule = times
+
+        # Bit DRS_ADD_REF is set in replicaFlags unconditionally
+        # Samba ONLY:
+        if ((t_repsFrom.replica_flags &
+             drsuapi.DRSUAPI_DRS_ADD_REF) == 0x0):
+            t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_ADD_REF
+
+        # Bit DRS_PER_SYNC is set in replicaFlags if and only
+        # if nTDSConnection schedule has a value v that specifies
+        # scheduled replication is to be performed at least once
+        # per week.
+        if cn_conn.is_schedule_minimum_once_per_week():
+
+            if ((t_repsFrom.replica_flags &
+                 drsuapi.DRSUAPI_DRS_PER_SYNC) == 0x0):
+                t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_PER_SYNC
+
+        # Bit DRS_INIT_SYNC is set in t.replicaFlags if and only
+        # if the source DSA and the local DC's nTDSDSA object are
+        # in the same site or source dsa is the FSMO role owner
+        # of one or more FSMO roles in the NC replica.
+        if same_site or n_rep.is_fsmo_role_owner(s_dnstr):
+
+            if ((t_repsFrom.replica_flags &
+                 drsuapi.DRSUAPI_DRS_INIT_SYNC) == 0x0):
+                t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_INIT_SYNC
+
+        # If bit NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT is set in
+        # cn!options, bit DRS_NEVER_NOTIFY is set in t.replicaFlags
+        # if and only if bit NTDSCONN_OPT_USE_NOTIFY is clear in
+        # cn!options. Otherwise, bit DRS_NEVER_NOTIFY is set in
+        # t.replicaFlags if and only if s and the local DC's
+        # nTDSDSA object are in different sites.
+        if ((cn_conn.options &
+             dsdb.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT) != 0x0):
+
+            if (cn_conn.options & dsdb.NTDSCONN_OPT_USE_NOTIFY) == 0x0:
+                # WARNING
+                #
+                # it LOOKS as if this next test is a bit silly: it
+                # checks the flag then sets it if it not set; the same
+                # effect could be achieved by unconditionally setting
+                # it. But in fact the repsFrom object has special
+                # magic attached to it, and altering replica_flags has
+                # side-effects. That is bad in my opinion, but there
+                # you go.
+                if ((t_repsFrom.replica_flags &
+                     drsuapi.DRSUAPI_DRS_NEVER_NOTIFY) == 0x0):
+                    t_repsFrom.replica_flags |= \
+                        drsuapi.DRSUAPI_DRS_NEVER_NOTIFY
+
+        elif not same_site:
+
+            if ((t_repsFrom.replica_flags &
+                 drsuapi.DRSUAPI_DRS_NEVER_NOTIFY) == 0x0):
+                t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_NEVER_NOTIFY
+
+        # Bit DRS_USE_COMPRESSION is set in t.replicaFlags if
+        # and only if s and the local DC's nTDSDSA object are
+        # not in the same site and the
+        # NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION bit is
+        # clear in cn!options
+        if (not same_site and
+            (cn_conn.options &
+             dsdb.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION) == 0x0):
+
+            if ((t_repsFrom.replica_flags &
+                 drsuapi.DRSUAPI_DRS_USE_COMPRESSION) == 0x0):
+                t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_USE_COMPRESSION
+
+        # Bit DRS_TWOWAY_SYNC is set in t.replicaFlags if and only
+        # if bit NTDSCONN_OPT_TWOWAY_SYNC is set in cn!options.
+        if (cn_conn.options & dsdb.NTDSCONN_OPT_TWOWAY_SYNC) != 0x0:
+
+            if ((t_repsFrom.replica_flags &
+                 drsuapi.DRSUAPI_DRS_TWOWAY_SYNC) == 0x0):
+                t_repsFrom.replica_flags |= drsuapi.DRSUAPI_DRS_TWOWAY_SYNC
+
+        # Bits DRS_DISABLE_AUTO_SYNC and DRS_DISABLE_PERIODIC_SYNC are
+        # set in t.replicaFlags if and only if cn!enabledConnection = false.
+        if not cn_conn.is_enabled():
+
+            if ((t_repsFrom.replica_flags &
+                 drsuapi.DRSUAPI_DRS_DISABLE_AUTO_SYNC) == 0x0):
+                t_repsFrom.replica_flags |= \
+                    drsuapi.DRSUAPI_DRS_DISABLE_AUTO_SYNC
+
+            if ((t_repsFrom.replica_flags &
+                 drsuapi.DRSUAPI_DRS_DISABLE_PERIODIC_SYNC) == 0x0):
+                t_repsFrom.replica_flags |= \
+                    drsuapi.DRSUAPI_DRS_DISABLE_PERIODIC_SYNC
+
+        # If s and the local DC's nTDSDSA object are in the same site,
+        # cn!transportType has no value, or the RDN of cn!transportType
+        # is CN=IP:
+        #
+        #     Bit DRS_MAIL_REP in t.replicaFlags is clear.
+        #
+        #     t.uuidTransport = NULL GUID.
+        #
+        #     t.uuidDsa = The GUID-based DNS name of s.
+        #
+        # Otherwise:
+        #
+        #     Bit DRS_MAIL_REP in t.replicaFlags is set.
+        #
+        #     If x is the object with dsname cn!transportType,
+        #     t.uuidTransport = x!objectGUID.
+        #
+        #     Let a be the attribute identified by
+        #     x!transportAddressAttribute. If a is
+        #     the dNSHostName attribute, t.uuidDsa = the GUID-based
+        #     DNS name of s. Otherwise, t.uuidDsa = (s!parent)!a.
+        #
+        # It appears that the first statement i.e.
+        #
+        #     "If s and the local DC's nTDSDSA object are in the same
+        #      site, cn!transportType has no value, or the RDN of
+        #      cn!transportType is CN=IP:"
+        #
+        # could be a slightly tighter statement if it had an "or"
+        # between each condition. I believe this should
+        # be interpreted as:
+        #
+        #     IF (same-site) OR (no-value) OR (type-ip)
+        #
+        # because IP should be the primary transport mechanism
+        # (even in inter-site) and the absence of the transportType
+        # attribute should always imply IP no matter if its multi-site
+        #
+        # NOTE MS-TECH INCORRECT:
+        #
+        # All indications point to these statements above being
+        # incorrectly stated:
+        #
+        #     t.uuidDsa = The GUID-based DNS name of s.
+        #
+        #     Let a be the attribute identified by
+        #     x!transportAddressAttribute. If a is
+        #     the dNSHostName attribute, t.uuidDsa = the GUID-based
+        #     DNS name of s. Otherwise, t.uuidDsa = (s!parent)!a.
+        #
+        # because the uuidDSA is a GUID and not a GUID-base DNS
+        # name. Nor can uuidDsa hold (s!parent)!a if not
+        # dNSHostName. What should have been said is:
+        #
+        #     t.naDsa = The GUID-based DNS name of s
+        #
+        # That would also be correct if transportAddressAttribute
+        # were "mailAddress" because (naDsa) can also correctly
+        # hold the SMTP ISM service address.
+        #
+        # the GUID-based DNS name of the source DSA
+        nastr = "%s._msdcs.%s" % (s_dsa.dsa_guid, self.samdb.forest_dns_name())
+
+        # Samba always uses the IP transport (see the long note above),
+        # so DRS_MAIL_REP is cleared and the transport GUID nulled out.
+        if ((t_repsFrom.replica_flags &
+             drsuapi.DRSUAPI_DRS_MAIL_REP) != 0x0):
+            t_repsFrom.replica_flags &= ~drsuapi.DRSUAPI_DRS_MAIL_REP
+
+        t_repsFrom.transport_guid = misc.GUID()
+
+        # See (NOTE MS-TECH INCORRECT) above
+
+        # NOTE: it looks like these conditionals are pointless,
+        # because the state will end up as `t_repsFrom.dns_name1 ==
+        # nastr` in either case, BUT the repsFrom thing is magic and
+        # assigning to it alters some flags. So we try not to update
+        # it unless necessary.
+        if t_repsFrom.dns_name1 != nastr:
+            t_repsFrom.dns_name1 = nastr
+
+        if t_repsFrom.version > 0x1 and t_repsFrom.dns_name2 != nastr:
+            t_repsFrom.dns_name2 = nastr
+
+        if t_repsFrom.is_modified():
+            DEBUG_FN("modify_repsFrom(): %s" % t_repsFrom)
+
+ def get_dsa_for_implied_replica(self, n_rep, cn_conn):
+ """If a connection imply a replica, find the relevant DSA
+
+ Given a NC replica and NTDS Connection, determine if the
+ connection implies a repsFrom tuple should be present from the
+ source DSA listed in the connection to the naming context. If
+ it should be, return the DSA; otherwise return None.
+
+ Based on part of MS-ADTS 6.2.2.5
+
+ :param n_rep: NC replica
+ :param cn_conn: NTDS Connection
+ :return: source DSA or None
+ """
+ # XXX different conditions for "implies" than MS-ADTS 6.2.2
+ # preamble.
+
+ # It boils down to: we want an enabled, non-FRS connections to
+ # a valid remote DSA with a non-RO replica corresponding to
+ # n_rep.
+
+ if not cn_conn.is_enabled() or cn_conn.is_rodc_topology():
+ return None
+
+ s_dnstr = cn_conn.get_from_dnstr()
+ s_dsa = self.get_dsa(s_dnstr)
+
+ # No DSA matching this source DN string?
+ if s_dsa is None:
+ return None
+
+ s_rep = s_dsa.get_current_replica(n_rep.nc_dnstr)
+
+ if (s_rep is not None and
+ s_rep.is_present() and
+ (not s_rep.is_ro() or n_rep.is_partial())):
+ return s_dsa
+ return None
+
+ def translate_ntdsconn(self, current_dsa=None):
+ """Adjust repsFrom to match NTDSConnections
+
+ This function adjusts values of repsFrom abstract attributes of NC
+ replicas on the local DC to match those implied by
+ nTDSConnection objects.
+
+ Based on [MS-ADTS] 6.2.2.5
+
+ :param current_dsa: optional DSA on whose behalf we are acting.
+ :return: None
+ """
+ ro = False
+ if current_dsa is None:
+ current_dsa = self.my_dsa
+
+ if current_dsa.is_ro():
+ ro = True
+
+ if current_dsa.is_translate_ntdsconn_disabled():
+ DEBUG_FN("skipping translate_ntdsconn() "
+ "because disabling flag is set")
+ return
+
+ DEBUG_FN("translate_ntdsconn(): enter")
+
+ current_rep_table, needed_rep_table = current_dsa.get_rep_tables()
+
+ # Filled in with replicas we currently have that need deleting
+ delete_reps = set()
+
+ # We're using the MS notation names here to allow
+ # correlation back to the published algorithm.
+ #
+ # n_rep - NC replica (n)
+ # t_repsFrom - tuple (t) in n!repsFrom
+ # s_dsa - Source DSA of the replica. Defined as nTDSDSA
+ # object (s) such that (s!objectGUID = t.uuidDsa)
+ # In our IDL representation of repsFrom the (uuidDsa)
+ # attribute is called (source_dsa_obj_guid)
+ # cn_conn - (cn) is nTDSConnection object and child of the local
+ # DC's nTDSDSA object and (cn!fromServer = s)
+ # s_rep - source DSA replica of n
+ #
+ # If we have the replica and its not needed
+ # then we add it to the "to be deleted" list.
+ for dnstr in current_rep_table:
+ # If we're on the RODC, hardcode the update flags
+ if ro:
+ c_rep = current_rep_table[dnstr]
+ c_rep.load_repsFrom(self.samdb)
+ for t_repsFrom in c_rep.rep_repsFrom:
+ replica_flags = (drsuapi.DRSUAPI_DRS_INIT_SYNC |
+ drsuapi.DRSUAPI_DRS_PER_SYNC |
+ drsuapi.DRSUAPI_DRS_ADD_REF |
+ drsuapi.DRSUAPI_DRS_SPECIAL_SECRET_PROCESSING |
+ drsuapi.DRSUAPI_DRS_NONGC_RO_REP)
+ if t_repsFrom.replica_flags != replica_flags:
+ t_repsFrom.replica_flags = replica_flags
+ c_rep.commit_repsFrom(self.samdb, ro=self.readonly)
+ else:
+ if dnstr not in needed_rep_table:
+ delete_reps.add(dnstr)
+
+ DEBUG_FN('current %d needed %d delete %d' % (len(current_rep_table),
+ len(needed_rep_table), len(delete_reps)))
+
+ if delete_reps:
+ # TODO Must delete repsFrom/repsTo for these replicas
+ DEBUG('deleting these reps: %s' % delete_reps)
+ for dnstr in delete_reps:
+ del current_rep_table[dnstr]
+
+ # HANDLE REPS-FROM
+ #
+ # Now perform the scan of replicas we'll need
+ # and compare any current repsFrom against the
+ # connections
+ for n_rep in needed_rep_table.values():
+
+ # load any repsFrom and fsmo roles as we'll
+ # need them during connection translation
+ n_rep.load_repsFrom(self.samdb)
+ n_rep.load_fsmo_roles(self.samdb)
+
+ # Loop thru the existing repsFrom tuples (if any)
+ # XXX This is a list and could contain duplicates
+ # (multiple load_repsFrom calls)
+ for t_repsFrom in n_rep.rep_repsFrom:
+
+ # for each tuple t in n!repsFrom, let s be the nTDSDSA
+ # object such that s!objectGUID = t.uuidDsa
+ guidstr = str(t_repsFrom.source_dsa_obj_guid)
+ s_dsa = self.get_dsa_by_guidstr(guidstr)
+
+ # Source dsa is gone from config (strange)
+ # so cleanup stale repsFrom for unlisted DSA
+ if s_dsa is None:
+ logger.warning("repsFrom source DSA guid (%s) not found" %
+ guidstr)
+ t_repsFrom.to_be_deleted = True
+ continue
+
+ # Find the connection that this repsFrom would use. If
+ # there isn't a good one (i.e. non-RODC_TOPOLOGY,
+ # meaning non-FRS), we delete the repsFrom.
+ s_dnstr = s_dsa.dsa_dnstr
+ connections = current_dsa.get_connection_by_from_dnstr(s_dnstr)
+ for cn_conn in connections:
+ if not cn_conn.is_rodc_topology():
+ break
+ else:
+ # no break means no non-rodc_topology connection exists
+ t_repsFrom.to_be_deleted = True
+ continue
+
+ # KCC removes this repsFrom tuple if any of the following
+ # is true:
+ # No NC replica of the NC "is present" on DSA that
+ # would be source of replica
+ #
+ # A writable replica of the NC "should be present" on
+ # the local DC, but a partial replica "is present" on
+ # the source DSA
+ s_rep = s_dsa.get_current_replica(n_rep.nc_dnstr)
+
+ if s_rep is None or not s_rep.is_present() or \
+ (not n_rep.is_ro() and s_rep.is_partial()):
+
+ t_repsFrom.to_be_deleted = True
+ continue
+
+ # If the KCC did not remove t from n!repsFrom, it updates t
+ self.modify_repsFrom(n_rep, t_repsFrom, s_rep, s_dsa, cn_conn)
+
+ # Loop thru connections and add implied repsFrom tuples
+ # for each NTDSConnection under our local DSA if the
+ # repsFrom is not already present
+ for cn_conn in current_dsa.connect_table.values():
+
+ s_dsa = self.get_dsa_for_implied_replica(n_rep, cn_conn)
+ if s_dsa is None:
+ continue
+
+ # Loop thru the existing repsFrom tuples (if any) and
+ # if we already have a tuple for this connection then
+ # no need to proceed to add. It will have been changed
+ # to have the correct attributes above
+ for t_repsFrom in n_rep.rep_repsFrom:
+ guidstr = str(t_repsFrom.source_dsa_obj_guid)
+ if s_dsa is self.get_dsa_by_guidstr(guidstr):
+ s_dsa = None
+ break
+
+ if s_dsa is None:
+ continue
+
+ # Create a new RepsFromTo and proceed to modify
+ # it according to specification
+ t_repsFrom = RepsFromTo(n_rep.nc_dnstr)
+
+ t_repsFrom.source_dsa_obj_guid = s_dsa.dsa_guid
+
+ s_rep = s_dsa.get_current_replica(n_rep.nc_dnstr)
+
+ self.modify_repsFrom(n_rep, t_repsFrom, s_rep, s_dsa, cn_conn)
+
+ # Add to our NC repsFrom as this is newly computed
+ if t_repsFrom.is_modified():
+ n_rep.rep_repsFrom.append(t_repsFrom)
+
+ if self.readonly or ro:
+ # Display any to be deleted or modified repsFrom
+ text = n_rep.dumpstr_to_be_deleted()
+ if text:
+ logger.info("TO BE DELETED:\n%s" % text)
+ text = n_rep.dumpstr_to_be_modified()
+ if text:
+ logger.info("TO BE MODIFIED:\n%s" % text)
+
+ # Perform deletion from our tables but perform
+ # no database modification
+ n_rep.commit_repsFrom(self.samdb, ro=True)
+ else:
+ # Commit any modified repsFrom to the NC replica
+ n_rep.commit_repsFrom(self.samdb)
+
+ # HANDLE REPS-TO:
+ #
+ # Now perform the scan of replicas we'll need
+ # and compare any current repsTo against the
+ # connections
+
+ # RODC should never push to anybody (should we check this?)
+ if ro:
+ return
+
+ for n_rep in needed_rep_table.values():
+
+ # load any repsTo and fsmo roles as we'll
+ # need them during connection translation
+ n_rep.load_repsTo(self.samdb)
+
+ # Loop thru the existing repsTo tuples (if any)
+ # XXX This is a list and could contain duplicates
+ # (multiple load_repsTo calls)
+ for t_repsTo in n_rep.rep_repsTo:
+
+ # for each tuple t in n!repsTo, let s be the nTDSDSA
+ # object such that s!objectGUID = t.uuidDsa
+ guidstr = str(t_repsTo.source_dsa_obj_guid)
+ s_dsa = self.get_dsa_by_guidstr(guidstr)
+
+ # Source dsa is gone from config (strange)
+ # so cleanup stale repsTo for unlisted DSA
+ if s_dsa is None:
+ logger.warning("repsTo source DSA guid (%s) not found" %
+ guidstr)
+ t_repsTo.to_be_deleted = True
+ continue
+
+ # Find the connection that this repsTo would use. If
+ # there isn't a good one (i.e. non-RODC_TOPOLOGY,
+ # meaning non-FRS), we delete the repsTo.
+ s_dnstr = s_dsa.dsa_dnstr
+ if '\\0ADEL' in s_dnstr:
+ logger.warning("repsTo source DSA guid (%s) appears deleted" %
+ guidstr)
+ t_repsTo.to_be_deleted = True
+ continue
+
+ connections = s_dsa.get_connection_by_from_dnstr(self.my_dsa_dnstr)
+ if len(connections) > 0:
+ # Then this repsTo is tentatively valid
+ continue
+ else:
+ # There is no plausible connection for this repsTo
+ t_repsTo.to_be_deleted = True
+
+ if self.readonly:
+ # Display any to be deleted or modified repsTo
+ for rt in n_rep.rep_repsTo:
+ if rt.to_be_deleted:
+ logger.info("REMOVING REPS-TO: %s" % rt)
+
+ # Perform deletion from our tables but perform
+ # no database modification
+ n_rep.commit_repsTo(self.samdb, ro=True)
+ else:
+ # Commit any modified repsTo to the NC replica
+ n_rep.commit_repsTo(self.samdb)
+
+ # TODO Remove any duplicate repsTo values. This should never happen in
+ # any normal situations.
+
def merge_failed_links(self, ping=None):
    """Merge kCCFailedLinks and kCCFailedConnections from bridgeheads.

    The KCC on a writable DC attempts to merge the link and connection
    failure information from bridgehead DCs in its own site to help it
    identify failed bridgehead DCs.

    Based on MS-ADTS 6.2.2.3.2 "Merge of kCCFailedLinks and kCCFailedLinks
    from Bridgeheads"

    :param ping: An oracle of current bridgehead availability
    :return: None
    """
    # The real algorithm would:
    # 1. Query every bridgehead server in our site (other than ourself)
    # 2. For every ntDSConnection that references a server in a different
    #    site, merge all the failure info
    #
    # XXX - not implemented yet
    if ping is None:
        DEBUG_FN("skipping merge_failed_links() because it requires "
                 "real network connections\n"
                 "and we weren't asked to --attempt-live-connections")
    else:
        debug.DEBUG_RED("merge_failed_links() is NOT IMPLEMENTED")
+
def setup_graph(self, part):
    """Set up an intersite graph

    An intersite graph has a Vertex for each site object, a
    MultiEdge for each SiteLink object, and a MultiEdgeSet for
    each siteLinkBridge object (or implied siteLinkBridge). It
    reflects the intersite topology in a slightly more abstract
    graph form.

    Roughly corresponds to MS-ADTS 6.2.2.3.4.3

    :param part: a Partition object
    :returns: an InterSiteGraph object
    """
    # Bridging is required when 'Bridge all site links' is off, or when
    # Win2k3 bridges are explicitly demanded:
    #   NTDSTRANSPORT_OPT_BRIDGES_REQUIRED          0x00000002
    #   NTDSSETTINGS_OPT_W2K3_BRIDGES_REQUIRED      0x00001000
    # (the latter is undocumented but appears in ntdsapi.h)
    bridges_required = (self.my_site.site_options & 0x00001002) != 0
    transport_guid = str(self.ip_transport.guid)

    # NOTE: this calls the module-level setup_graph() helper, not this
    # method (unqualified names inside a method resolve at module scope).
    graph = setup_graph(part, self.site_table, transport_guid,
                        self.sitelink_table, bridges_required)

    if self.verify or self.dot_file_dir is not None:
        # Flatten every edge into site-DN pairs for the dot/verify output
        dot_edges = [(a.site.site_dnstr, b.site.site_dnstr)
                     for edge in graph.edges
                     for a, b in itertools.combinations(edge.vertices, 2)]
        plot_name = 'site_edges_%s' % part.partstr
        verify_and_dot(plot_name, dot_edges, directed=False,
                       label=self.my_dsa_dnstr,
                       properties=(), debug=DEBUG,
                       verify=self.verify,
                       dot_file_dir=self.dot_file_dir)

    return graph
+
def get_bridgehead(self, site, part, transport, partial_ok, detect_failed):
    """Get a bridgehead DC for a site.

    Part of MS-ADTS 6.2.2.3.4.4

    :param site: site object representing for which a bridgehead
        DC is desired.
    :param part: crossRef for NC to replicate.
    :param transport: interSiteTransport object for replication
        traffic.
    :param partial_ok: True if a DC containing a partial
        replica or a full replica will suffice, False if only
        a full replica will suffice.
    :param detect_failed: True to detect failed DCs and route
        replication traffic around them, False to assume no DC
        has failed.
    :return: dsa object for the bridgehead DC or None
    """
    # The first entry of the (already sorted or shuffled) candidate
    # list is the chosen bridgehead.
    candidates = self.get_all_bridgeheads(site, part, transport,
                                          partial_ok, detect_failed)
    if candidates:
        debug.DEBUG_GREEN("get_bridgehead:\n\tsitedn = %s\n\tbhdn = %s" %
                          (site.site_dnstr, candidates[0].dsa_dnstr))
        return candidates[0]

    debug.DEBUG_MAGENTA("get_bridgehead FAILED:\nsitedn = %s" %
                        site.site_dnstr)
    return None
+
def get_all_bridgeheads(self, site, part, transport,
                        partial_ok, detect_failed):
    """Get all bridgehead DCs on a site satisfying the given criteria

    Part of MS-ADTS 6.2.2.3.4.4

    :param site: site object representing the site for which
        bridgehead DCs are desired.
    :param part: partition for NC to replicate.
    :param transport: interSiteTransport object for
        replication traffic.
    :param partial_ok: True if a DC containing a partial
        replica or a full replica will suffice, False if
        only a full replica will suffice.
    :param detect_failed: True to detect failed DCs and route
        replication traffic around them, FALSE to assume
        no DC has failed.
    :raises KCCError: if the transport is not the IP transport.
    :return: list of dsa object for available bridgehead DCs
    """
    bhs = []

    # Samba only implements the IP transport; any other transport here
    # indicates a broken configuration or caller.
    if transport.name != "IP":
        raise KCCError("get_all_bridgeheads has run into a "
                       "non-IP transport! %r"
                       % (transport.name,))

    DEBUG_FN(site.rw_dsa_table)
    # Only writable DSAs (rw_dsa_table) are bridgehead candidates.
    for dsa in site.rw_dsa_table.values():

        pdnstr = dsa.get_parent_dnstr()

        # IF t!bridgeheadServerListBL has one or more values and
        # t!bridgeheadServerListBL does not contain a reference
        # to the parent object of dc then skip dc
        # (i.e. an explicit bridgehead list excludes this DC)
        if ((len(transport.bridgehead_list) != 0 and
             pdnstr not in transport.bridgehead_list)):
            continue

        # IF dc is in the same site as the local DC
        #    IF a replica of cr!nCName is not in the set of NC replicas
        #    that "should be present" on dc or a partial replica of the
        #    NC "should be present" but partialReplicasOkay = FALSE
        #        Skip dc
        if self.my_site.same_site(dsa):
            needed, ro, partial = part.should_be_present(dsa)
            if not needed or (partial and not partial_ok):
                continue
            rep = dsa.get_current_replica(part.nc_dnstr)

        # ELSE
        #    IF an NC replica of cr!nCName is not in the set of NC
        #    replicas that "are present" on dc or a partial replica of
        #    the NC "is present" but partialReplicasOkay = FALSE
        #        Skip dc
        else:
            rep = dsa.get_current_replica(part.nc_dnstr)
            if rep is None or (rep.is_partial() and not partial_ok):
                continue

        # IF AmIRODC() and cr!nCName corresponds to default NC then
        #     Let dsaobj be the nTDSDSA object of the dc
        #     IF dsaobj.msDS-Behavior-Version < DS_DOMAIN_FUNCTION_2008
        #         Skip dc
        # NOTE: rep can be None here on the same-site path, hence the
        # explicit None check before is_default().
        if self.my_dsa.is_ro() and rep is not None and rep.is_default():
            if not dsa.is_minimum_behavior(dsdb.DS_DOMAIN_FUNCTION_2008):
                continue

        # IF BridgeheadDCFailed(dc!objectGUID, detectFailedDCs) = TRUE
        #     Skip dc
        if self.is_bridgehead_failed(dsa, detect_failed):
            DEBUG("bridgehead is failed")
            continue

        DEBUG_FN("found a bridgehead: %s" % dsa.dsa_dnstr)
        bhs.append(dsa)

    # IF bit NTDSSETTINGS_OPT_IS_RAND_BH_SELECTION_DISABLED is set in
    # s!options
    #    SORT bhs such that all GC servers precede DCs that are not GC
    #    servers, and otherwise by ascending objectGUID
    # ELSE
    #    SORT bhs in a random order
    # Callers (get_bridgehead) pick bhs[0], so this ordering decides
    # which DC actually becomes the bridgehead.
    if site.is_random_bridgehead_disabled():
        bhs.sort(key=cmp_to_key(sort_dsa_by_gc_and_guid))
    else:
        random.shuffle(bhs)
    debug.DEBUG_YELLOW(bhs)
    return bhs
+
def is_bridgehead_failed(self, dsa, detect_failed):
    """Determine whether a given DC is known to be in a failed state

    :param dsa: the bridgehead to test
    :param detect_failed: True to really check, False to assume no failure
    :return: True if and only if the DC should be considered failed

    NOTE: here we deliberately DEPART from the MS-ADTS pseudo code for
    BridgeheadDCFailed(), whose ELSE branch returns detectFailedDCs.
    There detectFailedDCs acts as a default return value when no
    failure is detected, rather than (as its own comment advertises) a
    switch that turns detection on or off.  The surrounding prose
    documentation agrees with the comment rather than the code, so we
    treat detect_failed as a switch: when it is False we never report
    a failure.
    """
    # Only consult the stale-link data when failure detection is on AND
    # the site has not disabled stale-topology detection:
    #   NTDSSETTINGS_OPT_IS_TOPL_DETECT_STALE_DISABLED = 0x00000008
    # With DETECT_STALE_DISABLED we can never know of a failed state.
    if detect_failed and not (self.my_site.site_options & 0x00000008):
        return self.is_stale_link_connection(dsa)
    return False
+
def create_connection(self, part, rbh, rsite, transport,
                      lbh, lsite, link_opt, link_sched,
                      partial_ok, detect_failed):
    """Create an nTDSConnection object as specified if it doesn't exist.

    Reconciles existing generated connections between the two sites'
    bridgehead candidates against the siteLink options/schedule, and
    creates a new connection only if no valid one survives.

    Part of MS-ADTS 6.2.2.3.4.5

    :param part: crossRef object for the NC to replicate.
    :param rbh: nTDSDSA object for DC to act as the
        IDL_DRSGetNCChanges server (which is in a site other
        than the local DC's site).
    :param rsite: site of the rbh
    :param transport: interSiteTransport object for the transport
        to use for replication traffic.
    :param lbh: nTDSDSA object for DC to act as the
        IDL_DRSGetNCChanges client (which is in the local DC's site).
    :param lsite: site of the lbh
    :param link_opt: Replication parameters (aggregated siteLink options,
        etc.)
    :param link_sched: Schedule specifying the times at which
        to begin replicating.
    :param partial_ok: True if bridgehead DCs containing partial
        replicas of the NC are acceptable.
    :param detect_failed: True to detect failed DCs and route
        replication traffic around them, FALSE to assume no DC
        has failed.
    """
    rbhs_all = self.get_all_bridgeheads(rsite, part, transport,
                                        partial_ok, False)
    rbh_table = dict((x.dsa_dnstr, x) for x in rbhs_all)

    debug.DEBUG_GREY("rbhs_all: %s %s" % (len(rbhs_all),
                                          [x.dsa_dnstr for x in rbhs_all]))

    # MS-TECH says to compute rbhs_avail but then doesn't use it
    # rbhs_avail = self.get_all_bridgeheads(rsite, part, transport,
    #                                        partial_ok, detect_failed)

    lbhs_all = self.get_all_bridgeheads(lsite, part, transport,
                                        partial_ok, False)
    # An RODC is a bridgehead for itself, even though it is not a
    # writable DSA and so is absent from lbhs_all.
    if lbh.is_ro():
        lbhs_all.append(lbh)

    debug.DEBUG_GREY("lbhs_all: %s %s" % (len(lbhs_all),
                                          [x.dsa_dnstr for x in lbhs_all]))

    # MS-TECH says to compute lbhs_avail but then doesn't use it
    # lbhs_avail = self.get_all_bridgeheads(lsite, part, transport,
    #                                       partial_ok, detect_failed)

    # FOR each nTDSConnection object cn such that the parent of cn is
    # a DC in lbhsAll and cn!fromServer references a DC in rbhsAll
    for ldsa in lbhs_all:
        for cn in ldsa.connect_table.values():

            rdsa = rbh_table.get(cn.from_dnstr)
            if rdsa is None:
                continue

            debug.DEBUG_DARK_YELLOW("rdsa is %s" % rdsa.dsa_dnstr)
            # IF bit NTDSCONN_OPT_IS_GENERATED is set in cn!options and
            # NTDSCONN_OPT_RODC_TOPOLOGY is clear in cn!options and
            # cn!transportType references t
            if ((cn.is_generated() and
                 not cn.is_rodc_topology() and
                 cn.transport_guid == transport.guid)):

                # IF bit NTDSCONN_OPT_USER_OWNED_SCHEDULE is clear in
                # cn!options and cn!schedule != sch
                #     Perform an originating update to set cn!schedule to
                #     sched
                if ((not cn.is_user_owned_schedule() and
                     not cn.is_equivalent_schedule(link_sched))):
                    cn.schedule = link_sched
                    cn.set_modified(True)

                # IF bits NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT and
                # NTDSCONN_OPT_USE_NOTIFY are set in cn
                if cn.is_override_notify_default() and \
                   cn.is_use_notify():

                    # IF bit NTDSSITELINK_OPT_USE_NOTIFY is clear in
                    # ri.Options
                    #    Perform an originating update to clear bits
                    #    NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT and
                    #    NTDSCONN_OPT_USE_NOTIFY in cn!options
                    if (link_opt & dsdb.NTDSSITELINK_OPT_USE_NOTIFY) == 0:
                        cn.options &= \
                            ~(dsdb.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT |
                              dsdb.NTDSCONN_OPT_USE_NOTIFY)
                        cn.set_modified(True)

                # ELSE
                else:

                    # IF bit NTDSSITELINK_OPT_USE_NOTIFY is set in
                    # ri.Options
                    #     Perform an originating update to set bits
                    #     NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT and
                    #     NTDSCONN_OPT_USE_NOTIFY in cn!options
                    if (link_opt & dsdb.NTDSSITELINK_OPT_USE_NOTIFY) != 0:
                        cn.options |= \
                            (dsdb.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT |
                             dsdb.NTDSCONN_OPT_USE_NOTIFY)
                        cn.set_modified(True)

                # IF bit NTDSCONN_OPT_TWOWAY_SYNC is set in cn!options
                if cn.is_twoway_sync():

                    # IF bit NTDSSITELINK_OPT_TWOWAY_SYNC is clear in
                    # ri.Options
                    #     Perform an originating update to clear bit
                    #     NTDSCONN_OPT_TWOWAY_SYNC in cn!options
                    if (link_opt & dsdb.NTDSSITELINK_OPT_TWOWAY_SYNC) == 0:
                        cn.options &= ~dsdb.NTDSCONN_OPT_TWOWAY_SYNC
                        cn.set_modified(True)

                # ELSE
                else:

                    # IF bit NTDSSITELINK_OPT_TWOWAY_SYNC is set in
                    # ri.Options
                    #     Perform an originating update to set bit
                    #     NTDSCONN_OPT_TWOWAY_SYNC in cn!options
                    if (link_opt & dsdb.NTDSSITELINK_OPT_TWOWAY_SYNC) != 0:
                        cn.options |= dsdb.NTDSCONN_OPT_TWOWAY_SYNC
                        cn.set_modified(True)

                # IF bit NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION is set
                # in cn!options
                if cn.is_intersite_compression_disabled():

                    # IF bit NTDSSITELINK_OPT_DISABLE_COMPRESSION is clear
                    # in ri.Options
                    #     Perform an originating update to clear bit
                    #     NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION in
                    #     cn!options
                    if ((link_opt &
                         dsdb.NTDSSITELINK_OPT_DISABLE_COMPRESSION) == 0):
                        cn.options &= \
                            ~dsdb.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION
                        cn.set_modified(True)

                # ELSE
                else:
                    # IF bit NTDSSITELINK_OPT_DISABLE_COMPRESSION is set in
                    # ri.Options
                    #     Perform an originating update to set bit
                    #     NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION in
                    #     cn!options
                    if ((link_opt &
                         dsdb.NTDSSITELINK_OPT_DISABLE_COMPRESSION) != 0):
                        cn.options |= \
                            dsdb.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION
                        cn.set_modified(True)

                # Display any modified connection
                if self.readonly or ldsa.is_ro():
                    if cn.to_be_modified:
                        logger.info("TO BE MODIFIED:\n%s" % cn)

                    ldsa.commit_connections(self.samdb, ro=True)
                else:
                    ldsa.commit_connections(self.samdb)
    # ENDFOR

    valid_connections = 0

    # FOR each nTDSConnection object cn such that cn!parent is
    # a DC in lbhsAll and cn!fromServer references a DC in rbhsAll
    for ldsa in lbhs_all:
        for cn in ldsa.connect_table.values():

            rdsa = rbh_table.get(cn.from_dnstr)
            if rdsa is None:
                continue

            debug.DEBUG_DARK_YELLOW("round 2: rdsa is %s" % rdsa.dsa_dnstr)

            # IF (bit NTDSCONN_OPT_IS_GENERATED is clear in cn!options or
            # cn!transportType references t) and
            # NTDSCONN_OPT_RODC_TOPOLOGY is clear in cn!options
            if (((not cn.is_generated() or
                  cn.transport_guid == transport.guid) and
                 not cn.is_rodc_topology())):

                # LET rguid be the objectGUID of the nTDSDSA object
                # referenced by cn!fromServer
                # LET lguid be (cn!parent)!objectGUID

                # IF BridgeheadDCFailed(rguid, detectFailedDCs) = FALSE and
                # BridgeheadDCFailed(lguid, detectFailedDCs) = FALSE
                #     Increment cValidConnections by 1
                if ((not self.is_bridgehead_failed(rdsa, detect_failed) and
                     not self.is_bridgehead_failed(ldsa, detect_failed))):
                    valid_connections += 1

                # IF keepConnections does not contain cn!objectGUID
                #     APPEND cn!objectGUID to keepConnections
                # (kept_connections is a set, so duplicates are free)
                self.kept_connections.add(cn)

    # ENDFOR
    debug.DEBUG_RED("valid connections %d" % valid_connections)
    DEBUG("kept_connections:\n%s" % (self.kept_connections,))
    # IF cValidConnections = 0
    if valid_connections == 0:

        # LET opt be NTDSCONN_OPT_IS_GENERATED
        opt = dsdb.NTDSCONN_OPT_IS_GENERATED

        # IF bit NTDSSITELINK_OPT_USE_NOTIFY is set in ri.Options
        #     SET bits NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT and
        #     NTDSCONN_OPT_USE_NOTIFY in opt
        if (link_opt & dsdb.NTDSSITELINK_OPT_USE_NOTIFY) != 0:
            opt |= (dsdb.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT |
                    dsdb.NTDSCONN_OPT_USE_NOTIFY)

        # IF bit NTDSSITELINK_OPT_TWOWAY_SYNC is set in ri.Options
        #     SET bit NTDSCONN_OPT_TWOWAY_SYNC opt
        if (link_opt & dsdb.NTDSSITELINK_OPT_TWOWAY_SYNC) != 0:
            opt |= dsdb.NTDSCONN_OPT_TWOWAY_SYNC

        # IF bit NTDSSITELINK_OPT_DISABLE_COMPRESSION is set in
        # ri.Options
        #     SET bit NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION in opt
        if ((link_opt &
             dsdb.NTDSSITELINK_OPT_DISABLE_COMPRESSION) != 0):
            opt |= dsdb.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION

        # Perform an originating update to create a new nTDSConnection
        # object cn that is a child of lbh, cn!enabledConnection = TRUE,
        # cn!options = opt, cn!transportType is a reference to t,
        # cn!fromServer is a reference to rbh, and cn!schedule = sch
        DEBUG_FN("new connection, KCC dsa: %s" % self.my_dsa.dsa_dnstr)
        system_flags = (dsdb.SYSTEM_FLAG_CONFIG_ALLOW_RENAME |
                        dsdb.SYSTEM_FLAG_CONFIG_ALLOW_MOVE)

        cn = lbh.new_connection(opt, system_flags, transport,
                                rbh.dsa_dnstr, link_sched)

        # Display any added connection
        if self.readonly or lbh.is_ro():
            if cn.to_be_added:
                logger.info("TO BE ADDED:\n%s" % cn)

            lbh.commit_connections(self.samdb, ro=True)
        else:
            lbh.commit_connections(self.samdb)

        # APPEND cn!objectGUID to keepConnections
        self.kept_connections.add(cn)
+
def add_transports(self, vertex, local_vertex, graph, detect_failed):
    """Build a Vertex's transport lists

    Each vertex has accept_red_red and accept_black lists that
    list what transports they accept under various conditions. The
    only transport that is ever accepted is IP, and a dummy extra
    transport called "EDGE_TYPE_ALL".

    Part of MS-ADTS 6.2.2.3.4.3 -- ColorVertices

    :param vertex: the remote vertex we are thinking about
    :param local_vertex: the vertex relating to the local site.
    :param graph: the intersite graph
    :param detect_failed: whether to detect failed links
    :return: True if some bridgeheads were not found
    """
    # The docs ([MS-ADTS] 6.2.2.3.4.3) say to use local_vertex
    # here, but using vertex seems to make more sense. That is,
    # the docs want this:
    #
    # bh = self.get_bridgehead(local_vertex.site, vertex.part, transport,
    #                          local_vertex.is_black(), detect_failed)
    #
    # TODO WHY?????

    vertex.accept_red_red = []
    vertex.accept_black = []
    found_failed = False

    if vertex in graph.connected_vertices:
        transport_guid = str(self.ip_transport.guid)

        bridgehead = self.get_bridgehead(vertex.site, vertex.part,
                                         self.ip_transport,
                                         vertex.is_black(), detect_failed)
        if bridgehead is not None:
            vertex.accept_red_red.append(transport_guid)
            vertex.accept_black.append(transport_guid)
        elif vertex.site.is_rodc_site():
            # An RODC site has no bridgehead but still accepts red-red
            vertex.accept_red_red.append(transport_guid)
        else:
            found_failed = True

    # Add additional transport to ensure another run of Dijkstra
    vertex.accept_red_red.append("EDGE_TYPE_ALL")
    vertex.accept_black.append("EDGE_TYPE_ALL")

    return found_failed
+
def create_connections(self, graph, part, detect_failed):
    """Create intersite NTDSConnections as needed by a partition

    Construct an NC replica graph for the NC identified by
    the given crossRef, then create any additional nTDSConnection
    objects required.

    :param graph: site graph.
    :param part: crossRef object for NC.
    :param detect_failed: True to detect failed DCs and route
        replication traffic around them, False to assume no DC
        has failed.

    Modifies self.kept_connections by adding any connections
    deemed to be "in use".

    :return: (all_connected, found_failed_dc)
        (all_connected) True if the resulting NC replica graph
        connects all sites that need to be connected.
        (found_failed_dc) True if one or more failed DCs were
        detected.
    """
    all_connected = True
    found_failed = False

    DEBUG_FN("create_connections(): enter\n"
             "\tpartdn=%s\n\tdetect_failed=%s" %
             (part.nc_dnstr, detect_failed))

    # XXX - This is a highly abbreviated function from the MS-TECH
    #       ref.  It creates connections between bridgeheads to all
    #       sites that have appropriate replicas.  Thus we are not
    #       creating a minimum cost spanning tree but instead
    #       producing a fully connected tree.  This should produce
    #       a full (albeit not optimal cost) replication topology.

    my_vertex = Vertex(self.my_site, part)
    my_vertex.color_vertex()

    # Colour every remote vertex and record whether any of them had
    # no usable bridgehead (a "failed" DC).
    for v in graph.vertices:
        v.color_vertex()
        if self.add_transports(v, my_vertex, graph, detect_failed):
            found_failed = True

    # No NC replicas for this NC in the site of the local DC,
    # so no nTDSConnection objects need be created
    if my_vertex.is_white():
        return all_connected, found_failed

    edge_list, n_components = get_spanning_tree_edges(graph,
                                                      self.my_site,
                                                      label=part.partstr)

    DEBUG_FN("%s Number of components: %d" %
             (part.nc_dnstr, n_components))
    # More than one component means the spanning forest could not
    # join every site that needs this NC.
    if n_components > 1:
        all_connected = False

    # LET partialReplicaOkay be TRUE if and only if
    # localSiteVertex.Color = COLOR.BLACK
    partial_ok = my_vertex.is_black()

    # Utilize the IP transport only for now
    transport = self.ip_transport

    DEBUG("edge_list %s" % edge_list)
    for e in edge_list:
        # Skip edges directed away from our site.
        # XXX more accurate comparison?
        if e.directed and e.vertices[0].site is self.my_site:
            continue

        if e.vertices[0].site is self.my_site:
            rsite = e.vertices[1].site
        else:
            rsite = e.vertices[0].site

        # We don't make connections to our own site as that
        # is intrasite topology generator's job
        if rsite is self.my_site:
            DEBUG("rsite is my_site")
            continue

        # Determine bridgehead server in remote site
        rbh = self.get_bridgehead(rsite, part, transport,
                                  partial_ok, detect_failed)
        if rbh is None:
            continue

        # RODC acts as an BH for itself
        # IF AmIRODC() then
        #     LET lbh be the nTDSDSA object of the local DC
        # ELSE
        #     LET lbh be the result of GetBridgeheadDC(localSiteVertex.ID,
        #     cr, t, partialReplicaOkay, detectFailedDCs)
        if self.my_dsa.is_ro():
            lsite = self.my_site
            lbh = self.my_dsa
        else:
            lsite = self.my_site
            lbh = self.get_bridgehead(lsite, part, transport,
                                      partial_ok, detect_failed)
        # TODO
        if lbh is None:
            # No local bridgehead at all: report disconnected + failed
            debug.DEBUG_RED("DISASTER! lbh is None")
            return False, True

        DEBUG_FN("lsite: %s\nrsite: %s" % (lsite, rsite))
        DEBUG_FN("vertices %s" % (e.vertices,))
        debug.DEBUG_BLUE("bridgeheads\n%s\n%s\n%s" % (lbh, rbh, "-" * 70))

        # An edge with no siteLink gets default (zero) options and no
        # explicit schedule.
        sitelink = e.site_link
        if sitelink is None:
            link_opt = 0x0
            link_sched = None
        else:
            link_opt = sitelink.options
            link_sched = sitelink.schedule

        self.create_connection(part, rbh, rsite, transport,
                               lbh, lsite, link_opt, link_sched,
                               partial_ok, detect_failed)

    return all_connected, found_failed
+
def create_intersite_connections(self):
    """Create NTDSConnections as necessary for all partitions.

    Computes an NC replica graph for each NC replica that "should be
    present" on the local DC or "is present" on any DC in the same site
    as the local DC. For each edge directed to an NC replica on such a
    DC from an NC replica on a DC in another site, the KCC creates an
    nTDSConnection object to imply that edge if one does not already
    exist.

    Modifies self.kept_connections - A set of nTDSConnection
    objects for edges that are directed
    to the local DC's site in one or more NC replica graphs.

    :return: True if spanning trees were created for all NC replica
        graphs, otherwise False.
    """
    all_connected = True
    self.kept_connections = set()

    # LET crossRefList be the set containing each object o of class
    # crossRef such that o is a child of the CN=Partitions child of the
    # config NC

    # FOR each crossRef object cr in crossRefList
    #    IF cr!enabled has a value and is false, or if FLAG_CR_NTDS_NC
    #        is clear in cr!systemFlags, skip cr.
    #    LET g be the GRAPH return of SetupGraph()

    for part in self.part_table.values():

        # Skip disabled and foreign partitions (see spec pseudo code
        # above) -- they need no connections from us.
        if not part.is_enabled():
            continue

        if part.is_foreign():
            continue

        graph = self.setup_graph(part)

        # Create nTDSConnection objects, routing replication traffic
        # around "failed" DCs.
        # (A dead "found_failed = False" initialisation was removed
        # here; the tuple unpack below always assigns it.)
        connected, found_failed = self.create_connections(graph,
                                                          part, True)

        DEBUG("with detect_failed: connected %s Found failed %s" %
              (connected, found_failed))
        if not connected:
            all_connected = False

        if found_failed:
            # One or more failed DCs preclude use of the ideal NC
            # replica graph. Add connections for the ideal graph.
            self.create_connections(graph, part, False)

    return all_connected
+
def intersite(self, ping):
    """Generate the inter-site KCC replica graph and nTDSConnections

    As per MS-ADTS 6.2.2.3.

    If self.readonly is False, the connections are added to self.samdb.

    Produces self.kept_connections which is a set of NTDS
    Connections that should be kept during subsequent pruning
    process.

    After this has run, all sites should be connected in a minimum
    spanning tree.

    :param ping: An oracle function of remote site availability
    :return (True or False): (True) if the produced NC replica
        graph connects all sites that need to be connected
    """
    mydsa = self.my_dsa
    mysite = self.my_site
    all_connected = True

    DEBUG_FN("intersite(): enter")

    # Determine who is the ISTG; in readonly mode the election is not
    # written back to the database.
    mysite.select_istg(self.samdb, mydsa, ro=self.readonly)

    # Bail out early if intersite topology generation is switched off
    # for the local site...
    if mysite.is_intersite_topology_disabled():
        DEBUG_FN("intersite(): exit disabled all_connected=%d" %
                 all_connected)
        return all_connected

    # ...or if we are not the ISTG, as only the ISTG builds the
    # intersite topology.
    if not mydsa.is_istg():
        DEBUG_FN("intersite(): exit not istg all_connected=%d" %
                 all_connected)
        return all_connected

    self.merge_failed_links(ping)

    # For each NC with an NC replica that "should be present" on the
    # local DC or "is present" on any DC in the same site as the
    # local DC, the KCC constructs a site graph--a precursor to an NC
    # replica graph. The site connectivity for a site graph is defined
    # by objects of class interSiteTransport, siteLink, and
    # siteLinkBridge in the config NC.

    all_connected = self.create_intersite_connections()

    DEBUG_FN("intersite(): exit all_connected=%d" % all_connected)
    return all_connected
+
+ # This function currently does no actions. The reason being that we cannot
+ # perform modifies in this way on the RODC.
def update_rodc_connection(self, ro=True):
    """Updates the RODC NTFRS connection object.

    If the local DSA is not an RODC, this does nothing.

    :param ro: when True, changes are not written to the database
    """
    if not self.my_dsa.is_ro():
        return

    # Given an nTDSConnection object cn1, such that cn1.options contains
    # NTDSCONN_OPT_RODC_TOPOLOGY, and another nTDSConnection object cn2,
    # does not contain NTDSCONN_OPT_RODC_TOPOLOGY, modify cn1 to ensure
    # that the following is true:
    #
    #     cn1.fromServer = cn2.fromServer
    #     cn1.schedule = cn2.schedule
    #
    # If no such cn2 can be found, cn1 is not modified.
    # If no such cn1 can be found, nothing is modified by this task.

    all_connections = self.my_dsa.connect_table.values()
    ro_connections = [c for c in all_connections if c.is_rodc_topology()]
    rw_connections = [c for c in all_connections
                      if c not in ro_connections]

    # XXX here we are dealing with multiple RODC_TOPO connections,
    # if they exist. It is not clear whether the spec means that
    # or if it ever arises.
    if rw_connections and ro_connections:
        # Use the first non-RODC_TOPOLOGY connection as the template.
        template = rw_connections[0]
        for con in ro_connections:
            con.from_dnstr = template.from_dnstr
            con.schedule = template.schedule
            con.to_be_modified = True

    self.my_dsa.commit_connections(self.samdb, ro=ro)
+
def intrasite_max_node_edges(self, node_count):
    """Find the maximum number of edges directed to an intrasite node

    The KCC does not create more than 50 edges directed to a
    single DC. Below that cap, each node gets n + 2 total edges
    directed to it, where n is the smallest non-negative integer
    satisfying

        node_count <= 2*n*n + 6*n + 7

    (If the number of edges is m (i.e. n + 2), that is the same as
    2 * m*m - 2 * m + 3).  We think in terms of n because that is
    the number of extra connections over the double directed ring
    that exists by default.

       edges  n   nodecount
         2    0       7
         3    1      15
         4    2      27
         5    3      43
                ...
        50   48    4903

    The intention is that there should be no more than 3 hops
    between any two DSAs at a site. With up to 7 nodes the 2 edges
    of the ring are enough; any configuration of extra edges with
    8 nodes will be enough. It is less clear that the 3 hop
    guarantee holds at e.g. 15 nodes in degenerate cases, but
    those are quite unlikely given the extra edges are randomly
    arranged.

    :param node_count: the number of nodes in the site
    :return: the desired maximum number of connections (capped at 50)
    """
    extra = 0
    while node_count > (2 * extra * extra) + (6 * extra) + 7:
        extra += 1
    return min(extra + 2, 50)
+
def construct_intrasite_graph(self, site_local, dc_local,
                              nc_x, gc_only, detect_stale):
    """Create an intrasite graph using given parameters

    This might be called a number of times per site with different
    parameters.

    Based on [MS-ADTS] 6.2.2.2

    :param site_local: site for which we are working
    :param dc_local: local DC that potentially needs a replica
    :param nc_x: naming context (x) that we are testing if it
        "should be present" on the local DC
    :param gc_only: Boolean - only consider global catalog servers
    :param detect_stale: Boolean - check whether links seems down
    :return: None
    """
    # We're using the MS notation names here to allow
    # correlation back to the published algorithm.
    #
    # nc_x - naming context (x) that we are testing if it
    #        "should be present" on the local DC
    # f_of_x - replica (f) found on a DC (s) for NC (x)
    # dc_s - DC where f_of_x replica was found
    # dc_local - local DC that potentially needs a replica
    #            (f_of_x)
    # r_list - replica list R
    # p_of_x - replica (p) is partial and found on a DC (s)
    #          for NC (x)
    # l_of_x - replica (l) is the local replica for NC (x)
    #          that should appear on the local DC
    # r_len = is length of replica list |R|
    #
    # If the DSA doesn't need a replica for this
    # partition (NC x) then continue
    needed, ro, partial = nc_x.should_be_present(dc_local)

    debug.DEBUG_YELLOW("construct_intrasite_graph(): enter" +
                       "\n\tgc_only=%d" % gc_only +
                       "\n\tdetect_stale=%d" % detect_stale +
                       "\n\tneeded=%s" % needed +
                       "\n\tro=%s" % ro +
                       "\n\tpartial=%s" % partial +
                       "\n%s" % nc_x)

    if not needed:
        debug.DEBUG_RED("%s lacks 'should be present' status, "
                        "aborting construct_intrasite_graph!" %
                        nc_x.nc_dnstr)
        return

    # Create a NCReplica that matches what the local replica
    # should say. We'll use this below in our r_list
    l_of_x = NCReplica(dc_local, nc_x.nc_dnstr)

    l_of_x.identify_by_basedn(self.samdb)

    l_of_x.rep_partial = partial
    l_of_x.rep_ro = ro

    # Add this replica that "should be present" to the
    # needed replica table for this DSA
    dc_local.add_needed_replica(l_of_x)

    # Replica list
    #
    # Let R be a sequence containing each writable replica f of x
    # such that f "is present" on a DC s satisfying the following
    # criteria:
    #
    #  * s is a writable DC other than the local DC.
    #
    #  * s is in the same site as the local DC.
    #
    #  * If x is a read-only full replica and x is a domain NC,
    #    then the DC's functional level is at least
    #    DS_BEHAVIOR_WIN2008.
    #
    #  * Bit NTDSSETTINGS_OPT_IS_TOPL_DETECT_STALE_DISABLED is set
    #    in the options attribute of the site settings object for
    #    the local DC's site, or no tuple z exists in the
    #    kCCFailedLinks or kCCFailedConnections variables such
    #    that z.UUIDDsa is the objectGUID of the nTDSDSA object
    #    for s, z.FailureCount > 0, and the current time -
    #    z.TimeFirstFailure > 2 hours.

    r_list = []

    # We'll loop thru all the DSAs looking for
    # writeable NC replicas that match the naming
    # context dn for (nc_x)
    #
    for dc_s in self.my_site.dsa_table.values():
        # If this partition (nc_x) doesn't appear as a
        # replica (f_of_x) on (dc_s) then continue
        if nc_x.nc_dnstr not in dc_s.current_rep_table:
            continue

        # Pull out the NCReplica (f) of (x) with the dn
        # that matches NC (x) we are examining.
        f_of_x = dc_s.current_rep_table[nc_x.nc_dnstr]

        # Replica (f) of NC (x) must be writable
        if f_of_x.is_ro():
            continue

        # Replica (f) of NC (x) must satisfy the
        # "is present" criteria for DC (s) that
        # it was found on
        if not f_of_x.is_present():
            continue

        # DC (s) must be a writable DSA other than
        # my local DC.  In other words we'd only replicate
        # from other writable DC
        if dc_s.is_ro() or dc_s is dc_local:
            continue

        # Certain replica graphs are produced only
        # for global catalogs, so test against
        # method input parameter
        if gc_only and not dc_s.is_gc():
            continue

        # DC (s) must be in the same site as the local DC
        # as this is the intra-site algorithm. This is
        # handled by virtue of placing DSAs in per
        # site objects (see enclosing for() loop)

        # If NC (x) is intended to be read-only full replica
        # for a domain NC on the target DC then the source
        # DC should have functional level at minimum WIN2008
        #
        # Effectively we're saying that in order to replicate
        # to a targeted RODC (which was introduced in Windows 2008)
        # then we have to replicate from a DC that is also minimally
        # at that level.
        #
        # You can also see this requirement in the MS special
        # considerations for RODC which state that to deploy
        # an RODC, at least one writable domain controller in
        # the domain must be running Windows Server 2008
        if ro and not partial and nc_x.nc_type == NCType.domain:
            if not dc_s.is_minimum_behavior(dsdb.DS_DOMAIN_FUNCTION_2008):
                continue

        # If we haven't been told to turn off stale connection
        # detection and this dsa has a stale connection then
        # continue
        if detect_stale and self.is_stale_link_connection(dc_s):
            continue

        # Replica meets criteria.  Add it to table indexed
        # by the GUID of the DC that it appears on
        r_list.append(f_of_x)

    # If a partial (not full) replica of NC (x) "should be present"
    # on the local DC, append to R each partial replica (p of x)
    # such that p "is present" on a DC satisfying the same
    # criteria defined above for full replica DCs.
    #
    # XXX This loop and the previous one differ only in whether
    # the replica is partial or not. here we only accept partial
    # (because we're partial); before we only accepted full. Order
    # doesn't matter (the list is sorted a few lines down) so these
    # loops could easily be merged. Or this could be a helper
    # function.

    if partial:
        # Now we loop thru all the DSAs looking for
        # partial NC replicas that match the naming
        # context dn for (NC x)
        for dc_s in self.my_site.dsa_table.values():

            # If this partition NC (x) doesn't appear as a
            # replica (p) of NC (x) on the dsa DC (s) then
            # continue
            if nc_x.nc_dnstr not in dc_s.current_rep_table:
                continue

            # Pull out the NCReplica with the dn that
            # matches NC (x) we are examining.
            p_of_x = dc_s.current_rep_table[nc_x.nc_dnstr]

            # Replica (p) of NC (x) must be partial
            if not p_of_x.is_partial():
                continue

            # Replica (p) of NC (x) must satisfy the
            # "is present" criteria for DC (s) that
            # it was found on
            if not p_of_x.is_present():
                continue

            # DC (s) must be a writable DSA other than
            # my DSA.  In other words we'd only replicate
            # from other writable DSA
            if dc_s.is_ro() or dc_s is dc_local:
                continue

            # Certain replica graphs are produced only
            # for global catalogs, so test against
            # method input parameter
            if gc_only and not dc_s.is_gc():
                continue

            # If we haven't been told to turn off stale connection
            # detection and this dsa has a stale connection then
            # continue
            if detect_stale and self.is_stale_link_connection(dc_s):
                continue

            # Replica meets criteria.  Add it to table indexed
            # by the GUID of the DSA that it appears on
            r_list.append(p_of_x)

    # Append to R the NC replica that "should be present"
    # on the local DC
    r_list.append(l_of_x)

    # Sort by the packed GUID of the DSA each replica lives on, so
    # that every DC in the site computes the ring in the same order.
    r_list.sort(key=lambda rep: ndr_pack(rep.rep_dsa_guid))
    r_len = len(r_list)

    max_node_edges = self.intrasite_max_node_edges(r_len)

    # Add a node for each r_list element to the replica graph
    graph_list = []
    for rep in r_list:
        node = GraphNode(rep.rep_dsa_dnstr, max_node_edges)
        graph_list.append(node)

    # For each r(i) from (0 <= i < |R|-1)
    i = 0
    while i < (r_len - 1):
        # Add an edge from r(i) to r(i+1) if r(i) is a full
        # replica or r(i+1) is a partial replica
        if not r_list[i].is_partial() or r_list[i + 1].is_partial():
            graph_list[i + 1].add_edge_from(r_list[i].rep_dsa_dnstr)

        # Add an edge from r(i+1) to r(i) if r(i+1) is a full
        # replica or ri is a partial replica.
        if not r_list[i + 1].is_partial() or r_list[i].is_partial():
            graph_list[i].add_edge_from(r_list[i + 1].rep_dsa_dnstr)
        i = i + 1

    # Add an edge from r|R|-1 to r0 if r|R|-1 is a full replica
    # or r0 is a partial replica.
    if not r_list[r_len - 1].is_partial() or r_list[0].is_partial():
        graph_list[0].add_edge_from(r_list[r_len - 1].rep_dsa_dnstr)

    # Add an edge from r0 to r|R|-1 if r0 is a full replica or
    # r|R|-1 is a partial replica.
    if not r_list[0].is_partial() or r_list[r_len - 1].is_partial():
        graph_list[r_len - 1].add_edge_from(r_list[0].rep_dsa_dnstr)

    DEBUG("r_list is length %s" % len(r_list))
    DEBUG('\n'.join(str((x.rep_dsa_guid, x.rep_dsa_dnstr))
                    for x in r_list))

    do_dot_files = self.dot_file_dir is not None and self.debug
    if self.verify or do_dot_files:
        dot_edges = []
        dot_vertices = set()
        for v1 in graph_list:
            dot_vertices.add(v1.dsa_dnstr)
            for v2 in v1.edge_from:
                dot_edges.append((v2, v1.dsa_dnstr))
                dot_vertices.add(v2)

        verify_properties = ('connected',)
        verify_and_dot('intrasite_pre_ntdscon', dot_edges, dot_vertices,
                       label='%s__%s__%s' % (site_local.site_dnstr,
                                             nctype_lut[nc_x.nc_type],
                                             nc_x.nc_dnstr),
                       properties=verify_properties, debug=DEBUG,
                       verify=self.verify,
                       dot_file_dir=self.dot_file_dir,
                       directed=True)

        rw_dot_vertices = set(x for x in dot_vertices
                              if not self.get_dsa(x).is_ro())
        rw_dot_edges = [(a, b) for a, b in dot_edges if
                        a in rw_dot_vertices and b in rw_dot_vertices]
        rw_verify_properties = ('connected',
                                'directed_double_ring_or_small')
        verify_and_dot('intrasite_rw_pre_ntdscon', rw_dot_edges,
                       rw_dot_vertices,
                       label='%s__%s__%s' % (site_local.site_dnstr,
                                             nctype_lut[nc_x.nc_type],
                                             nc_x.nc_dnstr),
                       properties=rw_verify_properties, debug=DEBUG,
                       verify=self.verify,
                       dot_file_dir=self.dot_file_dir,
                       directed=True)

    # For each existing nTDSConnection object implying an edge
    # from rj of R to ri such that j != i, an edge from rj to ri
    # is not already in the graph, and the total edges directed
    # to ri is less than n+2, the KCC adds that edge to the graph.
    for vertex in graph_list:
        dsa = self.my_site.dsa_table[vertex.dsa_dnstr]
        for connect in dsa.connect_table.values():
            remote = connect.from_dnstr
            if remote in self.my_site.dsa_table:
                vertex.add_edge_from(remote)

    DEBUG('reps are:  %s' % '   '.join(x.rep_dsa_dnstr for x in r_list))
    DEBUG('dsas are:  %s' % '   '.join(x.dsa_dnstr for x in graph_list))

    for tnode in graph_list:
        # To optimize replication latency in sites with many NC
        # replicas, the KCC adds new edges directed to ri to bring
        # the total edges to n+2, where the NC replica rk of R
        # from which the edge is directed is chosen at random such
        # that k != i and an edge from rk to ri is not already in
        # the graph.
        #
        # Note that the KCC tech ref does not give a number for
        # the definition of "sites with many NC replicas". At a
        # bare minimum to satisfy n+2 edges directed at a node we
        # have to have at least three replicas in |R| (i.e. if n
        # is zero then at least replicas from two other graph
        # nodes may direct edges to us).
        if r_len >= 3 and not tnode.has_sufficient_edges():
            candidates = [x for x in graph_list if
                          (x is not tnode and
                           x.dsa_dnstr not in tnode.edge_from)]

            debug.DEBUG_BLUE("looking for random link for %s. r_len %d, "
                             "graph len %d candidates %d"
                             % (tnode.dsa_dnstr, r_len, len(graph_list),
                                len(candidates)))

            DEBUG("candidates %s" % [x.dsa_dnstr for x in candidates])

            while candidates and not tnode.has_sufficient_edges():
                other = random.choice(candidates)
                DEBUG("trying to add candidate %s" % other.dsa_dnstr)
                if not tnode.add_edge_from(other.dsa_dnstr):
                    debug.DEBUG_RED("could not add %s" % other.dsa_dnstr)
                # Each candidate is tried at most once, whether or
                # not the edge was actually added.
                candidates.remove(other)
        else:
            DEBUG_FN("not adding links to %s: nodes %s, links is %s/%s" %
                     (tnode.dsa_dnstr, r_len, len(tnode.edge_from),
                      tnode.max_edges))

        # Print the graph node in debug mode
        DEBUG_FN("%s" % tnode)

        # For each edge directed to the local DC, ensure a nTDSConnection
        # points to us that satisfies the KCC criteria

        if tnode.dsa_dnstr == dc_local.dsa_dnstr:
            tnode.add_connections_from_edges(dc_local, self.ip_transport)

    if self.verify or do_dot_files:
        dot_edges = []
        dot_vertices = set()
        for v1 in graph_list:
            dot_vertices.add(v1.dsa_dnstr)
            for v2 in v1.edge_from:
                dot_edges.append((v2, v1.dsa_dnstr))
                dot_vertices.add(v2)

        verify_properties = ('connected',)
        verify_and_dot('intrasite_post_ntdscon', dot_edges, dot_vertices,
                       label='%s__%s__%s' % (site_local.site_dnstr,
                                             nctype_lut[nc_x.nc_type],
                                             nc_x.nc_dnstr),
                       properties=verify_properties, debug=DEBUG,
                       verify=self.verify,
                       dot_file_dir=self.dot_file_dir,
                       directed=True)

        rw_dot_vertices = set(x for x in dot_vertices
                              if not self.get_dsa(x).is_ro())
        rw_dot_edges = [(a, b) for a, b in dot_edges if
                        a in rw_dot_vertices and b in rw_dot_vertices]
        rw_verify_properties = ('connected',
                                'directed_double_ring_or_small')
        verify_and_dot('intrasite_rw_post_ntdscon', rw_dot_edges,
                       rw_dot_vertices,
                       label='%s__%s__%s' % (site_local.site_dnstr,
                                             nctype_lut[nc_x.nc_type],
                                             nc_x.nc_dnstr),
                       properties=rw_verify_properties, debug=DEBUG,
                       verify=self.verify,
                       dot_file_dir=self.dot_file_dir,
                       directed=True)
+
def intrasite(self):
    """Generate the intrasite KCC connections

    As per MS-ADTS 6.2.2.2.

    If self.readonly is False, the connections are added to self.samdb.

    After this call, all DCs in each site with more than 3 DCs
    should be connected in a bidirectional ring. If a site has 2
    DCs, they will be bidirectionally connected. Sites with many DCs
    may have arbitrary extra connections.

    :return: None
    """
    mydsa = self.my_dsa

    DEBUG_FN("intrasite(): enter")

    # Test whether local site has topology disabled
    mysite = self.my_site
    if mysite.is_intrasite_topology_disabled():
        return

    detect_stale = (not mysite.is_detect_stale_disabled())
    for connect in mydsa.connect_table.values():
        if connect.to_be_added:
            debug.DEBUG_CYAN("TO BE ADDED:\n%s" % connect)

    # Loop thru all the partitions, with gc_only False
    for partdn, part in self.part_table.items():
        self.construct_intrasite_graph(mysite, mydsa, part, False,
                                       detect_stale)
    for connect in mydsa.connect_table.values():
        if connect.to_be_added:
            debug.DEBUG_BLUE("TO BE ADDED:\n%s" % connect)

    # If the DC is a GC server, the KCC constructs an additional NC
    # replica graph (and creates nTDSConnection objects) for the
    # config NC as above, except that only NC replicas that "are present"
    # on GC servers are added to R.
    for connect in mydsa.connect_table.values():
        if connect.to_be_added:
            debug.DEBUG_YELLOW("TO BE ADDED:\n%s" % connect)

    # Do it again, with gc_only True
    for partdn, part in self.part_table.items():
        if part.is_config():
            self.construct_intrasite_graph(mysite, mydsa, part, True,
                                           detect_stale)

    # The DC repeats the NC replica graph computation and nTDSConnection
    # creation for each of the NC replica graphs, this time assuming
    # that no DC has failed. It does so by re-executing the steps as
    # if the bit NTDSSETTINGS_OPT_IS_TOPL_DETECT_STALE_DISABLED were
    # set in the options attribute of the site settings object for
    # the local DC's site.  (ie. we set the "detect_stale" flag to False)
    for connect in mydsa.connect_table.values():
        if connect.to_be_added:
            debug.DEBUG_BLUE("TO BE ADDED:\n%s" % connect)

    # Loop thru all the partitions.
    for partdn, part in self.part_table.items():
        self.construct_intrasite_graph(mysite, mydsa, part, False,
                                       False)  # don't detect stale

    # If the DC is a GC server, the KCC constructs an additional NC
    # replica graph (and creates nTDSConnection objects) for the
    # config NC as above, except that only NC replicas that "are present"
    # on GC servers are added to R.
    for connect in mydsa.connect_table.values():
        if connect.to_be_added:
            debug.DEBUG_RED("TO BE ADDED:\n%s" % connect)

    for partdn, part in self.part_table.items():
        if part.is_config():
            self.construct_intrasite_graph(mysite, mydsa, part, True,
                                           False)  # don't detect stale

    self._commit_changes(mydsa)
+
def list_dsas(self):
    """Compile a comprehensive list of DSA DNs

    These are all the DSAs on all the sites that KCC would be
    dealing with.

    This method is not idempotent and may not work correctly in
    sequence with KCC.run().

    :return: a list of DSA DN strings.
    """
    # Load everything the KCC would normally load, so the site
    # table is fully populated before we walk it.
    self.load_my_site()
    self.load_my_dsa()

    self.load_all_sites()
    self.load_all_partitions()
    self.load_ip_transport()
    self.load_all_sitelinks()

    # Strip the leading "CN=NTDS Settings," RDN so that each entry
    # is the server DN rather than the nTDSDSA DN.
    return [dsa.dsa_dnstr.replace('CN=NTDS Settings,', '', 1)
            for site in self.site_table.values()
            for dsa in site.dsa_table.values()]
+
def load_samdb(self, dburl, lp, creds, force=False):
    """Load the database using an url, loadparm, and credentials

    If force is False, the samdb won't be reloaded if it already
    exists.

    :param dburl: a database url.
    :param lp: a loadparm object.
    :param creds: a Credentials object.
    :param force: a boolean indicating whether to overwrite.
    :raises KCCError: if the database cannot be opened.
    """
    if force or self.samdb is None:
        try:
            self.samdb = SamDB(url=dburl,
                               session_info=system_session(),
                               credentials=creds, lp=lp)
        except ldb.LdbError as e1:
            (num, msg) = e1.args
            # Chain the original LdbError explicitly so the ldb
            # cause survives in the KCCError traceback.
            raise KCCError("Unable to open sam database %s : %s" %
                           (dburl, msg)) from e1
+
def plot_all_connections(self, basename, verify_properties=()):
    """Helper function to plot and verify NTDSConnections

    :param basename: an identifying string to use in filenames and logs.
    :param verify_properties: properties to verify (default empty)
    """
    verify = verify_properties and self.verify
    # Nothing to do when we neither verify nor write dot files.
    if not verify and self.dot_file_dir is None:
        return

    dot_edges = []
    dot_vertices = []
    edge_colours = []
    vertex_colours = []

    for dsa in self.dsa_by_dnstr.values():
        dot_vertices.append(dsa.dsa_dnstr)
        # Read-only DSAs are drawn red, writable ones blue.
        vertex_colours.append('#cc0000' if dsa.is_ro() else '#0000cc')
        for con in dsa.connect_table.values():
            # RODC-topology connections are drawn red, others blue.
            edge_colours.append('red' if con.is_rodc_topology() else 'blue')
            dot_edges.append((con.from_dnstr, dsa.dsa_dnstr))

    verify_and_dot(basename, dot_edges, vertices=dot_vertices,
                   label=self.my_dsa_dnstr,
                   properties=verify_properties, debug=DEBUG,
                   verify=verify, dot_file_dir=self.dot_file_dir,
                   directed=True, edge_colors=edge_colours,
                   vertex_colors=vertex_colours)
+
def run(self, dburl, lp, creds, forced_local_dsa=None,
        forget_local_links=False, forget_intersite_links=False,
        attempt_live_connections=False):
    """Perform a KCC run, possibly updating repsFrom topology

    :param dburl: url of the database to work with.
    :param lp: a loadparm object.
    :param creds: a Credentials object.
    :param forced_local_dsa: pretend to be on the DSA with this dn_str
    :param forget_local_links: calculate as if no connections existed
        (boolean, default False)
    :param forget_intersite_links: calculate with only intrasite connection
        (boolean, default False)
    :param attempt_live_connections: attempt to connect to remote DSAs to
        determine link availability (boolean, default False)
    :return: 1 on error, 0 otherwise
    """
    if self.samdb is None:
        DEBUG_FN("samdb is None; let's load it from %s" % (dburl,))
        self.load_samdb(dburl, lp, creds, force=False)

    if forced_local_dsa:
        self.samdb.set_ntds_settings_dn("CN=NTDS Settings,%s" %
                                        forced_local_dsa)

    # NOTE(review): this try/except merely re-raises; it adds no
    # handling and could be removed (the docstring's "1 on error"
    # is never returned from this path).
    try:
        # Setup
        self.load_my_site()
        self.load_my_dsa()

        self.load_all_sites()
        self.load_all_partitions()
        self.load_ip_transport()
        self.load_all_sitelinks()

        # When verifying or writing dot files, map every DSA GUID in
        # every site to its DN string for use in the graphs below.
        if self.verify or self.dot_file_dir is not None:
            guid_to_dnstr = {}
            for site in self.site_table.values():
                guid_to_dnstr.update((str(dsa.dsa_guid), dnstr)
                                     for dnstr, dsa
                                     in site.dsa_table.items())

            self.plot_all_connections('dsa_initial')

            dot_edges = []
            current_reps, needed_reps = self.my_dsa.get_rep_tables()
            for dnstr, c_rep in current_reps.items():
                DEBUG("c_rep %s" % c_rep)
                dot_edges.append((self.my_dsa.dsa_dnstr, dnstr))

            verify_and_dot('dsa_repsFrom_initial', dot_edges,
                           directed=True, label=self.my_dsa_dnstr,
                           properties=(), debug=DEBUG, verify=self.verify,
                           dot_file_dir=self.dot_file_dir)

            dot_edges = []
            for site in self.site_table.values():
                for dsa in site.dsa_table.values():
                    current_reps, needed_reps = dsa.get_rep_tables()
                    for dn_str, rep in current_reps.items():
                        for reps_from in rep.rep_repsFrom:
                            DEBUG("rep %s" % rep)
                            dsa_guid = str(reps_from.source_dsa_obj_guid)
                            dsa_dn = guid_to_dnstr[dsa_guid]
                            dot_edges.append((dsa.dsa_dnstr, dsa_dn))

            verify_and_dot('dsa_repsFrom_initial_all', dot_edges,
                           directed=True, label=self.my_dsa_dnstr,
                           properties=(), debug=DEBUG, verify=self.verify,
                           dot_file_dir=self.dot_file_dir)

            dot_edges = []
            dot_colours = []
            for link in self.sitelink_table.values():
                # Derive a stable per-sitelink colour from a hash of
                # its DN, so the same link is coloured consistently.
                from hashlib import md5
                tmp_str = link.dnstr.encode('utf8')
                colour = '#' + md5(tmp_str).hexdigest()[:6]
                for a, b in itertools.combinations(link.site_list, 2):
                    dot_edges.append((a[1], b[1]))
                    dot_colours.append(colour)
            properties = ('connected',)
            verify_and_dot('dsa_sitelink_initial', dot_edges,
                           directed=False,
                           label=self.my_dsa_dnstr, properties=properties,
                           debug=DEBUG, verify=self.verify,
                           dot_file_dir=self.dot_file_dir,
                           edge_colors=dot_colours)

        if forget_local_links:
            for dsa in self.my_site.dsa_table.values():
                dsa.connect_table = dict((k, v) for k, v in
                                         dsa.connect_table.items()
                                         if v.is_rodc_topology() or
                                         (v.from_dnstr not in
                                          self.my_site.dsa_table))
            self.plot_all_connections('dsa_forgotten_local')

        if forget_intersite_links:
            for site in self.site_table.values():
                for dsa in site.dsa_table.values():
                    dsa.connect_table = dict((k, v) for k, v in
                                             dsa.connect_table.items()
                                             if site is self.my_site and
                                             v.is_rodc_topology())

            self.plot_all_connections('dsa_forgotten_all')

        if attempt_live_connections:
            # Encapsulates lp and creds in a function that
            # attempts connections to remote DSAs.
            # NOTE(review): the first parameter is named 'self' but
            # this is a plain nested function, not a method -- a
            # caller invoking ping(dnsname) would bind the name to
            # 'self' and leave 'dnsname' unbound.  Verify how the
            # steps below actually call this.
            def ping(self, dnsname):
                try:
                    drs_utils.drsuapi_connect(dnsname, self.lp, self.creds)
                except drs_utils.drsException:
                    return False
                return True
        else:
            ping = None
        # These are the published steps (in order) for the
        # MS-TECH description of the KCC algorithm ([MS-ADTS] 6.2.2)

        # Step 1
        self.refresh_failed_links_connections(ping)

        # Step 2
        self.intrasite()

        # Step 3
        all_connected = self.intersite(ping)

        # Step 4
        self.remove_unneeded_ntdsconn(all_connected)

        # Step 5
        self.translate_ntdsconn()

        # Step 6
        self.remove_unneeded_failed_links_connections()

        # Step 7
        self.update_rodc_connection()

        if self.verify or self.dot_file_dir is not None:
            self.plot_all_connections('dsa_final',
                                      ('connected',))

            debug.DEBUG_MAGENTA("there are %d dsa guids" %
                                len(guid_to_dnstr))

            dot_edges = []
            edge_colors = []
            my_dnstr = self.my_dsa.dsa_dnstr
            current_reps, needed_reps = self.my_dsa.get_rep_tables()
            for dnstr, n_rep in needed_reps.items():
                for reps_from in n_rep.rep_repsFrom:
                    guid_str = str(reps_from.source_dsa_obj_guid)
                    dot_edges.append((my_dnstr, guid_to_dnstr[guid_str]))
                    edge_colors.append('#' + str(n_rep.nc_guid)[:6])

            verify_and_dot('dsa_repsFrom_final', dot_edges, directed=True,
                           label=self.my_dsa_dnstr,
                           properties=(), debug=DEBUG, verify=self.verify,
                           dot_file_dir=self.dot_file_dir,
                           edge_colors=edge_colors)

            dot_edges = []

            for site in self.site_table.values():
                for dsa in site.dsa_table.values():
                    current_reps, needed_reps = dsa.get_rep_tables()
                    for n_rep in needed_reps.values():
                        for reps_from in n_rep.rep_repsFrom:
                            dsa_guid = str(reps_from.source_dsa_obj_guid)
                            dsa_dn = guid_to_dnstr[dsa_guid]
                            dot_edges.append((dsa.dsa_dnstr, dsa_dn))

            verify_and_dot('dsa_repsFrom_final_all', dot_edges,
                           directed=True, label=self.my_dsa_dnstr,
                           properties=(), debug=DEBUG, verify=self.verify,
                           dot_file_dir=self.dot_file_dir)

    except:
        raise

    return 0
+
def import_ldif(self, dburl, lp, ldif_file, forced_local_dsa=None):
    """Import relevant objects and attributes from an LDIF file.

    The point of this function is to allow a programmer/debugger to
    import an LDIF file with non-security relevant information that
    was previously extracted from a DC database.  The LDIF file is used
    to create a temporary abbreviated database.  The KCC algorithm can
    then run against this abbreviated database for debug or test
    verification that the topology generated is computationally the
    same between different OSes and algorithms.

    :param dburl: path to the temporary abbreviated db to create
    :param lp: a loadparm object.
    :param ldif_file: path to the ldif file to import
    :param forced_local_dsa: perform KCC from this DSA's point of view
    :return: zero on success, 1 on error
    """
    try:
        self.samdb = ldif_import_export.ldif_to_samdb(dburl, lp, ldif_file,
                                                      forced_local_dsa)
    except ldif_import_export.LdifError as e:
        # Report the failure and signal it via the return code.
        logger.critical(e)
        return 1
    else:
        return 0
+
def export_ldif(self, dburl, lp, creds, ldif_file):
    """Save KCC relevant details to an ldif file

    The point of this function is to allow a programmer/debugger to
    extract an LDIF file with non-security relevant information from
    a DC database.  The LDIF file can then be used to "import" via
    the import_ldif() function this file into a temporary abbreviated
    database.  The KCC algorithm can then run against this abbreviated
    database for debug or test verification that the topology generated
    is computationally the same between different OSes and algorithms.

    :param dburl: LDAP database URL to extract info from
    :param lp: a loadparm object.
    :param creds: a Credentials object.
    :param ldif_file: output LDIF file name to create
    :return: zero on success, 1 on error
    """
    try:
        ldif_import_export.samdb_to_ldif_file(self.samdb, dburl, lp, creds,
                                              ldif_file)
    except ldif_import_export.LdifError as e:
        # Report the failure and signal it via the return code.
        logger.critical(e)
        return 1
    else:
        return 0
diff --git a/python/samba/kcc/debug.py b/python/samba/kcc/debug.py
new file mode 100644
index 0000000..8a69bde
--- /dev/null
+++ b/python/samba/kcc/debug.py
@@ -0,0 +1,61 @@
+# Debug utilities for samba_kcc
+#
+# Copyright (C) Andrew Bartlett 2015
+#
+# Although Andrew Bartlett owns the copyright, the actual work was
+# performed by Douglas Bagnall and Garming Sam.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import sys
+import logging
+from functools import partial
+import traceback
+
+logger = logging.getLogger("samba_kcc")
+logger.addHandler(logging.StreamHandler(sys.stdout))
+DEBUG = logger.debug
+WARN = logger.warning
+
+
+# colours for prettier logs
+from samba.colour import C_NORMAL, REV_RED
+from samba.colour import DARK_RED, RED
+from samba.colour import DARK_GREEN, GREEN
+from samba.colour import DARK_YELLOW, YELLOW
+from samba.colour import DARK_BLUE, BLUE
+from samba.colour import PURPLE, MAGENTA
+from samba.colour import DARK_CYAN, CYAN
+from samba.colour import GREY, WHITE
+
+
def _color_debug(*args, **kwargs):
    # Wrap the format string (args[0]) in the ANSI colour code given by
    # kwargs['color'] plus a reset; the remaining positional args are
    # passed through unchanged so logging's lazy %-formatting still works.
    DEBUG('%s%s%s' % (kwargs['color'], args[0], C_NORMAL), *args[1:])
+
+
# Dynamically create one DEBUG_<COLOUR> helper per colour (DEBUG_RED,
# DEBUG_GREEN, ...) by partially applying _color_debug with the matching
# colour escape code, and inject each into this module's namespace.
_globals = globals()
for _color in ('DARK_RED', 'RED', 'DARK_GREEN', 'GREEN', 'YELLOW',
               'DARK_YELLOW', 'DARK_BLUE', 'BLUE', 'PURPLE', 'MAGENTA',
               'DARK_CYAN', 'CYAN', 'GREY', 'WHITE', 'REV_RED'):
    _globals['DEBUG_' + _color] = partial(_color_debug, color=_globals[_color])
+
+
def DEBUG_FN(msg=''):
    """Log a debug message prefixed with the caller's file, line number
    and function name, colourised for readability.

    :param msg: the message to append to the location prefix
    """
    # extract_stack(None, 2) keeps only the two innermost frames; [0]
    # is therefore the frame that called DEBUG_FN.  A FrameSummary
    # unpacks as (filename, lineno, function, text).
    filename, lineno, function, text = traceback.extract_stack(None, 2)[0]
    DEBUG("%s%s:%s%s %s%s()%s '%s'" % (CYAN, filename, BLUE, lineno,
                                       CYAN, function, C_NORMAL, msg))
+
+
def null_debug(*args, **kwargs):
    """A no-op stand-in for the DEBUG functions: accepts any arguments
    and does nothing."""
    return None
diff --git a/python/samba/kcc/graph.py b/python/samba/kcc/graph.py
new file mode 100644
index 0000000..63f1c3a
--- /dev/null
+++ b/python/samba/kcc/graph.py
@@ -0,0 +1,859 @@
+# Graph functions used by KCC intersite
+#
+# Copyright (C) Dave Craft 2011
+# Copyright (C) Andrew Bartlett 2015
+#
+# Andrew Bartlett's alleged work performed by his underlings Douglas
+# Bagnall and Garming Sam.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import itertools
+import heapq
+
+from samba.kcc.graph_utils import write_dot_file, verify_and_dot, verify_graph
+from samba.kcc.kcc_utils import KCCError
+from samba.ndr import ndr_pack
+from samba.dcerpc import misc
+
+from samba.kcc.debug import DEBUG, DEBUG_FN, WARN
+
+MAX_DWORD = 2 ** 32 - 1
+
+
class ReplInfo(object):
    """Replication information attached to a graph vertex.

    NTDSConnection objects and graph vertices use different
    representations of a replication schedule; this is the vertex-side
    one, carrying cost, interval, options and the schedule itself.
    """
    def __init__(self):
        # A schedule of None means "replicate in every 15 minute window".
        self.schedule = None
        self.cost = 0
        self.interval = 0
        self.options = 0
        # 84 bytes * 8 bits: every replication window is open.
        self.duration = 84 * 8

    def set_repltimes_from_schedule(self, schedule):
        """Convert an NTDS Connection schedule into replTimes form,
        store it, and recalculate the duration to match.

        :param schedule: the schedule to convert
        """
        repltimes = convert_schedule_to_repltimes(schedule)
        self.schedule = repltimes
        self.duration = total_schedule(repltimes)
+
+
def total_schedule(schedule):
    """Return the total number of 15-minute replication windows set in
    a week-long schedule.

    If the schedule is None it is assumed that replication happens in
    every 15-minute window, so the maximum (84 bytes * 8 bits = 672) is
    returned.

    This is essentially a bit population count.

    :param schedule: a sequence of 84 byte values, or None
    :return: the number of set bits
    """
    if schedule is None:
        return 84 * 8  # 84 bytes = 84 * 8 bits

    # bin(byte).count('1') is a C-speed popcount; unlike the previous
    # shift-and-mask loop it needs no mutable loop variable.
    return sum(bin(byte).count('1') for byte in schedule)
+
+
def convert_schedule_to_repltimes(schedule):
    """Convert an NTDS Connection schedule into a replTimes schedule.

    Schedule is defined in MS-ADTS 6.1.4.5.2; ReplTimes in MS-DRSR 5.164.

    A "Schedule" holds 168 bytes, one per hour of the week, of which
    only the low nibble is significant: bit 3 (0x08) covers the first
    quarter hour and bit 0 (0x01) the last.  The first byte presumably
    covers 12am - 1am Sunday, though the spec doesn't define the start
    of a week.

    A "ReplTimes" packs those 168 nibbles into 84 bytes -- two hours per
    byte, with bit 7 (0x80) the first 15 minutes and bit 0 the last.
    The first byte covers Sunday 12am - 2am (per spec).

    When the NTDS Connection carries no schedule, a default of 0x11 per
    slot is returned (replicate within the last 15 minutes of each
    hour), matching behaviour observed on a Windows DC.

    :param schedule: an NTDS Connection schedule object, or None
    :return: a list of 84 integers
    """
    # An NTDSConnection schedule of None means "once an hour";
    # a repl_info schedule of None means "always".
    if schedule is None or schedule.dataArray[0] is None:
        return [0x11] * 84

    slots = schedule.dataArray[0].slots
    # Pack each pair of hourly nibbles into one replTimes byte.
    return [((slots[2 * i] & 0xF) << 4) | (slots[2 * i + 1] & 0xF)
            for i in range(84)]
+
+
def combine_repl_info(info_a, info_b):
    """Generate a new ReplInfo combining two others

    The schedule is set to the intersection of the two input schedules,
    and the duration to the duration of that combined schedule.
    The cost is the sum of the costs (saturating at MAX_DWORD).
    The options are the intersection of the input options.
    The interval is the maximum of the two intervals.

    :param info_a: An input ReplInfo object
    :param info_b: An input ReplInfo object
    :return: a new ReplInfo combining the other 2
    """
    info_c = ReplInfo()
    info_c.interval = max(info_a.interval, info_b.interval)
    info_c.options = info_a.options & info_b.options

    # A schedule of None means "always".  Expand it into an explicit
    # all-on schedule *locally* -- the previous code assigned the
    # expansion back into info_a/info_b, silently mutating the caller's
    # objects as a side effect.
    schedule_a = info_a.schedule if info_a.schedule is not None else [0xFF] * 84
    schedule_b = info_b.schedule if info_b.schedule is not None else [0xFF] * 84

    info_c.schedule = [a & b for a, b in zip(schedule_a, schedule_b)]
    info_c.duration = total_schedule(info_c.schedule)

    info_c.cost = min(info_a.cost + info_b.cost, MAX_DWORD)
    return info_c
+
+
def get_spanning_tree_edges(graph, my_site, label=None, verify=False,
                            dot_file_dir=None):
    """Find edges for the intersite graph

    From MS-ADTS 6.2.2.3.4.4

    Works in three phases: (1) for each edge set, run Dijkstra's
    algorithm to collect "internal edges" (shortest paths between
    coloured vertices); (2) run Kruskal's algorithm over those internal
    edges to get a minimal spanning tree; (3) post-process the output
    edges, making partial-replica connections one-way.

    :param graph: a kcc.kcc_utils.Graph object
    :param my_site: the topology generator's site
    :param label: a label for use in dot files and verification
    :param verify: if True, try to verify that graph properties are correct
    :param dot_file_dir: if not None, write Graphviz dot files here
    :return: a tuple of (list of spanning tree edges, component count)
    """
    # Phase 1: Run Dijkstra's to get a list of internal edges, which are
    # just the shortest-paths connecting colored vertices

    internal_edges = set()

    for e_set in graph.edge_set:
        edgeType = None
        for v in graph.vertices:
            v.edges = []

        # All con_type in an edge set is the same
        for e in e_set.edges:
            edgeType = e.con_type
            for v in e.vertices:
                v.edges.append(e)

        if verify or dot_file_dir is not None:
            graph_edges = [(a.site.site_dnstr, b.site.site_dnstr)
                           for a, b in
                           itertools.chain(
                               *(itertools.combinations(edge.vertices, 2)
                                 for edge in e_set.edges))]
            graph_nodes = [v.site.site_dnstr for v in graph.vertices]

            if dot_file_dir is not None:
                write_dot_file('edgeset_%s' % (edgeType,), graph_edges,
                               vertices=graph_nodes, label=label)

            if verify:
                errors = verify_graph(graph_edges, vertices=graph_nodes,
                                      properties=('complete', 'connected'))
                if errors:
                    DEBUG('spanning tree edge set %s FAILED' % edgeType)
                    for p, e, doc in errors:
                        DEBUG("%18s: %s" % (p, e))
                    raise KCCError("spanning tree failed")

        # Run dijkstra's algorithm with just the red vertices as seeds
        # Seed from the full replicas
        dijkstra(graph, edgeType, False)

        # Process edge set
        process_edge_set(graph, e_set, internal_edges)

        # Run dijkstra's algorithm with red and black vertices as the seeds
        # Seed from both full and partial replicas
        dijkstra(graph, edgeType, True)

        # Process edge set
        process_edge_set(graph, e_set, internal_edges)

    # All vertices have root/component as itself
    setup_vertices(graph)
    process_edge_set(graph, None, internal_edges)

    if verify or dot_file_dir is not None:
        graph_edges = [(e.v1.site.site_dnstr, e.v2.site.site_dnstr)
                       for e in internal_edges]
        graph_nodes = [v.site.site_dnstr for v in graph.vertices]
        verify_properties = ('multi_edge_forest',)
        verify_and_dot('prekruskal', graph_edges, graph_nodes, label=label,
                       properties=verify_properties, debug=DEBUG,
                       verify=verify, dot_file_dir=dot_file_dir)

    # Phase 2: Run Kruskal's on the internal edges
    output_edges, components = kruskal(graph, internal_edges)

    # This recalculates the cost for the path connecting the
    # closest red vertex. Ignoring types is fine because NO
    # suboptimal edge should exist in the graph
    dijkstra(graph, "EDGE_TYPE_ALL", False)  # TODO rename
    # Phase 3: Process the output
    for v in graph.vertices:
        if v.is_red():
            v.dist_to_red = 0
        else:
            v.dist_to_red = v.repl_info.cost

    if verify or dot_file_dir is not None:
        graph_edges = [(e.v1.site.site_dnstr, e.v2.site.site_dnstr)
                       for e in internal_edges]
        graph_nodes = [v.site.site_dnstr for v in graph.vertices]
        verify_properties = ('multi_edge_forest',)
        verify_and_dot('postkruskal', graph_edges, graph_nodes,
                       label=label, properties=verify_properties,
                       debug=DEBUG, verify=verify,
                       dot_file_dir=dot_file_dir)

    # Ensure only one-way connections for partial-replicas,
    # and make sure they point the right way.
    edge_list = []
    for edge in output_edges:
        # We know these edges only have two endpoints because we made
        # them.
        v, w = edge.vertices
        if v.site is my_site or w.site is my_site:
            if (((v.is_black() or w.is_black()) and
                 v.dist_to_red != MAX_DWORD)):
                edge.directed = True

                # point towards the endpoint closer to a red (full)
                # replica
                if w.dist_to_red < v.dist_to_red:
                    edge.vertices[:] = w, v
            edge_list.append(edge)

    if verify or dot_file_dir is not None:
        graph_edges = [[x.site.site_dnstr for x in e.vertices]
                       for e in edge_list]
        # add the reverse edge if not directed.
        graph_edges.extend([x.site.site_dnstr
                            for x in reversed(e.vertices)]
                           for e in edge_list if not e.directed)
        graph_nodes = [x.site.site_dnstr for x in graph.vertices]
        verify_properties = ()
        verify_and_dot('post-one-way-partial', graph_edges, graph_nodes,
                       label=label, properties=verify_properties,
                       debug=DEBUG, verify=verify,
                       directed=True,
                       dot_file_dir=dot_file_dir)

    # count the components
    return edge_list, components
+
+
def create_edge(con_type, site_link, guid_to_vertex):
    """Build a MultiEdge for the intersite graph from a site link.

    A MultiEdge can have multiple vertices.

    From MS-ADTS 6.2.2.3.4.4

    :param con_type: a transport type GUID
    :param site_link: a kcc.kcc_utils.SiteLink object
    :param guid_to_vertex: a mapping between GUIDs and vertices
    :return: a MultiEdge
    """
    edge = MultiEdge()
    edge.site_link = site_link
    edge.vertices = []
    # Attach every vertex belonging to a site on this link.
    for site_guid, _site_dn in site_link.site_list:
        guid_str = str(site_guid)
        if guid_str in guid_to_vertex:
            edge.vertices.extend(guid_to_vertex[guid_str])
    edge.repl_info.cost = site_link.cost
    edge.repl_info.options = site_link.options
    edge.repl_info.interval = site_link.interval
    edge.repl_info.set_repltimes_from_schedule(site_link.schedule)
    edge.con_type = con_type
    edge.directed = False
    return edge
+
+
def create_auto_edge_set(graph, transport_guid):
    """Build an automatic MultiEdgeSet for the intersite graph

    From within MS-ADTS 6.2.2.3.4.4

    :param graph: the intersite graph object
    :param transport_guid: a transport type GUID
    :return: a MultiEdgeSet
    """
    e_set = MultiEdgeSet()
    # use a NULL guid, not associated with a SiteLinkBridge object
    e_set.guid = misc.GUID()
    e_set.edges = [edge for edge in graph.edges
                   if edge.con_type == transport_guid]
    return e_set
+
+
def setup_vertices(graph):
    """Initialise the graph's vertices ready for a Dijkstra run.

    Part of MS-ADTS 6.2.2.3.4.4

    White (replica-less) vertices get infinite cost and no root or
    component; coloured vertices become their own root and component
    with zero cost.  The schedule and options are set to all-on, so
    that intersections with real data defer to that data (see
    convert_schedule_to_repltimes() for the schedule encoding).

    :param graph: an IntersiteGraph object
    :return: None
    """
    for vertex in graph.vertices:
        white = vertex.is_white()
        vertex.repl_info.cost = MAX_DWORD if white else 0
        vertex.root = None if white else vertex
        vertex.component_id = None if white else vertex

        vertex.repl_info.interval = 0
        vertex.repl_info.options = 0xFFFFFFFF
        # repl_info.schedule == None means "always".
        vertex.repl_info.schedule = None
        vertex.repl_info.duration = 84 * 8
        vertex.demoted = False
+
+
def dijkstra(graph, edge_type, include_black):
    """Perform Dijkstra's algorithm on an intersite graph.

    :param graph: an IntersiteGraph object
    :param edge_type: a transport type GUID
    :param include_black: boolean, whether to include black vertices
    :return: None
    """
    queue = setup_dijkstra(graph, edge_type, include_black)
    while queue:
        _cost, _guid, vertex = heapq.heappop(queue)
        for edge in vertex.edges:
            # consider a new path from this vertex to each other endpoint
            for neighbour in edge.vertices:
                if neighbour is not vertex:
                    try_new_path(graph, queue, vertex, edge, neighbour)
+
+
def setup_dijkstra(graph, edge_type, include_black):
    """Create the initial vertex queue for Dijkstra's algorithm.

    :param graph: an IntersiteGraph object
    :param edge_type: a transport type GUID
    :param include_black: boolean, whether to include black vertices
    :return: A heap queue of (cost, guid, vertex) tuples
    """
    setup_vertices(graph)
    queue = []
    for vertex in graph.vertices:
        if vertex.is_white():
            continue

        excluded_black = vertex.is_black() and not include_black
        if (excluded_black
                or edge_type not in vertex.accept_black
                or edge_type not in vertex.accept_red_red):
            # Not a usable seed: demote it so it behaves like white.
            vertex.repl_info.cost = MAX_DWORD
            vertex.root = None  # NULL GUID
            vertex.demoted = True  # Demoted appears not to be used
        else:
            heapq.heappush(queue, (vertex.repl_info.cost, vertex.guid, vertex))

    return queue
+
+
def try_new_path(graph, queue, vfrom, edge, vto):
    """Dijkstra helper: consider reaching vto from vfrom via one edge.

    If the combined path is cheaper (or has a longer usable schedule)
    than vto's current best, adopt it and push vto back on the heap.

    :param graph: an IntersiteGraph object
    :param queue: the Dijkstra heap of pending vertices
    :param vfrom: Vertex we are coming from
    :param edge: an edge to try
    :param vto: the other Vertex
    :return: None
    """
    candidate = combine_repl_info(vfrom.repl_info, edge.repl_info)

    # Cheaper or longer schedule goes in the heap
    better = (candidate.cost < vto.repl_info.cost or
              candidate.duration > vto.repl_info.duration)
    if better:
        vto.root = vfrom.root
        vto.component_id = vfrom.component_id
        vto.repl_info = candidate
        heapq.heappush(queue, (candidate.cost, vto.guid, vto))
+
+
def check_demote_vertex(vertex, edge_type):
    """Demote a non-white vertex that accepts neither red-red nor black
    edges of this transport type.

    This makes it temporarily behave like a white vertex.

    :param vertex: a Vertex()
    :param edge_type: a transport type GUID
    :return: None
    """
    if vertex.is_white():
        return

    accepts = (edge_type in vertex.accept_black or
               edge_type in vertex.accept_red_red)
    if not accepts:
        vertex.repl_info.cost = MAX_DWORD
        vertex.root = None
        vertex.demoted = True  # Demoted appears not to be used
+
+
def undemote_vertex(vertex):
    """Restore a non-white vertex to its undemoted state.

    :param vertex: a Vertex()
    :return: None
    """
    if not vertex.is_white():
        vertex.repl_info.cost = 0
        vertex.root = vertex
        vertex.demoted = False
+
+
def process_edge_set(graph, e_set, internal_edges):
    """Find internal edges to feed to Kruskal's algorithm

    When no edge set is given, every edge in the whole graph is
    processed, with incompatible vertices temporarily demoted around
    each edge.

    :param graph: an IntersiteGraph object
    :param e_set: an edge set, or None for all edges
    :param internal_edges: a set that internal edges get added to
    :return: None
    """
    if e_set is not None:
        for edge in e_set.edges:
            process_edge(graph, edge, internal_edges)
        return

    for edge in graph.edges:
        for vertex in edge.vertices:
            check_demote_vertex(vertex, edge.con_type)
        process_edge(graph, edge, internal_edges)
        for vertex in edge.vertices:
            undemote_vertex(vertex)
+
+
def process_edge(graph, examine, internal_edges):
    """Find the set of all vertices touching an edge to examine

    :param graph: an IntersiteGraph object
    :param examine: an edge
    :param internal_edges: a set that internal edges get added to
    :return: None
    """
    # Rank the endpoints by (colour, replication cost, packed GUID,
    # vertex); after sorting, the first entry is the best vertex.
    vertices = [(v.color, v.repl_info.cost, v.ndrpacked_guid, v)
                for v in examine.vertices]
    DEBUG("vertices is %s" % vertices)
    vertices.sort()
    bestv = vertices[0][-1]

    # Add to internal edges an edge from every colored vertex to bestv
    for v in examine.vertices:
        if v.component_id is None or v.root is None:
            continue

        # Only add the edge if it is a valid inter-tree edge: both ends
        # need a root and they must lie in different components.
        if (bestv.component_id is not None and
                bestv.root is not None and
                bestv.component_id != v.component_id):
            add_int_edge(graph, internal_edges, examine, bestv, v)
+
+
def add_int_edge(graph, internal_edges, examine, v1, v2):
    """Add an edge between compatible red and black vertices

    Internal edges form the core of the tree -- white and RODC
    vertices attach to it as leaf nodes.  To qualify, an edge needs
    black or red endpoints whose replication schedules actually
    intersect.

    :param graph: the graph object.
    :param internal_edges: a set of internal edges
    :param examine: an edge to examine for suitability.
    :param v1: a Vertex
    :param v2: the other Vertex
    """
    root1, root2 = v1.root, v2.root

    red_red = root1.is_red() and root2.is_red()

    # Both roots must accept this connection type.
    if red_red:
        accept1, accept2 = root1.accept_red_red, root2.accept_red_red
    else:
        accept1, accept2 = root1.accept_black, root2.accept_black
    if (examine.con_type not in accept1
            or examine.con_type not in accept2):
        return

    # Create the transitive replInfo for the two trees and this edge;
    # a zero duration means the schedules never coincide.
    ri = combine_repl_info(v1.repl_info, v2.repl_info)
    if ri.duration == 0:
        return

    ri2 = combine_repl_info(ri, examine.repl_info)
    if ri2.duration == 0:
        return

    # Normalise the vertex order by packed GUID.
    if root1.ndrpacked_guid > root2.ndrpacked_guid:
        root1, root2 = root2, root1

    internal_edges.add(InternalEdge(root1, root2, red_red, ri2,
                                    examine.con_type, examine.site_link))
+
+
def kruskal(graph, edges):
    """Perform Kruskal's algorithm using the given set of edges

    The input edges are "internal edges" -- between red and black
    nodes. The output edges form a minimal spanning tree.

    :param graph: the graph object.
    :param edges: a set of edges
    :return: a tuple of a list of edges, and the number of components
    """
    for vertex in graph.vertices:
        vertex.edges = []

    components = set(v for v in graph.vertices if not v.is_white())

    # Sorted using the InternalEdge comparison: best edges first.
    candidates = sorted(edges)

    output_edges = []
    for e in candidates:
        parent1 = find_component(e.v1)
        parent2 = find_component(e.v2)
        if parent1 is not parent2:
            add_out_edge(graph, output_edges, e)
            # Union the two components.
            parent1.component_id = parent2
            components.discard(parent1)

    return output_edges, len(components)
+
+
def find_component(vertex):
    """Kruskal helper: find the representative of a vertex's component.

    Follows the component_id chain up to its root, then compresses the
    path so that subsequent lookups are cheap.

    :param vertex: a Vertex
    :return: the Vertex object representing the component
    """
    if vertex.component_id is vertex:
        return vertex

    # Walk up to the root of the chain.
    root = vertex
    while root.component_id is not root:
        root = root.component_id

    # Path compression: repoint every vertex on the walk at the root.
    node = vertex
    while node.component_id is not root:
        node.component_id, node = root, node.component_id

    return root
+
+
def add_out_edge(graph, output_edges, e):
    """Kruskal helper to record a spanning tree edge

    :param graph: the InterSiteGraph
    :param output_edges: the list of spanning tree edges
    :param e: the internal edge to be added
    :return: None
    """
    v1, v2 = e.v1, e.v2

    # This multi-edge is a 'real' undirected 2-vertex edge with no
    # GUID. XXX It is not really the same thing at all as the
    # multi-vertex edges relating to site-links. We shouldn't really
    # be using the same class or storing them in the same list as the
    # other ones. But we do. Historical reasons.
    ee = MultiEdge()
    ee.directed = False
    ee.site_link = e.site_link
    ee.vertices.extend((v1, v2))
    ee.con_type = e.e_type
    ee.repl_info = e.repl_info
    output_edges.append(ee)

    v1.edges.append(ee)
    v2.edges.append(ee)
+
+
def setup_graph(part, site_table, transport_guid, sitelink_table,
                bridges_required):
    """Set up an IntersiteGraph based on intersite topology

    The graph gets a Vertex for each site, a MultiEdge for each
    siteLink object, and a MultiEdgeSet for each siteLinkBridge object
    (or implied siteLinkBridge).

    :param part: the partition we are dealing with
    :param site_table: a mapping of guids to sites (KCC.site_table)
    :param transport_guid: the GUID of the IP transport
    :param sitelink_table: a mapping of dnstrs to sitelinks
    :param bridges_required: boolean, asking in vain for something to do
        with site link bridges
    :return: a new IntersiteGraph
    """
    g = IntersiteGraph()
    guid_to_vertex = {}

    # One vertex per site.
    for site_guid, site in site_table.items():
        vertex = Vertex(site, part)
        vertex.guid = site_guid
        vertex.ndrpacked_guid = ndr_pack(site.site_guid)
        g.vertices.add(vertex)
        guid_to_vertex.setdefault(site_guid, []).append(vertex)

    # One multi-edge per site link.
    connected_vertices = set()
    for _site_link_dn, site_link in sitelink_table.items():
        edge = create_edge(transport_guid, site_link, guid_to_vertex)
        connected_vertices.update(edge.vertices)
        g.edges.add(edge)

    # XXX we are ignoring the bridges_required option and indeed the
    # whole concept of SiteLinkBridge objects.
    if bridges_required:
        WARN("Samba KCC ignores the bridges required option")

    g.edge_set.add(create_auto_edge_set(g, transport_guid))
    g.connected_vertices = connected_vertices

    return g
+
+
class VertexColor(object):
    """Enumeration of vertex colours.

    Per color_vertex(): red for sites with a full NC replica, black
    for partial replicas only, white for none, unknown until coloured.
    """
    red = 0
    black = 1
    white = 2
    unknown = 3
+
+
class Vertex(object):
    """Intersite graph representation of a Site.

    There is a separate vertex for each partition.

    :param site: the site to make a vertex of.
    :param part: the partition.
    """
    def __init__(self, site, part):
        self.site = site
        self.part = part
        self.color = VertexColor.unknown
        self.edges = []
        self.accept_red_red = []
        self.accept_black = []
        self.repl_info = ReplInfo()
        self.root = self
        self.guid = None
        self.component_id = self
        self.demoted = False
        self.options = 0
        self.interval = 0

    def color_vertex(self):
        """Colour the vertex to indicate which kind of NC replica the
        site contains, following the MS-ADTS pseudocode:

            IF s contains one or more DCs with full replicas of the
                NC cr!nCName
                SET v.Color to COLOR.RED
            ELSEIF s contains one or more partial replicas of the NC
                SET v.Color to COLOR.BLACK
            ELSE
                SET v.Color to COLOR.WHITE
        """
        # start from the minimum (no replica at all)
        self.color = VertexColor.white

        for dsa in self.site.dsa_table.values():
            rep = dsa.get_current_replica(self.part.nc_dnstr)
            if rep is None:
                continue

            if rep.is_partial():
                self.color = VertexColor.black
            else:
                # A full replica is the largest value, so stop looking.
                self.color = VertexColor.red
                break

    def is_red(self):
        assert self.color != VertexColor.unknown
        return self.color == VertexColor.red

    def is_black(self):
        assert self.color != VertexColor.unknown
        return self.color == VertexColor.black

    def is_white(self):
        assert self.color != VertexColor.unknown
        return self.color == VertexColor.white
+
+
class IntersiteGraph(object):
    """Graph for representing the intersite topology"""
    def __init__(self):
        # Vertices (sites), multi-edges (site links) and edge sets
        # (site link bridges) all start out empty.
        self.vertices = set()
        self.edges = set()
        self.edge_set = set()
        # Later populated with every vertex that touches an edge.
        self.connected_vertices = None
+
+
class MultiEdgeSet(object):
    """Defines a multi edge set"""
    def __init__(self):
        # objectGuid of the corresponding siteLinkBridge (0 until set)
        self.guid = 0
        self.edges = []
+
+
class MultiEdge(object):
    """An "edge" that may join more than two vertices"""
    def __init__(self):
        self.vertices = []
        self.site_link = None  # the siteLink object this edge represents
        self.con_type = None  # interSiteTransport GUID
        self.directed = True
        self.repl_info = ReplInfo()
+
+
class InternalEdge(object):
    """An edge that forms part of the minimal spanning tree

    These are used in Kruskal's algorithm. Their interesting feature
    is that they are sortable, with the good edges sorting before the
    bad ones -- lower is better.

    Equality and hashing now use the same key, so edges that compare
    equal also hash equally.
    """
    def __init__(self, v1, v2, redred, repl, eType, site_link):
        self.v1 = v1
        self.v2 = v2
        self.red_red = redred
        self.repl_info = repl
        self.e_type = eType
        self.site_link = site_link

    def _identity(self):
        """The attribute tuple that decides equality and hashing.

        These are the same attributes the ordering in __lt__ compares,
        so eq/hash/ordering are mutually consistent.
        """
        return (self.red_red, self.repl_info.cost,
                self.repl_info.duration, self.v1.guid, self.v2.guid,
                self.e_type)

    def __hash__(self):
        # BUG FIX: the hash previously covered the ReplInfo object and
        # the site_link while __eq__ compared by sort key.  Two edges
        # that compared equal could hash differently, violating the
        # __eq__/__hash__ contract and defeating the de-duplication of
        # the internal-edge set.
        return hash(self._identity())

    def __eq__(self, other):
        return self._identity() == other._identity()

    def __ne__(self, other):
        return self._identity() != other._identity()

    def __gt__(self, other):
        return other < self

    def __ge__(self, other):
        return not self < other

    def __le__(self, other):
        return not other < self

    def __lt__(self, other):
        """Here "less than" means "better".

        From within MS-ADTS 6.2.2.3.4.4:

        SORT internalEdges by (descending RedRed,
                               ascending ReplInfo.Cost,
                               descending available time in ReplInfo.Schedule,
                               ascending V1ID,
                               ascending V2ID,
                               ascending Type)
        """
        if self.red_red != other.red_red:
            # descending: a red-red edge beats a non-red-red one
            return self.red_red

        if self.repl_info.cost != other.repl_info.cost:
            return self.repl_info.cost < other.repl_info.cost

        if self.repl_info.duration != other.repl_info.duration:
            # descending: more available schedule time is better
            return self.repl_info.duration > other.repl_info.duration

        if self.v1.guid != other.v1.guid:
            return self.v1.ndrpacked_guid < other.v1.ndrpacked_guid

        if self.v2.guid != other.v2.guid:
            return self.v2.ndrpacked_guid < other.v2.ndrpacked_guid

        return self.e_type < other.e_type
diff --git a/python/samba/kcc/graph_utils.py b/python/samba/kcc/graph_utils.py
new file mode 100644
index 0000000..c89d06a
--- /dev/null
+++ b/python/samba/kcc/graph_utils.py
@@ -0,0 +1,343 @@
+# Graph topology utilities, used by KCC
+#
+# Copyright (C) Andrew Bartlett 2015
+#
+# Copyright goes to Andrew Bartlett, but the actual work was performed
+# by Douglas Bagnall and Garming Sam.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import itertools
+
+from samba.graph import dot_graph
+
+
def write_dot_file(basename, edge_list, vertices=None, label=None,
                   dot_file_dir=None, debug=None, **kwargs):
    """Write a Graphviz dot file describing the given graph.

    :param basename: the filename stem (".dot" is appended)
    :param edge_list: the graph's edges
    :param vertices: the graph's vertices, if any are edge-less
    :param label: a title, also mangled into the filename
    :param dot_file_dir: the directory to write into
    :param debug: optional function used to note the chosen filename

    Remaining keyword arguments pass through to samba.graph.dot_graph().
    """
    s = dot_graph(vertices, edge_list, title=label, **kwargs)
    if label:
        # sanitise DN and guid labels
        basename += '_' + label.replace(', ', '')

    filename = os.path.join(dot_file_dir, "%s.dot" % basename)
    if debug is not None:
        debug("writing graph to %s" % filename)
    # use a context manager so the file is closed even if write() fails
    with open(filename, 'w') as f:
        f.write(s)
+
+
class GraphError(Exception):
    """Raised when a graph fails one of the verify_graph_* checks."""
+
+
def verify_graph_complete(edges, vertices, edge_vertices):
    """The graph is complete, which is to say there is an edge between
    every pair of nodes."""
    n = len(vertices)
    for v in vertices:
        neighbours = set()
        for a, b in edges:
            if a == v:
                neighbours.add(b)
            elif b == v:
                neighbours.add(a)
        # v plus its neighbours must account for every vertex
        if len(neighbours) + 1 != n:
            raise GraphError("graph is not fully connected")
+
+
def verify_graph_connected(edges, vertices, edge_vertices):
    """There is a path between any two nodes."""
    if not edges:
        if len(vertices) <= 1:
            return
        raise GraphError("all vertices are disconnected because "
                         "there are no edges:")

    unabsorbed = list(edges)
    # Seed the reachable set from an arbitrary edge.
    reached = set(unabsorbed.pop())
    # Repeatedly absorb edges touching the reachable set until a full
    # pass makes no progress.
    progress = True
    while progress:
        progress = False
        still_left = []
        for a, b in unabsorbed:
            if a in reached:
                reached.add(b)
                progress = True
            elif b in reached:
                reached.add(a)
                progress = True
            else:
                still_left.append((a, b))
        unabsorbed = still_left

    if unabsorbed or reached != set(vertices):
        s = ("the graph is not connected, "
             "as the following vertices are unreachable:\n ")
        s += '\n '.join(v for v in sorted(vertices)
                        if v not in reached)
        raise GraphError(s)
+
+
def verify_graph_connected_under_edge_failures(edges, vertices, edge_vertices):
    """The graph stays connected when any single edge is removed."""
    if len(edges) == 0:
        return verify_graph_connected(edges, vertices, edge_vertices)

    # Check every subset that omits exactly one edge.
    for subset in itertools.combinations(edges, len(edges) - 1):
        try:
            verify_graph_connected(subset, vertices, edge_vertices)
        except GraphError:
            # Identify the edge missing from this subset and report it
            # as a single point of failure.
            missing = [e for e in edges if e not in subset]
            raise GraphError("The graph will be disconnected when the "
                             "connection from %s to %s fails"
                             % missing[0])
+
+
def verify_graph_connected_under_vertex_failures(edges, vertices,
                                                 edge_vertices):
    """The graph stays connected when any single vertex is removed."""
    for victim in vertices:
        surviving_vertices = [v for v in vertices if v is not victim]
        surviving_edges = [e for e in edges if victim not in e]
        verify_graph_connected(surviving_edges, surviving_vertices,
                               surviving_vertices)
+
+
def verify_graph_forest(edges, vertices, edge_vertices):
    """The graph contains no loops."""
    # Union-find by repeated merging: each edge starts as a 2-vertex
    # tree; two trees sharing exactly one vertex are merged.  If two
    # trees ever share more than one vertex, the graph has a loop.
    trees = [set(e) for e in edges]
    while True:
        for a, b in itertools.combinations(trees, 2):
            intersection = a & b
            if intersection:
                if len(intersection) == 1:
                    # merge b into a and restart the pairwise scan,
                    # since 'trees' has changed under the iterator
                    a |= b
                    trees.remove(b)
                    break
                else:
                    raise GraphError("there is a loop in the graph\n"
                                     " vertices %s\n edges %s\n"
                                     " intersection %s" %
                                     (vertices, edges, intersection))
        else:
            # no break in itertools.combinations loop means no
            # further mergers, so we're done.
            #
            # XXX here we also know whether it is a tree or a
            # forest by len(trees) but the connected test already
            # tells us that.
            return
+
+
def verify_graph_multi_edge_forest(edges, vertices, edge_vertices):
    """This allows a forest with duplicate edges. That is if multiple
    edges go between the same two vertices, they are treated as a
    single edge by this test.

    e.g.:
                        o
    pass: o-o=o  o=o   (|)    fail:  o-o
                `o      o            `o'
    """
    # Collapse duplicate edges first, then run the same union-find
    # merge as verify_graph_forest: trees sharing one vertex merge;
    # sharing two or more means a loop.
    unique_edges = set(edges)
    trees = [set(e) for e in unique_edges]
    while True:
        for a, b in itertools.combinations(trees, 2):
            intersection = a & b
            if intersection:
                if len(intersection) == 1:
                    # merge and restart the scan over the mutated list
                    a |= b
                    trees.remove(b)
                    break
                else:
                    raise GraphError("there is a loop in the graph")
        else:
            # no break means no further mergers: it is a forest
            return
+
+
def verify_graph_no_lonely_vertices(edges, vertices, edge_vertices):
    """There are no vertices without edges."""
    lonely = set(vertices) - set(edge_vertices)
    if not lonely:
        return
    raise GraphError("some vertices are not connected:\n%s" %
                     '\n'.join(sorted(lonely)))
+
+
def verify_graph_no_unknown_vertices(edges, vertices, edge_vertices):
    """The edge endpoints contain no vertices that are otherwise unknown."""
    strays = set(edge_vertices) - set(vertices)
    if not strays:
        return
    raise GraphError("some edge vertices are seemingly unknown:\n%s" %
                     '\n'.join(sorted(strays)))
+
+
def verify_graph_directed_double_ring(edges, vertices, edge_vertices):
    """Each node has at least two directed edges leaving it, and two
    arriving. The edges work in pairs that have the same end points
    but point in opposite directions. The pairs form a path that
    touches every vertex and form a loop.

    There might be other connections that *aren't* part of the ring.

    Deciding this for sure is NP-complete (the Hamiltonian path
    problem), but there are some easy failures that can be detected.
    So far we check for:
    - leaf nodes
    - disjoint subgraphs
    - robustness against edge and vertex failure

    :param edges: a sequence of directed (start, end) vertex pairs
    :param vertices: the set of vertices
    :param edge_vertices: the vertices mentioned by edges
    :return: None; raises GraphError on failure
    """
    # a zero or one node graph is OK with no edges.
    # The two vertex case is special. Use
    # verify_graph_directed_double_ring_or_small() to allow that.
    if not edges and len(vertices) <= 1:
        return
    # a double ring needs one edge each way per vertex, so fewer than
    # 2 * |V| edges cannot possibly qualify
    if len(edges) < 2 * len(vertices):
        raise GraphError("directed double ring requires at least twice "
                         "as many edges as vertices")

    # Reduce the problem space by looking only at bi-directional links.
    half_duplex = set(edges)
    duplex_links = set()
    # keep an edge only when its reverse is also present
    for edge in edges:
        rev_edge = (edge[1], edge[0])
        if edge in half_duplex and rev_edge in half_duplex:
            duplex_links.add(edge)
            half_duplex.remove(edge)
            half_duplex.remove(rev_edge)

    # the Hamiltonian cycle problem is NP-complete in general, but we
    # can cheat a bit and prove a less strong result.
    #
    # We declutter the graph by replacing nodes with edges connecting
    # their neighbours.
    #
    # A-B-C --> A-C
    #
    # -A-B-C- --> -A--C-
    #    `D_        `D'_
    #
    # In the end there should be a single 2 vertex graph.

    edge_map = {}
    for a, b in duplex_links:
        edge_map.setdefault(a, set()).add(b)
        edge_map.setdefault(b, set()).add(a)

    # an easy to detect failure is a lonely leaf node
    for vertex, neighbours in edge_map.items():
        if len(neighbours) == 1:
            raise GraphError("wanted double directed ring, found a leaf node"
                             "(%s)" % vertex)

    # contract each vertex away, wiring its neighbours to each other
    for vertex in list(edge_map.keys()):
        nset = edge_map[vertex]
        if not nset:
            continue
        for n in nset:
            n_neighbours = edge_map[n]
            n_neighbours.remove(vertex)
            n_neighbours.update(x for x in nset if x != n)
        del edge_map[vertex]

    # more than one cluster remaining means disjoint sub-rings
    if len(edge_map) > 1:
        raise GraphError("wanted double directed ring, but "
                         "this looks like a split graph\n"
                         "(%s can't reach each other)" %
                         ', '.join(edge_map.keys()))

    verify_graph_connected_under_edge_failures(duplex_links, vertices,
                                               edge_vertices)
    verify_graph_connected_under_vertex_failures(duplex_links, vertices,
                                                 edge_vertices)
+
+
def verify_graph_directed_double_ring_or_small(edges, vertices, edge_vertices):
    """This performs the directed_double_ring test but makes special
    concessions for small rings where the strict rules don't really
    apply."""
    n = len(vertices)
    if n < 2:
        return
    if n == 2:
        # With 2 nodes there should be a single link in each direction.
        if (len(edges) == 2 and
                edges[0][0] == edges[1][1] and
                edges[0][1] == edges[1][0]):
            return
        raise GraphError("A two vertex graph should have an edge each way.")

    return verify_graph_directed_double_ring(edges, vertices, edge_vertices)
+
+
def verify_graph(edges, vertices=None, directed=False, properties=()):
    """Check a graph against a list of named property tests.

    Each property name maps to a verify_graph_<name> function in this
    module; any GraphError raised is collected rather than propagated.

    :param edges: a sequence of (vertex, vertex) pairs
    :param vertices: the vertices (default: those mentioned in edges)
    :param directed: unused, retained for API compatibility
    :param properties: names of the properties to check
    :return: a list of (property, error, docstring) tuples
    """
    errors = []

    edge_vertices = set()
    for a, b in edges:
        edge_vertices.add(a)
        edge_vertices.add(b)

    vertices = edge_vertices if vertices is None else set(vertices)

    for prop in (x.replace(' ', '_') for x in properties):
        checker = globals()['verify_graph_%s' % prop]
        try:
            checker(edges, vertices, edge_vertices)
        except GraphError as e:
            errors.append((prop, e, checker.__doc__))

    return errors
+
+
def verify_and_dot(basename, edges, vertices=None, label=None,
                   reformat_labels=True, directed=False,
                   properties=(), fatal=True, debug=None,
                   verify=True, dot_file_dir=None,
                   edge_colors=None, edge_labels=None,
                   vertex_colors=None):
    """Optionally write the graph to a dot file and optionally verify
    the requested properties, raising GraphError on failure when fatal
    is true."""
    if dot_file_dir is not None:
        write_dot_file(basename, edges, vertices=vertices, label=label,
                       dot_file_dir=dot_file_dir,
                       reformat_labels=reformat_labels, directed=directed,
                       debug=debug, edge_colors=edge_colors,
                       edge_labels=edge_labels, vertex_colors=vertex_colors)

    if not verify:
        return

    errors = verify_graph(edges, vertices, properties=properties)
    if not errors:
        return

    title = '%s %s' % (basename, label or '')
    debug("%s FAILED:" % title)
    for p, e, doc in errors:
        debug(" %18s: %s" % (p, e))
    if fatal:
        raise GraphError("The '%s' graph lacks the following "
                         "properties:\n%s" %
                         (title, '\n'.join('%s: %s' % (p, e)
                                           for p, e, doc in errors)))
+
+
def list_verify_tests():
    """Print the name and docstring of every verify_graph_* test."""
    for name, fn in sorted(globals().items()):
        if not name.startswith('verify_graph_'):
            continue
        print(name.replace('verify_graph_', ''))
        if fn.__doc__:
            print(' %s' % (fn.__doc__.rstrip()))
        else:
            print()
diff --git a/python/samba/kcc/kcc_utils.py b/python/samba/kcc/kcc_utils.py
new file mode 100644
index 0000000..326889d
--- /dev/null
+++ b/python/samba/kcc/kcc_utils.py
@@ -0,0 +1,2364 @@
+# KCC topology utilities
+#
+# Copyright (C) Dave Craft 2011
+# Copyright (C) Jelmer Vernooij 2011
+# Copyright (C) Andrew Bartlett 2015
+#
+# Andrew Bartlett's alleged work performed by his underlings Douglas
+# Bagnall and Garming Sam.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+import sys
+import ldb
+import uuid
+
+from samba import dsdb
+from samba.dcerpc import (
+ drsblobs,
+ drsuapi,
+ misc,
+)
+from samba.samdb import dsdb_Dn
+from samba.ndr import ndr_unpack, ndr_pack
+from collections import Counter
+
+
+class KCCError(Exception):
+    """Base exception raised for KCC topology errors in this module."""
+    pass
+
+
+class NCType(object):
+    """Enumeration of naming context (NC) types (0..4)."""
+    (unknown, schema, domain, config, application) = range(0, 5)
+
+
+# Reverse lookup table: NCType value -> attribute name (e.g. 1 -> "schema"),
+# used for human-readable debug output.
+nctype_lut = dict((v, k) for k, v in NCType.__dict__.items() if k[:2] != '__')
+
+
+class NamingContext(object):
+    """Base class for a naming context.
+
+    Holds the DN, GUID, SID (if available) and type of the DN.
+    Subclasses may inherit from this and specialize
+    """
+
+    def __init__(self, nc_dnstr):
+        """Instantiate a NamingContext
+
+        :param nc_dnstr: NC dn string
+        """
+        self.nc_dnstr = nc_dnstr
+        self.nc_guid = None
+        self.nc_sid = None
+        self.nc_type = NCType.unknown
+
+    def __str__(self):
+        """Debug dump string output of class"""
+        text = "%s:" % (self.__class__.__name__,) +\
+               "\n\tnc_dnstr=%s" % self.nc_dnstr +\
+               "\n\tnc_guid=%s" % str(self.nc_guid)
+
+        # Only report presence of the SID, not its value.
+        if self.nc_sid is None:
+            text = text + "\n\tnc_sid=<absent>"
+        else:
+            text = text + "\n\tnc_sid=<present>"
+
+        text = text + "\n\tnc_type=%s (%s)" % (nctype_lut[self.nc_type],
+                                               self.nc_type)
+        return text
+
+    def load_nc(self, samdb):
+        """Load the NC's objectGUID and objectSid from the database.
+
+        :param samdb: database to search for this NC's base DN
+        :raises KCCError: if the NC base DN cannot be found
+        """
+        attrs = ["objectGUID",
+                 "objectSid"]
+        try:
+            res = samdb.search(base=self.nc_dnstr,
+                               scope=ldb.SCOPE_BASE, attrs=attrs)
+
+        except ldb.LdbError as e:
+            (enum, estr) = e.args
+            raise KCCError("Unable to find naming context (%s) - (%s)" %
+                           (self.nc_dnstr, estr))
+        msg = res[0]
+        if "objectGUID" in msg:
+            self.nc_guid = misc.GUID(samdb.schema_format_value("objectGUID",
+                                     msg["objectGUID"][0]))
+        if "objectSid" in msg:
+            self.nc_sid = msg["objectSid"][0]
+
+        # Every NC must carry an objectGUID; objectSid is optional
+        # (only domain NCs have one).
+        assert self.nc_guid is not None
+
+    def is_config(self):
+        """Return True if NC is config"""
+        assert self.nc_type != NCType.unknown
+        return self.nc_type == NCType.config
+
+    def identify_by_basedn(self, samdb):
+        """Given an NC object, identify what type it is thru
+        the samdb basedn strings and NC sid value
+        """
+        # Invoke loader to initialize guid and more
+        # importantly sid value (sid is used to identify
+        # domain NCs)
+        if self.nc_guid is None:
+            self.load_nc(samdb)
+
+        # We check against schema and config because they
+        # will be the same for all nTDSDSAs in the forest.
+        # That leaves the domain NCs which can be identified
+        # by sid and application NCs as the last identified
+        if self.nc_dnstr == str(samdb.get_schema_basedn()):
+            self.nc_type = NCType.schema
+        elif self.nc_dnstr == str(samdb.get_config_basedn()):
+            self.nc_type = NCType.config
+        elif self.nc_sid is not None:
+            self.nc_type = NCType.domain
+        else:
+            self.nc_type = NCType.application
+
+    def identify_by_dsa_attr(self, samdb, attr):
+        """Given an NC which has been discovered thru the
+        nTDSDSA database object, determine what type of NC
+        it is (i.e. schema, config, domain, application) via
+        the use of the schema attribute under which the NC
+        was found.
+
+        :param samdb: database used for basedn fallback identification
+        :param attr: attr of nTDSDSA object where NC DN appears
+        """
+        # If the NC is listed under msDS-HasDomainNCs then
+        # this can only be a domain NC and it is our default
+        # domain for this dsa
+        if attr == "msDS-HasDomainNCs":
+            self.nc_type = NCType.domain
+
+        # If the NC is listed under hasPartialReplicaNCs
+        # this is only a domain NC
+        elif attr == "hasPartialReplicaNCs":
+            self.nc_type = NCType.domain
+
+        # NCs listed under hasMasterNCs are either
+        # default domain, schema, or config.  We
+        # utilize the identify_by_basedn() to
+        # identify those
+        elif attr == "hasMasterNCs":
+            self.identify_by_basedn(samdb)
+
+        # Still unknown (unlikely) but for completeness
+        # and for finally identifying application NCs
+        if self.nc_type == NCType.unknown:
+            self.identify_by_basedn(samdb)
+
+
+class NCReplica(NamingContext):
+    """Naming context replica that is relative to a specific DSA.
+
+    This is a more specific form of NamingContext class (inheriting from that
+    class) and it identifies unique attributes of the DSA's replica for a NC.
+    """
+
+    def __init__(self, dsa, nc_dnstr):
+        """Instantiate a Naming Context Replica
+
+        :param dsa: DSA object on which this replica appears (its
+            dsa_dnstr and dsa_guid are recorded here)
+        :param nc_dnstr: NC dn string
+        """
+        self.rep_dsa_dnstr = dsa.dsa_dnstr
+        self.rep_dsa_guid = dsa.dsa_guid
+        self.rep_default = False  # replica for DSA's default domain
+        self.rep_partial = False
+        self.rep_ro = False
+        self.rep_instantiated_flags = 0
+
+        self.rep_fsmo_role_owner = None
+
+        # RepsFromTo tuples
+        self.rep_repsFrom = []
+
+        # RepsFromTo tuples
+        self.rep_repsTo = []
+
+        # The (is present) test is a combination of being
+        # enumerated in (hasMasterNCs or msDS-hasFullReplicaNCs or
+        # hasPartialReplicaNCs) as well as its replica flags found
+        # thru the msDS-HasInstantiatedNCs.  If the NC replica meets
+        # the first enumeration test then this flag is set true
+        self.rep_present_criteria_one = False
+
+        # Call my super class we inherited from
+        NamingContext.__init__(self, nc_dnstr)
+
+    def __str__(self):
+        """Debug dump string output of class"""
+        text = "%s:" % self.__class__.__name__ +\
+               "\n\tdsa_dnstr=%s" % self.rep_dsa_dnstr +\
+               "\n\tdsa_guid=%s" % self.rep_dsa_guid +\
+               "\n\tdefault=%s" % self.rep_default +\
+               "\n\tro=%s" % self.rep_ro +\
+               "\n\tpartial=%s" % self.rep_partial +\
+               "\n\tpresent=%s" % self.is_present() +\
+               "\n\tfsmo_role_owner=%s" % self.rep_fsmo_role_owner +\
+               "".join("\n%s" % rep for rep in self.rep_repsFrom) +\
+               "".join("\n%s" % rep for rep in self.rep_repsTo)
+
+        return "%s\n%s" % (NamingContext.__str__(self), text)
+
+    def set_instantiated_flags(self, flags=0):
+        """Set or clear NC replica instantiated flags"""
+        self.rep_instantiated_flags = flags
+
+    def identify_by_dsa_attr(self, samdb, attr):
+        """Given an NC which has been discovered thru the
+        nTDSDSA database object, determine what type of NC
+        replica it is (i.e. partial, read only, default)
+
+        :param samdb: database used for basedn comparisons
+        :param attr: attr of nTDSDSA object where NC DN appears
+        """
+        # If the NC was found under hasPartialReplicaNCs
+        # then a partial replica at this dsa
+        if attr == "hasPartialReplicaNCs":
+            self.rep_partial = True
+            self.rep_present_criteria_one = True
+
+        # If the NC is listed under msDS-HasDomainNCs then
+        # this can only be a domain NC and it is the DSA's
+        # default domain NC
+        elif attr == "msDS-HasDomainNCs":
+            self.rep_default = True
+
+        # NCs listed under hasMasterNCs are either
+        # default domain, schema, or config.  We check
+        # against schema and config because they will be
+        # the same for all nTDSDSAs in the forest.  That
+        # leaves the default domain NC remaining which
+        # may be different for each nTDSDSAs (and thus
+        # we don't compare against this samdb's default
+        # basedn
+        elif attr == "hasMasterNCs":
+            self.rep_present_criteria_one = True
+
+            if self.nc_dnstr != str(samdb.get_schema_basedn()) and \
+               self.nc_dnstr != str(samdb.get_config_basedn()):
+                self.rep_default = True
+
+        # RODC only
+        elif attr == "msDS-hasFullReplicaNCs":
+            self.rep_present_criteria_one = True
+            self.rep_ro = True
+
+        # Not RODC
+        elif attr == "msDS-hasMasterNCs":
+            self.rep_present_criteria_one = True
+            self.rep_ro = False
+
+        # Now use this DSA attribute to identify the naming
+        # context type by calling the super class method
+        # of the same name
+        NamingContext.identify_by_dsa_attr(self, samdb, attr)
+
+    def is_default(self):
+        """Whether this is a default domain for the dsa that this NC appears on
+        """
+        return self.rep_default
+
+    def is_ro(self):
+        """Return True if NC replica is read only"""
+        return self.rep_ro
+
+    def is_partial(self):
+        """Return True if NC replica is partial"""
+        return self.rep_partial
+
+    def is_present(self):
+        """Given an NC replica which has been discovered thru the
+        nTDSDSA database object and populated with replica flags
+        from the msDS-HasInstantiatedNCs; return whether the NC
+        replica is present (true) or if the IT_NC_GOING flag is
+        set then the NC replica is not present (false)
+        """
+        if self.rep_present_criteria_one and \
+           self.rep_instantiated_flags & dsdb.INSTANCE_TYPE_NC_GOING == 0:
+            return True
+        return False
+
+    def load_repsFrom(self, samdb):
+        """Given an NC replica which has been discovered thru the nTDSDSA
+        database object, load the repsFrom attribute for the local replica.
+        held by my dsa.  The repsFrom attribute is not replicated so this
+        attribute is relative only to the local DSA that the samdb exists on
+
+        :param samdb: database to query for the repsFrom attribute
+        :raises KCCError: if the NC cannot be found
+        """
+        try:
+            res = samdb.search(base=self.nc_dnstr, scope=ldb.SCOPE_BASE,
+                               attrs=["repsFrom"])
+
+        except ldb.LdbError as e1:
+            (enum, estr) = e1.args
+            raise KCCError("Unable to find NC for (%s) - (%s)" %
+                           (self.nc_dnstr, estr))
+
+        msg = res[0]
+
+        # Possibly no repsFrom if this is a singleton DC
+        if "repsFrom" in msg:
+            for value in msg["repsFrom"]:
+                try:
+                    unpacked = ndr_unpack(drsblobs.repsFromToBlob, value)
+                except RuntimeError as e:
+                    # Corrupt blob: warn and skip it rather than abort.
+                    print("bad repsFrom NDR: %r" % (value),
+                          file=sys.stderr)
+                    continue
+                rep = RepsFromTo(self.nc_dnstr, unpacked)
+                self.rep_repsFrom.append(rep)
+
+    def commit_repsFrom(self, samdb, ro=False):
+        """Commit repsFrom to the database
+
+        :param samdb: database to commit to
+        :param ro: if True, update internal state only; do not write
+        :raises KCCError: if the modify fails
+        """
+
+        # XXX - This is not truly correct according to the MS-TECH
+        #       docs.  To commit a repsFrom we should be using RPCs
+        #       IDL_DRSReplicaAdd, IDL_DRSReplicaModify, and
+        #       IDL_DRSReplicaDel to affect a repsFrom change.
+        #
+        #       Those RPCs are missing in samba, so I'll have to
+        #       implement them to get this to more accurately
+        #       reflect the reference docs.  As of right now this
+        #       commit to the database will work as its what the
+        #       older KCC also did
+        modify = False
+        newreps = []
+        delreps = []
+
+        for repsFrom in self.rep_repsFrom:
+
+            # Leave out any to be deleted from
+            # replacement list.  Build a list
+            # of to be deleted reps which we will
+            # remove from rep_repsFrom list below
+            if repsFrom.to_be_deleted:
+                delreps.append(repsFrom)
+                modify = True
+                continue
+
+            if repsFrom.is_modified():
+                repsFrom.set_unmodified()
+                modify = True
+
+            # current (unmodified) elements also get
+            # appended here but no changes will occur
+            # unless something is "to be modified" or
+            # "to be deleted"
+            newreps.append(ndr_pack(repsFrom.ndr_blob))
+
+        # Now delete these from our list of rep_repsFrom
+        for repsFrom in delreps:
+            self.rep_repsFrom.remove(repsFrom)
+        delreps = []
+
+        # Nothing to do if no reps have been modified or
+        # need to be deleted or input option has informed
+        # us to be "readonly" (ro).  Leave database
+        # record "as is"
+        if not modify or ro:
+            return
+
+        m = ldb.Message()
+        m.dn = ldb.Dn(samdb, self.nc_dnstr)
+
+        # Replace the whole multi-valued attribute in one operation.
+        m["repsFrom"] = \
+            ldb.MessageElement(newreps, ldb.FLAG_MOD_REPLACE, "repsFrom")
+
+        try:
+            samdb.modify(m)
+
+        # NOTE(review): unlike other handlers in this file, the exception
+        # object itself is bound as "estr" and formatted directly rather
+        # than unpacking e.args.
+        except ldb.LdbError as estr:
+            raise KCCError("Could not set repsFrom for (%s) - (%s)" %
+                           (self.nc_dnstr, estr))
+
+    def load_replUpToDateVector(self, samdb):
+        """Given an NC replica which has been discovered thru the nTDSDSA
+        database object, load the replUpToDateVector attribute for the
+        local replica.  held by my dsa.  The replUpToDateVector
+        attribute is not replicated so this attribute is relative only
+        to the local DSA that the samdb exists on
+
+        :param samdb: database to query
+        :raises KCCError: if the NC cannot be found
+        :raises AttributeError: on an unexpected blob version
+        """
+        try:
+            res = samdb.search(base=self.nc_dnstr, scope=ldb.SCOPE_BASE,
+                               attrs=["replUpToDateVector"])
+
+        except ldb.LdbError as e2:
+            (enum, estr) = e2.args
+            raise KCCError("Unable to find NC for (%s) - (%s)" %
+                           (self.nc_dnstr, estr))
+
+        msg = res[0]
+
+        # Possibly no replUpToDateVector if this is a singleton DC
+        if "replUpToDateVector" in msg:
+            value = msg["replUpToDateVector"][0]
+            blob = ndr_unpack(drsblobs.replUpToDateVectorBlob,
+                              value)
+            if blob.version != 2:
+                # Samba only generates version 2, and this runs locally
+                raise AttributeError("Unexpected replUpToDateVector version %d"
+                                     % blob.version)
+
+            self.rep_replUpToDateVector_cursors = blob.ctr.cursors
+        else:
+            self.rep_replUpToDateVector_cursors = []
+
+    def dumpstr_to_be_deleted(self):
+        # One line per repsFrom entry flagged for deletion.
+        return '\n'.join(str(x) for x in self.rep_repsFrom if x.to_be_deleted)
+
+    def dumpstr_to_be_modified(self):
+        # One line per repsFrom entry with pending modifications.
+        return '\n'.join(str(x) for x in self.rep_repsFrom if x.is_modified())
+
+    def load_fsmo_roles(self, samdb):
+        """Given an NC replica which has been discovered thru the nTDSDSA
+        database object, load the fSMORoleOwner attribute.
+
+        :param samdb: database to query
+        :raises KCCError: if the NC cannot be found
+        """
+        try:
+            res = samdb.search(base=self.nc_dnstr, scope=ldb.SCOPE_BASE,
+                               attrs=["fSMORoleOwner"])
+
+        except ldb.LdbError as e3:
+            (enum, estr) = e3.args
+            raise KCCError("Unable to find NC for (%s) - (%s)" %
+                           (self.nc_dnstr, estr))
+
+        msg = res[0]
+
+        # Possibly no fSMORoleOwner
+        if "fSMORoleOwner" in msg:
+            # NOTE(review): this stores the whole message element, not
+            # msg["fSMORoleOwner"][0]; is_fsmo_role_owner() relies on
+            # the element comparing equal to a DN string.
+            self.rep_fsmo_role_owner = msg["fSMORoleOwner"]
+
+    def is_fsmo_role_owner(self, dsa_dnstr):
+        # True only when a role owner was loaded and matches dsa_dnstr.
+        if self.rep_fsmo_role_owner is not None and \
+           self.rep_fsmo_role_owner == dsa_dnstr:
+            return True
+        return False
+
+    def load_repsTo(self, samdb):
+        """Given an NC replica which has been discovered thru the nTDSDSA
+        database object, load the repsTo attribute for the local replica.
+        held by my dsa.  The repsTo attribute is not replicated so this
+        attribute is relative only to the local DSA that the samdb exists on
+
+        This is responsible for push replication, not scheduled pull
+        replication.  Not to be confused for repsFrom.
+
+        :param samdb: database to query
+        :raises KCCError: if the NC cannot be found
+        """
+        try:
+            res = samdb.search(base=self.nc_dnstr, scope=ldb.SCOPE_BASE,
+                               attrs=["repsTo"])
+
+        except ldb.LdbError as e4:
+            (enum, estr) = e4.args
+            raise KCCError("Unable to find NC for (%s) - (%s)" %
+                           (self.nc_dnstr, estr))
+
+        msg = res[0]
+
+        # Possibly no repsTo if this is a singleton DC
+        if "repsTo" in msg:
+            for value in msg["repsTo"]:
+                try:
+                    unpacked = ndr_unpack(drsblobs.repsFromToBlob, value)
+                except RuntimeError as e:
+                    # Corrupt blob: warn and skip it rather than abort.
+                    print("bad repsTo NDR: %r" % (value),
+                          file=sys.stderr)
+                    continue
+                rep = RepsFromTo(self.nc_dnstr, unpacked)
+                self.rep_repsTo.append(rep)
+
+    def commit_repsTo(self, samdb, ro=False):
+        """Commit repsTo to the database
+
+        :param samdb: database to commit to
+        :param ro: if True, update internal state only; do not write
+        :raises KCCError: if the modify fails
+        """
+
+        # XXX - This is not truly correct according to the MS-TECH
+        #       docs.  To commit a repsTo we should be using RPCs
+        #       IDL_DRSReplicaAdd, IDL_DRSReplicaModify, and
+        #       IDL_DRSReplicaDel to affect a repsTo change.
+        #
+        #       Those RPCs are missing in samba, so I'll have to
+        #       implement them to get this to more accurately
+        #       reflect the reference docs.  As of right now this
+        #       commit to the database will work as its what the
+        #       older KCC also did
+        modify = False
+        newreps = []
+        delreps = []
+
+        for repsTo in self.rep_repsTo:
+
+            # Leave out any to be deleted from
+            # replacement list.  Build a list
+            # of to be deleted reps which we will
+            # remove from rep_repsTo list below
+            if repsTo.to_be_deleted:
+                delreps.append(repsTo)
+                modify = True
+                continue
+
+            if repsTo.is_modified():
+                repsTo.set_unmodified()
+                modify = True
+
+            # current (unmodified) elements also get
+            # appended here but no changes will occur
+            # unless something is "to be modified" or
+            # "to be deleted"
+            newreps.append(ndr_pack(repsTo.ndr_blob))
+
+        # Now delete these from our list of rep_repsTo
+        for repsTo in delreps:
+            self.rep_repsTo.remove(repsTo)
+        delreps = []
+
+        # Nothing to do if no reps have been modified or
+        # need to be deleted or input option has informed
+        # us to be "readonly" (ro).  Leave database
+        # record "as is"
+        if not modify or ro:
+            return
+
+        m = ldb.Message()
+        m.dn = ldb.Dn(samdb, self.nc_dnstr)
+
+        # Replace the whole multi-valued attribute in one operation.
+        m["repsTo"] = \
+            ldb.MessageElement(newreps, ldb.FLAG_MOD_REPLACE, "repsTo")
+
+        try:
+            samdb.modify(m)
+
+        # NOTE(review): exception object bound as "estr" (see
+        # commit_repsFrom above).
+        except ldb.LdbError as estr:
+            raise KCCError("Could not set repsTo for (%s) - (%s)" %
+                           (self.nc_dnstr, estr))
+
+
+class DirectoryServiceAgent(object):
+    """A single nTDSDSA object (a DC) and its replicas/connections."""
+
+    def __init__(self, dsa_dnstr):
+        """Initialize DSA class.
+
+        Class is subsequently fully populated by calling the load_dsa() method
+
+        :param dsa_dnstr: DN of the nTDSDSA
+        """
+        self.dsa_dnstr = dsa_dnstr
+        self.dsa_guid = None
+        self.dsa_ivid = None
+        self.dsa_is_ro = False
+        self.dsa_is_istg = False
+        self.options = 0
+        self.dsa_behavior = 0
+        self.default_dnstr = None  # default domain dn string for dsa
+
+        # NCReplicas for this dsa that are "present"
+        # Indexed by DN string of naming context
+        self.current_rep_table = {}
+
+        # NCReplicas for this dsa that "should be present"
+        # Indexed by DN string of naming context
+        self.needed_rep_table = {}
+
+        # NTDSConnections for this dsa.  These are current
+        # valid connections that are committed or pending a commit
+        # in the database.  Indexed by DN string of connection
+        self.connect_table = {}
+
+    def __str__(self):
+        """Debug dump string output of class"""
+
+        text = "%s:" % self.__class__.__name__
+        if self.dsa_dnstr is not None:
+            text = text + "\n\tdsa_dnstr=%s" % self.dsa_dnstr
+        if self.dsa_guid is not None:
+            text = text + "\n\tdsa_guid=%s" % str(self.dsa_guid)
+        if self.dsa_ivid is not None:
+            text = text + "\n\tdsa_ivid=%s" % str(self.dsa_ivid)
+
+        text += "\n\tro=%s" % self.is_ro() +\
+                "\n\tgc=%s" % self.is_gc() +\
+                "\n\tistg=%s" % self.is_istg() +\
+                "\ncurrent_replica_table:" +\
+                "\n%s" % self.dumpstr_current_replica_table() +\
+                "\nneeded_replica_table:" +\
+                "\n%s" % self.dumpstr_needed_replica_table() +\
+                "\nconnect_table:" +\
+                "\n%s" % self.dumpstr_connect_table()
+
+        return text
+
+    def get_current_replica(self, nc_dnstr):
+        # Returns the NCReplica for the NC DN, or None if not present.
+        return self.current_rep_table.get(nc_dnstr)
+
+    def is_istg(self):
+        """Returns True if dsa is intersite topology generator for it's site"""
+        # The KCC on an RODC always acts as an ISTG for itself
+        return self.dsa_is_istg or self.dsa_is_ro
+
+    def is_ro(self):
+        """Returns True if dsa a read only domain controller"""
+        return self.dsa_is_ro
+
+    def is_gc(self):
+        """Returns True if dsa hosts a global catalog"""
+        if (self.options & dsdb.DS_NTDSDSA_OPT_IS_GC) != 0:
+            return True
+        return False
+
+    def is_minimum_behavior(self, version):
+        """Is dsa at minimum windows level greater than or equal to (version)
+
+        :param version: Windows version to test against
+            (e.g. DS_DOMAIN_FUNCTION_2008)
+        """
+        if self.dsa_behavior >= version:
+            return True
+        return False
+
+    def is_translate_ntdsconn_disabled(self):
+        """Whether this allows NTDSConnection translation in its options."""
+        if (self.options & dsdb.DS_NTDSDSA_OPT_DISABLE_NTDSCONN_XLATE) != 0:
+            return True
+        return False
+
+    def get_rep_tables(self):
+        """Return DSA current and needed replica tables
+        """
+        return self.current_rep_table, self.needed_rep_table
+
+    def get_parent_dnstr(self):
+        """Get the parent DN string of this object."""
+        # Strip the leading RDN; everything after the first comma is
+        # the parent DN.
+        head, sep, tail = self.dsa_dnstr.partition(',')
+        return tail
+
+    def load_dsa(self, samdb):
+        """Load a DSA from the samdb.
+
+        Prior initialization has given us the DN of the DSA that we are to
+        load.  This method initializes all other attributes, including loading
+        the NC replica table for this DSA.
+
+        :param samdb: database to load the nTDSDSA object from
+        :raises KCCError: if the nTDSDSA object cannot be found
+        """
+        attrs = ["objectGUID",
+                 "invocationID",
+                 "options",
+                 "msDS-isRODC",
+                 "msDS-Behavior-Version"]
+        try:
+            res = samdb.search(base=self.dsa_dnstr, scope=ldb.SCOPE_BASE,
+                               attrs=attrs)
+
+        except ldb.LdbError as e5:
+            (enum, estr) = e5.args
+            raise KCCError("Unable to find nTDSDSA for (%s) - (%s)" %
+                           (self.dsa_dnstr, estr))
+
+        msg = res[0]
+        self.dsa_guid = misc.GUID(samdb.schema_format_value("objectGUID",
+                                  msg["objectGUID"][0]))
+
+        # RODCs don't originate changes and thus have no invocationId,
+        # therefore we must check for existence first
+        # (ldb attribute lookups are case-insensitive, so "invocationId"
+        # matches the "invocationID" we requested above)
+        if "invocationId" in msg:
+            self.dsa_ivid = misc.GUID(samdb.schema_format_value("objectGUID",
+                                      msg["invocationId"][0]))
+
+        if "options" in msg:
+            self.options = int(msg["options"][0])
+
+        if "msDS-isRODC" in msg and str(msg["msDS-isRODC"][0]) == "TRUE":
+            self.dsa_is_ro = True
+        else:
+            self.dsa_is_ro = False
+
+        if "msDS-Behavior-Version" in msg:
+            self.dsa_behavior = int(msg['msDS-Behavior-Version'][0])
+
+        # Load the NC replicas that are enumerated on this dsa
+        self.load_current_replica_table(samdb)
+
+        # Load the nTDSConnection that are enumerated on this dsa
+        self.load_connection_table(samdb)
+
+    def load_current_replica_table(self, samdb):
+        """Method to load the NC replica's listed for DSA object.
+
+        This method queries the samdb for (hasMasterNCs, msDS-hasMasterNCs,
+        hasPartialReplicaNCs, msDS-HasDomainNCs, msDS-hasFullReplicaNCs, and
+        msDS-HasInstantiatedNCs) to determine complete list of NC replicas that
+        are enumerated for the DSA.  Once a NC replica is loaded it is
+        identified (schema, config, etc) and the other replica attributes
+        (partial, ro, etc) are determined.
+
+        :param samdb: database to query for DSA replica list
+        :raises KCCError: if the search fails or no NCs are enumerated
+        """
+        ncattrs = [
+            # not RODC - default, config, schema (old style)
+            "hasMasterNCs",
+            # not RODC - default, config, schema, app NCs
+            "msDS-hasMasterNCs",
+            # domain NC partial replicas
+            "hasPartialReplicaNCs",
+            # default domain NC
+            "msDS-HasDomainNCs",
+            # RODC only - default, config, schema, app NCs
+            "msDS-hasFullReplicaNCs",
+            # Identifies if replica is coming, going, or stable
+            "msDS-HasInstantiatedNCs"
+        ]
+        try:
+            res = samdb.search(base=self.dsa_dnstr, scope=ldb.SCOPE_BASE,
+                               attrs=ncattrs)
+
+        except ldb.LdbError as e6:
+            (enum, estr) = e6.args
+            raise KCCError("Unable to find nTDSDSA NCs for (%s) - (%s)" %
+                           (self.dsa_dnstr, estr))
+
+        # The table of NCs for the dsa we are searching
+        tmp_table = {}
+
+        # We should get one response to our query here for
+        # the ntds that we requested
+        if len(res[0]) > 0:
+
+            # Our response will contain a number of elements including
+            # the dn of the dsa as well as elements for each
+            # attribute (e.g. hasMasterNCs).  Each of these elements
+            # is a dictionary list which we retrieve the keys for and
+            # then iterate over them
+            for k in res[0].keys():
+                if k == "dn":
+                    continue
+
+                # For each attribute type there will be one or more DNs
+                # listed.  For instance DCs normally have 3 hasMasterNCs
+                # listed.
+                for value in res[0][k]:
+                    # Turn dn into a dsdb_Dn so we can use
+                    # its methods to parse a binary DN
+                    dsdn = dsdb_Dn(samdb, value.decode('utf8'))
+                    flags = dsdn.get_binary_integer()
+                    dnstr = str(dsdn.dn)
+
+                    if dnstr not in tmp_table:
+                        rep = NCReplica(self, dnstr)
+                        tmp_table[dnstr] = rep
+                    else:
+                        rep = tmp_table[dnstr]
+
+                    if k == "msDS-HasInstantiatedNCs":
+                        rep.set_instantiated_flags(flags)
+                        continue
+
+                    rep.identify_by_dsa_attr(samdb, k)
+
+                    # if we've identified the default domain NC
+                    # then save its DN string
+                    if rep.is_default():
+                        self.default_dnstr = dnstr
+        else:
+            raise KCCError("No nTDSDSA NCs for (%s)" % self.dsa_dnstr)
+
+        # Assign our newly built NC replica table to this dsa
+        self.current_rep_table = tmp_table
+
+    def add_needed_replica(self, rep):
+        """Method to add a NC replica that "should be present" to the
+        needed_rep_table.
+        """
+        self.needed_rep_table[rep.nc_dnstr] = rep
+
+    def load_connection_table(self, samdb):
+        """Method to load the nTDSConnections listed for DSA object.
+
+        :param samdb: database to query for DSA connection list
+        :raises KCCError: if the subtree search fails
+        """
+        try:
+            res = samdb.search(base=self.dsa_dnstr,
+                               scope=ldb.SCOPE_SUBTREE,
+                               expression="(objectClass=nTDSConnection)")
+
+        except ldb.LdbError as e7:
+            (enum, estr) = e7.args
+            raise KCCError("Unable to find nTDSConnection for (%s) - (%s)" %
+                           (self.dsa_dnstr, estr))
+
+        for msg in res:
+            dnstr = str(msg.dn)
+
+            # already loaded
+            if dnstr in self.connect_table:
+                continue
+
+            connect = NTDSConnection(dnstr)
+
+            connect.load_connection(samdb)
+            self.connect_table[dnstr] = connect
+
+    def commit_connections(self, samdb, ro=False):
+        """Method to commit any uncommitted nTDSConnections
+        modifications that are in our table.  These would be
+        identified connections that are marked to be added or
+        deleted
+
+        :param samdb: database to commit DSA connection list to
+        :param ro: if (true) then perform internal operations but
+            do not write to the database (readonly)
+        """
+        delconn = []
+
+        for dnstr, connect in self.connect_table.items():
+            if connect.to_be_added:
+                connect.commit_added(samdb, ro)
+
+            if connect.to_be_modified:
+                connect.commit_modified(samdb, ro)
+
+            if connect.to_be_deleted:
+                connect.commit_deleted(samdb, ro)
+                delconn.append(dnstr)
+
+        # Now delete the connection from the table
+        # (deferred so the dict is not mutated while iterating)
+        for dnstr in delconn:
+            del self.connect_table[dnstr]
+
+    def add_connection(self, dnstr, connect):
+        # Register a connection under its DN; duplicates are a bug.
+        assert dnstr not in self.connect_table
+        self.connect_table[dnstr] = connect
+
+    def get_connection_by_from_dnstr(self, from_dnstr):
+        """Scan DSA nTDSConnection table and return connection
+        with a "fromServer" dn string equivalent to method
+        input parameter.
+
+        :param from_dnstr: search for this from server entry
+        :return: list of matching NTDSConnection objects (may be empty)
+        """
+        answer = []
+        for connect in self.connect_table.values():
+            if connect.get_from_dnstr() == from_dnstr:
+                answer.append(connect)
+
+        return answer
+
+    def dumpstr_current_replica_table(self):
+        """Debug dump string output of current replica table"""
+        # Iterating a dict yields its keys (the NC DN strings).
+        return '\n'.join(str(x) for x in self.current_rep_table)
+
+    def dumpstr_needed_replica_table(self):
+        """Debug dump string output of needed replica table"""
+        return '\n'.join(str(x) for x in self.needed_rep_table)
+
+    def dumpstr_connect_table(self):
+        """Debug dump string output of connect table"""
+        return '\n'.join(str(x) for x in self.connect_table)
+
+    def new_connection(self, options, system_flags, transport, from_dnstr,
+                       sched):
+        """Set up a new connection for the DSA based on input
+        parameters.  Connection will be added to the DSA
+        connect_table and will be marked as "to be added" pending
+        a call to commit_connections()
+
+        :param options: nTDSConnection options value
+        :param system_flags: systemFlags value for the connection
+        :param transport: transport object (or None) providing dnstr/guid
+        :param from_dnstr: DN string of the source server
+        :param sched: schedule blob, or None to use the default schedule
+        :return: the newly created NTDSConnection
+        """
+        # Name the new connection with a random GUID RDN under this DSA.
+        dnstr = "CN=%s," % str(uuid.uuid4()) + self.dsa_dnstr
+
+        connect = NTDSConnection(dnstr)
+        connect.to_be_added = True
+        connect.enabled = True
+        connect.from_dnstr = from_dnstr
+        connect.options = options
+        connect.system_flags = system_flags
+
+        if transport is not None:
+            connect.transport_dnstr = transport.dnstr
+            connect.transport_guid = transport.guid
+
+        if sched is not None:
+            connect.schedule = sched
+        else:
+            # Create schedule.  Attribute value set according to MS-TECH
+            # intra-site connection creation document
+            connect.schedule = new_connection_schedule()
+
+        self.add_connection(dnstr, connect)
+        return connect
+
+
+class NTDSConnection(object):
+ """Class defines a nTDSConnection found under a DSA
+ """
+    def __init__(self, dnstr):
+        """Instantiate an NTDSConnection for the given DN string.
+
+        :param dnstr: DN of the nTDSConnection object; remaining state
+            is populated by load_connection()
+        """
+        self.dnstr = dnstr
+        self.guid = None
+        self.enabled = False
+        self.whenCreated = 0
+        self.to_be_added = False  # new connection needs to be added
+        self.to_be_deleted = False  # old connection needs to be deleted
+        self.to_be_modified = False
+        self.options = 0
+        self.system_flags = 0
+        self.transport_dnstr = None
+        self.transport_guid = None
+        self.from_dnstr = None
+        self.schedule = None
+
+    def __str__(self):
+        """Debug dump string output of NTDSConnection object"""
+
+        text = "%s:\n\tdn=%s" % (self.__class__.__name__, self.dnstr) +\
+               "\n\tenabled=%s" % self.enabled +\
+               "\n\tto_be_added=%s" % self.to_be_added +\
+               "\n\tto_be_deleted=%s" % self.to_be_deleted +\
+               "\n\tto_be_modified=%s" % self.to_be_modified +\
+               "\n\toptions=0x%08X" % self.options +\
+               "\n\tsystem_flags=0x%08X" % self.system_flags +\
+               "\n\twhenCreated=%d" % self.whenCreated +\
+               "\n\ttransport_dn=%s" % self.transport_dnstr
+
+        if self.guid is not None:
+            text += "\n\tguid=%s" % str(self.guid)
+
+        if self.transport_guid is not None:
+            text += "\n\ttransport_guid=%s" % str(self.transport_guid)
+
+        text = text + "\n\tfrom_dn=%s" % self.from_dnstr
+
+        # Schedule details are only available once load_connection()
+        # has unpacked the schedule blob.
+        if self.schedule is not None:
+            text += "\n\tschedule.size=%s" % self.schedule.size +\
+                    "\n\tschedule.bandwidth=%s" % self.schedule.bandwidth +\
+                    ("\n\tschedule.numberOfSchedules=%s" %
+                     self.schedule.numberOfSchedules)
+
+            for i, header in enumerate(self.schedule.headerArray):
+                text += ("\n\tschedule.headerArray[%d].type=%d" %
+                         (i, header.type)) +\
+                        ("\n\tschedule.headerArray[%d].offset=%d" %
+                         (i, header.offset)) +\
+                        "\n\tschedule.dataArray[%d].slots[ " % i +\
+                        "".join("0x%X " % slot for slot in self.schedule.dataArray[i].slots) +\
+                        "]"
+
+        return text
+
+    def load_connection(self, samdb):
+        """Given a NTDSConnection object with an prior initialization
+        for the object's DN, search for the DN and load attributes
+        from the samdb.
+
+        :param samdb: database to load the nTDSConnection from
+        :raises KCCError: if the object or its objectGUID is missing
+        """
+        attrs = ["options",
+                 "enabledConnection",
+                 "schedule",
+                 "whenCreated",
+                 "objectGUID",
+                 "transportType",
+                 "fromServer",
+                 "systemFlags"]
+        try:
+            res = samdb.search(base=self.dnstr, scope=ldb.SCOPE_BASE,
+                               attrs=attrs)
+
+        except ldb.LdbError as e8:
+            (enum, estr) = e8.args
+            raise KCCError("Unable to find nTDSConnection for (%s) - (%s)" %
+                           (self.dnstr, estr))
+
+        msg = res[0]
+
+        if "options" in msg:
+            self.options = int(msg["options"][0])
+
+        if "enabledConnection" in msg:
+            # Normalise whitespace/case before comparing the boolean string.
+            if str(msg["enabledConnection"][0]).upper().lstrip().rstrip() == "TRUE":
+                self.enabled = True
+
+        if "systemFlags" in msg:
+            self.system_flags = int(msg["systemFlags"][0])
+
+        try:
+            self.guid = \
+                misc.GUID(samdb.schema_format_value("objectGUID",
+                          msg["objectGUID"][0]))
+        except KeyError:
+            raise KCCError("Unable to find objectGUID in nTDSConnection "
+                           "for (%s)" % (self.dnstr))
+
+        if "transportType" in msg:
+            dsdn = dsdb_Dn(samdb, msg["transportType"][0].decode('utf8'))
+            self.load_connection_transport(samdb, str(dsdn.dn))
+
+        if "schedule" in msg:
+            self.schedule = ndr_unpack(drsblobs.schedule, msg["schedule"][0])
+
+        if "whenCreated" in msg:
+            self.whenCreated = ldb.string_to_time(str(msg["whenCreated"][0]))
+
+        if "fromServer" in msg:
+            dsdn = dsdb_Dn(samdb, msg["fromServer"][0].decode('utf8'))
+            self.from_dnstr = str(dsdn.dn)
+            assert self.from_dnstr is not None
+
+    def load_connection_transport(self, samdb, tdnstr):
+        """Given a NTDSConnection object which enumerates a transport
+        DN, load the transport information for the connection object
+
+        :param samdb: database to load the transport object from
+        :param tdnstr: transport DN to load
+        :raises KCCError: if the transport object cannot be found
+        """
+        attrs = ["objectGUID"]
+        try:
+            res = samdb.search(base=tdnstr,
+                               scope=ldb.SCOPE_BASE, attrs=attrs)
+
+        except ldb.LdbError as e9:
+            (enum, estr) = e9.args
+            raise KCCError("Unable to find transport (%s) - (%s)" %
+                           (tdnstr, estr))
+
+        # Only record the transport when its GUID is available;
+        # both fields are then set together.
+        if "objectGUID" in res[0]:
+            msg = res[0]
+            self.transport_dnstr = tdnstr
+            self.transport_guid = \
+                misc.GUID(samdb.schema_format_value("objectGUID",
+                          msg["objectGUID"][0]))
+        assert self.transport_dnstr is not None
+        assert self.transport_guid is not None
+
+ def commit_deleted(self, samdb, ro=False):
+ """Local helper routine for commit_connections() which
+ handles committed connections that are to be deleted from
+ the database database
+ """
+ assert self.to_be_deleted
+ self.to_be_deleted = False
+
+ # No database modification requested
+ if ro:
+ return
+
+ try:
+ samdb.delete(self.dnstr)
+ except ldb.LdbError as e10:
+ (enum, estr) = e10.args
+ raise KCCError("Could not delete nTDSConnection for (%s) - (%s)" %
+ (self.dnstr, estr))
+
+    def commit_added(self, samdb, ro=False):
+        """Local helper routine for commit_connections() which
+        handles committed connections that are to be added to the
+        database
+
+        :param samdb: database to add the connection to
+        :param ro: if True, update internal state only; do not write
+        :raises KCCError: if the object already exists or the add fails
+        """
+        assert self.to_be_added
+        self.to_be_added = False
+
+        # No database modification requested
+        if ro:
+            return
+
+        # First verify we don't have this entry to ensure nothing
+        # is programmatically amiss
+        found = False
+        try:
+            msg = samdb.search(base=self.dnstr, scope=ldb.SCOPE_BASE)
+            if len(msg) != 0:
+                found = True
+
+        except ldb.LdbError as e11:
+            (enum, estr) = e11.args
+            # ERR_NO_SUCH_OBJECT is the expected outcome here; anything
+            # else is a real search failure.
+            if enum != ldb.ERR_NO_SUCH_OBJECT:
+                raise KCCError("Unable to search for (%s) - (%s)" %
+                               (self.dnstr, estr))
+        if found:
+            raise KCCError("nTDSConnection for (%s) already exists!" %
+                           self.dnstr)
+
+        if self.enabled:
+            enablestr = "TRUE"
+        else:
+            enablestr = "FALSE"
+
+        # Prepare a message for adding to the samdb
+        m = ldb.Message()
+        m.dn = ldb.Dn(samdb, self.dnstr)
+
+        m["objectClass"] = \
+            ldb.MessageElement("nTDSConnection", ldb.FLAG_MOD_ADD,
+                               "objectClass")
+        m["showInAdvancedViewOnly"] = \
+            ldb.MessageElement("TRUE", ldb.FLAG_MOD_ADD,
+                               "showInAdvancedViewOnly")
+        m["enabledConnection"] = \
+            ldb.MessageElement(enablestr, ldb.FLAG_MOD_ADD,
+                               "enabledConnection")
+        m["fromServer"] = \
+            ldb.MessageElement(self.from_dnstr, ldb.FLAG_MOD_ADD, "fromServer")
+        m["options"] = \
+            ldb.MessageElement(str(self.options), ldb.FLAG_MOD_ADD, "options")
+        m["systemFlags"] = \
+            ldb.MessageElement(str(self.system_flags), ldb.FLAG_MOD_ADD,
+                               "systemFlags")
+
+        # Optional attributes: transport and schedule.
+        if self.transport_dnstr is not None:
+            m["transportType"] = \
+                ldb.MessageElement(str(self.transport_dnstr), ldb.FLAG_MOD_ADD,
+                                   "transportType")
+
+        if self.schedule is not None:
+            m["schedule"] = \
+                ldb.MessageElement(ndr_pack(self.schedule),
+                                   ldb.FLAG_MOD_ADD, "schedule")
+        try:
+            samdb.add(m)
+        except ldb.LdbError as e12:
+            (enum, estr) = e12.args
+            raise KCCError("Could not add nTDSConnection for (%s) - (%s)" %
+                           (self.dnstr, estr))
+
def commit_modified(self, samdb, ro=False):
    """Local helper routine for commit_connections() which
    handles committed connections that are to be modified to the
    database

    :param samdb: sam database to apply the modification to
    :param ro: when True, clear the pending flag but make no
        database modification
    :raise KCCError: if the object does not exist, the existence
        check fails, or the modify operation fails
    """
    assert self.to_be_modified
    self.to_be_modified = False

    # No database modification requested
    if ro:
        return

    # First verify we have this entry to ensure nothing
    # is programmatically amiss
    try:
        # we don't use the search result, but it tests the status
        # of self.dnstr in the database.
        samdb.search(base=self.dnstr, scope=ldb.SCOPE_BASE)

    except ldb.LdbError as e13:
        (enum, estr) = e13.args
        if enum == ldb.ERR_NO_SUCH_OBJECT:
            raise KCCError("nTDSConnection for (%s) doesn't exist!" %
                           self.dnstr)
        raise KCCError("Unable to search for (%s) - (%s)" %
                       (self.dnstr, estr))

    # enabledConnection is stored as the LDAP boolean strings
    # "TRUE"/"FALSE"
    if self.enabled:
        enablestr = "TRUE"
    else:
        enablestr = "FALSE"

    # Prepare a message for modifying the samdb
    m = ldb.Message()
    m.dn = ldb.Dn(samdb, self.dnstr)

    m["enabledConnection"] = \
        ldb.MessageElement(enablestr, ldb.FLAG_MOD_REPLACE,
                           "enabledConnection")
    m["fromServer"] = \
        ldb.MessageElement(self.from_dnstr, ldb.FLAG_MOD_REPLACE,
                           "fromServer")
    m["options"] = \
        ldb.MessageElement(str(self.options), ldb.FLAG_MOD_REPLACE,
                           "options")
    m["systemFlags"] = \
        ldb.MessageElement(str(self.system_flags), ldb.FLAG_MOD_REPLACE,
                           "systemFlags")

    # Optional attributes are replaced when set and deleted when not,
    # so a cleared value is actually removed from the object
    if self.transport_dnstr is not None:
        m["transportType"] = \
            ldb.MessageElement(str(self.transport_dnstr),
                               ldb.FLAG_MOD_REPLACE, "transportType")
    else:
        m["transportType"] = \
            ldb.MessageElement([], ldb.FLAG_MOD_DELETE, "transportType")

    if self.schedule is not None:
        m["schedule"] = \
            ldb.MessageElement(ndr_pack(self.schedule),
                               ldb.FLAG_MOD_REPLACE, "schedule")
    else:
        m["schedule"] = \
            ldb.MessageElement([], ldb.FLAG_MOD_DELETE, "schedule")
    try:
        samdb.modify(m)
    except ldb.LdbError as e14:
        (enum, estr) = e14.args
        raise KCCError("Could not modify nTDSConnection for (%s) - (%s)" %
                       (self.dnstr, estr))
+
def set_modified(self, truefalse):
    """Mark (or unmark) this connection as pending modification."""
    self.to_be_modified = truefalse
+
def is_schedule_minimum_once_per_week(self):
    """Return True if our schedule includes at least one
    replication interval within the week, False otherwise.
    """
    # replinfo schedule is None means "always", while
    # NTDSConnection schedule is None means "never".
    sched = self.schedule
    if sched is None or sched.dataArray[0] is None:
        return False

    # Only the low nibble of each slot byte is significant here.
    return any((slot & 0x0F) != 0x0 for slot in sched.dataArray[0].slots)
+
def is_equivalent_schedule(self, sched):
    """Return True if our schedule is equivalent to *sched*.

    Two absent (None) schedules are considered equivalent; an absent
    schedule never matches a present one.

    :param sched: schedule to compare to
    """
    mine = self.schedule

    # Either side missing: equivalent only when both are missing.
    if mine is None or sched is None:
        return mine is None and sched is None

    if (mine.size != sched.size or
            mine.bandwidth != sched.bandwidth or
            mine.numberOfSchedules != sched.numberOfSchedules):
        return False

    for i in range(len(mine.headerArray)):
        if mine.headerArray[i].type != sched.headerArray[i].type:
            return False
        if mine.headerArray[i].offset != sched.headerArray[i].offset:
            return False
        for a, b in zip(mine.dataArray[i].slots,
                        sched.dataArray[i].slots):
            if a != b:
                return False
    return True
+
def is_rodc_topology(self):
    """Return True if this NTDS Connection specifies RODC
    topology only.
    """
    return (self.options & dsdb.NTDSCONN_OPT_RODC_TOPOLOGY) != 0
+
def is_generated(self):
    """Return True if this NTDS Connection was generated by the
    KCC topology algorithm as opposed to set by the administrator.
    """
    return (self.options & dsdb.NTDSCONN_OPT_IS_GENERATED) != 0
+
def is_override_notify_default(self):
    """Return True if this NTDS Connection should override the
    notify default.
    """
    return (self.options & dsdb.NTDSCONN_OPT_OVERRIDE_NOTIFY_DEFAULT) != 0
+
def is_use_notify(self):
    """Return True if this NTDS Connection should use notify."""
    return (self.options & dsdb.NTDSCONN_OPT_USE_NOTIFY) != 0
+
def is_twoway_sync(self):
    """Return True if this NTDS Connection should use twoway sync."""
    return (self.options & dsdb.NTDSCONN_OPT_TWOWAY_SYNC) != 0
+
def is_intersite_compression_disabled(self):
    """Return True if intersite compression is disabled for this
    NTDS Connection.
    """
    return (self.options &
            dsdb.NTDSCONN_OPT_DISABLE_INTERSITE_COMPRESSION) != 0
+
def is_user_owned_schedule(self):
    """Return True if this NTDS Connection has a user owned schedule."""
    return (self.options & dsdb.NTDSCONN_OPT_USER_OWNED_SCHEDULE) != 0
+
def is_enabled(self):
    """Return True if this NTDS Connection is enabled."""
    return self.enabled
+
def get_from_dnstr(self):
    """Return the fromServer DN string attribute."""
    return self.from_dnstr
+
+
class Partition(NamingContext):
    """A naming context discovered thru Partitions DN of the config schema.

    This is a more specific form of NamingContext class (inheriting from that
    class) and it identifies unique attributes enumerated in the Partitions
    such as which nTDSDSAs are cross referenced for replicas
    """
    def __init__(self, partstr):
        """:param partstr: DN string of the partition (crossRef) object"""
        self.partstr = partstr
        self.enabled = True
        self.system_flags = 0
        self.rw_location_list = []
        self.ro_location_list = []

        # We don't have enough info to properly
        # fill in the naming context yet. We'll get that
        # fully set up with load_partition().
        NamingContext.__init__(self, None)

    def load_partition(self, samdb):
        """Given a Partition class object that has been initialized with its
        partition dn string, load the partition from the sam database, identify
        the type of the partition (schema, domain, etc) and record the list of
        nTDSDSAs that appear in the cross reference attributes
        msDS-NC-Replica-Locations and msDS-NC-RO-Replica-Locations.

        :param samdb: sam database to load partition from
        :raise KCCError: if the partition DN cannot be found
        """
        attrs = ["nCName",
                 "Enabled",
                 "systemFlags",
                 "msDS-NC-Replica-Locations",
                 "msDS-NC-RO-Replica-Locations"]
        try:
            res = samdb.search(base=self.partstr, scope=ldb.SCOPE_BASE,
                               attrs=attrs)

        except ldb.LdbError as e15:
            (enum, estr) = e15.args
            raise KCCError("Unable to find partition for (%s) - (%s)" %
                           (self.partstr, estr))
        msg = res[0]
        for k in msg.keys():
            if k == "dn":
                continue

            # Enabled is an LDAP boolean string ("TRUE"/"FALSE")
            if k == "Enabled":
                if str(msg[k][0]).upper().lstrip().rstrip() == "TRUE":
                    self.enabled = True
                else:
                    self.enabled = False
                continue

            if k == "systemFlags":
                self.system_flags = int(msg[k][0])
                continue

            # Remaining attributes are DN-valued and possibly multi-valued
            for value in msg[k]:
                dsdn = dsdb_Dn(samdb, value.decode('utf8'))
                dnstr = str(dsdn.dn)

                if k == "nCName":
                    self.nc_dnstr = dnstr
                    continue

                if k == "msDS-NC-Replica-Locations":
                    self.rw_location_list.append(dnstr)
                    continue

                if k == "msDS-NC-RO-Replica-Locations":
                    self.ro_location_list.append(dnstr)
                    continue

        # Now identify what type of NC this partition
        # enumerated
        self.identify_by_basedn(samdb)

    def is_enabled(self):
        """Returns True if partition is enabled
        """
        # BUGFIX: previously this returned "self.is_enabled" -- the bound
        # method object itself, which is always truthy -- instead of the
        # boolean attribute set by load_partition().
        return self.enabled

    def is_foreign(self):
        """Returns True if this is not an Active Directory NC in our
        forest but is instead something else (e.g. a foreign NC)
        """
        if (self.system_flags & dsdb.SYSTEM_FLAG_CR_NTDS_NC) == 0:
            return True
        else:
            return False

    def should_be_present(self, target_dsa):
        """Tests whether this partition should have an NC replica
        on the target dsa. This method returns a tuple of
        needed=True/False, ro=True/False, partial=True/False

        :param target_dsa: should NC be present on target dsa
        """
        ro = False
        partial = False

        # If this is the config, schema, or default
        # domain NC for the target dsa then it should
        # be present
        needed = (self.nc_type == NCType.config or
                  self.nc_type == NCType.schema or
                  (self.nc_type == NCType.domain and
                   self.nc_dnstr == target_dsa.default_dnstr))

        # A writable replica of an application NC should be present
        # if a cross reference to the target DSA exists. Depending
        # on whether the DSA is ro we examine which type of cross
        # reference to look for (msDS-NC-Replica-Locations or
        # msDS-NC-RO-Replica-Locations)
        if self.nc_type == NCType.application:
            if target_dsa.is_ro():
                if target_dsa.dsa_dnstr in self.ro_location_list:
                    needed = True
            else:
                if target_dsa.dsa_dnstr in self.rw_location_list:
                    needed = True

        # If the target dsa is a gc then a partial replica of a
        # domain NC (other than the DSAs default domain) should exist
        # if there is also a cross reference for the DSA
        if (target_dsa.is_gc() and
            self.nc_type == NCType.domain and
            self.nc_dnstr != target_dsa.default_dnstr and
            (target_dsa.dsa_dnstr in self.ro_location_list or
             target_dsa.dsa_dnstr in self.rw_location_list)):
            needed = True
            partial = True

        # partial NCs are always readonly
        if needed and (target_dsa.is_ro() or partial):
            ro = True

        return needed, ro, partial

    def __str__(self):
        """Debug dump string output of class"""
        text = "%s" % NamingContext.__str__(self) +\
               "\n\tpartdn=%s" % self.partstr +\
               "".join("\n\tmsDS-NC-Replica-Locations=%s" % k for k in self.rw_location_list) +\
               "".join("\n\tmsDS-NC-RO-Replica-Locations=%s" % k for k in self.ro_location_list)
        return text
+
+
class Site(object):
    """An individual site object discovered thru the configuration
    naming context. Contains all DSAs that exist within the site
    """
    def __init__(self, site_dnstr, nt_now):
        """:param site_dnstr: DN string of the site object
        :param nt_now: current time as NT time (100 nanosecond intervals)
        """
        self.site_dnstr = site_dnstr
        self.site_guid = None
        self.site_options = 0
        self.site_topo_generator = None
        self.site_topo_failover = 0  # appears to be in minutes
        # all DSAs in this site, indexed by DSA dn string
        self.dsa_table = {}
        # writable (non-RODC) DSAs only, indexed by DSA dn string
        self.rw_dsa_table = {}
        self.nt_now = nt_now

    def load_site(self, samdb):
        """Loads the NTDS Site Settings options attribute for the site
        as well as querying and loading all DSAs that appear within
        the site.

        :param samdb: sam database to load from
        :raise KCCError: if the NTDS Site Settings object cannot be found
        """
        ssdn = "CN=NTDS Site Settings,%s" % self.site_dnstr
        attrs = ["options",
                 "interSiteTopologyFailover",
                 "interSiteTopologyGenerator"]
        try:
            res = samdb.search(base=ssdn, scope=ldb.SCOPE_BASE,
                               attrs=attrs)
            self_res = samdb.search(base=self.site_dnstr, scope=ldb.SCOPE_BASE,
                                    attrs=['objectGUID'])
        except ldb.LdbError as e16:
            (enum, estr) = e16.args
            raise KCCError("Unable to find site settings for (%s) - (%s)" %
                           (ssdn, estr))

        msg = res[0]
        if "options" in msg:
            self.site_options = int(msg["options"][0])

        if "interSiteTopologyGenerator" in msg:
            self.site_topo_generator = \
                str(msg["interSiteTopologyGenerator"][0])

        if "interSiteTopologyFailover" in msg:
            self.site_topo_failover = int(msg["interSiteTopologyFailover"][0])

        msg = self_res[0]
        if "objectGUID" in msg:
            self.site_guid = misc.GUID(samdb.schema_format_value("objectGUID",
                                       msg["objectGUID"][0]))

        self.load_all_dsa(samdb)

    def load_all_dsa(self, samdb):
        """Discover all nTDSDSA thru the sites entry and
        instantiate and load the DSAs. Each dsa is inserted
        into the dsa_table by dn string.

        :param samdb: sam database to search
        :raise KCCError: if the nTDSDSA subtree search fails
        """
        try:
            res = samdb.search(self.site_dnstr,
                               scope=ldb.SCOPE_SUBTREE,
                               expression="(objectClass=nTDSDSA)")
        except ldb.LdbError as e17:
            (enum, estr) = e17.args
            raise KCCError("Unable to find nTDSDSAs - (%s)" % estr)

        for msg in res:
            dnstr = str(msg.dn)

            # already loaded
            if dnstr in self.dsa_table:
                continue

            dsa = DirectoryServiceAgent(dnstr)

            dsa.load_dsa(samdb)

            # Assign this dsa to my dsa table
            # and index by dsa dn
            self.dsa_table[dnstr] = dsa
            if not dsa.is_ro():
                self.rw_dsa_table[dnstr] = dsa

    def get_dsa(self, dnstr):
        """Return a previously loaded DSA object by consulting
        the sites dsa_table for the provided DSA dn string

        :return: None if DSA doesn't exist
        """
        return self.dsa_table.get(dnstr)

    def select_istg(self, samdb, mydsa, ro):
        """Determine if my DC should be an intersite topology
        generator. If my DC is the istg and is both a writeable
        DC and the database is opened in write mode then we perform
        an originating update to set the interSiteTopologyGenerator
        attribute in the NTDS Site Settings object. An RODC always
        acts as an ISTG for itself.

        :param samdb: sam database
        :param mydsa: DSA object for the local DC
        :param ro: when True, skip the persistent samdb update
        :return: True if the local DC acts as the ISTG for this site
        :raise KCCError: if the interSiteTopologyGenerator update fails
        """
        # The KCC on an RODC always acts as an ISTG for itself
        if mydsa.dsa_is_ro:
            mydsa.dsa_is_istg = True
            self.site_topo_generator = mydsa.dsa_dnstr
            return True

        c_rep = get_dsa_config_rep(mydsa)

        # Load repsFrom and replUpToDateVector if not already loaded
        # so we can get the current state of the config replica and
        # whether we are getting updates from the istg
        c_rep.load_repsFrom(samdb)

        c_rep.load_replUpToDateVector(samdb)

        # From MS-ADTS 6.2.2.3.1 ISTG selection:
        # First, the KCC on a writable DC determines whether it acts
        # as an ISTG for its site
        #
        # Let s be the object such that s!lDAPDisplayName = nTDSDSA
        # and classSchema in s!objectClass.
        #
        # Let D be the sequence of objects o in the site of the local
        # DC such that o!objectCategory = s. D is sorted in ascending
        # order by objectGUID.
        #
        # Which is a fancy way of saying "sort all the nTDSDSA objects
        # in the site by guid in ascending order". Place sorted list
        # in D_sort[]
        D_sort = sorted(
            self.rw_dsa_table.values(),
            key=lambda dsa: ndr_pack(dsa.dsa_guid))

        # double word number of 100 nanosecond intervals since 1600s

        # Let f be the duration o!interSiteTopologyFailover seconds, or 2 hours
        # if o!interSiteTopologyFailover is 0 or has no value.
        #
        # Note: lastSuccess and ntnow are in 100 nanosecond intervals
        # so it appears we have to turn f into the same interval
        #
        # interSiteTopologyFailover (if set) appears to be in minutes
        # so we'll need to convert to seconds and then 100 nanosecond
        # intervals
        # XXX [MS-ADTS] 6.2.2.3.1 says it is seconds, not minutes.
        #
        # 10,000,000 is number of 100 nanosecond intervals in a second
        if self.site_topo_failover == 0:
            f = 2 * 60 * 60 * 10000000
        else:
            f = self.site_topo_failover * 60 * 10000000

        # Let o be the site settings object for the site of the local
        # DC, or NULL if no such o exists.
        d_dsa = self.dsa_table.get(self.site_topo_generator)

        # From MS-ADTS 6.2.2.3.1 ISTG selection:
        # If o != NULL and o!interSiteTopologyGenerator is not the
        # nTDSDSA object for the local DC and
        # o!interSiteTopologyGenerator is an element dj of sequence D:
        #
        if d_dsa is not None and d_dsa is not mydsa:
            # From MS-ADTS 6.2.2.3.1 ISTG Selection:
            # Let c be the cursor in the replUpToDateVector variable
            # associated with the NC replica of the config NC such
            # that c.uuidDsa = dj!invocationId. If no such c exists
            # (No evidence of replication from current ITSG):
            #     Let i = j.
            #     Let t = 0.
            #
            # Else if the current time < c.timeLastSyncSuccess - f
            # (Evidence of time sync problem on current ISTG):
            #     Let i = 0.
            #     Let t = 0.
            #
            # Else (Evidence of replication from current ITSG):
            #     Let i = j.
            #     Let t = c.timeLastSyncSuccess.
            #
            # last_success appears to be a double word containing
            # number of 100 nanosecond intervals since the 1600s
            j_idx = D_sort.index(d_dsa)

            found = False
            for cursor in c_rep.rep_replUpToDateVector_cursors:
                if d_dsa.dsa_ivid == cursor.source_dsa_invocation_id:
                    found = True
                    break

            if not found:
                i_idx = j_idx
                t_time = 0

            # XXX doc says current time < c.timeLastSyncSuccess - f
            # which is true only if f is negative or clocks are wrong.
            # f is not negative in the default case (2 hours).
            # NOTE(review): "cursor" below is the matching cursor found
            # by the loop above; these branches are only reached when
            # found is True, so cursor is always bound here.
            elif self.nt_now - cursor.last_sync_success > f:
                i_idx = 0
                t_time = 0
            else:
                i_idx = j_idx
                t_time = cursor.last_sync_success

        # Otherwise (Nominate local DC as ISTG):
        #     Let i be the integer such that di is the nTDSDSA
        #         object for the local DC.
        #     Let t = the current time.
        else:
            i_idx = D_sort.index(mydsa)
            t_time = self.nt_now

        # Compute a function that maintains the current ISTG if
        # it is alive, cycles through other candidates if not.
        #
        # Let k be the integer (i + ((current time - t) /
        #     o!interSiteTopologyFailover)) MOD |D|.
        #
        # Note: We don't want to divide by zero here so they must
        # have meant "f" instead of "o!interSiteTopologyFailover"
        k_idx = (i_idx + ((self.nt_now - t_time) // f)) % len(D_sort)

        # The local writable DC acts as an ISTG for its site if and
        # only if dk is the nTDSDSA object for the local DC. If the
        # local DC does not act as an ISTG, the KCC skips the
        # remainder of this task.
        d_dsa = D_sort[k_idx]
        d_dsa.dsa_is_istg = True

        # Update if we are the ISTG, otherwise return
        if d_dsa is not mydsa:
            return False

        # Nothing to do
        if self.site_topo_generator == mydsa.dsa_dnstr:
            return True

        self.site_topo_generator = mydsa.dsa_dnstr

        # If readonly database then do not perform a
        # persistent update
        if ro:
            return True

        # Perform update to the samdb
        ssdn = "CN=NTDS Site Settings,%s" % self.site_dnstr

        m = ldb.Message()
        m.dn = ldb.Dn(samdb, ssdn)

        m["interSiteTopologyGenerator"] = \
            ldb.MessageElement(mydsa.dsa_dnstr, ldb.FLAG_MOD_REPLACE,
                               "interSiteTopologyGenerator")
        try:
            samdb.modify(m)

        # NOTE(review): unlike the other handlers in this file, "estr"
        # here is the whole exception object rather than the unpacked
        # message text from e.args -- confirm whether this is intended.
        except ldb.LdbError as estr:
            raise KCCError(
                "Could not set interSiteTopologyGenerator for (%s) - (%s)" %
                (ssdn, estr))
        return True

    def is_intrasite_topology_disabled(self):
        """Returns True if intra-site topology is disabled for site"""
        return (self.site_options &
                dsdb.DS_NTDSSETTINGS_OPT_IS_AUTO_TOPOLOGY_DISABLED) != 0

    def is_intersite_topology_disabled(self):
        """Returns True if inter-site topology is disabled for site"""
        return ((self.site_options &
                 dsdb.DS_NTDSSETTINGS_OPT_IS_INTER_SITE_AUTO_TOPOLOGY_DISABLED)
                != 0)

    def is_random_bridgehead_disabled(self):
        """Returns True if selection of random bridgehead is disabled"""
        return (self.site_options &
                dsdb.DS_NTDSSETTINGS_OPT_IS_RAND_BH_SELECTION_DISABLED) != 0

    def is_detect_stale_disabled(self):
        """Returns True if detect stale is disabled for site"""
        return (self.site_options &
                dsdb.DS_NTDSSETTINGS_OPT_IS_TOPL_DETECT_STALE_DISABLED) != 0

    def is_cleanup_ntdsconn_disabled(self):
        """Returns True if NTDS Connection cleanup is disabled for site"""
        return (self.site_options &
                dsdb.DS_NTDSSETTINGS_OPT_IS_TOPL_CLEANUP_DISABLED) != 0

    def same_site(self, dsa):
        """Return True if dsa is in this site"""
        if self.get_dsa(dsa.dsa_dnstr):
            return True
        return False

    def is_rodc_site(self):
        """Return True if the site has DSAs but none of them is writable."""
        if len(self.dsa_table) > 0 and len(self.rw_dsa_table) == 0:
            return True
        return False

    def __str__(self):
        """Debug dump string output of class"""
        text = "%s:" % self.__class__.__name__ +\
               "\n\tdn=%s" % self.site_dnstr +\
               "\n\toptions=0x%X" % self.site_options +\
               "\n\ttopo_generator=%s" % self.site_topo_generator +\
               "\n\ttopo_failover=%d" % self.site_topo_failover
        for key, dsa in self.dsa_table.items():
            text = text + "\n%s" % dsa
        return text
+
+
class GraphNode(object):
    """A graph node describing a set of edges that should be directed to it.

    Each edge is a connection for a particular naming context replica directed
    from another node in the forest to this node.
    """

    def __init__(self, dsa_dnstr, max_node_edges):
        """Instantiate the graph node according to a DSA dn string

        :param dsa_dnstr: DN string of the DSA this node stands for
        :param max_node_edges: maximum number of edges that should ever
            be directed to the node
        """
        self.max_edges = max_node_edges
        self.dsa_dnstr = dsa_dnstr
        self.edge_from = []

    def __str__(self):
        """Debug dump string output of class"""
        lines = ["%s:" % self.__class__.__name__,
                 "\tdsa_dnstr=%s" % self.dsa_dnstr,
                 "\tmax_edges=%d" % self.max_edges]
        lines.extend("\tedge_from[%d]=%s" % (i, edge)
                     for i, edge in enumerate(self.edge_from)
                     if isinstance(edge, str))
        return "\n".join(lines)

    def add_edge_from(self, from_dsa_dnstr):
        """Add an edge from the dsa to our graph nodes edge from list

        :param from_dsa_dnstr: the dsa that the edge emanates from
        :return: True if the edge was accepted, False otherwise
        """
        assert isinstance(from_dsa_dnstr, str)

        accepted = (from_dsa_dnstr != self.dsa_dnstr and      # no self-edges
                    from_dsa_dnstr not in self.edge_from and  # no duplicates
                    len(self.edge_from) < self.max_edges)     # respect cap
        if accepted:
            self.edge_from.append(from_dsa_dnstr)
        return accepted

    def add_edges_from_connections(self, dsa):
        """For each nTDSConnection object associated with a particular
        DSA, test whether it implies an edge to this graph node (i.e.
        the "fromServer" attribute) and add that edge, subject to the
        maximum edge limit for this node.

        :param dsa: dsa with a dnstr equivalent to this graph node
        """
        for connect in dsa.connect_table.values():
            self.add_edge_from(connect.from_dnstr)

    def add_connections_from_edges(self, dsa, transport):
        """For each edge directed to this graph node, ensure there
        is a corresponding nTDSConnection object in the dsa.

        :param dsa: DSA to receive any newly created connections
        :param transport: inter-site transport for new connections
        """
        for edge_dnstr in self.edge_from:
            connections = dsa.get_connection_by_from_dnstr(edge_dnstr)

            # Per the KCC algorithm, an edge is satisfied when the DSA
            # already has at least one nTDSConnection from this source
            # whose options do not contain NTDSCONN_OPT_RODC_TOPOLOGY.
            if any(not connect.is_rodc_topology()
                   for connect in connections):
                continue

            # Otherwise the KCC adds a new, generated connection object
            # (renamable/movable under the config partition) from the
            # edge's source DSA.
            opt = dsdb.NTDSCONN_OPT_IS_GENERATED
            flags = (dsdb.SYSTEM_FLAG_CONFIG_ALLOW_RENAME |
                     dsdb.SYSTEM_FLAG_CONFIG_ALLOW_MOVE)

            dsa.new_connection(opt, flags, transport, edge_dnstr, None)

    def has_sufficient_edges(self):
        """Return True if we have met the maximum "from edges" criteria"""
        return len(self.edge_from) >= self.max_edges
+
+
class Transport(object):
    """Class defines a Inter-site transport found under Sites
    """

    def __init__(self, dnstr):
        """:param dnstr: DN string of the inter-site transport object"""
        self.dnstr = dnstr
        self.options = 0
        self.guid = None
        self.name = None
        self.address_attr = None
        self.bridgehead_list = []

    def __str__(self):
        """Debug dump string output of Transport object"""
        parts = ["%s:\n\tdn=%s" % (self.__class__.__name__, self.dnstr),
                 "\n\tguid=%s" % str(self.guid),
                 "\n\toptions=%d" % self.options,
                 "\n\taddress_attr=%s" % self.address_attr,
                 "\n\tname=%s" % self.name]
        parts.extend("\n\tbridgehead_list=%s" % dnstr
                     for dnstr in self.bridgehead_list)
        return "".join(parts)

    def load_transport(self, samdb):
        """Search the samdb for this transport's DN and load its
        attributes onto the object.

        :param samdb: sam database to load from
        :raise KCCError: if the transport DN cannot be found
        """
        attrs = ["objectGUID",
                 "options",
                 "name",
                 "bridgeheadServerListBL",
                 "transportAddressAttribute"]
        try:
            res = samdb.search(base=self.dnstr, scope=ldb.SCOPE_BASE,
                               attrs=attrs)
        except ldb.LdbError as err:
            (_, estr) = err.args
            raise KCCError("Unable to find Transport for (%s) - (%s)" %
                           (self.dnstr, estr))

        msg = res[0]
        guid_blob = samdb.schema_format_value("objectGUID",
                                              msg["objectGUID"][0])
        self.guid = misc.GUID(guid_blob)

        if "options" in msg:
            self.options = int(msg["options"][0])

        if "transportAddressAttribute" in msg:
            self.address_attr = str(msg["transportAddressAttribute"][0])

        if "name" in msg:
            self.name = str(msg["name"][0])

        if "bridgeheadServerListBL" in msg:
            for value in msg["bridgeheadServerListBL"]:
                dnstr = str(dsdb_Dn(samdb, value.decode('utf8')).dn)
                if dnstr not in self.bridgehead_list:
                    self.bridgehead_list.append(dnstr)
+
+
class RepsFromTo(object):
    """Class encapsulation of the NDR repsFromToBlob.

    Removes the necessity of external code having to
    understand about other_info or manipulation of
    update flags.
    """
    def __init__(self, nc_dnstr=None, ndr_blob=None):
        """:param nc_dnstr: NC DN string this repsFromTo applies to
        :param ndr_blob: existing repsFromToBlob to wrap; when None a
            fresh version-1 blob is created
        """

        self.__dict__['to_be_deleted'] = False
        self.__dict__['nc_dnstr'] = nc_dnstr
        self.__dict__['update_flags'] = 0x0
        # XXX the following sounds dubious and/or better solved
        # elsewhere, but lets leave it for now. In particular, there
        # seems to be no reason for all the non-ndr generated
        # attributes to be handled in the round about way (e.g.
        # self.__dict__['to_be_deleted'] = False above). On the other
        # hand, it all seems to work. Hooray! Hands off!.
        #
        # WARNING:
        #
        # There is a very subtle bug here with python
        # and our NDR code. If you assign directly to
        # a NDR produced struct (e.g. t_repsFrom.ctr.other_info)
        # then a proper python GC reference count is not
        # maintained.
        #
        # To work around this we maintain an internal
        # reference to "dns_name(x)" and "other_info" elements
        # of repsFromToBlob. This internal reference
        # is hidden within this class but it is why you
        # see statements like this below:
        #
        # self.__dict__['ndr_blob'].ctr.other_info = \
        #     self.__dict__['other_info'] = drsblobs.repsFromTo1OtherInfo()
        #
        # That would appear to be a redundant assignment but
        # it is necessary to hold a proper python GC reference
        # count.
        if ndr_blob is None:
            self.__dict__['ndr_blob'] = drsblobs.repsFromToBlob()
            self.__dict__['ndr_blob'].version = 0x1
            self.__dict__['dns_name1'] = None
            self.__dict__['dns_name2'] = None

            self.__dict__['ndr_blob'].ctr.other_info = \
                self.__dict__['other_info'] = drsblobs.repsFromTo1OtherInfo()

        else:
            self.__dict__['ndr_blob'] = ndr_blob
            self.__dict__['other_info'] = ndr_blob.ctr.other_info

            # Version 1 blobs carry a single dns_name; later versions
            # carry dns_name1/dns_name2.
            if ndr_blob.version == 0x1:
                self.__dict__['dns_name1'] = ndr_blob.ctr.other_info.dns_name
                self.__dict__['dns_name2'] = None
            else:
                self.__dict__['dns_name1'] = ndr_blob.ctr.other_info.dns_name1
                self.__dict__['dns_name2'] = ndr_blob.ctr.other_info.dns_name2

    def __str__(self):
        """Debug dump string output of class"""

        text = "%s:" % self.__class__.__name__ +\
               "\n\tdnstr=%s" % self.nc_dnstr +\
               "\n\tupdate_flags=0x%X" % self.update_flags +\
               "\n\tversion=%d" % self.version +\
               "\n\tsource_dsa_obj_guid=%s" % self.source_dsa_obj_guid +\
               ("\n\tsource_dsa_invocation_id=%s" %
                self.source_dsa_invocation_id) +\
               "\n\ttransport_guid=%s" % self.transport_guid +\
               "\n\treplica_flags=0x%X" % self.replica_flags +\
               ("\n\tconsecutive_sync_failures=%d" %
                self.consecutive_sync_failures) +\
               "\n\tlast_success=%s" % self.last_success +\
               "\n\tlast_attempt=%s" % self.last_attempt +\
               "\n\tdns_name1=%s" % self.dns_name1 +\
               "\n\tdns_name2=%s" % self.dns_name2 +\
               "\n\tschedule[ " +\
               "".join("0x%X " % slot for slot in self.schedule) +\
               "]"

        return text

    def __setattr__(self, item, value):
        """Set an attribute and change update flag.

        Be aware that setting any RepsFromTo attribute will set the
        drsuapi.DRSUAPI_DRS_UPDATE_ADDRESS update flag.
        """
        if item in ['schedule', 'replica_flags', 'transport_guid',
                    'source_dsa_obj_guid', 'source_dsa_invocation_id',
                    'consecutive_sync_failures', 'last_success',
                    'last_attempt']:

            # Some fields additionally flag their own specific update bit
            if item in ['replica_flags']:
                self.__dict__['update_flags'] |= drsuapi.DRSUAPI_DRS_UPDATE_FLAGS
            elif item in ['schedule']:
                self.__dict__['update_flags'] |= drsuapi.DRSUAPI_DRS_UPDATE_SCHEDULE

            setattr(self.__dict__['ndr_blob'].ctr, item, value)

        elif item in ['dns_name1']:
            self.__dict__['dns_name1'] = value

            if self.__dict__['ndr_blob'].version == 0x1:
                self.__dict__['ndr_blob'].ctr.other_info.dns_name = \
                    self.__dict__['dns_name1']
            else:
                self.__dict__['ndr_blob'].ctr.other_info.dns_name1 = \
                    self.__dict__['dns_name1']

        elif item in ['dns_name2']:
            self.__dict__['dns_name2'] = value

            # dns_name2 does not exist in version 1 blobs
            if self.__dict__['ndr_blob'].version == 0x1:
                raise AttributeError(item)
            else:
                self.__dict__['ndr_blob'].ctr.other_info.dns_name2 = \
                    self.__dict__['dns_name2']

        elif item in ['nc_dnstr']:
            self.__dict__['nc_dnstr'] = value

        elif item in ['to_be_deleted']:
            self.__dict__['to_be_deleted'] = value

        elif item in ['version']:
            raise AttributeError("Attempt to set readonly attribute %s" % item)
        else:
            raise AttributeError("Unknown attribute %s" % item)

        # Any successful set marks the address as needing update
        self.__dict__['update_flags'] |= drsuapi.DRSUAPI_DRS_UPDATE_ADDRESS

    def __getattr__(self, item):
        """Overload of RepsFromTo attribute retrieval.

        Allows external code to ignore substructures within the blob
        """
        if item in ['schedule', 'replica_flags', 'transport_guid',
                    'source_dsa_obj_guid', 'source_dsa_invocation_id',
                    'consecutive_sync_failures', 'last_success',
                    'last_attempt']:
            return getattr(self.__dict__['ndr_blob'].ctr, item)

        elif item in ['version']:
            return self.__dict__['ndr_blob'].version

        elif item in ['dns_name1']:
            if self.__dict__['ndr_blob'].version == 0x1:
                return self.__dict__['ndr_blob'].ctr.other_info.dns_name
            else:
                return self.__dict__['ndr_blob'].ctr.other_info.dns_name1

        elif item in ['dns_name2']:
            # dns_name2 does not exist in version 1 blobs
            if self.__dict__['ndr_blob'].version == 0x1:
                raise AttributeError(item)
            else:
                return self.__dict__['ndr_blob'].ctr.other_info.dns_name2

        elif item in ['to_be_deleted']:
            return self.__dict__['to_be_deleted']

        elif item in ['nc_dnstr']:
            return self.__dict__['nc_dnstr']

        elif item in ['update_flags']:
            return self.__dict__['update_flags']

        raise AttributeError("Unknown attribute %s" % item)

    def is_modified(self):
        """Return True if any attribute change has flagged an update."""
        return (self.update_flags != 0x0)

    def set_unmodified(self):
        """Clear all pending update flags."""
        self.__dict__['update_flags'] = 0x0
+
+
class SiteLink(object):
    """Class defines a site link found under sites
    """

    def __init__(self, dnstr):
        """:param dnstr: DN string of the siteLink object"""
        self.dnstr = dnstr
        self.options = 0
        self.system_flags = 0
        self.cost = 0
        self.schedule = None
        self.interval = None
        # list of (GUID, dn string) tuples for the linked sites
        self.site_list = []

    def __str__(self):
        """Debug dump string output of SiteLink object"""

        text = "%s:\n\tdn=%s" % (self.__class__.__name__, self.dnstr) +\
               "\n\toptions=%d" % self.options +\
               "\n\tsystem_flags=%d" % self.system_flags +\
               "\n\tcost=%d" % self.cost +\
               "\n\tinterval=%s" % self.interval

        if self.schedule is not None:
            text += "\n\tschedule.size=%s" % self.schedule.size +\
                    "\n\tschedule.bandwidth=%s" % self.schedule.bandwidth +\
                    ("\n\tschedule.numberOfSchedules=%s" %
                     self.schedule.numberOfSchedules)

            for i, header in enumerate(self.schedule.headerArray):
                text += ("\n\tschedule.headerArray[%d].type=%d" %
                         (i, header.type)) +\
                        ("\n\tschedule.headerArray[%d].offset=%d" %
                         (i, header.offset)) +\
                        "\n\tschedule.dataArray[%d].slots[ " % i +\
                        "".join("0x%X " % slot for slot in self.schedule.dataArray[i].slots) +\
                        "]"

        for guid, dn in self.site_list:
            text = text + "\n\tsite_list=%s (%s)" % (guid, dn)
        return text

    def load_sitelink(self, samdb):
        """Given a siteLink object with an prior initialization
        for the object's DN, search for the DN and load attributes
        from the samdb.

        :param samdb: sam database to load from
        :raise KCCError: if the siteLink DN cannot be found
        """
        attrs = ["options",
                 "systemFlags",
                 "cost",
                 "schedule",
                 "replInterval",
                 "siteList"]
        try:
            res = samdb.search(base=self.dnstr, scope=ldb.SCOPE_BASE,
                               attrs=attrs, controls=['extended_dn:0'])

        except ldb.LdbError as e19:
            (enum, estr) = e19.args
            raise KCCError("Unable to find SiteLink for (%s) - (%s)" %
                           (self.dnstr, estr))

        msg = res[0]

        if "options" in msg:
            self.options = int(msg["options"][0])

        if "systemFlags" in msg:
            self.system_flags = int(msg["systemFlags"][0])

        if "cost" in msg:
            self.cost = int(msg["cost"][0])

        if "replInterval" in msg:
            self.interval = int(msg["replInterval"][0])

        if "siteList" in msg:
            for value in msg["siteList"]:
                dsdn = dsdb_Dn(samdb, value.decode('utf8'))
                guid = misc.GUID(dsdn.dn.get_extended_component('GUID'))
                dnstr = str(dsdn.dn)
                if (guid, dnstr) not in self.site_list:
                    self.site_list.append((guid, dnstr))

        if "schedule" in msg:
            # BUGFIX: unpack the schedule attribute itself. Previously
            # this referenced the stale "value" loop variable left over
            # from the siteList iteration above, which decoded garbage
            # (and raised NameError when siteList was absent).
            self.schedule = ndr_unpack(drsblobs.schedule, msg["schedule"][0])
        else:
            self.schedule = new_connection_schedule()
+
+
class KCCFailedObject(object):
    """Value container recording a replication partner that failed.

    Attributes mirror the constructor arguments verbatim.
    """

    def __init__(self, uuid, failure_count, time_first_failure,
                 last_result, dns_name):
        self.uuid = uuid
        self.dns_name = dns_name
        self.failure_count = failure_count
        self.time_first_failure = time_first_failure
        self.last_result = last_result
+
+
+##################################################
+# Global Functions and Variables
+##################################################
+
def get_dsa_config_rep(dsa):
    """Return the Configuration NC replica held by this DSA.

    :param dsa: DSA object exposing a ``current_rep_table`` mapping
    :raises KCCError: if the DSA holds no config NC replica
    """
    # The first replica reporting is_config() is the one we want.
    config_rep = next((rep for rep in dsa.current_rep_table.values()
                       if rep.is_config()), None)
    if config_rep is not None:
        return config_rep

    raise KCCError("Unable to find config NC replica for (%s)" %
                   dsa.dsa_dnstr)
+
+
def new_connection_schedule():
    """Create a default schedule for an NTDSConnection or Sitelink. This
    is packed differently from the repltimes schedule used elsewhere
    in KCC (where the 168 nibbles are packed into 84 bytes).

    :return: a drsblobs.schedule replicating at the end of every hour
    """
    # 168 byte instances of the 0x01 value.  The low order 4 bits of
    # each byte equate to 15 minute intervals within a single hour.
    # There are 168 bytes because there are 168 hours in a full week;
    # effectively we are saying to perform replication at the end of
    # each hour of the week.
    hdr = drsblobs.scheduleHeader()
    hdr.type = 0
    hdr.offset = 20

    slot_data = drsblobs.scheduleSlots()
    slot_data.slots = [0x01] * 168

    sched = drsblobs.schedule()
    sched.size = 188
    sched.bandwidth = 0
    sched.numberOfSchedules = 1
    sched.headerArray = [hdr]
    sched.dataArray = [slot_data]
    return sched
+
+
+##################################################
+# DNS related calls
+##################################################
+
def uncovered_sites_to_cover(samdb, site_name):
    """
    Discover which sites have no DCs and whose lowest single-hop cost
    distance for any link attached to that site is linked to the site supplied.

    We compare the lowest cost of your single-hop link to this site to all of
    those available (if it exists). This means that a lower ranked siteLink
    with only the uncovered site can trump any available links (but this can
    only be done with specific, poorly enacted user configuration).

    If the site is connected to more than one other site with the same
    siteLink, only the largest site (failing that sorted alphabetically)
    creates the DNS records.

    :param samdb: database
    :param site_name: origin site (with a DC)

    :return: a list of lower-cased site RDNs this site should be
        covering (for DNS)
    """
    sites_to_cover = []

    # Every server object with a serverReference, i.e. every real DC.
    server_res = samdb.search(base=samdb.get_config_basedn(),
                              scope=ldb.SCOPE_SUBTREE,
                              expression="(&(objectClass=server)"
                              "(serverReference=*))")

    site_res = samdb.search(base=samdb.get_config_basedn(),
                            scope=ldb.SCOPE_SUBTREE,
                            expression="(objectClass=site)")

    # Canonical site name -> number of DCs in that site.
    sites_in_use = Counter()
    dc_count = 0

    # Assume server is of form DC,Servers,Site-ABCD because of schema
    for msg in server_res:
        site_dn = msg.dn.parent().parent()
        sites_in_use[site_dn.canonical_str()] += 1

        if site_dn.get_rdn_value().lower() == site_name.lower():
            dc_count += 1

    if len(sites_in_use) != len(site_res):
        # There is a possible uncovered site
        sites_uncovered = []

        for msg in site_res:
            if msg.dn.canonical_str() not in sites_in_use:
                sites_uncovered.append(msg)

        own_site_dn = "CN={},CN=Sites,{}".format(
            ldb.binary_encode(site_name),
            ldb.binary_encode(str(samdb.get_config_basedn()))
        )

        for site in sites_uncovered:
            encoded_dn = ldb.binary_encode(str(site.dn))

            # Get a sorted list of all siteLinks featuring the uncovered site
            link_res1 = samdb.search(base=samdb.get_config_basedn(),
                                     scope=ldb.SCOPE_SUBTREE, attrs=["cost"],
                                     expression="(&(objectClass=siteLink)"
                                     "(siteList={}))".format(encoded_dn),
                                     controls=["server_sort:1:0:cost"])

            # Get a sorted list of all siteLinks connecting this an the
            # uncovered site
            link_res2 = samdb.search(base=samdb.get_config_basedn(),
                                     scope=ldb.SCOPE_SUBTREE,
                                     attrs=["cost", "siteList"],
                                     expression="(&(objectClass=siteLink)"
                                     "(siteList={})(siteList={}))".format(
                                         own_site_dn,
                                         encoded_dn),
                                     controls=["server_sort:1:0:cost"])

            # Add to list if your link is equal in cost to lowest cost link
            # (server_sort ascending on cost puts the cheapest link first).
            if len(link_res1) > 0 and len(link_res2) > 0:
                cost1 = int(link_res1[0]['cost'][0])
                cost2 = int(link_res2[0]['cost'][0])

                # Own siteLink must match the lowest cost link
                if cost1 != cost2:
                    continue

                # In a siteLink with more than 2 sites attached, only pick the
                # largest site, and if there are multiple, the earliest
                # alphabetically.
                to_cover = True
                for site_val in link_res2[0]['siteList']:
                    site_dn = ldb.Dn(samdb, str(site_val))
                    site_dn_str = site_dn.canonical_str()
                    site_rdn = site_dn.get_rdn_value().lower()
                    if sites_in_use[site_dn_str] > dc_count:
                        to_cover = False
                        break
                    elif (sites_in_use[site_dn_str] == dc_count and
                          site_rdn < site_name.lower()):
                        to_cover = False
                        break

                if to_cover:
                    site_cover_rdn = site.dn.get_rdn_value()
                    sites_to_cover.append(site_cover_rdn.lower())

    return sites_to_cover
diff --git a/python/samba/kcc/ldif_import_export.py b/python/samba/kcc/ldif_import_export.py
new file mode 100644
index 0000000..41f0fd7
--- /dev/null
+++ b/python/samba/kcc/ldif_import_export.py
@@ -0,0 +1,403 @@
+# LDIF helper functions for the samba_kcc tool
+#
+# Copyright (C) Dave Craft 2011
+# Copyright (C) Andrew Bartlett 2015
+#
+# Andrew Bartlett's alleged work performed by his underlings Douglas
+# Bagnall and Garming Sam.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+
+from samba import Ldb, ldb, read_and_sub_file
+from samba.auth import system_session
+from samba.samdb import SamDB, dsdb_Dn
+
+
class LdifError(Exception):
    """Raised when importing or exporting KCC LDIF data fails."""
+
+
def write_search_result(samdb, f, res):
    """Append every message in *res* to *f* in LDIF form.

    :param samdb: Ldb/SamDB used to render the messages
    :param f: writable text file object
    :param res: iterable of ldb search result messages
    """
    for message in res:
        rendered = samdb.write_ldif(message, ldb.CHANGETYPE_NONE)
        f.write("%s" % rendered)
+
+
def ldif_to_samdb(dburl, lp, ldif_file, forced_local_dsa=None):
    """Routine to import all objects and attributes that are relevant
    to the KCC algorithms from a previously exported LDIF file.

    The point of this function is to allow a programmer/debugger to
    import an LDIF file with non-security relevant information that
    was previously extracted from a DC database. The LDIF file is used
    to create a temporary abbreviated database. The KCC algorithm can
    then run against this abbreviated database for debug or test
    verification that the topology generated is computationally the
    same between different OSes and algorithms.

    :param dburl: path to the temporary abbreviated db to create
    :param lp: loadparm context
    :param ldif_file: path to the ldif file to import
    :param forced_local_dsa: optional DN string; when set, the rootDSE
        dsServiceName is rewritten to point at that DSA
    :return: SamDB connection to the freshly populated database
    :raises LdifError: if the db already exists or the import fails
    """
    if os.path.exists(dburl):
        raise LdifError("Specify a database (%s) that doesn't already exist." %
                        dburl)

    # Use ["modules:"] as we are attempting to build a sam
    # database as opposed to start it here.
    tmpdb = Ldb(url=dburl, session_info=system_session(),
                lp=lp, options=["modules:"])

    # Everything is imported in one transaction so a failed import
    # leaves no partially built database behind.
    tmpdb.transaction_start()
    try:
        data = read_and_sub_file(ldif_file, None)
        tmpdb.add_ldif(data, None)
        if forced_local_dsa:
            tmpdb.modify_ldif("""dn: @ROOTDSE
changetype: modify
replace: dsServiceName
dsServiceName: CN=NTDS Settings,%s
 """ % forced_local_dsa)

        # Restrict the module stack used when the db is re-opened below.
        tmpdb.add_ldif("""dn: @MODULES
@LIST: rootdse,extended_dn_in,extended_dn_out_ldb,objectguid
-
""")

    except Exception as estr:
        tmpdb.transaction_cancel()
        raise LdifError("Failed to import %s: %s" % (ldif_file, estr))

    tmpdb.transaction_commit()

    # We have an abbreviated list of options here because we have built
    # an abbreviated database. We use the rootdse and extended-dn
    # modules only during this re-open
    samdb = SamDB(url=dburl, session_info=system_session(), lp=lp)
    return samdb
+
+
def samdb_to_ldif_file(samdb, dburl, lp, creds, ldif_file):
    """Routine to extract all objects and attributes that are relevant
    to the KCC algorithms from a DC database.

    The point of this function is to allow a programmer/debugger to
    extract an LDIF file with non-security relevant information from
    a DC database.  The LDIF file can then be used to "import" via
    the import_ldif() function this file into a temporary abbreviated
    database.  The KCC algorithm can then run against this abbreviated
    database for debug or test verification that the topology generated
    is computationally the same between different OSes and algorithms.

    :param samdb: ignored; immediately rebound to a fresh connection
    :param dburl: LDAP database URL to extract info from
    :param lp: loadparm context
    :param creds: credentials for the LDAP connection
    :param ldif_file: output LDIF file name to create
    :raises LdifError: on connection, file or search failure
    """
    try:
        samdb = SamDB(url=dburl,
                      session_info=system_session(),
                      credentials=creds, lp=lp)
    except ldb.LdbError as e:
        (enum, estr) = e.args
        raise LdifError("Unable to open sam database (%s) : %s" %
                        (dburl, estr))

    if os.path.exists(ldif_file):
        raise LdifError("Specify a file (%s) that doesn't already exist." %
                        ldif_file)

    try:
        f = open(ldif_file, "w")
    except IOError as ioerr:
        raise LdifError("Unable to open (%s) : %s" % (ldif_file, str(ioerr)))

    try:
        # Query Partitions
        attrs = ["objectClass",
                 "objectGUID",
                 "cn",
                 "whenChanged",
                 "objectSid",
                 "Enabled",
                 "systemFlags",
                 "dnsRoot",
                 "nCName",
                 "msDS-NC-Replica-Locations",
                 "msDS-NC-RO-Replica-Locations"]

        sstr = "CN=Partitions,%s" % samdb.get_config_basedn()
        res = samdb.search(base=sstr, scope=ldb.SCOPE_SUBTREE,
                           attrs=attrs,
                           expression="(objectClass=crossRef)")

        # Write partitions output
        write_search_result(samdb, f, res)

        # Query cross reference container
        attrs = ["objectClass",
                 "objectGUID",
                 "cn",
                 "whenChanged",
                 "fSMORoleOwner",
                 "systemFlags",
                 "msDS-Behavior-Version",
                 "msDS-EnabledFeature"]

        sstr = "CN=Partitions,%s" % samdb.get_config_basedn()
        res = samdb.search(base=sstr, scope=ldb.SCOPE_SUBTREE,
                           attrs=attrs,
                           expression="(objectClass=crossRefContainer)")

        # Write cross reference container output
        write_search_result(samdb, f, res)

        # Query Sites
        attrs = ["objectClass",
                 "objectGUID",
                 "cn",
                 "whenChanged",
                 "systemFlags"]

        sstr = "CN=Sites,%s" % samdb.get_config_basedn()
        sites = samdb.search(base=sstr, scope=ldb.SCOPE_SUBTREE,
                             attrs=attrs,
                             expression="(objectClass=site)")

        # Write sites output
        write_search_result(samdb, f, sites)

        # Query NTDS Site Settings
        for msg in sites:
            sitestr = str(msg.dn)

            attrs = ["objectClass",
                     "objectGUID",
                     "cn",
                     "whenChanged",
                     "interSiteTopologyGenerator",
                     "interSiteTopologyFailover",
                     "schedule",
                     "options"]

            sstr = "CN=NTDS Site Settings,%s" % sitestr
            res = samdb.search(base=sstr, scope=ldb.SCOPE_BASE,
                               attrs=attrs)

            # Write Site Settings output
            write_search_result(samdb, f, res)

        # Naming context list
        nclist = []

        # Query Directory Service Agents
        for msg in sites:
            sstr = str(msg.dn)

            ncattrs = ["hasMasterNCs",
                       "msDS-hasMasterNCs",
                       "hasPartialReplicaNCs",
                       "msDS-HasDomainNCs",
                       "msDS-hasFullReplicaNCs",
                       "msDS-HasInstantiatedNCs"]
            attrs = ["objectClass",
                     "objectGUID",
                     "cn",
                     "whenChanged",
                     "invocationID",
                     "options",
                     "msDS-isRODC",
                     "msDS-Behavior-Version"]

            res = samdb.search(base=sstr, scope=ldb.SCOPE_SUBTREE,
                               attrs=attrs + ncattrs,
                               expression="(objectClass=nTDSDSA)")

            # Spin thru all the DSAs looking for NC replicas
            # and build a list of all possible Naming Contexts
            # for subsequent retrieval below
            for res_msg in res:
                for k in res_msg.keys():
                    if k in ncattrs:
                        for value in res_msg[k]:
                            # Some of these have binary DNs so
                            # use dsdb_Dn to split out relevant parts
                            dsdn = dsdb_Dn(samdb, value.decode('utf8'))
                            dnstr = str(dsdn.dn)
                            if dnstr not in nclist:
                                nclist.append(dnstr)

            # Write DSA output
            write_search_result(samdb, f, res)

        # Query NTDS Connections
        for msg in sites:
            sstr = str(msg.dn)

            attrs = ["objectClass",
                     "objectGUID",
                     "cn",
                     "whenChanged",
                     "options",
                     "whenCreated",
                     "enabledConnection",
                     "schedule",
                     "transportType",
                     "fromServer",
                     "systemFlags"]

            res = samdb.search(base=sstr, scope=ldb.SCOPE_SUBTREE,
                               attrs=attrs,
                               expression="(objectClass=nTDSConnection)")
            # Write NTDS Connection output
            write_search_result(samdb, f, res)

        # Query Intersite transports
        attrs = ["objectClass",
                 "objectGUID",
                 "cn",
                 "whenChanged",
                 "options",
                 "name",
                 "bridgeheadServerListBL",
                 "transportAddressAttribute"]

        sstr = "CN=Inter-Site Transports,CN=Sites,%s" % \
               samdb.get_config_basedn()
        res = samdb.search(sstr, scope=ldb.SCOPE_SUBTREE,
                           attrs=attrs,
                           expression="(objectClass=interSiteTransport)")

        # Write inter-site transport output
        write_search_result(samdb, f, res)

        # Query siteLink
        attrs = ["objectClass",
                 "objectGUID",
                 "cn",
                 "whenChanged",
                 "systemFlags",
                 "options",
                 "schedule",
                 "replInterval",
                 "siteList",
                 "cost"]

        sstr = "CN=Sites,%s" % \
               samdb.get_config_basedn()
        res = samdb.search(sstr, scope=ldb.SCOPE_SUBTREE,
                           attrs=attrs,
                           expression="(objectClass=siteLink)",
                           controls=['extended_dn:0'])

        # Write siteLink output
        write_search_result(samdb, f, res)

        # Query siteLinkBridge
        attrs = ["objectClass",
                 "objectGUID",
                 "cn",
                 "whenChanged",
                 "siteLinkList"]

        sstr = "CN=Sites,%s" % samdb.get_config_basedn()
        res = samdb.search(sstr, scope=ldb.SCOPE_SUBTREE,
                           attrs=attrs,
                           expression="(objectClass=siteLinkBridge)")

        # Write siteLinkBridge output
        write_search_result(samdb, f, res)

        # Query servers containers
        # Needed for samdb.server_site_name()
        attrs = ["objectClass",
                 "objectGUID",
                 "cn",
                 "whenChanged",
                 "systemFlags"]

        sstr = "CN=Sites,%s" % samdb.get_config_basedn()
        res = samdb.search(sstr, scope=ldb.SCOPE_SUBTREE,
                           attrs=attrs,
                           expression="(objectClass=serversContainer)")

        # Write servers container output
        write_search_result(samdb, f, res)

        # Query servers
        # Needed because some transport interfaces refer back to
        # attributes found in the server object. Also needed
        # so extended-dn will be happy with dsServiceName in rootDSE
        attrs = ["objectClass",
                 "objectGUID",
                 "cn",
                 "whenChanged",
                 "systemFlags",
                 "dNSHostName",
                 "mailAddress"]

        sstr = "CN=Sites,%s" % samdb.get_config_basedn()
        res = samdb.search(sstr, scope=ldb.SCOPE_SUBTREE,
                           attrs=attrs,
                           expression="(objectClass=server)")

        # Write server output
        write_search_result(samdb, f, res)

        # Query Naming Context replicas
        attrs = ["objectClass",
                 "objectGUID",
                 "cn",
                 "whenChanged",
                 "objectSid",
                 "fSMORoleOwner",
                 "msDS-Behavior-Version",
                 "repsFrom",
                 "repsTo"]

        for sstr in nclist:
            res = samdb.search(sstr, scope=ldb.SCOPE_BASE,
                               attrs=attrs)

            # Write naming context output
            write_search_result(samdb, f, res)

        # Query rootDSE replicas
        attrs = ["objectClass",
                 "objectGUID",
                 "cn",
                 "whenChanged",
                 "rootDomainNamingContext",
                 "configurationNamingContext",
                 "schemaNamingContext",
                 "defaultNamingContext",
                 "dsServiceName"]

        sstr = ""
        res = samdb.search(sstr, scope=ldb.SCOPE_BASE,
                           attrs=attrs)

        # Record the rootDSE object as a dn as it
        # would appear in the base ldb file. We have
        # to save it this way because we are going to
        # be importing as an abbreviated database.
        res[0].dn = ldb.Dn(samdb, "@ROOTDSE")

        # Write rootdse output
        write_search_result(samdb, f, res)

    except ldb.LdbError as e1:
        (enum, estr) = e1.args
        raise LdifError("Error processing (%s) : %s" % (sstr, estr))
    finally:
        # BUGFIX: close the output file even when a search fails
        # part-way through; previously the raise above skipped the
        # trailing f.close() and leaked the handle (and buffered data).
        f.close()
diff --git a/python/samba/logger.py b/python/samba/logger.py
new file mode 100644
index 0000000..a35ef2a
--- /dev/null
+++ b/python/samba/logger.py
@@ -0,0 +1,69 @@
+# Samba common functions
+#
+# Copyright (C) Joe Guo <joeg@catalyst.net.nz>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import sys
+import logging
+from samba.colour import GREY, YELLOW, GREEN, RED, DARK_RED, C_NORMAL
+from samba.colour import is_colour_wanted
+
# Map each stdlib logging level to the terminal colour used for its
# records (most severe first).  Unlisted levels fall back to GREY.
LEVEL_COLORS = {
    logging.CRITICAL: DARK_RED,
    logging.ERROR: RED,
    logging.WARNING: YELLOW,
    logging.INFO: GREEN,
    logging.DEBUG: GREY,
}
+
+
class ColoredFormatter(logging.Formatter):
    """Formatter that colours each record according to its level."""

    def format(self, record):
        colour = LEVEL_COLORS.get(record.levelno, GREY)
        message = super().format(record)
        # Reset the colour after every record so later output is clean.
        return "%s%s%s" % (colour, message, C_NORMAL)
+
+
def get_samba_logger(
        name='samba', stream=sys.stderr,
        level=None, verbose=False, quiet=False,
        fmt=('%(levelname)s %(asctime)s pid:%(process)d '
             '%(pathname)s #%(lineno)d: %(message)s'),
        datefmt=None):
    """
    Get a logger instance and config it.

    :param name: logger name (shared across calls with the same name)
    :param stream: stream the handler writes to
    :param level: explicit level; when falsy it is derived from the
        verbose/quiet flags (verbose wins over quiet)
    :param fmt: format string for emitted records
    :param datefmt: date format passed to the formatter
    """
    logger = logging.getLogger(name)

    if not level:
        # if level not specified, map options to level
        if verbose:
            level = logging.DEBUG
        elif quiet:
            level = logging.WARNING
        else:
            level = logging.INFO

    logger.setLevel(level)

    formatter_class = (ColoredFormatter if is_colour_wanted(stream)
                       else logging.Formatter)
    formatter = formatter_class(fmt=fmt, datefmt=datefmt)

    handler = logging.StreamHandler(stream=stream)
    handler.setFormatter(formatter)
    # NOTE(review): each call appends a new handler to the (shared)
    # logger, so calling twice with the same name duplicates output --
    # confirm callers only configure a given name once.
    logger.addHandler(handler)

    return logger
diff --git a/python/samba/mdb_util.py b/python/samba/mdb_util.py
new file mode 100644
index 0000000..688e066
--- /dev/null
+++ b/python/samba/mdb_util.py
@@ -0,0 +1,43 @@
+# Unix SMB/CIFS implementation.
+# mdb util helpers
+#
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2018
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import subprocess
+import os
+from samba.netcmd import CommandError
+
+
def mdb_copy(file1, file2):
    """Copy mdb file using mdb_copy utility and rename it.

    :param file1: source mdb database path
    :param file2: destination path for the copied database
    :raises CommandError: if the mdb_copy tool cannot be found
    :raises subprocess.CalledProcessError: if mdb_copy exits non-zero
    """
    import shutil  # stdlib; only needed here

    # Find the location of the mdb_copy tool.  shutil.which handles an
    # unset PATH (the old manual scan crashed on os.getenv('PATH') being
    # None) and also checks the file is actually executable.
    toolpath = shutil.which("mdb_copy")
    if toolpath is None:
        raise CommandError("mdb_copy not found. "
                           "You may need to install the lmdb-utils package")

    # Copy under a temporary name first, then rename into place, so the
    # destination never holds a half-written database.
    tmp_path = "%s.copy.mdb" % file1
    mdb_copy_cmd = [toolpath, "-n", file1, tmp_path]
    # check_call raises on failure; the previously captured (and unused)
    # return status was dropped.
    subprocess.check_call(mdb_copy_cmd, close_fds=True, shell=False)

    os.rename(tmp_path, file2)
diff --git a/python/samba/ms_display_specifiers.py b/python/samba/ms_display_specifiers.py
new file mode 100644
index 0000000..ae48dce
--- /dev/null
+++ b/python/samba/ms_display_specifiers.py
@@ -0,0 +1,195 @@
+# Create DisplaySpecifiers LDIF (as a string) from the documents provided by
+# Microsoft under the WSPP.
+#
+# Copyright (C) Andrew Kroeger <andrew@id10ts.net> 2009
+#
+# Based on ms_schema.py
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import re
+
+
def __read_folded_line(f, buffer):
    """Read one logical LDIF line from *f*, unfolding continuations.

    *buffer* carries the read-ahead physical line left over from the
    previous call (or "" when there is none).

    :return: tuple (line, buffer) where *line* is the unfolded logical
        line ("" only at end of file) and *buffer* is the read-ahead
        for the next call
    """
    line = buffer

    while True:
        raw = f.readline()

        if raw.startswith(" "):
            # Continuation: append to the pending logical line.
            # Cannot fold an empty line; the '\n ' marker is preserved.
            assert line != "" and line != "\n"
            line += raw
        elif line == "":
            # Nothing pending yet: this physical line starts the logical
            # line.  Keep looping to see whether it gets folded, unless
            # we are already at EOF.
            line = raw
            if raw == "":
                break
        else:
            # A fresh non-continued line terminates the pending logical
            # line; stash it as read-ahead for the next call.
            buffer = raw
            break

    return (line, buffer)
+
+
# Only compile regexp once.
# Matches the attribute type at the start of an LDIF line.
# Will not match options (e.g. ";binary") after the attribute type.
attr_type_re = re.compile("^([A-Za-z][A-Za-z0-9-]*):")
+
+
def __read_raw_entries(f):
    """Read an LDIF entry, only unfolding lines.

    Generator yielding one entry (list of unfolded attribute lines)
    per blank-line-separated block.  Exits the process on a malformed
    line (historic behaviour, kept for script compatibility).
    """
    # BUGFIX: this module only imports `re` at the top level, so the
    # error path below raised NameError on `sys` when this file was
    # used as a library; import it locally instead.
    import sys

    buffer = ""

    while True:
        entry = []

        while True:
            (l, buffer) = __read_folded_line(f, buffer)

            # Comment lines are dropped entirely.
            if l[:1] == "#":
                continue

            # Blank line ends the entry; "" means end of file.
            if l == "\n" or l == "":
                break

            m = attr_type_re.match(l)

            if m:
                if l[-1:] == "\n":
                    l = l[:-1]

                entry.append(l)
            else:
                print("Invalid line: %s" % l, end=' ', file=sys.stderr)
                sys.exit(1)

        if len(entry):
            yield entry

        if l == "":
            break
+
+
def fix_dn(dn):
    """Fix a string DN to use ${CONFIGDN}"""
    placeholder = "<Configuration NC Distinguished Name>"
    if placeholder not in dn:
        return dn
    # Remove line-fold artefacts before substituting the placeholder.
    return dn.replace("\n ", "").replace(placeholder, "${CONFIGDN}")
+
+
def __write_ldif_one(entry):
    """Render one parsed entry as LDIF text.

    *entry* is a list of [attr, value, flag] triples where a non-zero
    flag marks a base64-encoded value (written with '::').
    """
    rendered = []

    for attr, value, is_base64 in entry:
        separator = "::" if is_base64 else ":"
        rendered.append("%s%s %s" % (attr, separator, value))

    return "\n".join(rendered)
+
+
def __transform_entry(entry):
    """Perform required transformations to the Microsoft-provided LDIF.

    Returns a list of [attr, value, base64-flag] triples with
    provisioning-supplied attributes removed and DNs templated.
    """
    # Attributes dropped entirely: provisioning computes these itself.
    skipped_keys = ("changetype", "distinguishedname", "instancetype",
                    "name", "cn", "objectcategory")

    transformed = []

    for raw in entry:
        if "::" in raw:
            # This is a base64-encoded value
            parts = raw.split(":: ", 1)
            parts.append(1)
        else:
            parts = raw.split(": ", 1)
            parts.append(0)

        key = parts[0].lower()

        if key in skipped_keys:
            continue

        if key == "showinadvancedviewonly":
            if parts[1].upper().strip() == "TRUE":
                # Remove showInAdvancedViewOnly attribute if it is set
                # to the default value of TRUE
                continue

        parts[1] = fix_dn(parts[1])
        transformed.append(parts)

    return transformed
+
+
def read_ms_ldif(filename):
    """Read and transform Microsoft-provided LDIF file.

    :param filename: path to the WSPP LDIF document (latin-1 encoded)
    :return: transformed LDIF as a single string
    """
    entries = []

    with open(filename, "r", encoding='latin-1') as f:
        for raw_entry in __read_raw_entries(f):
            entries.append(__write_ldif_one(__transform_entry(raw_entry)))

    return "\n\n".join(entries) + "\n\n"
+
+
# Command-line entry point: print the transformed LDIF for the file
# given as the first argument.
if __name__ == '__main__':
    import sys

    try:
        display_specifiers_file = sys.argv[1]
    except IndexError:
        print("Usage: %s display-specifiers-ldif-file.txt" % (sys.argv[0]), file=sys.stderr)
        sys.exit(1)

    print(read_ms_ldif(display_specifiers_file))
diff --git a/python/samba/ms_forest_updates_markdown.py b/python/samba/ms_forest_updates_markdown.py
new file mode 100644
index 0000000..0a0d211
--- /dev/null
+++ b/python/samba/ms_forest_updates_markdown.py
@@ -0,0 +1,309 @@
+# Create forest updates ldif from Github markdown
+#
+# Each update is converted to an ldif then gets written to a corresponding
+# .LDF output file or stored in a dictionary.
+#
+# Only add updates can generally be applied.
+#
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""Generate LDIF from Github documentation."""
+
+import re
+import os
+import markdown
+import xml.etree.ElementTree as ET
+from samba.common import get_string
+
+
# Display specifier updates or otherwise (ignored in forest_update.py)
def noop(description, attributes, sd):
    """Ignore an update row entirely.

    :return: inert (change, dn, attributes, sd) tuple
    """
    return (None, None, [], None)
+
+
# ACE addition updates (ignored in forest_update.py)
def parse_grant(description, attributes, sd):
    """Parse a 'Granting ...' row into an LDIF modify tuple.

    :return: (change, dn, attribute-lines, sd) tuple
    """
    # e.g. Granting the "CN=Send-As,CN=Extended-Rights" to gMSA accounts.
    is_gmsa_grant = (description.startswith("Granting the ") and
                     description.endswith("to gMSA accounts.") and
                     bool(attributes) and attributes.lower() == 'n/a' and
                     bool(sd) and sd.lower() == 'n/a')
    if is_gmsa_grant:
        return ('modify', extract_dn_or_none(description),
                ['add: appliesTo',
                 'appliesTo: 7b8b558a-93a5-4af7-adca-c017e67f1057'],
                None)

    return ('modify', None, [], sd if sd.lower() != 'n/a' else None)
+
+
# Addition of new objects to the directory (most are applied in forest_update.py)
def parse_add(description, attributes, sd):
    """Parse a 'Created ...' row into an LDIF add tuple."""
    dn = extract_dn(description)
    security_descriptor = None if sd.lower() == 'n/a' else sd
    return ('add', dn, extract_attrib(dn, attributes), security_descriptor)
+
+
# Set of a particular attribute (ignored in forest_update.py)
def parse_set(description, attributes, sd):
    """Parse a 'Set'/'Updated' row into an LDIF modify tuple."""
    security_descriptor = None if sd.lower() == 'n/a' else sd
    return ('modify', extract_dn_or_none(description),
            extract_replace_attrib(attributes),
            security_descriptor)
+
+
# Set of a particular ACE (ignored in forest_update.py)
# The general issue is that the list of DNs must be generated dynamically
def parse_ace(description, attributes, sd):
    """Parse an 'Added ACE' row into a pair of LDIF modify tuples:
    one for the domain object's nTSecurityDescriptor and one for the
    schema class's defaultSecurityDescriptor."""

    def extract_dn_ace(text):
        # Only the two known object classes are handled here.
        if 'Sam-Domain' in text:
            return ('${DOMAIN_DN}', 'CN=Sam-Domain,${SCHEMA_DN}')
        elif 'Domain-DNS' in text:
            return ('${...}', 'CN=Domain-DNS,${SCHEMA_DN}')

        return None

    target_dns = extract_dn_ace(description)

    return [('modify', target_dns[0],
             ['replace: nTSecurityDescriptor',
              'nTSecurityDescriptor: ${DOMAIN_SCHEMA_SD}%s' % sd], None),
            ('modify', target_dns[1],
             ['replace: defaultSecurityDescriptor',
              'defaultSecurityDescriptor: ${OLD_SAMBA_SD}%s' % sd], None)]
+
+
# We are really only interested in 'Created' items.
# Dispatch table: markdown operation prefix -> parser returning one
# (change, dn, attribute-lines, sd) tuple (or a list for 'Added ACE').
operation_map = {
    # modify
    'Granting': parse_grant,
    # add
    'Created': parse_add,
    # modify
    'Set': parse_set,
    # modify
    'Added ACE': parse_ace,
    # modify
    'Updated': parse_set,
    # unknown
    'Call': noop
}
+
+
def extract_dn(text):
    """
    Extract a DN from the textual description
    :param text:
    :return: DN in string form
    """
    # Replace the prose partition suffixes with DN template variables.
    partition_suffixes = (
        (' in the Schema partition.', ',${SCHEMA_DN}'),
        (' in the Configuration partition.', ',${CONFIG_DN}'),
    )
    for needle, replacement in partition_suffixes:
        text = text.replace(needle, replacement)

    dn = re.search('([CDO][NCU]=.*?,)*([CDO][NCU]=.*)', text).group(0)

    # This should probably be also fixed upstream
    if dn == ('CN=ad://ext/AuthenticationSilo,CN=Claim Types,'
              'CN=Claims Configuration,CN=Services'):
        return dn + ',${CONFIG_DN}'

    # Granting the "CN=Send-As,CN=Extended-Rights" to gMSA accounts.
    if dn.endswith(',CN=Extended-Rights" to gMSA accounts.'):
        dn = dn.replace('" to gMSA accounts.', '')
        return dn + ",${CONFIG_DN}"

    return dn
+
+
def extract_dn_or_none(text):
    """
    Same as extract_dn, but returns None if it doesn't work
    :param text:
    :return: DN or None
    """
    try:
        return extract_dn(text)
    except Exception:
        # extract_dn raises AttributeError when no DN-shaped text is
        # found (re.search returns None).  Previously a bare `except:`
        # also swallowed SystemExit/KeyboardInterrupt; narrow it.
        return None
+
+
def save_ldif(filename, answers, out_folder):
    """
    Save ldif to disk for each update
    :param filename: filename to use ([OPERATION NUM]-{GUID}.ldif)
    :param answers: array of (change, dn, attrib, sd) tuples
    :param out_folder: folder to prepend
    """
    path = os.path.join(out_folder, filename)
    with open(path, 'w') as ldif:
        for change, dn, attrib, sd in answers:
            chunks = ['dn: %s\n' % dn,
                      'changetype: %s\n' % change]
            if attrib:
                chunks.append('\n'.join(attrib) + '\n')
            if sd is not None:
                chunks.append('nTSecurityDescriptor: D:%s\n' % sd)
            chunks.append('-\n\n')
            ldif.write(''.join(chunks))
+
+
def save_array(guid, answers, out_dict):
    """
    Save ldif to an output dictionary
    :param guid: GUID to store
    :param answers: array of (change, dn, attrib, sd) tuples
    :param out_dict: output dictionary
    """
    chunks = []
    for change, dn, attrib, sd in answers:
        chunks.append('dn: %s\n' % dn)
        chunks.append('changetype: %s\n' % change)
        if attrib:
            chunks.append('\n'.join(attrib) + '\n')
        if sd is not None:
            chunks.append('nTSecurityDescriptor: D:%s\n' % sd)
        chunks.append('-\n\n')

    out_dict[guid] = ''.join(chunks)
+
+
def extract_attrib(dn, attributes):
    """
    Extract the attributes as an array from the attributes column
    :param dn: parsed from markdown (currently unused; kept for parity)
    :param attributes: from markdown
    :return: attribute array (ldif-type format)
    """
    attrib = []
    for chunk in attributes.split('- '):
        # Filter on the raw chunk (before stripping), as upstream does.
        if chunk == '' or chunk.lower() == 'n/a':
            continue
        item = chunk.lstrip('- ')
        # Markdown carries Python-style booleans; LDIF wants TRUE/FALSE.
        if item.endswith(': True'):
            item = item.replace(': True', ': TRUE')
        elif item.endswith(': False'):
            item = item.replace(': False', ': FALSE')
        # We only have one such value, we may as well skip them all
        # consistently
        if item.lower().startswith('msds-claimpossiblevalues'):
            continue
        attrib.append(item)

    return attrib
+
+
def extract_replace_attrib(attributes):
    """
    Extract the attributes as an array from the attributes column
    (for replace)
    :param attributes: from markdown
    :return: attribute array (ldif-type format), each value preceded
        by its 'replace: <attr>' directive
    """
    out = []
    for chunk in attributes.split('- '):
        # Filter on the raw chunk (before stripping), as upstream does.
        if chunk == '' or chunk.lower() == 'n/a':
            continue
        line = chunk.lstrip('- ')
        out.append('replace: %s' % line.split(':')[0])
        out.append(line)
    return out
+
+
def innertext(tag):
    """Return all text inside *tag* recursively, with markup removed."""
    pieces = [tag.text or '']
    for child in tag:
        pieces.append(innertext(child))
    pieces.append(tag.tail or '')
    return ''.join(pieces)
+
+
def read_ms_markdown(in_file, out_folder=None, out_dict=None):
    """
    Read Github documentation to produce forest wide updates
    :param in_file: Forest-Wide-Updates.md
    :param out_folder: output folder; when None, results are stored in
        out_dict instead of written to .ldif files
    :param out_dict: output dictionary keyed by operation GUID
    """

    with open(in_file) as update_file:
        # There is a hidden ClaimPossibleValues in this md file
        content = update_file.read()

        content = re.sub(r'<p>',
                         '<br />',
                         content)
        content = re.sub(r'CN=\\<forest root domain',
                         'CN=<forest root domain',
                         content)

        content = re.sub(r'CN=<forest root domain.*?>',
                         '${FOREST_ROOT_DOMAIN}',
                         content)

        html = markdown.markdown(content,
                                 output_format='xhtml')

        html = html.replace('CN=Schema,%ws', '${SCHEMA_DN}')

        tree = ET.fromstring('<root>' + html + '</root>')

        for node in tree:
            if not node.text:
                continue
            updates = None
            # The markdown tables come in two pipe-delimited flavours
            # ('|Operation...' vs '| Operation ...'); split accordingly.
            if node.text.startswith('|Operation'):
                # Strip first and last |
                updates = [x[1:len(x) - 1].split('|') for x in
                           get_string(ET.tostring(node, method='text')).splitlines()]
            elif node.text.startswith('| Operation'):
                # Strip first and last |
                updates = [x[2:len(x) - 2].split(' | ') for x in
                           get_string(ET.tostring(node, method='text')).splitlines()]
            if updates:
                # updates[0:2] are the table header and separator rows.
                for update in updates[2:]:
                    output = re.match(r'Operation (\d+): {(.*)}', update[0])
                    if output:
                        # print output.group(1), output.group(2)
                        guid = output.group(2)
                        filename = "%s-{%s}.ldif" % (output.group(1).zfill(4), guid)

                        found = False

                        # NOTE(review): column indices differ here --
                        # update[3] (SD column) vs update[1] (description);
                        # confirm against the table layout in the .md file.
                        if update[3].startswith('Created') or update[1].startswith('Added ACE'):
                            # Trigger the security descriptor code
                            # Reduce info to just the security descriptor
                            update[3] = update[3].split(':')[-1]

                            result = parse_ace(update[1], update[2], update[3])

                            if filename and out_folder is not None:
                                save_ldif(filename, result, out_folder)
                            else:
                                save_array(guid, result, out_dict)

                            continue

                        for operation in operation_map:
                            if update[1].startswith(operation):
                                found = True

                                result = operation_map[operation](update[1], update[2], update[3])

                                if filename and out_folder is not None:
                                    save_ldif(filename, [result], out_folder)
                                else:
                                    save_array(guid, [result], out_dict)

                                break

                        # An unrecognised operation is a hard error: the
                        # raw table row is surfaced for diagnosis.
                        if not found:
                            raise Exception(update)

            # print ET.tostring(node, method='text')
+
+
# Command-line entry point: convert the given Forest-Wide-Updates.md
# into per-operation .ldif files in the (optional) output folder.
if __name__ == '__main__':
    import sys

    out_folder = ''

    # BUGFIX: sys.argv always contains at least the script name, so the
    # previous `len(sys.argv) == 0` test could never fire and a missing
    # argument crashed with IndexError instead of printing usage.
    if len(sys.argv) < 2:
        print("Usage: %s <Forest-Wide-Updates.md> [<output folder>]" % (sys.argv[0]), file=sys.stderr)
        sys.exit(1)

    in_file = sys.argv[1]
    if len(sys.argv) > 2:
        out_folder = sys.argv[2]

    read_ms_markdown(in_file, out_folder)
diff --git a/python/samba/ms_schema.py b/python/samba/ms_schema.py
new file mode 100644
index 0000000..986ae3d
--- /dev/null
+++ b/python/samba/ms_schema.py
@@ -0,0 +1,337 @@
+# create schema.ldif (as a string) from WSPP documentation
+#
+# based on minschema.py and minschema_wspp
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""Generate LDIF from WSPP documentation."""
+
+import re
+import base64
+import uuid
+
# Maps lower-cased attribute names to {flag-name: bit-position} tables
# used by __convert_bitfield() to evaluate "FLAG_A | FLAG_B" expressions.
bitFields = {}

# ADTS: 2.2.9
# bit positions as labeled in the docs (31 is the least significant bit,
# i.e. positions count from the most significant end)
bitFields["searchflags"] = {
    'fATTINDEX': 31,  # IX
    'fPDNTATTINDEX': 30,  # PI
    'fANR': 29,  # AR
    'fPRESERVEONDELETE': 28,  # PR
    'fCOPY': 27,  # CP
    'fTUPLEINDEX': 26,  # TP
    'fSUBTREEATTINDEX': 25,  # ST
    'fCONFIDENTIAL': 24,  # CF
    'fCONFIDENTAIL': 24,  # typo (kept deliberately: appears in the docs)
    'fNEVERVALUEAUDIT': 23,  # NV
    'fRODCAttribute': 22,  # RO


    # missing in ADTS but required by LDIF
    'fRODCFilteredAttribute': 22,  # RO
    'fRODCFILTEREDATTRIBUTE': 22,  # case
    'fEXTENDEDLINKTRACKING': 21,  # XL
    'fBASEONLY': 20,  # BO
    'fPARTITIONSECRET': 19,  # SE
}

# ADTS: 2.2.10
bitFields["systemflags"] = {
    'FLAG_ATTR_NOT_REPLICATED': 31, 'FLAG_CR_NTDS_NC': 31,  # NR
    'FLAG_ATTR_REQ_PARTIAL_SET_MEMBER': 30, 'FLAG_CR_NTDS_DOMAIN': 30,  # PS
    'FLAG_ATTR_IS_CONSTRUCTED': 29, 'FLAG_CR_NTDS_NOT_GC_REPLICATED': 29,  # CS
    'FLAG_ATTR_IS_OPERATIONAL': 28,  # OP
    'FLAG_SCHEMA_BASE_OBJECT': 27,  # BS
    'FLAG_ATTR_IS_RDN': 26,  # RD
    'FLAG_DISALLOW_MOVE_ON_DELETE': 6,  # DE
    'FLAG_DOMAIN_DISALLOW_MOVE': 5,  # DM
    'FLAG_DOMAIN_DISALLOW_RENAME': 4,  # DR
    'FLAG_CONFIG_ALLOW_LIMITED_MOVE': 3,  # AL
    'FLAG_CONFIG_ALLOW_MOVE': 2,  # AM
    'FLAG_CONFIG_ALLOW_RENAME': 1,  # AR
    'FLAG_DISALLOW_DELETE': 0  # DD
}

# ADTS: 2.2.11
bitFields["schemaflagsex"] = {
    'FLAG_ATTR_IS_CRITICAL': 31
}

# ADTS: 3.1.1.2.2.2
# Maps oMSyntax OID strings to the base64 of their BER encoding, as stored
# in the oMObjectClass attribute.
oMObjectClassBER = {
    '1.3.12.2.1011.28.0.702': base64.b64encode(b'\x2B\x0C\x02\x87\x73\x1C\x00\x85\x3E').decode('utf8'),
    '1.2.840.113556.1.1.1.12': base64.b64encode(b'\x2A\x86\x48\x86\xF7\x14\x01\x01\x01\x0C').decode('utf8'),
    '2.6.6.1.2.5.11.29': base64.b64encode(b'\x56\x06\x01\x02\x05\x0B\x1D').decode('utf8'),
    '1.2.840.113556.1.1.1.11': base64.b64encode(b'\x2A\x86\x48\x86\xF7\x14\x01\x01\x01\x0B').decode('utf8'),
    '1.3.12.2.1011.28.0.714': base64.b64encode(b'\x2B\x0C\x02\x87\x73\x1C\x00\x85\x4A').decode('utf8'),
    '1.3.12.2.1011.28.0.732': base64.b64encode(b'\x2B\x0C\x02\x87\x73\x1C\x00\x85\x5C').decode('utf8'),
    '1.2.840.113556.1.1.1.6': base64.b64encode(b'\x2A\x86\x48\x86\xF7\x14\x01\x01\x01\x06').decode('utf8')
}

# separated by commas in docs, and must be broken up
multivalued_attrs = set(["auxiliaryclass", "maycontain", "mustcontain", "posssuperiors",
                         "systemauxiliaryclass", "systemmaycontain", "systemmustcontain",
                         "systemposssuperiors"])
+
+
def __read_folded_line(f, buffer):
    """Read one logical line from an LDIF-like file, unfolding it.

    :param f: open text file to read from
    :param buffer: the carried-over first physical line of this logical
        line, left behind by the previous call ("" on the first call)
    :return: tuple (line, buffer) where line is the unfolded logical line
        ("" at EOF) and buffer is the start of the NEXT possibly-folded
        line, to be passed back in on the next call
    """
    line = buffer

    # matches "attrName:" or "attrName::" at the start of a physical line;
    # a line that does not match is treated as a continuation even without
    # the leading-space folding marker
    attr_type_re = re.compile("^([A-Za-z][A-Za-z0-9-]*[A-Za-z0-9])::?")

    while True:
        l = f.readline()

        if l[:1] == " ":
            # continued line

            # cannot fold an empty line
            assert(line != "" and line != "\n")

            # preserves '\n '
            line = line + l
        else:
            # non-continued line
            if line == "":
                line = l

                if l == "":
                    # eof, definitely won't be folded
                    break
            else:
                if l[:1] != "#" and l != "\n" and l != "":
                    m = attr_type_re.match(l)
                    if not m:
                        # not a new attribute: join it to the current
                        # logical line with a space and keep reading
                        line = line + " " + l
                        continue

                # marks end of a folded line
                # line contains the now unfolded line
                # buffer contains the start of the next possibly folded line
                buffer = l
                break

    return (line, buffer)
+
+
def __read_raw_entries(f):
    """Yield LDIF-like entries from *f*, one list of unfolded lines each.

    Each yielded entry is a list of "attr: value" strings with trailing
    newlines stripped.  Comment lines are skipped; a line that does not
    start with an attribute type aborts the program via sys.exit(1).
    """
    import sys

    # will not match options after the attribute type
    # attributes in the schema definition have at least two chars
    attr_type_re = re.compile("^([A-Za-z][A-Za-z0-9-]*[A-Za-z0-9])::?")

    # carries the start of the next folded line between
    # __read_folded_line() calls
    buffer = ""

    while True:
        entry = []

        while True:
            (l, buffer) = __read_folded_line(f, buffer)

            if l[:1] == "#":
                continue

            # blank line ends the entry; "" means EOF
            if l == "\n" or l == "":
                break

            m = attr_type_re.match(l)

            if m:
                if l[-1:] == "\n":
                    l = l[:-1]

                entry.append(l)
            else:
                print("Invalid line: %s" % l, end=' ', file=sys.stderr)
                sys.exit(1)

        if len(entry):
            yield entry

        if l == "":
            # EOF reached
            break
+
+
def fix_dn(dn):
    """Rewrite a schema DN so its naming context becomes ${SCHEMADN}.

    Handles the three placeholder forms used by the WSPP documentation:
    "<RootDomainDN>" (possibly LDIF-folded and containing spaces),
    "DC=X" and "CN=X".  Any other DN is returned unchanged.
    """
    if "<RootDomainDN>" in dn:
        # Undo LDIF folding and drop embedded spaces before substituting.
        collapsed = dn.replace("\n ", "").replace(" ", "")
        return collapsed.replace("CN=Schema,CN=Configuration,<RootDomainDN>",
                                 "${SCHEMADN}")

    for placeholder in ("DC=X", "CN=X"):
        if dn.endswith(placeholder):
            return dn.replace("CN=Schema,CN=Configuration," + placeholder,
                              "${SCHEMADN}")

    return dn
+
+
def __convert_bitfield(key, value):
    """Evaluate the OR expression in 'value'.

    :param key: lower-cased attribute name, used to pick the flag table
        from the module-level bitFields dict
    :param value: either a decimal string or a "FLAG_A | FLAG_B" expression
    :return: the resulting integer as a decimal string
    """
    assert(isinstance(value, str))

    # undo LDIF folding and remove all spaces so the flag names can be
    # split cleanly on '|'
    value = value.replace("\n ", "")
    value = value.replace(" ", "")

    try:
        # some attributes already have numeric values
        o = int(value)
    except ValueError:
        o = 0
        flags = value.split("|")
        for f in flags:
            bitpos = bitFields[key][f]
            # the docs label bit 31 as the least significant bit, hence
            # the (31 - bitpos) shift
            o = o | (1 << (31 - bitpos))

    return str(o)
+
+
def __write_ldif_one(entry):
    """Write out entry as LDIF.

    :param entry: list of [attr, value(s), base64_flag] triples, where
        value(s) is a str or a list of str and base64_flag selects the
        "attr:: value" (already base64-encoded) form
    :return: the entry rendered as a single LDIF string
    """
    out = []

    for l in entry:
        # normalise single values to a one-element list
        if isinstance(l[1], str):
            vl = [l[1]]
        else:
            vl = l[1]

        if l[2]:
            # base64 values are always single strings
            out.append("%s:: %s" % (l[0], l[1]))
            continue

        for v in vl:
            out.append("%s: %s" % (l[0], v))

    return "\n".join(out)
+
+
def __transform_entry(entry, objectClass):
    """Perform transformations required to convert the LDIF-like schema
    file entries to LDIF, including Samba-specific stuff.

    :param entry: list of "attr: value" strings from __read_raw_entries()
    :param objectClass: objectClass to add when the entry lacks one
        ("attributeSchema" or "classSchema")
    :return: list of [attr, value(s), base64_flag] triples suitable for
        __write_ldif_one()
    """

    # split each line into [attr, rest]; the transformations below mutate
    # these lists in place
    entry = [l.split(":", 1) for l in entry]

    cn = ""
    skip_dn = skip_objectclass = skip_admin_description = skip_admin_display_name = False

    for l in entry:
        # "attr:: value" marks a base64 value; record that as a third
        # list element (the base64 flag)
        if l[1].startswith(': '):
            l.append(True)
            l[1] = l[1][2:]
        else:
            l.append(False)

        key = l[0].lower()
        l[1] = l[1].lstrip()
        l[1] = l[1].rstrip()

        # remember the first cn seen; used to synthesise missing headers
        if not cn and key == "cn":
            cn = l[1]

        if key in multivalued_attrs:
            # unlike LDIF, these are comma-separated
            l[1] = l[1].replace("\n ", "")
            l[1] = l[1].replace(" ", "")

            l[1] = l[1].split(",")

        if key in bitFields:
            l[1] = __convert_bitfield(key, l[1])

        if key == "omobjectclass":
            # replace the dotted OID with its BER encoding (base64)
            if not l[2]:
                l[1] = oMObjectClassBER[l[1].strip()]
                l[2] = True

        if isinstance(l[1], str):
            l[1] = fix_dn(l[1])

        if key == 'dn':
            skip_dn = True
            dn = l[1]

        if key == 'objectclass':
            skip_objectclass = True
        elif key == 'admindisplayname':
            skip_admin_display_name = True
        elif key == 'admindescription':
            skip_admin_description = True

    assert(cn)

    # synthesise any header attributes the source entry did not provide
    header = []
    if not skip_dn:
        header.append(["dn", "CN=%s,${SCHEMADN}" % cn, False])
    else:
        header.append(["dn", dn, False])

    if not skip_objectclass:
        header.append(["objectClass", ["top", objectClass], False])
    if not skip_admin_description:
        header.append(["adminDescription", cn, False])
    if not skip_admin_display_name:
        header.append(["adminDisplayName", cn, False])

    # every schema object gets a fresh objectGUID
    header.append(["objectGUID", str(uuid.uuid4()), False])

    # the header always carries dn (and friends); drop duplicates and
    # attributes that are not wanted in the output
    entry = header + [x for x in entry if x[0].lower() not in set(['dn', 'changetype', 'objectcategory'])]

    return entry
+
+
def __parse_schema_file(filename, objectClass):
    """Load and transform a schema file.

    :param filename: path to the WSPP attributes or classes source file
    :param objectClass: objectClass applied to entries lacking one
        ("attributeSchema" or "classSchema")
    :return: the file's entries as LDIF, separated by blank lines
    """

    out = []

    # The documentation files are latin-1 encoded.  The built-in open()
    # accepts encoding= directly on Python 3; the previous
    # "from io import open" was a redundant Python 2 leftover.
    with open(filename, "r", encoding='latin-1') as f:
        for entry in __read_raw_entries(f):
            out.append(__write_ldif_one(__transform_entry(entry, objectClass)))

    return "\n\n".join(out)
+
+
def read_ms_schema(attr_file, classes_file, dump_attributes=True, dump_classes=True, debug=False):
    """Read WSPP documentation-derived schema files.

    :param attr_file: path to the attributes definition file
    :param classes_file: path to the classes definition file
    :param dump_attributes: include the attribute schema in the output
    :param dump_classes: include the class schema in the output
    :param debug: unused; kept for interface compatibility
    :return: combined LDIF text for the requested sections
    """

    attr_ldif = __parse_schema_file(attr_file, "attributeSchema") if dump_attributes else ""
    classes_ldif = __parse_schema_file(classes_file, "classSchema") if dump_classes else ""

    return "%s\n\n%s\n\n" % (attr_ldif, classes_ldif)
+
+
if __name__ == '__main__':
    import sys

    # Two positional arguments are required: the attributes file and the
    # classes file (equivalent to the old try/except IndexError guard).
    if len(sys.argv) < 3:
        print("Usage: %s attr-file.txt classes-file.txt" % (sys.argv[0]), file=sys.stderr)
        sys.exit(1)

    attr_file, classes_file = sys.argv[1], sys.argv[2]

    print(read_ms_schema(attr_file, classes_file))
diff --git a/python/samba/ms_schema_markdown.py b/python/samba/ms_schema_markdown.py
new file mode 100644
index 0000000..8a9ad78
--- /dev/null
+++ b/python/samba/ms_schema_markdown.py
@@ -0,0 +1,78 @@
+# Create schema.ldif from Github markdown
+#
+# Each LDF section in the markdown file then gets written to a corresponding
+# .LDF output file.
+#
+# Copyright (C) Andrew Bartlett 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""Generate LDIF from Github documentation."""
+
+import re
+import os
+import markdown
+import xml.etree.ElementTree as ET
+
+
def innertext(tag):
    """Return all text inside *tag* in document order.

    Concatenates the element's own text, the full inner text of every
    child (recursively, including each child's tail), and the element's
    tail text.
    """
    parts = [tag.text or '']
    for child in tag:
        parts.append(innertext(child))
    parts.append(tag.tail or '')
    return ''.join(parts)
+
+
def read_ms_markdown(in_file, out_folder):
    """Read Github documentation-derived schema files.

    Converts the markdown in *in_file* to XHTML and walks the resulting
    elements: each <h3> heading names a new .LDF output file in
    *out_folder*, <p> content is written (with ``` fences stripped) to the
    currently open file, and an <h2> heading closes the current file.

    :param in_file: path to the markdown source (e.g. Schema-Update.md)
    :param out_folder: directory where the .LDF files are created
    """

    with open(in_file) as update_file:
        # Remove any comments from the raw LDF files
        html = markdown.markdown(re.sub(r'(?m)^# .*\n?', '', update_file.read()),
                                 output_format='xhtml')

    # wrap in a synthetic root element so the fragment parses as one tree
    tree = ET.fromstring('<root>' + html + '</root>')

    ldf = None
    try:
        for node in tree:
            if node.tag == 'h3':
                # new section: close the previous output file, if any
                if ldf is not None:
                    ldf.close()

                # the heading text is the output file name
                # NOTE(review): opened without an explicit encoding, so the
                # platform default applies — confirm if non-ASCII content
                # is expected
                out_path = os.path.join(out_folder, innertext(node).strip())
                ldf = open(out_path, 'w')
            elif node.tag == 'h2':
                if ldf is not None:
                    ldf.close()
                    ldf = None
            elif node.tag == 'p' and ldf is not None:
                ldf.write(innertext(node).replace('```', '') + '\n')
    finally:
        # make sure the last file is closed even if writing fails
        if ldf is not None:
            ldf.close()
+
+
if __name__ == '__main__':
    import sys

    out_folder = ''

    # sys.argv always contains at least the program name, so the original
    # test "len(sys.argv) == 0" could never be true and a missing input
    # file crashed with IndexError instead of printing usage.
    if len(sys.argv) < 2:
        print("Usage: %s <Schema-Update.md> [<output folder>]" % (sys.argv[0]), file=sys.stderr)
        sys.exit(1)

    in_file = sys.argv[1]
    if len(sys.argv) > 2:
        out_folder = sys.argv[2]

    read_ms_markdown(in_file, out_folder)
diff --git a/python/samba/ndr.py b/python/samba/ndr.py
new file mode 100644
index 0000000..4207ee2
--- /dev/null
+++ b/python/samba/ndr.py
@@ -0,0 +1,153 @@
+# -*- coding: utf-8 -*-
+
+# Unix SMB/CIFS implementation.
+# Copyright © Jelmer Vernooij <jelmer@samba.org> 2008
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+
+"""Network Data Representation (NDR) marshalling and unmarshalling."""
+
+
def ndr_pack(object):
    """Pack a NDR object.

    :param object: Object to pack
    :return: String object with marshalled object.
    :raises TypeError: if the object does not implement __ndr_pack__
    """
    pack_fn = getattr(object, "__ndr_pack__", None)
    if pack_fn is None:
        raise TypeError(f"{object!r} is not a NDR object")
    return pack_fn()
+
+
def ndr_unpack(cls, data, allow_remaining=False):
    """NDR unpack an object.

    :param cls: Class of the object to unpack
    :param data: Buffer to unpack
    :param allow_remaining: allows remaining data at the end (default=False)
    :return: Unpacked object
    :raises TypeError: if instances of cls do not implement __ndr_unpack__
    """
    result = cls()
    unpack_fn = getattr(result, "__ndr_unpack__", None)
    if unpack_fn is None:
        raise TypeError(f"{result!r} is not a NDR object")
    unpack_fn(data, allow_remaining=allow_remaining)
    return result
+
+
def ndr_print(object):
    """Return the pretty-printed representation of a NDR object.

    :param object: Object to print
    :raises TypeError: if the object does not implement __ndr_print__
    """
    print_fn = getattr(object, "__ndr_print__", None)
    if print_fn is None:
        raise TypeError(f"{object} is not a NDR object")
    return print_fn()
+
+
def ndr_deepcopy(object):
    """Create a deep copy of a NDR object, using pack/unpack

    :param object: Object to copy
    :return: The object copy
    :raises TypeError: if the object (or its fresh copy) is not NDR-capable
    """
    pack_fn = getattr(object, "__ndr_pack__", None)
    if pack_fn is None:
        raise TypeError(f"{object!r} is not a NDR object")
    wire = pack_fn()

    # round-trip through the wire format into a fresh instance
    duplicate = type(object)()
    unpack_fn = getattr(duplicate, "__ndr_unpack__", None)
    if unpack_fn is None:
        raise TypeError(f"{duplicate!r} is not a NDR object")
    unpack_fn(wire, allow_remaining=False)
    return duplicate
+
+
def ndr_pack_in(object, bigendian=False, ndr64=False):
    """Pack the input of an NDR function object.

    :param object: Object to pack
    :param bigendian: use LIBNDR_FLAG_BIGENDIAN (default=False)
    :param ndr64: use LIBNDR_FLAG_NDR64 (default=False)
    :return: String object with marshalled object.
    :raises TypeError: if the object does not implement __ndr_pack_in__
    """
    fn = getattr(object, "__ndr_pack_in__", None)
    if fn is None:
        raise TypeError(f"{object!r} is not a NDR function object")
    return fn(bigendian=bigendian, ndr64=ndr64)
+
+
def ndr_unpack_in(object, data, bigendian=False, ndr64=False, allow_remaining=False):
    """Unpack the input of an NDR function object.

    :param object: NDR function object whose input is unpacked in place
    :param data: Buffer to unpack
    :param bigendian: use LIBNDR_FLAG_BIGENDIAN (default=False)
    :param ndr64: use LIBNDR_FLAG_NDR64 (default=False)
    :param allow_remaining: allows remaining data at the end (default=False)
    :return: the same object, after unpacking
    :raises TypeError: if the object does not implement __ndr_unpack_in__
    """
    fn = getattr(object, "__ndr_unpack_in__", None)
    if fn is None:
        raise TypeError(f"{object!r} is not a NDR function object")
    fn(data, bigendian=bigendian, ndr64=ndr64,
       allow_remaining=allow_remaining)
    return object
+
+
def ndr_print_in(object):
    """Return the pretty-printed input of an NDR function object.

    :raises TypeError: if the object does not implement __ndr_print_in__
    """
    fn = getattr(object, "__ndr_print_in__", None)
    if fn is None:
        raise TypeError(f"{object!r} is not a NDR function object")
    return fn()
+
+
def ndr_pack_out(object, bigendian=False, ndr64=False):
    """Pack the output of an NDR function object.

    :param object: Object to pack
    :param bigendian: use LIBNDR_FLAG_BIGENDIAN (default=False)
    :param ndr64: use LIBNDR_FLAG_NDR64 (default=False)
    :return: String object with marshalled object.
    :raises TypeError: if the object does not implement __ndr_pack_out__
    """
    fn = getattr(object, "__ndr_pack_out__", None)
    if fn is None:
        raise TypeError(f"{object!r} is not a NDR function object")
    return fn(bigendian=bigendian, ndr64=ndr64)
+
+
def ndr_unpack_out(object, data, bigendian=False, ndr64=False, allow_remaining=False):
    """Unpack the output of an NDR function object.

    :param object: NDR function object whose output is unpacked in place
    :param data: Buffer to unpack
    :param bigendian: use LIBNDR_FLAG_BIGENDIAN (default=False)
    :param ndr64: use LIBNDR_FLAG_NDR64 (default=False)
    :param allow_remaining: allows remaining data at the end (default=False)
    :return: the same object, after unpacking
    :raises TypeError: if the object does not implement __ndr_unpack_out__
    """
    fn = getattr(object, "__ndr_unpack_out__", None)
    if fn is None:
        raise TypeError(f"{object!r} is not a NDR function object")
    fn(data, bigendian=bigendian, ndr64=ndr64,
       allow_remaining=allow_remaining)
    return object
+
+
def ndr_print_out(object):
    """Return the pretty-printed output of an NDR function object.

    :raises TypeError: if the object does not implement __ndr_print_out__
    """
    fn = getattr(object, "__ndr_print_out__", None)
    if fn is None:
        raise TypeError(f"{object!r} is not a NDR function object")
    return fn()
diff --git a/python/samba/netcmd/__init__.py b/python/samba/netcmd/__init__.py
new file mode 100644
index 0000000..7ddc1dc
--- /dev/null
+++ b/python/samba/netcmd/__init__.py
@@ -0,0 +1,396 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2009-2012
+# Copyright (C) Theresa Halloran <theresahalloran@gmail.com> 2011
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import json
+import optparse
+import sys
+import textwrap
+import traceback
+
+import samba
+from ldb import ERR_INVALID_CREDENTIALS, LdbError
+from samba import colour
+from samba.auth import system_session
+from samba.getopt import Option, OptionParser
+from samba.logger import get_samba_logger
+from samba.samdb import SamDB
+from samba.dcerpc.security import SDDLValueError
+
+from .encoders import JSONEncoder
+
+
class PlainHelpFormatter(optparse.IndentedHelpFormatter):
    """Help formatter that wraps each paragraph but preserves newlines."""

    def format_description(self, description=""):
        width = self.width - self.current_indent
        prefix = " " * self.current_indent
        wrapped = [
            textwrap.fill(paragraph,
                          width,
                          initial_indent=prefix,
                          subsequent_indent=prefix)
            for paragraph in description.split('\n')]
        return "\n".join(wrapped) + "\n"

    def format_epilog(self, epilog):
        if not epilog:
            return ""
        return "\n" + epilog + "\n"
+
+
class Command(object):
    """A samba-tool command."""

    def _get_short_description(self):
        # The first line of the subclass docstring is the short help text.
        return self.__doc__.splitlines()[0].rstrip("\n")

    short_description = property(_get_short_description)

    def _get_full_description(self):
        # Full docstring, dedented so it renders cleanly in help output.
        lines = self.__doc__.split("\n")
        return lines[0] + "\n" + textwrap.dedent("\n".join(lines[1:]))

    full_description = property(_get_full_description)

    def _get_name(self):
        # Derive the command name from the class name, dropping the
        # conventional "cmd_" prefix.
        name = self.__class__.__name__
        if name.startswith("cmd_"):
            return name[4:]
        return name

    name = property(_get_name)

    # synopsis must be defined in all subclasses in order to provide the
    # command usage
    synopsis = None
    takes_args = []
    takes_options = []
    takes_optiongroups = {}

    # hidden commands are omitted from a SuperCommand's subcommand listing
    hidden = False
    use_colour = True
    requested_colour = None

    # populated by _run() with the raw command line, for diagnostics
    raw_argv = None
    raw_args = None
    raw_kwargs = None

    def _set_files(self, outf=None, errf=None):
        """Override the output/error streams (used by _resolve and tests)."""
        if outf is not None:
            self.outf = outf
        if errf is not None:
            self.errf = errf

    def __init__(self, outf=sys.stdout, errf=sys.stderr):
        self._set_files(outf, errf)

    def usage(self, prog=None):
        """Print this command's usage line to stdout."""
        parser, _ = self._create_parser(prog)
        parser.print_usage()

    def _print_error(self, msg, evalue=None, klass=None):
        """Print a coloured "ERROR(klass): msg - evalue" line to errf."""
        err = colour.c_DARK_RED("ERROR")
        klass = '' if klass is None else f'({klass})'

        if evalue is None:
            print(f"{err}{klass}: {msg}", file=self.errf)
        else:
            print(f"{err}{klass}: {msg} - {evalue}", file=self.errf)

    def _print_sddl_value_error(self, e):
        """Render an SDDLValueError with a caret pointing at the bad spot."""
        generic_msg, specific_msg, position, sddl = e.args
        print(f"{colour.c_DARK_RED('ERROR')}: {generic_msg}\n",
              file=self.errf)
        print(f' {sddl}', file=self.errf)
        # If the SDDL contains non-ascii characters, the byte offset
        # provided by the exception won't agree with the visual offset
        # because those characters will be encoded as multiple bytes.
        #
        # To account for this we'll attempt to measure the string
        # length of the specified number of bytes. That is not quite
        # the same as the visual length, because the SDDL could
        # contain zero-width, full-width, or combining characters, but
        # it is closer.
        try:
            position = len((sddl.encode()[:position]).decode())
        except ValueError:
            # use the original position
            pass

        print(f"{colour.c_DARK_YELLOW('^'):>{position + 2}}", file=self.errf)
        print(f' {specific_msg}', file=self.errf)

    def ldb_connect(self, hostopts, sambaopts, credopts):
        """Helper to connect to Ldb database using command line opts."""
        lp = sambaopts.get_loadparm()
        creds = credopts.get_credentials(lp)
        return SamDB(hostopts.H, credentials=creds,
                     session_info=system_session(lp), lp=lp)

    def print_json(self, data):
        """Print json on the screen using consistent formatting and sorting.

        A custom JSONEncoder class is used to help with serializing unknown
        objects such as Dn for example.
        """
        json.dump(data, self.outf, cls=JSONEncoder, indent=2, sort_keys=True)
        self.outf.write("\n")

    def show_command_error(self, e):
        """display a command error"""
        # CommandError carries its own captured exception info; anything
        # else is an unexpected failure and forces a traceback.
        if isinstance(e, CommandError):
            (etype, evalue, etraceback) = e.exception_info
            inner_exception = e.inner_exception
            message = e.message
            force_traceback = False
        else:
            (etype, evalue, etraceback) = sys.exc_info()
            inner_exception = e
            message = "uncaught exception"
            force_traceback = True

        if isinstance(e, optparse.OptParseError):
            print(evalue, file=self.errf)
            self.usage()
            force_traceback = False

        elif isinstance(inner_exception, LdbError):
            (ldb_ecode, ldb_emsg) = inner_exception.args
            if ldb_ecode == ERR_INVALID_CREDENTIALS:
                print("Invalid username or password", file=self.errf)
                force_traceback = False
            elif ldb_emsg == 'LDAP client internal error: NT_STATUS_NETWORK_UNREACHABLE':
                print("Could not reach remote server", file=self.errf)
                force_traceback = False
            elif ldb_emsg.startswith("Unable to open tdb "):
                self._print_error(message, ldb_emsg, 'ldb')
                force_traceback = False
            else:
                self._print_error(message, ldb_emsg, 'ldb')

        elif isinstance(inner_exception, SDDLValueError):
            self._print_sddl_value_error(inner_exception)
            force_traceback = False

        elif isinstance(inner_exception, AssertionError):
            self._print_error(message, klass='assert')
            force_traceback = True
        elif isinstance(inner_exception, RuntimeError):
            self._print_error(message, evalue, 'runtime')
        elif type(inner_exception) is Exception:
            self._print_error(message, evalue, 'exception')
            force_traceback = True
        elif inner_exception is None:
            self._print_error(message)
        else:
            self._print_error(message, evalue, str(etype))

        if force_traceback or samba.get_debug_level() >= 3:
            traceback.print_tb(etraceback, file=self.errf)

    def _create_parser(self, prog=None, epilog=None):
        """Build this command's OptionParser and its option groups.

        :return: tuple (parser, optiongroups dict keyed by group name)
        """
        parser = OptionParser(
            usage=self.synopsis,
            description=self.full_description,
            formatter=PlainHelpFormatter(),
            prog=prog,
            epilog=epilog,
            option_class=Option)
        parser.add_options(self.takes_options)
        optiongroups = {}
        for name in sorted(self.takes_optiongroups.keys()):
            optiongroup = self.takes_optiongroups[name]
            optiongroups[name] = optiongroup(parser)
            parser.add_option_group(optiongroups[name])
        if self.use_colour:
            parser.add_option("--color",
                              help="use colour if available (default: auto)",
                              metavar="always|never|auto",
                              default="auto")

        return parser, optiongroups

    def message(self, text):
        """Write one line of normal output."""
        self.outf.write(text + "\n")

    def _resolve(self, path, *argv, outf=None, errf=None):
        """This is a leaf node, the command that will actually run."""
        self._set_files(outf, errf)
        self.command_name = path
        return (self, argv)

    def _run(self, *argv):
        """Parse argv, validate argument counts, then invoke run()."""
        parser, optiongroups = self._create_parser(self.command_name)

        # Handle possible validation errors raised by parser
        try:
            opts, args = parser.parse_args(list(argv))
        except Exception as e:
            self.show_command_error(e)
            return -1

        # Filter out options from option groups
        kwargs = dict(opts.__dict__)
        for option_group in parser.option_groups:
            for option in option_group.option_list:
                if option.dest is not None and option.dest in kwargs:
                    del kwargs[option.dest]
        kwargs.update(optiongroups)

        if self.use_colour:
            self.apply_colour_choice(kwargs.pop('color', 'auto'))

        # Check for a min and max number of allowed arguments, whenever possible
        # The suffix "?" means zero or one occurrence
        # The suffix "+" means at least one occurrence
        # The suffix "*" means zero or more occurrences
        min_args = 0
        max_args = 0
        undetermined_max_args = False
        for i, arg in enumerate(self.takes_args):
            if arg[-1] != "?" and arg[-1] != "*":
                min_args += 1
            if arg[-1] == "+" or arg[-1] == "*":
                undetermined_max_args = True
            else:
                max_args += 1
        if (len(args) < min_args) or (not undetermined_max_args and len(args) > max_args):
            parser.print_usage()
            return -1

        self.raw_argv = list(argv)
        self.raw_args = args
        self.raw_kwargs = kwargs

        try:
            return self.run(*args, **kwargs)
        except Exception as e:
            self.show_command_error(e)
            return -1

    def run(self, *args, **kwargs):
        """Run the command. This should be overridden by all subclasses."""
        raise NotImplementedError(f"'{self.command_name}' run method not implemented")

    def get_logger(self, name="", verbose=False, quiet=False, **kwargs):
        """Get a logger object."""
        return get_samba_logger(
            name=name or self.name, stream=self.errf,
            verbose=verbose, quiet=quiet,
            **kwargs)

    def apply_colour_choice(self, requested):
        """Heuristics to work out whether the user wants colour output, from a
        --color=yes|no|auto option. This alters the ANSI 16 bit colour
        "constants" in the colour module to be either real colours or empty
        strings.
        """
        self.requested_colour = requested
        try:
            colour.colour_if_wanted(self.outf,
                                    self.errf,
                                    hint=requested)
        except ValueError as e:
            raise CommandError(f"Unknown --color option: {requested} "
                               "please choose from always|never|auto")
+
+
class SuperCommand(Command):
    """A samba-tool command with subcommands."""

    synopsis = "%prog <subcommand>"

    # mapping of subcommand name -> Command instance; filled by subclasses
    subcommands = {}

    def _resolve(self, path, *args, outf=None, errf=None):
        """This is an internal node. We need to consume one of the args and
        find the relevant child, returning an instance of that Command.

        If there are no children, this SuperCommand will be returned
        and its _run() will do a --help like thing.
        """
        self.command_name = path
        self._set_files(outf, errf)

        # We collect up certain option arguments and pass them to the
        # leaf, which is why we iterate over args, though we really
        # expect to return in the first iteration.
        deferred_args = []

        for i, a in enumerate(args):
            if a in self.subcommands:
                # found the subcommand: recurse with the remaining args
                # plus any help/version flags seen before it
                sub_args = args[i + 1:] + tuple(deferred_args)
                sub_path = f'{path} {a}'

                sub = self.subcommands[a]
                return sub._resolve(sub_path, *sub_args, outf=outf, errf=errf)

            elif a in ['--help', 'help', None, '-h', '-V', '--version']:
                # we pass these to the leaf node.
                if a == 'help':
                    a = '--help'
                deferred_args.append(a)
                continue

            # they are talking nonsense
            print("%s: no such subcommand: %s\n" % (path, a), file=self.outf)
            return (self, [])

        # We didn't find a subcommand, but maybe we found e.g. --version
        print("%s: missing subcommand\n" % (path), file=self.outf)
        return (self, deferred_args)

    def _run(self, *argv):
        """Show help for this node: a list of the (non-hidden) subcommands."""
        epilog = "\nAvailable subcommands:\n"

        subcmds = sorted(self.subcommands.keys())
        max_length = max([len(c) for c in subcmds])
        for cmd_name in subcmds:
            cmd = self.subcommands[cmd_name]
            if cmd.hidden:
                continue
            # left-pad the name column to the longest subcommand name
            epilog += " %*s - %s\n" % (
                -max_length, cmd_name, cmd.short_description)

        epilog += ("For more help on a specific subcommand, please type: "
                   f"{self.command_name} <subcommand> (-h|--help)\n")

        parser, optiongroups = self._create_parser(self.command_name, epilog=epilog)
        opts, args = parser.parse_args(list(argv))

        # note: if argv had --help, parser.parse_args() will have
        # already done the .print_help() and attempted to exit with
        # return code 0, so we won't get here.
        parser.print_help()
        return -1
+
+
class CommandError(Exception):
    """An exception class for samba-tool Command errors.

    Captures the active sys.exc_info() at construction time so that
    show_command_error() can later print the triggering traceback.
    """

    def __init__(self, message, inner_exception=None):
        # Deliberately no super().__init__() call: BaseException.__new__
        # already records the constructor arguments in .args.
        self.message = message
        self.inner_exception = inner_exception
        self.exception_info = sys.exc_info()

    def __repr__(self):
        return f"CommandError({self.message})"
diff --git a/python/samba/netcmd/common.py b/python/samba/netcmd/common.py
new file mode 100644
index 0000000..2aa50c7
--- /dev/null
+++ b/python/samba/netcmd/common.py
@@ -0,0 +1,161 @@
+# common functions for samba-tool python commands
+#
+# Copyright Andrew Tridgell 2010
+# Copyright Giampaolo Lauria 2011 <lauria2@yahoo.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import re
+from samba.dcerpc import nbt
+from samba.net import Net
+from samba.netcmd import CommandError
+import ldb
+
+
# In MS AD, setting a timeout to '(never)' corresponds to this value:
# INT64_MIN, i.e. -2**63 in the -100 nanosecond tick representation.
NEVER_TIMESTAMP = int(-0x8000000000000000)
+
+
def _get_user_realm_domain(user, sam=None):
    r""" get the realm or the domain and the base user
    from user like:
    * username
    * DOMAIN\username
    * username@REALM

    A SamDB object can also be passed in to check
    our domain or realm against the obtained ones.

    :param user: user string in one of the forms above
    :param sam: optional SamDB used to validate the domain/realm
    :return: tuple (baseuser, realm, domain) — realm and domain are ""
        when absent from the input; baseuser is lower-cased, realm and
        domain upper-cased
    :raises CommandError: if sam is given and the domain/realm mismatches
    """
    baseuser = user
    m = re.match(r"(\w+)\\(\w+$)", user)
    if m:
        domain = m.group(1)
        baseuser = m.group(2)

        if sam is not None:
            our_domain = sam.domain_netbios_name()
            if domain.lower() != our_domain.lower():
                raise CommandError(f"Given domain '{domain}' does not match "
                                   f"our domain '{our_domain}'!")

        return (baseuser.lower(), "", domain.upper())

    realm = ""
    m = re.match(r"(\w+)@(\w+)", user)
    if m:
        baseuser = m.group(1)
        # NOTE: \w+ captures only the first DNS label of the realm
        # (user@EXAMPLE.COM yields "EXAMPLE"); consistently, the check
        # below compares against the first label of our realm.
        realm = m.group(2)

        if sam is not None:
            our_realm = sam.domain_dns_name()
            our_realm_initial = our_realm.split('.', 1)[0]
            if realm.lower() != our_realm_initial.lower():
                raise CommandError(f"Given realm '{realm}' does not match our "
                                   f"realm '{our_realm}'!")

    # plain "username" falls through here with realm == ""
    return (baseuser.lower(), realm.upper(), "")
+
+
def netcmd_dnsname(lp):
    """Return the full DNS name of our own host, lower-cased.

    Built from the 'netbios name' and 'realm' smb.conf parameters; used
    as a default hostname when running status queries.
    """
    hostname = lp.get('netbios name').lower()
    dnsdomain = lp.get('realm').lower()
    return "%s.%s" % (hostname, dnsdomain)
+
+
def netcmd_finddc(lp, creds, realm=None):
    """Return the DNS name of a writable, LDAP-capable DC.

    The default domain (smb.conf parameter "realm") is searched unless
    another realm is passed explicitly.
    """
    if realm is None:
        realm = lp.get('realm')
    flags = nbt.NBT_SERVER_LDAP | nbt.NBT_SERVER_DS | nbt.NBT_SERVER_WRITABLE
    cldap_ret = Net(creds=creds, lp=lp).finddc(domain=realm, flags=flags)
    return cldap_ret.pdc_dns_name
+
+
def netcmd_get_domain_infos_via_cldap(lp, creds, address=None):
    """Return the CLDAP domain information record of the LDAP-capable
    DC at the given address."""
    flags = nbt.NBT_SERVER_LDAP | nbt.NBT_SERVER_DS
    return Net(creds=creds, lp=lp).finddc(address=address, flags=flags)
+
def is_printable_attr_val(val):
    """Return True if *val* can be displayed as plain text.

    A value is printable when it can be converted to str and contains no
    characters of the Unicode category "C" ("Other": control, format,
    surrogate, private-use and unassigned characters).
    """
    import unicodedata

    # The value must be convertible to a string value.  Only catch
    # Exception here: the previous bare "except:" also swallowed
    # KeyboardInterrupt and SystemExit.
    try:
        str_val = str(val)
    except Exception:
        return False

    # Characters of the Unicode Character Category "C" ("Other") are
    # supposed to be not printable. The category "C" includes control
    # characters, format specifier and others.
    for c in str_val:
        if unicodedata.category(c)[0] == 'C':
            return False

    return True
+
def get_ldif_for_editor(samdb, msg):
    """Render *msg* as LDIF suitable for hand-editing.

    Attributes whose values are all printable are forced to plain
    (non-base64) LDIF so the user can edit them directly.

    :param samdb: Ldb/SamDB instance used to write the LDIF
    :param msg: ldb.Message to render (not modified)
    :return: the LDIF text (changetype none)
    """

    # Copy the given message, because we do not
    # want to modify the original message.
    m = ldb.Message()
    m.dn = msg.dn

    for k in msg.keys():
        if k == "dn":
            continue
        vals = msg[k]
        m[k] = vals
        # only force plain LDIF when every value of the attribute is
        # printable
        need_base64 = False
        for v in vals:
            if is_printable_attr_val(v):
                continue
            need_base64 = True
            break
        if not need_base64:
            m[k].set_flags(ldb.FLAG_FORCE_NO_BASE64_LDIF)

    result_ldif = samdb.write_ldif(m, ldb.CHANGETYPE_NONE)

    return result_ldif
+
+
def timestamp_to_mins(timestamp_str):
    """Convert a timestamp in -100 nanosecond units to minutes.

    A timestamp of 'never' (INT64_MIN) is reported as 0; that works for
    most settings and displays better than converting
    -0x8000000000000000 to minutes.
    """
    ticks = int(timestamp_str)
    if ticks == NEVER_TIMESTAMP:
        return 0
    # 1e7 ticks per second, 60 seconds per minute
    return abs(ticks) / (1e7 * 60)
+
+
def timestamp_to_days(timestamp_str):
    """Convert a timestamp in -100 nanosecond units to days."""
    minutes_per_day = 60 * 24
    return timestamp_to_mins(timestamp_str) / minutes_per_day
+
+
def attr_default(msg, attrname, default):
    """Return the first value of *attrname* from an LDAP message,
    or *default* when the attribute is absent."""
    if attrname not in msg:
        return default
    return msg[attrname][0]
diff --git a/python/samba/netcmd/computer.py b/python/samba/netcmd/computer.py
new file mode 100644
index 0000000..1413803
--- /dev/null
+++ b/python/samba/netcmd/computer.py
@@ -0,0 +1,729 @@
+# machine account (computer) management
+#
+# Copyright Bjoern Baumbch <bb@sernet.de> 2018
+#
+# based on user management
+# Copyright Jelmer Vernooij 2010 <jelmer@samba.org>
+# Copyright Theresa Halloran 2011 <theresahalloran@gmail.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import samba.getopt as options
+import ldb
+import socket
+import samba
+import re
+import os
+import tempfile
+from samba import sd_utils
+from samba.dcerpc import dnsserver, dnsp, security
+from samba.dnsserver import ARecord, AAAARecord
+from samba.ndr import ndr_unpack, ndr_pack, ndr_print
+from samba.remove_dc import remove_dns_references
+from samba.auth import system_session
+from samba.samdb import SamDB
+from samba.common import get_bytes
+from subprocess import check_call, CalledProcessError
+from . import common
+
+from samba import (
+ credentials,
+ dsdb,
+ Ldb,
+ werror,
+ WERRORError
+)
+
+from samba.netcmd import (
+ Command,
+ CommandError,
+ SuperCommand,
+ Option,
+)
+
+def _is_valid_ip(ip_string, address_families=None):
+ """Check ip string is valid address"""
+ # by default, check both ipv4 and ipv6
+ if not address_families:
+ address_families = [socket.AF_INET, socket.AF_INET6]
+
+ for address_family in address_families:
+ try:
+ socket.inet_pton(address_family, ip_string)
+ return True # if no error, return directly
+ except socket.error:
+ continue # Otherwise, check next family
+ return False
+
+
def _is_valid_ipv4(ip_string):
    """Check ip string is valid ipv4 address"""
    # Convenience wrapper around _is_valid_ip restricted to AF_INET.
    return _is_valid_ip(ip_string, address_families=[socket.AF_INET])
+
+
def _is_valid_ipv6(ip_string):
    """Check ip string is valid ipv6 address"""
    # Convenience wrapper around _is_valid_ip restricted to AF_INET6.
    return _is_valid_ip(ip_string, address_families=[socket.AF_INET6])
+
+
def add_dns_records(
        samdb, name, dns_conn, change_owner_sd,
        server, ip_address_list, logger):
    """Add DNS A or AAAA records while creating computer.

    Existing A/AAAA records for the name are deleted first, then one
    record per entry of *ip_address_list* is added.  Finally ownership
    of the DNS node is transferred via *change_owner_sd* so the machine
    account owns its own record.

    :param samdb: SamDB connection to the domain
    :param name: machine account name (a trailing '$' is stripped)
    :param dns_conn: dnsserver RPC connection
    :param change_owner_sd: security descriptor carrying the new owner
        and group SIDs for the DNS record
    :param server: DNS server to send the updates to
    :param ip_address_list: IPv4/IPv6 address strings to register
    :param logger: logger used for progress messages
    :raises ValueError: if an address is neither valid IPv4 nor IPv6
    """
    name = name.rstrip('$')
    client_version = dnsserver.DNS_CLIENT_VERSION_LONGHORN
    select_flags = dnsserver.DNS_RPC_VIEW_AUTHORITY_DATA | dnsserver.DNS_RPC_VIEW_NO_CHILDREN
    zone = samdb.domain_dns_name()
    name_found = True
    sd_helper = sd_utils.SDUtils(samdb)

    try:
        buflen, res = dns_conn.DnssrvEnumRecords2(
            client_version,
            0,
            server,
            zone,
            name,
            None,
            dnsp.DNS_TYPE_ALL,
            select_flags,
            None,
            None,
        )
    except WERRORError as e:
        if e.args[0] == werror.WERR_DNS_ERROR_NAME_DOES_NOT_EXIST:
            name_found = False
        else:
            # Fix: previously any other RPC error was silently swallowed
            # here while name_found stayed True, so 'res' was referenced
            # below while unbound and a confusing NameError replaced the
            # real failure.  Propagate the genuine error instead.
            raise

    if name_found:
        # Remove all existing A/AAAA records so the new address list
        # fully replaces them.
        for rec in res.rec:
            for record in rec.records:
                if record.wType == dnsp.DNS_TYPE_A or record.wType == dnsp.DNS_TYPE_AAAA:
                    # delete record
                    del_rec_buf = dnsserver.DNS_RPC_RECORD_BUF()
                    del_rec_buf.rec = record
                    try:
                        dns_conn.DnssrvUpdateRecord2(
                            client_version,
                            0,
                            server,
                            zone,
                            name,
                            None,
                            del_rec_buf,
                        )
                    except WERRORError as e:
                        # The record vanishing concurrently is harmless.
                        if e.args[0] != werror.WERR_DNS_ERROR_NAME_DOES_NOT_EXIST:
                            raise

    for ip_address in ip_address_list:
        if _is_valid_ipv6(ip_address):
            logger.info("Adding DNS AAAA record %s.%s for IPv6 IP: %s" % (
                name, zone, ip_address))
            rec = AAAARecord(ip_address)
        elif _is_valid_ipv4(ip_address):
            logger.info("Adding DNS A record %s.%s for IPv4 IP: %s" % (
                name, zone, ip_address))
            rec = ARecord(ip_address)
        else:
            raise ValueError('Invalid IP: {}'.format(ip_address))

        # Add record
        add_rec_buf = dnsserver.DNS_RPC_RECORD_BUF()
        add_rec_buf.rec = rec

        dns_conn.DnssrvUpdateRecord2(
            client_version,
            0,
            server,
            zone,
            name,
            add_rec_buf,
            None,
        )

    if (len(ip_address_list) > 0):
        domaindns_zone_dn = ldb.Dn(
            samdb,
            'DC=DomainDnsZones,%s' % samdb.get_default_basedn(),
        )

        dns_a_dn, ldap_record = samdb.dns_lookup(
            "%s.%s" % (name, zone),
            dns_partition=domaindns_zone_dn,
        )

        # Make the DC own the DNS record, not the administrator
        sd_helper.modify_sd_on_dn(
            dns_a_dn,
            change_owner_sd,
            controls=["sd_flags:1:%d" % (security.SECINFO_OWNER | security.SECINFO_GROUP)],
        )
+
+
class cmd_computer_add(Command):
    """Add a new computer.

This command adds a new computer account to the Active Directory domain.
The computername specified on the command is the sAMaccountName without the
trailing $ (dollar sign).

Computer accounts may represent physical entities, such as workstations. Computer
accounts are also referred to as security principals and are assigned a
security identifier (SID).

Example1:
samba-tool computer add Computer1 -H ldap://samba.samdom.example.com \\
    -Uadministrator%passw1rd

Example1 shows how to add a new computer to the domain against a remote LDAP
server. The -H parameter is used to specify the remote target server. The -U
option is used to pass the userid and password authorized to issue the command
remotely.

Example2:
sudo samba-tool computer add Computer2

Example2 shows how to add a new computer to the domain against the local
server. sudo is used so a user may run the command as root.

Example3:
samba-tool computer add Computer3 --computerou='OU=OrgUnit'

Example3 shows how to add a new computer in the OrgUnit organizational unit.

"""
    synopsis = "%prog <computername> [options]"

    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server",
               type=str, metavar="URL", dest="H"),
        Option("--computerou",
               help=("DN of alternative location (with or without domainDN "
                     "counterpart) to default CN=Computers in which new "
                     "computer object will be created. E.g. 'OU=<OU name>'"),
               type=str),
        Option("--description", help="Computer's description", type=str),
        Option("--prepare-oldjoin",
               help="Prepare enabled machine account for oldjoin mechanism",
               action="store_true"),
        Option("--ip-address",
               dest='ip_address_list',
               help=("IPv4 address for the computer's A record, or IPv6 "
                     "address for AAAA record, can be provided multiple "
                     "times"),
               action='append'),
        Option("--service-principal-name",
               dest='service_principal_name_list',
               help=("Computer's Service Principal Name, can be provided "
                     "multiple times"),
               action='append')
    ]

    takes_args = ["computername"]

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "credopts": options.CredentialsOptions,
        "versionopts": options.VersionOptions,
    }

    def run(self, computername, credopts=None, sambaopts=None, versionopts=None,
            H=None, computerou=None, description=None, prepare_oldjoin=False,
            ip_address_list=None, service_principal_name_list=None):

        # Normalise the optional list arguments; the option parser leaves
        # them as None when the option was never supplied.
        if ip_address_list is None:
            ip_address_list = []

        if service_principal_name_list is None:
            service_principal_name_list = []

        # check each IP address if provided
        for ip_address in ip_address_list:
            if not _is_valid_ip(ip_address):
                raise CommandError('Invalid IP address {}'.format(ip_address))

        lp = sambaopts.get_loadparm()
        creds = credopts.get_credentials(lp)

        try:
            samdb = SamDB(url=H, session_info=system_session(),
                          credentials=creds, lp=lp)
            samdb.newcomputer(computername, computerou=computerou,
                              description=description,
                              prepare_oldjoin=prepare_oldjoin,
                              ip_address_list=ip_address_list,
                              service_principal_name_list=service_principal_name_list,
                              )

            if ip_address_list:
                # if ip_address_list provided, then we need to create DNS
                # records for this computer.

                # Strip the trailing '$' for the DNS host name; a '$'
                # anywhere else makes the name illegal.
                hostname = re.sub(r"\$$", "", computername)
                if hostname.count('$'):
                    raise CommandError('Illegal computername "%s"' % computername)

                filters = '(&(sAMAccountName={}$)(objectclass=computer))'.format(
                    ldb.binary_encode(hostname))

                recs = samdb.search(
                    base=samdb.domain_dn(),
                    scope=ldb.SCOPE_SUBTREE,
                    expression=filters,
                    attrs=['primaryGroupID', 'objectSid'])

                # The freshly created machine account becomes the owner
                # of its own DNS record; its primary group becomes the
                # record's group.
                group = recs[0]['primaryGroupID'][0]
                owner = ndr_unpack(security.dom_sid, recs[0]["objectSid"][0])

                dns_conn = dnsserver.dnsserver(
                    "ncacn_ip_tcp:{}[sign]".format(samdb.host_dns_name()),
                    lp, creds)

                change_owner_sd = security.descriptor()
                change_owner_sd.owner_sid = owner
                change_owner_sd.group_sid = security.dom_sid(
                    "{}-{}".format(samdb.get_domain_sid(), group),
                )

                add_dns_records(
                    samdb, hostname, dns_conn,
                    change_owner_sd, samdb.host_dns_name(),
                    ip_address_list, self.get_logger())
        except Exception as e:
            raise CommandError("Failed to add computer '%s': " %
                               computername, e)

        self.outf.write("Computer '%s' added successfully\n" % computername)
+
+
class cmd_computer_delete(Command):
    """Delete a computer.

This command deletes a computer account from the Active Directory domain. The
computername specified on the command is the sAMAccountName without the
trailing $ (dollar sign).

Once the account is deleted, all permissions and memberships associated with
that account are deleted. If a new computer account is added with the same name
as a previously deleted account name, the new computer does not have the
previous permissions. The new account computer will be assigned a new security
identifier (SID) and permissions and memberships will have to be added.

The command may be run from the root userid or another authorized
userid. The -H or --URL= option can be used to execute the command against
a remote server.

Example1:
samba-tool computer delete Computer1 -H ldap://samba.samdom.example.com \\
    -Uadministrator%passw1rd

Example1 shows how to delete a computer in the domain against a remote LDAP
server. The -H parameter is used to specify the remote target server. The
--computername= and --password= options are used to pass the computername and
password of a computer that exists on the remote server and is authorized to
issue the command on that server.

Example2:
sudo samba-tool computer delete Computer2

Example2 shows how to delete a computer in the domain against the local server.
sudo is used so a computer may run the command as root.

"""
    synopsis = "%prog <computername> [options]"

    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server",
               type=str, metavar="URL", dest="H"),
    ]

    takes_args = ["computername"]
    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "credopts": options.CredentialsOptions,
        "versionopts": options.VersionOptions,
    }

    def run(self, computername, credopts=None, sambaopts=None,
            versionopts=None, H=None):
        lp = sambaopts.get_loadparm()
        creds = credopts.get_credentials(lp, fallback_machine=True)

        samdb = SamDB(url=H, session_info=system_session(),
                      credentials=creds, lp=lp)

        # Machine accounts are stored with a trailing '$'; accept the
        # name with or without it.
        samaccountname = computername
        if not computername.endswith('$'):
            samaccountname = "%s$" % computername

        filter = ("(&(sAMAccountName=%s)(sAMAccountType=%u))" %
                  (ldb.binary_encode(samaccountname),
                   dsdb.ATYPE_WORKSTATION_TRUST))
        try:
            res = samdb.search(base=samdb.domain_dn(),
                               scope=ldb.SCOPE_SUBTREE,
                               expression=filter,
                               attrs=["userAccountControl", "dNSHostName"])
            # res[0] raises IndexError when no matching account exists.
            computer_dn = res[0].dn
            computer_ac = int(res[0]["userAccountControl"][0])
            if "dNSHostName" in res[0]:
                computer_dns_host_name = str(res[0]["dNSHostName"][0])
            else:
                computer_dns_host_name = None
        except IndexError:
            raise CommandError('Unable to find computer "%s"' % computername)

        # Refuse to delete anything that is not a plain workstation
        # account, to avoid accidentally removing e.g. a DC.
        computer_is_workstation = (
            computer_ac & dsdb.UF_WORKSTATION_TRUST_ACCOUNT)
        if not computer_is_workstation:
            raise CommandError('Failed to remove computer "%s": '
                               'Computer is not a workstation - removal denied'
                               % computername)
        try:
            samdb.delete(computer_dn)
            if computer_dns_host_name:
                # Also clean up DNS records pointing at the host.
                remove_dns_references(
                    samdb, self.get_logger(), computer_dns_host_name,
                    ignore_no_name=True)
        except Exception as e:
            raise CommandError('Failed to remove computer "%s"' %
                               samaccountname, e)
        self.outf.write("Deleted computer %s\n" % computername)
+
+
class cmd_computer_edit(Command):
    """Modify Computer AD object.

    This command will allow editing of a computer account in the Active
    Directory domain. You will then be able to add or change attributes and
    their values.

    The computername specified on the command is the sAMaccountName with or
    without the trailing $ (dollar sign).

    The command may be run from the root userid or another authorized userid.

    The -H or --URL= option can be used to execute the command against a remote
    server.

    Example1:
    samba-tool computer edit Computer1 -H ldap://samba.samdom.example.com \\
        -U administrator --password=passw1rd

    Example1 shows how to edit a computers attributes in the domain against a
    remote LDAP server.

    The -H parameter is used to specify the remote target server.

    Example2:
    samba-tool computer edit Computer2

    Example2 shows how to edit a computers attributes in the domain against a
    local LDAP server.

    Example3:
    samba-tool computer edit Computer3 --editor=nano

    Example3 shows how to edit a computers attributes in the domain against a
    local LDAP server using the 'nano' editor.
    """
    synopsis = "%prog <computername> [options]"

    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server",
               type=str, metavar="URL", dest="H"),
        Option("--editor", help="Editor to use instead of the system default,"
               " or 'vi' if no system default is set.", type=str),
    ]

    takes_args = ["computername"]
    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "credopts": options.CredentialsOptions,
        "versionopts": options.VersionOptions,
    }

    def run(self, computername, credopts=None, sambaopts=None, versionopts=None,
            H=None, editor=None):
        lp = sambaopts.get_loadparm()
        creds = credopts.get_credentials(lp, fallback_machine=True)
        samdb = SamDB(url=H, session_info=system_session(),
                      credentials=creds, lp=lp)

        # Machine accounts are stored with a trailing '$'; accept the
        # name with or without it.
        samaccountname = computername
        if not computername.endswith('$'):
            samaccountname = "%s$" % computername

        filter = ("(&(sAMAccountType=%d)(sAMAccountName=%s))" %
                  (dsdb.ATYPE_WORKSTATION_TRUST,
                   ldb.binary_encode(samaccountname)))

        domaindn = samdb.domain_dn()

        try:
            res = samdb.search(base=domaindn,
                               expression=filter,
                               scope=ldb.SCOPE_SUBTREE)
            # res[0] raises IndexError when no matching account exists.
            computer_dn = res[0].dn
        except IndexError:
            raise CommandError('Unable to find computer "%s"' % (computername))

        if len(res) != 1:
            raise CommandError('Invalid number of results: for "%s": %d' %
                               ((computername), len(res)))

        msg = res[0]
        result_ldif = common.get_ldif_for_editor(samdb, msg)

        # Fall back to $EDITOR, then to 'vi', when no editor was given.
        if editor is None:
            editor = os.environ.get('EDITOR', 'vi')

        with tempfile.NamedTemporaryFile(suffix=".tmp") as t_file:
            t_file.write(get_bytes(result_ldif))
            t_file.flush()
            try:
                check_call([editor, t_file.name])
            except CalledProcessError:
                # Fix: re-raise the original error.  The previous
                # CalledProcessError("ERROR: ", e) passed the message
                # string as the 'returncode' constructor argument and
                # lost the actual exit status and command.
                raise
            with open(t_file.name) as edited_file:
                edited_message = edited_file.read()

        # Apply only the difference between the original entry and the
        # edited LDIF.
        msgs_edited = samdb.parse_ldif(edited_message)
        msg_edited = next(msgs_edited)[1]

        res_msg_diff = samdb.msg_diff(msg, msg_edited)
        if len(res_msg_diff) == 0:
            self.outf.write("Nothing to do\n")
            return

        try:
            samdb.modify(res_msg_diff)
        except Exception as e:
            raise CommandError("Failed to modify computer '%s': " %
                               computername, e)

        self.outf.write("Modified computer '%s' successfully\n" % computername)
+
class cmd_computer_list(Command):
    """List all computers."""

    synopsis = "%prog [options]"

    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server",
               type=str, metavar="URL", dest="H"),
        Option("-b", "--base-dn",
               help="Specify base DN to use",
               type=str),
        Option("--full-dn", dest="full_dn",
               default=False,
               action="store_true",
               help="Display DN instead of the sAMAccountName.")
    ]

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "credopts": options.CredentialsOptions,
        "versionopts": options.VersionOptions,
    }

    def run(self,
            sambaopts=None,
            credopts=None,
            versionopts=None,
            H=None,
            base_dn=None,
            full_dn=False):
        lp = sambaopts.get_loadparm()
        creds = credopts.get_credentials(lp, fallback_machine=True)

        samdb = SamDB(url=H, session_info=system_session(),
                      credentials=creds, lp=lp)

        # Match every workstation trust account in the search scope.
        filter = "(sAMAccountType=%u)" % (dsdb.ATYPE_WORKSTATION_TRUST)

        # Search below the requested base DN, or the whole domain.
        search_dn = samdb.domain_dn()
        if base_dn:
            search_dn = samdb.normalize_dn_in_domain(base_dn)

        res = samdb.search(search_dn,
                           scope=ldb.SCOPE_SUBTREE,
                           expression=filter,
                           attrs=["samaccountname"])
        if (len(res) == 0):
            return

        # Print either the full DN or the plain account name per entry.
        for msg in res:
            if full_dn:
                self.outf.write("%s\n" % msg.get("dn"))
            else:
                self.outf.write("%s\n" % msg.get("samaccountname", idx=0))
+
+
class cmd_computer_show(Command):
    """Display a computer AD object.

This command displays a computer account and its attributes in the Active
Directory domain.
The computername specified on the command is the sAMAccountName.

The command may be run from the root userid or another authorized
userid.

The -H or --URL= option can be used to execute the command against a remote
server.

Example1:
samba-tool computer show Computer1 -H ldap://samba.samdom.example.com \\
    -U administrator

Example1 shows how to display a computers attributes in the domain against a
remote LDAP server.

The -H parameter is used to specify the remote target server.

Example2:
samba-tool computer show Computer2

Example2 shows how to display a computers attributes in the domain against a
local LDAP server.

Example3:
samba-tool computer show Computer2 --attributes=objectSid,operatingSystem

Example3 shows how to display a computers objectSid and operatingSystem
attribute.
"""
    synopsis = "%prog <computername> [options]"

    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server",
               type=str, metavar="URL", dest="H"),
        Option("--attributes",
               help=("Comma separated list of attributes, "
                     "which will be printed."),
               type=str, dest="computer_attrs"),
    ]

    takes_args = ["computername"]
    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "credopts": options.CredentialsOptions,
        "versionopts": options.VersionOptions,
    }

    def run(self, computername, credopts=None, sambaopts=None, versionopts=None,
            H=None, computer_attrs=None):

        lp = sambaopts.get_loadparm()
        creds = credopts.get_credentials(lp, fallback_machine=True)
        samdb = SamDB(url=H, session_info=system_session(),
                      credentials=creds, lp=lp)

        # None means "all attributes" for samdb.search().
        attrs = None
        if computer_attrs:
            attrs = computer_attrs.split(",")

        # Machine accounts are stored with a trailing '$'; accept the
        # name with or without it.
        samaccountname = computername
        if not computername.endswith('$'):
            samaccountname = "%s$" % computername

        filter = ("(&(sAMAccountType=%d)(sAMAccountName=%s))" %
                  (dsdb.ATYPE_WORKSTATION_TRUST,
                   ldb.binary_encode(samaccountname)))

        domaindn = samdb.domain_dn()

        res = samdb.search(base=domaindn, expression=filter,
                           scope=ldb.SCOPE_SUBTREE, attrs=attrs)
        # Fix: test emptiness directly instead of assigning an unused
        # 'computer_dn' local merely to provoke an IndexError.
        if len(res) == 0:
            raise CommandError('Unable to find computer "%s"' %
                               samaccountname)

        for msg in res:
            computer_ldif = common.get_ldif_for_editor(samdb, msg)
            self.outf.write(computer_ldif)
+
+
class cmd_computer_move(Command):
    """Move a computer to an organizational unit/container."""

    synopsis = "%prog <computername> <new_ou_dn> [options]"

    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server",
               type=str, metavar="URL", dest="H"),
    ]

    takes_args = ["computername", "new_ou_dn"]
    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "credopts": options.CredentialsOptions,
        "versionopts": options.VersionOptions,
    }

    def run(self, computername, new_ou_dn, credopts=None, sambaopts=None,
            versionopts=None, H=None):
        lp = sambaopts.get_loadparm()
        creds = credopts.get_credentials(lp, fallback_machine=True)
        samdb = SamDB(url=H, session_info=system_session(),
                      credentials=creds, lp=lp)
        domain_dn = ldb.Dn(samdb, samdb.domain_dn())

        # Machine accounts are stored with a trailing '$'; accept the
        # name with or without it.
        samaccountname = computername
        if not computername.endswith('$'):
            samaccountname = "%s$" % computername

        filter = ("(&(sAMAccountName=%s)(sAMAccountType=%u))" %
                  (ldb.binary_encode(samaccountname),
                   dsdb.ATYPE_WORKSTATION_TRUST))
        try:
            res = samdb.search(base=domain_dn,
                               expression=filter,
                               scope=ldb.SCOPE_SUBTREE)
            # res[0] raises IndexError when no matching account exists.
            computer_dn = res[0].dn
        except IndexError:
            raise CommandError('Unable to find computer "%s"' % (computername))

        # The target OU may be given with or without the domainDN part.
        full_new_ou_dn = ldb.Dn(samdb, new_ou_dn)
        if not full_new_ou_dn.is_child_of(domain_dn):
            full_new_ou_dn.add_base(domain_dn)
        # Keep only the computer's RDN and rebase it onto the new OU.
        new_computer_dn = ldb.Dn(samdb, str(computer_dn))
        new_computer_dn.remove_base_components(len(computer_dn) -1)
        new_computer_dn.add_base(full_new_ou_dn)
        try:
            samdb.rename(computer_dn, new_computer_dn)
        except Exception as e:
            raise CommandError('Failed to move computer "%s"' % computername, e)
        self.outf.write('Moved computer "%s" to "%s"\n' %
                        (computername, new_ou_dn))
+
+
class cmd_computer(SuperCommand):
    """Computer management."""

    # "add" and "create" are aliases for the same operation.
    subcommands = {
        "add": cmd_computer_add(),
        "create": cmd_computer_add(),
        "delete": cmd_computer_delete(),
        "edit": cmd_computer_edit(),
        "list": cmd_computer_list(),
        "show": cmd_computer_show(),
        "move": cmd_computer_move(),
    }
diff --git a/python/samba/netcmd/contact.py b/python/samba/netcmd/contact.py
new file mode 100644
index 0000000..064a3ce
--- /dev/null
+++ b/python/samba/netcmd/contact.py
@@ -0,0 +1,861 @@
+# samba-tool contact management
+#
+# Copyright Bjoern Baumbach 2019 <bbaumbach@samba.org>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import samba.getopt as options
+import ldb
+import os
+import tempfile
+from subprocess import check_call, CalledProcessError
+from operator import attrgetter
+from samba.auth import system_session
+from samba.samdb import SamDB
+from samba import (
+ credentials,
+ dsdb,
+)
+from samba.net import Net
+
+from samba.netcmd import (
+ Command,
+ CommandError,
+ SuperCommand,
+ Option,
+)
+from samba.common import get_bytes
+from . import common
+
+
class cmd_add(Command):
    """Add a new contact.

    This command adds a new contact to the Active Directory domain.

    The name of the new contact can be specified by the first argument
    'contactname' or the --given-name, --initial and --surname arguments.
    If no 'contactname' is given, contact's name will be made up of the given
    arguments by combining the given-name, initials and surname. Each argument
    is optional. A dot ('.') will be appended to the initials automatically.

    Example1:
    samba-tool contact add "James T. Kirk" --job-title=Captain \\
        -H ldap://samba.samdom.example.com -UAdministrator%Passw1rd

    The example shows how to add a new contact to the domain against a remote
    LDAP server.

    Example2:
    samba-tool contact add --given-name=James --initials=T --surname=Kirk

    The example shows how to add a new contact to the domain against a local
    server. The resulting name is "James T. Kirk".
    """

    synopsis = "%prog [contactname] [options]"

    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server",
               type=str, metavar="URL", dest="H"),
        Option("--ou",
               help=("DN of alternative location (with or without domainDN "
                     "counterpart) in which the new contact will be created. "
                     "E.g. 'OU=<OU name>'. "
                     "Default is the domain base."),
               type=str),
        Option("--surname", help="Contact's surname", type=str),
        Option("--given-name", help="Contact's given name", type=str),
        Option("--initials", help="Contact's initials", type=str),
        Option("--display-name", help="Contact's display name", type=str),
        Option("--job-title", help="Contact's job title", type=str),
        Option("--department", help="Contact's department", type=str),
        Option("--company", help="Contact's company", type=str),
        Option("--description", help="Contact's description", type=str),
        Option("--mail-address", help="Contact's email address", type=str),
        Option("--internet-address", help="Contact's home page", type=str),
        Option("--telephone-number", help="Contact's phone number", type=str),
        Option("--mobile-number",
               help="Contact's mobile phone number",
               type=str),
        Option("--physical-delivery-office",
               help="Contact's office location",
               type=str),
    ]

    takes_args = ["fullcontactname?"]

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "credopts": options.CredentialsOptions,
        "versionopts": options.VersionOptions,
    }

    def run(self,
            fullcontactname=None,
            sambaopts=None,
            credopts=None,
            versionopts=None,
            H=None,
            ou=None,
            surname=None,
            given_name=None,
            initials=None,
            display_name=None,
            job_title=None,
            department=None,
            company=None,
            description=None,
            mail_address=None,
            internet_address=None,
            telephone_number=None,
            mobile_number=None,
            physical_delivery_office=None):

        lp = sambaopts.get_loadparm()
        creds = credopts.get_credentials(lp)

        try:
            samdb = SamDB(url=H,
                          session_info=system_session(),
                          credentials=creds,
                          lp=lp)
            # newcontact derives the CN from fullcontactname, or from
            # given name/initials/surname when it is None (see the class
            # docstring), and returns the name actually used.
            ret_name = samdb.newcontact(
                fullcontactname=fullcontactname,
                ou=ou,
                surname=surname,
                givenname=given_name,
                initials=initials,
                displayname=display_name,
                jobtitle=job_title,
                department=department,
                company=company,
                description=description,
                mailaddress=mail_address,
                internetaddress=internet_address,
                telephonenumber=telephone_number,
                mobilenumber=mobile_number,
                physicaldeliveryoffice=physical_delivery_office)
        except Exception as e:
            raise CommandError("Failed to add contact", e)

        self.outf.write("Contact '%s' added successfully\n" % ret_name)
+
+
class cmd_delete(Command):
    """Delete a contact.

    This command deletes a contact object from the Active Directory domain.

    The contactname specified on the command is the common name or the
    distinguished name of the contact object. The distinguished name of the
    contact can be specified with or without the domainDN component.

    Example:
    samba-tool contact delete Contact1 \\
        -H ldap://samba.samdom.example.com \\
        --username=Administrator --password=Passw1rd

    The example shows how to delete a contact in the domain against a remote
    LDAP server.
    """
    synopsis = "%prog <contactname> [options]"

    takes_options = [
        Option("-H",
               "--URL",
               help="LDB URL for database or target server",
               type=str,
               metavar="URL",
               dest="H"),
    ]

    takes_args = ["contactname"]

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "credopts": options.CredentialsOptions,
        "versionopts": options.VersionOptions,
    }

    def run(self,
            contactname,
            sambaopts=None,
            credopts=None,
            versionopts=None,
            H=None):
        lp = sambaopts.get_loadparm()
        creds = credopts.get_credentials(lp, fallback_machine=True)
        samdb = SamDB(url=H,
                      session_info=system_session(),
                      credentials=creds,
                      lp=lp)
        base_dn = samdb.domain_dn()
        scope = ldb.SCOPE_SUBTREE

        # By default the contact is looked up by common name anywhere in
        # the domain.
        filter = ("(&(objectClass=contact)(name=%s))" %
                  ldb.binary_encode(contactname))

        if contactname.upper().startswith("CN="):
            # contact is specified by DN
            filter = "(objectClass=contact)"
            scope = ldb.SCOPE_BASE
            try:
                base_dn = samdb.normalize_dn_in_domain(contactname)
            except Exception as e:
                raise CommandError('Invalid dn "%s": %s' %
                                   (contactname, e))

        try:
            res = samdb.search(base=base_dn,
                               scope=scope,
                               expression=filter,
                               attrs=["dn"])
            # res[0] raises IndexError when no contact matched.
            contact_dn = res[0].dn
        except IndexError:
            raise CommandError('Unable to find contact "%s"' % (contactname))

        # An ambiguous common name must be disambiguated by the caller;
        # list all candidates before aborting.
        if len(res) > 1:
            for msg in sorted(res, key=attrgetter('dn')):
                self.outf.write("found: %s\n" % msg.dn)
            raise CommandError("Multiple results for contact '%s'\n"
                               "Please specify the contact's full DN" %
                               contactname)

        try:
            samdb.delete(contact_dn)
        except Exception as e:
            raise CommandError('Failed to remove contact "%s"' % contactname, e)
        self.outf.write("Deleted contact %s\n" % contactname)
+
+
class cmd_list(Command):
    """List all contacts.
    """

    synopsis = "%prog [options]"

    takes_options = [
        Option("-H",
               "--URL",
               help="LDB URL for database or target server",
               type=str,
               metavar="URL",
               dest="H"),
        Option("-b", "--base-dn",
               help="Specify base DN to use.",
               type=str),
        Option("--full-dn",
               dest="full_dn",
               default=False,
               action='store_true',
               help="Display contact's full DN instead of the name."),
    ]

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "credopts": options.CredentialsOptions,
        "versionopts": options.VersionOptions,
    }

    def run(self,
            sambaopts=None,
            credopts=None,
            versionopts=None,
            H=None,
            base_dn=None,
            full_dn=False):
        lp = sambaopts.get_loadparm()
        creds = credopts.get_credentials(lp, fallback_machine=True)

        samdb = SamDB(url=H,
                      session_info=system_session(),
                      credentials=creds,
                      lp=lp)

        # Search below the requested base DN, or the whole domain.
        search_dn = samdb.domain_dn()
        if base_dn:
            search_dn = samdb.normalize_dn_in_domain(base_dn)

        res = samdb.search(search_dn,
                           scope=ldb.SCOPE_SUBTREE,
                           expression="(objectClass=contact)",
                           attrs=["name"])
        if (len(res) == 0):
            return

        if full_dn:
            # Full DNs are printed in sorted order.
            for msg in sorted(res, key=attrgetter('dn')):
                self.outf.write("%s\n" % msg.dn)
            return

        for msg in res:
            self.outf.write("%s\n" % msg.get("name", idx=0))
+
+
+class cmd_edit(Command):
+ """Modify a contact.
+
+ This command will allow editing of a contact object in the Active Directory
+ domain. You will then be able to add or change attributes and their values.
+
+ The contactname specified on the command is the common name or the
+ distinguished name of the contact object. The distinguished name of the
+ contact can be specified with or without the domainDN component.
+
+ The command may be run from the root userid or another authorized userid.
+
+ The -H or --URL= option can be used to execute the command against a remote
+ server.
+
+ Example1:
+ samba-tool contact edit Contact1 -H ldap://samba.samdom.example.com \\
+ -U Administrator --password=Passw1rd
+
+ Example1 shows how to edit a contact's attributes in the domain against a
+ remote LDAP server.
+
+ The -H parameter is used to specify the remote target server.
+
+ Example2:
+ samba-tool contact edit CN=Contact2,OU=people,DC=samdom,DC=example,DC=com
+
+ Example2 shows how to edit a contact's attributes in the domain against a
+ local server. The contact, which is located in the 'people' OU,
+ is specified by the full distinguished name.
+
+ Example3:
+ samba-tool contact edit Contact3 --editor=nano
+
+ Example3 shows how to edit a contact's attributes in the domain against a
+ local server using the 'nano' editor.
+ """
+ synopsis = "%prog <contactname> [options]"
+
+ takes_options = [
+ Option("-H",
+ "--URL",
+ help="LDB URL for database or target server",
+ type=str,
+ metavar="URL",
+ dest="H"),
+ Option("--editor",
+ help="Editor to use instead of the system default, "
+ "or 'vi' if no system default is set.",
+ type=str),
+ ]
+
+ takes_args = ["contactname"]
+ takes_optiongroups = {
+ "sambaopts": options.SambaOptions,
+ "credopts": options.CredentialsOptions,
+ "versionopts": options.VersionOptions,
+ }
+
+ def run(self,
+ contactname,
+ sambaopts=None,
+ credopts=None,
+ versionopts=None,
+ H=None,
+ editor=None):
+ lp = sambaopts.get_loadparm()
+ creds = credopts.get_credentials(lp, fallback_machine=True)
+ samdb = SamDB(url=H, session_info=system_session(),
+ credentials=creds, lp=lp)
+ base_dn = samdb.domain_dn()
+ scope = ldb.SCOPE_SUBTREE
+
+ filter = ("(&(objectClass=contact)(name=%s))" %
+ ldb.binary_encode(contactname))
+
+ if contactname.upper().startswith("CN="):
+ # contact is specified by DN
+ filter = "(objectClass=contact)"
+ scope = ldb.SCOPE_BASE
+ try:
+ base_dn = samdb.normalize_dn_in_domain(contactname)
+ except Exception as e:
+ raise CommandError('Invalid dn "%s": %s' %
+ (contactname, e))
+
+ try:
+ res = samdb.search(base=base_dn,
+ scope=scope,
+ expression=filter)
+ contact_dn = res[0].dn
+ except IndexError:
+ raise CommandError('Unable to find contact "%s"' % (contactname))
+
+ if len(res) > 1:
+ for msg in sorted(res, key=attrgetter('dn')):
+ self.outf.write("found: %s\n" % msg.dn)
+ raise CommandError("Multiple results for contact '%s'\n"
+ "Please specify the contact's full DN" %
+ contactname)
+
+ for msg in res:
+ result_ldif = common.get_ldif_for_editor(samdb, msg)
+
+ if editor is None:
+ editor = os.environ.get('EDITOR')
+ if editor is None:
+ editor = 'vi'
+
+ with tempfile.NamedTemporaryFile(suffix=".tmp") as t_file:
+ t_file.write(get_bytes(result_ldif))
+ t_file.flush()
+ try:
+ check_call([editor, t_file.name])
+ except CalledProcessError as e:
+ raise CalledProcessError("ERROR: ", e)
+ with open(t_file.name) as edited_file:
+ edited_message = edited_file.read()
+
+
+ msgs_edited = samdb.parse_ldif(edited_message)
+ msg_edited = next(msgs_edited)[1]
+
+ res_msg_diff = samdb.msg_diff(msg, msg_edited)
+ if len(res_msg_diff) == 0:
+ self.outf.write("Nothing to do\n")
+ return
+
+ try:
+ samdb.modify(res_msg_diff)
+ except Exception as e:
+ raise CommandError("Failed to modify contact '%s': " % contactname,
+ e)
+
+ self.outf.write("Modified contact '%s' successfully\n" % contactname)
+
+
+class cmd_show(Command):
+ """Display a contact.
+
+ This command displays a contact object with it's attributes in the Active
+ Directory domain.
+
+ The contactname specified on the command is the common name or the
+ distinguished name of the contact object. The distinguished name of the
+ contact can be specified with or without the domainDN component.
+
+ The command may be run from the root userid or another authorized userid.
+
+ The -H or --URL= option can be used to execute the command against a remote
+ server.
+
+ Example1:
+ samba-tool contact show Contact1 -H ldap://samba.samdom.example.com \\
+ -U Administrator --password=Passw1rd
+
+ Example1 shows how to display a contact's attributes in the domain against
+ a remote LDAP server.
+
+ The -H parameter is used to specify the remote target server.
+
+ Example2:
+ samba-tool contact show CN=Contact2,OU=people,DC=samdom,DC=example,DC=com
+
+ Example2 shows how to display a contact's attributes in the domain against
+ a local server. The contact, which is located in the 'people' OU, is
+ specified by the full distinguished name.
+
+ Example3:
+ samba-tool contact show Contact3 --attributes=mail,mobile
+
+ Example3 shows how to display a contact's mail and mobile attributes.
+ """
+ synopsis = "%prog <contactname> [options]"
+
+ takes_options = [
+ Option("-H",
+ "--URL",
+ help="LDB URL for database or target server",
+ type=str,
+ metavar="URL",
+ dest="H"),
+ Option("--attributes",
+ help=("Comma separated list of attributes, "
+ "which will be printed."),
+ type=str,
+ dest="contact_attrs"),
+ ]
+
+ takes_args = ["contactname"]
+ takes_optiongroups = {
+ "sambaopts": options.SambaOptions,
+ "credopts": options.CredentialsOptions,
+ "versionopts": options.VersionOptions,
+ }
+
+ def run(self,
+ contactname,
+ sambaopts=None,
+ credopts=None,
+ versionopts=None,
+ H=None,
+ contact_attrs=None):
+
+ lp = sambaopts.get_loadparm()
+ creds = credopts.get_credentials(lp, fallback_machine=True)
+ samdb = SamDB(url=H,
+ session_info=system_session(),
+ credentials=creds,
+ lp=lp)
+ base_dn = samdb.domain_dn()
+ scope = ldb.SCOPE_SUBTREE
+
+ attrs = None
+ if contact_attrs:
+ attrs = contact_attrs.split(",")
+
+ filter = ("(&(objectClass=contact)(name=%s))" %
+ ldb.binary_encode(contactname))
+
+ if contactname.upper().startswith("CN="):
+ # contact is specified by DN
+ filter = "(objectClass=contact)"
+ scope = ldb.SCOPE_BASE
+ try:
+ base_dn = samdb.normalize_dn_in_domain(contactname)
+ except Exception as e:
+ raise CommandError('Invalid dn "%s": %s' %
+ (contactname, e))
+
+ try:
+ res = samdb.search(base=base_dn,
+ expression=filter,
+ scope=scope,
+ attrs=attrs)
+ contact_dn = res[0].dn
+ except IndexError:
+ raise CommandError('Unable to find contact "%s"' % (contactname))
+
+ if len(res) > 1:
+ for msg in sorted(res, key=attrgetter('dn')):
+ self.outf.write("found: %s\n" % msg.dn)
+ raise CommandError("Multiple results for contact '%s'\n"
+ "Please specify the contact's DN" %
+ contactname)
+
+ for msg in res:
+ contact_ldif = common.get_ldif_for_editor(samdb, msg)
+ self.outf.write(contact_ldif)
+
+
+class cmd_move(Command):
+ """Move a contact object to an organizational unit or container.
+
+ The contactname specified on the command is the common name or the
+ distinguished name of the contact object. The distinguished name of the
+ contact can be specified with or without the domainDN component.
+
+ The name of the organizational unit or container can be specified as the
+ distinguished name, with or without the domainDN component.
+
+ The command may be run from the root userid or another authorized userid.
+
+ The -H or --URL= option can be used to execute the command against a remote
+ server.
+
+ Example1:
+ samba-tool contact move Contact1 'OU=people' \\
+ -H ldap://samba.samdom.example.com -U Administrator
+
+ Example1 shows how to move a contact Contact1 into the 'people'
+ organizational unit on a remote LDAP server.
+
+ The -H parameter is used to specify the remote target server.
+
+ Example2:
+ samba-tool contact move Contact1 OU=Contacts,DC=samdom,DC=example,DC=com
+
+ Example2 shows how to move a contact Contact1 into the OU=Contacts
+ organizational unit on the local server.
+ """
+
+ synopsis = "%prog <contactname> <new_parent_dn> [options]"
+
+ takes_options = [
+ Option("-H",
+ "--URL",
+ help="LDB URL for database or target server",
+ type=str,
+ metavar="URL",
+ dest="H"),
+ ]
+
+ takes_args = ["contactname", "new_parent_dn"]
+ takes_optiongroups = {
+ "sambaopts": options.SambaOptions,
+ "credopts": options.CredentialsOptions,
+ "versionopts": options.VersionOptions,
+ }
+
+ def run(self,
+ contactname,
+ new_parent_dn,
+ sambaopts=None,
+ credopts=None,
+ versionopts=None,
+ H=None):
+ lp = sambaopts.get_loadparm()
+ creds = credopts.get_credentials(lp, fallback_machine=True)
+ samdb = SamDB(url=H,
+ session_info=system_session(),
+ credentials=creds,
+ lp=lp)
+ base_dn = samdb.domain_dn()
+ scope = ldb.SCOPE_SUBTREE
+
+ filter = ("(&(objectClass=contact)(name=%s))" %
+ ldb.binary_encode(contactname))
+
+ if contactname.upper().startswith("CN="):
+ # contact is specified by DN
+ filter = "(objectClass=contact)"
+ scope = ldb.SCOPE_BASE
+ try:
+ base_dn = samdb.normalize_dn_in_domain(contactname)
+ except Exception as e:
+ raise CommandError('Invalid dn "%s": %s' %
+ (contactname, e))
+
+ try:
+ res = samdb.search(base=base_dn,
+ scope=scope,
+ expression=filter,
+ attrs=["dn"])
+ contact_dn = res[0].dn
+ except IndexError:
+ raise CommandError('Unable to find contact "%s"' % (contactname))
+
+ if len(res) > 1:
+ for msg in sorted(res, key=attrgetter('dn')):
+ self.outf.write("found: %s\n" % msg.dn)
+ raise CommandError("Multiple results for contact '%s'\n"
+ "Please specify the contact's full DN" %
+ contactname)
+
+ try:
+ full_new_parent_dn = samdb.normalize_dn_in_domain(new_parent_dn)
+ except Exception as e:
+ raise CommandError('Invalid new_parent_dn "%s": %s' %
+ (new_parent_dn, e))
+
+ full_new_contact_dn = ldb.Dn(samdb, str(contact_dn))
+ full_new_contact_dn.remove_base_components(len(contact_dn) - 1)
+ full_new_contact_dn.add_base(full_new_parent_dn)
+
+ try:
+ samdb.rename(contact_dn, full_new_contact_dn)
+ except Exception as e:
+ raise CommandError('Failed to move contact "%s"' % contactname, e)
+ self.outf.write('Moved contact "%s" into "%s"\n' %
+ (contactname, full_new_parent_dn))
+
+class cmd_rename(Command):
+ """Rename a contact and related attributes.
+
+ This command allows to set the contact's name related attributes.
+ The contact's new CN will be made up by combining the given-name, initials
+ and surname. A dot ('.') will be appended to the initials automatically, if
+ required.
+ Use the --force-new-cn option to specify the new CN manually and the
+ --reset-cn option to reset this changes.
+
+ Use an empty attribute value to remove the specified attribute.
+
+ The contactname specified on the command is the CN.
+
+ The command may be run locally from the root userid or another authorized
+ userid.
+
+ The -H or --URL= option can be used to execute the command against a remote
+ server.
+
+ Example1:
+ samba-tool contact rename "John Doe" --surname=Bloggs \\
+ --force-new-cn=John
+
+ Example1 shows how to change the surname ('sn' attribute) of a contact
+ 'John Doe' to 'Bloggs' and change the CN to 'John' on the local server.
+
+ Example2:
+ samba-tool contact rename "J Doe" --given-name=John
+ -H ldap://samba.samdom.example.com -U administrator
+
+ Example2 shows how to rename the given name of a contact 'J Doe' to
+ 'John'. The contact's cn will be renamed automatically, based on
+ the given name, initials and surname, if the previous CN is the
+ standard combination of the previous name attributes.
+ The -H parameter is used to specify the remote target server.
+ """
+
+ synopsis = "%prog <contactname> [options]"
+
+ takes_options = [
+ Option("-H", "--URL",
+ help="LDB URL for database or target server",
+ type=str, metavar="URL", dest="H"),
+ Option("--surname",
+ help="New surname",
+ type=str),
+ Option("--given-name",
+ help="New given name",
+ type=str),
+ Option("--initials",
+ help="New initials",
+ type=str),
+ Option("--force-new-cn",
+ help="Specify a new CN (RDN) instead of using a combination "
+ "of the given name, initials and surname.",
+ type=str, metavar="NEW_CN"),
+ Option("--reset-cn",
+ help="Set the CN (RDN) to the combination of the given name, "
+ "initials and surname. Use this option to reset "
+ "the changes made with the --force-new-cn option.",
+ action="store_true"),
+ Option("--display-name",
+ help="New display name",
+ type=str),
+ Option("--mail-address",
+ help="New email address",
+ type=str),
+ ]
+
+ takes_args = ["contactname"]
+ takes_optiongroups = {
+ "sambaopts": options.SambaOptions,
+ "credopts": options.CredentialsOptions,
+ "versionopts": options.VersionOptions,
+ }
+
+
+ def run(self, contactname, credopts=None, sambaopts=None, versionopts=None,
+ H=None, surname=None, given_name=None, initials=None, force_new_cn=None,
+ display_name=None, mail_address=None, reset_cn=None):
+ # illegal options
+ if force_new_cn and reset_cn:
+ raise CommandError("It is not allowed to specify --force-new-cn "
+ "together with --reset-cn.")
+ if force_new_cn == "":
+ raise CommandError("Failed to rename contact - delete protected "
+ "attribute 'CN'")
+
+ lp = sambaopts.get_loadparm()
+ creds = credopts.get_credentials(lp, fallback_machine=True)
+ samdb = SamDB(url=H, session_info=system_session(),
+ credentials=creds, lp=lp)
+ domain_dn = ldb.Dn(samdb, samdb.domain_dn())
+
+ filter = ("(&(objectClass=contact)(name=%s))" %
+ ldb.binary_encode(contactname))
+ try:
+ res = samdb.search(base=domain_dn,
+ scope=ldb.SCOPE_SUBTREE,
+ expression=filter,
+ attrs=["name",
+ "sn",
+ "givenName",
+ "cn",
+ "initials",
+ "displayName",
+ "mail"]
+ )
+ old_contact = res[0]
+ contact_dn = old_contact.dn
+ except IndexError:
+ raise CommandError('Unable to find contact "%s"' % (contactname))
+
+ contact_parent_dn = contact_dn.parent()
+ old_cn = old_contact["cn"][0]
+
+ if force_new_cn is not None:
+ new_cn = force_new_cn
+ else:
+ new_cn = samdb.fullname_from_names(old_attrs=old_contact,
+ given_name=given_name,
+ initials=initials,
+ surname=surname)
+
+ # change CN, if the new CN is different and the old CN is the
+ # standard CN or the change is forced with force-new-cn or reset-cn
+ excepted_cn = samdb.fullname_from_names(old_attrs=old_contact)
+ must_change_cn = str(old_cn) != str(new_cn) and \
+ (str(old_cn) == str(excepted_cn) or \
+ reset_cn or bool(force_new_cn))
+
+ new_contact_dn = ldb.Dn(samdb, "CN=%s" % new_cn)
+ new_contact_dn.add_base(contact_parent_dn)
+
+ if new_cn == "" and must_change_cn:
+ raise CommandError("Failed to rename contact '%s' - "
+ "can not set an empty CN "
+ "(please use --force-new-cn to specify a "
+ "different CN or --given-name, --initials or "
+ "--surname to set name attributes)" % old_cn)
+
+ # format given attributes
+ contact_attrs = ldb.Message()
+ contact_attrs.dn = contact_dn
+ samdb.prepare_attr_replace(contact_attrs, old_contact, "givenName", given_name)
+ samdb.prepare_attr_replace(contact_attrs, old_contact, "sn", surname)
+ samdb.prepare_attr_replace(contact_attrs, old_contact, "initials", initials)
+ samdb.prepare_attr_replace(contact_attrs, old_contact, "displayName", display_name)
+ samdb.prepare_attr_replace(contact_attrs, old_contact, "mail", mail_address)
+
+ contact_attributes_changed = len(contact_attrs) > 0
+
+ # update the contact with formatted attributes
+ samdb.transaction_start()
+ try:
+ if contact_attributes_changed == True:
+ samdb.modify(contact_attrs)
+ if must_change_cn:
+ samdb.rename(contact_dn, new_contact_dn)
+ except Exception as e:
+ samdb.transaction_cancel()
+ raise CommandError('Failed to rename contact "%s"' % contactname, e)
+ samdb.transaction_commit()
+
+ if must_change_cn:
+ self.outf.write('Renamed CN of contact "%s" from "%s" to "%s" '
+ 'successfully\n' % (contactname, old_cn, new_cn))
+
+ if contact_attributes_changed:
+ self.outf.write('Following attributes of contact "%s" have been '
+ 'changed successfully:\n' % (contactname))
+ for attr in contact_attrs.keys():
+ if attr == "dn":
+ continue
+ self.outf.write('%s: %s\n' % (attr, contact_attrs[attr]
+ if contact_attrs[attr] else '[removed]'))
+
+class cmd_contact(SuperCommand):
+ """Contact management."""
+
+ subcommands = {}
+ subcommands["add"] = cmd_add()
+ subcommands["create"] = cmd_add()
+ subcommands["delete"] = cmd_delete()
+ subcommands["edit"] = cmd_edit()
+ subcommands["list"] = cmd_list()
+ subcommands["move"] = cmd_move()
+ subcommands["show"] = cmd_show()
+ subcommands["rename"] = cmd_rename()
diff --git a/python/samba/netcmd/dbcheck.py b/python/samba/netcmd/dbcheck.py
new file mode 100644
index 0000000..657881b
--- /dev/null
+++ b/python/samba/netcmd/dbcheck.py
@@ -0,0 +1,193 @@
+# Samba4 AD database checker
+#
+# Copyright (C) Andrew Tridgell 2011
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import optparse
+import sys
+
+import ldb
+import samba.getopt as options
+from samba import colour
+from samba.auth import system_session
+from samba.dbchecker import dbcheck
+from samba.samdb import SamDB
+
+from . import Command, CommandError, Option
+
+
+class cmd_dbcheck(Command):
+ """Check local AD database for errors."""
+ synopsis = "%prog [<DN>] [options]"
+
+ takes_optiongroups = {
+ "sambaopts": options.SambaOptions,
+ "versionopts": options.VersionOptions,
+ "credopts": options.CredentialsOptionsDouble,
+ }
+
+ def process_yes(option, opt, value, parser):
+ assert value is None
+ rargs = parser.rargs
+ if rargs:
+ arg = rargs[0]
+ if ((arg[:2] == "--" and len(arg) > 2) or
+ (arg[:1] == "-" and len(arg) > 1 and arg[1] != "-")):
+ setattr(parser.values, "yes", True)
+ else:
+ setattr(parser.values, "yes_rules", arg.split())
+ del rargs[0]
+ else:
+ setattr(parser.values, "yes", True)
+
+ takes_args = ["DN?"]
+
+ takes_options = [
+ Option("--scope", dest="scope", default="SUB",
+ help="Pass search scope that builds DN list. Options: SUB, ONE, BASE"),
+ Option("--fix", dest="fix", default=False, action='store_true',
+ help='Fix any errors found'),
+ Option("--yes", action='callback', callback=process_yes,
+ help="don't confirm changes individually. Applies all as a single transaction (will not succeed if any errors are found)"),
+ Option("--cross-ncs", dest="cross_ncs", default=False, action='store_true',
+ help="cross naming context boundaries"),
+ Option("-v", "--verbose", dest="verbose", action="store_true", default=False,
+ help="Print more details of checking"),
+ Option("-q", "--quiet", action="store_true", default=False,
+ help="don't print details of checking"),
+ Option("--attrs", dest="attrs", default=None, help="list of attributes to check (space separated)"),
+ Option("--reindex", dest="reindex", default=False, action="store_true", help="force database re-index"),
+ Option("--force-modules", dest="force_modules", default=False, action="store_true", help="force loading of Samba modules and ignore the @MODULES record (for very old databases)"),
+ Option("--reset-well-known-acls",
+ dest="reset_well_known_acls",
+ default=False, action="store_true",
+ help=("reset ACLs on objects with well known default values"
+ " (for updating from early 4.0.x)")),
+ Option("--quick-membership-checks", dest="quick_membership_checks",
+ help=("Skips missing/orphaned memberOf backlinks checks, "
+ "but speeds up dbcheck dramatically for domains with "
+ "large groups"),
+ default=False, action="store_true"),
+ Option("-H", "--URL", help="LDB URL for database or target server (defaults to local SAM database)",
+ type=str, metavar="URL", dest="H"),
+ Option("--selftest-check-expired-tombstones",
+ dest="selftest_check_expired_tombstones", default=False, action="store_true",
+ help=optparse.SUPPRESS_HELP), # This is only used by tests
+ ]
+
+ def run(self, DN=None, H=None, verbose=False, fix=False, yes=False,
+ cross_ncs=False, quiet=False,
+ scope="SUB", credopts=None, sambaopts=None, versionopts=None,
+ attrs=None, reindex=False, force_modules=False,
+ quick_membership_checks=False,
+ reset_well_known_acls=False,
+ selftest_check_expired_tombstones=False,
+ yes_rules=None):
+
+ if yes_rules is None:
+ yes_rules = []
+
+ lp = sambaopts.get_loadparm()
+
+ over_ldap = H is not None and H.startswith('ldap')
+
+ if over_ldap:
+ creds = credopts.get_credentials(lp, fallback_machine=True)
+ else:
+ creds = None
+
+ if force_modules:
+ samdb = SamDB(session_info=system_session(), url=H,
+ credentials=creds, lp=lp, options=["modules=samba_dsdb"])
+ else:
+ try:
+ samdb = SamDB(session_info=system_session(), url=H,
+ credentials=creds, lp=lp)
+ except:
+ raise CommandError("Failed to connect to DB at %s. If this is a really old sam.ldb (before alpha9), then try again with --force-modules" % H)
+
+ if H is None or not over_ldap:
+ samdb_schema = samdb
+ else:
+ samdb_schema = SamDB(session_info=system_session(), url=None,
+ credentials=creds, lp=lp)
+
+ scope_map = {"SUB": ldb.SCOPE_SUBTREE, "BASE": ldb.SCOPE_BASE, "ONE": ldb.SCOPE_ONELEVEL}
+ scope = scope.upper()
+ if scope not in scope_map:
+ raise CommandError("Unknown scope %s" % scope)
+ search_scope = scope_map[scope]
+
+ controls = ['show_deleted:1']
+ if over_ldap:
+ controls.append('paged_results:1:1000')
+ if cross_ncs:
+ controls.append("search_options:1:2")
+
+ if not attrs:
+ attrs = ['*']
+ else:
+ attrs = attrs.split()
+
+ # The dbcheck module always prints to stdout, not our self.outf
+ # (yes, maybe FIXME).
+ stdout_colour = colour.colour_if_wanted(sys.stdout,
+ hint=self.requested_colour)
+
+ started_transaction = False
+ if yes and fix:
+ samdb.transaction_start()
+ started_transaction = True
+ try:
+ chk = dbcheck(samdb, samdb_schema=samdb_schema, verbose=verbose,
+ fix=fix, yes=yes, quiet=quiet,
+ in_transaction=started_transaction,
+ quick_membership_checks=quick_membership_checks,
+ reset_well_known_acls=reset_well_known_acls,
+ check_expired_tombstones=selftest_check_expired_tombstones,
+ colour=stdout_colour)
+
+ for option in yes_rules:
+ if hasattr(chk, option):
+ setattr(chk, option, 'ALL')
+ else:
+ raise CommandError("Invalid fix rule %s" % option)
+
+ if reindex:
+ self.outf.write("Re-indexing...\n")
+ error_count = 0
+ if chk.reindex_database():
+ self.outf.write("completed re-index OK\n")
+
+ elif force_modules:
+ self.outf.write("Resetting @MODULES...\n")
+ error_count = 0
+ if chk.reset_modules():
+ self.outf.write("completed @MODULES reset OK\n")
+
+ else:
+ error_count = chk.check_database(DN=DN, scope=search_scope,
+ controls=controls, attrs=attrs)
+ except:
+ if started_transaction:
+ samdb.transaction_cancel()
+ raise
+
+ if started_transaction:
+ samdb.transaction_commit()
+
+ if error_count != 0:
+ sys.exit(1)
diff --git a/python/samba/netcmd/delegation.py b/python/samba/netcmd/delegation.py
new file mode 100644
index 0000000..840be20
--- /dev/null
+++ b/python/samba/netcmd/delegation.py
@@ -0,0 +1,689 @@
+# delegation management
+#
+# Copyright Matthieu Patou mat@samba.org 2010
+# Copyright Stefan Metzmacher metze@samba.org 2011
+# Copyright Bjoern Baumbach bb@sernet.de 2011
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import samba.getopt as options
+import ldb
+from samba import provision
+from samba import dsdb
+from samba.samdb import SamDB
+from samba.auth import system_session
+from samba.dcerpc import security
+from samba.ndr import ndr_pack, ndr_unpack
+from samba.netcmd.common import _get_user_realm_domain
+from samba.netcmd import (
+ Command,
+ CommandError,
+ SuperCommand,
+ Option
+)
+
+
+class cmd_delegation_show(Command):
+    """Show the delegation setting of an account."""
+
+    synopsis = "%prog <accountname> [options]"
+
+    takes_optiongroups = {
+        "sambaopts": options.SambaOptions,
+        "credopts": options.CredentialsOptions,
+        "versionopts": options.VersionOptions,
+    }
+
+    takes_options = [
+        Option("-H", "--URL", help="LDB URL for database or target server", type=str,
+               metavar="URL", dest="H"),
+    ]
+
+    takes_args = ["accountname"]
+
+    def show_security_descriptor(self, sam, security_descriptor):
+        """Print the principals that may delegate to this account.
+
+        Walks the DACL of the msDS-AllowedToActOnBehalfOfOtherIdentity
+        security descriptor, writing warnings to self.errf for suspicious
+        descriptors/ACEs and one line to self.outf per trustee that is
+        effectively granted access.
+
+        :param sam: SamDB connection used to resolve trustee SIDs to DNs
+        :param security_descriptor: unpacked security.descriptor object
+        """
+        dacl = security_descriptor.dacl
+        desc_type = security_descriptor.type
+
+        warning_info = ('Security Descriptor of attribute '
+                        'msDS-AllowedToActOnBehalfOfOtherIdentity')
+
+        # Without a DACL there is nothing meaningful to display.
+        if dacl is None or not desc_type & security.SEC_DESC_DACL_PRESENT:
+            self.errf.write(f'Warning: DACL not present in {warning_info}!\n')
+            return
+
+        if not desc_type & security.SEC_DESC_SELF_RELATIVE:
+            self.errf.write(f'Warning: DACL in {warning_info} lacks '
+                            f'SELF_RELATIVE flag!\n')
+            return
+
+        # Tracks whether the list header still needs to be printed.
+        first = True
+
+        for ace in dacl.aces:
+            trustee = ace.trustee
+
+            # Convert the trustee SID into a DN if we can.
+            try:
+                res = sam.search(f'<SID={trustee}>',
+                                 scope=ldb.SCOPE_BASE)
+            except ldb.LdbError as err:
+                num, _ = err.args
+                if num != ldb.ERR_NO_SUCH_OBJECT:
+                    raise
+            else:
+                if len(res) == 1:
+                    trustee = res[0].dn
+
+            # Set to True for any ACE that should not be reported as an
+            # effective grant.
+            ignore = False
+
+            if (ace.type == security.SEC_ACE_TYPE_ACCESS_DENIED
+                    or ace.type == security.SEC_ACE_TYPE_ACCESS_DENIED_OBJECT):
+                self.errf.write(f'Warning: ACE in {warning_info} denies '
+                                f'access for trustee {trustee}!\n')
+                # Ignore the ACE if it denies access
+                ignore = True
+            elif (ace.type != security.SEC_ACE_TYPE_ACCESS_ALLOWED
+                  and ace.type != security.SEC_ACE_TYPE_ACCESS_ALLOWED_OBJECT):
+                # Ignore the ACE if it doesn't explicitly allow access
+                ignore = True
+
+            inherit_only = ace.flags & security.SEC_ACE_FLAG_INHERIT_ONLY
+            object_inherit = ace.flags & security.SEC_ACE_FLAG_OBJECT_INHERIT
+            container_inherit = (
+                ace.flags & security.SEC_ACE_FLAG_CONTAINER_INHERIT)
+            inherited_ace = ace.flags & security.SEC_ACE_FLAG_INHERITED_ACE
+
+            if inherit_only and not object_inherit and not container_inherit:
+                # Ignore the ACE if it is propagated only to child objects, but
+                # neither of the object and container inherit flags are set.
+                ignore = True
+            else:
+                if container_inherit:
+                    self.errf.write(f'Warning: ACE for trustee {trustee} has '
+                                    f'unexpected CONTAINER_INHERIT flag set in '
+                                    f'{warning_info}!\n')
+                    ignore = True
+
+                if inherited_ace:
+                    self.errf.write(f'Warning: ACE for trustee {trustee} has '
+                                    f'unexpected INHERITED_ACE flag set in '
+                                    f'{warning_info}!\n')
+                    ignore = True
+
+            if not ace.access_mask:
+                # Ignore the ACE if it doesn't grant any permissions.
+                ignore = True
+
+            if not ignore:
+                if first:
+                    self.outf.write('  Principals that may delegate to this '
+                                    'account:\n')
+                    first = False
+
+                self.outf.write(f'msDS-AllowedToActOnBehalfOfOtherIdentity: '
+                                f'{trustee}\n')
+
+
+    def run(self, accountname, H=None, credopts=None, sambaopts=None, versionopts=None):
+        """Display the delegation settings of *accountname*.
+
+        :raises CommandError: if the account cannot be found
+        """
+        lp = sambaopts.get_loadparm()
+        creds = credopts.get_credentials(lp)
+        paths = provision.provision_paths_from_lp(lp, lp.get("realm"))
+
+        # Fall back to the local SAM database when no URL was given.
+        if H is None:
+            path = paths.samdb
+        else:
+            path = H
+
+        sam = SamDB(path, session_info=system_session(),
+                    credentials=creds, lp=lp)
+        # TODO once I understand how, use the domain info to naildown
+        # to the correct domain
+        (cleanedaccount, realm, domain) = _get_user_realm_domain(accountname,
+                                                                 sam)
+
+        res = sam.search(expression="sAMAccountName=%s" %
+                         ldb.binary_encode(cleanedaccount),
+                         scope=ldb.SCOPE_SUBTREE,
+                         attrs=["userAccountControl", "msDS-AllowedToDelegateTo",
+                                "msDS-AllowedToActOnBehalfOfOtherIdentity"])
+        if len(res) == 0:
+            raise CommandError("Unable to find account name '%s'" % accountname)
+        assert(len(res) == 1)
+
+        uac = int(res[0].get("userAccountControl")[0])
+        allowed = res[0].get("msDS-AllowedToDelegateTo")
+        allowed_from = res[0].get("msDS-AllowedToActOnBehalfOfOtherIdentity", idx=0)
+
+        self.outf.write("Account-DN: %s\n" % str(res[0].dn))
+        self.outf.write("UF_TRUSTED_FOR_DELEGATION: %s\n"
+                        % bool(uac & dsdb.UF_TRUSTED_FOR_DELEGATION))
+        self.outf.write("UF_TRUSTED_TO_AUTHENTICATE_FOR_DELEGATION: %s\n" %
+                        bool(uac & dsdb.UF_TRUSTED_TO_AUTHENTICATE_FOR_DELEGATION))
+
+        if allowed:
+            self.outf.write("  Services this account may delegate to:\n")
+            for a in allowed:
+                self.outf.write("msDS-AllowedToDelegateTo: %s\n" % a)
+        if allowed_from is not None:
+            try:
+                security_descriptor = ndr_unpack(security.descriptor, allowed_from)
+            except RuntimeError:
+                # NOTE(review): ndr_unpack reports parse failures as
+                # RuntimeError; a corrupt blob is warned about, not fatal.
+                self.errf.write("Warning: Security Descriptor of attribute "
+                                "msDS-AllowedToActOnBehalfOfOtherIdentity "
+                                "could not be unmarshalled!\n")
+            else:
+                self.show_security_descriptor(sam, security_descriptor)
+
+
+class cmd_delegation_for_any_service(Command):
+ """Set/unset UF_TRUSTED_FOR_DELEGATION for an account."""
+
+ synopsis = "%prog <accountname> [(on|off)] [options]"
+
+ takes_optiongroups = {
+ "sambaopts": options.SambaOptions,
+ "credopts": options.CredentialsOptions,
+ "versionopts": options.VersionOptions,
+ }
+
+ takes_options = [
+ Option("-H", "--URL", help="LDB URL for database or target server", type=str,
+ metavar="URL", dest="H"),
+ ]
+
+ takes_args = ["accountname", "onoff"]
+
+ def run(self, accountname, onoff, H=None, credopts=None, sambaopts=None,
+ versionopts=None):
+
+ on = False
+ if onoff == "on":
+ on = True
+ elif onoff == "off":
+ on = False
+ else:
+ raise CommandError("invalid argument: '%s' (choose from 'on', 'off')" % onoff)
+
+ lp = sambaopts.get_loadparm()
+ creds = credopts.get_credentials(lp)
+ paths = provision.provision_paths_from_lp(lp, lp.get("realm"))
+ if H is None:
+ path = paths.samdb
+ else:
+ path = H
+
+ sam = SamDB(path, session_info=system_session(),
+ credentials=creds, lp=lp)
+ # TODO once I understand how, use the domain info to naildown
+ # to the correct domain
+ (cleanedaccount, realm, domain) = _get_user_realm_domain(accountname,
+ sam)
+
+ search_filter = "sAMAccountName=%s" % ldb.binary_encode(cleanedaccount)
+ flag = dsdb.UF_TRUSTED_FOR_DELEGATION
+ try:
+ sam.toggle_userAccountFlags(search_filter, flag,
+ flags_str="Trusted-for-Delegation",
+ on=on, strict=True)
+ except Exception as err:
+ raise CommandError(err)
+
+
+class cmd_delegation_for_any_protocol(Command):
+ """Set/unset UF_TRUSTED_TO_AUTHENTICATE_FOR_DELEGATION (S4U2Proxy) for an account."""
+
+ synopsis = "%prog <accountname> [(on|off)] [options]"
+
+ takes_optiongroups = {
+ "sambaopts": options.SambaOptions,
+ "credopts": options.CredentialsOptions,
+ "versionopts": options.VersionOptions,
+ }
+
+ takes_options = [
+ Option("-H", "--URL", help="LDB URL for database or target server", type=str,
+ metavar="URL", dest="H"),
+ ]
+
+ takes_args = ["accountname", "onoff"]
+
+ def run(self, accountname, onoff, H=None, credopts=None, sambaopts=None,
+ versionopts=None):
+
+ on = False
+ if onoff == "on":
+ on = True
+ elif onoff == "off":
+ on = False
+ else:
+ raise CommandError("invalid argument: '%s' (choose from 'on', 'off')" % onoff)
+
+ lp = sambaopts.get_loadparm()
+ creds = credopts.get_credentials(lp, fallback_machine=True)
+ paths = provision.provision_paths_from_lp(lp, lp.get("realm"))
+ if H is None:
+ path = paths.samdb
+ else:
+ path = H
+
+ sam = SamDB(path, session_info=system_session(),
+ credentials=creds, lp=lp)
+ # TODO once I understand how, use the domain info to naildown
+ # to the correct domain
+ (cleanedaccount, realm, domain) = _get_user_realm_domain(accountname,
+ sam)
+
+ search_filter = "sAMAccountName=%s" % ldb.binary_encode(cleanedaccount)
+ flag = dsdb.UF_TRUSTED_TO_AUTHENTICATE_FOR_DELEGATION
+ try:
+ sam.toggle_userAccountFlags(search_filter, flag,
+ flags_str="Trusted-to-Authenticate-for-Delegation",
+ on=on, strict=True)
+ except Exception as err:
+ raise CommandError(err)
+
+
class cmd_delegation_add_service(Command):
    """Add a service principal to msDS-AllowedToDelegateTo so that an account may delegate to it."""

    synopsis = "%prog <accountname> <principal> [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "credopts": options.CredentialsOptions,
        "versionopts": options.VersionOptions,
    }

    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server", type=str,
               metavar="URL", dest="H"),
    ]

    takes_args = ["accountname", "principal"]

    def run(self, accountname, principal, H=None, credopts=None, sambaopts=None,
            versionopts=None):
        # Resolve connection parameters; -H overrides the local sam.ldb path.
        lp = sambaopts.get_loadparm()
        creds = credopts.get_credentials(lp)
        paths = provision.provision_paths_from_lp(lp, lp.get("realm"))
        path = paths.samdb if H is None else H

        sam = SamDB(path, session_info=system_session(),
                    credentials=creds, lp=lp)
        # TODO once I understand how, use the domain info to naildown
        # to the correct domain
        cleanedaccount, _, _ = _get_user_realm_domain(accountname, sam)

        res = sam.search(expression="sAMAccountName=%s" %
                         ldb.binary_encode(cleanedaccount),
                         scope=ldb.SCOPE_SUBTREE,
                         attrs=["msDS-AllowedToDelegateTo"])
        if len(res) == 0:
            raise CommandError("Unable to find account name '%s'" % accountname)
        assert(len(res) == 1)

        # Append the service principal to the account's delegation list.
        msg = ldb.Message(res[0].dn)
        msg["msDS-AllowedToDelegateTo"] = ldb.MessageElement(
            [principal], ldb.FLAG_MOD_ADD, "msDS-AllowedToDelegateTo")
        try:
            sam.modify(msg)
        except Exception as err:
            raise CommandError(err)
+
+
class cmd_delegation_del_service(Command):
    """Delete a service principal from msDS-AllowedToDelegateTo so that an account may no longer delegate to it."""

    synopsis = "%prog <accountname> <principal> [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "credopts": options.CredentialsOptions,
        "versionopts": options.VersionOptions,
    }

    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server", type=str,
               metavar="URL", dest="H"),
    ]

    takes_args = ["accountname", "principal"]

    def run(self, accountname, principal, H=None, credopts=None, sambaopts=None,
            versionopts=None):
        # Resolve connection parameters; -H overrides the local sam.ldb path.
        lp = sambaopts.get_loadparm()
        creds = credopts.get_credentials(lp)
        paths = provision.provision_paths_from_lp(lp, lp.get("realm"))
        path = paths.samdb if H is None else H

        sam = SamDB(path, session_info=system_session(),
                    credentials=creds, lp=lp)
        # TODO once I understand how, use the domain info to naildown
        # to the correct domain
        cleanedaccount, _, _ = _get_user_realm_domain(accountname, sam)

        res = sam.search(expression="sAMAccountName=%s" %
                         ldb.binary_encode(cleanedaccount),
                         scope=ldb.SCOPE_SUBTREE,
                         attrs=["msDS-AllowedToDelegateTo"])
        if len(res) == 0:
            raise CommandError("Unable to find account name '%s'" % accountname)
        assert(len(res) == 1)

        # Remove the service principal from the account's delegation list.
        msg = ldb.Message(res[0].dn)
        msg["msDS-AllowedToDelegateTo"] = ldb.MessageElement(
            [principal], ldb.FLAG_MOD_DELETE, "msDS-AllowedToDelegateTo")
        try:
            sam.modify(msg)
        except Exception as err:
            raise CommandError(err)
+
+
class cmd_delegation_add_principal(Command):
    """Add a principal to msDS-AllowedToActOnBehalfOfOtherIdentity that may delegate to an account."""

    synopsis = "%prog <accountname> <principal> [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "credopts": options.CredentialsOptions,
        "versionopts": options.VersionOptions,
    }

    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server",
               type=str, metavar="URL", dest="H"),
    ]

    takes_args = ["accountname", "principal"]

    def run(self, accountname, principal, H=None, credopts=None, sambaopts=None,
            versionopts=None):
        """Grant resource-based constrained delegation by adding an
        access-allowed ACE for *principal* to the security descriptor
        stored in the account's msDS-AllowedToActOnBehalfOfOtherIdentity
        attribute, creating the descriptor and/or DACL if absent."""
        lp = sambaopts.get_loadparm()
        creds = credopts.get_credentials(lp)
        paths = provision.provision_paths_from_lp(lp, lp.get("realm"))
        # -H overrides the default local sam.ldb path.
        if H is None:
            path = paths.samdb
        else:
            path = H

        sam = SamDB(path, session_info=system_session(),
                    credentials=creds, lp=lp)
        # TODO once I understand how, use the domain info to naildown
        # to the correct domain
        cleanedaccount, _, _ = _get_user_realm_domain(accountname, sam)

        account_res = sam.search(
            expression="sAMAccountName=%s" %
            ldb.binary_encode(cleanedaccount),
            scope=ldb.SCOPE_SUBTREE,
            attrs=["msDS-AllowedToActOnBehalfOfOtherIdentity"])
        if len(account_res) == 0:
            raise CommandError(f"Unable to find account name '{accountname}'")
        assert(len(account_res) == 1)

        # Raw NDR-packed security descriptor, or None if the attribute
        # is not yet set on the account.
        data = account_res[0].get(
            "msDS-AllowedToActOnBehalfOfOtherIdentity", idx=0)
        if data is None:
            # Create the security descriptor if it is not present.
            owner_sid = security.dom_sid(security.SID_BUILTIN_ADMINISTRATORS)

            security_desc = security.descriptor()
            security_desc.revision = security.SD_REVISION
            security_desc.type = (security.SEC_DESC_DACL_PRESENT |
                                  security.SEC_DESC_SELF_RELATIVE)
            security_desc.owner_sid = owner_sid

            dacl = None
        else:
            try:
                security_desc = ndr_unpack(security.descriptor, data)
            except RuntimeError:
                raise CommandError(f"Security Descriptor of attribute "
                                   f"msDS-AllowedToActOnBehalfOfOtherIdentity "
                                   f"for account '{accountname}' could not be "
                                   f"unmarshalled!")

            dacl = security_desc.dacl

        if dacl is None:
            # Create the DACL if it is not present.
            dacl = security.acl()
            dacl.revision = security.SECURITY_ACL_REVISION_ADS
            dacl.num_aces = 0

        # TODO once I understand how, use the domain info to naildown
        # to the correct domain
        cleanedprinc, _, _ = _get_user_realm_domain(principal, sam)

        princ_res = sam.search(expression="sAMAccountName=%s" %
                               ldb.binary_encode(cleanedprinc),
                               scope=ldb.SCOPE_SUBTREE,
                               attrs=["objectSid"])
        if len(princ_res) == 0:
            raise CommandError(f"Unable to find principal name '{principal}'")
        assert(len(princ_res) == 1)

        # Convert the principal's raw objectSID to its string form.
        princ_sid = security.dom_sid(
            sam.schema_format_value(
                "objectSID",
                princ_res[0].get("objectSID", idx=0)).decode("utf-8"))

        aces = dacl.aces

        # Check that there is no existing ACE for this principal.
        if any(ace.trustee == princ_sid for ace in aces):
            raise CommandError(
                f"ACE for principal '{principal}' already present in Security "
                f"Descriptor of attribute "
                f"msDS-AllowedToActOnBehalfOfOtherIdentity for account "
                f"'{accountname}'.")

        # Create the new ACE.
        ace = security.ace()
        ace.type = security.SEC_ACE_TYPE_ACCESS_ALLOWED
        ace.flags = 0
        ace.access_mask = security.SEC_ADS_GENERIC_ALL
        ace.trustee = princ_sid

        aces.append(ace)

        dacl.aces = aces
        dacl.num_aces += 1

        security_desc.dacl = dacl

        new_data = ndr_pack(security_desc)

        # Set the new security descriptor. First, delete the original value to
        # detect a race condition if someone else updates the attribute at the
        # same time.
        msg = ldb.Message()
        msg.dn = account_res[0].dn
        if data is not None:
            msg["0"] = ldb.MessageElement(
                data, ldb.FLAG_MOD_DELETE,
                "msDS-AllowedToActOnBehalfOfOtherIdentity")
        msg["1"] = ldb.MessageElement(
            new_data, ldb.FLAG_MOD_ADD,
            "msDS-AllowedToActOnBehalfOfOtherIdentity")
        try:
            sam.modify(msg)
        except ldb.LdbError as err:
            num, _ = err.args
            # The delete of the old value fails with NO_SUCH_ATTRIBUTE if
            # the attribute changed under us — report that as a conflict.
            if num == ldb.ERR_NO_SUCH_ATTRIBUTE:
                raise CommandError(
                    f"Refused to update attribute "
                    f"msDS-AllowedToActOnBehalfOfOtherIdentity for account "
                    f"'{accountname}': a conflicting attribute update "
                    f"occurred simultaneously.")
            else:
                raise CommandError(err)
+
+
class cmd_delegation_del_principal(Command):
    """Delete a principal from msDS-AllowedToActOnBehalfOfOtherIdentity that may no longer delegate to an account."""

    synopsis = "%prog <accountname> <principal> [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "credopts": options.CredentialsOptions,
        "versionopts": options.VersionOptions,
    }

    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server",
               type=str, metavar="URL", dest="H"),
    ]

    takes_args = ["accountname", "principal"]

    def run(self, accountname, principal, H=None, credopts=None, sambaopts=None,
            versionopts=None):
        """Revoke resource-based constrained delegation by stripping every
        ACE whose trustee is *principal* from the security descriptor in
        the account's msDS-AllowedToActOnBehalfOfOtherIdentity attribute."""
        lp = sambaopts.get_loadparm()
        creds = credopts.get_credentials(lp)
        paths = provision.provision_paths_from_lp(lp, lp.get("realm"))
        # -H overrides the default local sam.ldb path.
        if H is None:
            path = paths.samdb
        else:
            path = H

        sam = SamDB(path, session_info=system_session(),
                    credentials=creds, lp=lp)
        # TODO once I understand how, use the domain info to naildown
        # to the correct domain
        cleanedaccount, _, _ = _get_user_realm_domain(accountname, sam)

        account_res = sam.search(
            expression="sAMAccountName=%s" %
            ldb.binary_encode(cleanedaccount),
            scope=ldb.SCOPE_SUBTREE,
            attrs=["msDS-AllowedToActOnBehalfOfOtherIdentity"])
        if len(account_res) == 0:
            raise CommandError("Unable to find account name '%s'" % accountname)
        assert(len(account_res) == 1)

        # Raw NDR-packed security descriptor; must already exist to be
        # able to delete anything from it.
        data = account_res[0].get(
            "msDS-AllowedToActOnBehalfOfOtherIdentity", idx=0)
        if data is None:
            raise CommandError(f"Attribute "
                               f"msDS-AllowedToActOnBehalfOfOtherIdentity for "
                               f"account '{accountname}' not present!")

        try:
            security_desc = ndr_unpack(security.descriptor, data)
        except RuntimeError:
            raise CommandError(f"Security Descriptor of attribute "
                               f"msDS-AllowedToActOnBehalfOfOtherIdentity for "
                               f"account '{accountname}' could not be "
                               f"unmarshalled!")

        dacl = security_desc.dacl
        if dacl is None:
            raise CommandError(f"DACL not present on Security Descriptor of "
                               f"attribute "
                               f"msDS-AllowedToActOnBehalfOfOtherIdentity for "
                               f"account '{accountname}'!")

        # TODO once I understand how, use the domain info to naildown
        # to the correct domain
        cleanedprinc, _, _ = _get_user_realm_domain(principal, sam)

        princ_res = sam.search(expression="sAMAccountName=%s" %
                               ldb.binary_encode(cleanedprinc),
                               scope=ldb.SCOPE_SUBTREE,
                               attrs=["objectSid"])
        if len(princ_res) == 0:
            raise CommandError(f"Unable to find principal name '{principal}'")
        assert(len(princ_res) == 1)

        # Convert the principal's raw objectSID to its string form.
        princ_sid = security.dom_sid(
            sam.schema_format_value(
                "objectSID",
                princ_res[0].get("objectSID", idx=0)).decode("utf-8"))

        old_aces = dacl.aces

        # Remove any ACEs relating to the specified principal.
        aces = [ace for ace in old_aces if ace.trustee != princ_sid]

        # Raise an error if we didn't find any.
        if len(aces) == len(old_aces):
            raise CommandError(f"Unable to find ACE for principal "
                               f"'{principal}' in Security Descriptor of "
                               f"attribute "
                               f"msDS-AllowedToActOnBehalfOfOtherIdentity for "
                               f"account '{accountname}'.")

        dacl.num_aces = len(aces)
        dacl.aces = aces

        security_desc.dacl = dacl

        new_data = ndr_pack(security_desc)

        # Set the new security descriptor. First, delete the original value to
        # detect a race condition if someone else updates the attribute at the
        # same time.
        msg = ldb.Message()
        msg.dn = account_res[0].dn
        msg["0"] = ldb.MessageElement(
            data, ldb.FLAG_MOD_DELETE,
            "msDS-AllowedToActOnBehalfOfOtherIdentity")
        msg["1"] = ldb.MessageElement(
            new_data, ldb.FLAG_MOD_ADD,
            "msDS-AllowedToActOnBehalfOfOtherIdentity")
        try:
            sam.modify(msg)
        except ldb.LdbError as err:
            num, _ = err.args
            # The delete of the old value fails with NO_SUCH_ATTRIBUTE if
            # the attribute changed under us — report that as a conflict.
            if num == ldb.ERR_NO_SUCH_ATTRIBUTE:
                raise CommandError(
                    f"Refused to update attribute "
                    f"msDS-AllowedToActOnBehalfOfOtherIdentity for account "
                    f"'{accountname}': a conflicting attribute update "
                    f"occurred simultaneously.")
            else:
                raise CommandError(err)
+
+
class cmd_delegation(SuperCommand):
    """Delegation management."""

    # Map subcommand names to their Command instances.
    subcommands = {
        "show": cmd_delegation_show(),
        "for-any-service": cmd_delegation_for_any_service(),
        "for-any-protocol": cmd_delegation_for_any_protocol(),
        "add-service": cmd_delegation_add_service(),
        "del-service": cmd_delegation_del_service(),
        "add-principal": cmd_delegation_add_principal(),
        "del-principal": cmd_delegation_del_principal(),
    }
diff --git a/python/samba/netcmd/dns.py b/python/samba/netcmd/dns.py
new file mode 100644
index 0000000..693fc9a
--- /dev/null
+++ b/python/samba/netcmd/dns.py
@@ -0,0 +1,1394 @@
+# DNS management tool
+#
+# Copyright (C) Amitay Isaacs 2011-2012
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+import samba.getopt as options
+from samba import WERRORError
+from samba import werror
+from struct import pack
+from socket import inet_ntop, inet_pton
+from socket import AF_INET
+from socket import AF_INET6
+import struct
+import time
+import ldb
+from samba.ndr import ndr_unpack, ndr_pack
+import re
+
+from samba import remove_dc, dsdb_dns
+from samba.samdb import SamDB
+from samba.auth import system_session
+
+from samba.netcmd import (
+ Command,
+ CommandError,
+ Option,
+ SuperCommand,
+)
+from samba.dcerpc import dnsp, dnsserver
+
+from samba.dnsserver import record_from_string, DNSParseError, flag_from_string
+from samba.dnsserver import dns_record_match
+
+
def dns_connect(server, lp, creds):
    """Open a signed ncacn_ip_tcp DNS RPC connection to *server*."""
    host = server
    # 'localhost' is rewritten so the binding resolves consistently.
    if host.lower() == 'localhost':
        host = '127.0.0.1'
    try:
        return dnsserver.dnsserver("ncacn_ip_tcp:%s[sign]" % host, lp, creds)
    except RuntimeError as e:
        raise CommandError('Connecting to DNS RPC server %s failed with %s' % (host, e))
+
+
class DnsConnWrapper:
    """A wrapper around a dnsserver.dnsserver connection that makes it
    harder not to report friendly messages.

    If, rather than

        dns_conn = dns_connect(server, lp, creds)

    you use

        dns_conn = DnsConnWrapper(server, lp, creds)

    then various common errors (for example, misspelled zones) on
    common operations will raise CommandErrors that turn into
    relatively nice messages (when compared to tracebacks).

    In addition, if you provide a messages keyword argument, it will
    override the defaults. Note that providing None will turn off the
    default, letting the original exception shine through.

    messages = {
        werror.WERR_DNS_ERROR_ZONE_DOES_NOT_EXIST: (
            f'Zone {zone} does not exist and so could not be deleted.'),
        werror.WERR_DNS_ERROR_NAME_DOES_NOT_EXIST: None
    }
    res = dns_conn.DnssrvOperation2( # ...
        messages=messages)

    This example changes the message for ZONE_DOES_NOT_EXIST and
    avoids catching NAME_DOES_NOT_EXIST.

    Only WERRORErrors are intercepted.
    """

    # Friendly texts used when the caller does not override them.
    default_messages = {
        werror.WERR_DNS_ERROR_DS_UNAVAILABLE: "Could not contact RPC server",
        werror.WERR_DNS_ERROR_ZONE_ALREADY_EXISTS: 'Zone already exists',
        werror.WERR_DNS_ERROR_RECORD_DOES_NOT_EXIST: 'The record does not exist',
        werror.WERR_DNS_ERROR_NAME_DOES_NOT_EXIST: 'The zone does not exist',
        werror.WERR_ACCESS_DENIED: 'Insufficient permissions',
    }

    def __init__(self, server, lp, creds):
        self.dns_conn = dns_connect(server, lp, creds)

    def __getattr__(self, name):
        """Proxy attribute access to the underlying connection, wrapping
        the common RPC entry points with WERROR-to-CommandError mapping."""
        attr = getattr(self.dns_conn, name)
        if name not in {
            "DnssrvComplexOperation2",
            "DnssrvEnumRecords2",
            "DnssrvOperation2",
            "DnssrvQuery2",
            "DnssrvUpdateRecord2"}:
            return attr

        def f(*args, messages=None):
            if messages is None:
                messages = {}

            try:
                return attr(*args)
            except WERRORError as e:
                werr, errstr = e.args
                if werr in messages:
                    # BUGFIX: the sentinel is a None *value* for this
                    # error code (as documented above), not a None error
                    # code; the previous `werr is None` test could never
                    # be true for a key present in `messages`.
                    if messages[werr] is None:
                        # None overrides a default message, leaving the bare exception
                        raise
                    raise CommandError(f"{messages[werr]} [{errstr}]", e)
                if werr in self.default_messages:
                    raise CommandError(f"{self.default_messages[werr]} [{errstr}]", e)
                raise

        return f
+
+
def bool_string(flag):
    """Render a DNS RPC boolean flag as 'TRUE', 'FALSE' or an UNKNOWN hex tag."""
    if flag == 0:
        return 'FALSE'
    if flag == 1:
        return 'TRUE'
    return 'UNKNOWN (0x%x)' % flag
+
+
def enum_string(module, enum_defs, value):
    """Return the first name in enum_defs whose value on *module* equals
    *value*, or an UNKNOWN hex tag if none matches."""
    for name in enum_defs:
        if getattr(module, name) == value:
            return name
    return 'UNKNOWN (0x%x)' % value
+
+
def bitmap_string(module, bitmap_defs, value):
    """Render the names of set bits as a space-terminated list, or 'NONE'.

    Each matching name contributes 'NAME ' (note trailing space), matching
    the historical output format.
    """
    names = [b for b in bitmap_defs if value & getattr(module, b)]
    if not names:
        return 'NONE'
    return ''.join('%s ' % b for b in names)
+
+
def boot_method_string(boot_method):
    """Describe a server fBootMethod value by its dnsserver constant name."""
    return enum_string(dnsserver,
                       ('DNS_BOOT_METHOD_UNINITIALIZED',
                        'DNS_BOOT_METHOD_FILE',
                        'DNS_BOOT_METHOD_REGISTRY',
                        'DNS_BOOT_METHOD_DIRECTORY'),
                       boot_method)
+
+
def name_check_flag_string(check_flag):
    """Describe a dwNameCheckFlag value by its dnsserver constant name."""
    return enum_string(dnsserver,
                       ('DNS_ALLOW_RFC_NAMES_ONLY',
                        'DNS_ALLOW_NONRFC_NAMES',
                        'DNS_ALLOW_MULTIBYTE_NAMES',
                        'DNS_ALLOW_ALL_NAMES'),
                       check_flag)
+
+
def zone_type_string(zone_type):
    """Describe a dwZoneType value by its dnsp constant name."""
    return enum_string(dnsp,
                       ('DNS_ZONE_TYPE_CACHE',
                        'DNS_ZONE_TYPE_PRIMARY',
                        'DNS_ZONE_TYPE_SECONDARY',
                        'DNS_ZONE_TYPE_STUB',
                        'DNS_ZONE_TYPE_FORWARDER',
                        'DNS_ZONE_TYPE_SECONDARY_CACHE'),
                       zone_type)
+
+
def zone_update_string(zone_update):
    """Describe an fAllowUpdate value by its dnsp constant name."""
    return enum_string(dnsp,
                       ('DNS_ZONE_UPDATE_OFF',
                        'DNS_ZONE_UPDATE_UNSECURE',
                        'DNS_ZONE_UPDATE_SECURE'),
                       zone_update)
+
+
def zone_secondary_security_string(security):
    """Describe an fSecureSecondaries value by its dnsserver constant name."""
    return enum_string(dnsserver,
                       ('DNS_ZONE_SECSECURE_NO_SECURITY',
                        'DNS_ZONE_SECSECURE_NS_ONLY',
                        'DNS_ZONE_SECSECURE_LIST_ONLY',
                        'DNS_ZONE_SECSECURE_NO_XFER'),
                       security)
+
+
def zone_notify_level_string(notify_level):
    """Describe an fNotifyLevel value by its dnsserver constant name."""
    return enum_string(dnsserver,
                       ('DNS_ZONE_NOTIFY_OFF',
                        'DNS_ZONE_NOTIFY_ALL_SECONDARIES',
                        'DNS_ZONE_NOTIFY_LIST_ONLY'),
                       notify_level)
+
+
def dp_flags_string(dp_flags):
    """Describe a directory-partition flags bitmap via dnsserver constants."""
    return bitmap_string(dnsserver,
                         ('DNS_DP_AUTOCREATED',
                          'DNS_DP_LEGACY',
                          'DNS_DP_DOMAIN_DEFAULT',
                          'DNS_DP_FOREST_DEFAULT',
                          'DNS_DP_ENLISTED',
                          'DNS_DP_DELETED'),
                         dp_flags)
+
+
def zone_flags_string(flags):
    """Describe a DNS_RPC_ZONE Flags bitmap via dnsserver constants."""
    return bitmap_string(dnsserver,
                         ('DNS_RPC_ZONE_PAUSED',
                          'DNS_RPC_ZONE_SHUTDOWN',
                          'DNS_RPC_ZONE_REVERSE',
                          'DNS_RPC_ZONE_AUTOCREATED',
                          'DNS_RPC_ZONE_DSINTEGRATED',
                          'DNS_RPC_ZONE_AGING',
                          'DNS_RPC_ZONE_UPDATE_UNSECURE',
                          'DNS_RPC_ZONE_UPDATE_SECURE',
                          'DNS_RPC_ZONE_READONLY'),
                         flags)
+
+
def ip4_array_string(array):
    """Convert an IP4_ARRAY (AddrCount + 32-bit AddrArray) to dotted quads.

    A falsy array (e.g. None) yields an empty list. Each entry is a raw
    32-bit value repacked and rendered with inet_ntop.
    """
    if not array:
        return []
    return [inet_ntop(AF_INET, pack('I', array.AddrArray[i]))
            for i in range(array.AddrCount)]
+
+
def dns_addr_array_string(array):
    """Convert a DNS_ADDR_ARRAY to printable addresses.

    MaxSa[0] carries the address family tag: 0x02 means IPv4 (bytes 4:8),
    0x17 means IPv6 (bytes 8:24); anything else renders as 'UNKNOWN'.
    A falsy array yields an empty list.
    """
    ret = []
    if not array:
        return ret
    for i in range(array.AddrCount):
        sa = array.AddrArray[i].MaxSa
        if sa[0] == 0x02:
            ret.append(inet_ntop(AF_INET, struct.pack('4B', *sa[4:8])))
        elif sa[0] == 0x17:
            ret.append(inet_ntop(AF_INET6, struct.pack('16B', *sa[8:24])))
        else:
            ret.append('UNKNOWN')
    return ret
+
+
def dns_type_flag(rec_type):
    """Map a record-type string (e.g. 'A', 'SRV') to its DNS type flag,
    converting parse failures into CommandError."""
    try:
        flag = flag_from_string(rec_type)
    except DNSParseError as e:
        raise CommandError(*e.args)
    return flag
+
+
def dns_client_version(cli_version):
    """Translate 'w2k'/'dotnet'/'longhorn' (any case) into the matching
    dnsserver DNS_CLIENT_VERSION_* constant."""
    versions = {
        'W2K': dnsserver.DNS_CLIENT_VERSION_W2K,
        'DOTNET': dnsserver.DNS_CLIENT_VERSION_DOTNET,
        'LONGHORN': dnsserver.DNS_CLIENT_VERSION_LONGHORN,
    }
    try:
        return versions[cli_version.upper()]
    except KeyError:
        raise CommandError('Unknown client version %s' % cli_version)
+
+
def print_serverinfo(outf, typeid, serverinfo):
    """Write a field-by-field dump of a DNS_RPC_SERVER_INFO structure.

    typeid selects which structure revision was returned (W2K / DOTNET /
    SERVER_INFO); newer revisions carry additional fields and use
    DNS_ADDR_ARRAY rather than IP4_ARRAY for address lists.
    """
    outf.write(' dwVersion : 0x%x\n' % serverinfo.dwVersion)
    outf.write(' fBootMethod : %s\n' % boot_method_string(serverinfo.fBootMethod))
    outf.write(' fAdminConfigured : %s\n' % bool_string(serverinfo.fAdminConfigured))
    outf.write(' fAllowUpdate : %s\n' % bool_string(serverinfo.fAllowUpdate))
    outf.write(' fDsAvailable : %s\n' % bool_string(serverinfo.fDsAvailable))
    outf.write(' pszServerName : %s\n' % serverinfo.pszServerName)
    outf.write(' pszDsContainer : %s\n' % serverinfo.pszDsContainer)

    # Older structure revisions store addresses as IP4_ARRAY; the newest
    # (SERVER_INFO) uses DNS_ADDR_ARRAY.
    if typeid != dnsserver.DNSSRV_TYPEID_SERVER_INFO:
        outf.write(' aipServerAddrs : %s\n' %
                   ip4_array_string(serverinfo.aipServerAddrs))
        outf.write(' aipListenAddrs : %s\n' %
                   ip4_array_string(serverinfo.aipListenAddrs))
        outf.write(' aipForwarders : %s\n' %
                   ip4_array_string(serverinfo.aipForwarders))
    else:
        outf.write(' aipServerAddrs : %s\n' %
                   dns_addr_array_string(serverinfo.aipServerAddrs))
        outf.write(' aipListenAddrs : %s\n' %
                   dns_addr_array_string(serverinfo.aipListenAddrs))
        outf.write(' aipForwarders : %s\n' %
                   dns_addr_array_string(serverinfo.aipForwarders))

    outf.write(' dwLogLevel : %d\n' % serverinfo.dwLogLevel)
    outf.write(' dwDebugLevel : %d\n' % serverinfo.dwDebugLevel)
    outf.write(' dwForwardTimeout : %d\n' % serverinfo.dwForwardTimeout)
    # BUGFIX: label previously misspelled the field as 'dwRpcPrototol'.
    outf.write(' dwRpcProtocol : 0x%x\n' % serverinfo.dwRpcProtocol)
    outf.write(' dwNameCheckFlag : %s\n' % name_check_flag_string(serverinfo.dwNameCheckFlag))
    outf.write(' cAddressAnswerLimit : %d\n' % serverinfo.cAddressAnswerLimit)
    outf.write(' dwRecursionRetry : %d\n' % serverinfo.dwRecursionRetry)
    outf.write(' dwRecursionTimeout : %d\n' % serverinfo.dwRecursionTimeout)
    outf.write(' dwMaxCacheTtl : %d\n' % serverinfo.dwMaxCacheTtl)
    outf.write(' dwDsPollingInterval : %d\n' % serverinfo.dwDsPollingInterval)
    outf.write(' dwScavengingInterval : %d\n' % serverinfo.dwScavengingInterval)
    outf.write(' dwDefaultRefreshInterval : %d\n' % serverinfo.dwDefaultRefreshInterval)
    outf.write(' dwDefaultNoRefreshInterval : %d\n' % serverinfo.dwDefaultNoRefreshInterval)
    outf.write(' fAutoReverseZones : %s\n' % bool_string(serverinfo.fAutoReverseZones))
    outf.write(' fAutoCacheUpdate : %s\n' % bool_string(serverinfo.fAutoCacheUpdate))
    outf.write(' fRecurseAfterForwarding : %s\n' % bool_string(serverinfo.fRecurseAfterForwarding))
    outf.write(' fForwardDelegations : %s\n' % bool_string(serverinfo.fForwardDelegations))
    outf.write(' fNoRecursion : %s\n' % bool_string(serverinfo.fNoRecursion))
    outf.write(' fSecureResponses : %s\n' % bool_string(serverinfo.fSecureResponses))
    outf.write(' fRoundRobin : %s\n' % bool_string(serverinfo.fRoundRobin))
    outf.write(' fLocalNetPriority : %s\n' % bool_string(serverinfo.fLocalNetPriority))
    outf.write(' fBindSecondaries : %s\n' % bool_string(serverinfo.fBindSecondaries))
    outf.write(' fWriteAuthorityNs : %s\n' % bool_string(serverinfo.fWriteAuthorityNs))
    outf.write(' fStrictFileParsing : %s\n' % bool_string(serverinfo.fStrictFileParsing))
    outf.write(' fLooseWildcarding : %s\n' % bool_string(serverinfo.fLooseWildcarding))
    outf.write(' fDefaultAgingState : %s\n' % bool_string(serverinfo.fDefaultAgingState))

    # Fields below do not exist in the W2K revision.
    if typeid != dnsserver.DNSSRV_TYPEID_SERVER_INFO_W2K:
        outf.write(' dwRpcStructureVersion : 0x%x\n' % serverinfo.dwRpcStructureVersion)
        outf.write(' aipLogFilter : %s\n' % dns_addr_array_string(serverinfo.aipLogFilter))
        outf.write(' pwszLogFilePath : %s\n' % serverinfo.pwszLogFilePath)
        outf.write(' pszDomainName : %s\n' % serverinfo.pszDomainName)
        outf.write(' pszForestName : %s\n' % serverinfo.pszForestName)
        outf.write(' pszDomainDirectoryPartition : %s\n' % serverinfo.pszDomainDirectoryPartition)
        outf.write(' pszForestDirectoryPartition : %s\n' % serverinfo.pszForestDirectoryPartition)

        outf.write(' dwLocalNetPriorityNetMask : 0x%x\n' % serverinfo.dwLocalNetPriorityNetMask)
        outf.write(' dwLastScavengeTime : %d\n' % serverinfo.dwLastScavengeTime)
        outf.write(' dwEventLogLevel : %d\n' % serverinfo.dwEventLogLevel)
        outf.write(' dwLogFileMaxSize : %d\n' % serverinfo.dwLogFileMaxSize)
        outf.write(' dwDsForestVersion : %d\n' % serverinfo.dwDsForestVersion)
        outf.write(' dwDsDomainVersion : %d\n' % serverinfo.dwDsDomainVersion)
        outf.write(' dwDsDsaVersion : %d\n' % serverinfo.dwDsDsaVersion)

    # Only the newest revision reports RODC status.
    if typeid == dnsserver.DNSSRV_TYPEID_SERVER_INFO:
        outf.write(' fReadOnlyDC : %s\n' % bool_string(serverinfo.fReadOnlyDC))
+
+
def print_zoneinfo(outf, typeid, zoneinfo):
    """Write a field-by-field dump of a DNS_RPC_ZONE_INFO structure.

    typeid selects the structure revision; all revisions except the
    newest (ZONE_INFO) store address lists as IP4_ARRAY, while ZONE_INFO
    uses DNS_ADDR_ARRAY, and the W2K revision lacks the trailing fields.
    """
    outf.write(' pszZoneName : %s\n' % zoneinfo.pszZoneName)
    outf.write(' dwZoneType : %s\n' % zone_type_string(zoneinfo.dwZoneType))
    outf.write(' fReverse : %s\n' % bool_string(zoneinfo.fReverse))
    outf.write(' fAllowUpdate : %s\n' % zone_update_string(zoneinfo.fAllowUpdate))
    outf.write(' fPaused : %s\n' % bool_string(zoneinfo.fPaused))
    outf.write(' fShutdown : %s\n' % bool_string(zoneinfo.fShutdown))
    outf.write(' fAutoCreated : %s\n' % bool_string(zoneinfo.fAutoCreated))
    outf.write(' fUseDatabase : %s\n' % bool_string(zoneinfo.fUseDatabase))
    outf.write(' pszDataFile : %s\n' % zoneinfo.pszDataFile)
    # IP4_ARRAY for older revisions, DNS_ADDR_ARRAY for ZONE_INFO.
    if typeid != dnsserver.DNSSRV_TYPEID_ZONE_INFO:
        outf.write(' aipMasters : %s\n' %
                   ip4_array_string(zoneinfo.aipMasters))
    else:
        outf.write(' aipMasters : %s\n' %
                   dns_addr_array_string(zoneinfo.aipMasters))
    outf.write(' fSecureSecondaries : %s\n' % zone_secondary_security_string(zoneinfo.fSecureSecondaries))
    outf.write(' fNotifyLevel : %s\n' % zone_notify_level_string(zoneinfo.fNotifyLevel))
    if typeid != dnsserver.DNSSRV_TYPEID_ZONE_INFO:
        outf.write(' aipSecondaries : %s\n' %
                   ip4_array_string(zoneinfo.aipSecondaries))
        outf.write(' aipNotify : %s\n' %
                   ip4_array_string(zoneinfo.aipNotify))
    else:
        outf.write(' aipSecondaries : %s\n' %
                   dns_addr_array_string(zoneinfo.aipSecondaries))
        outf.write(' aipNotify : %s\n' %
                   dns_addr_array_string(zoneinfo.aipNotify))
    outf.write(' fUseWins : %s\n' % bool_string(zoneinfo.fUseWins))
    outf.write(' fUseNbstat : %s\n' % bool_string(zoneinfo.fUseNbstat))
    outf.write(' fAging : %s\n' % bool_string(zoneinfo.fAging))
    outf.write(' dwNoRefreshInterval : %d\n' % zoneinfo.dwNoRefreshInterval)
    outf.write(' dwRefreshInterval : %d\n' % zoneinfo.dwRefreshInterval)
    outf.write(' dwAvailForScavengeTime : %d\n' % zoneinfo.dwAvailForScavengeTime)
    if typeid != dnsserver.DNSSRV_TYPEID_ZONE_INFO:
        outf.write(' aipScavengeServers : %s\n' %
                   ip4_array_string(zoneinfo.aipScavengeServers))
    else:
        outf.write(' aipScavengeServers : %s\n' %
                   dns_addr_array_string(zoneinfo.aipScavengeServers))

    # Fields below do not exist in the W2K revision.
    if typeid != dnsserver.DNSSRV_TYPEID_ZONE_INFO_W2K:
        outf.write(' dwRpcStructureVersion : 0x%x\n' % zoneinfo.dwRpcStructureVersion)
        outf.write(' dwForwarderTimeout : %d\n' % zoneinfo.dwForwarderTimeout)
        outf.write(' fForwarderSlave : %d\n' % zoneinfo.fForwarderSlave)
        if typeid != dnsserver.DNSSRV_TYPEID_ZONE_INFO:
            outf.write(' aipLocalMasters : %s\n' %
                       ip4_array_string(zoneinfo.aipLocalMasters))
        else:
            outf.write(' aipLocalMasters : %s\n' %
                       dns_addr_array_string(zoneinfo.aipLocalMasters))
        outf.write(' dwDpFlags : %s\n' % dp_flags_string(zoneinfo.dwDpFlags))
        outf.write(' pszDpFqdn : %s\n' % zoneinfo.pszDpFqdn)
        outf.write(' pwszZoneDn : %s\n' % zoneinfo.pwszZoneDn)
        outf.write(' dwLastSuccessfulSoaCheck : %d\n' % zoneinfo.dwLastSuccessfulSoaCheck)
        outf.write(' dwLastSuccessfulXfr : %d\n' % zoneinfo.dwLastSuccessfulXfr)

    # Only the newest revision carries these status fields.
    if typeid == dnsserver.DNSSRV_TYPEID_ZONE_INFO:
        outf.write(' fQueuedForBackgroundLoad : %s\n' % bool_string(zoneinfo.fQueuedForBackgroundLoad))
        outf.write(' fBackgroundLoadInProgress : %s\n' % bool_string(zoneinfo.fBackgroundLoadInProgress))
        outf.write(' fReadOnlyZone : %s\n' % bool_string(zoneinfo.fReadOnlyZone))
        outf.write(' dwLastXfrAttempt : %d\n' % zoneinfo.dwLastXfrAttempt)
        outf.write(' dwLastXfrResult : %d\n' % zoneinfo.dwLastXfrResult)
+
+
def print_zone(outf, typeid, zone):
    """Write a short summary of a single DNS_RPC_ZONE entry."""
    fields = [
        (' pszZoneName : %s\n', zone.pszZoneName),
        (' Flags : %s\n', zone_flags_string(zone.Flags)),
        (' ZoneType : %s\n', zone_type_string(zone.ZoneType)),
        (' Version : %s\n', zone.Version),
    ]
    # Directory-partition fields were added after the W2K revision.
    if typeid != dnsserver.DNSSRV_TYPEID_ZONE_W2K:
        fields.append((' dwDpFlags : %s\n', dp_flags_string(zone.dwDpFlags)))
        fields.append((' pszDpFqdn : %s\n', zone.pszDpFqdn))
    for fmt, value in fields:
        outf.write(fmt % value)
+
+
def print_enumzones(outf, typeid, zones):
    """Write the zone count, then each zone preceded by a blank line."""
    outf.write(' %d zone(s) found\n' % zones.dwZoneCount)
    for entry in zones.ZoneArray:
        outf.write('\n')
        print_zone(outf, typeid, entry)
+
+
def print_dns_record(outf, rec):
    """Write one DNS record in human-readable form with its metadata."""
    wtype = rec.wType
    # Record types whose payload is a plain name (rec.data.str).
    name_types = {dnsp.DNS_TYPE_PTR: 'PTR',
                  dnsp.DNS_TYPE_NS: 'NS',
                  dnsp.DNS_TYPE_CNAME: 'CNAME'}
    if wtype == dnsp.DNS_TYPE_A:
        mesg = 'A: %s' % rec.data
    elif wtype == dnsp.DNS_TYPE_AAAA:
        mesg = 'AAAA: %s' % rec.data
    elif wtype in name_types:
        mesg = '%s: %s' % (name_types[wtype], rec.data.str)
    elif wtype == dnsp.DNS_TYPE_SOA:
        soa = rec.data
        mesg = ('SOA: serial=%d, refresh=%d, retry=%d, expire=%d, '
                'minttl=%d, ns=%s, email=%s' % (
                    soa.dwSerialNo,
                    soa.dwRefresh,
                    soa.dwRetry,
                    soa.dwExpire,
                    soa.dwMinimumTtl,
                    soa.NamePrimaryServer.str,
                    soa.ZoneAdministratorEmail.str))
    elif wtype == dnsp.DNS_TYPE_MX:
        mesg = 'MX: %s (%d)' % (rec.data.nameExchange.str, rec.data.wPreference)
    elif wtype == dnsp.DNS_TYPE_SRV:
        mesg = 'SRV: %s (%d, %d, %d)' % (rec.data.nameTarget.str, rec.data.wPort,
                                         rec.data.wPriority, rec.data.wWeight)
    elif wtype == dnsp.DNS_TYPE_TXT:
        mesg = 'TXT: %s' % ','.join('"%s"' % name.str for name in rec.data.str)
    else:
        mesg = 'Unknown: '
    outf.write(' %s (flags=%x, serial=%d, ttl=%d)\n' % (
        mesg, rec.dwFlags, rec.dwSerial, rec.dwTtlSeconds))
+
+
def print_dnsrecords(outf, records):
    """Write every node in a record enumeration, then each of its records."""
    for node in records.rec:
        outf.write(' Name=%s, Records=%d, Children=%d\n' % (
            node.dnsNodeName.str,
            node.wRecordCount,
            node.dwChildCount))
        for entry in node.records:
            print_dns_record(outf, entry)
+
+
# Convert data into a dns record
def data_to_dns_record(record_type, data):
    """Parse *data* into a dnsp record of *record_type*, turning parse
    failures into CommandError (original traceback suppressed)."""
    try:
        return record_from_string(record_type, data)
    except DNSParseError as e:
        raise CommandError(*e.args) from None
+
+
class cmd_serverinfo(Command):
    """Query for Server information."""

    synopsis = '%prog <server> [options]'

    takes_args = ['server']

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_options = [
        Option('--client-version', help='Client Version',
               default='longhorn', metavar='w2k|dotnet|longhorn',
               choices=['w2k', 'dotnet', 'longhorn'], dest='cli_ver'),
    ]

    def run(self, server, cli_ver, sambaopts=None, credopts=None,
            versionopts=None):
        # Keep loadparm/credentials on the instance like other commands do.
        self.lp = sambaopts.get_loadparm()
        self.creds = credopts.get_credentials(self.lp)
        conn = DnsConnWrapper(server, self.lp, self.creds)

        # Query the ServerInfo property and dump every field.
        typeid, serverinfo = conn.DnssrvQuery2(dns_client_version(cli_ver),
                                               0, server, None, 'ServerInfo')
        print_serverinfo(self.outf, typeid, serverinfo)
+
+
def _add_integer_options(table, takes_options, integer_properties):
    """Generate options for cmd_zoneoptions.

    For each (name, help, min, max) row, append a typed Option to
    takes_options and record (name, min, max, flag) in integer_properties
    for later range checking.
    """
    for prop_name, doc, lower, upper in table:
        flag = '--' + prop_name.lower()
        takes_options.append(Option(flag,
                                    help=f"{doc} [{lower}-{upper}]",
                                    type="int",
                                    dest=prop_name))
        integer_properties.append((prop_name, lower, upper, flag))
+
+
class cmd_zoneoptions(Command):
    """Change zone aging options."""

    synopsis = '%prog <server> <zone> [options]'

    takes_args = ['server', 'zone']

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_options = [
        Option('--client-version', help='Client Version',
               default='longhorn', metavar='w2k|dotnet|longhorn',
               choices=['w2k', 'dotnet', 'longhorn'], dest='cli_ver'),
        Option('--mark-old-records-static', metavar="YYYY-MM-DD",
               help="Make records older than this (YYYY-MM-DD) static"),
        Option('--mark-records-static-regex', metavar="REGEXP",
               help="Make records matching this regular expression static"),
        Option('--mark-records-dynamic-regex', metavar="REGEXP",
               help="Make records matching this regular expression dynamic"),
        Option('-n', '--dry-run', action='store_true',
               help="Don't change anything, say what would happen"),
    ]

    # (name, min, max, option-flag) tuples, filled in by
    # _add_integer_options below and consumed by run() for range checks.
    integer_properties = []
    # Any zone parameter that is stored as an integer (which is most of
    # them) can be added to this table. The name should be the dnsp
    # mixed case name, which will get munged into a lowercase name for
    # the option. (e.g. "Aging" becomes "--aging").
    #
    # Note: just because we add a name here doesn't mean we will use
    # it.
    _add_integer_options([
        # ( name, help-string, min, max )
        ('Aging', 'Enable record aging', 0, 1),
        ('NoRefreshInterval',
         'Aging no refresh interval in hours (0: use default)',
         0, 10 * 365 * 24),
        ('RefreshInterval',
         'Aging refresh interval in hours (0: use default)',
         0, 10 * 365 * 24),
    ],
        takes_options,
        integer_properties)
+
+ def run(self, server, zone, cli_ver, sambaopts=None, credopts=None,
+ versionopts=None, dry_run=False,
+ mark_old_records_static=None,
+ mark_records_static_regex=None,
+ mark_records_dynamic_regex=None,
+ **kwargs):
+ self.lp = sambaopts.get_loadparm()
+ self.creds = credopts.get_credentials(self.lp)
+ dns_conn = DnsConnWrapper(server, self.lp, self.creds)
+
+ client_version = dns_client_version(cli_ver)
+ nap_type = dnsserver.DNSSRV_TYPEID_NAME_AND_PARAM
+
+ for k, _min, _max, o in self.integer_properties:
+ if kwargs.get(k) is None:
+ continue
+ v = kwargs[k]
+ if _min is not None and v < _min:
+ raise CommandError(f"{o} must be at least {_min}")
+ if _max is not None and v > _max:
+ raise CommandError(f"{o} can't exceed {_max}")
+
+ name_param = dnsserver.DNS_RPC_NAME_AND_PARAM()
+ name_param.dwParam = v
+ name_param.pszNodeName = k
+ if dry_run:
+ print(f"would set {k} to {v} for {zone}", file=self.outf)
+ continue
+ try:
+ dns_conn.DnssrvOperation2(client_version,
+ 0,
+ server,
+ zone,
+ 0,
+ 'ResetDwordProperty',
+ nap_type,
+ name_param)
+ except WERRORError as e:
+ raise CommandError(f"Could not set {k} to {v}") from None
+
+ print(f"Set {k} to {v}", file=self.outf)
+
+ # We don't want to allow more than one of these --mark-*
+ # options at a time, as they are sensitive to ordering and
+ # the order is not documented.
+ n_mark_options = 0
+ for x in (mark_old_records_static,
+ mark_records_static_regex,
+ mark_records_dynamic_regex):
+ if x is not None:
+ n_mark_options += 1
+
+ if n_mark_options > 1:
+ raise CommandError("Multiple --mark-* options will not work\n")
+
+ if mark_old_records_static is not None:
+ self.mark_old_records_static(server, zone,
+ mark_old_records_static,
+ dry_run)
+
+ if mark_records_static_regex is not None:
+ self.mark_records_static_regex(server,
+ zone,
+ mark_records_static_regex,
+ dry_run)
+
+ if mark_records_dynamic_regex is not None:
+ self.mark_records_dynamic_regex(server,
+ zone,
+ mark_records_dynamic_regex,
+ dry_run)
+
+
+ def _get_dns_nodes(self, server, zone_name):
+ samdb = SamDB(url="ldap://%s" % server,
+ session_info=system_session(),
+ credentials=self.creds, lp=self.lp)
+
+ zone_dn = (f"DC={zone_name},CN=MicrosoftDNS,DC=DomainDNSZones,"
+ f"{samdb.get_default_basedn()}")
+
+ nodes = samdb.search(base=zone_dn,
+ scope=ldb.SCOPE_SUBTREE,
+ expression=("(&(objectClass=dnsNode)"
+ "(!(dNSTombstoned=TRUE)))"),
+ attrs=["dnsRecord", "name"])
+ return samdb, nodes
+
+ def mark_old_records_static(self, server, zone_name, date_string, dry_run):
+ try:
+ ts = time.strptime(date_string, "%Y-%m-%d")
+ t = time.mktime(ts)
+ except ValueError as e:
+ raise CommandError(f"Invalid date {date_string}: should be YYY-MM-DD")
+ threshold = dsdb_dns.unix_to_dns_timestamp(int(t))
+
+ samdb, nodes = self._get_dns_nodes(server, zone_name)
+
+ for node in nodes:
+ if "dnsRecord" not in node:
+ continue
+
+ values = list(node["dnsRecord"])
+ changes = 0
+ for i, v in enumerate(values):
+ rec = ndr_unpack(dnsp.DnssrvRpcRecord, v)
+ if rec.dwTimeStamp < threshold and rec.dwTimeStamp != 0:
+ rec.dwTimeStamp = 0
+ values[i] = ndr_pack(rec)
+ changes += 1
+
+ if changes == 0:
+ continue
+
+ name = node["name"][0].decode()
+
+ if dry_run:
+ print(f"would make {changes}/{len(values)} records static "
+ f"on {name}.{zone_name}.", file=self.outf)
+ continue
+
+ msg = ldb.Message.from_dict(samdb,
+ {'dn': node.dn,
+ 'dnsRecord': values
+ },
+ ldb.FLAG_MOD_REPLACE)
+ samdb.modify(msg)
+ print(f"made {changes}/{len(values)} records static on "
+ f"{name}.{zone_name}.", file=self.outf)
+
+ def mark_records_static_regex(self, server, zone_name, regex, dry_run):
+ """Make the records of nodes with matching names static.
+ """
+ r = re.compile(regex)
+ samdb, nodes = self._get_dns_nodes(server, zone_name)
+
+ for node in nodes:
+ name = node["name"][0].decode()
+ if not r.search(name):
+ continue
+ if "dnsRecord" not in node:
+ continue
+
+ values = list(node["dnsRecord"])
+ if len(values) == 0:
+ continue
+
+ changes = 0
+ for i, v in enumerate(values):
+ rec = ndr_unpack(dnsp.DnssrvRpcRecord, v)
+ if rec.dwTimeStamp != 0:
+ rec.dwTimeStamp = 0
+ values[i] = ndr_pack(rec)
+ changes += 1
+
+ if changes == 0:
+ continue
+
+ if dry_run:
+ print(f"would make {changes}/{len(values)} records static "
+ f"on {name}.{zone_name}.", file=self.outf)
+ continue
+
+ msg = ldb.Message.from_dict(samdb,
+ {'dn': node.dn,
+ 'dnsRecord': values
+ },
+ ldb.FLAG_MOD_REPLACE)
+ samdb.modify(msg)
+ print(f"made {changes}/{len(values)} records static on "
+ f"{name}.{zone_name}.", file=self.outf)
+
+ def mark_records_dynamic_regex(self, server, zone_name, regex, dry_run):
+ """Make the records of nodes with matching names dynamic, with a
+ current timestamp. In this case we only adjust the A, AAAA,
+ and TXT records.
+ """
+ r = re.compile(regex)
+ samdb, nodes = self._get_dns_nodes(server, zone_name)
+ now = time.time()
+ dns_timestamp = dsdb_dns.unix_to_dns_timestamp(int(now))
+ safe_wtypes = {
+ dnsp.DNS_TYPE_A,
+ dnsp.DNS_TYPE_AAAA,
+ dnsp.DNS_TYPE_TXT
+ }
+
+ for node in nodes:
+ name = node["name"][0].decode()
+ if not r.search(name):
+ continue
+ if "dnsRecord" not in node:
+ continue
+
+ values = list(node["dnsRecord"])
+ if len(values) == 0:
+ continue
+
+ changes = 0
+ for i, v in enumerate(values):
+ rec = ndr_unpack(dnsp.DnssrvRpcRecord, v)
+ if rec.wType in safe_wtypes and rec.dwTimeStamp == 0:
+ rec.dwTimeStamp = dns_timestamp
+ values[i] = ndr_pack(rec)
+ changes += 1
+
+ if changes == 0:
+ continue
+
+ if dry_run:
+ print(f"would make {changes}/{len(values)} records dynamic "
+ f"on {name}.{zone_name}.", file=self.outf)
+ continue
+
+ msg = ldb.Message.from_dict(samdb,
+ {'dn': node.dn,
+ 'dnsRecord': values
+ },
+ ldb.FLAG_MOD_REPLACE)
+ samdb.modify(msg)
+ print(f"made {changes}/{len(values)} records dynamic on "
+ f"{name}.{zone_name}.", file=self.outf)
+
+
class cmd_zoneinfo(Command):
    """Query for zone information."""

    synopsis = '%prog <server> <zone> [options]'

    takes_args = ['server', 'zone']

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_options = [
        Option('--client-version', help='Client Version',
               default='longhorn', metavar='w2k|dotnet|longhorn',
               choices=['w2k', 'dotnet', 'longhorn'], dest='cli_ver'),
    ]

    def run(self, server, zone, cli_ver, sambaopts=None, credopts=None,
            versionopts=None):
        # Connect with the supplied credentials, fetch the ZoneInfo
        # blob for the zone, and pretty-print it.
        self.lp = sambaopts.get_loadparm()
        self.creds = credopts.get_credentials(self.lp)
        conn = DnsConnWrapper(server, self.lp, self.creds)

        typeid, res = conn.DnssrvQuery2(dns_client_version(cli_ver),
                                        0, server, zone, 'ZoneInfo')
        print_zoneinfo(self.outf, typeid, res)
+
+
class cmd_zonelist(Command):
    """Query for zones."""

    synopsis = '%prog <server> [options]'

    takes_args = ['server']

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_options = [
        Option('--client-version', help='Client Version',
               default='longhorn', metavar='w2k|dotnet|longhorn',
               choices=['w2k', 'dotnet', 'longhorn'], dest='cli_ver'),
        Option('--primary', help='List primary zones (default)',
               action='store_true', dest='primary'),
        Option('--secondary', help='List secondary zones',
               action='store_true', dest='secondary'),
        Option('--cache', help='List cached zones',
               action='store_true', dest='cache'),
        Option('--auto', help='List automatically created zones',
               action='store_true', dest='auto'),
        Option('--forward', help='List forward zones',
               action='store_true', dest='forward'),
        Option('--reverse', help='List reverse zones',
               action='store_true', dest='reverse'),
        Option('--ds', help='List directory integrated zones',
               action='store_true', dest='ds'),
        Option('--non-ds', help='List non-directory zones',
               action='store_true', dest='nonds')
    ]

    def run(self, server, cli_ver, primary=False, secondary=False, cache=False,
            auto=False, forward=False, reverse=False, ds=False, nonds=False,
            sambaopts=None, credopts=None, versionopts=None):
        # Assemble the zone-selection bitmask from the boolean options.
        selections = (
            (primary, dnsserver.DNS_ZONE_REQUEST_PRIMARY),
            (secondary, dnsserver.DNS_ZONE_REQUEST_SECONDARY),
            (cache, dnsserver.DNS_ZONE_REQUEST_CACHE),
            (auto, dnsserver.DNS_ZONE_REQUEST_AUTO),
            (forward, dnsserver.DNS_ZONE_REQUEST_FORWARD),
            (reverse, dnsserver.DNS_ZONE_REQUEST_REVERSE),
            (ds, dnsserver.DNS_ZONE_REQUEST_DS),
            (nonds, dnsserver.DNS_ZONE_REQUEST_NON_DS),
        )
        request_filter = 0
        for wanted, flag in selections:
            if wanted:
                request_filter |= flag

        # With no selection at all, default to listing primary zones.
        if request_filter == 0:
            request_filter = dnsserver.DNS_ZONE_REQUEST_PRIMARY

        self.lp = sambaopts.get_loadparm()
        self.creds = credopts.get_credentials(self.lp)
        dns_conn = DnsConnWrapper(server, self.lp, self.creds)

        client_version = dns_client_version(cli_ver)

        typeid, res = dns_conn.DnssrvComplexOperation2(client_version,
                                                       0, server, None,
                                                       'EnumZones',
                                                       dnsserver.DNSSRV_TYPEID_DWORD,
                                                       request_filter)

        # The reply typeid depends on the negotiated client version.
        if client_version == dnsserver.DNS_CLIENT_VERSION_W2K:
            typeid = dnsserver.DNSSRV_TYPEID_ZONE_W2K
        else:
            typeid = dnsserver.DNSSRV_TYPEID_ZONE
        print_enumzones(self.outf, typeid, res)
+
+
class cmd_zonecreate(Command):
    """Create a zone."""

    synopsis = '%prog <server> <zone> [options]'

    takes_args = ['server', 'zone']

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_options = [
        Option('--client-version', help='Client Version',
               default='longhorn', metavar='w2k|dotnet|longhorn',
               choices=['w2k', 'dotnet', 'longhorn'], dest='cli_ver'),
        Option('--dns-directory-partition',
               help='Specify the naming context for the new zone, which '
                    'affects the replication scope (domain or forest wide '
                    'replication, default: domain).',
               default='domain',
               metavar='domain|forest',
               choices=['domain', 'forest'],
               dest='dns_dp'),
    ]

    def run(self,
            server,
            zone,
            cli_ver,
            dns_dp,
            sambaopts=None,
            credopts=None,
            versionopts=None):
        """Create a DS-integrated primary zone, then restrict it to
        secure dynamic updates.

        Raises CommandError (via the messages mapping) if the zone
        already exists.
        """
        self.lp = sambaopts.get_loadparm()
        self.creds = credopts.get_credentials(self.lp)
        dns_conn = DnsConnWrapper(server, self.lp, self.creds)

        zone = zone.lower()

        dns_directorypartition = dnsserver.DNS_DP_DOMAIN_DEFAULT
        if dns_dp == 'forest':
            dns_directorypartition = dnsserver.DNS_DP_FOREST_DEFAULT

        # Pick the create-info structure matching the client version;
        # all common fields are filled in afterwards in one place.
        client_version = dns_client_version(cli_ver)
        if client_version == dnsserver.DNS_CLIENT_VERSION_W2K:
            typeid = dnsserver.DNSSRV_TYPEID_ZONE_CREATE_W2K
            zone_create_info = dnsserver.DNS_RPC_ZONE_CREATE_INFO_W2K()
        elif client_version == dnsserver.DNS_CLIENT_VERSION_DOTNET:
            typeid = dnsserver.DNSSRV_TYPEID_ZONE_CREATE_DOTNET
            zone_create_info = dnsserver.DNS_RPC_ZONE_CREATE_INFO_DOTNET()
        else:
            typeid = dnsserver.DNSSRV_TYPEID_ZONE_CREATE
            zone_create_info = dnsserver.DNS_RPC_ZONE_CREATE_INFO_LONGHORN()

        zone_create_info.pszZoneName = zone
        zone_create_info.dwZoneType = dnsp.DNS_ZONE_TYPE_PRIMARY
        zone_create_info.fAging = 0
        zone_create_info.fDsIntegrated = 1
        zone_create_info.fLoadExisting = 1
        # The W2K structure has no directory-partition field.
        if client_version != dnsserver.DNS_CLIENT_VERSION_W2K:
            zone_create_info.dwDpFlags = dns_directorypartition

        messages = {
            werror.WERR_DNS_ERROR_ZONE_ALREADY_EXISTS: (
                f'Zone "{zone}" already exists.')
        }

        # Fix: the "already exists" message mapping belongs on the
        # ZoneCreate call (where that error is raised), not only on the
        # follow-up property reset.
        dns_conn.DnssrvOperation2(client_version, 0, server, None,
                                  0, 'ZoneCreate', typeid,
                                  zone_create_info, messages=messages)

        # Restrict the new zone to secure dynamic updates only.
        typeid = dnsserver.DNSSRV_TYPEID_NAME_AND_PARAM
        name_and_param = dnsserver.DNS_RPC_NAME_AND_PARAM()
        name_and_param.pszNodeName = 'AllowUpdate'
        name_and_param.dwParam = dnsp.DNS_ZONE_UPDATE_SECURE

        dns_conn.DnssrvOperation2(client_version, 0, server, zone,
                                  0, 'ResetDwordProperty', typeid,
                                  name_and_param, messages=messages)

        self.outf.write('Zone %s created successfully\n' % zone)
+
+
class cmd_zonedelete(Command):
    """Delete a zone."""

    synopsis = '%prog <server> <zone> [options]'

    takes_args = ['server', 'zone']

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    def run(self, server, zone, sambaopts=None, credopts=None,
            versionopts=None):
        """Delete *zone* from the directory.

        Raises CommandError (via the messages mapping) if the zone does
        not exist.
        """
        self.lp = sambaopts.get_loadparm()
        self.creds = credopts.get_credentials(self.lp)
        dns_conn = DnsConnWrapper(server, self.lp, self.creds)

        zone = zone.lower()

        messages = {
            werror.WERR_DNS_ERROR_ZONE_DOES_NOT_EXIST: (
                f'Zone {zone} does not exist and so could not be deleted.'),
        }
        # Fix: the operation's return value was bound to an unused
        # variable; drop it.
        dns_conn.DnssrvOperation2(dnsserver.DNS_CLIENT_VERSION_LONGHORN,
                                  0, server, zone, 0, 'DeleteZoneFromDs',
                                  dnsserver.DNSSRV_TYPEID_NULL,
                                  None, messages=messages)

        self.outf.write('Zone %s deleted successfully\n' % zone)
+
+
class cmd_query(Command):
    """Query a name."""

    synopsis = ('%prog <server> <zone> <name> '
                '<A|AAAA|PTR|CNAME|MX|NS|SOA|SRV|TXT|ALL> [options]')

    takes_args = ['server', 'zone', 'name', 'rtype']

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_options = [
        Option('--authority', help='Search authoritative records (default)',
               action='store_true', dest='authority'),
        Option('--cache', help='Search cached records',
               action='store_true', dest='cache'),
        Option('--glue', help='Search glue records',
               action='store_true', dest='glue'),
        Option('--root', help='Search root hints',
               action='store_true', dest='root'),
        Option('--additional', help='List additional records',
               action='store_true', dest='additional'),
        Option('--no-children', help='Do not list children',
               action='store_true', dest='no_children'),
        Option('--only-children', help='List only children',
               action='store_true', dest='only_children')
    ]

    def run(self, server, zone, name, rtype, authority=False, cache=False,
            glue=False, root=False, additional=False, no_children=False,
            only_children=False, sambaopts=None, credopts=None,
            versionopts=None):
        record_type = dns_type_flag(rtype)

        # Wildcards are not supported; point the user at "@" instead.
        if '*' in name:
            self.outf.write('use "@" to dump entire domain, looking up %s\n' %
                            name)

        # Build the view-selection bitmask from the boolean options.
        views = (
            (authority, dnsserver.DNS_RPC_VIEW_AUTHORITY_DATA),
            (cache, dnsserver.DNS_RPC_VIEW_CACHE_DATA),
            (glue, dnsserver.DNS_RPC_VIEW_GLUE_DATA),
            (root, dnsserver.DNS_RPC_VIEW_ROOT_HINT_DATA),
            (additional, dnsserver.DNS_RPC_VIEW_ADDITIONAL_DATA),
            (no_children, dnsserver.DNS_RPC_VIEW_NO_CHILDREN),
            (only_children, dnsserver.DNS_RPC_VIEW_ONLY_CHILDREN),
        )
        select_flags = 0
        for wanted, flag in views:
            if wanted:
                select_flags |= flag

        if select_flags == 0:
            select_flags = dnsserver.DNS_RPC_VIEW_AUTHORITY_DATA

        # --additional on its own needs a data source to go with it.
        if select_flags == dnsserver.DNS_RPC_VIEW_ADDITIONAL_DATA:
            self.outf.write('Specify either --authority or --root along with --additional.\n')
            self.outf.write('Assuming --authority.\n')
            select_flags |= dnsserver.DNS_RPC_VIEW_AUTHORITY_DATA

        self.lp = sambaopts.get_loadparm()
        self.creds = credopts.get_credentials(self.lp)
        dns_conn = DnsConnWrapper(server, self.lp, self.creds)

        messages = {
            werror.WERR_DNS_ERROR_NAME_DOES_NOT_EXIST: (
                'Record or zone does not exist.')
        }
        buflen, res = dns_conn.DnssrvEnumRecords2(
            dnsserver.DNS_CLIENT_VERSION_LONGHORN, 0, server, zone, name,
            None, record_type, select_flags, None, None,
            messages=messages)

        print_dnsrecords(self.outf, res)
+
+
class cmd_roothints(Command):
    """Query root hints."""

    synopsis = '%prog <server> [<name>] [options]'

    takes_args = ['server', 'name?']

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    def run(self, server, name='.', sambaopts=None, credopts=None,
            versionopts=None):
        # Root hints live under the '..RootHints' pseudo-zone; fetch
        # the NS records along with their additional (glue) data.
        select_flags = (dnsserver.DNS_RPC_VIEW_ROOT_HINT_DATA |
                        dnsserver.DNS_RPC_VIEW_ADDITIONAL_DATA)

        self.lp = sambaopts.get_loadparm()
        self.creds = credopts.get_credentials(self.lp)
        conn = DnsConnWrapper(server, self.lp, self.creds)

        buflen, res = conn.DnssrvEnumRecords2(
            dnsserver.DNS_CLIENT_VERSION_LONGHORN, 0, server, '..RootHints',
            name, None, dnsp.DNS_TYPE_NS, select_flags, None, None)
        print_dnsrecords(self.outf, res)
+
+
class cmd_add_record(Command):
    """Add a DNS record

    For each type data contents are as follows:
    A ipv4_address_string
    AAAA ipv6_address_string
    PTR fqdn_string
    CNAME fqdn_string
    NS fqdn_string
    MX "fqdn_string preference"
    SRV "fqdn_string port priority weight"
    TXT "'string1' 'string2' ..."
    """

    synopsis = '%prog <server> <zone> <name> <A|AAAA|PTR|CNAME|NS|MX|SRV|TXT> <data>'

    takes_args = ['server', 'zone', 'name', 'rtype', 'data']

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    def run(self, server, zone, name, rtype, data, sambaopts=None,
            credopts=None, versionopts=None):
        """Parse *data* into a DNS record of type *rtype* and add it.

        Raises CommandError for unsupported record types, and (via the
        messages mapping) if the zone is missing or the record exists.
        """
        if rtype.upper() not in ('A', 'AAAA', 'PTR', 'CNAME', 'NS', 'MX', 'SRV', 'TXT'):
            raise CommandError('Adding record of type %s is not supported' % rtype)

        record_type = dns_type_flag(rtype)
        rec = data_to_dns_record(record_type, data)

        self.lp = sambaopts.get_loadparm()
        self.creds = credopts.get_credentials(self.lp)
        dns_conn = DnsConnWrapper(server, self.lp, self.creds)

        add_rec_buf = dnsserver.DNS_RPC_RECORD_BUF()
        add_rec_buf.rec = rec

        messages = {
            werror.WERR_DNS_ERROR_NAME_DOES_NOT_EXIST: (
                'Zone does not exist; record could not be added. '
                # fix: the name[...] bracket was left unclosed
                f'zone[{zone}] name[{name}]'),
            werror.WERR_DNS_ERROR_RECORD_ALREADY_EXISTS: (
                'Record already exists; record could not be added. '
                f'zone[{zone}] name[{name}]')
        }
        dns_conn.DnssrvUpdateRecord2(dnsserver.DNS_CLIENT_VERSION_LONGHORN,
                                     0, server, zone, name, add_rec_buf, None,
                                     messages=messages)

        self.outf.write('Record added successfully\n')
+
+
class cmd_update_record(Command):
    """Update a DNS record

    For each type data contents are as follows:
    A ipv4_address_string
    AAAA ipv6_address_string
    PTR fqdn_string
    CNAME fqdn_string
    NS fqdn_string
    MX "fqdn_string preference"
    SOA "fqdn_dns fqdn_email serial refresh retry expire minimumttl"
    SRV "fqdn_string port priority weight"
    TXT "'string1' 'string2' ..."
    """

    synopsis = '%prog <server> <zone> <name> <A|AAAA|PTR|CNAME|NS|MX|SOA|SRV|TXT> <olddata> <newdata>'

    takes_args = ['server', 'zone', 'name', 'rtype', 'olddata', 'newdata']

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    def run(self, server, zone, name, rtype, olddata, newdata,
            sambaopts=None, credopts=None, versionopts=None):
        rtype = rtype.upper()
        supported = ('A', 'AAAA', 'PTR', 'CNAME', 'NS', 'MX', 'SOA', 'SRV', 'TXT')
        if rtype not in supported:
            raise CommandError('Updating record of type %s is not supported' % rtype)

        # Reject syntactically invalid addresses before touching the wire.
        try:
            if rtype == 'A':
                inet_pton(AF_INET, newdata)
            elif rtype == 'AAAA':
                inet_pton(AF_INET6, newdata)
        except OSError as e:
            raise CommandError(f"bad data for {rtype}: {e!r}")

        record_type = dns_type_flag(rtype)
        rec = data_to_dns_record(record_type, newdata)

        self.lp = sambaopts.get_loadparm()
        self.creds = credopts.get_credentials(self.lp)
        dns_conn = DnsConnWrapper(server, self.lp, self.creds)

        # Locate the record that matches olddata; it is both the one we
        # delete and the source of the metadata we keep.
        try:
            rec_match = dns_record_match(dns_conn.dns_conn, server, zone,
                                         name, record_type, olddata)
        except DNSParseError as e:
            raise CommandError(*e.args) from None

        if not rec_match:
            raise CommandError('Record or zone does not exist.')

        # Carry the existing record's metadata over to the replacement.
        rec.dwFlags = rec_match.dwFlags
        rec.dwSerial = rec_match.dwSerial
        rec.dwTtlSeconds = rec_match.dwTtlSeconds
        rec.dwTimeStamp = rec_match.dwTimeStamp

        add_rec_buf = dnsserver.DNS_RPC_RECORD_BUF()
        add_rec_buf.rec = rec
        del_rec_buf = dnsserver.DNS_RPC_RECORD_BUF()
        del_rec_buf.rec = rec_match

        messages = {
            werror.WERR_DNS_ERROR_NAME_DOES_NOT_EXIST: (
                f'Zone {zone} does not exist; record could not be updated.'),
        }

        # A single call both removes the old record and adds the new one.
        dns_conn.DnssrvUpdateRecord2(dnsserver.DNS_CLIENT_VERSION_LONGHORN,
                                     0, server, zone, name,
                                     add_rec_buf, del_rec_buf,
                                     messages=messages)

        self.outf.write('Record updated successfully\n')
+
+
class cmd_delete_record(Command):
    """Delete a DNS record

    For each type data contents are as follows:
    A ipv4_address_string
    AAAA ipv6_address_string
    PTR fqdn_string
    CNAME fqdn_string
    NS fqdn_string
    MX "fqdn_string preference"
    SRV "fqdn_string port priority weight"
    TXT "'string1' 'string2' ..."
    """

    synopsis = '%prog <server> <zone> <name> <A|AAAA|PTR|CNAME|NS|MX|SRV|TXT> <data>'

    takes_args = ['server', 'zone', 'name', 'rtype', 'data']

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    def run(self, server, zone, name, rtype, data, sambaopts=None, credopts=None, versionopts=None):
        """Parse *data* into a DNS record of type *rtype* and delete the
        matching record from *zone*.
        """
        if rtype.upper() not in ('A', 'AAAA', 'PTR', 'CNAME', 'NS', 'MX', 'SRV', 'TXT'):
            raise CommandError('Deleting record of type %s is not supported' % rtype)

        record_type = dns_type_flag(rtype)
        rec = data_to_dns_record(record_type, data)

        self.lp = sambaopts.get_loadparm()
        self.creds = credopts.get_credentials(self.lp)
        dns_conn = DnsConnWrapper(server, self.lp, self.creds)

        del_rec_buf = dnsserver.DNS_RPC_RECORD_BUF()
        del_rec_buf.rec = rec

        messages = {
            werror.WERR_DNS_ERROR_NAME_DOES_NOT_EXIST: (
                'Zone does not exist; record could not be deleted. '
                # fix: the name[...] bracket was left unclosed
                f'zone[{zone}] name[{name}]'),
            # NOTE(review): RECORD_ALREADY_EXISTS mapped to a delete
            # failure looks odd — confirm which WERROR the server
            # actually returns for a missing record on delete.
            werror.WERR_DNS_ERROR_RECORD_ALREADY_EXISTS: (
                'Record already exists; record could not be deleted. '
                f'zone[{zone}] name[{name}]')
        }
        dns_conn.DnssrvUpdateRecord2(dnsserver.DNS_CLIENT_VERSION_LONGHORN,
                                     0,
                                     server,
                                     zone,
                                     name,
                                     None,
                                     del_rec_buf,
                                     messages=messages)

        self.outf.write('Record deleted successfully\n')
+
+
class cmd_cleanup_record(Command):
    """Cleanup DNS records for a DNS host.

    example:

    samba-tool dns cleanup dc1 dc1.samdom.test.site -U USER%PASSWORD

    NOTE: This command in many cases will only mark the `dNSTombstoned` attr
    as `TRUE` on the DNS records. Querying will no longer return results but
    there may still be some placeholder entries in the database.
    """

    synopsis = '%prog <server> <dnshostname>'

    takes_args = ['server', 'dnshostname']

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_options = [
        Option("-v", "--verbose", help="Be verbose", action="store_true"),
        Option("-q", "--quiet", help="Be quiet", action="store_true"),
    ]

    def run(self, server, dnshostname, sambaopts=None, credopts=None,
            versionopts=None, verbose=False, quiet=False):
        # Gather configuration and credentials, then hand the actual
        # record removal over to the shared remove_dc helper via LDAP.
        lp = sambaopts.get_loadparm()
        creds = credopts.get_credentials(lp)
        logger = self.get_logger(verbose=verbose, quiet=quiet)

        samdb = SamDB(url="ldap://%s" % server,
                      session_info=system_session(),
                      credentials=creds,
                      lp=lp)

        remove_dc.remove_dns_references(samdb, logger, dnshostname,
                                        ignore_no_name=True)
+
+
class cmd_dns(SuperCommand):
    """Domain Name Service (DNS) management."""

    # One instance per subcommand; the order here is the order shown
    # in the command help.
    subcommands = {
        'serverinfo': cmd_serverinfo(),
        'zoneoptions': cmd_zoneoptions(),
        'zoneinfo': cmd_zoneinfo(),
        'zonelist': cmd_zonelist(),
        'zonecreate': cmd_zonecreate(),
        'zonedelete': cmd_zonedelete(),
        'query': cmd_query(),
        'roothints': cmd_roothints(),
        'add': cmd_add_record(),
        'update': cmd_update_record(),
        'delete': cmd_delete_record(),
        'cleanup': cmd_cleanup_record(),
    }
diff --git a/python/samba/netcmd/domain/__init__.py b/python/samba/netcmd/domain/__init__.py
new file mode 100644
index 0000000..1c527f1
--- /dev/null
+++ b/python/samba/netcmd/domain/__init__.py
@@ -0,0 +1,73 @@
+# domain management
+#
+# Copyright Matthias Dieter Wallnoefer 2009
+# Copyright Andrew Kroeger 2009
+# Copyright Jelmer Vernooij 2007-2012
+# Copyright Giampaolo Lauria 2011
+# Copyright Matthieu Patou <mat@matws.net> 2011
+# Copyright Andrew Bartlett 2008-2015
+# Copyright Stefan Metzmacher 2012
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from samba import is_ad_dc_built
+from samba.netcmd import SuperCommand
+
+from .auth import cmd_domain_auth
+from .backup import cmd_domain_backup
+from .claim import cmd_domain_claim
+from .classicupgrade import cmd_domain_classicupgrade
+from .common import (common_join_options, common_ntvfs_options,
+ common_provision_join_options)
+from .dcpromo import cmd_domain_dcpromo
+from .demote import cmd_domain_demote
+from .functional_prep import cmd_domain_functional_prep
+from .info import cmd_domain_info
+from .join import cmd_domain_join
+from .keytab import cmd_domain_export_keytab
+from .leave import cmd_domain_leave
+from .level import cmd_domain_level
+from .passwordsettings import cmd_domain_passwordsettings
+from .provision import cmd_domain_provision
+from .samba3upgrade import cmd_domain_samba3upgrade
+from .schemaupgrade import cmd_domain_schema_upgrade
+from .tombstones import cmd_domain_tombstones
+from .trust import cmd_domain_trust
+
+
class cmd_domain(SuperCommand):
    """Domain management."""

    subcommands = {}
    # NOTE(review): cmd_domain_export_keytab appears to be None when the
    # .keytab module could not provide it — confirm in that module.
    if cmd_domain_export_keytab is not None:
        subcommands["exportkeytab"] = cmd_domain_export_keytab()
    subcommands.update({
        "info": cmd_domain_info(),
        "join": cmd_domain_join(),
        "leave": cmd_domain_leave(),
        "claim": cmd_domain_claim(),
        "auth": cmd_domain_auth(),
    })
    # These subcommands are only registered on an AD DC build.
    if is_ad_dc_built():
        subcommands.update({
            "demote": cmd_domain_demote(),
            "provision": cmd_domain_provision(),
            "dcpromo": cmd_domain_dcpromo(),
            "level": cmd_domain_level(),
            "passwordsettings": cmd_domain_passwordsettings(),
            "classicupgrade": cmd_domain_classicupgrade(),
            "samba3upgrade": cmd_domain_samba3upgrade(),
            "trust": cmd_domain_trust(),
            "tombstones": cmd_domain_tombstones(),
            "schemaupgrade": cmd_domain_schema_upgrade(),
            "functionalprep": cmd_domain_functional_prep(),
            "backup": cmd_domain_backup(),
        })
diff --git a/python/samba/netcmd/domain/auth/__init__.py b/python/samba/netcmd/domain/auth/__init__.py
new file mode 100644
index 0000000..fd74f3e
--- /dev/null
+++ b/python/samba/netcmd/domain/auth/__init__.py
@@ -0,0 +1,35 @@
+# Unix SMB/CIFS implementation.
+#
+# authentication silos
+#
+# Copyright (C) Catalyst.Net Ltd. 2023
+#
+# Written by Rob van der Linde <rob@catalyst.net.nz>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from samba.netcmd import SuperCommand
+
+from .policy import cmd_domain_auth_policy
+from .silo import cmd_domain_auth_silo
+
+
class cmd_domain_auth(SuperCommand):
    """Manage authentication silos and policies on the domain."""

    # One instance per subcommand.
    subcommands = {}
    subcommands["policy"] = cmd_domain_auth_policy()
    subcommands["silo"] = cmd_domain_auth_silo()
diff --git a/python/samba/netcmd/domain/auth/policy.py b/python/samba/netcmd/domain/auth/policy.py
new file mode 100644
index 0000000..de9ce4b
--- /dev/null
+++ b/python/samba/netcmd/domain/auth/policy.py
@@ -0,0 +1,685 @@
+# Unix SMB/CIFS implementation.
+#
+# authentication silos - authentication policy management
+#
+# Copyright (C) Catalyst.Net Ltd. 2023
+#
+# Written by Rob van der Linde <rob@catalyst.net.nz>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import samba.getopt as options
+from samba.netcmd import Command, CommandError, Option, SuperCommand
+from samba.netcmd.domain.models import AuthenticationPolicy,\
+ AuthenticationSilo, Group
+from samba.netcmd.domain.models.auth_policy import MIN_TGT_LIFETIME,\
+ MAX_TGT_LIFETIME, StrongNTLMPolicy
+from samba.netcmd.domain.models.exceptions import ModelError
+from samba.netcmd.validators import Range
+
+
+def check_similar_args(option, args):
+ """Helper method for checking similar mutually exclusive args.
+
+ Example: --user-allowed-to-authenticate-from and
+ --user-allowed-to-authenticate-from-device-silo
+ """
+ num = sum(arg is not None for arg in args)
+ if num > 1:
+ raise CommandError(f"{option} argument repeated {num} times.")
+
+
+class UserOptions(options.OptionGroup):
+ """User options used by policy create and policy modify commands."""
+
+ def __init__(self, parser):
+ super().__init__(parser, "User Options")
+
+ self.add_option("--user-tgt-lifetime-mins",
+ help="Ticket-Granting-Ticket lifetime for user accounts.",
+ dest="tgt_lifetime", type=int, action="callback",
+ callback=self.set_option,
+ validators=[Range(min=MIN_TGT_LIFETIME, max=MAX_TGT_LIFETIME)])
+ self.add_option("--user-allow-ntlm-auth",
+ help="Allow NTLM network authentication despite the fact that the user "
+ "is restricted to selected devices.",
+ dest="allow_ntlm_auth", default=False,
+ action="callback", callback=self.set_option)
+ self.add_option("--user-allowed-to-authenticate-from",
+ help="SDDL Rules setting which device the user is allowed to authenticate from.",
+ type=str, dest="allowed_to_authenticate_from",
+ action="callback", callback=self.set_option,
+ metavar="SDDL")
+ self.add_option("--user-allowed-to-authenticate-from-device-silo",
+ help="To authenticate, the user must log in from a device in SILO.",
+ type=str, dest="allowed_to_authenticate_from_device_silo",
+ action="callback", callback=self.set_option,
+ metavar="SILO")
+ self.add_option("--user-allowed-to-authenticate-from-device-group",
+ help="To authenticate, the user must log in from a device in GROUP.",
+ type=str, dest="allowed_to_authenticate_from_device_group",
+ action="callback", callback=self.set_option,
+ metavar="GROUP")
+ self.add_option("--user-allowed-to-authenticate-to",
+ help="A target service, on a user account, requires the connecting user to match SDDL",
+ type=str, dest="allowed_to_authenticate_to",
+ action="callback", callback=self.set_option,
+ metavar="SDDL")
+ self.add_option("--user-allowed-to-authenticate-to-by-group",
+ help="A target service, on a user account, requires the connecting user to be in GROUP",
+ type=str, dest="allowed_to_authenticate_to_by_group",
+ action="callback", callback=self.set_option,
+ metavar="GROUP")
+ self.add_option("--user-allowed-to-authenticate-to-by-silo",
+ help="A target service, on a user account, requires the connecting user to be in SILO",
+ type=str, dest="allowed_to_authenticate_to_by_silo",
+ action="callback", callback=self.set_option,
+ metavar="SILO")
+
+
+class ServiceOptions(options.OptionGroup):
+ """Service options used by policy create and policy modify commands."""
+
+ def __init__(self, parser):
+ super().__init__(parser, "Service Options")
+
+ self.add_option("--service-tgt-lifetime-mins",
+ help="Ticket-Granting-Ticket lifetime for service accounts.",
+ dest="tgt_lifetime", type=int, action="callback",
+ callback=self.set_option,
+ validators=[Range(min=MIN_TGT_LIFETIME, max=MAX_TGT_LIFETIME)])
+ self.add_option("--service-allow-ntlm-auth",
+ help="Allow NTLM network authentication despite "
+ "the fact that the service account "
+ "is restricted to selected devices.",
+ dest="allow_ntlm_auth", default=False,
+ action="callback", callback=self.set_option)
+ self.add_option("--service-allowed-to-authenticate-from",
+ help="SDDL Rules setting which device the "
+ "service account is allowed to authenticate from.",
+ type=str, dest="allowed_to_authenticate_from",
+ action="callback", callback=self.set_option,
+ metavar="SDDL")
+ self.add_option("--service-allowed-to-authenticate-from-device-silo",
+ help="To authenticate, the service must authenticate on a device in SILO.",
+ type=str, dest="allowed_to_authenticate_from_device_silo",
+ action="callback", callback=self.set_option,
+ metavar="SILO")
+ self.add_option("--service-allowed-to-authenticate-from-device-group",
+ help="To authenticate, the service must authenticate on a device in GROUP.",
+ type=str, dest="allowed_to_authenticate_from_device_group",
+ action="callback", callback=self.set_option,
+ metavar="GROUP")
+ self.add_option("--service-allowed-to-authenticate-to",
+ help="The target service requires the connecting user to match SDDL",
+ type=str, dest="allowed_to_authenticate_to",
+ action="callback", callback=self.set_option,
+ metavar="SDDL")
+ self.add_option("--service-allowed-to-authenticate-to-by-group",
+ help="The target service requires the connecting user to be in GROUP",
+ type=str, dest="allowed_to_authenticate_to_by_group",
+ action="callback", callback=self.set_option,
+ metavar="GROUP")
+ self.add_option("--service-allowed-to-authenticate-to-by-silo",
+ help="The target service requires the connecting user to be in SILO",
+ type=str, dest="allowed_to_authenticate_to_by_silo",
+ action="callback", callback=self.set_option,
+ metavar="SILO")
+
+
+class ComputerOptions(options.OptionGroup):
+ """Computer options used by policy create and policy modify commands."""
+
+ def __init__(self, parser):
+ super().__init__(parser, "Computer Options")
+
+ self.add_option("--computer-tgt-lifetime-mins",
+ help="Ticket-Granting-Ticket lifetime for computer accounts.",
+ dest="tgt_lifetime", type=int, action="callback",
+ callback=self.set_option,
+ validators=[Range(min=MIN_TGT_LIFETIME, max=MAX_TGT_LIFETIME)])
+ self.add_option("--computer-allowed-to-authenticate-to",
+ help="The computer account (server, workstation) service requires the connecting user to match SDDL",
+ type=str, dest="allowed_to_authenticate_to",
+ action="callback", callback=self.set_option,
+ metavar="SDDL")
+ self.add_option("--computer-allowed-to-authenticate-to-by-group",
+ help="The computer account (server, workstation) service requires the connecting user to be in GROUP",
+ type=str, dest="allowed_to_authenticate_to_by_group",
+ action="callback", callback=self.set_option,
+ metavar="GROUP")
+ self.add_option("--computer-allowed-to-authenticate-to-by-silo",
+ help="The computer account (server, workstation) service requires the connecting user to be in SILO",
+ type=str, dest="allowed_to_authenticate_to_by_silo",
+ action="callback", callback=self.set_option,
+ metavar="SILO")
+
+
+class cmd_domain_auth_policy_list(Command):
+ """List authentication policies on the domain."""
+
+ synopsis = "%prog -H <URL> [options]"
+
+ takes_optiongroups = {
+ "sambaopts": options.SambaOptions,
+ "credopts": options.CredentialsOptions,
+ "hostopts": options.HostOptions,
+ }
+
+ takes_options = [
+ Option("--json", help="Output results in JSON format.",
+ dest="output_format", action="store_const", const="json"),
+ ]
+
+ def run(self, hostopts=None, sambaopts=None, credopts=None,
+ output_format=None):
+
+ ldb = self.ldb_connect(hostopts, sambaopts, credopts)
+
+ # Authentication policies grouped by cn.
+ try:
+ policies = {policy.cn: policy.as_dict()
+ for policy in AuthenticationPolicy.query(ldb)}
+ except ModelError as e:
+ raise CommandError(e)
+
+ # Using json output format gives more detail.
+ if output_format == "json":
+ self.print_json(policies)
+ else:
+ for policy in policies.keys():
+ self.outf.write(f"{policy}\n")
+
+
+class cmd_domain_auth_policy_view(Command):
+ """View an authentication policy on the domain."""
+
+ synopsis = "%prog -H <URL> [options]"
+
+ takes_optiongroups = {
+ "sambaopts": options.SambaOptions,
+ "credopts": options.CredentialsOptions,
+ "hostopts": options.HostOptions,
+ }
+
+ takes_options = [
+ Option("--name",
+ help="Name of authentication policy to view (required).",
+ dest="name", action="store", type=str, required=True),
+ ]
+
+ def run(self, hostopts=None, sambaopts=None, credopts=None, name=None):
+
+ ldb = self.ldb_connect(hostopts, sambaopts, credopts)
+
+ try:
+ policy = AuthenticationPolicy.get(ldb, cn=name)
+ except ModelError as e:
+ raise CommandError(e)
+
+ # Check if authentication policy exists first.
+ if policy is None:
+ raise CommandError(f"Authentication policy {name} not found.")
+
+ # Display policy as JSON.
+ self.print_json(policy.as_dict())
+
+
+class cmd_domain_auth_policy_create(Command):
+ """Create an authentication policy on the domain."""
+
+ synopsis = "%prog -H <URL> [options]"
+
+ takes_optiongroups = {
+ "sambaopts": options.SambaOptions,
+ "credopts": options.CredentialsOptions,
+ "hostopts": options.HostOptions,
+ "useropts": UserOptions,
+ "serviceopts": ServiceOptions,
+ "computeropts": ComputerOptions,
+ }
+
+ takes_options = [
+ Option("--name", help="Name of authentication policy (required).",
+ dest="name", action="store", type=str, required=True),
+ Option("--description",
+ help="Optional description for authentication policy.",
+ dest="description", action="store", type=str),
+ Option("--protect",
+               help="Protect authentication policy from accidental deletion.",
+ dest="protect", action="store_true"),
+ Option("--unprotect",
+               help="Unprotect authentication policy from accidental deletion.",
+ dest="unprotect", action="store_true"),
+ Option("--audit",
+ help="Only audit authentication policy.",
+ dest="audit", action="store_true"),
+ Option("--enforce",
+ help="Enforce authentication policy.",
+ dest="enforce", action="store_true"),
+ Option("--strong-ntlm-policy",
+ help=f"Strong NTLM Policy ({StrongNTLMPolicy.choices_str()}).",
+ dest="strong_ntlm_policy", type="choice", action="store",
+ choices=StrongNTLMPolicy.get_choices(),
+ default="Disabled"),
+ ]
+
+ def run(self, hostopts=None, sambaopts=None, credopts=None, useropts=None,
+ serviceopts=None, computeropts=None, name=None, description=None,
+ protect=None, unprotect=None, audit=None, enforce=None,
+ strong_ntlm_policy=None):
+
+ if protect and unprotect:
+ raise CommandError("--protect and --unprotect cannot be used together.")
+ if audit and enforce:
+ raise CommandError("--audit and --enforce cannot be used together.")
+
+ # Check for repeated, similar arguments.
+ check_similar_args("--user-allowed-to-authenticate-from",
+ [useropts.allowed_to_authenticate_from,
+ useropts.allowed_to_authenticate_from_device_group,
+ useropts.allowed_to_authenticate_from_device_silo])
+ check_similar_args("--user-allowed-to-authenticate-to",
+ [useropts.allowed_to_authenticate_to,
+ useropts.allowed_to_authenticate_to_by_group,
+ useropts.allowed_to_authenticate_to_by_silo])
+ check_similar_args("--service-allowed-to-authenticate-from",
+ [serviceopts.allowed_to_authenticate_from,
+ serviceopts.allowed_to_authenticate_from_device_group,
+ serviceopts.allowed_to_authenticate_from_device_silo])
+ check_similar_args("--service-allowed-to-authenticate-to",
+ [serviceopts.allowed_to_authenticate_to,
+ serviceopts.allowed_to_authenticate_to_by_group,
+ serviceopts.allowed_to_authenticate_to_by_silo])
+ check_similar_args("--computer-allowed-to-authenticate-to",
+ [computeropts.allowed_to_authenticate_to,
+ computeropts.allowed_to_authenticate_to_by_group,
+ computeropts.allowed_to_authenticate_to_by_silo])
+
+ ldb = self.ldb_connect(hostopts, sambaopts, credopts)
+
+ # Generate SDDL for authenticating users from a device in a group
+ if useropts.allowed_to_authenticate_from_device_group:
+ group = Group.get(
+ ldb, cn=useropts.allowed_to_authenticate_from_device_group)
+ useropts.allowed_to_authenticate_from = group.get_authentication_sddl()
+
+ # Generate SDDL for authenticating users from a device in a silo
+ if useropts.allowed_to_authenticate_from_device_silo:
+ silo = AuthenticationSilo.get(
+ ldb, cn=useropts.allowed_to_authenticate_from_device_silo)
+ useropts.allowed_to_authenticate_from = silo.get_authentication_sddl()
+
+ # Generate SDDL for authenticating user accounts to a group
+ if useropts.allowed_to_authenticate_to_by_group:
+ group = Group.get(
+ ldb, cn=useropts.allowed_to_authenticate_to_by_group)
+ useropts.allowed_to_authenticate_to = group.get_authentication_sddl()
+
+ # Generate SDDL for authenticating user accounts to a silo
+ if useropts.allowed_to_authenticate_to_by_silo:
+ silo = AuthenticationSilo.get(
+ ldb, cn=useropts.allowed_to_authenticate_to_by_silo)
+ useropts.allowed_to_authenticate_to = silo.get_authentication_sddl()
+
+ # Generate SDDL for authenticating service accounts from a device in a group
+ if serviceopts.allowed_to_authenticate_from_device_group:
+ group = Group.get(
+ ldb, cn=serviceopts.allowed_to_authenticate_from_device_group)
+ serviceopts.allowed_to_authenticate_from = group.get_authentication_sddl()
+
+ # Generate SDDL for authenticating service accounts from a device in a silo
+ if serviceopts.allowed_to_authenticate_from_device_silo:
+ silo = AuthenticationSilo.get(
+ ldb, cn=serviceopts.allowed_to_authenticate_from_device_silo)
+ serviceopts.allowed_to_authenticate_from = silo.get_authentication_sddl()
+
+ # Generate SDDL for authenticating service accounts to a group
+ if serviceopts.allowed_to_authenticate_to_by_group:
+ group = Group.get(
+ ldb, cn=serviceopts.allowed_to_authenticate_to_by_group)
+ serviceopts.allowed_to_authenticate_to = group.get_authentication_sddl()
+
+ # Generate SDDL for authenticating service accounts to a silo
+ if serviceopts.allowed_to_authenticate_to_by_silo:
+ silo = AuthenticationSilo.get(
+ ldb, cn=serviceopts.allowed_to_authenticate_to_by_silo)
+ serviceopts.allowed_to_authenticate_to = silo.get_authentication_sddl()
+
+ # Generate SDDL for authenticating computer accounts to a group
+ if computeropts.allowed_to_authenticate_to_by_group:
+ group = Group.get(
+ ldb, cn=computeropts.allowed_to_authenticate_to_by_group)
+ computeropts.allowed_to_authenticate_to = group.get_authentication_sddl()
+
+ # Generate SDDL for authenticating computer accounts to a silo
+ if computeropts.allowed_to_authenticate_to_by_silo:
+ silo = AuthenticationSilo.get(
+ ldb, cn=computeropts.allowed_to_authenticate_to_by_silo)
+ computeropts.allowed_to_authenticate_to = silo.get_authentication_sddl()
+
+ try:
+ policy = AuthenticationPolicy.get(ldb, cn=name)
+ except ModelError as e:
+ raise CommandError(e)
+
+ # Make sure authentication policy doesn't already exist.
+ if policy is not None:
+ raise CommandError(f"Authentication policy {name} already exists.")
+
+ # New policy object.
+ policy = AuthenticationPolicy(
+ cn=name,
+ description=description,
+ strong_ntlm_policy=StrongNTLMPolicy[strong_ntlm_policy.upper()],
+ user_allow_ntlm_auth=useropts.allow_ntlm_auth,
+ user_tgt_lifetime=useropts.tgt_lifetime,
+ user_allowed_to_authenticate_from=useropts.allowed_to_authenticate_from,
+ user_allowed_to_authenticate_to=useropts.allowed_to_authenticate_to,
+ service_allow_ntlm_auth=serviceopts.allow_ntlm_auth,
+ service_tgt_lifetime=serviceopts.tgt_lifetime,
+ service_allowed_to_authenticate_from=serviceopts.allowed_to_authenticate_from,
+ service_allowed_to_authenticate_to=serviceopts.allowed_to_authenticate_to,
+ computer_tgt_lifetime=computeropts.tgt_lifetime,
+ computer_allowed_to_authenticate_to=computeropts.allowed_to_authenticate_to,
+ )
+
+ # Either --enforce will be set or --audit but never both.
+ # The default if both are missing is enforce=True.
+ if enforce is not None:
+ policy.enforced = enforce
+ else:
+ policy.enforced = not audit
+
+ # Create policy.
+ try:
+ policy.save(ldb)
+
+ if protect:
+ policy.protect(ldb)
+ except ModelError as e:
+ raise CommandError(e)
+
+ # Authentication policy created successfully.
+ self.outf.write(f"Created authentication policy: {name}\n")
+
+
+class cmd_domain_auth_policy_modify(Command):
+ """Modify authentication policies on the domain."""
+
+ synopsis = "%prog -H <URL> [options]"
+
+ takes_optiongroups = {
+ "sambaopts": options.SambaOptions,
+ "credopts": options.CredentialsOptions,
+ "hostopts": options.HostOptions,
+ "useropts": UserOptions,
+ "serviceopts": ServiceOptions,
+ "computeropts": ComputerOptions,
+ }
+
+ takes_options = [
+ Option("--name", help="Name of authentication policy (required).",
+ dest="name", action="store", type=str, required=True),
+ Option("--description",
+ help="Optional description for authentication policy.",
+ dest="description", action="store", type=str),
+ Option("--protect",
+               help="Protect authentication policy from accidental deletion.",
+ dest="protect", action="store_true"),
+ Option("--unprotect",
+               help="Unprotect authentication policy from accidental deletion.",
+ dest="unprotect", action="store_true"),
+ Option("--audit",
+ help="Only audit authentication policy.",
+ dest="audit", action="store_true"),
+ Option("--enforce",
+ help="Enforce authentication policy.",
+ dest="enforce", action="store_true"),
+ Option("--strong-ntlm-policy",
+ help=f"Strong NTLM Policy ({StrongNTLMPolicy.choices_str()}).",
+ dest="strong_ntlm_policy", type="choice", action="store",
+ choices=StrongNTLMPolicy.get_choices()),
+ ]
+
+ def run(self, hostopts=None, sambaopts=None, credopts=None, useropts=None,
+ serviceopts=None, computeropts=None, name=None, description=None,
+ protect=None, unprotect=None, audit=None, enforce=None,
+ strong_ntlm_policy=None):
+
+ if protect and unprotect:
+ raise CommandError("--protect and --unprotect cannot be used together.")
+ if audit and enforce:
+ raise CommandError("--audit and --enforce cannot be used together.")
+
+ # Check for repeated, similar arguments.
+ check_similar_args("--user-allowed-to-authenticate-from",
+ [useropts.allowed_to_authenticate_from,
+ useropts.allowed_to_authenticate_from_device_group,
+ useropts.allowed_to_authenticate_from_device_silo])
+ check_similar_args("--user-allowed-to-authenticate-to",
+ [useropts.allowed_to_authenticate_to,
+ useropts.allowed_to_authenticate_to_by_group,
+ useropts.allowed_to_authenticate_to_by_silo])
+ check_similar_args("--service-allowed-to-authenticate-from",
+ [serviceopts.allowed_to_authenticate_from,
+ serviceopts.allowed_to_authenticate_from_device_group,
+ serviceopts.allowed_to_authenticate_from_device_silo])
+ check_similar_args("--service-allowed-to-authenticate-to",
+ [serviceopts.allowed_to_authenticate_to,
+ serviceopts.allowed_to_authenticate_to_by_group,
+ serviceopts.allowed_to_authenticate_to_by_silo])
+ check_similar_args("--computer-allowed-to-authenticate-to",
+ [computeropts.allowed_to_authenticate_to,
+ computeropts.allowed_to_authenticate_to_by_group,
+ computeropts.allowed_to_authenticate_to_by_silo])
+
+ ldb = self.ldb_connect(hostopts, sambaopts, credopts)
+
+ # Generate SDDL for authenticating users from a device in a group
+ if useropts.allowed_to_authenticate_from_device_group:
+ group = Group.get(
+ ldb, cn=useropts.allowed_to_authenticate_from_device_group)
+ useropts.allowed_to_authenticate_from = group.get_authentication_sddl()
+
+ # Generate SDDL for authenticating users from a device in a silo
+ if useropts.allowed_to_authenticate_from_device_silo:
+ silo = AuthenticationSilo.get(
+ ldb, cn=useropts.allowed_to_authenticate_from_device_silo)
+ useropts.allowed_to_authenticate_from = silo.get_authentication_sddl()
+
+ # Generate SDDL for authenticating user accounts to a group
+ if useropts.allowed_to_authenticate_to_by_group:
+ group = Group.get(
+ ldb, cn=useropts.allowed_to_authenticate_to_by_group)
+ useropts.allowed_to_authenticate_to = group.get_authentication_sddl()
+
+ # Generate SDDL for authenticating user accounts to a silo
+ if useropts.allowed_to_authenticate_to_by_silo:
+ silo = AuthenticationSilo.get(
+ ldb, cn=useropts.allowed_to_authenticate_to_by_silo)
+ useropts.allowed_to_authenticate_to = silo.get_authentication_sddl()
+
+        # Generate SDDL for authenticating service accounts from a device in a group
+ if serviceopts.allowed_to_authenticate_from_device_group:
+ group = Group.get(
+ ldb, cn=serviceopts.allowed_to_authenticate_from_device_group)
+ serviceopts.allowed_to_authenticate_from = group.get_authentication_sddl()
+
+ # Generate SDDL for authenticating service accounts from a device in a silo
+ if serviceopts.allowed_to_authenticate_from_device_silo:
+ silo = AuthenticationSilo.get(
+ ldb, cn=serviceopts.allowed_to_authenticate_from_device_silo)
+ serviceopts.allowed_to_authenticate_from = silo.get_authentication_sddl()
+
+ # Generate SDDL for authenticating service accounts to a group
+ if serviceopts.allowed_to_authenticate_to_by_group:
+ group = Group.get(
+ ldb, cn=serviceopts.allowed_to_authenticate_to_by_group)
+ serviceopts.allowed_to_authenticate_to = group.get_authentication_sddl()
+
+ # Generate SDDL for authenticating service accounts to a silo
+ if serviceopts.allowed_to_authenticate_to_by_silo:
+ silo = AuthenticationSilo.get(
+ ldb, cn=serviceopts.allowed_to_authenticate_to_by_silo)
+ serviceopts.allowed_to_authenticate_to = silo.get_authentication_sddl()
+
+ # Generate SDDL for authenticating computer accounts to a group
+ if computeropts.allowed_to_authenticate_to_by_group:
+ group = Group.get(
+ ldb, cn=computeropts.allowed_to_authenticate_to_by_group)
+ computeropts.allowed_to_authenticate_to = group.get_authentication_sddl()
+
+ # Generate SDDL for authenticating computer accounts to a silo
+ if computeropts.allowed_to_authenticate_to_by_silo:
+ silo = AuthenticationSilo.get(
+ ldb, cn=computeropts.allowed_to_authenticate_to_by_silo)
+ computeropts.allowed_to_authenticate_to = silo.get_authentication_sddl()
+
+ try:
+ policy = AuthenticationPolicy.get(ldb, cn=name)
+ except ModelError as e:
+ raise CommandError(e)
+
+ # Check if authentication policy exists.
+ if policy is None:
+ raise CommandError(f"Authentication policy {name} not found.")
+
+ # Either --enforce will be set or --audit but never both.
+ if enforce:
+ policy.enforced = True
+ elif audit:
+ policy.enforced = False
+
+ # Update the description.
+ if description is not None:
+ policy.description = description
+
+ # User sign on
+ ###############
+
+ if strong_ntlm_policy is not None:
+ policy.strong_ntlm_policy = \
+ StrongNTLMPolicy[strong_ntlm_policy.upper()]
+
+ if useropts.tgt_lifetime is not None:
+ policy.user_tgt_lifetime = useropts.tgt_lifetime
+
+ if useropts.allowed_to_authenticate_from is not None:
+ policy.user_allowed_to_authenticate_from = \
+ useropts.allowed_to_authenticate_from
+
+ if useropts.allowed_to_authenticate_to is not None:
+ policy.user_allowed_to_authenticate_to = \
+ useropts.allowed_to_authenticate_to
+
+ # Service sign on
+ ##################
+
+ if serviceopts.tgt_lifetime is not None:
+ policy.service_tgt_lifetime = serviceopts.tgt_lifetime
+
+ if serviceopts.allowed_to_authenticate_from is not None:
+ policy.service_allowed_to_authenticate_from = \
+ serviceopts.allowed_to_authenticate_from
+
+ if serviceopts.allowed_to_authenticate_to is not None:
+ policy.service_allowed_to_authenticate_to = \
+ serviceopts.allowed_to_authenticate_to
+
+ # Computer
+ ###########
+
+ if computeropts.tgt_lifetime is not None:
+ policy.computer_tgt_lifetime = computeropts.tgt_lifetime
+
+ if computeropts.allowed_to_authenticate_to is not None:
+ policy.computer_allowed_to_authenticate_to = \
+ computeropts.allowed_to_authenticate_to
+
+ # Update policy.
+ try:
+ policy.save(ldb)
+
+ if protect:
+ policy.protect(ldb)
+ elif unprotect:
+ policy.unprotect(ldb)
+ except ModelError as e:
+ raise CommandError(e)
+
+ # Authentication policy updated successfully.
+ self.outf.write(f"Updated authentication policy: {name}\n")
+
+
+class cmd_domain_auth_policy_delete(Command):
+ """Delete authentication policies on the domain."""
+
+ synopsis = "%prog -H <URL> [options]"
+
+ takes_optiongroups = {
+ "sambaopts": options.SambaOptions,
+ "credopts": options.CredentialsOptions,
+ "hostopts": options.HostOptions,
+ }
+
+ takes_options = [
+ Option("--name", help="Name of authentication policy (required).",
+ dest="name", action="store", type=str, required=True),
+ Option("--force", help="Force delete protected authentication policy.",
+ dest="force", action="store_true")
+ ]
+
+ def run(self, hostopts=None, sambaopts=None, credopts=None, name=None,
+ force=None):
+
+ ldb = self.ldb_connect(hostopts, sambaopts, credopts)
+
+ try:
+ policy = AuthenticationPolicy.get(ldb, cn=name)
+ except ModelError as e:
+ raise CommandError(e)
+
+ # Check if authentication policy exists first.
+ if policy is None:
+ raise CommandError(f"Authentication policy {name} not found.")
+
+ # Delete item, --force removes delete protection first.
+ try:
+ if force:
+ policy.unprotect(ldb)
+
+ policy.delete(ldb)
+ except ModelError as e:
+ if not force:
+ raise CommandError(
+ f"{e}\nTry --force to delete protected authentication policies.")
+ else:
+ raise CommandError(e)
+
+ # Authentication policy deleted successfully.
+ self.outf.write(f"Deleted authentication policy: {name}\n")
+
+
+class cmd_domain_auth_policy(SuperCommand):
+ """Manage authentication policies on the domain."""
+
+ subcommands = {
+ "list": cmd_domain_auth_policy_list(),
+ "view": cmd_domain_auth_policy_view(),
+ "create": cmd_domain_auth_policy_create(),
+ "modify": cmd_domain_auth_policy_modify(),
+ "delete": cmd_domain_auth_policy_delete(),
+ }
diff --git a/python/samba/netcmd/domain/auth/silo.py b/python/samba/netcmd/domain/auth/silo.py
new file mode 100644
index 0000000..2e27761
--- /dev/null
+++ b/python/samba/netcmd/domain/auth/silo.py
@@ -0,0 +1,402 @@
+# Unix SMB/CIFS implementation.
+#
+# authentication silos - authentication silo management
+#
+# Copyright (C) Catalyst.Net Ltd. 2023
+#
+# Written by Rob van der Linde <rob@catalyst.net.nz>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import samba.getopt as options
+from samba.netcmd import Command, CommandError, Option, SuperCommand
+from samba.netcmd.domain.models import AuthenticationPolicy, AuthenticationSilo
+from samba.netcmd.domain.models.exceptions import ModelError
+
+from .silo_member import cmd_domain_auth_silo_member
+
+
class cmd_domain_auth_silo_list(Command):
    """List authentication silos on the domain."""

    synopsis = "%prog -H <URL> [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "credopts": options.CredentialsOptions,
        "hostopts": options.HostOptions,
    }

    takes_options = [
        Option("--json", help="Output results in JSON format.",
               dest="output_format", action="store_const", const="json"),
    ]

    def run(self, hostopts=None, sambaopts=None, credopts=None,
            output_format=None):
        ldb = self.ldb_connect(hostopts, sambaopts, credopts)

        # Build a cn -> attribute-dict mapping for every silo in the domain.
        try:
            silos = {entry.cn: entry.as_dict()
                     for entry in AuthenticationSilo.query(ldb)}
        except ModelError as e:
            raise CommandError(e)

        if output_format == "json":
            # JSON output includes the full attribute detail.
            self.print_json(silos)
            return

        # Plain output: one silo name per line.
        for cn in silos:
            self.outf.write(f"{cn}\n")
+
+
class cmd_domain_auth_silo_view(Command):
    """View an authentication silo on the domain."""

    synopsis = "%prog -H <URL> [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "credopts": options.CredentialsOptions,
        "hostopts": options.HostOptions,
    }

    takes_options = [
        Option("--name",
               help="Name of authentication silo to view (required).",
               dest="name", action="store", type=str, required=True),
    ]

    def run(self, hostopts=None, sambaopts=None, credopts=None, name=None):
        ldb = self.ldb_connect(hostopts, sambaopts, credopts)

        # Fetch the silo by cn, converting model failures to command errors.
        try:
            silo = AuthenticationSilo.get(ldb, cn=name)
        except ModelError as e:
            raise CommandError(e)

        if silo is None:
            raise CommandError(f"Authentication silo {name} not found.")

        # Dump the full silo as JSON.
        self.print_json(silo.as_dict())
+
+
class cmd_domain_auth_silo_create(Command):
    """Create a new authentication silo on the domain."""

    synopsis = "%prog -H <URL> [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "credopts": options.CredentialsOptions,
        "hostopts": options.HostOptions,
    }

    takes_options = [
        Option("--name", help="Name of authentication silo (required).",
               dest="name", action="store", type=str, required=True),
        Option("--description",
               help="Optional description for authentication silo.",
               dest="description", action="store", type=str),
        Option("--user-authentication-policy",
               help="User account authentication policy.",
               dest="user_authentication_policy", action="store", type=str,
               metavar="USER_POLICY"),
        Option("--service-authentication-policy",
               help="Managed service account authentication policy.",
               dest="service_authentication_policy", action="store", type=str,
               metavar="SERVICE_POLICY"),
        Option("--computer-authentication-policy",
               help="Computer authentication policy.",
               dest="computer_authentication_policy", action="store", type=str,
               metavar="COMPUTER_POLICY"),
        Option("--protect",
               help="Protect authentication silo from accidental deletion.",
               dest="protect", action="store_true"),
        Option("--unprotect",
               help="Unprotect authentication silo from accidental deletion.",
               dest="unprotect", action="store_true"),
        Option("--audit",
               help="Only audit silo policies.",
               dest="audit", action="store_true"),
        Option("--enforce",
               help="Enforce silo policies.",
               dest="enforce", action="store_true")
    ]

    @staticmethod
    def get_policy(ldb, name):
        """Helper function to fetch auth policy or raise CommandError.

        :param ldb: Ldb connection
        :param name: Either the DN or name of authentication policy
        :raises CommandError: if the policy cannot be found or looked up
        """
        try:
            return AuthenticationPolicy.lookup(ldb, name)
        except (LookupError, ModelError, ValueError) as e:
            # Also catch ModelError here, matching the identical helper on
            # cmd_domain_auth_silo_modify, so ldb-level lookup failures are
            # reported as a CommandError rather than an unhandled traceback.
            raise CommandError(e)

    def run(self, hostopts=None, sambaopts=None, credopts=None,
            name=None, description=None,
            user_authentication_policy=None,
            service_authentication_policy=None,
            computer_authentication_policy=None,
            protect=None, unprotect=None,
            audit=None, enforce=None):

        # Reject mutually exclusive flag combinations up front.
        if protect and unprotect:
            raise CommandError("--protect and --unprotect cannot be used together.")
        if audit and enforce:
            raise CommandError("--audit and --enforce cannot be used together.")

        ldb = self.ldb_connect(hostopts, sambaopts, credopts)

        try:
            silo = AuthenticationSilo.get(ldb, cn=name)
        except ModelError as e:
            raise CommandError(e)

        # Make sure silo doesn't already exist.
        if silo is not None:
            raise CommandError(f"Authentication silo {name} already exists.")

        # New silo object.
        silo = AuthenticationSilo(cn=name, description=description)

        # Set user policy
        if user_authentication_policy:
            silo.user_authentication_policy = \
                self.get_policy(ldb, user_authentication_policy).dn

        # Set service policy
        if service_authentication_policy:
            silo.service_authentication_policy = \
                self.get_policy(ldb, service_authentication_policy).dn

        # Set computer policy
        if computer_authentication_policy:
            silo.computer_authentication_policy = \
                self.get_policy(ldb, computer_authentication_policy).dn

        # Either --enforce will be set or --audit but never both.
        # The default if both are missing is enforce=True.
        if enforce is not None:
            silo.enforced = enforce
        else:
            silo.enforced = not audit

        # Create silo, optionally adding delete protection.
        try:
            silo.save(ldb)

            if protect:
                silo.protect(ldb)
        except ModelError as e:
            raise CommandError(e)

        # Authentication silo created successfully.
        self.outf.write(f"Created authentication silo: {name}\n")
+
+
class cmd_domain_auth_silo_modify(Command):
    """Modify an authentication silo on the domain."""

    synopsis = "%prog -H <URL> [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "credopts": options.CredentialsOptions,
        "hostopts": options.HostOptions,
    }

    takes_options = [
        Option("--name", help="Name of authentication silo (required).",
               dest="name", action="store", type=str, required=True),
        Option("--description",
               help="Optional description for authentication silo.",
               dest="description", action="store", type=str),
        Option("--user-authentication-policy",
               help="User account authentication policy.",
               dest="user_authentication_policy", action="store", type=str,
               metavar="USER_POLICY"),
        Option("--service-authentication-policy",
               help="Managed service account authentication policy.",
               dest="service_authentication_policy", action="store", type=str,
               metavar="SERVICE_POLICY"),
        Option("--computer-authentication-policy",
               help="Computer authentication policy.",
               dest="computer_authentication_policy", action="store", type=str,
               metavar="COMPUTER_POLICY"),
        Option("--protect",
               help="Protect authentication silo from accidental deletion.",
               dest="protect", action="store_true"),
        Option("--unprotect",
               help="Unprotect authentication silo from accidental deletion.",
               dest="unprotect", action="store_true"),
        Option("--audit",
               help="Only audit silo policies.",
               dest="audit", action="store_true"),
        Option("--enforce",
               help="Enforce silo policies.",
               dest="enforce", action="store_true")
    ]

    @staticmethod
    def get_policy(ldb, name):
        """Helper function to fetch auth policy or raise CommandError.

        :param ldb: Ldb connection
        :param name: Either the DN or name of authentication policy
        :raises CommandError: if the policy cannot be found or looked up
        """
        try:
            return AuthenticationPolicy.lookup(ldb, name)
        except (LookupError, ModelError, ValueError) as e:
            raise CommandError(e)

    def run(self, hostopts=None, sambaopts=None, credopts=None,
            name=None, description=None,
            user_authentication_policy=None,
            service_authentication_policy=None,
            computer_authentication_policy=None,
            protect=None, unprotect=None,
            audit=None, enforce=None):

        # Reject mutually exclusive flag combinations up front.
        if audit and enforce:
            raise CommandError("--audit and --enforce cannot be used together.")
        if protect and unprotect:
            raise CommandError("--protect and --unprotect cannot be used together.")

        ldb = self.ldb_connect(hostopts, sambaopts, credopts)

        try:
            silo = AuthenticationSilo.get(ldb, cn=name)
        except ModelError as e:
            raise CommandError(e)

        # Check if silo exists first.
        if silo is None:
            raise CommandError(f"Authentication silo {name} not found.")

        # Either --enforce will be set or --audit but never both.
        # Leaving both unset keeps the silo's current enforced state.
        if enforce:
            silo.enforced = True
        elif audit:
            silo.enforced = False

        # Update the description.
        if description is not None:
            silo.description = description

        # Set or unset user policy.
        # An empty string ("") explicitly clears the policy; any other
        # (truthy) value is looked up as a policy name or DN.
        if user_authentication_policy == "":
            silo.user_authentication_policy = None
        elif user_authentication_policy:
            silo.user_authentication_policy = \
                self.get_policy(ldb, user_authentication_policy).dn

        # Set or unset service policy (same ""-clears semantics as above).
        if service_authentication_policy == "":
            silo.service_authentication_policy = None
        elif service_authentication_policy:
            silo.service_authentication_policy = \
                self.get_policy(ldb, service_authentication_policy).dn

        # Set or unset computer policy (same ""-clears semantics as above).
        if computer_authentication_policy == "":
            silo.computer_authentication_policy = None
        elif computer_authentication_policy:
            silo.computer_authentication_policy = \
                self.get_policy(ldb, computer_authentication_policy).dn

        # Update silo, then adjust delete protection if requested.
        try:
            silo.save(ldb)

            if protect:
                silo.protect(ldb)
            elif unprotect:
                silo.unprotect(ldb)
        except ModelError as e:
            raise CommandError(e)

        # Silo updated successfully.
        self.outf.write(f"Updated authentication silo: {name}\n")
+
+
class cmd_domain_auth_silo_delete(Command):
    """Delete an authentication silo on the domain."""

    synopsis = "%prog -H <URL> [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "credopts": options.CredentialsOptions,
        "hostopts": options.HostOptions,
    }

    takes_options = [
        Option("--name", help="Name of authentication silo (required).",
               dest="name", action="store", type=str, required=True),
        Option("--force", help="Force delete protected authentication silo.",
               dest="force", action="store_true")
    ]

    def run(self, hostopts=None, sambaopts=None, credopts=None, name=None,
            force=None):
        ldb = self.ldb_connect(hostopts, sambaopts, credopts)

        # Look the silo up by cn; model failures become command errors.
        try:
            silo = AuthenticationSilo.get(ldb, cn=name)
        except ModelError as e:
            raise CommandError(e)

        if silo is None:
            raise CommandError(f"Authentication silo {name} not found.")

        # --force strips delete protection before attempting the delete.
        try:
            if force:
                silo.unprotect(ldb)
            silo.delete(ldb)
        except ModelError as e:
            if force:
                raise CommandError(e)
            # A protected silo is the common failure; hint at --force.
            raise CommandError(
                f"{e}\nTry --force to delete protected authentication silos.")

        # Confirm success to the caller.
        self.outf.write(f"Deleted authentication silo: {name}\n")
+
+
class cmd_domain_auth_silo(SuperCommand):
    """Manage authentication silos on the domain."""

    # Maps each subcommand name to the Command instance that implements it;
    # "member" is itself a SuperCommand (grant/list/revoke).
    subcommands = {
        "list": cmd_domain_auth_silo_list(),
        "view": cmd_domain_auth_silo_view(),
        "create": cmd_domain_auth_silo_create(),
        "modify": cmd_domain_auth_silo_modify(),
        "delete": cmd_domain_auth_silo_delete(),
        "member": cmd_domain_auth_silo_member(),
    }
diff --git a/python/samba/netcmd/domain/auth/silo_member.py b/python/samba/netcmd/domain/auth/silo_member.py
new file mode 100644
index 0000000..9b41400
--- /dev/null
+++ b/python/samba/netcmd/domain/auth/silo_member.py
@@ -0,0 +1,201 @@
+# Unix SMB/CIFS implementation.
+#
+# authentication silos - silo member management
+#
+# Copyright (C) Catalyst.Net Ltd. 2023
+#
+# Written by Rob van der Linde <rob@catalyst.net.nz>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import samba.getopt as options
+from samba.netcmd import Command, CommandError, Option, SuperCommand
+from samba.netcmd.domain.models import AuthenticationSilo, User
+from samba.netcmd.domain.models.exceptions import ModelError
+
+
class cmd_domain_auth_silo_member_grant(Command):
    """Grant a member access to an authentication silo."""

    synopsis = "%prog -H <URL> [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "credopts": options.CredentialsOptions,
        "hostopts": options.HostOptions,
    }

    takes_options = [
        Option("--name",
               help="Name of authentication silo (required).",
               dest="name", action="store", type=str, required=True),
        Option("--member",
               help="Member to grant access to the silo (DN or account name).",
               dest="member", action="store", type=str, required=True),
    ]

    def run(self, hostopts=None, sambaopts=None, credopts=None,
            name=None, member=None):
        ldb = self.ldb_connect(hostopts, sambaopts, credopts)

        # Fetch the silo and fail early if it doesn't exist.
        try:
            silo = AuthenticationSilo.get(ldb, cn=name)
        except ModelError as e:
            raise CommandError(e)
        if silo is None:
            raise CommandError(f"Authentication silo {name} not found.")

        # Resolve the member (DN or account name) to a user.
        try:
            user = User.find(ldb, member)
        except ModelError as e:
            raise CommandError(e)
        if user is None:
            raise CommandError(f"User {member} not found.")

        # Grant the user access to the silo.
        try:
            silo.grant(ldb, user)
        except ModelError as e:
            raise CommandError(e)

        # Report whether the user's assigned silo now matches this silo.
        if user.assigned_silo and user.assigned_silo == silo.dn:
            status = "assigned"
        else:
            status = "unassigned"

        self.outf.write(
            f"User {user} granted access to the authentication silo {name} ({status}).\n")
+
+
class cmd_domain_auth_silo_member_list(Command):
    """List all members in the authentication silo."""

    synopsis = "%prog -H <URL> [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "credopts": options.CredentialsOptions,
        "hostopts": options.HostOptions,
    }

    takes_options = [
        Option("--name",
               help="Name of authentication silo (required).",
               dest="name", action="store", type=str, required=True),
        Option("--json", help="Output results in JSON format.",
               dest="output_format", action="store_const", const="json"),
    ]

    def run(self, hostopts=None, sambaopts=None, credopts=None,
            name=None, output_format=None):
        ldb = self.ldb_connect(hostopts, sambaopts, credopts)

        # Fetch the silo and fail early if it doesn't exist.
        try:
            silo = AuthenticationSilo.get(ldb, cn=name)
        except ModelError as e:
            raise CommandError(e)
        if silo is None:
            raise CommandError(f"Authentication silo {name} not found.")

        # Resolve each member DN stored on the silo to a User object.
        try:
            members = [User.get(ldb, dn=member_dn)
                       for member_dn in silo.members]
        except ModelError as e:
            raise CommandError(e)

        if output_format == "json":
            # JSON output carries full attribute detail per member.
            self.print_json([entry.as_dict() for entry in members])
        else:
            # Plain output: one member DN per line.
            for entry in members:
                self.outf.write(f"{entry.dn}\n")
+
+
class cmd_domain_auth_silo_member_revoke(Command):
    """Revoke a member from an authentication silo."""

    synopsis = "%prog -H <URL> [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "credopts": options.CredentialsOptions,
        "hostopts": options.HostOptions,
    }

    takes_options = [
        Option("--name",
               help="Name of authentication silo (required).",
               dest="name", action="store", type=str, required=True),
        Option("--member",
               help="Member to revoke from the silo (DN or account name).",
               dest="member", action="store", type=str, required=True),
    ]

    def run(self, hostopts=None, sambaopts=None, credopts=None,
            name=None, member=None):

        ldb = self.ldb_connect(hostopts, sambaopts, credopts)

        try:
            silo = AuthenticationSilo.get(ldb, cn=name)
        except ModelError as e:
            raise CommandError(e)

        # Check if authentication silo exists first.
        if silo is None:
            raise CommandError(f"Authentication silo {name} not found.")

        try:
            user = User.find(ldb, member)
        except ModelError as e:
            raise CommandError(e)

        # Ensure the user actually exists first.
        if user is None:
            raise CommandError(f"User {member} not found.")

        # Revoke member access.
        try:
            silo.revoke(ldb, user)
        except ModelError as e:
            raise CommandError(e)

        # Display silo assigned status.
        # NOTE(review): "assigned" means the user's assigned_silo link still
        # points at this silo after the revoke — presumably indicating the
        # silo is still the user's assigned one; verify against the model.
        if user.assigned_silo and user.assigned_silo == silo.dn:
            status = "assigned"
        else:
            status = "unassigned"

        print(f"User {user} revoked from the authentication silo {name} ({status}).",
              file=self.outf)
+
+
class cmd_domain_auth_silo_member(SuperCommand):
    """Manage members in an authentication silo."""

    # Maps each subcommand name to the Command instance that implements it.
    subcommands = {
        "grant": cmd_domain_auth_silo_member_grant(),
        "list": cmd_domain_auth_silo_member_list(),
        "revoke": cmd_domain_auth_silo_member_revoke(),
    }
diff --git a/python/samba/netcmd/domain/backup.py b/python/samba/netcmd/domain/backup.py
new file mode 100644
index 0000000..fc7ff53
--- /dev/null
+++ b/python/samba/netcmd/domain/backup.py
@@ -0,0 +1,1256 @@
+# domain_backup
+#
+# Copyright Andrew Bartlett <abartlet@samba.org>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+import datetime
+import os
+import sys
+import logging
+import shutil
+import tempfile
+import samba
+import tdb
+import samba.getopt as options
+from samba.samdb import SamDB, get_default_backend_store
+import ldb
+from ldb import LdbError
+from samba.samba3 import libsmb_samba_internal as libsmb
+from samba.samba3 import param as s3param
+from samba.ntacls import backup_online, backup_restore, backup_offline
+from samba.auth import system_session
+from samba.join import DCJoinContext, join_clone, DCCloneAndRenameContext
+from samba.dcerpc.security import dom_sid
+from samba.netcmd import Option, CommandError
+from samba.dcerpc import misc, security, drsblobs
+from samba import Ldb
+from samba.netcmd.fsmo import cmd_fsmo_seize
+from samba.provision import make_smbconf, DEFAULTSITE
+from samba.upgradehelpers import update_krbtgt_account_password
+from samba.remove_dc import remove_dc
+from samba.provision import secretsdb_self_join
+from samba.dbchecker import dbcheck
+import re
+from samba.provision import guess_names, determine_host_ip, determine_host_ip6
+from samba.provision.sambadns import (fill_dns_data_partitions,
+ get_dnsadmins_sid,
+ get_domainguid)
+from samba.tdb_util import tdb_copy
+from samba.mdb_util import mdb_copy
+import errno
+from subprocess import CalledProcessError
+from samba import sites
+from samba.dsdb import _dsdb_load_udv_v2
+from samba.ndr import ndr_pack
+from samba.credentials import SMB_SIGNING_REQUIRED
+from samba import safe_tarfile as tarfile
+
+
+# work out a SID (based on a free RID) to use when the domain gets restored.
+# This ensures that the restored DC's SID won't clash with any other RIDs
+# already in use in the domain
def get_sid_for_restore(samdb, logger):
    """Return a new, unused SID for the DC created when the backup is restored.

    :param samdb: SamDB connection to the DC being backed up
    :param logger: logger for user-facing hints on failure
    :return: string SID (domain SID + '-' + a free RID)
    :raises CommandError: if no RID could be allocated or the candidate SID
        is already in use (both indicate a corrupt/unusable RID Set)
    """
    # Allocate a new RID without modifying the database. This should be safe,
    # because we acquire the RID master role after creating an account using
    # this RID during the restore process. Acquiring the RID master role
    # creates a new RID pool which we will fetch RIDs from, so we shouldn't get
    # duplicates.
    try:
        rid = samdb.next_free_rid()
    except LdbError as err:
        logger.info("A SID could not be allocated for restoring the domain. "
                    "Either no RID Set was found on this DC, "
                    "or the RID Set was not usable.")
        logger.info("To initialise this DC's RID pools, obtain a RID Set from "
                    "this domain's RID master, or run samba-tool dbcheck "
                    "to fix the existing RID Set.")
        raise CommandError("Cannot create backup", err)

    # Construct full SID
    sid = dom_sid(samdb.get_domain_sid())
    sid_for_restore = str(sid) + '-' + str(rid)

    # Confirm the SID is not already in use.  NOTE: the *expected* outcome of
    # this search is an ERR_NO_SUCH_OBJECT exception (handled below); if the
    # search returns at all, the candidate SID already exists, so both code
    # paths inside the try raise unconditionally.
    try:
        res = samdb.search(scope=ldb.SCOPE_BASE,
                           base='<SID=%s>' % sid_for_restore,
                           attrs=[],
                           controls=['show_deleted:1',
                                     'show_recycled:1'])
        if len(res) != 1:
            # This case makes no sense, but neither does a corrupt RID set
            raise CommandError("Cannot create backup - "
                               "this DC's RID pool is corrupt, "
                               "the next SID (%s) appears to be in use." %
                               sid_for_restore)
        raise CommandError("Cannot create backup - "
                           "this DC's RID pool is corrupt, "
                           "the next SID %s points to existing object %s. "
                           "Please run samba-tool dbcheck on the source DC." %
                           (sid_for_restore, res[0].dn))
    except ldb.LdbError as e:
        (enum, emsg) = e.args
        if enum != ldb.ERR_NO_SUCH_OBJECT:
            # We want NO_SUCH_OBJECT, anything else is a serious issue
            raise

    return str(sid) + '-' + str(rid)
+
+
def smb_sysvol_conn(server, lp, creds):
    """Returns an SMB connection to the sysvol share on the DC.

    SMB signing is forced for the duration of the connect; the caller's
    original signing setting on *creds* is restored afterwards — including
    when the connection attempt raises (the original code leaked the forced
    setting on failure).

    :param server: DC hostname or address
    :param lp: loadparm context (its configfile is loaded into an s3
        loadparm, which the SMB bindings require)
    :param creds: credentials used for the connection
    :return: a libsmb connection to the "sysvol" share
    """
    # the SMB bindings rely on having a s3 loadparm
    s3_lp = s3param.get_context()
    s3_lp.load(lp.configfile)

    # Force signing for the connection; always restore the saved state.
    saved_signing_state = creds.get_smb_signing()
    creds.set_smb_signing(SMB_SIGNING_REQUIRED)
    try:
        return libsmb.Conn(server, "sysvol", lp=s3_lp, creds=creds)
    finally:
        creds.set_smb_signing(saved_signing_state)
+
+
def get_timestamp():
    """Return the current local time in ISO format, with ':' replaced by '-'
    so the result is safe to embed in a filename."""
    now = datetime.datetime.now()
    return now.isoformat().replace(':', '-')
+
+
def backup_filepath(targetdir, name, time_str):
    """Return the path of the backup tar file for *name* at *time_str*,
    located inside *targetdir*."""
    tar_name = 'samba-backup-{0}-{1}.tar.bz2'.format(name, time_str)
    return os.path.join(targetdir, tar_name)
+
+
def create_backup_tar(logger, tmpdir, backup_filepath):
    """Pack everything in *tmpdir* into a new bzip2-compressed tar file.

    :param logger: logger used to report progress
    :param tmpdir: directory whose contents become the archive (stored under
        './' so the tar extracts in place)
    :param backup_filepath: path of the tar file to create
    """
    logger.info("Creating backup file %s..." % backup_filepath)
    # Context manager guarantees the tar file is closed (and its compressed
    # stream flushed) even if adding the directory tree raises part-way.
    with tarfile.open(backup_filepath, 'w:bz2') as tf:
        tf.add(tmpdir, arcname='./')
+
+
def create_log_file(targetdir, lp, backup_type, server, include_secrets,
                    extra_info=None):
    """Write a backup.txt summary file into *targetdir*.

    The summary gets included in the backup tar file, which makes it easy for
    users to see what the backup involved without having to untar the DB and
    interrogate it.

    :param targetdir: directory the summary file is written to
    :param lp: loadparm context; 'server string', 'workgroup' and 'realm' are
        recorded in the summary
    :param backup_type: backup flavour recorded in the summary, e.g. "online"
    :param server: the DC the backup was taken from
    :param include_secrets: whether domain secrets are in the backup
    :param extra_info: optional extra line appended to the summary
    """
    # 'with' replaces the original try/finally and guarantees the file is
    # closed even if one of the writes fails.
    with open(os.path.join(targetdir, "backup.txt"), 'w') as f:
        time_str = datetime.datetime.now().strftime('%Y-%b-%d %H:%M:%S')
        f.write("Backup created %s\n" % time_str)
        f.write("Using samba-tool version: %s\n" % lp.get('server string'))
        f.write("Domain %s backup, using DC '%s'\n" % (backup_type, server))
        f.write("Backup for domain %s (NetBIOS), %s (DNS realm)\n" %
                (lp.get('workgroup'), lp.get('realm').lower()))
        f.write("Backup contains domain secrets: %s\n" % str(include_secrets))
        if extra_info:
            f.write("%s\n" % extra_info)
+
+
+# Add a backup-specific marker to the DB with info that we'll use during
+# the restore process
def add_backup_marker(samdb, marker, value):
    """Add a backup-specific marker attribute to the @SAMBA_DSDB record.

    :param samdb: SamDB connection to the cloned database
    :param marker: attribute name to add (e.g. "backupDate", "backupType")
    :param value: value stored for the marker
    """
    m = ldb.Message()
    m.dn = ldb.Dn(samdb, "@SAMBA_DSDB")
    # FLAG_MOD_ADD: markers are added, never replacing existing attributes.
    m[marker] = ldb.MessageElement(value, ldb.FLAG_MOD_ADD, marker)
    samdb.modify(m)
+
+
def check_targetdir(logger, targetdir):
    """Validate the backup target directory, creating it if it is missing.

    Raises CommandError when no directory was given or when the path exists
    but is not a directory.
    """
    if targetdir is None:
        raise CommandError('Target directory required')

    if os.path.isdir(targetdir):
        return

    if os.path.exists(targetdir):
        # Path exists but is a file (or other non-directory).
        raise CommandError("%s is not a directory" % targetdir)

    logger.info('Creating targetdir %s...' % targetdir)
    os.makedirs(targetdir)
+
+
+# For '--no-secrets' backups, this sets the Administrator user's password to a
+# randomly-generated value. This is similar to the provision behaviour
def set_admin_password(logger, samdb):
    """Sets a randomly generated password for the backup DB's admin user.

    Used for '--no-secrets' backups, similar to the provision behaviour.

    :param logger: logger; the generated password is logged so the operator
        can log into the restored DB and set a real password afterwards
    :param samdb: SamDB connection to the backup database
    """

    # match the admin user by RID
    domainsid = samdb.get_domain_sid()
    match_admin = "(objectsid=%s-%s)" % (domainsid,
                                         security.DOMAIN_RID_ADMINISTRATOR)
    search_expr = "(&(objectClass=user)%s)" % (match_admin,)

    # retrieve the admin username (just in case it's been renamed)
    res = samdb.search(base=samdb.domain_dn(), scope=ldb.SCOPE_SUBTREE,
                       expression=search_expr)
    username = str(res[0]['samaccountname'])

    adminpass = samba.generate_random_password(12, 32)
    # NOTE: logging the password is deliberate — it is the only way the
    # operator can know the newly generated value.
    logger.info("Setting %s password in backup to: %s" % (username, adminpass))
    logger.info("Run 'samba-tool user setpassword %s' after restoring DB" %
                username)
    samdb.setpassword(search_expr, adminpass, force_change_at_next_login=False,
                      username=username)
+
+
class cmd_domain_backup_online(samba.netcmd.Command):
    """Copy a running DC's current DB into a backup tar file.

    Takes a backup copy of the current domain from a running DC. If the domain
    were to undergo a catastrophic failure, then the backup file can be used to
    recover the domain. The backup created is similar to the DB that a new DC
    would receive when it joins the domain.

    Note that:
    - it's recommended to run 'samba-tool dbcheck' before taking a backup-file
    and fix any errors it reports.
    - all the domain's secrets are included in the backup file.
    - although the DB contents can be untarred and examined manually, you need
    to run 'samba-tool domain backup restore' before you can start a Samba DC
    from the backup file."""

    synopsis = "%prog --server=<DC-to-backup> --targetdir=<output-dir>"
    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_options = [
        Option("--server", help="The DC to backup", type=str),
        Option("--targetdir", type=str,
               help="Directory to write the backup file to"),
        Option("--no-secrets", action="store_true", default=False,
               help="Exclude secret values from the backup created"),
        Option("--backend-store", type="choice", metavar="BACKENDSTORE",
               choices=["tdb", "mdb"],
               help="Specify the database backend to be used "
               "(default is %s)" % get_default_backend_store()),
    ]

    def run(self, sambaopts=None, credopts=None, server=None, targetdir=None,
            no_secrets=False, backend_store=None):
        """Clone the remote DC into a temp dir, mark the DB as a backup,
        bundle the sysvol share over SMB, and tar the result into targetdir.

        :raises CommandError: if --server or --targetdir is missing/invalid,
            or no free RID could be allocated for the restore SID
        """
        logger = self.get_logger()
        logger.setLevel(logging.DEBUG)

        lp = sambaopts.get_loadparm()
        creds = credopts.get_credentials(lp)

        # Make sure we have all the required args.
        if server is None:
            raise CommandError('Server required')

        check_targetdir(logger, targetdir)

        # The clone is staged in a temp dir inside targetdir, then tarred.
        tmpdir = tempfile.mkdtemp(dir=targetdir)

        # Run a clone join on the remote
        include_secrets = not no_secrets
        try:
            ctx = join_clone(logger=logger, creds=creds, lp=lp,
                             include_secrets=include_secrets, server=server,
                             dns_backend='SAMBA_INTERNAL', targetdir=tmpdir,
                             backend_store=backend_store)

            # get the paths used for the clone, then drop the old samdb connection
            paths = ctx.paths
            del ctx

            # Get a free RID to use as the new DC's SID (when it gets restored)
            remote_sam = SamDB(url='ldap://' + server, credentials=creds,
                               session_info=system_session(), lp=lp)
            new_sid = get_sid_for_restore(remote_sam, logger)
            realm = remote_sam.domain_dns_name()

            # Grab the remote DC's sysvol files and bundle them into a tar file
            logger.info("Backing up sysvol files (via SMB)...")
            sysvol_tar = os.path.join(tmpdir, 'sysvol.tar.gz')
            smb_conn = smb_sysvol_conn(server, lp, creds)
            backup_online(smb_conn, sysvol_tar, remote_sam.get_domain_sid())

            # remove the default sysvol files created by the clone (we want to
            # make sure we restore the sysvol.tar.gz files instead)
            shutil.rmtree(paths.sysvol)

            # Edit the downloaded sam.ldb to mark it as a backup
            samdb = SamDB(url=paths.samdb, session_info=system_session(), lp=lp,
                          flags=ldb.FLG_DONT_CREATE_DB)
            time_str = get_timestamp()
            add_backup_marker(samdb, "backupDate", time_str)
            add_backup_marker(samdb, "sidForRestore", new_sid)
            add_backup_marker(samdb, "backupType", "online")

            # ensure the admin user always has a password set (same as provision)
            if no_secrets:
                set_admin_password(logger, samdb)

            # Add everything in the tmpdir to the backup tar file
            backup_file = backup_filepath(targetdir, realm, time_str)
            create_log_file(tmpdir, lp, "online", server, include_secrets)
            create_backup_tar(logger, tmpdir, backup_file)
        finally:
            # always remove the staging dir, whether or not the tar succeeded
            shutil.rmtree(tmpdir)
+
+
+class cmd_domain_backup_restore(cmd_fsmo_seize):
+ """Restore the domain's DB from a backup-file.
+
+ This restores a previously backed up copy of the domain's DB on a new DC.
+
+ Note that the restored DB will not contain the original DC that the backup
+ was taken from (or any other DCs in the original domain). Only the new DC
+ (specified by --newservername) will be present in the restored DB.
+
+ Samba can then be started against the restored DB. Any existing DCs for the
+ domain should be shutdown before the new DC is started. Other DCs can then
+ be joined to the new DC to recover the network.
+
+ Note that this command should be run as the root user - it will fail
+ otherwise."""
+
+ synopsis = ("%prog --backup-file=<tar-file> --targetdir=<output-dir> "
+ "--newservername=<DC-name>")
+ takes_options = [
+ Option("--backup-file", help="Path to backup file", type=str),
+ Option("--targetdir", help="Path to write to", type=str),
+ Option("--newservername", help="Name for new server", type=str),
+ Option("--host-ip", type="string", metavar="IPADDRESS",
+ help="set IPv4 ipaddress"),
+ Option("--host-ip6", type="string", metavar="IP6ADDRESS",
+ help="set IPv6 ipaddress"),
+ Option("--site", help="Site to add the new server in", type=str),
+ ]
+
+ takes_optiongroups = {
+ "sambaopts": options.SambaOptions,
+ "credopts": options.CredentialsOptions,
+ }
+
    def register_dns_zone(self, logger, samdb, lp, ntdsguid, host_ip,
                          host_ip6, site):
        """
        Registers the new realm's DNS objects when a renamed domain backup
        is restored.

        :param logger: logger for progress messages
        :param samdb: connection to the restored sam.ldb
        :param lp: loadparm context for the restored (renamed) domain
        :param ntdsguid: the new DC's NTDS GUID, passed through to the DNS
            partition fill
        :param host_ip: IPv4 address for the new DC's DNS records (may be
            None if host_ip6 is provided)
        :param host_ip6: IPv6 address for the new DC's DNS records (may be
            None if host_ip is provided)
        :param site: site the new server is in
        :raises CommandError: if neither an IPv4 nor an IPv6 address could
            be determined
        """
        names = guess_names(lp)
        domaindn = names.domaindn
        forestdn = samdb.get_root_basedn().get_linearized()
        dnsdomain = names.dnsdomain.lower()
        # NOTE(review): forest DNS name is assumed to equal the domain DNS
        # name here — TODO confirm this holds for child-domain restores.
        dnsforest = dnsdomain
        hostname = names.netbiosname.lower()
        domainsid = dom_sid(samdb.get_domain_sid())
        dnsadmins_sid = get_dnsadmins_sid(samdb, domaindn)
        domainguid = get_domainguid(samdb, domaindn)

        # work out the IP address to use for the new DC's DNS records
        host_ip = determine_host_ip(logger, lp, host_ip)
        host_ip6 = determine_host_ip6(logger, lp, host_ip6)

        if host_ip is None and host_ip6 is None:
            raise CommandError('Please specify a host-ip for the new server')

        logger.info("DNS realm was renamed to %s" % dnsdomain)
        logger.info("Populating DNS partitions for new realm...")

        # Add the DNS objects for the new realm (note: the backup clone already
        # has the root server objects, so don't add them again)
        fill_dns_data_partitions(samdb, domainsid, site, domaindn,
                                 forestdn, dnsdomain, dnsforest, hostname,
                                 host_ip, host_ip6, domainguid, ntdsguid,
                                 dnsadmins_sid, add_root=False)
+
    def fix_old_dc_references(self, samdb):
        """Fixes attributes that reference the old/removed DCs.

        Runs a narrowly-scoped dbcheck (inside a transaction) that only
        repairs the specific problems introduced by removing the old DCs.
        """

        # we just want to fix up DB problems here that were introduced by us
        # removing the old DCs. We restrict what we fix up so that the restored
        # DB matches the backed-up DB as close as possible. (There may be other
        # DB issues inherited from the backed-up DC, but it's not our place to
        # silently try to fix them here).
        samdb.transaction_start()
        chk = dbcheck(samdb, quiet=True, fix=True, yes=False,
                      in_transaction=True)

        # fix up stale references to the old DC
        setattr(chk, 'fix_all_old_dn_string_component_mismatch', 'ALL')
        attrs = ['lastKnownParent', 'interSiteTopologyGenerator']

        # fix-up stale one-way links that point to the old DC
        setattr(chk, 'remove_plausible_deleted_DN_links', 'ALL')
        attrs += ['msDS-NC-Replica-Locations']

        # check all partitions (cross-NC) and include deleted objects
        cross_ncs_ctrl = 'search_options:1:2'
        controls = ['show_deleted:1', cross_ncs_ctrl]
        chk.check_database(controls=controls, attrs=attrs)
        samdb.transaction_commit()
+
+ def create_default_site(self, samdb, logger):
+ """Creates the default site, if it doesn't already exist"""
+
+ sitename = DEFAULTSITE
+ search_expr = "(&(cn={0})(objectclass=site))".format(sitename)
+ res = samdb.search(samdb.get_config_basedn(), scope=ldb.SCOPE_SUBTREE,
+ expression=search_expr)
+
+ if len(res) == 0:
+ logger.info("Creating default site '{0}'".format(sitename))
+ sites.create_site(samdb, samdb.get_config_basedn(), sitename)
+
+ return sitename
+
+ def remove_backup_markers(self, samdb):
+ """Remove DB markers added by the backup process"""
+
+ # check what markers we need to remove (this may vary)
+ markers = ['sidForRestore', 'backupRename', 'backupDate', 'backupType']
+ res = samdb.search(base=ldb.Dn(samdb, "@SAMBA_DSDB"),
+ scope=ldb.SCOPE_BASE,
+ attrs=markers)
+
+ # remove any markers that exist in the DB
+ m = ldb.Message()
+ m.dn = ldb.Dn(samdb, "@SAMBA_DSDB")
+
+ for attr in markers:
+ if attr in res[0]:
+ m[attr] = ldb.MessageElement([], ldb.FLAG_MOD_DELETE, attr)
+
+ samdb.modify(m)
+
+ def get_backup_type(self, samdb):
+ res = samdb.search(base=ldb.Dn(samdb, "@SAMBA_DSDB"),
+ scope=ldb.SCOPE_BASE,
+ attrs=['backupRename', 'backupType'])
+
+ # note that the backupType marker won't exist on backups created on
+ # v4.9. However, we can still infer the type, as only rename and
+ # online backups are supported on v4.9
+ if 'backupType' in res[0]:
+ backup_type = str(res[0]['backupType'])
+ elif 'backupRename' in res[0]:
+ backup_type = "rename"
+ else:
+ backup_type = "online"
+
+ return backup_type
+
    def save_uptodate_vectors(self, samdb, partitions):
        """Ensures the UTDV used by DRS is correct after an offline backup.

        :param samdb: SamDB connection to the restored database
        :param partitions: list of naming-context DN strings to update
        """
        for nc in partitions:
            # load the replUpToDateVector we *should* have
            utdv = _dsdb_load_udv_v2(samdb, nc)

            # convert it to NDR format and write it into the DB
            utdv_blob = drsblobs.replUpToDateVectorBlob()
            utdv_blob.version = 2
            utdv_blob.ctr.cursors = utdv
            utdv_blob.ctr.count = len(utdv)
            new_value = ndr_pack(utdv_blob)

            m = ldb.Message()
            m.dn = ldb.Dn(samdb, nc)
            m["replUpToDateVector"] = ldb.MessageElement(new_value,
                                                         ldb.FLAG_MOD_REPLACE,
                                                         "replUpToDateVector")
            samdb.modify(m)
+
    def run(self, sambaopts=None, credopts=None, backup_file=None,
            targetdir=None, newservername=None, host_ip=None, host_ip6=None,
            site=None):
        """Restore the backup file into targetdir as the DC newservername.

        Extracts the backup tar, re-creates the new DC's machine account
        and secrets, seizes all FSMO roles, removes all other DCs from the
        restored DB, and restores the sysvol share. Must be run as root
        (the sysvol NTACL restore will fail otherwise).
        """
        if not (backup_file and os.path.exists(backup_file)):
            raise CommandError('Backup file not found.')
        if targetdir is None:
            raise CommandError('Please specify a target directory')
        # allow restoredc to install into a directory prepopulated by selftest
        if (os.path.exists(targetdir) and os.listdir(targetdir) and
            os.environ.get('SAMBA_SELFTEST') != '1'):
            raise CommandError('Target directory is not empty')
        if not newservername:
            raise CommandError('Server name required')

        logger = logging.getLogger()
        logger.setLevel(logging.DEBUG)
        logger.addHandler(logging.StreamHandler(sys.stdout))

        # ldapcmp prefers the server's netBIOS name in upper-case
        newservername = newservername.upper()

        # extract the backup .tar to a temp directory
        targetdir = os.path.abspath(targetdir)
        tf = tarfile.open(backup_file)
        tf.extractall(targetdir)
        tf.close()

        # use the smb.conf that got backed up, by default (save what was
        # actually backed up, before we mess with it)
        smbconf = os.path.join(targetdir, 'etc', 'smb.conf')
        shutil.copyfile(smbconf, smbconf + ".orig")

        # if a smb.conf was specified on the cmd line, then use that instead
        cli_smbconf = sambaopts.get_loadparm_path()
        if cli_smbconf:
            logger.info("Using %s as restored domain's smb.conf" % cli_smbconf)
            shutil.copyfile(cli_smbconf, smbconf)

        lp = samba.param.LoadParm()
        lp.load(smbconf)

        # open a DB connection to the restored DB
        private_dir = os.path.join(targetdir, 'private')
        samdb_path = os.path.join(private_dir, 'sam.ldb')
        samdb = SamDB(url=samdb_path, session_info=system_session(), lp=lp,
                      flags=ldb.FLG_DONT_CREATE_DB)
        backup_type = self.get_backup_type(samdb)

        if site is None:
            # There's no great way to work out the correct site to add the
            # restored DC to. By default, add it to Default-First-Site-Name,
            # creating the site if it doesn't already exist
            site = self.create_default_site(samdb, logger)
            logger.info("Adding new DC to site '{0}'".format(site))

        # read the naming contexts out of the DB
        res = samdb.search(base="", scope=ldb.SCOPE_BASE,
                           attrs=['namingContexts'])
        ncs = [str(r) for r in res[0].get('namingContexts')]

        # for offline backups we need to make sure the upToDateness info
        # contains the invocation-ID and highest-USN of the DC we backed up.
        # Otherwise replication propagation dampening won't correctly filter
        # objects created by that DC
        if backup_type == "offline":
            self.save_uptodate_vectors(samdb, ncs)

        # Create account using the join_add_objects function in the join object
        # We need namingContexts, account control flags, and the sid saved by
        # the backup process.
        creds = credopts.get_credentials(lp)
        ctx = DCJoinContext(logger, creds=creds, lp=lp, site=site,
                            forced_local_samdb=samdb,
                            netbios_name=newservername)
        ctx.nc_list = ncs
        ctx.full_nc_list = ncs
        ctx.userAccountControl = (samba.dsdb.UF_SERVER_TRUST_ACCOUNT |
                                  samba.dsdb.UF_TRUSTED_FOR_DELEGATION)

        # rewrite the smb.conf to make sure it uses the new targetdir settings.
        # (This doesn't update all filepaths in a customized config, but it
        # corrects the same paths that get set by a new provision)
        logger.info('Updating basic smb.conf settings...')
        make_smbconf(smbconf, newservername, ctx.domain_name,
                     ctx.realm, targetdir, lp=lp,
                     serverrole="active directory domain controller")

        # Get the SID saved by the backup process and create account
        res = samdb.search(base=ldb.Dn(samdb, "@SAMBA_DSDB"),
                           scope=ldb.SCOPE_BASE,
                           attrs=['sidForRestore'])
        sid = res[0].get('sidForRestore')[0]
        logger.info('Creating account with SID: ' + str(sid))
        try:
            ctx.join_add_objects(specified_sid=dom_sid(str(sid)))
        except LdbError as e:
            (enum, emsg) = e.args
            if enum != ldb.ERR_CONSTRAINT_VIOLATION:
                raise

            # a constraint violation here means the SID is already in use:
            # look up the conflicting object to produce a helpful error
            dup_res = []
            try:
                dup_res = samdb.search(base=ldb.Dn(samdb, "<SID=%s>" % sid),
                                       scope=ldb.SCOPE_BASE,
                                       attrs=['objectGUID'],
                                       controls=["show_deleted:0",
                                                 "show_recycled:0"])
            except LdbError as dup_e:
                (dup_enum, _) = dup_e.args
                if dup_enum != ldb.ERR_NO_SUCH_OBJECT:
                    raise

            # not a SID clash after all - re-raise the original error
            if (len(dup_res) != 1):
                raise

            objectguid = samdb.schema_format_value("objectGUID",
                                                   dup_res[0]["objectGUID"][0])
            objectguid = objectguid.decode('utf-8')
            logger.error("The RID Pool on the source DC for the backup in %s "
                         "may be corrupt "
                         "or in conflict with SIDs already allocated "
                         "in the domain. " % backup_file)
            logger.error("Running 'samba-tool dbcheck' on the source "
                         "DC (and obtaining a new backup) may correct the issue.")
            logger.error("Alternatively please obtain a new backup "
                         "against a different DC.")
            logger.error("The SID we wish to use (%s) is recorded in "
                         "@SAMBA_DSDB as the sidForRestore attribute."
                         % sid)

            raise CommandError("Domain restore failed because there "
                               "is already an existing object (%s) "
                               "with SID %s and objectGUID %s. "
                               "This conflicts with "
                               "the new DC account we want to add "
                               "for the restored domain. " % (
                                   dup_res[0].dn, sid, objectguid))

        # point the rootDSE at the new DC's NTDS settings object
        m = ldb.Message()
        m.dn = ldb.Dn(samdb, '@ROOTDSE')
        ntds_guid = str(ctx.ntds_guid)
        m["dsServiceName"] = ldb.MessageElement("<GUID=%s>" % ntds_guid,
                                                ldb.FLAG_MOD_REPLACE,
                                                "dsServiceName")
        samdb.modify(m)

        # if we renamed the backed-up domain, then we need to add the DNS
        # objects for the new realm (we do this in the restore, now that we
        # know the new DC's IP address)
        if backup_type == "rename":
            self.register_dns_zone(logger, samdb, lp, ctx.ntds_guid,
                                   host_ip, host_ip6, site)

        secrets_path = os.path.join(private_dir, 'secrets.ldb')
        secrets_ldb = Ldb(secrets_path, session_info=system_session(), lp=lp,
                          flags=ldb.FLG_DONT_CREATE_DB)
        secretsdb_self_join(secrets_ldb, domain=ctx.domain_name,
                            realm=ctx.realm, dnsdomain=ctx.dnsdomain,
                            netbiosname=ctx.myname, domainsid=ctx.domsid,
                            machinepass=ctx.acct_pass,
                            key_version_number=ctx.key_version_number,
                            secure_channel_type=misc.SEC_CHAN_BDC)

        # Seize DNS roles
        domain_dn = samdb.domain_dn()
        forest_dn = samba.dn_from_dns_name(samdb.forest_dns_name())
        dns_roles = [("domaindns", domain_dn),
                     ("forestdns", forest_dn)]
        for role, dn in dns_roles:
            if dn in ncs:
                self.seize_dns_role(role, samdb, None, None, None, force=True)

        # Seize other roles
        for role in ['rid', 'pdc', 'naming', 'infrastructure', 'schema']:
            self.seize_role(role, samdb, force=True)

        # Get all DCs and remove them (this ensures these DCs cannot
        # replicate because they will not have a password)
        search_expr = "(&(objectClass=Server)(serverReference=*))"
        res = samdb.search(samdb.get_config_basedn(), scope=ldb.SCOPE_SUBTREE,
                           expression=search_expr)
        for m in res:
            cn = str(m.get('cn')[0])
            if cn != newservername:
                remove_dc(samdb, logger, cn)

        # Remove the repsFrom and repsTo from each NC to ensure we do
        # not try (and fail) to talk to the old DCs
        for nc in ncs:
            msg = ldb.Message()
            msg.dn = ldb.Dn(samdb, nc)

            msg["repsFrom"] = ldb.MessageElement([],
                                                 ldb.FLAG_MOD_REPLACE,
                                                 "repsFrom")
            msg["repsTo"] = ldb.MessageElement([],
                                               ldb.FLAG_MOD_REPLACE,
                                               "repsTo")
            samdb.modify(msg)

        # Update the krbtgt passwords twice, ensuring no tickets from
        # the old domain are valid
        update_krbtgt_account_password(samdb)
        update_krbtgt_account_password(samdb)

        # restore the sysvol directory from the backup tar file, including the
        # original NTACLs. Note that the backup_restore() will fail if not root
        sysvol_tar = os.path.join(targetdir, 'sysvol.tar.gz')
        dest_sysvol_dir = lp.get('path', 'sysvol')
        if not os.path.exists(dest_sysvol_dir):
            os.makedirs(dest_sysvol_dir)
        backup_restore(sysvol_tar, dest_sysvol_dir, samdb, smbconf)
        os.remove(sysvol_tar)

        # fix up any stale links to the old DCs we just removed
        logger.info("Fixing up any remaining references to the old DCs...")
        self.fix_old_dc_references(samdb)

        # Remove DB markers added by the backup process
        self.remove_backup_markers(samdb)

        logger.info("Backup file successfully restored to %s" % targetdir)
        logger.info("Please check the smb.conf settings are correct before "
                    "starting samba.")
+
+
class cmd_domain_backup_rename(samba.netcmd.Command):
    """Copy a running DC's DB to backup file, renaming the domain in the process.

    Where <new-domain> is the new domain's NetBIOS name, and <new-dnsrealm> is
    the new domain's realm in DNS form.

    This is similar to 'samba-tool domain backup online' in that it clones the
    DB of a running DC. However, this option also renames all the domain
    entries in the DB. Renaming the domain makes it possible to restore and
    start a new Samba DC without it interfering with the existing Samba domain.
    In other words, you could use this option to clone your production samba
    domain and restore it to a separate pre-production environment that won't
    overlap or interfere with the existing production Samba domain.

    Note that:
    - it's recommended to run 'samba-tool dbcheck' before taking a backup-file
    and fix any errors it reports.
    - all the domain's secrets are included in the backup file.
    - although the DB contents can be untarred and examined manually, you need
    to run 'samba-tool domain backup restore' before you can start a Samba DC
    from the backup file.
    - GPO and sysvol information will still refer to the old realm and will
    need to be updated manually.
    - if you specify 'keep-dns-realm', then the DNS records will need updating
    in order to work (they will still refer to the old DC's IP instead of the
    new DC's address).
    - we recommend that you only use this option if you know what you're doing.
    """

    # Usage string shown by samba-tool help for this subcommand.
    synopsis = ("%prog <new-domain> <new-dnsrealm> --server=<DC-to-backup> "
                "--targetdir=<output-dir>")
    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_options = [
        Option("--server", help="The DC to backup", type=str),
        Option("--targetdir", help="Directory to write the backup file",
               type=str),
        Option("--keep-dns-realm", action="store_true", default=False,
               help="Retain the DNS entries for the old realm in the backup"),
        Option("--no-secrets", action="store_true", default=False,
               help="Exclude secret values from the backup created"),
        Option("--backend-store", type="choice", metavar="BACKENDSTORE",
               choices=["tdb", "mdb"],
               help="Specify the database backend to be used "
                    "(default is %s)" % get_default_backend_store()),
    ]

    takes_args = ["new_domain_name", "new_dns_realm"]

    def update_dns_root(self, logger, samdb, old_realm, delete_old_dns):
        """Updates dnsRoot for the partition objects to reflect the rename.

        Adds the new realm's dnsRoot value to each crossRef object, and
        (optionally) removes the old realm's value.
        """

        # lookup the crossRef objects that hold the old realm's dnsRoot
        partitions_dn = samdb.get_partitions_dn()
        res = samdb.search(base=partitions_dn, scope=ldb.SCOPE_ONELEVEL,
                           attrs=["dnsRoot"],
                           expression='(&(objectClass=crossRef)(dnsRoot=*))')
        new_realm = samdb.domain_dns_name()

        # go through and add the new realm
        for res_msg in res:
            # dnsRoot can be multi-valued, so only look for the old realm
            for dns_root in res_msg["dnsRoot"]:
                dns_root = str(dns_root)
                dn = res_msg.dn
                if old_realm in dns_root:
                    # substitute the new realm for the old realm suffix
                    new_dns_root = re.sub('%s$' % old_realm, new_realm,
                                          dns_root)
                    logger.info("Adding %s dnsRoot to %s" % (new_dns_root, dn))

                    m = ldb.Message()
                    m.dn = dn
                    m["dnsRoot"] = ldb.MessageElement(new_dns_root,
                                                      ldb.FLAG_MOD_ADD,
                                                      "dnsRoot")
                    samdb.modify(m)

                    # optionally remove the dnsRoot for the old realm
                    if delete_old_dns:
                        logger.info("Removing %s dnsRoot from %s" % (dns_root,
                                                                     dn))
                        m["dnsRoot"] = ldb.MessageElement(dns_root,
                                                          ldb.FLAG_MOD_DELETE,
                                                          "dnsRoot")
                        samdb.modify(m)

    # Updates the CN=<domain>,CN=Partitions,CN=Configuration,... object to
    # reflect the domain rename
    def rename_domain_partition(self, logger, samdb, new_netbios_name):
        """Renames the domain partition object and updates its nETBIOSName"""

        # lookup the crossRef object that holds the nETBIOSName (nCName has
        # already been updated by this point, but the netBIOS hasn't)
        base_dn = samdb.get_default_basedn()
        nc_name = ldb.binary_encode(str(base_dn))
        partitions_dn = samdb.get_partitions_dn()
        res = samdb.search(base=partitions_dn, scope=ldb.SCOPE_ONELEVEL,
                           attrs=["nETBIOSName"],
                           expression='ncName=%s' % nc_name)

        logger.info("Changing backup domain's NetBIOS name to %s" %
                    new_netbios_name)
        m = ldb.Message()
        m.dn = res[0].dn
        m["nETBIOSName"] = ldb.MessageElement(new_netbios_name,
                                              ldb.FLAG_MOD_REPLACE,
                                              "nETBIOSName")
        samdb.modify(m)

        # renames the object itself to reflect the change in domain
        # (relax control allows renaming an otherwise-protected object)
        new_dn = "CN=%s,%s" % (new_netbios_name, partitions_dn)
        logger.info("Renaming %s --> %s" % (res[0].dn, new_dn))
        samdb.rename(res[0].dn, new_dn, controls=['relax:0'])

    def delete_old_dns_zones(self, logger, samdb, old_realm):
        """Deletes the old realm's zones from both DNS partitions."""
        # remove the top-level DNS entries for the old realm
        basedn = samdb.get_default_basedn()
        dn = "DC=%s,CN=MicrosoftDNS,DC=DomainDnsZones,%s" % (old_realm, basedn)
        logger.info("Deleting old DNS zone %s" % dn)
        samdb.delete(dn, ["tree_delete:1"])

        forestdn = samdb.get_root_basedn().get_linearized()
        dn = "DC=_msdcs.%s,CN=MicrosoftDNS,DC=ForestDnsZones,%s" % (old_realm,
                                                                    forestdn)
        logger.info("Deleting old DNS zone %s" % dn)
        samdb.delete(dn, ["tree_delete:1"])

    def fix_old_dn_attributes(self, samdb):
        """Fixes attributes (i.e. objectCategory) that still use the old DN"""

        samdb.transaction_start()
        # Just fix any mismatches in DN detected (leave any other errors)
        chk = dbcheck(samdb, quiet=True, fix=True, yes=False,
                      in_transaction=True)
        # fix up incorrect objectCategory/etc attributes
        setattr(chk, 'fix_all_old_dn_string_component_mismatch', 'ALL')
        cross_ncs_ctrl = 'search_options:1:2'
        controls = ['show_deleted:1', cross_ncs_ctrl]
        chk.check_database(controls=controls)
        samdb.transaction_commit()

    def run(self, new_domain_name, new_dns_realm, sambaopts=None,
            credopts=None, server=None, targetdir=None, keep_dns_realm=False,
            no_secrets=False, backend_store=None):
        """Clone the remote DC's DB, rename the domain, and tar it up."""
        logger = self.get_logger()
        logger.setLevel(logging.INFO)

        lp = sambaopts.get_loadparm()
        creds = credopts.get_credentials(lp)

        # Make sure we have all the required args.
        if server is None:
            raise CommandError('Server required')

        check_targetdir(logger, targetdir)

        delete_old_dns = not keep_dns_realm

        new_dns_realm = new_dns_realm.lower()
        new_domain_name = new_domain_name.upper()

        new_base_dn = samba.dn_from_dns_name(new_dns_realm)
        logger.info("New realm for backed up domain: %s" % new_dns_realm)
        logger.info("New base DN for backed up domain: %s" % new_base_dn)
        logger.info("New domain NetBIOS name: %s" % new_domain_name)

        tmpdir = tempfile.mkdtemp(dir=targetdir)

        # setup a join-context for cloning the remote server
        include_secrets = not no_secrets
        ctx = DCCloneAndRenameContext(new_base_dn, new_domain_name,
                                      new_dns_realm, logger=logger,
                                      creds=creds, lp=lp,
                                      include_secrets=include_secrets,
                                      dns_backend='SAMBA_INTERNAL',
                                      server=server, targetdir=tmpdir,
                                      backend_store=backend_store)

        # sanity-check we're not "renaming" the domain to the same values
        old_domain = ctx.domain_name
        if old_domain == new_domain_name:
            shutil.rmtree(tmpdir)
            raise CommandError("Cannot use the current domain NetBIOS name.")

        old_realm = ctx.realm
        if old_realm == new_dns_realm:
            shutil.rmtree(tmpdir)
            raise CommandError("Cannot use the current domain DNS realm.")

        # do the clone/rename
        ctx.do_join()

        # get the paths used for the clone, then drop the old samdb connection
        del ctx.local_samdb
        paths = ctx.paths

        # get a free RID to use as the new DC's SID (when it gets restored)
        remote_sam = SamDB(url='ldap://' + server, credentials=creds,
                           session_info=system_session(), lp=lp)
        new_sid = get_sid_for_restore(remote_sam, logger)

        # Grab the remote DC's sysvol files and bundle them into a tar file.
        # Note we end up with 2 sysvol dirs - the original domain's files (that
        # use the old realm) backed here, as well as default files generated
        # for the new realm as part of the clone/join.
        sysvol_tar = os.path.join(tmpdir, 'sysvol.tar.gz')
        smb_conn = smb_sysvol_conn(server, lp, creds)
        backup_online(smb_conn, sysvol_tar, remote_sam.get_domain_sid())

        # connect to the local DB (making sure we use the new/renamed config)
        lp.load(paths.smbconf)
        samdb = SamDB(url=paths.samdb, session_info=system_session(), lp=lp,
                      flags=ldb.FLG_DONT_CREATE_DB)

        # Edit the cloned sam.ldb to mark it as a backup
        time_str = get_timestamp()
        add_backup_marker(samdb, "backupDate", time_str)
        add_backup_marker(samdb, "sidForRestore", new_sid)
        add_backup_marker(samdb, "backupRename", old_realm)
        add_backup_marker(samdb, "backupType", "rename")

        # fix up the DNS objects that are using the old dnsRoot value
        self.update_dns_root(logger, samdb, old_realm, delete_old_dns)

        # update the netBIOS name and the Partition object for the domain
        self.rename_domain_partition(logger, samdb, new_domain_name)

        if delete_old_dns:
            self.delete_old_dns_zones(logger, samdb, old_realm)

        logger.info("Fixing DN attributes after rename...")
        self.fix_old_dn_attributes(samdb)

        # ensure the admin user always has a password set (same as provision)
        if no_secrets:
            set_admin_password(logger, samdb)

        # Add everything in the tmpdir to the backup tar file
        backup_file = backup_filepath(targetdir, new_dns_realm, time_str)
        create_log_file(tmpdir, lp, "rename", server, include_secrets,
                        "Original domain %s (NetBIOS), %s (DNS realm)" %
                        (old_domain, old_realm))
        create_backup_tar(logger, tmpdir, backup_file)

        shutil.rmtree(tmpdir)
+
+
class cmd_domain_backup_offline(samba.netcmd.Command):
    """Backup the local domain directories safely into a tar file.

    Takes a backup copy of the current domain from the local files on disk,
    with proper locking of the DB to ensure consistency. If the domain were to
    undergo a catastrophic failure, then the backup file can be used to recover
    the domain.

    An offline backup differs from an online backup in the following ways:
    - a backup can be created even if the DC isn't currently running.
    - includes non-replicated attributes that an online backup wouldn't store.
    - takes a copy of the raw database files, which has the risk that any
    hidden problems in the DB are preserved in the backup."""

    synopsis = "%prog [options]"
    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
    }

    takes_options = [
        Option("--targetdir",
               help="Output directory (required)",
               type=str),
    ]

    # suffix appended to each file's copy while the backup is assembled
    backup_ext = '.bak-offline'

    def offline_tdb_copy(self, path):
        """Copy a TDB file to path + backup_ext via the tdbbackup tool.

        Mutex-locked databases that tdbbackup cannot open (EINVAL) are
        silently skipped; a missing tdbbackup tool raises CommandError.
        """
        backup_path = path + self.backup_ext
        try:
            tdb_copy(path, backup_path, readonly=True)
        except CalledProcessError as copy_err:
            # If the copy didn't work, check if it was caused by an EINVAL
            # error on opening the DB. If so, it's a mutex locked database,
            # which we can safely ignore.
            try:
                tdb.open(path)
            except Exception as e:
                if hasattr(e, 'errno') and e.errno == errno.EINVAL:
                    return
                raise e
            raise copy_err

        except FileNotFoundError as e:
            # tdbbackup tool was not found.
            raise CommandError(e.strerror, e)

        if not os.path.exists(backup_path):
            s = "tdbbackup said backup succeeded but {0} not found"
            raise CommandError(s.format(backup_path))

    def offline_mdb_copy(self, path):
        """Copy an MDB (LMDB) file to path + backup_ext."""
        mdb_copy(path, path + self.backup_ext)

    # Secrets databases are a special case: a transaction must be started
    # on the secrets.ldb file before backing up that file and secrets.tdb
    def backup_secrets(self, private_dir, lp, logger):
        """Back up secrets.ldb and secrets.tdb under a read lock."""
        secrets_path = os.path.join(private_dir, 'secrets')
        secrets_obj = Ldb(secrets_path + '.ldb', lp=lp,
                          flags=ldb.FLG_DONT_CREATE_DB)
        logger.info('Starting transaction on ' + secrets_path)
        secrets_obj.transaction_start()
        self.offline_tdb_copy(secrets_path + '.ldb')
        self.offline_tdb_copy(secrets_path + '.tdb')
        # cancel (not commit): the transaction was only held for locking
        secrets_obj.transaction_cancel()

    # sam.ldb must have a transaction started on it before backing up
    # everything in sam.ldb.d with the appropriate backup function.
    #
    # Obtains the sidForRestore (SID for the new DC) and returns it
    # from under the transaction
    def backup_smb_dbs(self, private_dir, samdb, lp, logger):
        """Back up sam.ldb and its sub-databases; return the restore SID."""
        sam_ldb_path = os.path.join(private_dir, 'sam.ldb')

        # First, determine if DB backend is MDB. Assume not unless there is a
        # 'backendStore' attribute on @PARTITION containing the text 'mdb'
        store_label = "backendStore"
        res = samdb.search(base="@PARTITION", scope=ldb.SCOPE_BASE,
                           attrs=[store_label])
        mdb_backend = store_label in res[0] and str(res[0][store_label][0]) == 'mdb'

        # This is needed to keep this variable in scope until the end
        # of the transaction.
        res_iterator = None

        copy_function = None
        if mdb_backend:
            logger.info('MDB backend detected. Using mdb backup function.')
            copy_function = self.offline_mdb_copy

            # We can't backup with a write transaction open, so get a
            # read lock with a search_iterator().
            #
            # We have tests in lib/ldb/tests/python/api.py that the
            # search iterator takes a read lock effective against a
            # transaction. This in turn will ensure there are no
            # transactions on either the main or sub-database, even if
            # the read locks were not enforced globally (they are).
            res_iterator = samdb.search_iterator()
        else:
            logger.info('Starting transaction on ' + sam_ldb_path)
            copy_function = self.offline_tdb_copy
            samdb.transaction_start()

        logger.info('   backing up ' + sam_ldb_path)
        self.offline_tdb_copy(sam_ldb_path)
        sam_ldb_d = sam_ldb_path + '.d'
        for sam_file in os.listdir(sam_ldb_d):
            sam_file = os.path.join(sam_ldb_d, sam_file)
            if sam_file.endswith('.ldb'):
                logger.info('   backing up locked/related file ' + sam_file)
                copy_function(sam_file)
            elif sam_file.endswith('.tdb'):
                logger.info('   tdbbackup of locked/related file ' + sam_file)
                self.offline_tdb_copy(sam_file)
            else:
                logger.info('   copying locked/related file ' + sam_file)
                shutil.copyfile(sam_file, sam_file + self.backup_ext)

        sid = get_sid_for_restore(samdb, logger)

        if mdb_backend:
            # Delete the iterator, release the read lock
            del(res_iterator)
        else:
            # cancel (not commit): the transaction was only held for locking
            samdb.transaction_cancel()

        return sid

    # Find where a path should go in the fixed backup archive structure.
    def get_arc_path(self, path, conf_paths):
        """Map a filesystem path to its private/state/etc archive path."""
        backup_dirs = {"private": conf_paths.private_dir,
                       "state": conf_paths.state_dir,
                       "etc": os.path.dirname(conf_paths.smbconf)}
        matching_dirs = [(_, p) for (_, p) in backup_dirs.items() if
                         path.startswith(p)]
        arc_path, fs_path = matching_dirs[0]

        # If more than one directory is a parent of this path, then at least
        # one configured path is a subdir of another. Use closest match.
        if len(matching_dirs) > 1:
            arc_path, fs_path = max(matching_dirs, key=lambda p: len(p[1]))
        arc_path += path[len(fs_path):]

        return arc_path

    def run(self, sambaopts=None, targetdir=None):
        """Take a locked offline backup of the local domain directories."""

        logger = logging.getLogger()
        logger.setLevel(logging.DEBUG)
        logger.addHandler(logging.StreamHandler(sys.stdout))

        # Get the absolute paths of all the directories we're going to backup
        lp = sambaopts.get_loadparm()

        paths = samba.provision.provision_paths_from_lp(lp, lp.get('realm'))
        if not (paths.samdb and os.path.exists(paths.samdb)):
            logger.error("No database found at {0}".format(paths.samdb))
            raise CommandError('Please check you are root, and ' +
                               'are running this command on an AD DC')

        check_targetdir(logger, targetdir)

        # Iterating over the directories in this specific order ensures that
        # when the private directory contains hardlinks that are also contained
        # in other directories to be backed up (such as in paths.binddns_dir),
        # the hardlinks in the private directory take precedence.
        backup_dirs = [paths.private_dir, paths.state_dir,
                       os.path.dirname(paths.smbconf)]  # etc dir
        logger.info('running backup on dirs: {0}'.format(' '.join(backup_dirs)))

        # Recursively get all file paths in the backup directories
        all_files = []
        all_stats = set()
        for backup_dir in backup_dirs:
            for (working_dir, _, filenames) in os.walk(backup_dir):
                if working_dir.startswith(paths.sysvol):
                    continue
                if working_dir.endswith('.sock') or '.sock/' in working_dir:
                    continue
                # The BIND DNS database can be regenerated, so it doesn't need
                # to be backed up.
                if working_dir.startswith(os.path.join(paths.binddns_dir, 'dns')):
                    continue

                for filename in filenames:
                    full_path = os.path.join(working_dir, filename)

                    # Ignore files that have already been added. This prevents
                    # duplicates if one backup dir is a subdirectory of another,
                    # or if backup dirs contain hardlinks.
                    try:
                        s = os.stat(full_path, follow_symlinks=False)
                    except FileNotFoundError:
                        logger.warning(f"{full_path} does not exist!")
                        continue

                    if (s.st_ino, s.st_dev) in all_stats:
                        continue

                    # Assume existing backup files are from a previous backup.
                    # Delete and ignore.
                    if filename.endswith(self.backup_ext):
                        os.remove(full_path)
                        continue

                    # Sock files are autogenerated at runtime, ignore.
                    if filename.endswith('.sock'):
                        continue

                    all_files.append(full_path)
                    all_stats.add((s.st_ino, s.st_dev))

        # We would prefer to open with FLG_RDONLY but then we can't
        # start a transaction which is the strong isolation we want
        # for the backup.
        samdb = SamDB(url=paths.samdb, session_info=system_session(), lp=lp,
                      flags=ldb.FLG_DONT_CREATE_DB)

        # Backup secrets, sam.ldb and their downstream files
        self.backup_secrets(paths.private_dir, lp, logger)
        sid = self.backup_smb_dbs(paths.private_dir, samdb, lp, logger)

        # Get the domain SID so we can later place it in the backup
        dom_sid_str = samdb.get_domain_sid()
        dom_sid = security.dom_sid(dom_sid_str)

        # Close the original samdb, to avoid any confusion, we will
        # not use this any more as the data has all been copied under
        # the transaction
        samdb = None

        # Open the new backed up samdb, flag it as backed up, and write
        # the next SID so the restore tool can add objects. We use
        # options=["modules:"] here to prevent any modules from loading.
        # WARNING: Don't change this code unless you know what you're doing.
        #          Writing to a .bak file only works because the DN being
        #          written to happens to be top level.
        samdb = Ldb(url=paths.samdb + self.backup_ext,
                    session_info=system_session(), lp=lp,
                    options=["modules:"], flags=ldb.FLG_DONT_CREATE_DB)
        time_str = get_timestamp()
        add_backup_marker(samdb, "backupDate", time_str)
        add_backup_marker(samdb, "sidForRestore", sid)
        add_backup_marker(samdb, "backupType", "offline")

        # Close the backed up samdb
        samdb = None

        # Now handle all the LDB and TDB files that are not linked to
        # anything else.  Use transactions for LDBs.
        for path in all_files:
            if not os.path.exists(path + self.backup_ext):
                if path.endswith('.ldb'):
                    logger.info('Starting transaction on solo db: ' + path)
                    ldb_obj = Ldb(path, lp=lp, flags=ldb.FLG_DONT_CREATE_DB)
                    ldb_obj.transaction_start()
                    logger.info('   running tdbbackup on the same file')
                    self.offline_tdb_copy(path)
                    ldb_obj.transaction_cancel()
                elif path.endswith('.tdb'):
                    logger.info('running tdbbackup on lone tdb file ' + path)
                    self.offline_tdb_copy(path)

        # Now make the backup tar file and add all
        # backed up files and any other files to it.
        temp_tar_dir = tempfile.mkdtemp(dir=targetdir,
                                        prefix='INCOMPLETEsambabackupfile')
        temp_tar_name = os.path.join(temp_tar_dir, "samba-backup.tar.bz2")
        tar = tarfile.open(temp_tar_name, 'w:bz2')

        logger.info('running offline ntacl backup of sysvol')
        sysvol_tar_fn = 'sysvol.tar.gz'
        sysvol_tar = os.path.join(temp_tar_dir, sysvol_tar_fn)
        backup_offline(paths.sysvol, sysvol_tar, paths.smbconf, dom_sid)
        tar.add(sysvol_tar, sysvol_tar_fn)
        os.remove(sysvol_tar)

        create_log_file(temp_tar_dir, lp, "offline", "localhost", True)
        backup_fn = os.path.join(temp_tar_dir, "backup.txt")
        tar.add(backup_fn, os.path.basename(backup_fn))
        os.remove(backup_fn)

        logger.info('building backup tar')
        for path in all_files:
            arc_path = self.get_arc_path(path, paths)

            if os.path.exists(path + self.backup_ext):
                logger.info('   adding backup ' + arc_path + self.backup_ext +
                            ' to tar and deleting file')
                tar.add(path + self.backup_ext, arcname=arc_path)
                os.remove(path + self.backup_ext)
            elif path.endswith('.ldb') or path.endswith('.tdb'):
                logger.info('   skipping ' + arc_path)
            else:
                logger.info('   adding misc file ' + arc_path)
                tar.add(path, arcname=arc_path)

        tar.close()
        # atomic-ish finish: only rename to the final name once complete
        os.rename(temp_tar_name,
                  os.path.join(targetdir,
                               'samba-backup-{0}.tar.bz2'.format(time_str)))
        os.rmdir(temp_tar_dir)
        logger.info('Backup succeeded.')
+
+
class cmd_domain_backup(samba.netcmd.SuperCommand):
    """Create or restore a backup of the domain."""
    # dispatch table mapping subcommand names to their implementations
    subcommands = {'offline': cmd_domain_backup_offline(),
                   'online': cmd_domain_backup_online(),
                   'rename': cmd_domain_backup_rename(),
                   'restore': cmd_domain_backup_restore()}
diff --git a/python/samba/netcmd/domain/claim/__init__.py b/python/samba/netcmd/domain/claim/__init__.py
new file mode 100644
index 0000000..de7c4bb
--- /dev/null
+++ b/python/samba/netcmd/domain/claim/__init__.py
@@ -0,0 +1,35 @@
+# Unix SMB/CIFS implementation.
+#
+# claim management
+#
+# Copyright (C) Catalyst.Net Ltd. 2023
+#
+# Written by Rob van der Linde <rob@catalyst.net.nz>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from samba.netcmd import SuperCommand
+
+from .claim_type import cmd_domain_claim_claim_type
+from .value_type import cmd_domain_claim_value_type
+
+
+class cmd_domain_claim(SuperCommand):
+ """Manage claims on the domain."""
+
+ subcommands = {
+ "claim-type": cmd_domain_claim_claim_type(),
+ "value-type": cmd_domain_claim_value_type(),
+ }
diff --git a/python/samba/netcmd/domain/claim/claim_type.py b/python/samba/netcmd/domain/claim/claim_type.py
new file mode 100644
index 0000000..c0825c6
--- /dev/null
+++ b/python/samba/netcmd/domain/claim/claim_type.py
@@ -0,0 +1,361 @@
+# Unix SMB/CIFS implementation.
+#
+# claim type management
+#
+# Copyright (C) Catalyst.Net Ltd. 2023
+#
+# Written by Rob van der Linde <rob@catalyst.net.nz>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import binascii
+import os
+
+import samba.getopt as options
+from samba.netcmd import Command, CommandError, Option, SuperCommand
+from samba.netcmd.domain.models import AttributeSchema, ClassSchema,\
+ ClaimType, ValueType
+from samba.netcmd.domain.models.exceptions import ModelError
+
+
+class cmd_domain_claim_claim_type_create(Command):
+ """Create claim types on the domain."""
+
+ synopsis = "%prog -H <URL> [options]"
+
+ takes_optiongroups = {
+ "sambaopts": options.SambaOptions,
+ "credopts": options.CredentialsOptions,
+ "hostopts": options.HostOptions,
+ }
+
+ takes_options = [
+ Option("--attribute", help="Attribute of claim type to create (required).",
+ dest="attribute_name", action="store", type=str, required=True),
+ Option("--class", help="Object classes to set claim type to.",
+ dest="class_names", action="append", type=str, required=True),
+ Option("--name", help="Optional display name or use attribute name.",
+ dest="name", action="store", type=str),
+ Option("--description",
+ help="Optional description or use from attribute.",
+ dest="description", action="store", type=str),
+ Option("--disable", help="Disable claim type.",
+ dest="disable", action="store_true"),
+ Option("--enable", help="Enable claim type.",
+ dest="enable", action="store_true"),
+ Option("--protect",
+ help="Protect claim type from accidental deletion.",
+ dest="protect", action="store_true"),
+ Option("--unprotect",
+ help="Unprotect claim type from accidental deletion.",
+ dest="unprotect", action="store_true")
+ ]
+
+ def run(self, hostopts=None, sambaopts=None, credopts=None, name=None,
+ attribute_name=None, class_names=None, description=None,
+ disable=None, enable=None, protect=None, unprotect=None):
+
+ # mutually exclusive attributes
+ if enable and disable:
+ raise CommandError("--enable and --disable cannot be used together.")
+ if protect and unprotect:
+ raise CommandError("--protect and --unprotect cannot be used together.")
+
+ ldb = self.ldb_connect(hostopts, sambaopts, credopts)
+
+ display_name = name or attribute_name
+ try:
+ claim_type = ClaimType.get(ldb, display_name=display_name)
+ except ModelError as e:
+ raise CommandError(e)
+
+ # Check if a claim type with this display name already exists.
+ # Note: you can register the same claim type under another display name.
+ if claim_type:
+ raise CommandError(f"Claim type {display_name} already exists, "
+ "but you can use --name to use another name.")
+
+ # Lookup attribute and class names in schema.
+ try:
+ applies_to = [ClassSchema.lookup(ldb, name) for name in class_names]
+ attribute = AttributeSchema.lookup(ldb, attribute_name)
+ value_type = ValueType.lookup(ldb, attribute)
+ except (LookupError, ModelError, ValueError) as e:
+ raise CommandError(e)
+
+ # Generate the new Claim Type cn.
+ # Windows creates a random number here containing 16 hex digits.
+ # We can achieve something similar using urandom(8)
+ instance = binascii.hexlify(os.urandom(8)).decode()
+ cn = f"ad://ext/{display_name}:{instance}"
+
+ # adminDescription should be present but still have a fallback.
+ if description is None:
+ description = attribute.admin_description or display_name
+
+ # claim_is_value_space_restricted is always False because we don't
+ # yet support creating claims with a restricted possible values list.
+ claim_type = ClaimType(
+ cn=cn,
+ description=description,
+ display_name=display_name,
+ enabled=not disable,
+ claim_attribute_source=attribute.dn,
+ claim_is_single_valued=attribute.is_single_valued,
+ claim_is_value_space_restricted=False,
+ claim_source_type="AD",
+ claim_type_applies_to_class=[obj.dn for obj in applies_to],
+ claim_value_type=value_type.claim_value_type,
+ )
+
+ # Either --enable will be set or --disable but never both.
+ # The default if both are missing is enabled=True.
+ if enable is not None:
+ claim_type.enabled = enable
+ else:
+ claim_type.enabled = not disable
+
+ # Create claim type
+ try:
+ claim_type.save(ldb)
+
+ if protect:
+ claim_type.protect(ldb)
+ except ModelError as e:
+ raise CommandError(e)
+
+ # Claim type created successfully.
+ self.outf.write(f"Created claim type: {display_name}")
+ if attribute_name != display_name:
+ self.outf.write(f" ({attribute_name})\n")
+ else:
+ self.outf.write("\n")
+
+
+class cmd_domain_claim_claim_type_modify(Command):
+ """Modify claim types on the domain."""
+
+ synopsis = "%prog -H <URL> [options]"
+
+ takes_optiongroups = {
+ "sambaopts": options.SambaOptions,
+ "credopts": options.CredentialsOptions,
+ "hostopts": options.HostOptions,
+ }
+
+ takes_options = [
+ Option("--name", help="Display name of claim type to modify (required).",
+ dest="name", action="store", type=str, required=True),
+ Option("--class", help="Object classes to set claim type to.",
+ dest="class_names", action="append", type=str),
+ Option("--description", help="Set the claim type description.",
+ dest="description", action="store", type=str),
+ Option("--enable",
+ help="Enable claim type.",
+ dest="enable", action="store_true"),
+ Option("--disable",
+ help="Disable claim type.",
+ dest="disable", action="store_true"),
+ Option("--protect",
+ help="Protect claim type from accidental deletion.",
+ dest="protect", action="store_true"),
+ Option("--unprotect",
+ help="Unprotect claim type from accidental deletion.",
+ dest="unprotect", action="store_true")
+ ]
+
+ def run(self, hostopts=None, sambaopts=None, credopts=None, name=None,
+ class_names=None, description=None, enable=None, disable=None,
+ protect=None, unprotect=None):
+
+ if enable and disable:
+ raise CommandError("--enable and --disable cannot be used together.")
+ if protect and unprotect:
+ raise CommandError("--protect and --unprotect cannot be used together.")
+
+ ldb = self.ldb_connect(hostopts, sambaopts, credopts)
+
+ try:
+ claim_type = ClaimType.get(ldb, display_name=name)
+ except ModelError as e:
+ raise CommandError(e)
+
+ # Check if claim type exists.
+ if not claim_type:
+ raise CommandError(f"Claim type {name} not found.")
+
+ # Either --enable will be set or --disable but never both.
+ if enable:
+ claim_type.enabled = True
+ elif disable:
+ claim_type.enabled = False
+
+ # Update the description.
+ if description is not None:
+ claim_type.description = description
+
+ # Change class names for claim type.
+ if class_names is not None:
+ try:
+ applies_to = [ClassSchema.lookup(ldb, name)
+ for name in class_names]
+ except (LookupError, ValueError) as e:
+ raise CommandError(e)
+
+ claim_type.claim_type_applies_to_class = [obj.dn for obj in applies_to]
+
+ # Update claim type.
+ try:
+ claim_type.save(ldb)
+
+ if protect:
+ claim_type.protect(ldb)
+ elif unprotect:
+ claim_type.unprotect(ldb)
+ except ModelError as e:
+ raise CommandError(e)
+
+ # Claim type updated successfully.
+ self.outf.write(f"Updated claim type: {name}\n")
+
+
+class cmd_domain_claim_claim_type_delete(Command):
+ """Delete claim types on the domain."""
+
+ synopsis = "%prog -H <URL> [options]"
+
+ takes_optiongroups = {
+ "sambaopts": options.SambaOptions,
+ "credopts": options.CredentialsOptions,
+ "hostopts": options.HostOptions,
+ }
+
+ takes_options = [
+ Option("--name", help="Display name of claim type to delete (required).",
+ dest="name", action="store", type=str, required=True),
+ Option("--force", help="Force claim type delete even if it is protected.",
+ dest="force", action="store_true")
+ ]
+
+ def run(self, hostopts=None, sambaopts=None, credopts=None,
+ name=None, force=None):
+
+ ldb = self.ldb_connect(hostopts, sambaopts, credopts)
+
+ try:
+ claim_type = ClaimType.get(ldb, display_name=name)
+ except ModelError as e:
+ raise CommandError(e)
+
+ # Check if claim type exists first.
+ if claim_type is None:
+ raise CommandError(f"Claim type {name} not found.")
+
+ # Delete claim type.
+ try:
+ if force:
+ claim_type.unprotect(ldb)
+
+ claim_type.delete(ldb)
+ except ModelError as e:
+ if not force:
+ raise CommandError(
+ f"{e}\nTry --force to delete protected claim types.")
+ else:
+ raise CommandError(e)
+
+ # Claim type deleted successfully.
+ self.outf.write(f"Deleted claim type: {name}\n")
+
+
+class cmd_domain_claim_claim_type_list(Command):
+ """List claim types on the domain."""
+
+ synopsis = "%prog -H <URL> [options]"
+
+ takes_optiongroups = {
+ "sambaopts": options.SambaOptions,
+ "credopts": options.CredentialsOptions,
+ "hostopts": options.HostOptions,
+ }
+
+ takes_options = [
+ Option("--json", help="Output results in JSON format.",
+ dest="output_format", action="store_const", const="json"),
+ ]
+
+ def run(self, hostopts=None, sambaopts=None, credopts=None,
+ output_format=None):
+
+ ldb = self.ldb_connect(hostopts, sambaopts, credopts)
+
+ # Claim types grouped by displayName.
+ try:
+ claim_types = {claim_type.display_name: claim_type.as_dict()
+ for claim_type in ClaimType.query(ldb)}
+ except ModelError as e:
+ raise CommandError(e)
+
+ # Using json output format gives more detail.
+ if output_format == "json":
+ self.print_json(claim_types)
+ else:
+ for claim_type in claim_types.keys():
+ self.outf.write(f"{claim_type}\n")
+
+
+class cmd_domain_claim_claim_type_view(Command):
+ """View a single claim type on the domain."""
+
+ synopsis = "%prog -H <URL> [options]"
+
+ takes_optiongroups = {
+ "sambaopts": options.SambaOptions,
+ "credopts": options.CredentialsOptions,
+ "hostopts": options.HostOptions,
+ }
+
+ takes_options = [
+ Option("--name", help="Display name of claim type to view (required).",
+ dest="name", action="store", type=str, required=True),
+ ]
+
+ def run(self, hostopts=None, sambaopts=None, credopts=None, name=None):
+
+ ldb = self.ldb_connect(hostopts, sambaopts, credopts)
+
+ try:
+ claim_type = ClaimType.get(ldb, display_name=name)
+ except ModelError as e:
+ raise CommandError(e)
+
+ # Check if claim type exists first.
+ if claim_type is None:
+ raise CommandError(f"Claim type {name} not found.")
+
+ # Display claim type as JSON.
+ self.print_json(claim_type.as_dict())
+
+
+class cmd_domain_claim_claim_type(SuperCommand):
+ """Manage claim types on the domain."""
+
+ subcommands = {
+ "create": cmd_domain_claim_claim_type_create(),
+ "delete": cmd_domain_claim_claim_type_delete(),
+ "modify": cmd_domain_claim_claim_type_modify(),
+ "list": cmd_domain_claim_claim_type_list(),
+ "view": cmd_domain_claim_claim_type_view(),
+ }
diff --git a/python/samba/netcmd/domain/claim/value_type.py b/python/samba/netcmd/domain/claim/value_type.py
new file mode 100644
index 0000000..a261113
--- /dev/null
+++ b/python/samba/netcmd/domain/claim/value_type.py
@@ -0,0 +1,105 @@
+# Unix SMB/CIFS implementation.
+#
+# claim value type management
+#
+# Copyright (C) Catalyst.Net Ltd. 2023
+#
+# Written by Rob van der Linde <rob@catalyst.net.nz>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import samba.getopt as options
+from samba.netcmd import Command, CommandError, Option, SuperCommand
+from samba.netcmd.domain.models import ValueType
+from samba.netcmd.domain.models.exceptions import ModelError
+
+
+class cmd_domain_claim_value_type_list(Command):
+    """List claim value types on the domain."""
+
+ synopsis = "%prog -H <URL> [options]"
+
+ takes_optiongroups = {
+ "sambaopts": options.SambaOptions,
+ "credopts": options.CredentialsOptions,
+ "hostopts": options.HostOptions,
+ }
+
+ takes_options = [
+ Option("--json", help="Output results in JSON format.",
+ dest="output_format", action="store_const", const="json"),
+ ]
+
+ def run(self, hostopts=None, sambaopts=None, credopts=None,
+ output_format=None):
+
+ ldb = self.ldb_connect(hostopts, sambaopts, credopts)
+
+ # Value types grouped by display name.
+ try:
+ value_types = {value_type.display_name: value_type.as_dict()
+ for value_type in ValueType.query(ldb)}
+ except ModelError as e:
+ raise CommandError(e)
+
+ # Using json output format gives more detail.
+ if output_format == "json":
+ self.print_json(value_types)
+ else:
+ for value_type in value_types.keys():
+ self.outf.write(f"{value_type}\n")
+
+
+class cmd_domain_claim_value_type_view(Command):
+ """View a single claim value type on the domain."""
+
+ synopsis = "%prog -H <URL> [options]"
+
+ takes_optiongroups = {
+ "sambaopts": options.SambaOptions,
+ "credopts": options.CredentialsOptions,
+ "hostopts": options.HostOptions,
+ }
+
+ takes_options = [
+ Option("--name",
+ help="Display name of claim value type to view (required).",
+ dest="name", action="store", type=str, required=True),
+ ]
+
+ def run(self, hostopts=None, sambaopts=None, credopts=None, name=None):
+
+ ldb = self.ldb_connect(hostopts, sambaopts, credopts)
+
+ try:
+ value_type = ValueType.get(ldb, display_name=name)
+ except ModelError as e:
+ raise CommandError(e)
+
+ # Check if value type exists first.
+ if value_type is None:
+ raise CommandError(f"Value type {name} not found.")
+
+    # Display value type as JSON.
+ self.print_json(value_type.as_dict())
+
+
+class cmd_domain_claim_value_type(SuperCommand):
+ """Manage claim value types on the domain."""
+
+ subcommands = {
+ "list": cmd_domain_claim_value_type_list(),
+ "view": cmd_domain_claim_value_type_view(),
+ }
diff --git a/python/samba/netcmd/domain/classicupgrade.py b/python/samba/netcmd/domain/classicupgrade.py
new file mode 100644
index 0000000..5b6a8a8
--- /dev/null
+++ b/python/samba/netcmd/domain/classicupgrade.py
@@ -0,0 +1,189 @@
+# domain management - domain classicupgrade
+#
+# Copyright Matthias Dieter Wallnoefer 2009
+# Copyright Andrew Kroeger 2009
+# Copyright Jelmer Vernooij 2007-2012
+# Copyright Giampaolo Lauria 2011
+# Copyright Matthieu Patou <mat@matws.net> 2011
+# Copyright Andrew Bartlett 2008-2015
+# Copyright Stefan Metzmacher 2012
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import os
+import tempfile
+import subprocess
+
+import samba
+import samba.getopt as options
+from samba.auth import system_session
+from samba.auth_util import system_session_unix
+from samba.common import get_string
+from samba.netcmd import Command, CommandError, Option
+from samba.samba3 import Samba3
+from samba.samba3 import param as s3param
+from samba.upgrade import upgrade_from_samba3
+
+from .common import common_ntvfs_options
+
+
+def get_testparm_var(testparm, smbconf, varname):
+ errfile = open(os.devnull, 'w')
+ p = subprocess.Popen([testparm, '-s', '-l',
+ '--parameter-name=%s' % varname, smbconf],
+ stdout=subprocess.PIPE, stderr=errfile)
+ (out, err) = p.communicate()
+ errfile.close()
+ lines = out.split(b'\n')
+ if lines:
+ return get_string(lines[0]).strip()
+ return ""
+
+
+class cmd_domain_classicupgrade(Command):
+ """Upgrade from Samba classic (NT4-like) database to Samba AD DC database.
+
+ Specify either a directory with all Samba classic DC databases and state files (with --dbdir) or
+ the testparm utility from your classic installation (with --testparm).
+ """
+
+ synopsis = "%prog [options] <classic_smb_conf>"
+
+ takes_optiongroups = {
+ "sambaopts": options.SambaOptions,
+ "versionopts": options.VersionOptions
+ }
+
+ takes_options = [
+ Option("--dbdir", type="string", metavar="DIR",
+ help="Path to samba classic DC database directory"),
+ Option("--testparm", type="string", metavar="PATH",
+ help="Path to samba classic DC testparm utility from the previous installation. This allows the default paths of the previous installation to be followed"),
+ Option("--targetdir", type="string", metavar="DIR",
+ help="Path prefix where the new Samba 4.0 AD domain should be initialised"),
+ Option("-q", "--quiet", help="Be quiet", action="store_true"),
+ Option("-v", "--verbose", help="Be verbose", action="store_true"),
+ Option("--dns-backend", type="choice", metavar="NAMESERVER-BACKEND",
+ choices=["SAMBA_INTERNAL", "BIND9_FLATFILE", "BIND9_DLZ", "NONE"],
+ help="The DNS server backend. SAMBA_INTERNAL is the builtin name server (default), "
+ "BIND9_FLATFILE uses bind9 text database to store zone information, "
+ "BIND9_DLZ uses samba4 AD to store zone information, "
+ "NONE skips the DNS setup entirely (this DC will not be a DNS server)",
+ default="SAMBA_INTERNAL")
+ ]
+
+ ntvfs_options = [
+ Option("--use-xattrs", type="choice", choices=["yes", "no", "auto"],
+ metavar="[yes|no|auto]",
+ help="Define if we should use the native fs capabilities or a tdb file for "
+ "storing attributes likes ntacl when --use-ntvfs is set. "
+ "auto tries to make an intelligent guess based on the user rights and system capabilities",
+ default="auto")
+ ]
+ if samba.is_ntvfs_fileserver_built():
+ takes_options.extend(common_ntvfs_options)
+ takes_options.extend(ntvfs_options)
+
+ takes_args = ["smbconf"]
+
+ def run(self, smbconf=None, targetdir=None, dbdir=None, testparm=None,
+ quiet=False, verbose=False, use_xattrs="auto", sambaopts=None, versionopts=None,
+ dns_backend=None, use_ntvfs=False):
+
+ if not os.path.exists(smbconf):
+ raise CommandError("File %s does not exist" % smbconf)
+
+ if testparm and not os.path.exists(testparm):
+ raise CommandError("Testparm utility %s does not exist" % testparm)
+
+ if dbdir and not os.path.exists(dbdir):
+ raise CommandError("Directory %s does not exist" % dbdir)
+
+ if not dbdir and not testparm:
+ raise CommandError("Please specify either dbdir or testparm")
+
+ logger = self.get_logger(verbose=verbose, quiet=quiet)
+
+ if dbdir and testparm:
+ logger.warning("both dbdir and testparm specified, ignoring dbdir.")
+ dbdir = None
+
+ lp = sambaopts.get_loadparm()
+
+ s3conf = s3param.get_context()
+
+ if sambaopts.realm:
+ s3conf.set("realm", sambaopts.realm)
+
+ if targetdir is not None:
+ if not os.path.isdir(targetdir):
+ os.mkdir(targetdir)
+
+ eadb = True
+ if use_xattrs == "yes":
+ eadb = False
+ elif use_xattrs == "auto" and not use_ntvfs:
+ eadb = False
+ elif not use_ntvfs:
+ raise CommandError("--use-xattrs=no requires --use-ntvfs (not supported for production use). "
+ "Please re-run with --use-xattrs omitted.")
+ elif use_xattrs == "auto" and not s3conf.get("posix:eadb"):
+ if targetdir:
+ tmpfile = tempfile.NamedTemporaryFile(dir=os.path.abspath(targetdir))
+ else:
+ tmpfile = tempfile.NamedTemporaryFile(dir=os.path.abspath(os.path.dirname(lp.get("private dir"))))
+ try:
+ try:
+ samba.ntacls.setntacl(lp, tmpfile.name,
+ "O:S-1-5-32G:S-1-5-32",
+ "S-1-5-32",
+ system_session_unix(),
+ "native")
+ eadb = False
+ except Exception:
+ # FIXME: Don't catch all exceptions here
+ logger.info("You are not root or your system does not support xattr, using tdb backend for attributes. "
+ "If you intend to use this provision in production, rerun the script as root on a system supporting xattrs.")
+ finally:
+ tmpfile.close()
+
+ # Set correct default values from dbdir or testparm
+ paths = {}
+ if dbdir:
+ paths["state directory"] = dbdir
+ paths["private dir"] = dbdir
+ paths["lock directory"] = dbdir
+ paths["smb passwd file"] = dbdir + "/smbpasswd"
+ else:
+ paths["state directory"] = get_testparm_var(testparm, smbconf, "state directory")
+ paths["private dir"] = get_testparm_var(testparm, smbconf, "private dir")
+ paths["smb passwd file"] = get_testparm_var(testparm, smbconf, "smb passwd file")
+ paths["lock directory"] = get_testparm_var(testparm, smbconf, "lock directory")
+ # "testparm" from Samba 3 < 3.4.x is not aware of the parameter
+ # "state directory", instead make use of "lock directory"
+ if len(paths["state directory"]) == 0:
+ paths["state directory"] = paths["lock directory"]
+
+ for p in paths:
+ s3conf.set(p, paths[p])
+
+ # load smb.conf parameters
+ logger.info("Reading smb.conf")
+ s3conf.load(smbconf)
+ samba3 = Samba3(smbconf, s3conf)
+
+ logger.info("Provisioning")
+ upgrade_from_samba3(samba3, logger, targetdir, session_info=system_session(),
+ useeadb=eadb, dns_backend=dns_backend, use_ntvfs=use_ntvfs)
diff --git a/python/samba/netcmd/domain/common.py b/python/samba/netcmd/domain/common.py
new file mode 100644
index 0000000..144d22b
--- /dev/null
+++ b/python/samba/netcmd/domain/common.py
@@ -0,0 +1,64 @@
+# domain management - common code
+#
+# Copyright Matthias Dieter Wallnoefer 2009
+# Copyright Andrew Kroeger 2009
+# Copyright Jelmer Vernooij 2007-2012
+# Copyright Giampaolo Lauria 2011
+# Copyright Matthieu Patou <mat@matws.net> 2011
+# Copyright Andrew Bartlett 2008-2015
+# Copyright Stefan Metzmacher 2012
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from samba.netcmd import Option
+from samba.samdb import get_default_backend_store
+
+common_ntvfs_options = [
+ Option("--use-ntvfs", help="Use NTVFS for the fileserver (default = no)",
+ action="store_true")
+]
+
+common_provision_join_options = [
+ Option("--machinepass", type="string", metavar="PASSWORD",
+ help="choose machine password (otherwise random)"),
+ Option("--plaintext-secrets", action="store_true",
+ help="Store secret/sensitive values as plain text on disk" +
+ "(default is to encrypt secret/sensitive values)"),
+ Option("--backend-store", type="choice", metavar="BACKENDSTORE",
+ choices=["tdb", "mdb"],
+ help="Specify the database backend to be used "
+ "(default is %s)" % get_default_backend_store()),
+ Option("--backend-store-size", type="bytes", metavar="SIZE",
+ help="Specify the size of the backend database, currently only " +
+ "supported by lmdb backends (default is 8 Gb)."),
+ Option("--targetdir", metavar="DIR",
+ help="Set target directory (where to store provision)", type=str),
+ Option("-q", "--quiet", help="Be quiet", action="store_true"),
+]
+
+common_join_options = [
+ Option("--server", help="DC to join", type=str),
+ Option("--site", help="site to join", type=str),
+ Option("--domain-critical-only",
+ help="only replicate critical domain objects",
+ action="store_true"),
+ Option("--dns-backend", type="choice", metavar="NAMESERVER-BACKEND",
+ choices=["SAMBA_INTERNAL", "BIND9_DLZ", "NONE"],
+ help="The DNS server backend. SAMBA_INTERNAL is the builtin name server (default), "
+ "BIND9_DLZ uses samba4 AD to store zone information, "
+ "NONE skips the DNS setup entirely (this DC will not be a DNS server)",
+ default="SAMBA_INTERNAL"),
+ Option("-v", "--verbose", help="Be verbose", action="store_true")
+]
diff --git a/python/samba/netcmd/domain/dcpromo.py b/python/samba/netcmd/domain/dcpromo.py
new file mode 100644
index 0000000..bf78b74
--- /dev/null
+++ b/python/samba/netcmd/domain/dcpromo.py
@@ -0,0 +1,90 @@
+# domain management - domain dcpromo
+#
+# Copyright Matthias Dieter Wallnoefer 2009
+# Copyright Andrew Kroeger 2009
+# Copyright Jelmer Vernooij 2007-2012
+# Copyright Giampaolo Lauria 2011
+# Copyright Matthieu Patou <mat@matws.net> 2011
+# Copyright Andrew Bartlett 2008-2015
+# Copyright Stefan Metzmacher 2012
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import samba
+import samba.getopt as options
+from samba.join import join_DC, join_RODC
+from samba.net import Net
+from samba.netcmd import Command, CommandError
+
+from .common import (common_join_options, common_ntvfs_options,
+ common_provision_join_options)
+
+
+class cmd_domain_dcpromo(Command):
+ """Promote an existing domain member or NT4 PDC to an AD DC."""
+
+ synopsis = "%prog <dnsdomain> [DC|RODC] [options]"
+
+ takes_optiongroups = {
+ "sambaopts": options.SambaOptions,
+ "versionopts": options.VersionOptions,
+ "credopts": options.CredentialsOptions,
+ }
+
+ takes_options = []
+ takes_options.extend(common_join_options)
+
+ takes_options.extend(common_provision_join_options)
+
+ if samba.is_ntvfs_fileserver_built():
+ takes_options.extend(common_ntvfs_options)
+
+ takes_args = ["domain", "role?"]
+
+ def run(self, domain, role=None, sambaopts=None, credopts=None,
+ versionopts=None, server=None, site=None, targetdir=None,
+ domain_critical_only=False, machinepass=None,
+ use_ntvfs=False, dns_backend=None,
+ quiet=False, verbose=False, plaintext_secrets=False,
+ backend_store=None, backend_store_size=None):
+ lp = sambaopts.get_loadparm()
+ creds = credopts.get_credentials(lp)
+
+ logger = self.get_logger(verbose=verbose, quiet=quiet)
+
+ netbios_name = lp.get("netbios name")
+
+ if role is not None:
+ role = role.upper()
+
+ if role == "DC":
+ join_DC(logger=logger, server=server, creds=creds, lp=lp, domain=domain,
+ site=site, netbios_name=netbios_name, targetdir=targetdir,
+ domain_critical_only=domain_critical_only,
+ machinepass=machinepass, use_ntvfs=use_ntvfs,
+ dns_backend=dns_backend,
+ promote_existing=True, plaintext_secrets=plaintext_secrets,
+ backend_store=backend_store,
+ backend_store_size=backend_store_size)
+ elif role == "RODC":
+ join_RODC(logger=logger, server=server, creds=creds, lp=lp, domain=domain,
+ site=site, netbios_name=netbios_name, targetdir=targetdir,
+ domain_critical_only=domain_critical_only,
+ machinepass=machinepass, use_ntvfs=use_ntvfs, dns_backend=dns_backend,
+ promote_existing=True, plaintext_secrets=plaintext_secrets,
+ backend_store=backend_store,
+ backend_store_size=backend_store_size)
+ else:
+ raise CommandError("Invalid role '%s' (possible values: DC, RODC)" % role)
diff --git a/python/samba/netcmd/domain/demote.py b/python/samba/netcmd/domain/demote.py
new file mode 100644
index 0000000..ae4d11d
--- /dev/null
+++ b/python/samba/netcmd/domain/demote.py
@@ -0,0 +1,335 @@
+# domain management - domain demote
+#
+# Copyright Matthias Dieter Wallnoefer 2009
+# Copyright Andrew Kroeger 2009
+# Copyright Jelmer Vernooij 2007-2012
+# Copyright Giampaolo Lauria 2011
+# Copyright Matthieu Patou <mat@matws.net> 2011
+# Copyright Andrew Bartlett 2008-2015
+# Copyright Stefan Metzmacher 2012
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import ldb
+import samba.getopt as options
+from samba import dsdb, remove_dc, werror
+from samba.auth import system_session
+from samba.dcerpc import drsuapi, misc
+from samba.drs_utils import drsuapi_connect
+from samba.dsdb import (
+ DS_NTDSDSA_OPT_DISABLE_INBOUND_REPL,
+ DS_NTDSDSA_OPT_DISABLE_OUTBOUND_REPL,
+ UF_PARTIAL_SECRETS_ACCOUNT,
+ UF_SERVER_TRUST_ACCOUNT,
+ UF_TRUSTED_FOR_DELEGATION,
+ UF_WORKSTATION_TRUST_ACCOUNT
+)
+from samba.net import Net
+from samba.netcmd import Command, CommandError, Option
+from samba.samdb import SamDB
+
+
+class cmd_domain_demote(Command):
+ """Demote ourselves from the role of Domain Controller."""
+
+ synopsis = "%prog [options]"
+
+ takes_options = [
+ Option("--server", help="writable DC to write demotion changes on", type=str),
+ Option("-H", "--URL", help="LDB URL for database or target server", type=str,
+ metavar="URL", dest="H"),
+ Option("--remove-other-dead-server", help="Dead DC (name or NTDS GUID) "
+ "to remove ALL references to (rather than this DC)", type=str),
+ Option("-q", "--quiet", help="Be quiet", action="store_true"),
+ Option("-v", "--verbose", help="Be verbose", action="store_true"),
+ ]
+
+ takes_optiongroups = {
+ "sambaopts": options.SambaOptions,
+ "credopts": options.CredentialsOptions,
+ "versionopts": options.VersionOptions,
+ }
+
+ def run(self, sambaopts=None, credopts=None,
+ versionopts=None, server=None,
+ remove_other_dead_server=None, H=None,
+ verbose=False, quiet=False):
+ lp = sambaopts.get_loadparm()
+ creds = credopts.get_credentials(lp)
+
+ logger = self.get_logger(verbose=verbose, quiet=quiet)
+
+ if remove_other_dead_server is not None:
+ if server is not None:
+ samdb = SamDB(url="ldap://%s" % server,
+ session_info=system_session(),
+ credentials=creds, lp=lp)
+ else:
+ samdb = SamDB(url=H, session_info=system_session(), credentials=creds, lp=lp)
+ try:
+ remove_dc.remove_dc(samdb, logger, remove_other_dead_server)
+ except remove_dc.DemoteException as err:
+ raise CommandError("Demote failed: %s" % err)
+ return
+
+ netbios_name = lp.get("netbios name")
+ samdb = SamDB(url=H, session_info=system_session(), credentials=creds, lp=lp)
+ if not server:
+ res = samdb.search(expression='(&(objectClass=computer)(serverReferenceBL=*))', attrs=["dnsHostName", "name"])
+ if (len(res) == 0):
+ raise CommandError("Unable to search for servers")
+
+ if (len(res) == 1):
+ raise CommandError("You are the last server in the domain")
+
+ server = None
+ for e in res:
+ if str(e["name"]).lower() != netbios_name.lower():
+ server = e["dnsHostName"]
+ break
+
+ ntds_guid = samdb.get_ntds_GUID()
+ msg = samdb.search(base=str(samdb.get_config_basedn()),
+ scope=ldb.SCOPE_SUBTREE, expression="(objectGUID=%s)" % ntds_guid,
+ attrs=['options'])
+ if len(msg) == 0 or "options" not in msg[0]:
+ raise CommandError("Failed to find options on %s" % ntds_guid)
+
+ ntds_dn = msg[0].dn
+ dsa_options = int(str(msg[0]['options']))
+
+ res = samdb.search(expression="(fSMORoleOwner=%s)" % str(ntds_dn),
+ controls=["search_options:1:2"])
+
+ if len(res) != 0:
+ raise CommandError("Current DC is still the owner of %d role(s), "
+ "use the role command to transfer roles to "
+ "another DC" %
+ len(res))
+
+ self.errf.write("Using %s as partner server for the demotion\n" %
+ server)
+ (drsuapiBind, drsuapi_handle, supportedExtensions) = drsuapi_connect(server, lp, creds)
+
+ self.errf.write("Deactivating inbound replication\n")
+
+ nmsg = ldb.Message()
+ nmsg.dn = msg[0].dn
+
+ if not (dsa_options & DS_NTDSDSA_OPT_DISABLE_OUTBOUND_REPL) and not samdb.am_rodc():
+ dsa_options |= DS_NTDSDSA_OPT_DISABLE_INBOUND_REPL
+ nmsg["options"] = ldb.MessageElement(str(dsa_options), ldb.FLAG_MOD_REPLACE, "options")
+ samdb.modify(nmsg)
+
+ self.errf.write("Asking partner server %s to synchronize from us\n"
+ % server)
+ for part in (samdb.get_schema_basedn(),
+ samdb.get_config_basedn(),
+ samdb.get_root_basedn()):
+ nc = drsuapi.DsReplicaObjectIdentifier()
+ nc.dn = str(part)
+
+ req1 = drsuapi.DsReplicaSyncRequest1()
+ req1.naming_context = nc
+ req1.options = drsuapi.DRSUAPI_DRS_WRIT_REP
+ req1.source_dsa_guid = misc.GUID(ntds_guid)
+
+ try:
+ drsuapiBind.DsReplicaSync(drsuapi_handle, 1, req1)
+ except RuntimeError as e1:
+ (werr, string) = e1.args
+ if werr == werror.WERR_DS_DRA_NO_REPLICA:
+ pass
+ else:
+ self.errf.write(
+ "Error while replicating out last local changes from '%s' for demotion, "
+ "re-enabling inbound replication\n" % part)
+ dsa_options ^= DS_NTDSDSA_OPT_DISABLE_INBOUND_REPL
+ nmsg["options"] = ldb.MessageElement(str(dsa_options), ldb.FLAG_MOD_REPLACE, "options")
+ samdb.modify(nmsg)
+ raise CommandError("Error while sending a DsReplicaSync for partition '%s'" % str(part), string)
+ try:
+ remote_samdb = SamDB(url="ldap://%s" % server,
+ session_info=system_session(),
+ credentials=creds, lp=lp)
+
+ self.errf.write("Changing userControl and container\n")
+ res = remote_samdb.search(base=str(remote_samdb.domain_dn()),
+ expression="(&(objectClass=user)(sAMAccountName=%s$))" %
+ netbios_name.upper(),
+ attrs=["userAccountControl"])
+ dc_dn = res[0].dn
+ uac = int(str(res[0]["userAccountControl"]))
+
+ except Exception as e:
+ if not (dsa_options & DS_NTDSDSA_OPT_DISABLE_OUTBOUND_REPL) and not samdb.am_rodc():
+ self.errf.write(
+ "Error while demoting, re-enabling inbound replication\n")
+ dsa_options ^= DS_NTDSDSA_OPT_DISABLE_INBOUND_REPL
+ nmsg["options"] = ldb.MessageElement(str(dsa_options), ldb.FLAG_MOD_REPLACE, "options")
+ samdb.modify(nmsg)
+ raise CommandError("Error while changing account control", e)
+
+ if (len(res) != 1):
+ if not (dsa_options & DS_NTDSDSA_OPT_DISABLE_OUTBOUND_REPL) and not samdb.am_rodc():
+ self.errf.write(
+ "Error while demoting, re-enabling inbound replication\n")
+ dsa_options ^= DS_NTDSDSA_OPT_DISABLE_INBOUND_REPL
+ nmsg["options"] = ldb.MessageElement(str(dsa_options), ldb.FLAG_MOD_REPLACE, "options")
+ samdb.modify(nmsg)
+ raise CommandError("Unable to find object with samaccountName = %s$"
+ " in the remote dc" % netbios_name.upper())
+
+ uac &= ~(UF_SERVER_TRUST_ACCOUNT |
+ UF_TRUSTED_FOR_DELEGATION |
+ UF_PARTIAL_SECRETS_ACCOUNT)
+ uac |= UF_WORKSTATION_TRUST_ACCOUNT
+
+ msg = ldb.Message()
+ msg.dn = dc_dn
+
+ msg["userAccountControl"] = ldb.MessageElement("%d" % uac,
+ ldb.FLAG_MOD_REPLACE,
+ "userAccountControl")
+ try:
+ remote_samdb.modify(msg)
+ except Exception as e:
+ if not (dsa_options & DS_NTDSDSA_OPT_DISABLE_OUTBOUND_REPL) and not samdb.am_rodc():
+ self.errf.write(
+ "Error while demoting, re-enabling inbound replication\n")
+ dsa_options ^= DS_NTDSDSA_OPT_DISABLE_INBOUND_REPL
+ nmsg["options"] = ldb.MessageElement(str(dsa_options), ldb.FLAG_MOD_REPLACE, "options")
+ samdb.modify(nmsg)
+
+ raise CommandError("Error while changing account control", e)
+
+ dc_name = res[0].dn.get_rdn_value()
+ rdn = "CN=%s" % dc_name
+
+ # Let's move to the Computer container
+ i = 0
+ newrdn = str(rdn)
+
+ computer_dn = remote_samdb.get_wellknown_dn(
+ remote_samdb.get_default_basedn(),
+ dsdb.DS_GUID_COMPUTERS_CONTAINER)
+ res = remote_samdb.search(base=computer_dn, expression=rdn, scope=ldb.SCOPE_ONELEVEL)
+
+ if (len(res) != 0):
+ res = remote_samdb.search(base=computer_dn, expression="%s-%d" % (rdn, i),
+ scope=ldb.SCOPE_ONELEVEL)
+ while(len(res) != 0 and i < 100):
+ i = i + 1
+ res = remote_samdb.search(base=computer_dn, expression="%s-%d" % (rdn, i),
+ scope=ldb.SCOPE_ONELEVEL)
+
+ if i == 100:
+ if not (dsa_options & DS_NTDSDSA_OPT_DISABLE_OUTBOUND_REPL) and not samdb.am_rodc():
+ self.errf.write(
+ "Error while demoting, re-enabling inbound replication\n")
+ dsa_options ^= DS_NTDSDSA_OPT_DISABLE_INBOUND_REPL
+ nmsg["options"] = ldb.MessageElement(str(dsa_options), ldb.FLAG_MOD_REPLACE, "options")
+ samdb.modify(nmsg)
+
+ msg = ldb.Message()
+ msg.dn = dc_dn
+
+ msg["userAccountControl"] = ldb.MessageElement("%d" % uac,
+ ldb.FLAG_MOD_REPLACE,
+ "userAccountControl")
+
+ remote_samdb.modify(msg)
+
+ raise CommandError("Unable to find a slot for renaming %s,"
+ " all names from %s-1 to %s-%d seemed used" %
+ (str(dc_dn), rdn, rdn, i - 9))
+
+ newrdn = "%s-%d" % (rdn, i)
+
+ try:
+ newdn = ldb.Dn(remote_samdb, "%s,%s" % (newrdn, str(computer_dn)))
+ remote_samdb.rename(dc_dn, newdn)
+ except Exception as e:
+ if not (dsa_options & DS_NTDSDSA_OPT_DISABLE_OUTBOUND_REPL) and not samdb.am_rodc():
+ self.errf.write(
+ "Error while demoting, re-enabling inbound replication\n")
+ dsa_options ^= DS_NTDSDSA_OPT_DISABLE_INBOUND_REPL
+ nmsg["options"] = ldb.MessageElement(str(dsa_options), ldb.FLAG_MOD_REPLACE, "options")
+ samdb.modify(nmsg)
+
+ msg = ldb.Message()
+ msg.dn = dc_dn
+
+ msg["userAccountControl"] = ldb.MessageElement("%d" % uac,
+ ldb.FLAG_MOD_REPLACE,
+ "userAccountControl")
+
+ remote_samdb.modify(msg)
+ raise CommandError("Error while renaming %s to %s" % (str(dc_dn), str(newdn)), e)
+
+ server_dsa_dn = samdb.get_serverName()
+ domain = remote_samdb.get_root_basedn()
+
+ try:
+ req1 = drsuapi.DsRemoveDSServerRequest1()
+ req1.server_dn = str(server_dsa_dn)
+ req1.domain_dn = str(domain)
+ req1.commit = 1
+
+ drsuapiBind.DsRemoveDSServer(drsuapi_handle, 1, req1)
+ except RuntimeError as e3:
+ (werr, string) = e3.args
+ if not (dsa_options & DS_NTDSDSA_OPT_DISABLE_OUTBOUND_REPL) and not samdb.am_rodc():
+ self.errf.write(
+ "Error while demoting, re-enabling inbound replication\n")
+ dsa_options ^= DS_NTDSDSA_OPT_DISABLE_INBOUND_REPL
+ nmsg["options"] = ldb.MessageElement(str(dsa_options), ldb.FLAG_MOD_REPLACE, "options")
+ samdb.modify(nmsg)
+
+ msg = ldb.Message()
+ msg.dn = newdn
+
+ msg["userAccountControl"] = ldb.MessageElement("%d" % uac,
+ ldb.FLAG_MOD_REPLACE,
+ "userAccountControl")
+ remote_samdb.modify(msg)
+ remote_samdb.rename(newdn, dc_dn)
+ if werr == werror.WERR_DS_DRA_NO_REPLICA:
+ raise CommandError("The DC %s is not present on (already "
+ "removed from) the remote server: %s" %
+ (server_dsa_dn, e3))
+ else:
+ raise CommandError("Error while sending a removeDsServer "
+ "of %s: %s" %
+ (server_dsa_dn, e3))
+
+ remove_dc.remove_sysvol_references(remote_samdb, logger, dc_name)
+
+ # These are objects under the computer account that should be deleted
+ for s in ("CN=Enterprise,CN=NTFRS Subscriptions",
+ "CN=%s, CN=NTFRS Subscriptions" % lp.get("realm"),
+ "CN=Domain system Volumes (SYSVOL Share), CN=NTFRS Subscriptions",
+ "CN=NTFRS Subscriptions"):
+ try:
+ remote_samdb.delete(ldb.Dn(remote_samdb,
+ "%s,%s" % (s, str(newdn))))
+ except ldb.LdbError:
+ pass
+
+ # get dns host name for target server to demote, remove dns references
+ remove_dc.remove_dns_references(remote_samdb, logger, samdb.host_dns_name(),
+ ignore_no_name=True)
+
+ self.errf.write("Demote successful\n")
diff --git a/python/samba/netcmd/domain/functional_prep.py b/python/samba/netcmd/domain/functional_prep.py
new file mode 100644
index 0000000..3e1d4e1
--- /dev/null
+++ b/python/samba/netcmd/domain/functional_prep.py
@@ -0,0 +1,145 @@
+# domain management - domain functional_prep
+#
+# Copyright Matthias Dieter Wallnoefer 2009
+# Copyright Andrew Kroeger 2009
+# Copyright Jelmer Vernooij 2007-2012
+# Copyright Giampaolo Lauria 2011
+# Copyright Matthieu Patou <mat@matws.net> 2011
+# Copyright Andrew Bartlett 2008-2015
+# Copyright Stefan Metzmacher 2012
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import ldb
+import samba.getopt as options
+from samba.auth import system_session
+from samba.dsdb import DS_DOMAIN_FUNCTION_2008, DS_DOMAIN_FUNCTION_2008_R2
+from samba.netcmd import Command, CommandError, Option
+from samba.netcmd.fsmo import get_fsmo_roleowner
+from samba.samdb import SamDB
+
+from samba import functional_level
+
+
+class cmd_domain_functional_prep(Command):
+ """Domain functional level preparation"""
+
+ synopsis = "%prog [options]"
+
+ takes_optiongroups = {
+ "sambaopts": options.SambaOptions,
+ "versionopts": options.VersionOptions,
+ "credopts": options.CredentialsOptions,
+ }
+
+ takes_options = [
+ Option("-H", "--URL", help="LDB URL for database or target server", type=str,
+ metavar="URL", dest="H"),
+ Option("-q", "--quiet", help="Be quiet", action="store_true"),
+ Option("-v", "--verbose", help="Be verbose", action="store_true"),
+ Option("--function-level", type="choice", metavar="FUNCTION_LEVEL",
+ choices=["2008_R2", "2012", "2012_R2", "2016"],
+ help="The functional level to prepare for. Default is (Windows) 2016.",
+ default="2016"),
+ Option("--forest-prep", action="store_true",
+ help="Run the forest prep (by default, both the domain and forest prep are run)."),
+ Option("--domain-prep", action="store_true",
+ help="Run the domain prep (by default, both the domain and forest prep are run).")
+ ]
+
+ def run(self, **kwargs):
+ updates_allowed_overridden = False
+ sambaopts = kwargs.get("sambaopts")
+ credopts = kwargs.get("credopts")
+ lp = sambaopts.get_loadparm()
+ creds = credopts.get_credentials(lp)
+ H = kwargs.get("H")
+ function_level = kwargs.get("function_level")
+ try:
+ target_level = functional_level.string_to_level(function_level)
+ except KeyError:
+ raise CommandError(f"'{function_level}' is not known to Samba as an AD functional level")
+
+ forest_prep = kwargs.get("forest_prep")
+ domain_prep = kwargs.get("domain_prep")
+
+ samdb = SamDB(url=H, session_info=system_session(), credentials=creds, lp=lp)
+
+ # we're not going to get far if the config doesn't allow schema updates
+ if lp.get("dsdb:schema update allowed") is None:
+ lp.set("dsdb:schema update allowed", "yes")
+ print("Temporarily overriding 'dsdb:schema update allowed' setting")
+ updates_allowed_overridden = True
+
+ if forest_prep is None and domain_prep is None:
+ forest_prep = True
+ domain_prep = True
+
+ own_dn = ldb.Dn(samdb, samdb.get_dsServiceName())
+ if forest_prep:
+ master = get_fsmo_roleowner(samdb, str(samdb.get_schema_basedn()),
+ 'schema')
+ if own_dn != master:
+ raise CommandError("This server is not the schema master.")
+
+ if domain_prep:
+ domain_dn = samdb.domain_dn()
+ infrastructure_dn = "CN=Infrastructure," + domain_dn
+ master = get_fsmo_roleowner(samdb, infrastructure_dn,
+ 'infrastructure')
+ if own_dn != master:
+ raise CommandError("This server is not the infrastructure master.")
+
+ exception_encountered = None
+
+ if forest_prep and exception_encountered is None:
+ samdb.transaction_start()
+ try:
+ from samba.forest_update import ForestUpdate
+ forest = ForestUpdate(samdb, fix=True)
+
+ forest.check_updates_iterator([11, 54, 79, 80, 81, 82, 83])
+ forest.check_updates_functional_level(target_level,
+ DS_DOMAIN_FUNCTION_2008_R2,
+ update_revision=True)
+
+ samdb.transaction_commit()
+ except Exception as e:
+ print("Exception: %s" % e)
+ samdb.transaction_cancel()
+ exception_encountered = e
+
+ if domain_prep and exception_encountered is None:
+ samdb.transaction_start()
+ try:
+ from samba.domain_update import DomainUpdate
+
+ domain = DomainUpdate(samdb, fix=True)
+ domain.check_updates_functional_level(target_level,
+ DS_DOMAIN_FUNCTION_2008,
+ update_revision=True)
+
+ samdb.transaction_commit()
+ except Exception as e:
+ print("Exception: %s" % e)
+ samdb.transaction_cancel()
+ exception_encountered = e
+
+ if updates_allowed_overridden:
+ lp.set("dsdb:schema update allowed", "no")
+
+ if exception_encountered is not None:
+ raise CommandError('Failed to perform functional prep: %r' %
+ exception_encountered)
diff --git a/python/samba/netcmd/domain/info.py b/python/samba/netcmd/domain/info.py
new file mode 100644
index 0000000..8454cb3
--- /dev/null
+++ b/python/samba/netcmd/domain/info.py
@@ -0,0 +1,58 @@
+# domain management - domain info
+#
+# Copyright Matthias Dieter Wallnoefer 2009
+# Copyright Andrew Kroeger 2009
+# Copyright Jelmer Vernooij 2007-2012
+# Copyright Giampaolo Lauria 2011
+# Copyright Matthieu Patou <mat@matws.net> 2011
+# Copyright Andrew Bartlett 2008-2015
+# Copyright Stefan Metzmacher 2012
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import samba.getopt as options
+from samba.netcmd import Command, CommandError
+from samba.netcmd.common import netcmd_get_domain_infos_via_cldap
+
+
+class cmd_domain_info(Command):
+ """Print basic info about a domain and the DC passed as parameter."""
+
+ synopsis = "%prog <ip_address> [options]"
+
+ takes_options = [
+ ]
+
+ takes_optiongroups = {
+ "sambaopts": options.SambaOptions,
+ "credopts": options.CredentialsOptions,
+ "versionopts": options.VersionOptions,
+ }
+
+ takes_args = ["address"]
+
+ def run(self, address, credopts=None, sambaopts=None, versionopts=None):
+ lp = sambaopts.get_loadparm()
+ try:
+ res = netcmd_get_domain_infos_via_cldap(lp, None, address)
+ except RuntimeError:
+ raise CommandError("Invalid IP address '" + address + "'!")
+ self.outf.write("Forest : %s\n" % res.forest)
+ self.outf.write("Domain : %s\n" % res.dns_domain)
+ self.outf.write("Netbios domain : %s\n" % res.domain_name)
+ self.outf.write("DC name : %s\n" % res.pdc_dns_name)
+ self.outf.write("DC netbios name : %s\n" % res.pdc_name)
+ self.outf.write("Server site : %s\n" % res.server_site)
+ self.outf.write("Client site : %s\n" % res.client_site)
diff --git a/python/samba/netcmd/domain/join.py b/python/samba/netcmd/domain/join.py
new file mode 100644
index 0000000..936cfa8
--- /dev/null
+++ b/python/samba/netcmd/domain/join.py
@@ -0,0 +1,146 @@
+# domain management - domain join
+#
+# Copyright Matthias Dieter Wallnoefer 2009
+# Copyright Andrew Kroeger 2009
+# Copyright Jelmer Vernooij 2007-2012
+# Copyright Giampaolo Lauria 2011
+# Copyright Matthieu Patou <mat@matws.net> 2011
+# Copyright Andrew Bartlett 2008-2015
+# Copyright Stefan Metzmacher 2012
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import os
+import tempfile
+
+import samba
+import samba.getopt as options
+from samba import is_ad_dc_built
+from samba.dcerpc import nbt
+from samba.join import join_DC, join_RODC
+from samba.net import LIBNET_JOIN_AUTOMATIC, Net
+from samba.net_s3 import Net as s3_Net
+from samba.netcmd import Command, CommandError, Option
+from samba.param import default_path
+from samba.samba3 import param as s3param
+
+from .common import common_join_options, common_provision_join_options
+
+
+class cmd_domain_join(Command):
+ """Join domain as either member or backup domain controller."""
+
+ synopsis = "%prog <dnsdomain> [DC|RODC|MEMBER] [options]"
+
+ takes_optiongroups = {
+ "sambaopts": options.SambaOptions,
+ "versionopts": options.VersionOptions,
+ "credopts": options.CredentialsOptions,
+ }
+
+ ntvfs_options = [
+ Option(
+ "--use-ntvfs", help="Use NTVFS for the fileserver (default = no)",
+ action="store_true")
+ ]
+
+ selftest_options = [
+ Option("--experimental-s4-member", action="store_true",
+ help="Perform member joins using the s4 Net join_member. "
+ "Don't choose this unless you know what you're doing")
+ ]
+
+ takes_options = [
+ Option("--no-dns-updates", action="store_true",
+ help="Disable DNS updates")
+ ]
+ takes_options.extend(common_join_options)
+ takes_options.extend(common_provision_join_options)
+
+ if samba.is_ntvfs_fileserver_built():
+ takes_options.extend(ntvfs_options)
+
+ if samba.is_selftest_enabled():
+ takes_options.extend(selftest_options)
+
+ takes_args = ["domain", "role?"]
+
+ def run(self, domain, role=None, sambaopts=None, credopts=None,
+ versionopts=None, server=None, site=None, targetdir=None,
+ domain_critical_only=False, machinepass=None,
+ use_ntvfs=False, experimental_s4_member=False, dns_backend=None,
+ quiet=False, verbose=False, no_dns_updates=False,
+ plaintext_secrets=False,
+ backend_store=None, backend_store_size=None):
+ lp = sambaopts.get_loadparm()
+ creds = credopts.get_credentials(lp)
+ net = Net(creds, lp, server=credopts.ipaddress)
+
+ logger = self.get_logger(verbose=verbose, quiet=quiet)
+
+ netbios_name = lp.get("netbios name")
+
+ if role is not None:
+ role = role.upper()
+
+ if role is None or role == "MEMBER":
+ if experimental_s4_member:
+ (join_password, sid, domain_name) = net.join_member(
+ domain, netbios_name, LIBNET_JOIN_AUTOMATIC,
+ machinepass=machinepass)
+ else:
+ lp.set('realm', domain)
+ if lp.get('workgroup') == 'WORKGROUP':
+ lp.set('workgroup', net.finddc(domain=domain,
+ flags=(nbt.NBT_SERVER_LDAP |
+ nbt.NBT_SERVER_DS)).domain_name)
+ lp.set('server role', 'member server')
+ smb_conf = lp.configfile if lp.configfile else default_path()
+ with tempfile.NamedTemporaryFile(delete=False,
+ dir=os.path.dirname(smb_conf)) as f:
+ lp.dump(False, f.name)
+ if os.path.exists(smb_conf):
+ mode = os.stat(smb_conf).st_mode
+ os.chmod(f.name, mode)
+ os.rename(f.name, smb_conf)
+ s3_lp = s3param.get_context()
+ s3_lp.load(smb_conf)
+ s3_net = s3_Net(creds, s3_lp, server=server)
+ (sid, domain_name) = s3_net.join_member(netbios_name,
+ machinepass=machinepass,
+ debug=verbose,
+ noDnsUpdates=no_dns_updates)
+
+ self.errf.write("Joined domain %s (%s)\n" % (domain_name, sid))
+ elif role == "DC" and is_ad_dc_built():
+ join_DC(logger=logger, server=server, creds=creds, lp=lp, domain=domain,
+ site=site, netbios_name=netbios_name, targetdir=targetdir,
+ domain_critical_only=domain_critical_only,
+ machinepass=machinepass, use_ntvfs=use_ntvfs,
+ dns_backend=dns_backend,
+ plaintext_secrets=plaintext_secrets,
+ backend_store=backend_store,
+ backend_store_size=backend_store_size)
+ elif role == "RODC" and is_ad_dc_built():
+ join_RODC(logger=logger, server=server, creds=creds, lp=lp, domain=domain,
+ site=site, netbios_name=netbios_name, targetdir=targetdir,
+ domain_critical_only=domain_critical_only,
+ machinepass=machinepass, use_ntvfs=use_ntvfs,
+ dns_backend=dns_backend,
+ plaintext_secrets=plaintext_secrets,
+ backend_store=backend_store,
+ backend_store_size=backend_store_size)
+ else:
+ raise CommandError("Invalid role '%s' (possible values: MEMBER, DC, RODC)" % role)
diff --git a/python/samba/netcmd/domain/keytab.py b/python/samba/netcmd/domain/keytab.py
new file mode 100644
index 0000000..b0955ca
--- /dev/null
+++ b/python/samba/netcmd/domain/keytab.py
@@ -0,0 +1,55 @@
+# domain management - domain keytab
+#
+# Copyright Matthias Dieter Wallnoefer 2009
+# Copyright Andrew Kroeger 2009
+# Copyright Jelmer Vernooij 2007-2012
+# Copyright Giampaolo Lauria 2011
+# Copyright Matthieu Patou <mat@matws.net> 2011
+# Copyright Andrew Bartlett 2008-2015
+# Copyright Stefan Metzmacher 2012
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import samba.getopt as options
+from samba import enable_net_export_keytab
+from samba.net import Net
+from samba.netcmd import Command, Option
+
+try:
+ enable_net_export_keytab()
+except ImportError:
+ cmd_domain_export_keytab = None
+else:
+ class cmd_domain_export_keytab(Command):
+ """Dump Kerberos keys of the domain into a keytab."""
+
+ synopsis = "%prog <keytab> [options]"
+
+ takes_optiongroups = {
+ "sambaopts": options.SambaOptions,
+ "credopts": options.CredentialsOptions,
+ "versionopts": options.VersionOptions,
+ }
+
+ takes_options = [
+ Option("--principal", help="extract only this principal", type=str),
+ ]
+
+ takes_args = ["keytab"]
+
+ def run(self, keytab, credopts=None, sambaopts=None, versionopts=None, principal=None):
+ lp = sambaopts.get_loadparm()
+ net = Net(None, lp)
+ net.export_keytab(keytab=keytab, principal=principal)
diff --git a/python/samba/netcmd/domain/leave.py b/python/samba/netcmd/domain/leave.py
new file mode 100644
index 0000000..0d58360
--- /dev/null
+++ b/python/samba/netcmd/domain/leave.py
@@ -0,0 +1,59 @@
+# domain management - domain leave
+#
+# Copyright Matthias Dieter Wallnoefer 2009
+# Copyright Andrew Kroeger 2009
+# Copyright Jelmer Vernooij 2007-2012
+# Copyright Giampaolo Lauria 2011
+# Copyright Matthieu Patou <mat@matws.net> 2011
+# Copyright Andrew Bartlett 2008-2015
+# Copyright Stefan Metzmacher 2012
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import samba.getopt as options
+from samba.net_s3 import Net as s3_Net
+from samba.netcmd import Command, Option
+from samba.param import default_path
+from samba.samba3 import param as s3param
+
+
+class cmd_domain_leave(Command):
+ """Cause a domain member to leave the joined domain."""
+
+ synopsis = "%prog [options]"
+
+ takes_optiongroups = {
+ "sambaopts": options.SambaOptions,
+ "versionopts": options.VersionOptions,
+ "credopts": options.CredentialsOptions,
+ }
+
+ takes_options = [
+ Option("--keep-account", action="store_true",
+ help="Disable the machine account instead of deleting it.")
+ ]
+
+ takes_args = []
+
+ def run(self, sambaopts=None, credopts=None, versionopts=None,
+ keep_account=False):
+ lp = sambaopts.get_loadparm()
+ creds = credopts.get_credentials(lp)
+
+ s3_lp = s3param.get_context()
+ smb_conf = lp.configfile if lp.configfile else default_path()
+ s3_lp.load(smb_conf)
+ s3_net = s3_Net(creds, s3_lp)
+ s3_net.leave(keep_account)
diff --git a/python/samba/netcmd/domain/level.py b/python/samba/netcmd/domain/level.py
new file mode 100644
index 0000000..eefe360
--- /dev/null
+++ b/python/samba/netcmd/domain/level.py
@@ -0,0 +1,250 @@
+# domain management - domain level
+#
+# Copyright Matthias Dieter Wallnoefer 2009
+# Copyright Andrew Kroeger 2009
+# Copyright Jelmer Vernooij 2007-2012
+# Copyright Giampaolo Lauria 2011
+# Copyright Matthieu Patou <mat@matws.net> 2011
+# Copyright Andrew Bartlett 2008-2015
+# Copyright Stefan Metzmacher 2012
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import ldb
+import samba.getopt as options
+from samba.auth import system_session
+from samba.dsdb import check_and_update_fl, DS_DOMAIN_FUNCTION_2000
+from samba.netcmd import Command, CommandError, Option
+from samba.samdb import SamDB
+
+from samba import functional_level
+
+
class cmd_domain_level(Command):
    """Raise domain and forest function levels."""

    synopsis = "%prog (show|raise <options>) [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "credopts": options.CredentialsOptions,
        "versionopts": options.VersionOptions,
    }

    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server", type=str,
               metavar="URL", dest="H"),
        Option("-q", "--quiet", help="Be quiet", action="store_true"),  # unused, kept for CLI compatibility
        Option("--forest-level", type="choice", choices=["2003", "2008", "2008_R2", "2012", "2012_R2", "2016"],
               help="The forest function level (2003 | 2008 | 2008_R2 | 2012 | 2012_R2 | 2016)"),
        Option("--domain-level", type="choice", choices=["2003", "2008", "2008_R2", "2012", "2012_R2", "2016"],
               help="The domain function level (2003 | 2008 | 2008_R2 | 2012 | 2012_R2 | 2016)")
    ]

    takes_args = ["subcommand"]

    def run(self, subcommand, H=None, forest_level=None, domain_level=None,
            quiet=False, credopts=None, sambaopts=None, versionopts=None):
        """Show or raise the domain/forest functional levels.

        :param subcommand: either "show" or "raise"
        :param H: LDB URL for the database or target server
        :param forest_level: requested new forest level (choice string) for "raise"
        :param domain_level: requested new domain level (choice string) for "raise"
        :raises CommandError: on an invalid subcommand or when a level
            constraint is violated
        """
        if subcommand not in ["show", "raise"]:
            raise CommandError("invalid argument: '%s' (choose from 'show', 'raise')" % subcommand)

        lp = sambaopts.get_loadparm()
        creds = credopts.get_credentials(lp, fallback_machine=True)

        samdb = SamDB(url=H, session_info=system_session(),
                      credentials=creds, lp=lp)

        domain_dn = samdb.domain_dn()

        # Only "raise" writes to the database, and only a local (non-ldap
        # URL) connection is wrapped in an explicit transaction.
        in_transaction = False
        if subcommand == "raise" and (H is None or not H.startswith("ldap")):
            samdb.transaction_start()
            in_transaction = True
            try:
                # Bring stored functional-level state up to date before
                # reading the current values below.
                check_and_update_fl(samdb, lp)
            except Exception as e:
                samdb.transaction_cancel()
                raise e

        try:
            # Forest-wide level lives on the Partitions container.
            res_forest = samdb.search("CN=Partitions,%s" % samdb.get_config_basedn(),
                                      scope=ldb.SCOPE_BASE, attrs=["msDS-Behavior-Version"])
            assert len(res_forest) == 1

            # Domain level and the NT4 mixed-mode flag live on the domain head.
            res_domain = samdb.search(domain_dn, scope=ldb.SCOPE_BASE,
                                      attrs=["msDS-Behavior-Version", "nTMixedDomain"])
            assert len(res_domain) == 1

            # The crossRef object for this domain under Partitions; it is
            # also updated when the domain level is raised.
            res_domain_cross = samdb.search("CN=Partitions,%s" % samdb.get_config_basedn(),
                                            scope=ldb.SCOPE_SUBTREE,
                                            expression="(&(objectClass=crossRef)(nCName=%s))" % domain_dn,
                                            attrs=["msDS-Behavior-Version"])
            assert len(res_domain_cross) == 1

            # Per-DC levels come from the nTDSDSA objects under Sites.
            res_dc_s = samdb.search("CN=Sites,%s" % samdb.get_config_basedn(),
                                    scope=ldb.SCOPE_SUBTREE, expression="(objectClass=nTDSDSA)",
                                    attrs=["msDS-Behavior-Version"])
            assert len(res_dc_s) >= 1

            # default values, since "msDS-Behavior-Version" does not exist on Windows 2000 AD
            level_forest = DS_DOMAIN_FUNCTION_2000
            level_domain = DS_DOMAIN_FUNCTION_2000

            if "msDS-Behavior-Version" in res_forest[0]:
                level_forest = int(res_forest[0]["msDS-Behavior-Version"][0])
            if "msDS-Behavior-Version" in res_domain[0]:
                level_domain = int(res_domain[0]["msDS-Behavior-Version"][0])
            level_domain_mixed = int(res_domain[0]["nTMixedDomain"][0])

            # Find the minimum level across all DCs; a single DC without
            # the attribute pins the minimum to the 2000 level.
            min_level_dc = None
            for msg in res_dc_s:
                if "msDS-Behavior-Version" in msg:
                    if min_level_dc is None or int(msg["msDS-Behavior-Version"][0]) < min_level_dc:
                        min_level_dc = int(msg["msDS-Behavior-Version"][0])
                else:
                    min_level_dc = DS_DOMAIN_FUNCTION_2000
                    # well, this is the least
                    break

            # Sanity-check the invariants between the three levels before
            # either showing or raising anything.
            if level_forest < DS_DOMAIN_FUNCTION_2000 or level_domain < DS_DOMAIN_FUNCTION_2000:
                raise CommandError("Domain and/or forest function level(s) is/are invalid. Correct them or reprovision!")
            if min_level_dc < DS_DOMAIN_FUNCTION_2000:
                raise CommandError("Lowest function level of a DC is invalid. Correct this or reprovision!")
            if level_forest > level_domain:
                raise CommandError("Forest function level is higher than the domain level(s). Correct this or reprovision!")
            if level_domain > min_level_dc:
                raise CommandError("Domain function level is higher than the lowest function level of a DC. Correct this or reprovision!")
        except Exception as e:
            if in_transaction:
                samdb.transaction_cancel()
            raise e

        # Closure over the levels read above: print the current state.
        def do_show():
            self.message("Domain and forest function level for domain '%s'" % domain_dn)
            if level_forest == DS_DOMAIN_FUNCTION_2000 and level_domain_mixed != 0:
                self.message("\nATTENTION: You run SAMBA 4 on a forest function level lower than Windows 2000 (Native). This isn't supported! Please raise!")
            if level_domain == DS_DOMAIN_FUNCTION_2000 and level_domain_mixed != 0:
                self.message("\nATTENTION: You run SAMBA 4 on a domain function level lower than Windows 2000 (Native). This isn't supported! Please raise!")
            if min_level_dc == DS_DOMAIN_FUNCTION_2000 and level_domain_mixed != 0:
                self.message("\nATTENTION: You run SAMBA 4 on a lowest function level of a DC lower than Windows 2003. This isn't supported! Please step-up or upgrade the concerning DC(s)!")

            self.message("")

            outstr = functional_level.level_to_string(level_forest)
            self.message("Forest function level: (Windows) " + outstr)

            if level_domain == DS_DOMAIN_FUNCTION_2000 and level_domain_mixed:
                outstr = "2000 mixed (NT4 DC support)"
            else:
                outstr = functional_level.level_to_string(level_domain)
            self.message("Domain function level: (Windows) " + outstr)

            outstr = functional_level.level_to_string(min_level_dc)
            self.message("Lowest function level of a DC: (Windows) " + outstr)

        # Closure performing the actual level raise; any exception here is
        # caught below so the transaction (if any) can be cancelled.
        def do_raise():
            msgs = []

            current_level_domain = level_domain

            if domain_level is not None:
                try:
                    new_level_domain = functional_level.string_to_level(domain_level)
                except KeyError:
                    raise CommandError(f"New functional level '{domain_level}' is not known to Samba as an AD functional level")

                if new_level_domain <= level_domain and level_domain_mixed == 0:
                    raise CommandError("Domain function level can't be smaller than or equal to the actual one!")
                if new_level_domain > min_level_dc:
                    raise CommandError("Domain function level can't be higher than the lowest function level of a DC!")

                # Deactivate mixed/interim domain support
                if level_domain_mixed != 0:
                    # Directly on the base DN
                    m = ldb.Message()
                    m.dn = ldb.Dn(samdb, domain_dn)
                    m["nTMixedDomain"] = ldb.MessageElement("0",
                                                            ldb.FLAG_MOD_REPLACE, "nTMixedDomain")
                    samdb.modify(m)
                    # Under partitions
                    m = ldb.Message()
                    m.dn = res_domain_cross[0].dn
                    m["nTMixedDomain"] = ldb.MessageElement("0",
                                                            ldb.FLAG_MOD_REPLACE, "nTMixedDomain")
                    try:
                        samdb.modify(m)
                    except ldb.LdbError as e:
                        (enum, emsg) = e.args
                        # A remote server may refuse the crossRef update;
                        # that specific refusal is tolerated.
                        if enum != ldb.ERR_UNWILLING_TO_PERFORM:
                            raise

                # Directly on the base DN
                m = ldb.Message()
                m.dn = ldb.Dn(samdb, domain_dn)
                m["msDS-Behavior-Version"] = ldb.MessageElement(
                    str(new_level_domain), ldb.FLAG_MOD_REPLACE,
                    "msDS-Behavior-Version")
                samdb.modify(m)
                # Under partitions
                m = ldb.Message()
                m.dn = res_domain_cross[0].dn
                m["msDS-Behavior-Version"] = ldb.MessageElement(
                    str(new_level_domain), ldb.FLAG_MOD_REPLACE,
                    "msDS-Behavior-Version")
                try:
                    samdb.modify(m)
                except ldb.LdbError as e2:
                    (enum, emsg) = e2.args
                    if enum != ldb.ERR_UNWILLING_TO_PERFORM:
                        raise

                current_level_domain = new_level_domain
                msgs.append("Domain function level changed!")

            if forest_level is not None:
                new_level_forest = functional_level.string_to_level(forest_level)

                if new_level_forest <= level_forest:
                    raise CommandError("Forest function level can't be smaller than or equal to the actual one!")
                # Compare against the (possibly just raised) domain level.
                if new_level_forest > current_level_domain:
                    raise CommandError("Forest function level can't be higher than the domain function level(s). Please raise it/them first!")

                m = ldb.Message()
                m.dn = ldb.Dn(samdb, "CN=Partitions,%s" % samdb.get_config_basedn())
                m["msDS-Behavior-Version"] = ldb.MessageElement(
                    str(new_level_forest), ldb.FLAG_MOD_REPLACE,
                    "msDS-Behavior-Version")
                samdb.modify(m)
                msgs.append("Forest function level changed!")
            msgs.append("All changes applied successfully!")
            self.message("\n".join(msgs))
            return

        if subcommand == "show":
            # "show" never started a transaction above.
            assert not in_transaction
            do_show()
            return
        elif subcommand == "raise":
            try:
                do_raise()
            except Exception as e:
                if in_transaction:
                    samdb.transaction_cancel()
                raise e
            if in_transaction:
                samdb.transaction_commit()
            return

        raise AssertionError("Internal Error subcommand[%s] not handled" % subcommand)
diff --git a/python/samba/netcmd/domain/models/__init__.py b/python/samba/netcmd/domain/models/__init__.py
new file mode 100644
index 0000000..8a6b254
--- /dev/null
+++ b/python/samba/netcmd/domain/models/__init__.py
@@ -0,0 +1,32 @@
+# Unix SMB/CIFS implementation.
+#
+# Samba domain models.
+#
+# Copyright (C) Catalyst.Net Ltd. 2023
+#
+# Written by Rob van der Linde <rob@catalyst.net.nz>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from .auth_policy import AuthenticationPolicy
+from .auth_silo import AuthenticationSilo
+from .claim_type import ClaimType
+from .group import Group
+from .model import MODELS
+from .schema import AttributeSchema, ClassSchema
+from .site import Site
+from .subnet import Subnet
+from .user import User
+from .value_type import ValueType
diff --git a/python/samba/netcmd/domain/models/auth_policy.py b/python/samba/netcmd/domain/models/auth_policy.py
new file mode 100644
index 0000000..c56966c
--- /dev/null
+++ b/python/samba/netcmd/domain/models/auth_policy.py
@@ -0,0 +1,109 @@
+# Unix SMB/CIFS implementation.
+#
+# Authentication policy model.
+#
+# Copyright (C) Catalyst.Net Ltd. 2023
+#
+# Written by Rob van der Linde <rob@catalyst.net.nz>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from enum import IntEnum
+from ldb import Dn
+
+from .fields import (BooleanField, EnumField, IntegerField, SDDLField,
+ StringField)
+from .model import Model
+
+# Ticket-Granting-Ticket lifetimes.
+MIN_TGT_LIFETIME = 45
+MAX_TGT_LIFETIME = 2147483647
+
+
class StrongNTLMPolicy(IntEnum):
    """Values of the msDS-StrongNTLMPolicy attribute."""
    DISABLED = 0
    OPTIONAL = 1
    REQUIRED = 2

    @classmethod
    def get_choices(cls):
        """Return the capitalised member names, sorted alphabetically."""
        names = [member.capitalize() for member in cls._member_names_]
        names.sort()
        return names

    @classmethod
    def choices_str(cls):
        """Return all choices joined into one comma-separated string."""
        return ", ".join(cls.get_choices())
+
+
class AuthenticationPolicy(Model):
    """Model for msDS-AuthNPolicy objects."""

    description = StringField("description")
    enforced = BooleanField("msDS-AuthNPolicyEnforced")
    strong_ntlm_policy = EnumField("msDS-StrongNTLMPolicy", StrongNTLMPolicy)
    user_allow_ntlm_network_auth = BooleanField(
        "msDS-UserAllowedNTLMNetworkAuthentication")
    user_tgt_lifetime = IntegerField("msDS-UserTGTLifetime")
    service_allow_ntlm_network_auth = BooleanField(
        "msDS-ServiceAllowedNTLMNetworkAuthentication")
    service_tgt_lifetime = IntegerField("msDS-ServiceTGTLifetime")
    computer_tgt_lifetime = IntegerField("msDS-ComputerTGTLifetime")
    user_allowed_to_authenticate_from = SDDLField(
        "msDS-UserAllowedToAuthenticateFrom", allow_device_in_sddl=False)
    user_allowed_to_authenticate_to = SDDLField(
        "msDS-UserAllowedToAuthenticateTo")
    service_allowed_to_authenticate_from = SDDLField(
        "msDS-ServiceAllowedToAuthenticateFrom", allow_device_in_sddl=False)
    service_allowed_to_authenticate_to = SDDLField(
        "msDS-ServiceAllowedToAuthenticateTo")
    computer_allowed_to_authenticate_to = SDDLField(
        "msDS-ComputerAllowedToAuthenticateTo")

    @staticmethod
    def get_base_dn(ldb):
        """Return the DN of the AuthN Policies container.

        :param ldb: Ldb connection
        :return: Dn object of container
        """
        container = ldb.get_config_basedn()
        container.add_child(
            "CN=AuthN Policies,CN=AuthN Policy Configuration,CN=Services")
        return container

    @staticmethod
    def get_object_class():
        return "msDS-AuthNPolicy"

    @staticmethod
    def lookup(ldb, name):
        """Fetch an authentication policy by DN or by name.

        :param ldb: Ldb connection
        :param name: Either DN or name of Authentication Policy
        :raises LookupError: if no policy was found
        :raises ValueError: if name is not set
        """
        if not name:
            raise ValueError("Attribute 'name' is required.")

        try:
            # The caller may pass an already-parsed Dn object.
            if isinstance(name, Dn):
                dn = name
            else:
                dn = Dn(ldb, name)
            policy = AuthenticationPolicy.get(ldb, dn=dn)
        except ValueError:
            # Not a valid DN: fall back to a lookup by common name.
            policy = AuthenticationPolicy.get(ldb, cn=name)

        if policy is None:
            raise LookupError(f"Authentication policy {name} not found.")

        return policy
diff --git a/python/samba/netcmd/domain/models/auth_silo.py b/python/samba/netcmd/domain/models/auth_silo.py
new file mode 100644
index 0000000..9747671
--- /dev/null
+++ b/python/samba/netcmd/domain/models/auth_silo.py
@@ -0,0 +1,104 @@
+# Unix SMB/CIFS implementation.
+#
+# Authentication silo model.
+#
+# Copyright (C) Catalyst.Net Ltd. 2023
+#
+# Written by Rob van der Linde <rob@catalyst.net.nz>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from ldb import FLAG_MOD_ADD, FLAG_MOD_DELETE, LdbError, Message, MessageElement
+
+from samba.sd_utils import escaped_claim_id
+
+from .exceptions import GrantMemberError, RevokeMemberError
+from .fields import DnField, BooleanField, StringField
+from .model import Model
+
+
class AuthenticationSilo(Model):
    """Model for msDS-AuthNPolicySilo objects."""

    description = StringField("description")
    enforced = BooleanField("msDS-AuthNPolicySiloEnforced")
    user_authentication_policy = DnField("msDS-UserAuthNPolicy")
    service_authentication_policy = DnField("msDS-ServiceAuthNPolicy")
    computer_authentication_policy = DnField("msDS-ComputerAuthNPolicy")
    members = DnField("msDS-AuthNPolicySiloMembers", many=True)

    @staticmethod
    def get_base_dn(ldb):
        """Return the DN of the AuthN Silos container.

        :param ldb: Ldb connection
        :return: Dn object of container
        """
        container = ldb.get_config_basedn()
        container.add_child(
            "CN=AuthN Silos,CN=AuthN Policy Configuration,CN=Services")
        return container

    @staticmethod
    def get_object_class():
        return "msDS-AuthNPolicySilo"

    def grant(self, ldb, member):
        """Grant a member access to the Authentication Silo.

        Writes a single add operation instead of saving the silo with the
        whole member list again.

        :param ldb: Ldb connection
        :param member: Member to grant access to silo
        :raises GrantMemberError: if the ldb modify fails
        """
        change = Message(dn=self.dn)
        change.add(MessageElement(str(member.dn), FLAG_MOD_ADD,
                                  "msDS-AuthNPolicySiloMembers"))

        try:
            ldb.modify(change)
        except LdbError as e:
            raise GrantMemberError(f"Failed to grant access to silo member: {e}")

        # Reload only the members field to pick up the change.
        self.refresh(ldb, fields=["members"])

    def revoke(self, ldb, member):
        """Revoke a member from the Authentication Silo.

        Writes a single delete operation instead of saving the silo with
        the whole member list again.

        :param ldb: Ldb connection
        :param member: Member to revoke from silo
        :raises RevokeMemberError: if the ldb modify fails
        """
        change = Message(dn=self.dn)
        change.add(MessageElement(str(member.dn), FLAG_MOD_DELETE,
                                  "msDS-AuthNPolicySiloMembers"))

        try:
            ldb.modify(change)
        except LdbError as e:
            raise RevokeMemberError(f"Failed to revoke silo member: {e}")

        # Reload only the members field to pick up the change.
        self.refresh(ldb, fields=["members"])

    def get_authentication_sddl(self):
        """Return the SDDL expression asserting membership of this silo."""
        claim = escaped_claim_id(self.name)
        return ('O:SYG:SYD:(XA;OICI;CR;;;WD;(@USER.ad://ext/'
                f'AuthenticationSilo == "{claim}"))')
diff --git a/python/samba/netcmd/domain/models/claim_type.py b/python/samba/netcmd/domain/models/claim_type.py
new file mode 100644
index 0000000..7e1c816
--- /dev/null
+++ b/python/samba/netcmd/domain/models/claim_type.py
@@ -0,0 +1,58 @@
+# Unix SMB/CIFS implementation.
+#
+# Claim type model.
+#
+# Copyright (C) Catalyst.Net Ltd. 2023
+#
+# Written by Rob van der Linde <rob@catalyst.net.nz>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from .fields import BooleanField, DnField, IntegerField,\
+ PossibleClaimValuesField, StringField
+from .model import Model
+
+
class ClaimType(Model):
    """Model for msDS-ClaimType objects."""

    enabled = BooleanField("Enabled")
    description = StringField("description")
    display_name = StringField("displayName")
    claim_attribute_source = DnField("msDS-ClaimAttributeSource")
    claim_is_single_valued = BooleanField("msDS-ClaimIsSingleValued")
    claim_is_value_space_restricted = BooleanField(
        "msDS-ClaimIsValueSpaceRestricted")
    claim_possible_values = PossibleClaimValuesField("msDS-ClaimPossibleValues")
    claim_source_type = StringField("msDS-ClaimSourceType")
    claim_type_applies_to_class = DnField(
        "msDS-ClaimTypeAppliesToClass", many=True)
    claim_value_type = IntegerField("msDS-ClaimValueType")

    @staticmethod
    def get_base_dn(ldb):
        """Return the DN of the Claim Types container.

        :param ldb: Ldb connection
        :return: Dn object of container
        """
        container = ldb.get_config_basedn()
        container.add_child("CN=Claim Types,CN=Claims Configuration,CN=Services")
        return container

    @staticmethod
    def get_object_class():
        return "msDS-ClaimType"

    def __str__(self):
        # Claim types render as their display name.
        return str(self.display_name)
diff --git a/python/samba/netcmd/domain/models/exceptions.py b/python/samba/netcmd/domain/models/exceptions.py
new file mode 100644
index 0000000..14ebd77
--- /dev/null
+++ b/python/samba/netcmd/domain/models/exceptions.py
@@ -0,0 +1,64 @@
+# Unix SMB/CIFS implementation.
+#
+# Model and ORM exceptions.
+#
+# Copyright (C) Catalyst.Net Ltd. 2023
+#
+# Written by Rob van der Linde <rob@catalyst.net.nz>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
class ModelError(Exception):
    """Base class for all model/ORM errors in this package."""
+
+
class FieldError(ModelError):
    """A ModelError raised in the context of one particular field."""

    def __init__(self, *args, field=None):
        """Store the offending field alongside the usual exception args."""
        super().__init__(*args)
        self.field = field

    def __str__(self):
        base = super().__str__()
        # Prefix the message with the field's attribute name.
        return f"{self.field.name}: {base}"
+
+
class MultipleObjectsReturned(ModelError):
    """A query matched more than one object where a single one was expected."""
+
+
class DoesNotExist(ModelError):
    """A query matched no object at all."""
+
+
class GrantMemberError(ModelError):
    """Adding a member to an authentication silo failed (raised by AuthenticationSilo.grant)."""
+
+
class RevokeMemberError(ModelError):
    """Removing a member from an authentication silo failed (raised by AuthenticationSilo.revoke)."""
+
+
class ProtectError(ModelError):
    """Failure while protecting an object (raised by model code not shown here)."""
+
+
class UnprotectError(ModelError):
    """Failure while unprotecting an object (raised by model code not shown here)."""
+
+
class DeleteError(ModelError):
    """Failure while deleting an object (raised by model code not shown here)."""
diff --git a/python/samba/netcmd/domain/models/fields.py b/python/samba/netcmd/domain/models/fields.py
new file mode 100644
index 0000000..0b7e1eb
--- /dev/null
+++ b/python/samba/netcmd/domain/models/fields.py
@@ -0,0 +1,507 @@
+# Unix SMB/CIFS implementation.
+#
+# Model fields.
+#
+# Copyright (C) Catalyst.Net Ltd. 2023
+#
+# Written by Rob van der Linde <rob@catalyst.net.nz>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from enum import IntEnum
+
+import io
+from abc import ABCMeta, abstractmethod
+from datetime import datetime
+from xml.etree import ElementTree
+
+from ldb import Dn, MessageElement, string_to_time, timestring
+from samba.dcerpc import security
+from samba.dcerpc.misc import GUID
+from samba.ndr import ndr_pack, ndr_unpack
+
+
class Field(metaclass=ABCMeta):
    """Abstract base class for all model fields.

    Subclasses implement from_db_value and to_db_value, and each must
    handle both single-valued and list-valued data: many=True only says
    the field "prefers" to be a list, but any field may hold either.
    """

    def __init__(self, name, many=False, default=None, hidden=False,
                 readonly=False):
        """Create a new field; meant to be called from subclasses.

        :param name: Ldb field name.
        :param many: If true always convert field to a list when loaded.
        :param default: Default value or callback method (obj is first argument)
        :param hidden: If this is True, exclude the field when calling as_dict()
        :param readonly: If true don't write this value when calling save.
        """
        self.name = name
        self.many = many
        self.hidden = hidden
        self.readonly = readonly
        # many=True fields default to an empty list rather than None, so
        # list-typed fields are consistently lists after loading.
        self.default = [] if (self.many and default is None) else default

    @abstractmethod
    def from_db_value(self, ldb, value):
        """Convert a value read from the database into a Python value.

        :param ldb: Ldb connection
        :param value: MessageElement value from the database
        :returns: Parsed value as Python type
        """

    @abstractmethod
    def to_db_value(self, ldb, value, flags):
        """Convert a Python value into a database value.

        Returns a MessageElement, or None to unset the field on the next
        save.

        :param ldb: Ldb connection
        :param value: Input value from Python field
        :param flags: MessageElement flags
        :returns: MessageElement or None
        """
+
+
class IntegerField(Field):
    """Field holding an int (or a list of ints when multi-valued)."""

    def from_db_value(self, ldb, value):
        """Parse a MessageElement into int or list of int."""
        if value is None:
            return None
        if self.many or len(value) > 1:
            return [int(item) for item in value]
        return int(value[0])

    def to_db_value(self, ldb, value, flags):
        """Serialise int or list of int into a MessageElement."""
        if value is None:
            return None
        if isinstance(value, list):
            items = [str(item) for item in value]
            return MessageElement(items, flags, self.name)
        return MessageElement(str(value), flags, self.name)
+
+
class BinaryField(Field):
    """Field holding raw bytes instead of str.

    MessageElement values already are bytes, which keeps this simple.
    """

    def from_db_value(self, ldb, value):
        """Return bytes or list of bytes from a MessageElement.

        The bytes() casts are likely no-ops since the stored values
        should already be bytes.
        """
        if value is None:
            return None
        if self.many or len(value) > 1:
            return [bytes(item) for item in value]
        return bytes(value[0])

    def to_db_value(self, ldb, value, flags):
        """Build a MessageElement from bytes or a list of bytes."""
        if value is None:
            return None
        if isinstance(value, list):
            return MessageElement([bytes(item) for item in value],
                                  flags, self.name)
        return MessageElement(bytes(value), flags, self.name)
+
+
class StringField(Field):
    """Field holding a str (or a list of str when multi-valued)."""

    def from_db_value(self, ldb, value):
        """Return str or list of str from a MessageElement."""
        if value is None:
            return None
        if self.many or len(value) > 1:
            return [str(item) for item in value]
        # Single value: str() of the whole MessageElement.
        return str(value)

    def to_db_value(self, ldb, value, flags):
        """Build a MessageElement from str or a list of str."""
        if value is None:
            return None
        if isinstance(value, list):
            return MessageElement([str(item) for item in value],
                                  flags, self.name)
        return MessageElement(str(value), flags, self.name)
+
+
class EnumField(Field):
    """Field mapping ldb values onto members of a Python Enum class."""

    def __init__(self, name, enum, many=False, default=None):
        """Create a new EnumField bound to the enum class *enum*."""
        self.enum = enum
        super().__init__(name, many, default)

    def enum_from_value(self, value):
        """Return the enum member matching a raw ldb value.

        IntEnum constructors only accept int, hence the special case.
        """
        raw = str(value)
        if issubclass(self.enum, IntEnum):
            return self.enum(int(raw))
        return self.enum(raw)

    def from_db_value(self, ldb, value):
        """Parse a MessageElement into an enum member or list of members."""
        if value is None:
            return None
        if self.many or len(value) > 1:
            return [self.enum_from_value(item) for item in value]
        return self.enum_from_value(value)

    def to_db_value(self, ldb, value, flags):
        """Serialise enum member(s) into a MessageElement of value strings."""
        if value is None:
            return None
        if isinstance(value, list):
            raw = [str(item.value) for item in value]
            return MessageElement(raw, flags, self.name)
        return MessageElement(str(value.value), flags, self.name)
+
+
class DateTimeField(Field):
    """Field converting ldb time strings to Python datetime objects."""

    def from_db_value(self, ldb, value):
        """Parse ldb time string(s) into datetime or list of datetime."""
        if value is None:
            return None
        if self.many or len(value) > 1:
            return [datetime.fromtimestamp(string_to_time(str(item)))
                    for item in value]
        return datetime.fromtimestamp(string_to_time(str(value)))

    def to_db_value(self, ldb, value, flags):
        """Serialise datetime(s) into ldb time strings."""
        if value is None:
            return None
        if isinstance(value, list):
            stamps = [timestring(int(datetime.timestamp(item)))
                      for item in value]
            return MessageElement(stamps, flags, self.name)
        return MessageElement(timestring(int(datetime.timestamp(value))),
                              flags, self.name)
+
+
class RelatedField(Field):
    """Field that eagerly fetches the objects behind the stored DNs.

    Use sparingly, can be a little slow. If in doubt just use DnField instead.
    """

    def __init__(self, name, model, many=False, default=None):
        """Create a new RelatedField resolving to instances of *model*."""
        self.model = model
        super().__init__(name, many, default)

    def from_db_value(self, ldb, value):
        """Fetch the related object(s) named by the stored DN value(s).

        There is no lazy loading here; every DN is resolved immediately.
        """
        if value is None:
            return None
        if self.many or len(value) > 1:
            return [self.model.get(ldb, dn=Dn(ldb, str(item)))
                    for item in value]
        return self.model.get(ldb, dn=Dn(ldb, str(value)))

    def to_db_value(self, ldb, value, flags):
        """Store the DN(s) of the related object(s) in a MessageElement."""
        if value is None:
            return None
        if isinstance(value, list):
            return MessageElement([str(item.dn) for item in value],
                                  flags, self.name)
        return MessageElement(str(value.dn), flags, self.name)
+
+
class DnField(Field):
    """Field exposing distinguished names as ldb Dn objects."""

    def from_db_value(self, ldb, value):
        """Parse a MessageElement into a Dn object or list of Dn objects."""
        if value is None:
            return None
        if isinstance(value, Dn):
            # Already parsed; hand it back untouched.
            return value
        if self.many or len(value) > 1:
            return [Dn(ldb, str(item)) for item in value]
        return Dn(ldb, str(value))

    def to_db_value(self, ldb, value, flags):
        """Serialise Dn object(s) into a MessageElement of DN strings."""
        if value is None:
            return None
        if isinstance(value, list):
            return MessageElement([str(item) for item in value],
                                  flags, self.name)
        return MessageElement(str(value), flags, self.name)
+
+
class GUIDField(Field):
    """Field holding binary GUIDs, exposed in their string form."""

    def from_db_value(self, ldb, value):
        """Decode NDR-packed GUID value(s) into str or list of str."""
        if value is None:
            return None
        if self.many or len(value) > 1:
            return [str(ndr_unpack(GUID, item)) for item in value]
        return str(ndr_unpack(GUID, value[0]))

    def to_db_value(self, ldb, value, flags):
        """Encode GUID string(s) into an NDR-packed MessageElement."""
        if value is None:
            return None
        if isinstance(value, list):
            packed = [ndr_pack(GUID(item)) for item in value]
            return MessageElement(packed, flags, self.name)
        return MessageElement(ndr_pack(GUID(value)), flags, self.name)
+
+
class SIDField(Field):
    """A SID field encodes and decodes security identifier (SID) data."""

    def from_db_value(self, ldb, value):
        """Convert MessageElement containing binary SIDs into str or list of str."""
        if value is None:
            return
        elif len(value) > 1 or self.many:
            return [str(ndr_unpack(security.dom_sid, item)) for item in value]
        else:
            return str(ndr_unpack(security.dom_sid, value[0]))

    def to_db_value(self, ldb, value, flags):
        """Convert SID string (or list of them) into a MessageElement."""
        if value is None:
            return
        elif isinstance(value, list):
            return MessageElement(
                [ndr_pack(security.dom_sid(item)) for item in value],
                flags, self.name)
        else:
            return MessageElement(ndr_pack(security.dom_sid(value)),
                                  flags, self.name)
+
+
class SDDLField(Field):
    """A SDDL field encodes and decodes SDDL data."""

    def __init__(self,
                 name,
                 *,
                 many=False,
                 default=None,
                 hidden=False,
                 allow_device_in_sddl=True):
        """Create a new SDDLField.

        :param allow_device_in_sddl: Permit device conditions when parsing
            SDDL (forwarded to security.descriptor.from_sddl)
        """
        self.allow_device_in_sddl = allow_device_in_sddl
        super().__init__(name, many=many, default=default, hidden=hidden)

    def from_db_value(self, ldb, value):
        """Decode binary security descriptor(s) into SDDL strings."""
        if value is None:
            return None
        if self.many or len(value) > 1:
            return [ndr_unpack(security.descriptor, member).as_sddl()
                    for member in value]
        return ndr_unpack(security.descriptor, value[0]).as_sddl()

    def to_db_value(self, ldb, value, flags):
        """Encode SDDL string(s) into a binary MessageElement."""
        # Resolved up-front (as in the original flow) so the SDDL can be
        # parsed relative to the connection's domain SID.
        domain_sid = security.dom_sid(ldb.get_domain_sid())

        def pack_sddl(sddl):
            descriptor = security.descriptor.from_sddl(
                sddl, domain_sid,
                allow_device_in_sddl=self.allow_device_in_sddl)
            return ndr_pack(descriptor)

        if value is None:
            return None
        if isinstance(value, list):
            return MessageElement([pack_sddl(item) for item in value],
                                  flags, self.name)
        return MessageElement(pack_sddl(value), flags, self.name)
+
+
class BooleanField(Field):
    """A simple boolean field, can be a bool or list of bool."""

    def from_db_value(self, ldb, value):
        """Parse "TRUE"/"FALSE" text into a bool or list of bool."""
        if value is None:
            return None
        if self.many or len(value) > 1:
            return [str(member) == "TRUE" for member in value]
        return str(value) == "TRUE"

    def to_db_value(self, ldb, value, flags):
        """Serialise bool(s) into upper-case "TRUE"/"FALSE" text."""
        if value is None:
            return None
        if isinstance(value, list):
            text = [str(bool(member)).upper() for member in value]
            return MessageElement(text, flags, self.name)
        return MessageElement(str(bool(value)).upper(), flags, self.name)
+
+
class PossibleClaimValuesField(Field):
    """Field for parsing possible values XML for claim types.

    This field will be represented by a list of dicts as follows:

    [
        {"ValueGUID": <GUID>},
        {"ValueDisplayName": "Display name"},
        {"ValueDescription": "Optional description or None for no description"},
        {"Value": <Value>},
    ]

    Note that the GUID needs to be created client-side when adding entries,
    leaving it as None then saving it doesn't generate the GUID.

    The field itself just converts the XML to list and vice versa, it doesn't
    automatically generate GUIDs for entries, this is entirely up to the caller.
    """

    # Namespaces for PossibleValues xml parsing.
    # The "" key maps the default namespace so unprefixed tags match in
    # find()/findall() below.
    NAMESPACE = {
        "xsd": "http://www.w3.org/2001/XMLSchema",
        "xsi": "http://www.w3.org/2001/XMLSchema-instance",
        "": "http://schemas.microsoft.com/2010/08/ActiveDirectory/PossibleValues"
    }

    def from_db_value(self, ldb, value):
        """Parse MessageElement with XML to list of dicts.

        Implicitly returns None when the element is absent.
        """
        if value is not None:
            root = ElementTree.fromstring(str(value))
            string_list = root.find("StringList", self.NAMESPACE)

            # One dict per <Item>; missing child elements would raise
            # AttributeError here, so well-formed XML is assumed.
            values = []
            for item in string_list.findall("Item", self.NAMESPACE):
                values.append({
                    "ValueGUID": item.find("ValueGUID", self.NAMESPACE).text,
                    "ValueDisplayName": item.find("ValueDisplayName",
                                                  self.NAMESPACE).text,
                    "ValueDescription": item.find("ValueDescription",
                                                  self.NAMESPACE).text,
                    "Value": item.find("Value", self.NAMESPACE).text,
                })

            return values

    def to_db_value(self, ldb, value, flags):
        """Convert list of dicts back to XML as a MessageElement.

        :param value: list of dicts (or a single dict) as documented on
            the class, or None to leave the attribute unset
        :returns: MessageElement containing utf-16 XML text, or None
        """
        if value is None:
            return

        # Possible values should always be a list of dict, but for consistency
        # with other fields just wrap a single value into a list and continue.
        if isinstance(value, list):
            possible_values = value
        else:
            possible_values = [value]

        # No point storing XML of an empty list.
        # Return None, the field will be unset on the next save.
        if len(possible_values) == 0:
            return

        # root node
        # xmlns attributes are written by hand so the serialised output
        # matches the expected AD PossibleValues document shape.
        root = ElementTree.Element("PossibleClaimValues")
        for name, url in self.NAMESPACE.items():
            if name == "":
                root.set("xmlns", url)
            else:
                root.set(f"xmlns:{name}", url)

        # StringList node
        string_list = ElementTree.SubElement(root, "StringList")

        # List of values
        for item_dict in possible_values:
            item = ElementTree.SubElement(string_list, "Item")
            item_guid = ElementTree.SubElement(item, "ValueGUID")
            item_guid.text = item_dict["ValueGUID"]
            item_name = ElementTree.SubElement(item, "ValueDisplayName")
            item_name.text = item_dict["ValueDisplayName"]
            item_desc = ElementTree.SubElement(item, "ValueDescription")
            item_desc.text = item_dict["ValueDescription"]
            item_value = ElementTree.SubElement(item, "Value")
            item_value.text = item_dict["Value"]

        # NOTE: indent was only added in Python 3.9 so can't be used yet.
        # ElementTree.indent(root, space="\t", level=0)

        out = io.BytesIO()
        ElementTree.ElementTree(root).write(out,
                                            encoding="utf-16",
                                            xml_declaration=True,
                                            short_empty_elements=False)

        # Back to str as that is what MessageElement needs.
        return MessageElement(out.getvalue().decode("utf-16"), flags, self.name)
diff --git a/python/samba/netcmd/domain/models/group.py b/python/samba/netcmd/domain/models/group.py
new file mode 100644
index 0000000..9473127
--- /dev/null
+++ b/python/samba/netcmd/domain/models/group.py
@@ -0,0 +1,42 @@
+# Unix SMB/CIFS implementation.
+#
+# Group model.
+#
+# Copyright (C) Catalyst.Net Ltd. 2023
+#
+# Written by Rob van der Linde <rob@catalyst.net.nz>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from .fields import BooleanField, DnField, IntegerField, SIDField, StringField
+from .model import Model
+
+
class Group(Model):
    """AD group object, mapped from objectClass "group"."""

    admin_count = IntegerField("adminCount")
    description = StringField("description")
    is_critical_system_object = BooleanField("isCriticalSystemObject",
                                             default=False, readonly=True)
    member = DnField("member", many=True)
    object_sid = SIDField("objectSid")
    system_flags = IntegerField("systemFlags")

    @staticmethod
    def get_object_class():
        """Return the objectClass used for searching and creating groups."""
        return "group"

    def get_authentication_sddl(self):
        """Return an SDDL expression granting access to this group's members."""
        return ("O:SYG:SYD:(XA;OICI;CR;;;WD;"
                f"(Member_of_any {{SID({self.object_sid})}}))")
diff --git a/python/samba/netcmd/domain/models/model.py b/python/samba/netcmd/domain/models/model.py
new file mode 100644
index 0000000..602c6ca
--- /dev/null
+++ b/python/samba/netcmd/domain/models/model.py
@@ -0,0 +1,426 @@
+# Unix SMB/CIFS implementation.
+#
+# Model and basic ORM for the Ldb database.
+#
+# Copyright (C) Catalyst.Net Ltd. 2023
+#
+# Written by Rob van der Linde <rob@catalyst.net.nz>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import inspect
+from abc import ABCMeta, abstractmethod
+
+from ldb import ERR_NO_SUCH_OBJECT, FLAG_MOD_ADD, FLAG_MOD_REPLACE, LdbError,\
+ Message, MessageElement, SCOPE_BASE, SCOPE_SUBTREE, binary_encode
+from samba.sd_utils import SDUtils
+
+from .exceptions import DeleteError, DoesNotExist, FieldError,\
+ ProtectError, UnprotectError
+from .fields import DateTimeField, DnField, Field, GUIDField, IntegerField,\
+ StringField
+from .query import Query
+
# Global registry of concrete model classes, keyed by class name.
# This gets populated automatically by the ModelMeta class below as
# each Model subclass is defined.
MODELS = {}
+
+
class ModelMeta(ABCMeta):
    """Metaclass that collects Field attributes and registers models.

    Every concrete subclass of Model gets a `fields` dict mapping the
    attribute name to its Field instance and is recorded in the global
    MODELS registry keyed by class name. The abstract Model base itself
    is deliberately left unregistered.
    """

    def __new__(mcls, name, bases, namespace, **kwargs):
        cls = super().__new__(mcls, name, bases, namespace, **kwargs)

        # Skip the abstract base class; only concrete models register.
        if cls.__name__ == "Model":
            return cls

        def is_field(member):
            return isinstance(member, Field)

        cls.fields = dict(inspect.getmembers(cls, is_field))
        cls.meta = mcls
        MODELS[name] = cls

        return cls
+
+
class Model(metaclass=ModelMeta):
    """Base ORM model representing one ldb object.

    Subclasses declare `Field` attributes (mapping Python attribute names
    to ldb attribute names) and must implement `get_object_class`.
    ModelMeta gathers the declared fields into `cls.fields` and registers
    each concrete model in the module-level MODELS dict.
    """

    # Fields common to every AD object; subclasses add their own.
    cn = StringField("cn")
    distinguished_name = DnField("distinguishedName")
    dn = DnField("dn")
    ds_core_propagation_data = DateTimeField("dsCorePropagationData",
                                             hidden=True)
    instance_type = IntegerField("instanceType")
    name = StringField("name")
    object_category = DnField("objectCategory")
    object_class = StringField("objectClass",
                               default=lambda obj: obj.get_object_class())
    object_guid = GUIDField("objectGUID")
    usn_changed = IntegerField("uSNChanged", hidden=True)
    usn_created = IntegerField("uSNCreated", hidden=True)
    when_changed = DateTimeField("whenChanged", hidden=True)
    when_created = DateTimeField("whenCreated", hidden=True)

    def __init__(self, **kwargs):
        """Create a new model instance and optionally populate fields.

        Does not save the object to the database, call .save() for that.

        :param kwargs: Optional input fields to populate object with
        """
        # Used by the _apply method, holds the original ldb Message,
        # which is used by save() to determine what fields changed.
        self._message = None

        for field_name, field in self.fields.items():
            if field_name in kwargs:
                default = kwargs[field_name]
            elif callable(field.default):
                # Callable defaults are evaluated per instance, e.g. the
                # object_class default calls self.get_object_class().
                default = field.default(self)
            else:
                default = field.default

            setattr(self, field_name, default)

    def __repr__(self):
        """Return object representation for this model."""
        return f"<{self.__class__.__name__}: {self}>"

    def __str__(self):
        """Stringify model instance to implement in each model."""
        return str(self.cn)

    def __eq__(self, other):
        """Basic object equality check only really checks if the dn matches.

        :param other: The other object to compare with
        """
        # NOTE: defining __eq__ without __hash__ makes instances unhashable.
        if other is None:
            return False
        else:
            return self.dn == other.dn

    def __json__(self):
        """Automatically called by custom JSONEncoder class.

        When turning an object into json any fields of type RelatedField
        will also end up calling this method.

        Implicitly returns None for unsaved objects (no dn yet).
        """
        if self.dn is not None:
            return str(self.dn)

    @staticmethod
    def get_base_dn(ldb):
        """Return the base DN for the container of this model.

        :param ldb: Ldb connection
        :return: Dn to use for new objects
        """
        return ldb.get_default_basedn()

    @classmethod
    def get_search_dn(cls, ldb):
        """Return the DN used for querying.

        By default, this just calls get_base_dn, but it is possible to
        return a different Dn for querying.

        :param ldb: Ldb connection
        :return: Dn to use for searching
        """
        return cls.get_base_dn(ldb)

    @staticmethod
    @abstractmethod
    def get_object_class():
        """Returns the objectClass for this model."""
        pass

    @classmethod
    def from_message(cls, ldb, message):
        """Create a new model instance from the Ldb Message object.

        :param ldb: Ldb connection
        :param message: Ldb Message object to create instance from
        :returns: new model instance populated from the message
        """
        obj = cls()
        obj._apply(ldb, message)
        return obj

    def _apply(self, ldb, message):
        """Internal method to apply Ldb Message to current object.

        :param ldb: Ldb connection
        :param message: Ldb Message object to apply
        """
        # Store the ldb Message so that in save we can see what changed.
        self._message = message

        # Fields absent from the message keep their current values.
        for attr, field in self.fields.items():
            if field.name in message:
                setattr(self, attr, field.from_db_value(ldb, message[field.name]))

    def refresh(self, ldb, fields=None):
        """Refresh object from database.

        :param ldb: Ldb connection
        :param fields: Optional list of field names to refresh
        """
        attrs = [self.fields[f].name for f in fields] if fields else None

        # This shouldn't normally happen but in case the object refresh fails.
        try:
            res = ldb.search(self.dn, scope=SCOPE_BASE, attrs=attrs)
        except LdbError as e:
            if e.args[0] == ERR_NO_SUCH_OBJECT:
                raise DoesNotExist(f"Refresh failed, object gone: {self.dn}")
            raise

        self._apply(ldb, res[0])

    def as_dict(self, include_hidden=False):
        """Returns a dict representation of the model.

        :param include_hidden: Include fields with hidden=True when set
        :returns: dict representation of model using Ldb field names as keys
        """
        obj_dict = {}

        # Unset (None) fields are omitted from the output entirely.
        for attr, field in self.fields.items():
            if not field.hidden or include_hidden:
                value = getattr(self, attr)
                if value is not None:
                    obj_dict[field.name] = value

        return obj_dict

    @classmethod
    def build_expression(cls, **kwargs):
        """Build LDAP search expression from kwargs.

        :kwargs: fields to use for expression using model field names
        :raises ValueError: if a kwarg does not match a declared field
        """
        # Take a copy, never modify the original if it can be avoided.
        # Then always add the object_class to the search criteria.
        criteria = dict(kwargs)
        criteria["object_class"] = cls.get_object_class()

        # Build search expression.
        # A single criterion stands alone; multiple are AND-ed with (&...).
        num_fields = len(criteria)
        expression = "" if num_fields == 1 else "(&"

        for field_name, value in criteria.items():
            field = cls.fields.get(field_name)
            if not field:
                raise ValueError(f"Unknown field '{field_name}'")
            # binary_encode escapes the value for safe use in the filter.
            expression += f"({field.name}={binary_encode(value)})"

        if num_fields > 1:
            expression += ")"

        return expression

    @classmethod
    def query(cls, ldb, **kwargs):
        """Returns a search query for this model.

        :param ldb: Ldb connection
        :param kwargs: Search criteria as keyword args
        :returns: Query wrapping the lazy search result
        """
        base_dn = cls.get_search_dn(ldb)

        # If the container does not exist produce a friendly error message.
        try:
            result = ldb.search(base_dn,
                                scope=SCOPE_SUBTREE,
                                expression=cls.build_expression(**kwargs))
        except LdbError as e:
            if e.args[0] == ERR_NO_SUCH_OBJECT:
                raise DoesNotExist(f"Container does not exist: {base_dn}")
            raise

        return Query(cls, ldb, result)

    @classmethod
    def get(cls, ldb, **kwargs):
        """Get one object, must always return one item.

        Either find object by dn=, or any combination of attributes via kwargs.
        If there are more than one result, MultipleObjectsReturned is raised.

        :param ldb: Ldb connection
        :param kwargs: Search criteria as keyword args
        :returns: Model instance or None if not found
        :raises MultipleObjectsReturned: if there are more than one results
        """
        # If a DN is provided use that to get the object directly.
        # Otherwise, build a search expression using kwargs provided.
        dn = kwargs.get("dn")

        if dn:
            # Handle LDAP error 32 LDAP_NO_SUCH_OBJECT, but raise for the rest.
            # Return None if the User does not exist.
            try:
                res = ldb.search(dn, scope=SCOPE_BASE)
            except LdbError as e:
                if e.args[0] == ERR_NO_SUCH_OBJECT:
                    return None
                else:
                    raise

            return cls.from_message(ldb, res[0])
        else:
            return cls.query(ldb, **kwargs).get()

    @classmethod
    def create(cls, ldb, **kwargs):
        """Create object constructs object and calls save straight after.

        :param ldb: Ldb connection
        :param kwargs: Fields to populate object from
        :returns: object
        """
        obj = cls(**kwargs)
        obj.save(ldb)
        return obj

    @classmethod
    def get_or_create(cls, ldb, defaults=None, **kwargs):
        """Retrieve object and if it doesn't exist create a new instance.

        :param ldb: Ldb connection
        :param defaults: Attributes only used for create but not search
        :param kwargs: Attributes used for searching existing object
        :returns: (object, bool created)
        """
        obj = cls.get(ldb, **kwargs)
        if obj is None:
            attrs = dict(kwargs)
            if defaults is not None:
                attrs.update(defaults)
            return cls.create(ldb, **attrs), True
        else:
            return obj, False

    def save(self, ldb):
        """Save model to Ldb database.

        The save operation will save all fields excluding fields that
        return None when calling their `to_db_value` methods.

        The `to_db_value` method can either return a ldb Message object,
        or None if the field is to be excluded.

        For updates, the existing object is fetched and only fields
        that are changed are included in the update ldb Message.

        Also for updates, any fields that currently have a value,
        but are to be set to None will be seen as a delete operation.

        After the save operation the object is refreshed from the server,
        as often the server will populate some fields.

        :param ldb: Ldb connection
        :raises FieldError: if a field fails to serialise its value
        """
        if self.dn is None:
            # New object: build the dn from cn (or name) under the base DN.
            dn = self.get_base_dn(ldb)
            dn.add_child(f"CN={self.cn or self.name}")
            self.dn = dn

            message = Message(dn=self.dn)
            for attr, field in self.fields.items():
                if attr != "dn" and not field.readonly:
                    value = getattr(self, attr)
                    try:
                        db_value = field.to_db_value(ldb, value, FLAG_MOD_ADD)
                    except ValueError as e:
                        raise FieldError(e, field=field)

                    # Don't add empty fields.
                    if db_value is not None and len(db_value):
                        message.add(db_value)

            # Create object
            ldb.add(message)

            # Fetching object refreshes any automatically populated fields.
            res = ldb.search(dn, scope=SCOPE_BASE)
            self._apply(ldb, res[0])
        else:
            # Existing Message was stored to work out what fields changed.
            existing_obj = self.from_message(ldb, self._message)

            # Only modify replace or modify fields that have changed.
            # Any fields that are set to None or an empty list get unset.
            message = Message(dn=self.dn)
            for attr, field in self.fields.items():
                if attr != "dn" and not field.readonly:
                    value = getattr(self, attr)
                    old_value = getattr(existing_obj, attr)

                    if value != old_value:
                        try:
                            db_value = field.to_db_value(ldb, value,
                                                         FLAG_MOD_REPLACE)
                        except ValueError as e:
                            raise FieldError(e, field=field)

                        # When a field returns None or empty list, delete attr.
                        if db_value in (None, []):
                            # Empty FLAG_MOD_REPLACE element removes the attr.
                            db_value = MessageElement([],
                                                      FLAG_MOD_REPLACE,
                                                      field.name)
                        message.add(db_value)

            # Saving nothing only triggers an error.
            if len(message):
                ldb.modify(message)

            # Fetching object refreshes any automatically populated fields.
            self.refresh(ldb)

    def delete(self, ldb):
        """Delete item from Ldb database.

        If self.dn is None then the object has not yet been saved.

        :param ldb: Ldb connection
        :raises DeleteError: if the object is unsaved or the delete fails
        """
        if self.dn is None:
            raise DeleteError("Cannot delete object that doesn't have a dn.")

        try:
            ldb.delete(self.dn)
        except LdbError as e:
            raise DeleteError(f"Delete failed: {e}")

    def protect(self, ldb):
        """Protect object from accidental deletion.

        Adds a deny delete/delete-tree ACE for Everyone (WD).

        :param ldb: Ldb connection
        :raises ProtectError: if adding the ACE fails
        """
        utils = SDUtils(ldb)

        try:
            utils.dacl_add_ace(self.dn, "(D;;DTSD;;;WD)")
        except LdbError as e:
            raise ProtectError(f"Failed to protect object: {e}")

    def unprotect(self, ldb):
        """Unprotect object from accidental deletion.

        Removes the deny delete/delete-tree ACE added by protect().

        :param ldb: Ldb connection
        :raises UnprotectError: if removing the ACE fails
        """
        utils = SDUtils(ldb)

        try:
            utils.dacl_delete_aces(self.dn, "(D;;DTSD;;;WD)")
        except LdbError as e:
            raise UnprotectError(f"Failed to unprotect object: {e}")
diff --git a/python/samba/netcmd/domain/models/query.py b/python/samba/netcmd/domain/models/query.py
new file mode 100644
index 0000000..9cdb650
--- /dev/null
+++ b/python/samba/netcmd/domain/models/query.py
@@ -0,0 +1,81 @@
+# Unix SMB/CIFS implementation.
+#
+# Query class for the ORM to the Ldb database.
+#
+# Copyright (C) Catalyst.Net Ltd. 2023
+#
+# Written by Rob van der Linde <rob@catalyst.net.nz>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import re
+
+from .exceptions import DoesNotExist, MultipleObjectsReturned
+
RE_SPLIT_CAMELCASE = re.compile(r"[A-Z](?:[a-z]+|[A-Z]*(?=[A-Z]|$))")


class Query:
    """Simple Query class used by the `Model.query` method."""

    def __init__(self, model, ldb, result):
        """Wrap an ldb search result for conversion to model instances.

        :param model: Model subclass the results belong to
        :param ldb: Ldb connection used when building instances
        :param result: ldb search result to wrap
        """
        self.model = model
        self.ldb = ldb
        self.result = result
        self.count = result.count
        # Human readable model name, e.g. "ClassSchema" -> "class schema",
        # used when building error messages below.
        words = RE_SPLIT_CAMELCASE.findall(model.__name__)
        self.name = " ".join(words).lower()

    def __iter__(self):
        """Yield a model instance for each message in the result."""
        return (self.model.from_message(self.ldb, message)
                for message in self.result)

    def first(self):
        """Returns the first item in the Query or None for no results."""
        if not self.result.count:
            return None
        return self.model.from_message(self.ldb, self.result[0])

    def last(self):
        """Returns the last item in the Query or None for no results."""
        if not self.result.count:
            return None
        return self.model.from_message(self.ldb, self.result[-1])

    def get(self):
        """Returns one item or None if no results were found.

        :returns: Model instance or None if not found.
        :raises MultipleObjectsReturned: if more than one results were returned
        """
        if self.count > 1:
            raise MultipleObjectsReturned(
                f"More than one {self.name} objects returned (got {self.count}).")
        if self.count:
            return self.model.from_message(self.ldb, self.result[0])
        return None

    def one(self):
        """Must return EXACTLY one item or raise an exception.

        :returns: Model instance
        :raises DoesNotExist: if no results were returned
        :raises MultipleObjectsReturned: if more than one results were returned
        """
        if self.count > 1:
            raise MultipleObjectsReturned(
                f"More than one {self.name} objects returned (got {self.count}).")
        if self.count < 1:
            raise DoesNotExist(
                f"{self.name.capitalize()} matching query not found")
        return self.model.from_message(self.ldb, self.result[0])
diff --git a/python/samba/netcmd/domain/models/schema.py b/python/samba/netcmd/domain/models/schema.py
new file mode 100644
index 0000000..59ece05
--- /dev/null
+++ b/python/samba/netcmd/domain/models/schema.py
@@ -0,0 +1,124 @@
+# Unix SMB/CIFS implementation.
+#
+# Class and attribute schema models.
+#
+# Copyright (C) Catalyst.Net Ltd. 2023
+#
+# Written by Rob van der Linde <rob@catalyst.net.nz>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from .fields import BinaryField, BooleanField, DnField, GUIDField,\
+ IntegerField, StringField
+from .model import Model
+
+
class ClassSchema(Model):
    """Schema class definition, objectClass "classSchema"."""

    default_object_category = DnField("defaultObjectCategory")
    governs_id = StringField("governsID")
    schema_id_guid = GUIDField("schemaIDGUID")
    subclass_of = StringField("subclassOf")
    admin_description = StringField("adminDescription")
    admin_display_name = StringField("adminDisplayName")
    default_hiding_value = BooleanField("defaultHidingValue")
    default_security_descriptor = BinaryField("defaultSecurityDescriptor")
    ldap_display_name = StringField("lDAPDisplayName")
    may_contain = StringField("mayContain", many=True)
    poss_superiors = StringField("possSuperiors", many=True)
    rdn_att_id = StringField("rDNAttID")
    show_in_advanced_view_only = BooleanField("showInAdvancedViewOnly")
    system_only = BooleanField("systemOnly", readonly=True)

    @staticmethod
    def get_base_dn(ldb):
        """Return the base DN for the ClassSchema model.

        This is the same as AttributeSchema, but the objectClass is different.

        :param ldb: Ldb connection
        :return: Dn object of container
        """
        return ldb.get_schema_basedn()

    @staticmethod
    def get_object_class():
        return "classSchema"

    @classmethod
    def lookup(cls, ldb, name):
        """Fetch the schema class entry called `name` or raise LookupError.

        :param ldb: Ldb connection
        :param name: Class name
        :raises: LookupError if not found
        :raises: ValueError if name is not provided
        """
        if not name:
            raise ValueError("Class name is required.")

        found = cls.get(ldb, ldap_display_name=name)
        if found is None:
            raise LookupError(f"Could not locate {name} in class schema.")

        return found
+
+
class AttributeSchema(Model):
    """Schema attribute definition, objectClass "attributeSchema"."""

    attribute_id = StringField("attributeID")
    attribute_syntax = StringField("attributeSyntax")
    is_single_valued = BooleanField("isSingleValued")
    ldap_display_name = StringField("lDAPDisplayName")
    om_syntax = IntegerField("oMSyntax")
    admin_description = StringField("adminDescription")
    admin_display_name = StringField("adminDisplayName")
    attribute_security_guid = GUIDField("attributeSecurityGUID")
    schema_flags_ex = IntegerField("schemaFlagsEx")
    search_flags = IntegerField("searchFlags")
    show_in_advanced_view_only = BooleanField("showInAdvancedViewOnly")
    system_flags = IntegerField("systemFlags", readonly=True)
    system_only = BooleanField("systemOnly", readonly=True)

    @staticmethod
    def get_base_dn(ldb):
        """Return the base DN for the AttributeSchema model.

        This is the same as ClassSchema, but the objectClass is different.

        :param ldb: Ldb connection
        :return: Dn object of container
        """
        return ldb.get_schema_basedn()

    @staticmethod
    def get_object_class():
        return "attributeSchema"

    @classmethod
    def lookup(cls, ldb, name):
        """Fetch the schema attribute called `name` or raise LookupError.

        :param ldb: Ldb connection
        :param name: Attribute name
        :raises: LookupError if not found
        :raises: ValueError if name is not provided
        """
        if not name:
            raise ValueError("Attribute name is required.")

        found = cls.get(ldb, ldap_display_name=name)
        if found is None:
            raise LookupError(f"Could not locate {name} in attribute schema.")

        return found
diff --git a/python/samba/netcmd/domain/models/site.py b/python/samba/netcmd/domain/models/site.py
new file mode 100644
index 0000000..44643f3
--- /dev/null
+++ b/python/samba/netcmd/domain/models/site.py
@@ -0,0 +1,47 @@
+# Unix SMB/CIFS implementation.
+#
+# Site model.
+#
+# Copyright (C) Catalyst.Net Ltd. 2023
+#
+# Written by Rob van der Linde <rob@catalyst.net.nz>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from .fields import BooleanField, DnField, IntegerField
+from .model import Model
+
+
class Site(Model):
    """Replication site stored under CN=Sites in the config partition."""

    show_in_advanced_view_only = BooleanField("showInAdvancedViewOnly")
    system_flags = IntegerField("systemFlags", readonly=True)

    # Backlinks
    site_object_bl = DnField("siteObjectBL", readonly=True)

    @staticmethod
    def get_base_dn(ldb):
        """Return the base DN for the Site model.

        :param ldb: Ldb connection
        :return: Dn to use for new objects
        """
        sites_dn = ldb.get_config_basedn()
        sites_dn.add_child("CN=Sites")
        return sites_dn

    @staticmethod
    def get_object_class():
        return "site"
diff --git a/python/samba/netcmd/domain/models/subnet.py b/python/samba/netcmd/domain/models/subnet.py
new file mode 100644
index 0000000..bb249d4
--- /dev/null
+++ b/python/samba/netcmd/domain/models/subnet.py
@@ -0,0 +1,45 @@
+# Unix SMB/CIFS implementation.
+#
+# Subnet model.
+#
+# Copyright (C) Catalyst.Net Ltd. 2023
+#
+# Written by Rob van der Linde <rob@catalyst.net.nz>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from .fields import BooleanField, DnField, IntegerField
+from .model import Model
+
+
class Subnet(Model):
    """Subnet object stored under CN=Subnets,CN=Sites in the config NC."""

    show_in_advanced_view_only = BooleanField("showInAdvancedViewOnly")
    site_object = DnField("siteObject")
    system_flags = IntegerField("systemFlags", readonly=True)

    @staticmethod
    def get_base_dn(ldb):
        """Return the base DN for the Subnet model.

        :param ldb: Ldb connection
        :return: Dn to use for new objects
        """
        subnets_dn = ldb.get_config_basedn()
        subnets_dn.add_child("CN=Subnets,CN=Sites")
        return subnets_dn

    @staticmethod
    def get_object_class():
        return "subnet"
diff --git a/python/samba/netcmd/domain/models/user.py b/python/samba/netcmd/domain/models/user.py
new file mode 100644
index 0000000..7b0785a
--- /dev/null
+++ b/python/samba/netcmd/domain/models/user.py
@@ -0,0 +1,75 @@
+# Unix SMB/CIFS implementation.
+#
+# User model.
+#
+# Copyright (C) Catalyst.Net Ltd. 2023
+#
+# Written by Rob van der Linde <rob@catalyst.net.nz>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from ldb import Dn
+
+from samba.dsdb import DS_GUID_USERS_CONTAINER
+
+from .fields import DnField, SIDField, StringField
+from .model import Model
+
+
class User(Model):
    """AD user account; searches from the root basedn also match computers."""

    username = StringField("sAMAccountName")
    assigned_policy = DnField("msDS-AssignedAuthNPolicy")
    assigned_silo = DnField("msDS-AssignedAuthNPolicySilo")
    object_sid = SIDField("objectSid")

    def __str__(self):
        """Return username rather than cn for User model."""
        return self.username

    @staticmethod
    def get_base_dn(ldb):
        """Return the base DN for the User model.

        :param ldb: Ldb connection
        :return: Dn to use for new objects
        """
        default_dn = ldb.get_default_basedn()
        return ldb.get_wellknown_dn(default_dn, DS_GUID_USERS_CONTAINER)

    @classmethod
    def get_search_dn(cls, ldb):
        """Return Dn used for searching so Computers will also be found.

        :param ldb: Ldb connection
        :return: Dn to use for searching
        """
        return ldb.get_root_basedn()

    @staticmethod
    def get_object_class():
        return "user"

    @classmethod
    def find(cls, ldb, name):
        """Helper function to find a user first by Dn then username.

        If the Dn can't be parsed, use sAMAccountName instead.
        """
        try:
            criteria = {"dn": Dn(ldb, name)}
        except ValueError:
            criteria = {"username": name}

        return cls.get(ldb, **criteria)
diff --git a/python/samba/netcmd/domain/models/value_type.py b/python/samba/netcmd/domain/models/value_type.py
new file mode 100644
index 0000000..00a4e07
--- /dev/null
+++ b/python/samba/netcmd/domain/models/value_type.py
@@ -0,0 +1,96 @@
+# Unix SMB/CIFS implementation.
+#
+# Claim value type model.
+#
+# Copyright (C) Catalyst.Net Ltd. 2023
+#
+# Written by Rob van der Linde <rob@catalyst.net.nz>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from .fields import BooleanField, DnField, IntegerField, StringField
+from .model import Model
+
# LDAP Syntax to Value Type CN lookup table.
# These are the lookups used by known AD attributes, add new ones as required.
# Keys are attributeSyntax OIDs from the attribute's schema entry; values are
# the cn of the corresponding msDS-ValueType object in the Claims
# Configuration container.
SYNTAX_TO_VALUE_TYPE_CN = {
    "2.5.5.1": "MS-DS-Text",  # Object(DS-DN)
    "2.5.5.2": "MS-DS-Text",  # String(Object-Identifier)
    "2.5.5.8": "MS-DS-YesNo",  # Boolean
    "2.5.5.9": "MS-DS-Number",  # Integer
    "2.5.5.12": "MS-DS-Text",  # String(Unicode)
    "2.5.5.15": "MS-DS-Text",  # String(NT-Sec-Desc)
    "2.5.5.16": "MS-DS-Number",  # LargeInteger
}
+
+
class ValueType(Model):
    """Model for msDS-ValueType objects (claim value types)."""

    description = StringField("description")
    display_name = StringField("displayName")
    claim_is_single_valued = BooleanField("msDS-ClaimIsSingleValued")
    claim_is_value_space_restricted = BooleanField(
        "msDS-ClaimIsValueSpaceRestricted")
    claim_value_type = IntegerField("msDS-ClaimValueType")
    is_possible_values_present = BooleanField("msDS-IsPossibleValuesPresent")
    show_in_advanced_view_only = BooleanField("showInAdvancedViewOnly")

    # Backlinks
    value_type_reference_bl = DnField(
        "msDS-ValueTypeReferenceBL", readonly=True)

    @staticmethod
    def get_base_dn(ldb):
        """Return the base DN for the ValueType model.

        :param ldb: Ldb connection
        :return: Dn object of container
        """
        container = ldb.get_config_basedn()
        container.add_child("CN=Value Types,CN=Claims Configuration,CN=Services")
        return container

    @staticmethod
    def get_object_class():
        """Return the structural objectClass used for this model."""
        return "msDS-ValueType"

    @classmethod
    def lookup(cls, ldb, attribute):
        """Helper function to get ValueType by attribute or raise LookupError.

        :param ldb: Ldb connection
        :param attribute: AttributeSchema object
        :raises: LookupError if not found
        :raises: ValueError for unknown attribute syntax
        """
        if not attribute:
            raise ValueError("Attribute is required for value type lookup.")

        # Map the attribute syntax OID to a value type cn; syntaxes that are
        # absent from the lookup table cannot be processed.
        syntax = attribute.attribute_syntax
        cn = SYNTAX_TO_VALUE_TYPE_CN.get(syntax)
        if cn is None:
            raise ValueError(f"Unable to process attribute syntax {syntax}")

        # This should always return something but should still be handled.
        result = cls.get(ldb, cn=cn)
        if result is None:
            raise LookupError(
                f"Could not find claim value type for {attribute}.")

        return result

    def __str__(self):
        """Render as the displayName attribute."""
        return str(self.display_name)
diff --git a/python/samba/netcmd/domain/passwordsettings.py b/python/samba/netcmd/domain/passwordsettings.py
new file mode 100644
index 0000000..d0cf47b
--- /dev/null
+++ b/python/samba/netcmd/domain/passwordsettings.py
@@ -0,0 +1,316 @@
+# domain management - domain passwordsettings
+#
+# Copyright Matthias Dieter Wallnoefer 2009
+# Copyright Andrew Kroeger 2009
+# Copyright Jelmer Vernooij 2007-2012
+# Copyright Giampaolo Lauria 2011
+# Copyright Matthieu Patou <mat@matws.net> 2011
+# Copyright Andrew Bartlett 2008-2015
+# Copyright Stefan Metzmacher 2012
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import ldb
+import samba.getopt as options
+from samba.auth import system_session
+from samba.dcerpc.samr import (DOMAIN_PASSWORD_COMPLEX,
+ DOMAIN_PASSWORD_STORE_CLEARTEXT)
+from samba.netcmd import Command, CommandError, Option, SuperCommand
+from samba.netcmd.common import (NEVER_TIMESTAMP, timestamp_to_days,
+ timestamp_to_mins)
+from samba.netcmd.pso import cmd_domain_passwordsettings_pso
+from samba.samdb import SamDB
+
+
class cmd_domain_passwordsettings_show(Command):
    """Display current password settings for the domain."""

    synopsis = "%prog [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server", type=str,
               metavar="URL", dest="H"),
    ]

    def run(self, H=None, credopts=None, sambaopts=None, versionopts=None):
        """Read the policy attributes off the domain head and print them."""
        lp = sambaopts.get_loadparm()
        creds = credopts.get_credentials(lp)

        samdb = SamDB(url=H, session_info=system_session(),
                      credentials=creds, lp=lp)

        domain_dn = samdb.domain_dn()
        policy_attrs = ["pwdProperties", "pwdHistoryLength", "minPwdLength",
                        "minPwdAge", "maxPwdAge", "lockoutDuration",
                        "lockoutThreshold", "lockOutObservationWindow"]
        res = samdb.search(domain_dn, scope=ldb.SCOPE_BASE, attrs=policy_attrs)
        assert(len(res) == 1)
        entry = res[0]
        try:
            pwd_props = int(entry["pwdProperties"][0])
            pwd_hist_len = int(entry["pwdHistoryLength"][0])
            min_pwd_len = int(entry["minPwdLength"][0])
            # ticks -> days
            min_pwd_age = timestamp_to_days(entry["minPwdAge"][0])
            max_pwd_age = timestamp_to_days(entry["maxPwdAge"][0])

            lockout_threshold = int(entry["lockoutThreshold"][0])

            # ticks -> mins
            lockout_duration = timestamp_to_mins(entry["lockoutDuration"][0])
            reset_lockout_after = timestamp_to_mins(
                entry["lockOutObservationWindow"][0])
        except Exception as e:
            raise CommandError("Could not retrieve password properties!", e)

        self.message("Password information for domain '%s'" % domain_dn)
        self.message("")
        if pwd_props & DOMAIN_PASSWORD_COMPLEX != 0:
            self.message("Password complexity: on")
        else:
            self.message("Password complexity: off")
        if pwd_props & DOMAIN_PASSWORD_STORE_CLEARTEXT != 0:
            self.message("Store plaintext passwords: on")
        else:
            self.message("Store plaintext passwords: off")
        self.message("Password history length: %d" % pwd_hist_len)
        self.message("Minimum password length: %d" % min_pwd_len)
        self.message("Minimum password age (days): %d" % min_pwd_age)
        self.message("Maximum password age (days): %d" % max_pwd_age)
        self.message("Account lockout duration (mins): %d" % lockout_duration)
        self.message("Account lockout threshold (attempts): %d" % lockout_threshold)
        self.message("Reset account lockout after (mins): %d" % reset_lockout_after)
+
+
class cmd_domain_passwordsettings_set(Command):
    """Set password settings.

    Password complexity, password lockout policy, history length,
    minimum password length, the minimum and maximum password age) on
    a Samba AD DC server.

    Use against a Windows DC is possible, but group policy will override it.
    """

    synopsis = "%prog <options> [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server", type=str,
               metavar="URL", dest="H"),
        Option("-q", "--quiet", help="Be quiet", action="store_true"),  # unused
        Option("--complexity", type="choice", choices=["on", "off", "default"],
               help="The password complexity (on | off | default). Default is 'on'"),
        Option("--store-plaintext", type="choice", choices=["on", "off", "default"],
               help="Store plaintext passwords where account have 'store passwords with reversible encryption' set (on | off | default). Default is 'off'"),
        Option("--history-length",
               help="The password history length (<integer> | default). Default is 24.", type=str),
        Option("--min-pwd-length",
               help="The minimum password length (<integer> | default). Default is 7.", type=str),
        Option("--min-pwd-age",
               help="The minimum password age (<integer in days> | default). Default is 1.", type=str),
        Option("--max-pwd-age",
               help="The maximum password age (<integer in days> | default). Default is 43.", type=str),
        Option("--account-lockout-duration",
               help="The length of time an account is locked out after exceeding the limit on bad password attempts (<integer in mins> | default). Default is 30 mins.", type=str),
        Option("--account-lockout-threshold",
               help="The number of bad password attempts allowed before locking out the account (<integer> | default). Default is 0 (never lock out).", type=str),
        Option("--reset-account-lockout-after",
               help="After this time is elapsed, the recorded number of attempts restarts from zero (<integer> | default). Default is 30.", type=str),
    ]

    def run(self, H=None, min_pwd_age=None, max_pwd_age=None,
            quiet=False, complexity=None, store_plaintext=None, history_length=None,
            min_pwd_length=None, account_lockout_duration=None, account_lockout_threshold=None,
            reset_account_lockout_after=None, credopts=None, sambaopts=None,
            versionopts=None):
        """Apply the requested password-policy changes to the domain head.

        Each policy option accepts either an integer (passed as a string)
        or the literal "default" to restore the Windows default value.
        Options left as None are not modified.

        :raises CommandError: if a value is out of range, if no options were
            supplied, or if the resulting maximum password age would not
            exceed the minimum password age.
        """
        lp = sambaopts.get_loadparm()
        creds = credopts.get_credentials(lp)

        samdb = SamDB(url=H, session_info=system_session(),
                      credentials=creds, lp=lp)

        # All changes are accumulated into a single modify message against
        # the domain head so they are applied atomically at the end.
        domain_dn = samdb.domain_dn()
        msgs = []
        m = ldb.Message()
        m.dn = ldb.Dn(samdb, domain_dn)
        pwd_props = int(samdb.get_pwdProperties())

        # get the current password age settings
        max_pwd_age_ticks = samdb.get_maxPwdAge()
        min_pwd_age_ticks = samdb.get_minPwdAge()

        if complexity is not None:
            if complexity == "on" or complexity == "default":
                pwd_props = pwd_props | DOMAIN_PASSWORD_COMPLEX
                msgs.append("Password complexity activated!")
            elif complexity == "off":
                pwd_props = pwd_props & (~DOMAIN_PASSWORD_COMPLEX)
                msgs.append("Password complexity deactivated!")

        if store_plaintext is not None:
            if store_plaintext == "on" or store_plaintext == "default":
                pwd_props = pwd_props | DOMAIN_PASSWORD_STORE_CLEARTEXT
                msgs.append("Plaintext password storage for changed passwords activated!")
            elif store_plaintext == "off":
                pwd_props = pwd_props & (~DOMAIN_PASSWORD_STORE_CLEARTEXT)
                msgs.append("Plaintext password storage for changed passwords deactivated!")

        if complexity is not None or store_plaintext is not None:
            m["pwdProperties"] = ldb.MessageElement(str(pwd_props),
                                                    ldb.FLAG_MOD_REPLACE, "pwdProperties")

        if history_length is not None:
            if history_length == "default":
                pwd_hist_len = 24
            else:
                pwd_hist_len = int(history_length)

            if pwd_hist_len < 0 or pwd_hist_len > 24:
                raise CommandError("Password history length must be in the range of 0 to 24!")

            m["pwdHistoryLength"] = ldb.MessageElement(str(pwd_hist_len),
                                                       ldb.FLAG_MOD_REPLACE, "pwdHistoryLength")
            msgs.append("Password history length changed!")

        if min_pwd_length is not None:
            if min_pwd_length == "default":
                min_pwd_len = 7
            else:
                min_pwd_len = int(min_pwd_length)

            if min_pwd_len < 0 or min_pwd_len > 14:
                raise CommandError("Minimum password length must be in the range of 0 to 14!")

            m["minPwdLength"] = ldb.MessageElement(str(min_pwd_len),
                                                   ldb.FLAG_MOD_REPLACE, "minPwdLength")
            msgs.append("Minimum password length changed!")

        if min_pwd_age is not None:
            if min_pwd_age == "default":
                min_pwd_age = 1
            else:
                min_pwd_age = int(min_pwd_age)

            if min_pwd_age < 0 or min_pwd_age > 998:
                raise CommandError("Minimum password age must be in the range of 0 to 998!")

            # days -> ticks
            min_pwd_age_ticks = -int(min_pwd_age * (24 * 60 * 60 * 1e7))

            m["minPwdAge"] = ldb.MessageElement(str(min_pwd_age_ticks),
                                                ldb.FLAG_MOD_REPLACE, "minPwdAge")
            msgs.append("Minimum password age changed!")

        if max_pwd_age is not None:
            if max_pwd_age == "default":
                max_pwd_age = 43
            else:
                max_pwd_age = int(max_pwd_age)

            if max_pwd_age < 0 or max_pwd_age > 999:
                raise CommandError("Maximum password age must be in the range of 0 to 999!")

            # days -> ticks (0 means passwords never expire)
            if max_pwd_age == 0:
                max_pwd_age_ticks = NEVER_TIMESTAMP
            else:
                max_pwd_age_ticks = -int(max_pwd_age * (24 * 60 * 60 * 1e7))

            m["maxPwdAge"] = ldb.MessageElement(str(max_pwd_age_ticks),
                                                ldb.FLAG_MOD_REPLACE, "maxPwdAge")
            msgs.append("Maximum password age changed!")

        if account_lockout_duration is not None:
            if account_lockout_duration == "default":
                account_lockout_duration = 30
            else:
                account_lockout_duration = int(account_lockout_duration)

            if account_lockout_duration < 0 or account_lockout_duration > 99999:
                raise CommandError("Account lockout duration "
                                   "must be in the range of 0 to 99999!")

            # minutes -> ticks (0 means the account stays locked out)
            if account_lockout_duration == 0:
                account_lockout_duration_ticks = NEVER_TIMESTAMP
            else:
                account_lockout_duration_ticks = -int(account_lockout_duration * (60 * 1e7))

            m["lockoutDuration"] = ldb.MessageElement(str(account_lockout_duration_ticks),
                                                      ldb.FLAG_MOD_REPLACE, "lockoutDuration")
            msgs.append("Account lockout duration changed!")

        if account_lockout_threshold is not None:
            if account_lockout_threshold == "default":
                account_lockout_threshold = 0
            else:
                account_lockout_threshold = int(account_lockout_threshold)

            m["lockoutThreshold"] = ldb.MessageElement(str(account_lockout_threshold),
                                                       ldb.FLAG_MOD_REPLACE, "lockoutThreshold")
            msgs.append("Account lockout threshold changed!")

        if reset_account_lockout_after is not None:
            if reset_account_lockout_after == "default":
                reset_account_lockout_after = 30
            else:
                reset_account_lockout_after = int(reset_account_lockout_after)

            if reset_account_lockout_after < 0 or reset_account_lockout_after > 99999:
                # Bug fix: this error previously referred to "Maximum
                # password age" (a copy-paste from the max-pwd-age check).
                raise CommandError("Reset account lockout after "
                                   "must be in the range of 0 to 99999!")

            # minutes -> ticks (0 means the attempt counter never resets)
            if reset_account_lockout_after == 0:
                reset_account_lockout_after_ticks = NEVER_TIMESTAMP
            else:
                reset_account_lockout_after_ticks = -int(reset_account_lockout_after * (60 * 1e7))

            m["lockOutObservationWindow"] = ldb.MessageElement(str(reset_account_lockout_after_ticks),
                                                               ldb.FLAG_MOD_REPLACE, "lockOutObservationWindow")
            msgs.append("Duration to reset account lockout after changed!")

        if max_pwd_age or min_pwd_age:
            # If we're setting either min or max password, make sure the max is
            # still greater overall. As either setting could be None, we use the
            # ticks here (which are always set) and work backwards.
            max_pwd_age = timestamp_to_days(max_pwd_age_ticks)
            min_pwd_age = timestamp_to_days(min_pwd_age_ticks)
            if max_pwd_age != 0 and min_pwd_age >= max_pwd_age:
                raise CommandError("Maximum password age (%d) must be greater than minimum password age (%d)!" % (max_pwd_age, min_pwd_age))

        if len(m) == 0:
            raise CommandError("You must specify at least one option to set. Try --help")
        samdb.modify(m)
        msgs.append("All changes applied successfully!")
        self.message("\n".join(msgs))
+
+
class cmd_domain_passwordsettings(SuperCommand):
    """Manage password policy settings."""

    # Map subcommand names to their implementations.
    subcommands = {
        "pso": cmd_domain_passwordsettings_pso(),
        "show": cmd_domain_passwordsettings_show(),
        "set": cmd_domain_passwordsettings_set(),
    }
diff --git a/python/samba/netcmd/domain/provision.py b/python/samba/netcmd/domain/provision.py
new file mode 100644
index 0000000..8f13e54
--- /dev/null
+++ b/python/samba/netcmd/domain/provision.py
@@ -0,0 +1,405 @@
+# domain management - domain provision
+#
+# Copyright Matthias Dieter Wallnoefer 2009
+# Copyright Andrew Kroeger 2009
+# Copyright Jelmer Vernooij 2007-2012
+# Copyright Giampaolo Lauria 2011
+# Copyright Matthieu Patou <mat@matws.net> 2011
+# Copyright Andrew Bartlett 2008-2015
+# Copyright Stefan Metzmacher 2012
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import os
+import sys
+import tempfile
+
+import samba
+import samba.getopt as options
+from samba.auth import system_session
+from samba.auth_util import system_session_unix
+from samba.dcerpc import security
+from samba.dsdb import (
+ DS_DOMAIN_FUNCTION_2000,
+ DS_DOMAIN_FUNCTION_2003,
+ DS_DOMAIN_FUNCTION_2008,
+ DS_DOMAIN_FUNCTION_2008_R2,
+ DS_DOMAIN_FUNCTION_2012,
+ DS_DOMAIN_FUNCTION_2012_R2,
+ DS_DOMAIN_FUNCTION_2016
+)
+from samba.netcmd import Command, CommandError, Option
+from samba.provision import DEFAULT_MIN_PWD_LENGTH, ProvisioningError, provision
+from samba.provision.common import FILL_DRS, FILL_FULL, FILL_NT4SYNC
+from samba.samdb import get_default_backend_store
+from samba import functional_level
+
+from .common import common_ntvfs_options, common_provision_join_options
+
+
class cmd_domain_provision(Command):
    """Provision a domain."""

    synopsis = "%prog [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
    }

    takes_options = [
        Option("--interactive", help="Ask for names", action="store_true"),
        Option("--domain", type="string", metavar="DOMAIN",
               help="NetBIOS domain name to use"),
        Option("--domain-guid", type="string", metavar="GUID",
               help="set domainguid (otherwise random)"),
        Option("--domain-sid", type="string", metavar="SID",
               help="set domainsid (otherwise random)"),
        Option("--ntds-guid", type="string", metavar="GUID",
               help="set NTDS object GUID (otherwise random)"),
        Option("--invocationid", type="string", metavar="GUID",
               help="set invocationid (otherwise random)"),
        Option("--host-name", type="string", metavar="HOSTNAME",
               help="set hostname"),
        Option("--host-ip", type="string", metavar="IPADDRESS",
               help="set IPv4 ipaddress"),
        Option("--host-ip6", type="string", metavar="IP6ADDRESS",
               help="set IPv6 ipaddress"),
        Option("--site", type="string", metavar="SITENAME",
               help="set site name"),
        Option("--adminpass", type="string", metavar="PASSWORD",
               help="choose admin password (otherwise random)"),
        Option("--krbtgtpass", type="string", metavar="PASSWORD",
               help="choose krbtgt password (otherwise random)"),
        Option("--dns-backend", type="choice", metavar="NAMESERVER-BACKEND",
               choices=["SAMBA_INTERNAL", "BIND9_FLATFILE", "BIND9_DLZ", "NONE"],
               help="The DNS server backend. SAMBA_INTERNAL is the builtin name server (default), "
                    "BIND9_FLATFILE uses bind9 text database to store zone information, "
                    "BIND9_DLZ uses samba4 AD to store zone information, "
                    "NONE skips the DNS setup entirely (not recommended)",
               default="SAMBA_INTERNAL"),
        Option("--dnspass", type="string", metavar="PASSWORD",
               help="choose dns password (otherwise random)"),
        Option("--root", type="string", metavar="USERNAME",
               help="choose 'root' unix username"),
        Option("--nobody", type="string", metavar="USERNAME",
               help="choose 'nobody' user"),
        Option("--users", type="string", metavar="GROUPNAME",
               help="choose 'users' group"),
        Option("--blank", action="store_true",
               help="do not add users or groups, just the structure"),
        Option("--server-role", type="choice", metavar="ROLE",
               choices=["domain controller", "dc", "member server", "member", "standalone"],
               help="The server role (domain controller | dc | member server | member | standalone). Default is dc.",
               default="domain controller"),
        Option("--function-level", type="choice", metavar="FOR-FUN-LEVEL",
               choices=["2000", "2003", "2008", "2008_R2", "2016"],
               help="The domain and forest function level (2000 | 2003 | 2008 | 2008_R2 - always native | 2016). Default is (Windows) 2008_R2 Native.",
               default="2008_R2"),
        Option("--base-schema", type="choice", metavar="BASE-SCHEMA",
               choices=["2008_R2", "2008_R2_old", "2012", "2012_R2", "2016", "2019"],
               help="The base schema files to use. Default is (Windows) 2019.",
               default="2019"),
        Option("--adprep-level", type="choice", metavar="FUNCTION_LEVEL",
               choices=["SKIP", "2008_R2", "2012", "2012_R2", "2016"],
               help="The highest functional level to prepare for. Default is based on --base-schema",
               default=None),
        Option("--next-rid", type="int", metavar="NEXTRID", default=1000,
               help="The initial nextRid value (only needed for upgrades). Default is 1000."),
        Option("--partitions-only",
               help="Configure Samba's partitions, but do not modify them (ie, join a BDC)", action="store_true"),
        Option("--use-rfc2307", action="store_true", help="Use AD to store posix attributes (default = no)"),
    ]

    ntvfs_options = [
        Option("--use-xattrs", type="choice", choices=["yes", "no", "auto"],
               metavar="[yes|no|auto]",
               help="Define if we should use the native fs capabilities or a tdb file for "
                    "storing attributes likes ntacl when --use-ntvfs is set. "
                    "auto tries to make an intelligent guess based on the user rights and system capabilities",
               default="auto")
    ]

    takes_options.extend(common_provision_join_options)

    if samba.is_ntvfs_fileserver_built():
        takes_options.extend(common_ntvfs_options)
        takes_options.extend(ntvfs_options)

    takes_args = []

    def run(self, sambaopts=None, versionopts=None,
            interactive=None,
            domain=None,
            domain_guid=None,
            domain_sid=None,
            ntds_guid=None,
            invocationid=None,
            host_name=None,
            host_ip=None,
            host_ip6=None,
            adminpass=None,
            site=None,
            krbtgtpass=None,
            machinepass=None,
            dns_backend=None,
            dns_forwarder=None,
            dnspass=None,
            ldapadminpass=None,
            root=None,
            nobody=None,
            users=None,
            quiet=None,
            blank=None,
            server_role=None,
            function_level=None,
            adprep_level=None,
            next_rid=None,
            partitions_only=None,
            targetdir=None,
            use_xattrs="auto",
            use_ntvfs=False,
            use_rfc2307=None,
            base_schema=None,
            plaintext_secrets=False,
            backend_store=None,
            backend_store_size=None):
        """Collect (interactively if requested) the provision parameters,
        validate them, and run the provision.

        :raises CommandError: for missing/invalid parameters or if the
            provision itself fails.
        """
        self.logger = self.get_logger(name="provision", quiet=quiet)

        lp = sambaopts.get_loadparm()
        smbconf = lp.configfile

        if dns_forwarder is not None:
            suggested_forwarder = dns_forwarder
        else:
            suggested_forwarder = self._get_nameserver_ip()
            if suggested_forwarder is None:
                suggested_forwarder = "none"

        # With no arguments at all, fall into interactive mode.
        if not self.raw_argv:
            interactive = True

        if interactive:
            from getpass import getpass
            import socket

            def ask(prompt, default=None):
                # Prompt on stdout, read the answer from stdin; an empty
                # answer selects the default.
                if default is not None:
                    print("%s [%s]: " % (prompt, default), end=' ')
                else:
                    print("%s: " % (prompt,), end=' ')
                sys.stdout.flush()
                return sys.stdin.readline().rstrip("\n") or default

            try:
                default = socket.getfqdn().split(".", 1)[1].upper()
            except IndexError:
                default = None
            realm = ask("Realm", default)
            if realm in (None, ""):
                raise CommandError("No realm set!")

            try:
                default = realm.split(".")[0]
            except IndexError:
                default = None
            domain = ask("Domain", default)
            if domain is None:
                raise CommandError("No domain set!")

            server_role = ask("Server Role (dc, member, standalone)", "dc")

            dns_backend = ask("DNS backend (SAMBA_INTERNAL, BIND9_FLATFILE, BIND9_DLZ, NONE)", "SAMBA_INTERNAL")
            if dns_backend in (None, ''):
                raise CommandError("No DNS backend set!")

            if dns_backend == "SAMBA_INTERNAL":
                dns_forwarder = ask("DNS forwarder IP address (write 'none' to disable forwarding)", suggested_forwarder)
                if dns_forwarder.lower() in (None, 'none'):
                    suggested_forwarder = None
                    dns_forwarder = None

            # Keep prompting until an acceptable, confirmed password is given.
            while True:
                adminpassplain = getpass("Administrator password: ")
                issue = self._adminpass_issue(adminpassplain)
                if issue:
                    self.errf.write("%s.\n" % issue)
                else:
                    adminpassverify = getpass("Retype password: ")
                    if not adminpassplain == adminpassverify:
                        self.errf.write("Sorry, passwords do not match.\n")
                    else:
                        adminpass = adminpassplain
                        break

        else:
            realm = sambaopts._lp.get('realm')
            if realm is None:
                raise CommandError("No realm set!")
            if domain is None:
                raise CommandError("No domain set!")

        if adminpass:
            issue = self._adminpass_issue(adminpass)
            if issue:
                raise CommandError(issue)
        else:
            self.logger.info("Administrator password will be set randomly!")

        try:
            dom_for_fun_level = functional_level.string_to_level(function_level)
        except KeyError:
            raise CommandError(f"'{function_level}' is not a valid domain level")

        if adprep_level is None:
            # Select the adprep_level default based
            # on what the base schema permits
            if base_schema in ["2008_R2", "2008_R2_old"]:
                # without explicit --adprep-level=2008_R2
                # we will skip the adprep step on
                # provision
                adprep_level = "SKIP"
            elif base_schema in ["2012"]:
                adprep_level = "2012"
            elif base_schema in ["2012_R2"]:
                adprep_level = "2012_R2"
            else:
                adprep_level = "2016"

        if adprep_level == "SKIP":
            provision_adprep_level = None
        elif adprep_level == "2008_R2":
            # Bug fix: this branch previously compared against "2008R2",
            # which is not one of the --adprep-level choices, so selecting
            # 2008_R2 left provision_adprep_level unbound (NameError below).
            provision_adprep_level = DS_DOMAIN_FUNCTION_2008_R2
        elif adprep_level == "2012":
            provision_adprep_level = DS_DOMAIN_FUNCTION_2012
        elif adprep_level == "2012_R2":
            provision_adprep_level = DS_DOMAIN_FUNCTION_2012_R2
        elif adprep_level == "2016":
            provision_adprep_level = DS_DOMAIN_FUNCTION_2016

        if dns_backend == "SAMBA_INTERNAL" and dns_forwarder is None:
            dns_forwarder = suggested_forwarder

        samdb_fill = FILL_FULL
        if blank:
            samdb_fill = FILL_NT4SYNC
        elif partitions_only:
            samdb_fill = FILL_DRS

        if targetdir is not None:
            if not os.path.isdir(targetdir):
                os.makedirs(targetdir)

        eadb = True

        if use_xattrs == "yes":
            eadb = False
        elif use_xattrs == "auto" and not use_ntvfs:
            eadb = False
        elif not use_ntvfs:
            raise CommandError("--use-xattrs=no requires --use-ntvfs (not supported for production use). "
                               "Please re-run with --use-xattrs omitted.")
        elif use_xattrs == "auto" and not lp.get("posix:eadb"):
            # Probe whether the filesystem supports setting an NT ACL as an
            # xattr by trying it on a temporary file.
            if targetdir:
                tmp_file = tempfile.NamedTemporaryFile(dir=os.path.abspath(targetdir))
            else:
                tmp_file = tempfile.NamedTemporaryFile(dir=os.path.abspath(os.path.dirname(lp.get("private dir"))))
            try:
                try:
                    samba.ntacls.setntacl(lp, tmp_file.name,
                                          "O:S-1-5-32G:S-1-5-32",
                                          "S-1-5-32",
                                          system_session_unix(),
                                          "native")
                    eadb = False
                except Exception:
                    self.logger.info("You are not root or your system does not support xattr, using tdb backend for attributes. ")
            finally:
                tmp_file.close()

        if eadb:
            self.logger.info("not using extended attributes to store ACLs and other metadata. If you intend to use this provision in production, rerun the script as root on a system supporting xattrs.")

        if domain_sid is not None:
            domain_sid = security.dom_sid(domain_sid)

        session = system_session()
        if backend_store is None:
            backend_store = get_default_backend_store()
        try:
            result = provision(self.logger,
                               session, smbconf=smbconf, targetdir=targetdir,
                               samdb_fill=samdb_fill, realm=realm, domain=domain,
                               domainguid=domain_guid, domainsid=domain_sid,
                               hostname=host_name,
                               hostip=host_ip, hostip6=host_ip6,
                               sitename=site, ntdsguid=ntds_guid,
                               invocationid=invocationid, adminpass=adminpass,
                               krbtgtpass=krbtgtpass, machinepass=machinepass,
                               dns_backend=dns_backend, dns_forwarder=dns_forwarder,
                               dnspass=dnspass, root=root, nobody=nobody,
                               users=users,
                               serverrole=server_role, dom_for_fun_level=dom_for_fun_level,
                               useeadb=eadb, next_rid=next_rid, lp=lp, use_ntvfs=use_ntvfs,
                               use_rfc2307=use_rfc2307, skip_sysvolacl=False,
                               base_schema=base_schema,
                               adprep_level=provision_adprep_level,
                               plaintext_secrets=plaintext_secrets,
                               backend_store=backend_store,
                               backend_store_size=backend_store_size)

        except ProvisioningError as e:
            raise CommandError("Provision failed", e)

        result.report_logger(self.logger)

    def _get_nameserver_ip(self):
        """Grab the nameserver IP address from /etc/resolv.conf.

        :return: the address of the first nameserver entry, or None
        """
        RESOLV_CONF = "/etc/resolv.conf"

        if not os.path.isfile(RESOLV_CONF):
            self.logger.warning("Failed to locate %s" % RESOLV_CONF)
            return None

        # 'with' guarantees the handle is closed even if parsing fails.
        with open(RESOLV_CONF, 'r') as handle:
            for line in handle:
                if not line.startswith('nameserver'):
                    continue
                # we want the last non-space continuous string of the line
                return line.strip().split()[-1]

        self.logger.warning("No nameserver found in %s" % RESOLV_CONF)

    def _adminpass_issue(self, adminpass):
        """Returns error string for a bad administrator password,
        or None if acceptable"""
        if isinstance(adminpass, bytes):
            adminpass = adminpass.decode('utf8')
        if len(adminpass) < DEFAULT_MIN_PWD_LENGTH:
            return "Administrator password does not meet the default minimum" \
                " password length requirement (%d characters)" \
                % DEFAULT_MIN_PWD_LENGTH
        elif not samba.check_password_quality(adminpass):
            return "Administrator password does not meet the default" \
                " quality standards"
        else:
            return None
diff --git a/python/samba/netcmd/domain/samba3upgrade.py b/python/samba/netcmd/domain/samba3upgrade.py
new file mode 100644
index 0000000..67f4b42
--- /dev/null
+++ b/python/samba/netcmd/domain/samba3upgrade.py
@@ -0,0 +1,34 @@
+# domain management - domain samba3upgrade
+#
+# Copyright Matthias Dieter Wallnoefer 2009
+# Copyright Andrew Kroeger 2009
+# Copyright Jelmer Vernooij 2007-2012
+# Copyright Giampaolo Lauria 2011
+# Copyright Matthieu Patou <mat@matws.net> 2011
+# Copyright Andrew Bartlett 2008-2015
+# Copyright Stefan Metzmacher 2012
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from .classicupgrade import cmd_domain_classicupgrade
+
+
class cmd_domain_samba3upgrade(cmd_domain_classicupgrade):
    # Reuse the classicupgrade help text so both names document identically.
    __doc__ = cmd_domain_classicupgrade.__doc__

    # This command is a deprecated alias kept for backwards compatibility
    # only, and should not be shown in command listings.

    hidden = True
diff --git a/python/samba/netcmd/domain/schemaupgrade.py b/python/samba/netcmd/domain/schemaupgrade.py
new file mode 100644
index 0000000..ff00a77
--- /dev/null
+++ b/python/samba/netcmd/domain/schemaupgrade.py
@@ -0,0 +1,350 @@
+# domain management - domain schemaupgrade
+#
+# Copyright Matthias Dieter Wallnoefer 2009
+# Copyright Andrew Kroeger 2009
+# Copyright Jelmer Vernooij 2007-2012
+# Copyright Giampaolo Lauria 2011
+# Copyright Matthieu Patou <mat@matws.net> 2011
+# Copyright Andrew Bartlett 2008-2015
+# Copyright Stefan Metzmacher 2012
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import os
+import shutil
+import subprocess
+import tempfile
+
+import ldb
+import samba.getopt as options
+from samba.auth import system_session
+from samba.netcmd import Command, CommandError, Option
+from samba.netcmd.fsmo import get_fsmo_roleowner
+from samba.provision import setup_path
+from samba.samdb import SamDB
+
+
class ldif_schema_update:
    """Helper class for applying LDIF schema updates.

    An instance accumulates the text of one LDIF operation (one
    blank-line-separated stanza of an adprep .LDF file); the parsing
    caller (cmd_domain_schema_upgrade._apply_updates_in_file) fills in
    the attributes below before calling apply().
    """

    def __init__(self):
        # True once the caller has seen an "isDefunct: TRUE" line for this op
        self.is_defunct = False
        # OID the caller could not map to an ldapDisplayName (None if all known)
        self.unknown_oid = None
        # target DN of the operation, parsed from its "dn:" line
        self.dn = None
        # accumulated LDIF text of the operation
        self.ldif = ""

    def can_ignore_failure(self, error):
        """Checks if we can safely ignore failure to apply an LDIF update"""
        (num, errstr) = error.args

        # Microsoft has marked objects as defunct that Samba doesn't know about
        if num == ldb.ERR_NO_SUCH_OBJECT and self.is_defunct:
            print("Defunct object %s doesn't exist, skipping" % self.dn)
            return True
        elif self.unknown_oid is not None:
            # NOTE(review): this branch ignores the error code entirely --
            # once an unknown OID has been recorded for this op, *any*
            # LdbError raised while applying it is skipped.
            print("Skipping unknown OID %s for object %s" % (self.unknown_oid, self.dn))
            return True

        return False

    def apply(self, samdb):
        """Applies a single LDIF update to the schema.

        Returns 1 if the change was applied, 0 if the failure was safely
        ignored; re-raises the LdbError (after dumping the LDIF) otherwise.
        """

        try:
            try:
                samdb.modify_ldif(self.ldif, controls=['relax:0'])
            except ldb.LdbError as e:
                if e.args[0] == ldb.ERR_INVALID_ATTRIBUTE_SYNTAX:

                    # REFRESH after a failed change

                    # Otherwise the OID-to-attribute mapping in
                    # _apply_updates_in_file() won't work, because it
                    # can't lookup the new OID in the schema
                    samdb.set_schema_update_now()

                    # retry the same change once after the schema refresh
                    samdb.modify_ldif(self.ldif, controls=['relax:0'])
                else:
                    raise
        except ldb.LdbError as e:
            if self.can_ignore_failure(e):
                return 0
            else:
                print("Exception: %s" % e)
                print("Encountered while trying to apply the following LDIF")
                print("----------------------------------------------------")
                print("%s" % self.ldif)

                raise

        return 1
+
+
class cmd_domain_schema_upgrade(Command):
    """Domain schema upgrading"""

    synopsis = "%prog [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server", type=str,
               metavar="URL", dest="H"),
        Option("-q", "--quiet", help="Be quiet", action="store_true"), # unused
        Option("-v", "--verbose", help="Be verbose", action="store_true"),
        Option("--schema", type="choice", metavar="SCHEMA",
               choices=["2012", "2012_R2", "2016", "2019"],
               help="The schema file to upgrade to. Default is (Windows) 2019.",
               default="2019"),
        Option("--ldf-file", type=str, default=None,
               help="Just apply the schema updates in the adprep/.LDF file(s) specified"),
        Option("--base-dir", type=str, default=None,
               help="Location of ldf files Default is ${SETUPDIR}/adprep.")
    ]

    def _apply_updates_in_file(self, samdb, ldif_file):
        """
        Applies a series of updates specified in an .LDIF file. The .LDIF file
        is based on the adprep Schema updates provided by Microsoft.

        Returns the number of changes successfully applied.

        NOTE(review): an operation at end-of-file that is not followed by a
        blank line is accumulated but never applied -- apply() is only
        triggered on blank-line boundaries. The adprep .LDF files presumably
        always end with a blank line; confirm before relying on this.
        """
        count = 0
        ldif_op = ldif_schema_update()

        # parse the file line by line and work out each update operation to apply
        for line in ldif_file:

            line = line.rstrip()

            # the operations in the .LDIF file are separated by blank lines. If
            # we hit a blank line, try to apply the update we've parsed so far
            if line == '':

                # keep going if we haven't parsed anything yet
                if ldif_op.ldif == '':
                    continue

                # Apply the individual change
                count += ldif_op.apply(samdb)

                # start storing the next operation from scratch again
                ldif_op = ldif_schema_update()
                continue

            # replace the placeholder domain name in the .ldif file with the real domain
            if line.upper().endswith('DC=X'):
                line = line[:-len('DC=X')] + str(samdb.get_default_basedn())
            elif line.upper().endswith('CN=X'):
                line = line[:-len('CN=X')] + str(samdb.get_default_basedn())

            # split into "attribute: value"; values[0] is the attribute name
            values = line.split(':')

            if values[0].lower() == 'dn':
                ldif_op.dn = values[1].strip()

            # replace the Windows-specific operation with the Samba one
            if values[0].lower() == 'changetype':
                line = line.lower().replace(': ntdsschemaadd',
                                            ': add')
                line = line.lower().replace(': ntdsschemamodify',
                                            ': modify')
                line = line.lower().replace(': ntdsschemamodrdn',
                                            ': modrdn')
                line = line.lower().replace(': ntdsschemadelete',
                                            ': delete')

            if values[0].lower() in ['rdnattid', 'subclassof',
                                     'systemposssuperiors',
                                     'systemmaycontain',
                                     'systemauxiliaryclass']:
                # NOTE(review): this unpacking assumes exactly one ':' on the
                # line; a value containing a colon would raise ValueError here.
                _, value = values

                # The Microsoft updates contain some OIDs we don't recognize.
                # Query the DB to see if we can work out the OID this update is
                # referring to. If we find a match, then replace the OID with
                # the ldapDisplayname
                if '.' in value:
                    res = samdb.search(base=samdb.get_schema_basedn(),
                                       expression="(|(attributeId=%s)(governsId=%s))" %
                                       (value, value),
                                       attrs=['ldapDisplayName'])

                    if len(res) != 1:
                        # remember it so a later apply() failure can be ignored
                        ldif_op.unknown_oid = value
                    else:
                        display_name = str(res[0]['ldapDisplayName'][0])
                        line = line.replace(value, ' ' + display_name)

            # Microsoft has marked objects as defunct that Samba doesn't know about
            if values[0].lower() == 'isdefunct' and values[1].strip().lower() == 'true':
                ldif_op.is_defunct = True

            # Samba has added the showInAdvancedViewOnly attribute to all objects,
            # so rather than doing an add, we need to do a replace
            if values[0].lower() == 'add' and values[1].strip().lower() == 'showinadvancedviewonly':
                line = 'replace: showInAdvancedViewOnly'

            # Add the line to the current LDIF operation (including the newline
            # we stripped off at the start of the loop)
            ldif_op.ldif += line + '\n'

        return count

    def _apply_update(self, samdb, update_file, base_dir):
        """Wrapper function for parsing an LDIF file and applying the updates"""

        print("Applying %s updates..." % update_file)

        ldif_file = None
        try:
            ldif_file = open(os.path.join(base_dir, update_file))

            count = self._apply_updates_in_file(samdb, ldif_file)

        finally:
            # close even when _apply_updates_in_file raises
            if ldif_file:
                ldif_file.close()

        print("%u changes applied" % count)

        return count

    def run(self, **kwargs):
        # imported lazily so the command degrades with a clear error when
        # the optional markdown dependency is missing
        try:
            from samba.ms_schema_markdown import read_ms_markdown
        except ImportError as e:
            self.outf.write("Exception in importing markdown: %s\n" % e)
            raise CommandError('Failed to import module markdown')
        from samba.schema import Schema

        updates_allowed_overridden = False
        sambaopts = kwargs.get("sambaopts")
        credopts = kwargs.get("credopts")
        lp = sambaopts.get_loadparm()
        creds = credopts.get_credentials(lp)
        H = kwargs.get("H")
        target_schema = kwargs.get("schema")
        ldf_files = kwargs.get("ldf_file")
        base_dir = kwargs.get("base_dir")

        # set when we extract the .ldf files from Schema-Updates.md;
        # removed again at the end of run()
        temp_folder = None

        samdb = SamDB(url=H, session_info=system_session(), credentials=creds, lp=lp)

        # we're not going to get far if the config doesn't allow schema updates
        if lp.get("dsdb:schema update allowed") is None:
            lp.set("dsdb:schema update allowed", "yes")
            print("Temporarily overriding 'dsdb:schema update allowed' setting")
            updates_allowed_overridden = True

        # schema changes must be made on the schema-master FSMO role owner
        own_dn = ldb.Dn(samdb, samdb.get_dsServiceName())
        master = get_fsmo_roleowner(samdb, str(samdb.get_schema_basedn()),
                                    'schema')
        if own_dn != master:
            raise CommandError("This server is not the schema master.")

        # if specific LDIF files were specified, just apply them
        if ldf_files:
            schema_updates = ldf_files.split(",")
        else:
            schema_updates = []

            # work out the version of the target schema we're upgrading to
            end = Schema.get_version(target_schema)

            # work out the version of the schema we're currently using
            res = samdb.search(base=samdb.get_schema_basedn(),
                               scope=ldb.SCOPE_BASE, attrs=['objectVersion'])

            if len(res) != 1:
                raise CommandError('Could not determine current schema version')
            start = int(res[0]['objectVersion'][0]) + 1

            diff_dir = setup_path("adprep/WindowsServerDocs")
            if base_dir is None:
                # Read from the Schema-Updates.md file
                temp_folder = tempfile.mkdtemp()

                update_file = setup_path("adprep/WindowsServerDocs/Schema-Updates.md")

                try:
                    read_ms_markdown(update_file, temp_folder)
                except Exception as e:
                    print("Exception in markdown parsing: %s" % e)
                    shutil.rmtree(temp_folder)
                    raise CommandError('Failed to upgrade schema')

                base_dir = temp_folder

            # one SchNN.ldf per schema version step, applied in order
            for version in range(start, end + 1):
                update = 'Sch%d.ldf' % version
                schema_updates.append(update)

                # Apply patches if we parsed the Schema-Updates.md file
                diff = os.path.abspath(os.path.join(diff_dir, update + '.diff'))
                if temp_folder and os.path.exists(diff):
                    try:
                        p = subprocess.Popen(['patch', update, '-i', diff],
                                             stdout=subprocess.PIPE,
                                             stderr=subprocess.PIPE, cwd=temp_folder)
                    except (OSError, IOError):
                        shutil.rmtree(temp_folder)
                        raise CommandError("Failed to upgrade schema. "
                                           "Is '/usr/bin/patch' missing?")

                    stdout, stderr = p.communicate()

                    if p.returncode:
                        print("Exception in patch: %s\n%s" % (stdout, stderr))
                        shutil.rmtree(temp_folder)
                        raise CommandError('Failed to upgrade schema')

                    print("Patched %s using %s" % (update, diff))

        if base_dir is None:
            base_dir = setup_path("adprep")

        # apply all updates inside one transaction: all-or-nothing
        samdb.transaction_start()
        count = 0
        error_encountered = False

        try:
            # Apply the schema updates needed to move to the new schema version
            for ldif_file in schema_updates:
                count += self._apply_update(samdb, ldif_file, base_dir)

            if count > 0:
                samdb.transaction_commit()
                print("Schema successfully updated")
            else:
                print("No changes applied to schema")
                samdb.transaction_cancel()
        except Exception as e:
            print("Exception: %s" % e)
            print("Error encountered, aborting schema upgrade")
            samdb.transaction_cancel()
            error_encountered = True

        # undo the temporary smb.conf override from above
        if updates_allowed_overridden:
            lp.set("dsdb:schema update allowed", "no")

        if temp_folder:
            shutil.rmtree(temp_folder)

        if error_encountered:
            raise CommandError('Failed to upgrade schema')
diff --git a/python/samba/netcmd/domain/tombstones.py b/python/samba/netcmd/domain/tombstones.py
new file mode 100644
index 0000000..673bb9a
--- /dev/null
+++ b/python/samba/netcmd/domain/tombstones.py
@@ -0,0 +1,116 @@
+# domain management - domain tombstones
+#
+# Copyright Matthias Dieter Wallnoefer 2009
+# Copyright Andrew Kroeger 2009
+# Copyright Jelmer Vernooij 2007-2012
+# Copyright Giampaolo Lauria 2011
+# Copyright Matthieu Patou <mat@matws.net> 2011
+# Copyright Andrew Bartlett 2008-2015
+# Copyright Stefan Metzmacher 2012
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import time
+
+import ldb
+import samba.getopt as options
+from samba.auth import system_session
+from samba.netcmd import Command, CommandError, Option, SuperCommand
+from samba.samdb import SamDB
+
+
class cmd_domain_tombstones_expunge(Command):
    """Expunge tombstones from the database.

This command expunges tombstones from the database."""
    synopsis = "%prog NC [NC [...]] [options]"

    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server", type=str,
               metavar="URL", dest="H"),
        Option("--current-time",
               help="The current time to evaluate the tombstone lifetime from, expressed as YYYY-MM-DD",
               type=str),
        Option("--tombstone-lifetime", help="Number of days a tombstone should be preserved for", type=int),
    ]

    takes_args = ["nc*"]

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "credopts": options.CredentialsOptions,
        "versionopts": options.VersionOptions,
    }

    def run(self, *ncs, **kwargs):
        sambaopts = kwargs.get("sambaopts")
        credopts = kwargs.get("credopts")
        H = kwargs.get("H")
        current_time_string = kwargs.get("current_time")
        tombstone_lifetime = kwargs.get("tombstone_lifetime")
        lp = sambaopts.get_loadparm()
        creds = credopts.get_credentials(lp)
        samdb = SamDB(url=H, session_info=system_session(),
                      credentials=creds, lp=lp)

        # warn about the limited default behaviour when neither knob is given
        if current_time_string is None and tombstone_lifetime is None:
            print("Note: without --current-time or --tombstone-lifetime "
                  "only tombstones already scheduled for deletion will "
                  "be deleted.", file=self.outf)
            print("To remove all tombstones, use --tombstone-lifetime=0.",
                  file=self.outf)

        # evaluate lifetimes against --current-time if given, else "now",
        # both expressed as a unix timestamp
        if current_time_string is not None:
            current_time_obj = time.strptime(current_time_string, "%Y-%m-%d")
            current_time = int(time.mktime(current_time_obj))

        else:
            current_time = int(time.time())

        # with no NCs on the command line, expunge every naming context
        # advertised in the rootDSE
        if len(ncs) == 0:
            res = samdb.search(expression="", base="", scope=ldb.SCOPE_BASE,
                               attrs=["namingContexts"])

            ncs = []
            for nc in res[0]["namingContexts"]:
                ncs.append(str(nc))
        else:
            ncs = list(ncs)

        # only roll back if transaction_start() itself succeeded
        started_transaction = False
        try:
            samdb.transaction_start()
            started_transaction = True
            (removed_objects,
             removed_links) = samdb.garbage_collect_tombstones(ncs,
                                                               current_time=current_time,
                                                               tombstone_lifetime=tombstone_lifetime)

        except Exception as err:
            if started_transaction:
                samdb.transaction_cancel()
            raise CommandError("Failed to expunge / garbage collect tombstones", err)

        samdb.transaction_commit()

        self.outf.write("Removed %d objects and %d links successfully\n"
                        % (removed_objects, removed_links))
+
+
class cmd_domain_tombstones(SuperCommand):
    """Domain tombstone and recycled object management."""

    # Register each subcommand instance under the name users type on the CLI.
    subcommands = {
        "expunge": cmd_domain_tombstones_expunge(),
    }
diff --git a/python/samba/netcmd/domain/trust.py b/python/samba/netcmd/domain/trust.py
new file mode 100644
index 0000000..e930f00
--- /dev/null
+++ b/python/samba/netcmd/domain/trust.py
@@ -0,0 +1,2338 @@
+# domain management - domain trust
+#
+# Copyright Matthias Dieter Wallnoefer 2009
+# Copyright Andrew Kroeger 2009
+# Copyright Jelmer Vernooij 2007-2012
+# Copyright Giampaolo Lauria 2011
+# Copyright Matthieu Patou <mat@matws.net> 2011
+# Copyright Andrew Bartlett 2008-2015
+# Copyright Stefan Metzmacher 2012
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import ctypes
+from getpass import getpass
+
+import ldb
+import samba.getopt as options
+import samba.ntacls
+from samba import NTSTATUSError, ntstatus, string_to_byte_array, werror
+from samba.auth import system_session
+from samba.dcerpc import drsblobs, lsa, nbt, netlogon, security
+from samba.net import Net
+from samba.netcmd import Command, CommandError, Option, SuperCommand
+from samba.samdb import SamDB
+from samba.trust_utils import CreateTrustedDomainRelax
+
+
class LocalDCCredentialsOptions(options.CredentialsOptions):
    # A second credentials option group, distinguished from the default one
    # by the "local-dc" special name (presumably yielding --local-dc-* style
    # options; confirm against samba.getopt.CredentialsOptions).
    def __init__(self, parser):
        super().__init__(parser, special_name="local-dc")
+
+
# NOTE(review): shared base class for the "domain trust" subcommands below;
# its docstring duplicates cmd_domain_trust_list's.
class DomainTrustCommand(Command):
    """List domain trusts."""

    def __init__(self):
        Command.__init__(self)
        # Connection parameters are resolved lazily by setup_local_server()
        # / setup_remote_server() and cached on the instance.
        self.local_lp = None

        self.local_server = None
        self.local_binding_string = None
        self.local_creds = None

        self.remote_server = None
        self.remote_binding_string = None
        self.remote_creds = None

    def _uint32(self, v):
        # reinterpret a possibly-negative Python int as its unsigned
        # 32-bit value (NTSTATUS/Win32 codes arrive sign-extended)
        return ctypes.c_uint32(v).value

    def check_runtime_error(self, runtime, val):
        """Return True if RuntimeError `runtime` carries error code `val`."""
        if runtime is None:
            return False

        err32 = self._uint32(runtime.args[0])
        if err32 == val:
            return True

        return False

    # The nested exception classes below use an unusual convention:
    # `exception_self` is the exception instance being constructed, while
    # `self` is the *command* instance, passed explicitly so the message
    # can include its local_server/remote_server.
    class LocalRuntimeError(CommandError):
        def __init__(exception_self, self, runtime, message):
            err32 = self._uint32(runtime.args[0])
            errstr = runtime.args[1]
            msg = "LOCAL_DC[%s]: %s - ERROR(0x%08X) - %s" % (
                  self.local_server, message, err32, errstr)
            CommandError.__init__(exception_self, msg)

    class RemoteRuntimeError(CommandError):
        def __init__(exception_self, self, runtime, message):
            err32 = self._uint32(runtime.args[0])
            errstr = runtime.args[1]
            msg = "REMOTE_DC[%s]: %s - ERROR(0x%08X) - %s" % (
                  self.remote_server, message, err32, errstr)
            CommandError.__init__(exception_self, msg)

    class LocalLdbError(CommandError):
        def __init__(exception_self, self, ldb_error, message):
            errval = ldb_error.args[0]
            errstr = ldb_error.args[1]
            msg = "LOCAL_DC[%s]: %s - ERROR(%d) - %s" % (
                  self.local_server, message, errval, errstr)
            CommandError.__init__(exception_self, msg)

    def setup_local_server(self, sambaopts, localdcopts):
        """Resolve and cache the local DC's binding parameters.

        Without --local-dc the command must run on an AD DC itself and
        talks to it over ncalrpc as system; with an explicit address it
        uses ncacn_np with the --local-dc-* credentials.
        """
        if self.local_server is not None:
            # already resolved on a previous call
            return self.local_server

        lp = sambaopts.get_loadparm()

        local_server = localdcopts.ipaddress
        if local_server is None:
            server_role = lp.server_role()
            if server_role != "ROLE_ACTIVE_DIRECTORY_DC":
                raise CommandError("Invalid server_role %s" % (server_role))
            local_server = lp.get('netbios name')
            local_transport = "ncalrpc"
            local_binding_options = ""
            local_binding_options += ",auth_type=ncalrpc_as_system"
            local_ldap_url = None
            local_creds = None
        else:
            local_transport = "ncacn_np"
            local_binding_options = ""
            local_ldap_url = "ldap://%s" % local_server
            local_creds = localdcopts.get_credentials(lp)

        self.local_lp = lp

        self.local_server = local_server
        self.local_binding_string = "%s:%s[%s]" % (local_transport, local_server, local_binding_options)
        self.local_ldap_url = local_ldap_url
        self.local_creds = local_creds
        return self.local_server

    def new_local_lsa_connection(self):
        # LSA RPC pipe to the local DC
        return lsa.lsarpc(self.local_binding_string, self.local_lp, self.local_creds)

    def new_local_netlogon_connection(self):
        # NETLOGON RPC pipe to the local DC
        return netlogon.netlogon(self.local_binding_string, self.local_lp, self.local_creds)

    def new_local_ldap_connection(self):
        # LDAP connection to the local DC (requires --local-dc, otherwise
        # local_ldap_url is None)
        return SamDB(url=self.local_ldap_url,
                     session_info=system_session(),
                     credentials=self.local_creds,
                     lp=self.local_lp)

    def setup_remote_server(self, credopts, domain,
                            require_pdc=True,
                            require_writable=True):
        """Locate a suitable DC of the remote `domain` and cache its binding.

        Requires setup_local_server() to have run first.
        """

        if require_pdc:
            assert require_writable

        if self.remote_server is not None:
            # already resolved on a previous call
            return self.remote_server

        # placeholder name first, so error paths that format
        # self.remote_server have something meaningful to report
        self.remote_server = "__unknown__remote_server__.%s" % domain
        assert self.local_server is not None

        remote_creds = credopts.get_credentials(self.local_lp)
        remote_server = credopts.ipaddress
        remote_binding_options = ""

        # TODO: we should also support NT4 domains
        # we could use local_netlogon.netr_DsRGetDCNameEx2() with the remote domain name
        # and delegate NBT or CLDAP to the local netlogon server
        try:
            remote_net = Net(remote_creds, self.local_lp, server=remote_server)
            remote_flags = nbt.NBT_SERVER_LDAP | nbt.NBT_SERVER_DS
            if require_writable:
                remote_flags |= nbt.NBT_SERVER_WRITABLE
            if require_pdc:
                remote_flags |= nbt.NBT_SERVER_PDC
            remote_info = remote_net.finddc(flags=remote_flags, domain=domain, address=remote_server)
        except NTSTATUSError as error:
            raise CommandError("Failed to find a writeable DC for domain '%s': %s" %
                               (domain, error.args[1]))
        except Exception:
            raise CommandError("Failed to find a writeable DC for domain '%s'" % domain)
        # human-readable names for the NBT server_type bits reported by finddc
        flag_map = {
            nbt.NBT_SERVER_PDC: "PDC",
            nbt.NBT_SERVER_GC: "GC",
            nbt.NBT_SERVER_LDAP: "LDAP",
            nbt.NBT_SERVER_DS: "DS",
            nbt.NBT_SERVER_KDC: "KDC",
            nbt.NBT_SERVER_TIMESERV: "TIMESERV",
            nbt.NBT_SERVER_CLOSEST: "CLOSEST",
            nbt.NBT_SERVER_WRITABLE: "WRITABLE",
            nbt.NBT_SERVER_GOOD_TIMESERV: "GOOD_TIMESERV",
            nbt.NBT_SERVER_NDNC: "NDNC",
            nbt.NBT_SERVER_SELECT_SECRET_DOMAIN_6: "SELECT_SECRET_DOMAIN_6",
            nbt.NBT_SERVER_FULL_SECRET_DOMAIN_6: "FULL_SECRET_DOMAIN_6",
            nbt.NBT_SERVER_ADS_WEB_SERVICE: "ADS_WEB_SERVICE",
            nbt.NBT_SERVER_DS_8: "DS_8",
            nbt.NBT_SERVER_DS_9: "DS_9",
            nbt.NBT_SERVER_DS_10: "DS_10",
            nbt.NBT_SERVER_HAS_DNS_NAME: "HAS_DNS_NAME",
            nbt.NBT_SERVER_IS_DEFAULT_NC: "IS_DEFAULT_NC",
            nbt.NBT_SERVER_FOREST_ROOT: "FOREST_ROOT",
        }
        server_type_string = self.generic_bitmap_to_string(flag_map,
                                                           remote_info.server_type, names_only=True)
        self.outf.write("RemoteDC Netbios[%s] DNS[%s] ServerType[%s]\n" % (
                        remote_info.pdc_name,
                        remote_info.pdc_dns_name,
                        server_type_string))

        self.remote_server = remote_info.pdc_dns_name
        self.remote_binding_string = "ncacn_np:%s[%s]" % (self.remote_server, remote_binding_options)
        self.remote_creds = remote_creds
        return self.remote_server

    def new_remote_lsa_connection(self):
        # LSA RPC pipe to the remote DC found by setup_remote_server()
        return lsa.lsarpc(self.remote_binding_string, self.local_lp, self.remote_creds)

    def new_remote_netlogon_connection(self):
        # NETLOGON RPC pipe to the remote DC found by setup_remote_server()
        return netlogon.netlogon(self.remote_binding_string, self.local_lp, self.remote_creds)

    def get_lsa_info(self, conn, policy_access):
        """Open an LSA policy handle and return (policy, DNS domain info)."""
        objectAttr = lsa.ObjectAttribute()
        objectAttr.sec_qos = lsa.QosInfo()

        policy = conn.OpenPolicy2(b''.decode('utf-8'),
                                  objectAttr, policy_access)

        info = conn.QueryInfoPolicy2(policy, lsa.LSA_POLICY_INFO_DNS)

        return (policy, info)

    def get_netlogon_dc_unc(self, conn, server, domain):
        # prefer the Ex2 call; fall back to the older netr_GetDcName
        # when the server doesn't support it
        try:
            info = conn.netr_DsRGetDCNameEx2(server,
                                             None, 0, None, None, None,
                                             netlogon.DS_RETURN_DNS_NAME)
            return info.dc_unc
        except RuntimeError:
            return conn.netr_GetDcName(server, domain)

    def get_netlogon_dc_info(self, conn, server):
        info = conn.netr_DsRGetDCNameEx2(server,
                                         None, 0, None, None, None,
                                         netlogon.DS_RETURN_DNS_NAME)
        return info

    def netr_DomainTrust_to_name(self, t):
        # downlevel (NT4-style) trusts only have a NetBIOS name
        if t.trust_type == lsa.LSA_TRUST_TYPE_DOWNLEVEL:
            return t.netbios_name

        return t.dns_name

    def netr_DomainTrust_to_type(self, a, t):
        """Classify trust `t` relative to the whole trust array `a`.

        Returns one of "Parent", "TreeRoot", "Child", "Shortcut",
        "Forest" or "External".
        """
        # find our own (primary) domain entry and, unless it is a tree
        # root, its parent within the forest
        primary = None
        primary_parent = None
        for _t in a:
            if _t.trust_flags & netlogon.NETR_TRUST_FLAG_PRIMARY:
                primary = _t
                if not _t.trust_flags & netlogon.NETR_TRUST_FLAG_TREEROOT:
                    primary_parent = a[_t.parent_index]
                break

        if t.trust_flags & netlogon.NETR_TRUST_FLAG_IN_FOREST:
            if t is primary_parent:
                return "Parent"

            if t.trust_flags & netlogon.NETR_TRUST_FLAG_TREEROOT:
                return "TreeRoot"

            parent = a[t.parent_index]
            if parent is primary:
                return "Child"

            return "Shortcut"

        if t.trust_attributes & lsa.LSA_TRUST_ATTRIBUTE_FOREST_TRANSITIVE:
            return "Forest"

        return "External"

    def netr_DomainTrust_to_transitive(self, t):
        # intra-forest trusts are always transitive
        if t.trust_flags & netlogon.NETR_TRUST_FLAG_IN_FOREST:
            return "Yes"

        if t.trust_attributes & lsa.LSA_TRUST_ATTRIBUTE_NON_TRANSITIVE:
            return "No"

        if t.trust_attributes & lsa.LSA_TRUST_ATTRIBUTE_FOREST_TRANSITIVE:
            return "Yes"

        return "No"

    def netr_DomainTrust_to_direction(self, t):
        if t.trust_flags & netlogon.NETR_TRUST_FLAG_INBOUND and \
           t.trust_flags & netlogon.NETR_TRUST_FLAG_OUTBOUND:
            return "BOTH"

        if t.trust_flags & netlogon.NETR_TRUST_FLAG_INBOUND:
            return "INCOMING"

        if t.trust_flags & netlogon.NETR_TRUST_FLAG_OUTBOUND:
            return "OUTGOING"

        return "INVALID"

    def generic_enum_to_string(self, e_dict, v, names_only=False):
        """Format enum value `v` as "0xHEX (NAME)" using lookup `e_dict`.

        NOTE(review): the `names_only` parameter is accepted but unused here
        (unlike in generic_bitmap_to_string).
        """
        try:
            w = e_dict[v]
        except KeyError:
            v32 = self._uint32(v)
            w = "__unknown__%08X__" % v32

        r = "0x%x (%s)" % (v, w)
        return r

    def generic_bitmap_to_string(self, b_dict, v, names_only=False):
        """Format bitmask `v` as comma-joined flag names from `b_dict`.

        Returns just the names when names_only=True, else "0xHEX (names)".
        """

        s = []

        # clear each recognized bit from c so leftovers can be reported
        c = v
        for b in sorted(b_dict.keys()):
            if not (c & b):
                continue
            c &= ~b
            s += [b_dict[b]]

        if c != 0:
            c32 = self._uint32(c)
            s += ["__unknown_%08X__" % c32]

        w = ",".join(s)
        if names_only:
            return w
        r = "0x%x (%s)" % (v, w)
        return r

    def trustType_string(self, v):
        types = {
            lsa.LSA_TRUST_TYPE_DOWNLEVEL: "DOWNLEVEL",
            lsa.LSA_TRUST_TYPE_UPLEVEL: "UPLEVEL",
            lsa.LSA_TRUST_TYPE_MIT: "MIT",
            lsa.LSA_TRUST_TYPE_DCE: "DCE",
        }
        return self.generic_enum_to_string(types, v)

    def trustDirection_string(self, v):
        directions = {
            lsa.LSA_TRUST_DIRECTION_INBOUND |
            lsa.LSA_TRUST_DIRECTION_OUTBOUND: "BOTH",
            lsa.LSA_TRUST_DIRECTION_INBOUND: "INBOUND",
            lsa.LSA_TRUST_DIRECTION_OUTBOUND: "OUTBOUND",
        }
        return self.generic_enum_to_string(directions, v)

    def trustAttributes_string(self, v):
        attributes = {
            lsa.LSA_TRUST_ATTRIBUTE_NON_TRANSITIVE: "NON_TRANSITIVE",
            lsa.LSA_TRUST_ATTRIBUTE_UPLEVEL_ONLY: "UPLEVEL_ONLY",
            lsa.LSA_TRUST_ATTRIBUTE_QUARANTINED_DOMAIN: "QUARANTINED_DOMAIN",
            lsa.LSA_TRUST_ATTRIBUTE_FOREST_TRANSITIVE: "FOREST_TRANSITIVE",
            lsa.LSA_TRUST_ATTRIBUTE_CROSS_ORGANIZATION: "CROSS_ORGANIZATION",
            lsa.LSA_TRUST_ATTRIBUTE_WITHIN_FOREST: "WITHIN_FOREST",
            lsa.LSA_TRUST_ATTRIBUTE_TREAT_AS_EXTERNAL: "TREAT_AS_EXTERNAL",
            lsa.LSA_TRUST_ATTRIBUTE_USES_RC4_ENCRYPTION: "USES_RC4_ENCRYPTION",
        }
        return self.generic_bitmap_to_string(attributes, v)

    def kerb_EncTypes_string(self, v):
        enctypes = {
            security.KERB_ENCTYPE_DES_CBC_CRC: "DES_CBC_CRC",
            security.KERB_ENCTYPE_DES_CBC_MD5: "DES_CBC_MD5",
            security.KERB_ENCTYPE_RC4_HMAC_MD5: "RC4_HMAC_MD5",
            security.KERB_ENCTYPE_AES128_CTS_HMAC_SHA1_96: "AES128_CTS_HMAC_SHA1_96",
            security.KERB_ENCTYPE_AES256_CTS_HMAC_SHA1_96: "AES256_CTS_HMAC_SHA1_96",
            security.KERB_ENCTYPE_AES256_CTS_HMAC_SHA1_96_SK: "AES256_CTS_HMAC_SHA1_96-SK",
            security.KERB_ENCTYPE_FAST_SUPPORTED: "FAST_SUPPORTED",
            security.KERB_ENCTYPE_COMPOUND_IDENTITY_SUPPORTED: "COMPOUND_IDENTITY_SUPPORTED",
            security.KERB_ENCTYPE_CLAIMS_SUPPORTED: "CLAIMS_SUPPORTED",
            security.KERB_ENCTYPE_RESOURCE_SID_COMPRESSION_DISABLED: "RESOURCE_SID_COMPRESSION_DISABLED",
        }
        return self.generic_bitmap_to_string(enctypes, v)

    def entry_tln_status(self, e_flags, ):
        # top-level-name record status for forest trust info output
        if e_flags == 0:
            return "Status[Enabled]"

        flags = {
            lsa.LSA_TLN_DISABLED_NEW: "Disabled-New",
            lsa.LSA_TLN_DISABLED_ADMIN: "Disabled",
            lsa.LSA_TLN_DISABLED_CONFLICT: "Disabled-Conflicting",
        }
        return "Status[%s]" % self.generic_bitmap_to_string(flags, e_flags, names_only=True)

    def entry_dom_status(self, e_flags):
        # domain-info record status for forest trust info output
        if e_flags == 0:
            return "Status[Enabled]"

        flags = {
            lsa.LSA_SID_DISABLED_ADMIN: "Disabled-SID",
            lsa.LSA_SID_DISABLED_CONFLICT: "Disabled-SID-Conflicting",
            lsa.LSA_NB_DISABLED_ADMIN: "Disabled-NB",
            lsa.LSA_NB_DISABLED_CONFLICT: "Disabled-NB-Conflicting",
        }
        return "Status[%s]" % self.generic_bitmap_to_string(flags, e_flags, names_only=True)

    def write_forest_trust_info(self, fti, tln=None, collisions=None):
        """Pretty-print a forest trust information record set to self.outf."""
        if tln is not None:
            tln_string = " TDO[%s]" % tln
        else:
            tln_string = ""

        self.outf.write("Namespaces[%d]%s:\n" % (
            len(fti.entries), tln_string))

        for i, e in enumerate(fti.entries):

            flags = e.flags
            collision_string = ""

            # a matching collision entry overrides the flags for display
            if collisions is not None:
                for c in collisions.entries:
                    if c.index != i:
                        continue
                    flags = c.flags
                    collision_string = " Collision[%s]" % (c.name.string)

            d = e.forest_trust_data
            if e.type == lsa.LSA_FOREST_TRUST_TOP_LEVEL_NAME:
                self.outf.write("TLN: %-32s DNS[*.%s]%s\n" % (
                                self.entry_tln_status(flags),
                                d.string, collision_string))
            elif e.type == lsa.LSA_FOREST_TRUST_TOP_LEVEL_NAME_EX:
                self.outf.write("TLN_EX: %-29s DNS[*.%s]\n" % (
                                "", d.string))
            elif e.type == lsa.LSA_FOREST_TRUST_DOMAIN_INFO:
                self.outf.write("DOM: %-32s DNS[%s] Netbios[%s] SID[%s]%s\n" % (
                                self.entry_dom_status(flags),
                                d.dns_domain_name.string,
                                d.netbios_domain_name.string,
                                d.domain_sid, collision_string))
        return
+
+
class cmd_domain_trust_list(DomainTrustCommand):
    """List domain trusts."""

    synopsis = "%prog [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "localdcopts": LocalDCCredentialsOptions,
    }

    takes_options = [
    ]

    def run(self, sambaopts=None, versionopts=None, localdcopts=None):

        local_server = self.setup_local_server(sambaopts, localdcopts)
        try:
            local_netlogon = self.new_local_netlogon_connection()
        except RuntimeError as error:
            raise self.LocalRuntimeError(self, error, "failed to connect netlogon server")

        # enumerate every trust the local DC knows about, in all directions
        try:
            local_netlogon_trusts = \
                local_netlogon.netr_DsrEnumerateDomainTrusts(local_server,
                                                             netlogon.NETR_TRUST_FLAG_IN_FOREST |
                                                             netlogon.NETR_TRUST_FLAG_OUTBOUND |
                                                             netlogon.NETR_TRUST_FLAG_INBOUND)
        except RuntimeError as error:
            if self.check_runtime_error(error, werror.WERR_RPC_S_PROCNUM_OUT_OF_RANGE):
                # TODO: we could implement a fallback to lsa.EnumTrustDom()
                raise CommandError("LOCAL_DC[%s]: netr_DsrEnumerateDomainTrusts not supported." % (
                                   local_server))
            raise self.LocalRuntimeError(self, error, "netr_DsrEnumerateDomainTrusts failed")

        a = local_netlogon_trusts.array
        for t in a:
            # our own (primary) domain is not a trust; skip it
            if t.trust_flags & netlogon.NETR_TRUST_FLAG_PRIMARY:
                continue
            self.outf.write("%-14s %-15s %-19s %s\n" % (
                            "Type[%s]" % self.netr_DomainTrust_to_type(a, t),
                            "Transitive[%s]" % self.netr_DomainTrust_to_transitive(t),
                            "Direction[%s]" % self.netr_DomainTrust_to_direction(t),
                            "Name[%s]" % self.netr_DomainTrust_to_name(t)))
        return
+
+
+class cmd_domain_trust_show(DomainTrustCommand):
+    """Show trusted domain details."""
+
+    synopsis = "%prog NAME [options]"
+
+    takes_optiongroups = {
+        "sambaopts": options.SambaOptions,
+        "versionopts": options.VersionOptions,
+        "localdcopts": LocalDCCredentialsOptions,
+    }
+
+    takes_options = [
+    ]
+
+    takes_args = ["domain"]
+
+    def run(self, domain, sambaopts=None, versionopts=None, localdcopts=None):
+        """Query the local LSA server for the TDO of *domain* and print it.
+
+        Read-only: only LSA_POLICY_VIEW_LOCAL_INFORMATION access is requested.
+        Raises CommandError if no trusted domain object exists for *domain*.
+        """
+        self.setup_local_server(sambaopts, localdcopts)
+        try:
+            local_lsa = self.new_local_lsa_connection()
+        except RuntimeError as error:
+            raise self.LocalRuntimeError(self, error, "failed to connect lsa server")
+
+        try:
+            local_policy_access = lsa.LSA_POLICY_VIEW_LOCAL_INFORMATION
+            (local_policy, local_lsa_info) = self.get_lsa_info(local_lsa, local_policy_access)
+        except RuntimeError as error:
+            raise self.LocalRuntimeError(self, error, "failed to query LSA_POLICY_INFO_DNS")
+
+        self.outf.write("LocalDomain Netbios[%s] DNS[%s] SID[%s]\n" % (
+                        local_lsa_info.name.string,
+                        local_lsa_info.dns_domain.string,
+                        local_lsa_info.sid))
+
+        # Look up the trusted domain object (TDO) by name; FULL_INFO gives us
+        # both the extended info and the posix offset in one round trip.
+        lsaString = lsa.String()
+        lsaString.string = domain
+        try:
+            local_tdo_full = \
+                local_lsa.QueryTrustedDomainInfoByName(local_policy,
+                                                       lsaString,
+                                                       lsa.LSA_TRUSTED_DOMAIN_INFO_FULL_INFO)
+            local_tdo_info = local_tdo_full.info_ex
+            local_tdo_posix = local_tdo_full.posix_offset
+        except NTSTATUSError as error:
+            if self.check_runtime_error(error, ntstatus.NT_STATUS_OBJECT_NAME_NOT_FOUND):
+                raise CommandError("trusted domain object does not exist for domain [%s]" % domain)
+
+            raise self.LocalRuntimeError(self, error, "QueryTrustedDomainInfoByName(FULL_INFO) failed")
+
+        # Supported encryption types are optional on the server side: older
+        # servers report INVALID_PARAMETER / INVALID_INFO_CLASS, which we
+        # treat as "no enctypes set" rather than a failure.
+        try:
+            local_tdo_enctypes = \
+                local_lsa.QueryTrustedDomainInfoByName(local_policy,
+                                                       lsaString,
+                                                       lsa.LSA_TRUSTED_DOMAIN_SUPPORTED_ENCRYPTION_TYPES)
+        except NTSTATUSError as error:
+            if self.check_runtime_error(error, ntstatus.NT_STATUS_INVALID_PARAMETER):
+                error = None
+            if self.check_runtime_error(error, ntstatus.NT_STATUS_INVALID_INFO_CLASS):
+                error = None
+
+            if error is not None:
+                raise self.LocalRuntimeError(self, error,
+                                             "QueryTrustedDomainInfoByName(SUPPORTED_ENCRYPTION_TYPES) failed")
+
+            # Fall back to an empty enctype set.
+            local_tdo_enctypes = lsa.TrustDomainInfoSupportedEncTypes()
+            local_tdo_enctypes.enc_types = 0
+
+        # Forest trust information only exists for forest-transitive trusts;
+        # RPC_PROCNUM_OUT_OF_RANGE / NOT_FOUND mean "none available" and are
+        # replaced by an empty record.
+        try:
+            local_tdo_forest = None
+            if local_tdo_info.trust_attributes & lsa.LSA_TRUST_ATTRIBUTE_FOREST_TRANSITIVE:
+                local_tdo_forest = \
+                    local_lsa.lsaRQueryForestTrustInformation(local_policy,
+                                                              lsaString,
+                                                              lsa.LSA_FOREST_TRUST_DOMAIN_INFO)
+        except RuntimeError as error:
+            if self.check_runtime_error(error, ntstatus.NT_STATUS_RPC_PROCNUM_OUT_OF_RANGE):
+                error = None
+            if self.check_runtime_error(error, ntstatus.NT_STATUS_NOT_FOUND):
+                error = None
+            if error is not None:
+                raise self.LocalRuntimeError(self, error, "lsaRQueryForestTrustInformation failed")
+
+            local_tdo_forest = lsa.ForestTrustInformation()
+            local_tdo_forest.count = 0
+            local_tdo_forest.entries = []
+
+        self.outf.write("TrustedDomain:\n\n")
+        self.outf.write("NetbiosName:    %s\n" % local_tdo_info.netbios_name.string)
+        if local_tdo_info.netbios_name.string != local_tdo_info.domain_name.string:
+            self.outf.write("DnsName:        %s\n" % local_tdo_info.domain_name.string)
+        self.outf.write("SID:            %s\n" % local_tdo_info.sid)
+        self.outf.write("Type:           %s\n" % self.trustType_string(local_tdo_info.trust_type))
+        self.outf.write("Direction:      %s\n" % self.trustDirection_string(local_tdo_info.trust_direction))
+        self.outf.write("Attributes:     %s\n" % self.trustAttributes_string(local_tdo_info.trust_attributes))
+        # Show the posix offset both as an unsigned hex value and as the
+        # signed interpretation of the same 32 bits.
+        posix_offset_u32 = ctypes.c_uint32(local_tdo_posix.posix_offset).value
+        posix_offset_i32 = ctypes.c_int32(local_tdo_posix.posix_offset).value
+        self.outf.write("PosixOffset:    0x%08X (%d)\n" % (posix_offset_u32, posix_offset_i32))
+        self.outf.write("kerb_EncTypes:  %s\n" % self.kerb_EncTypes_string(local_tdo_enctypes.enc_types))
+
+        if local_tdo_info.trust_attributes & lsa.LSA_TRUST_ATTRIBUTE_FOREST_TRANSITIVE:
+            self.write_forest_trust_info(local_tdo_forest,
+                                         tln=local_tdo_info.domain_name.string)
+
+        return
+
+class cmd_domain_trust_modify(DomainTrustCommand):
+    """Modify a trusted domain relationship."""
+    # NOTE(review): the original docstring said "Show trusted domain
+    # details." — a copy/paste from cmd_domain_trust_show; this command
+    # modifies the trust (currently only the supported kerberos enctypes).
+
+    synopsis = "%prog NAME [options]"
+
+    takes_optiongroups = {
+        "sambaopts": options.SambaOptions,
+        "versionopts": options.VersionOptions,
+        "localdcopts": LocalDCCredentialsOptions,
+    }
+
+    takes_options = [
+        Option("--use-aes-keys", action="store_true",
+               help="The trust uses AES kerberos keys.",
+               dest='use_aes_keys',
+               default=None),
+        Option("--no-aes-keys", action="store_true",
+               help="The trust does not have any support for AES kerberos keys.",
+               dest='disable_aes_keys',
+               default=None),
+        Option("--raw-kerb-enctypes", action="store",
+               help="The raw kerberos enctype bits",
+               dest='kerb_enctypes',
+               default=None),
+    ]
+
+    takes_args = ["domain"]
+
+    def run(self, domain, sambaopts=None, versionopts=None, localdcopts=None,
+            disable_aes_keys=None, use_aes_keys=None, kerb_enctypes=None):
+        """Update the supported kerberos encryption types on the local TDO.
+
+        Exactly one of --use-aes-keys, --no-aes-keys or --raw-kerb-enctypes
+        must be given; raises CommandError otherwise, or when nothing would
+        be modified.
+        """
+        num_modifications = 0
+
+        # The three enctype options are mutually exclusive.
+        enctype_args = 0
+        if kerb_enctypes is not None:
+            enctype_args += 1
+        if use_aes_keys is not None:
+            enctype_args += 1
+        if disable_aes_keys is not None:
+            enctype_args += 1
+        if enctype_args > 1:
+            raise CommandError("--no-aes-keys, --use-aes-keys and --raw-kerb-enctypes are mutually exclusive")
+        if enctype_args == 1:
+            num_modifications += 1
+
+        if num_modifications == 0:
+            raise CommandError("modification arguments are required, try --help")
+
+        self.setup_local_server(sambaopts, localdcopts)
+        try:
+            local_lsa = self.new_local_lsa_connection()
+        except RuntimeError as error:
+            raise self.LocalRuntimeError(self, error, "failed to connect to lsa server")
+
+        # TRUST_ADMIN access is required in addition to view access because
+        # we will write to the TDO below.
+        try:
+            local_policy_access = lsa.LSA_POLICY_VIEW_LOCAL_INFORMATION
+            local_policy_access |= lsa.LSA_POLICY_TRUST_ADMIN
+            (local_policy, local_lsa_info) = self.get_lsa_info(local_lsa, local_policy_access)
+        except RuntimeError as error:
+            raise self.LocalRuntimeError(self, error, "failed to query LSA_POLICY_INFO_DNS")
+
+        self.outf.write("LocalDomain Netbios[%s] DNS[%s] SID[%s]\n" % (
+                        local_lsa_info.name.string,
+                        local_lsa_info.dns_domain.string,
+                        local_lsa_info.sid))
+
+        if enctype_args == 1:
+            lsaString = lsa.String()
+            lsaString.string = domain
+
+            # Fetch the current enctypes first so we can skip the write when
+            # nothing changes.  INVALID_PARAMETER / INVALID_INFO_CLASS mean
+            # the server has no enctype info; treat that as 0.
+            try:
+                local_tdo_enctypes = \
+                    local_lsa.QueryTrustedDomainInfoByName(local_policy,
+                                                           lsaString,
+                                                           lsa.LSA_TRUSTED_DOMAIN_SUPPORTED_ENCRYPTION_TYPES)
+            except NTSTATUSError as error:
+                if self.check_runtime_error(error, ntstatus.NT_STATUS_INVALID_PARAMETER):
+                    error = None
+                if self.check_runtime_error(error, ntstatus.NT_STATUS_INVALID_INFO_CLASS):
+                    error = None
+
+                if error is not None:
+                    raise self.LocalRuntimeError(self, error,
+                                                 "QueryTrustedDomainInfoByName(SUPPORTED_ENCRYPTION_TYPES) failed")
+
+                local_tdo_enctypes = lsa.TrustDomainInfoSupportedEncTypes()
+                local_tdo_enctypes.enc_types = 0
+
+            self.outf.write("Old kerb_EncTypes: %s\n" % self.kerb_EncTypes_string(local_tdo_enctypes.enc_types))
+
+            enc_types = lsa.TrustDomainInfoSupportedEncTypes()
+            if kerb_enctypes is not None:
+                # base=0 accepts decimal, 0x..., 0o... and 0b... input.
+                enc_types.enc_types = int(kerb_enctypes, base=0)
+            elif use_aes_keys is not None:
+                enc_types.enc_types = security.KERB_ENCTYPE_AES128_CTS_HMAC_SHA1_96
+                enc_types.enc_types |= security.KERB_ENCTYPE_AES256_CTS_HMAC_SHA1_96
+            elif disable_aes_keys is not None:
+                # CVE-2022-37966: Trust objects are no longer assumed to support
+                # RC4, so we must indicate support explicitly.
+                enc_types.enc_types = security.KERB_ENCTYPE_RC4_HMAC_MD5
+            else:
+                raise CommandError("Internal error should be checked above")
+
+            if enc_types.enc_types != local_tdo_enctypes.enc_types:
+                try:
+                    local_tdo_enctypes = \
+                        local_lsa.SetTrustedDomainInfoByName(local_policy,
+                                                             lsaString,
+                                                             lsa.LSA_TRUSTED_DOMAIN_SUPPORTED_ENCRYPTION_TYPES,
+                                                             enc_types)
+                    self.outf.write("New kerb_EncTypes: %s\n" % self.kerb_EncTypes_string(enc_types.enc_types))
+                except NTSTATUSError as error:
+                    if error is not None:
+                        raise self.LocalRuntimeError(self, error,
+                                                     "SetTrustedDomainInfoByName(SUPPORTED_ENCRYPTION_TYPES) failed")
+            else:
+                self.outf.write("No kerb_EncTypes update needed\n")
+
+        return
+
+class cmd_domain_trust_create(DomainTrustCommand):
+    """Create a domain or forest trust."""
+
+    synopsis = "%prog DOMAIN [options]"
+
+    takes_optiongroups = {
+        "sambaopts": options.SambaOptions,
+        "versionopts": options.VersionOptions,
+        "credopts": options.CredentialsOptions,
+        "localdcopts": LocalDCCredentialsOptions,
+    }
+
+    takes_options = [
+        Option("--type", type="choice", metavar="TYPE",
+               choices=["external", "forest"],
+               help="The type of the trust: 'external' or 'forest'.",
+               dest='trust_type',
+               default="external"),
+        Option("--direction", type="choice", metavar="DIRECTION",
+               choices=["incoming", "outgoing", "both"],
+               help="The trust direction: 'incoming', 'outgoing' or 'both'.",
+               dest='trust_direction',
+               default="both"),
+        Option("--create-location", type="choice", metavar="LOCATION",
+               choices=["local", "both"],
+               help="Where to create the trusted domain object: 'local' or 'both'.",
+               dest='create_location',
+               default="both"),
+        Option("--cross-organisation", action="store_true",
+               help="The related domains does not belong to the same organisation.",
+               dest='cross_organisation',
+               default=False),
+        Option("--quarantined", type="choice", metavar="yes|no",
+               choices=["yes", "no", None],
+               help="Special SID filtering rules are applied to the trust. "
+                    "With --type=external the default is yes. "
+                    "With --type=forest the default is no.",
+               dest='quarantined_arg',
+               default=None),
+        Option("--not-transitive", action="store_true",
+               help="The forest trust is not transitive.",
+               dest='not_transitive',
+               default=False),
+        Option("--treat-as-external", action="store_true",
+               help="The treat the forest trust as external.",
+               dest='treat_as_external',
+               default=False),
+        Option("--no-aes-keys", action="store_false",
+               help="The trust does not use AES kerberos keys.",
+               dest='use_aes_keys',
+               default=True),
+        Option("--skip-validation", action="store_false",
+               help="Skip validation of the trust.",
+               dest='validate',
+               default=True),
+    ]
+
+    takes_args = ["domain"]
+
+    def run(self, domain, sambaopts=None, localdcopts=None, credopts=None, versionopts=None,
+            trust_type=None, trust_direction=None, create_location=None,
+            cross_organisation=False, quarantined_arg=None,
+            not_transitive=False, treat_as_external=False,
+            use_aes_keys=False, validate=True):
+        """Create the trusted domain object(s) for a trust with *domain*.
+
+        With --create-location=both (the default) a TDO is created on both
+        the local and the remote side using a generated random trust secret;
+        with --create-location=local only the local TDO is created and the
+        trust passwords are prompted for interactively.  On failure any TDO
+        created so far is deleted again.  Unless --skip-validation is given,
+        the new trust is verified via netlogon afterwards.
+        """
+        lsaString = lsa.String()
+
+        # SID filtering ("quarantined") defaults to yes for external trusts
+        # and no for forest trusts, unless given explicitly.
+        quarantined = False
+        if quarantined_arg is None:
+            if trust_type == 'external':
+                quarantined = True
+        elif quarantined_arg == 'yes':
+            quarantined = True
+
+        if trust_type != 'forest':
+            if not_transitive:
+                raise CommandError("--not-transitive requires --type=forest")
+            if treat_as_external:
+                raise CommandError("--treat-as-external requires --type=forest")
+
+        enc_types = lsa.TrustDomainInfoSupportedEncTypes()
+        if use_aes_keys:
+            enc_types.enc_types = security.KERB_ENCTYPE_AES128_CTS_HMAC_SHA1_96
+            enc_types.enc_types |= security.KERB_ENCTYPE_AES256_CTS_HMAC_SHA1_96
+        else:
+            # CVE-2022-37966: Trust objects are no longer assumed to support
+            # RC4, so we must indicate support explicitly.
+            enc_types.enc_types = security.KERB_ENCTYPE_RC4_HMAC_MD5
+
+        local_policy_access = lsa.LSA_POLICY_VIEW_LOCAL_INFORMATION
+        local_policy_access |= lsa.LSA_POLICY_TRUST_ADMIN
+        local_policy_access |= lsa.LSA_POLICY_CREATE_SECRET
+
+        # Build the local view of the trust: type, direction and attribute
+        # flags derived from the command line options.
+        local_trust_info = lsa.TrustDomainInfoInfoEx()
+        local_trust_info.trust_type = lsa.LSA_TRUST_TYPE_UPLEVEL
+        local_trust_info.trust_direction = 0
+        if trust_direction == "both":
+            local_trust_info.trust_direction |= lsa.LSA_TRUST_DIRECTION_INBOUND
+            local_trust_info.trust_direction |= lsa.LSA_TRUST_DIRECTION_OUTBOUND
+        elif trust_direction == "incoming":
+            local_trust_info.trust_direction |= lsa.LSA_TRUST_DIRECTION_INBOUND
+        elif trust_direction == "outgoing":
+            local_trust_info.trust_direction |= lsa.LSA_TRUST_DIRECTION_OUTBOUND
+        local_trust_info.trust_attributes = 0
+        if cross_organisation:
+            local_trust_info.trust_attributes |= lsa.LSA_TRUST_ATTRIBUTE_CROSS_ORGANIZATION
+        if quarantined:
+            local_trust_info.trust_attributes |= lsa.LSA_TRUST_ATTRIBUTE_QUARANTINED_DOMAIN
+        if trust_type == "forest":
+            local_trust_info.trust_attributes |= lsa.LSA_TRUST_ATTRIBUTE_FOREST_TRANSITIVE
+        if not_transitive:
+            local_trust_info.trust_attributes |= lsa.LSA_TRUST_ATTRIBUTE_NON_TRANSITIVE
+        if treat_as_external:
+            local_trust_info.trust_attributes |= lsa.LSA_TRUST_ATTRIBUTE_TREAT_AS_EXTERNAL
+
+        def get_password(name):
+            # Prompt until a non-empty password is entered twice identically.
+            password = None
+            while True:
+                if password is not None and password != '':
+                    return password
+                password = getpass("New %s Password: " % name)
+                passwordverify = getpass("Retype %s Password: " % name)
+                if not password == passwordverify:
+                    password = None
+                    self.outf.write("Sorry, passwords do not match.\n")
+
+        incoming_secret = None
+        outgoing_secret = None
+        remote_policy_access = lsa.LSA_POLICY_VIEW_LOCAL_INFORMATION
+        if create_location == "local":
+            # Local-only creation: the admin supplies the trust passwords,
+            # the remote side is expected to be configured separately.
+            if local_trust_info.trust_direction & lsa.LSA_TRUST_DIRECTION_INBOUND:
+                incoming_password = get_password("Incoming Trust")
+                incoming_secret = string_to_byte_array(incoming_password.encode('utf-16-le'))
+            if local_trust_info.trust_direction & lsa.LSA_TRUST_DIRECTION_OUTBOUND:
+                outgoing_password = get_password("Outgoing Trust")
+                outgoing_secret = string_to_byte_array(outgoing_password.encode('utf-16-le'))
+
+            remote_trust_info = None
+        else:
+            # We use 240 random bytes.
+            # Windows uses 28 or 240 random bytes. I guess it's
+            # based on the trust type external vs. forest.
+            #
+            # The initial trust password can be up to 512 bytes
+            # while the versioned passwords used for periodic updates
+            # can only be up to 498 bytes, as netr_ServerPasswordSet2()
+            # needs to pass the NL_PASSWORD_VERSION structure within the
+            # 512 bytes and a 2 bytes confounder is required.
+            #
+            def random_trust_secret(length):
+                pw = samba.generate_random_machine_password(length // 2, length // 2)
+                return string_to_byte_array(pw.encode('utf-16-le'))
+
+            if local_trust_info.trust_direction & lsa.LSA_TRUST_DIRECTION_INBOUND:
+                incoming_secret = random_trust_secret(240)
+            if local_trust_info.trust_direction & lsa.LSA_TRUST_DIRECTION_OUTBOUND:
+                outgoing_secret = random_trust_secret(240)
+
+            remote_policy_access |= lsa.LSA_POLICY_TRUST_ADMIN
+            remote_policy_access |= lsa.LSA_POLICY_CREATE_SECRET
+
+            # The remote view of the trust mirrors the local one, with the
+            # inbound/outbound directions swapped.
+            remote_trust_info = lsa.TrustDomainInfoInfoEx()
+            remote_trust_info.trust_type = lsa.LSA_TRUST_TYPE_UPLEVEL
+            remote_trust_info.trust_direction = 0
+            if trust_direction == "both":
+                remote_trust_info.trust_direction |= lsa.LSA_TRUST_DIRECTION_INBOUND
+                remote_trust_info.trust_direction |= lsa.LSA_TRUST_DIRECTION_OUTBOUND
+            elif trust_direction == "incoming":
+                remote_trust_info.trust_direction |= lsa.LSA_TRUST_DIRECTION_OUTBOUND
+            elif trust_direction == "outgoing":
+                remote_trust_info.trust_direction |= lsa.LSA_TRUST_DIRECTION_INBOUND
+            remote_trust_info.trust_attributes = 0
+            if cross_organisation:
+                remote_trust_info.trust_attributes |= lsa.LSA_TRUST_ATTRIBUTE_CROSS_ORGANIZATION
+            if quarantined:
+                remote_trust_info.trust_attributes |= lsa.LSA_TRUST_ATTRIBUTE_QUARANTINED_DOMAIN
+            if trust_type == "forest":
+                remote_trust_info.trust_attributes |= lsa.LSA_TRUST_ATTRIBUTE_FOREST_TRANSITIVE
+            if not_transitive:
+                remote_trust_info.trust_attributes |= lsa.LSA_TRUST_ATTRIBUTE_NON_TRANSITIVE
+            if treat_as_external:
+                remote_trust_info.trust_attributes |= lsa.LSA_TRUST_ATTRIBUTE_TREAT_AS_EXTERNAL
+
+        local_server = self.setup_local_server(sambaopts, localdcopts)
+        try:
+            local_lsa = self.new_local_lsa_connection()
+        except RuntimeError as error:
+            raise self.LocalRuntimeError(self, error, "failed to connect lsa server")
+
+        try:
+            (local_policy, local_lsa_info) = self.get_lsa_info(local_lsa, local_policy_access)
+        except RuntimeError as error:
+            raise self.LocalRuntimeError(self, error, "failed to query LSA_POLICY_INFO_DNS")
+
+        self.outf.write("LocalDomain Netbios[%s] DNS[%s] SID[%s]\n" % (
+                        local_lsa_info.name.string,
+                        local_lsa_info.dns_domain.string,
+                        local_lsa_info.sid))
+
+        try:
+            remote_server = self.setup_remote_server(credopts, domain)
+        except RuntimeError as error:
+            raise self.RemoteRuntimeError(self, error, "failed to locate remote server")
+
+        try:
+            remote_lsa = self.new_remote_lsa_connection()
+        except RuntimeError as error:
+            raise self.RemoteRuntimeError(self, error, "failed to connect lsa server")
+
+        try:
+            (remote_policy, remote_lsa_info) = self.get_lsa_info(remote_lsa, remote_policy_access)
+        except RuntimeError as error:
+            raise self.RemoteRuntimeError(self, error, "failed to query LSA_POLICY_INFO_DNS")
+
+        self.outf.write("RemoteDomain Netbios[%s] DNS[%s] SID[%s]\n" % (
+                        remote_lsa_info.name.string,
+                        remote_lsa_info.dns_domain.string,
+                        remote_lsa_info.sid))
+
+        # Fill in the identity of the opposite side on each TDO.
+        local_trust_info.domain_name.string = remote_lsa_info.dns_domain.string
+        local_trust_info.netbios_name.string = remote_lsa_info.name.string
+        local_trust_info.sid = remote_lsa_info.sid
+
+        if remote_trust_info:
+            remote_trust_info.domain_name.string = local_lsa_info.dns_domain.string
+            remote_trust_info.netbios_name.string = local_lsa_info.name.string
+            remote_trust_info.sid = local_lsa_info.sid
+
+        # Refuse to create a trust if a TDO already exists (checked under
+        # both the DNS and the NetBIOS name, on each side involved).
+        # NOTE(review): the error message text "already exist'" carries a
+        # stray trailing apostrophe — left as-is here since runtime strings
+        # may not change in a documentation-only update.
+        try:
+            lsaString.string = local_trust_info.domain_name.string
+            local_lsa.QueryTrustedDomainInfoByName(local_policy,
+                                                   lsaString,
+                                                   lsa.LSA_TRUSTED_DOMAIN_INFO_FULL_INFO)
+            raise CommandError("TrustedDomain %s already exist'" % lsaString.string)
+        except NTSTATUSError as error:
+            if not self.check_runtime_error(error, ntstatus.NT_STATUS_OBJECT_NAME_NOT_FOUND):
+                raise self.LocalRuntimeError(self, error,
+                                             "QueryTrustedDomainInfoByName(%s, FULL_INFO) failed" % (
+                                             lsaString.string))
+
+        try:
+            lsaString.string = local_trust_info.netbios_name.string
+            local_lsa.QueryTrustedDomainInfoByName(local_policy,
+                                                   lsaString,
+                                                   lsa.LSA_TRUSTED_DOMAIN_INFO_FULL_INFO)
+            raise CommandError("TrustedDomain %s already exist'" % lsaString.string)
+        except NTSTATUSError as error:
+            if not self.check_runtime_error(error, ntstatus.NT_STATUS_OBJECT_NAME_NOT_FOUND):
+                raise self.LocalRuntimeError(self, error,
+                                             "QueryTrustedDomainInfoByName(%s, FULL_INFO) failed" % (
+                                             lsaString.string))
+
+        if remote_trust_info:
+            try:
+                lsaString.string = remote_trust_info.domain_name.string
+                remote_lsa.QueryTrustedDomainInfoByName(remote_policy,
+                                                        lsaString,
+                                                        lsa.LSA_TRUSTED_DOMAIN_INFO_FULL_INFO)
+                raise CommandError("TrustedDomain %s already exist'" % lsaString.string)
+            except NTSTATUSError as error:
+                if not self.check_runtime_error(error, ntstatus.NT_STATUS_OBJECT_NAME_NOT_FOUND):
+                    raise self.RemoteRuntimeError(self, error,
+                                                  "QueryTrustedDomainInfoByName(%s, FULL_INFO) failed" % (
+                                                  lsaString.string))
+
+            try:
+                lsaString.string = remote_trust_info.netbios_name.string
+                remote_lsa.QueryTrustedDomainInfoByName(remote_policy,
+                                                        lsaString,
+                                                        lsa.LSA_TRUSTED_DOMAIN_INFO_FULL_INFO)
+                raise CommandError("TrustedDomain %s already exist'" % lsaString.string)
+            except NTSTATUSError as error:
+                if not self.check_runtime_error(error, ntstatus.NT_STATUS_OBJECT_NAME_NOT_FOUND):
+                    raise self.RemoteRuntimeError(self, error,
+                                                  "QueryTrustedDomainInfoByName(%s, FULL_INFO) failed" % (
+                                                  lsaString.string))
+
+        # Netlogon connections are needed later for forest trust info and
+        # for trust validation.
+        try:
+            local_netlogon = self.new_local_netlogon_connection()
+        except RuntimeError as error:
+            raise self.LocalRuntimeError(self, error, "failed to connect netlogon server")
+
+        try:
+            local_netlogon_info = self.get_netlogon_dc_info(local_netlogon, local_server)
+        except RuntimeError as error:
+            raise self.LocalRuntimeError(self, error, "failed to get netlogon dc info")
+
+        if remote_trust_info:
+            try:
+                remote_netlogon = self.new_remote_netlogon_connection()
+            except RuntimeError as error:
+                raise self.RemoteRuntimeError(self, error, "failed to connect netlogon server")
+
+            try:
+                remote_netlogon_dc_unc = self.get_netlogon_dc_unc(remote_netlogon,
+                                                                  remote_server, domain)
+            except RuntimeError as error:
+                raise self.RemoteRuntimeError(self, error, "failed to get netlogon dc info")
+
+        def generate_AuthInOutBlob(secret, update_time):
+            # Wrap a clear-text trust secret into a trustAuthInOutBlob;
+            # an empty blob (count=0) is produced when there is no secret
+            # for that direction.
+            if secret is None:
+                blob = drsblobs.trustAuthInOutBlob()
+                blob.count = 0
+
+                return blob
+
+            clear = drsblobs.AuthInfoClear()
+            clear.size = len(secret)
+            clear.password = secret
+
+            info = drsblobs.AuthenticationInformation()
+            info.LastUpdateTime = samba.unix2nttime(update_time)
+            info.AuthType = lsa.TRUST_AUTH_TYPE_CLEAR
+            info.AuthInfo = clear
+
+            array = drsblobs.AuthenticationInformationArray()
+            array.count = 1
+            array.array = [info]
+
+            blob = drsblobs.trustAuthInOutBlob()
+            blob.count = 1
+            blob.current = array
+
+            return blob
+
+        update_time = samba.current_unix_time()
+        incoming_blob = generate_AuthInOutBlob(incoming_secret, update_time)
+        outgoing_blob = generate_AuthInOutBlob(outgoing_secret, update_time)
+
+        local_tdo_handle = None
+        remote_tdo_handle = None
+
+        # Create the TDO(s); on any failure delete whatever was created so
+        # far so we do not leave a half-configured trust behind.
+        # current_request tracks which call failed for the error report.
+        try:
+            if remote_trust_info:
+                self.outf.write("Creating remote TDO.\n")
+                current_request = {"location": "remote", "name": "CreateTrustedDomainEx2"}
+                remote_tdo_handle = CreateTrustedDomainRelax(remote_lsa,
+                                                             remote_policy,
+                                                             remote_trust_info,
+                                                             lsa.LSA_TRUSTED_DOMAIN_ALL_ACCESS,
+                                                             outgoing_blob,
+                                                             incoming_blob)
+                self.outf.write("Remote TDO created.\n")
+                if enc_types:
+                    self.outf.write("Setting supported encryption types on remote TDO.\n")
+                    current_request = {"location": "remote", "name": "SetInformationTrustedDomain"}
+                    remote_lsa.SetInformationTrustedDomain(remote_tdo_handle,
+                                                           lsa.LSA_TRUSTED_DOMAIN_SUPPORTED_ENCRYPTION_TYPES,
+                                                           enc_types)
+
+            self.outf.write("Creating local TDO.\n")
+            current_request = {"location": "local", "name": "CreateTrustedDomainEx2"}
+            local_tdo_handle = CreateTrustedDomainRelax(local_lsa,
+                                                        local_policy,
+                                                        local_trust_info,
+                                                        lsa.LSA_TRUSTED_DOMAIN_ALL_ACCESS,
+                                                        incoming_blob,
+                                                        outgoing_blob)
+            self.outf.write("Local TDO created\n")
+            if enc_types:
+                self.outf.write("Setting supported encryption types on local TDO.\n")
+                current_request = {"location": "local", "name": "SetInformationTrustedDomain"}
+                local_lsa.SetInformationTrustedDomain(local_tdo_handle,
+                                                      lsa.LSA_TRUSTED_DOMAIN_SUPPORTED_ENCRYPTION_TYPES,
+                                                      enc_types)
+        except RuntimeError as error:
+            self.outf.write("Error: %s failed %sly - cleaning up\n" % (
+                            current_request['name'], current_request['location']))
+            if remote_tdo_handle:
+                self.outf.write("Deleting remote TDO.\n")
+                remote_lsa.DeleteObject(remote_tdo_handle)
+                remote_tdo_handle = None
+            if local_tdo_handle:
+                self.outf.write("Deleting local TDO.\n")
+                local_lsa.DeleteObject(local_tdo_handle)
+                local_tdo_handle = None
+            if current_request['location'] == "remote":
+                raise self.RemoteRuntimeError(self, error, "%s" % (
+                                              current_request['name']))
+            raise self.LocalRuntimeError(self, error, "%s" % (
+                                         current_request['name']))
+
+        if validate:
+            if local_trust_info.trust_attributes & lsa.LSA_TRUST_ATTRIBUTE_FOREST_TRANSITIVE:
+                self.outf.write("Setup local forest trust information...\n")
+                try:
+                    # get all information about the remote trust
+                    # this triggers netr_GetForestTrustInformation to the remote domain
+                    # and lsaRSetForestTrustInformation() locally, but new top level
+                    # names are disabled by default.
+                    local_forest_info = \
+                        local_netlogon.netr_DsRGetForestTrustInformation(local_netlogon_info.dc_unc,
+                                                                         remote_lsa_info.dns_domain.string,
+                                                                         netlogon.DS_GFTI_UPDATE_TDO)
+                except RuntimeError as error:
+                    raise self.LocalRuntimeError(self, error, "netr_DsRGetForestTrustInformation() failed")
+
+                try:
+                    # here we try to enable all top level names
+                    local_forest_collision = \
+                        local_lsa.lsaRSetForestTrustInformation(local_policy,
+                                                                remote_lsa_info.dns_domain,
+                                                                lsa.LSA_FOREST_TRUST_DOMAIN_INFO,
+                                                                local_forest_info,
+                                                                0)
+                except RuntimeError as error:
+                    raise self.LocalRuntimeError(self, error, "lsaRSetForestTrustInformation() failed")
+
+                self.write_forest_trust_info(local_forest_info,
+                                             tln=remote_lsa_info.dns_domain.string,
+                                             collisions=local_forest_collision)
+
+                if remote_trust_info:
+                    self.outf.write("Setup remote forest trust information...\n")
+                    try:
+                        # get all information about the local trust (from the perspective of the remote domain)
+                        # this triggers netr_GetForestTrustInformation to our domain.
+                        # and lsaRSetForestTrustInformation() remotely, but new top level
+                        # names are disabled by default.
+                        remote_forest_info = \
+                            remote_netlogon.netr_DsRGetForestTrustInformation(remote_netlogon_dc_unc,
+                                                                              local_lsa_info.dns_domain.string,
+                                                                              netlogon.DS_GFTI_UPDATE_TDO)
+                    except RuntimeError as error:
+                        raise self.RemoteRuntimeError(self, error, "netr_DsRGetForestTrustInformation() failed")
+
+                    try:
+                        # here we try to enable all top level names
+                        remote_forest_collision = \
+                            remote_lsa.lsaRSetForestTrustInformation(remote_policy,
+                                                                     local_lsa_info.dns_domain,
+                                                                     lsa.LSA_FOREST_TRUST_DOMAIN_INFO,
+                                                                     remote_forest_info,
+                                                                     0)
+                    except RuntimeError as error:
+                        raise self.RemoteRuntimeError(self, error, "lsaRSetForestTrustInformation() failed")
+
+                    self.write_forest_trust_info(remote_forest_info,
+                                                 tln=local_lsa_info.dns_domain.string,
+                                                 collisions=remote_forest_collision)
+
+            if local_trust_info.trust_direction & lsa.LSA_TRUST_DIRECTION_OUTBOUND:
+                self.outf.write("Validating outgoing trust...\n")
+                try:
+                    local_trust_verify = local_netlogon.netr_LogonControl2Ex(local_netlogon_info.dc_unc,
+                                                                             netlogon.NETLOGON_CONTROL_TC_VERIFY,
+                                                                             2,
+                                                                             remote_lsa_info.dns_domain.string)
+                except RuntimeError as error:
+                    raise self.LocalRuntimeError(self, error, "NETLOGON_CONTROL_TC_VERIFY failed")
+
+                local_trust_status = self._uint32(local_trust_verify.pdc_connection_status[0])
+                local_conn_status = self._uint32(local_trust_verify.tc_connection_status[0])
+
+                if local_trust_verify.flags & netlogon.NETLOGON_VERIFY_STATUS_RETURNED:
+                    local_validation = "LocalValidation: DC[%s] CONNECTION[%s] TRUST[%s] VERIFY_STATUS_RETURNED" % (
+                                       local_trust_verify.trusted_dc_name,
+                                       local_trust_verify.tc_connection_status[1],
+                                       local_trust_verify.pdc_connection_status[1])
+                else:
+                    local_validation = "LocalValidation: DC[%s] CONNECTION[%s] TRUST[%s]" % (
+                                       local_trust_verify.trusted_dc_name,
+                                       local_trust_verify.tc_connection_status[1],
+                                       local_trust_verify.pdc_connection_status[1])
+
+                if local_trust_status != werror.WERR_SUCCESS or local_conn_status != werror.WERR_SUCCESS:
+                    raise CommandError(local_validation)
+                else:
+                    self.outf.write("OK: %s\n" % local_validation)
+
+            if remote_trust_info:
+                if remote_trust_info.trust_direction & lsa.LSA_TRUST_DIRECTION_OUTBOUND:
+                    self.outf.write("Validating incoming trust...\n")
+                    try:
+                        remote_trust_verify = \
+                            remote_netlogon.netr_LogonControl2Ex(remote_netlogon_dc_unc,
+                                                                 netlogon.NETLOGON_CONTROL_TC_VERIFY,
+                                                                 2,
+                                                                 local_lsa_info.dns_domain.string)
+                    except RuntimeError as error:
+                        raise self.RemoteRuntimeError(self, error, "NETLOGON_CONTROL_TC_VERIFY failed")
+
+                    remote_trust_status = self._uint32(remote_trust_verify.pdc_connection_status[0])
+                    remote_conn_status = self._uint32(remote_trust_verify.tc_connection_status[0])
+
+                    if remote_trust_verify.flags & netlogon.NETLOGON_VERIFY_STATUS_RETURNED:
+                        remote_validation = "RemoteValidation: DC[%s] CONNECTION[%s] TRUST[%s] VERIFY_STATUS_RETURNED" % (
+                                           remote_trust_verify.trusted_dc_name,
+                                           remote_trust_verify.tc_connection_status[1],
+                                           remote_trust_verify.pdc_connection_status[1])
+                    else:
+                        remote_validation = "RemoteValidation: DC[%s] CONNECTION[%s] TRUST[%s]" % (
+                                           remote_trust_verify.trusted_dc_name,
+                                           remote_trust_verify.tc_connection_status[1],
+                                           remote_trust_verify.pdc_connection_status[1])
+
+                    if remote_trust_status != werror.WERR_SUCCESS or remote_conn_status != werror.WERR_SUCCESS:
+                        raise CommandError(remote_validation)
+                    else:
+                        self.outf.write("OK: %s\n" % remote_validation)
+
+        # Best-effort close of the TDO handles; failures here are ignored
+        # since the trust has already been created successfully.
+        if remote_tdo_handle is not None:
+            try:
+                remote_lsa.Close(remote_tdo_handle)
+            except RuntimeError:
+                pass
+            remote_tdo_handle = None
+        if local_tdo_handle is not None:
+            try:
+                local_lsa.Close(local_tdo_handle)
+            except RuntimeError:
+                pass
+            local_tdo_handle = None
+
+        self.outf.write("Success.\n")
+        return
+
+
+class cmd_domain_trust_delete(DomainTrustCommand):
+    """Delete a domain trust."""
+
+    synopsis = "%prog DOMAIN [options]"
+
+    takes_optiongroups = {
+        "sambaopts": options.SambaOptions,
+        "versionopts": options.VersionOptions,
+        "credopts": options.CredentialsOptions,
+        "localdcopts": LocalDCCredentialsOptions,
+    }
+
+    takes_options = [
+        Option("--delete-location", type="choice", metavar="LOCATION",
+               choices=["local", "both"],
+               help="Where to delete the trusted domain object: 'local' or 'both'.",
+               dest='delete_location',
+               default="both"),
+    ]
+
+    takes_args = ["domain"]
+
+    def run(self, domain, sambaopts=None, localdcopts=None, credopts=None, versionopts=None,
+            delete_location=None):
+        """Delete the trusted domain object for *domain*.
+
+        With --delete-location=both (the default) the matching TDO on the
+        remote side is located, cross-checked against the local TDO and
+        deleted as well; a failure to delete the remote TDO is reported but
+        not fatal.  Raises CommandError if no local TDO exists or if the
+        local and remote TDOs are inconsistent.
+        """
+        local_policy_access = lsa.LSA_POLICY_VIEW_LOCAL_INFORMATION
+        local_policy_access |= lsa.LSA_POLICY_TRUST_ADMIN
+        local_policy_access |= lsa.LSA_POLICY_CREATE_SECRET
+
+        # remote_policy_access is None when only the local TDO is deleted.
+        if delete_location == "local":
+            remote_policy_access = None
+        else:
+            remote_policy_access = lsa.LSA_POLICY_VIEW_LOCAL_INFORMATION
+            remote_policy_access |= lsa.LSA_POLICY_TRUST_ADMIN
+            remote_policy_access |= lsa.LSA_POLICY_CREATE_SECRET
+
+        self.setup_local_server(sambaopts, localdcopts)
+        try:
+            local_lsa = self.new_local_lsa_connection()
+        except RuntimeError as error:
+            raise self.LocalRuntimeError(self, error, "failed to connect lsa server")
+
+        try:
+            (local_policy, local_lsa_info) = self.get_lsa_info(local_lsa, local_policy_access)
+        except RuntimeError as error:
+            raise self.LocalRuntimeError(self, error, "failed to query LSA_POLICY_INFO_DNS")
+
+        self.outf.write("LocalDomain Netbios[%s] DNS[%s] SID[%s]\n" % (
+                        local_lsa_info.name.string,
+                        local_lsa_info.dns_domain.string,
+                        local_lsa_info.sid))
+
+        local_tdo_info = None
+        local_tdo_handle = None
+        remote_tdo_info = None
+        remote_tdo_handle = None
+
+        # The local TDO must exist, otherwise there is nothing to delete.
+        lsaString = lsa.String()
+        try:
+            lsaString.string = domain
+            local_tdo_info = local_lsa.QueryTrustedDomainInfoByName(local_policy,
+                                                                    lsaString, lsa.LSA_TRUSTED_DOMAIN_INFO_INFO_EX)
+        except NTSTATUSError as error:
+            if self.check_runtime_error(error, ntstatus.NT_STATUS_OBJECT_NAME_NOT_FOUND):
+                raise CommandError("Failed to find trust for domain '%s'" % domain)
+            raise self.RemoteRuntimeError(self, error, "failed to locate remote server")
+
+        if remote_policy_access is not None:
+            try:
+                self.setup_remote_server(credopts, domain)
+            except RuntimeError as error:
+                raise self.RemoteRuntimeError(self, error, "failed to locate remote server")
+
+            try:
+                remote_lsa = self.new_remote_lsa_connection()
+            except RuntimeError as error:
+                raise self.RemoteRuntimeError(self, error, "failed to connect lsa server")
+
+            try:
+                (remote_policy, remote_lsa_info) = self.get_lsa_info(remote_lsa, remote_policy_access)
+            except RuntimeError as error:
+                raise self.RemoteRuntimeError(self, error, "failed to query LSA_POLICY_INFO_DNS")
+
+            self.outf.write("RemoteDomain Netbios[%s] DNS[%s] SID[%s]\n" % (
+                            remote_lsa_info.name.string,
+                            remote_lsa_info.dns_domain.string,
+                            remote_lsa_info.sid))
+
+            # Sanity check: the local TDO must actually describe the remote
+            # domain we connected to (SID, NetBIOS and DNS name all match).
+            if remote_lsa_info.sid != local_tdo_info.sid or \
+               remote_lsa_info.name.string != local_tdo_info.netbios_name.string or \
+               remote_lsa_info.dns_domain.string != local_tdo_info.domain_name.string:
+                raise CommandError("LocalTDO inconsistent: Netbios[%s] DNS[%s] SID[%s]" % (
+                                   local_tdo_info.netbios_name.string,
+                                   local_tdo_info.domain_name.string,
+                                   local_tdo_info.sid))
+
+            # The remote TDO may legitimately be absent (NOT_FOUND is not
+            # an error); when present it must describe the local domain.
+            try:
+                lsaString.string = local_lsa_info.dns_domain.string
+                remote_tdo_info = \
+                    remote_lsa.QueryTrustedDomainInfoByName(remote_policy,
+                                                            lsaString,
+                                                            lsa.LSA_TRUSTED_DOMAIN_INFO_INFO_EX)
+            except NTSTATUSError as error:
+                if not self.check_runtime_error(error, ntstatus.NT_STATUS_OBJECT_NAME_NOT_FOUND):
+                    raise self.RemoteRuntimeError(self, error, "QueryTrustedDomainInfoByName(%s)" % (
+                                                  lsaString.string))
+
+            if remote_tdo_info is not None:
+                if local_lsa_info.sid != remote_tdo_info.sid or \
+                   local_lsa_info.name.string != remote_tdo_info.netbios_name.string or \
+                   local_lsa_info.dns_domain.string != remote_tdo_info.domain_name.string:
+                    raise CommandError("RemoteTDO inconsistent: Netbios[%s] DNS[%s] SID[%s]" % (
+                                       remote_tdo_info.netbios_name.string,
+                                       remote_tdo_info.domain_name.string,
+                                       remote_tdo_info.sid))
+
+        # Delete the local TDO first; failure here aborts the command.
+        if local_tdo_info is not None:
+            try:
+                lsaString.string = local_tdo_info.domain_name.string
+                local_tdo_handle = \
+                    local_lsa.OpenTrustedDomainByName(local_policy,
+                                                      lsaString,
+                                                      security.SEC_STD_DELETE)
+            except RuntimeError as error:
+                raise self.LocalRuntimeError(self, error, "OpenTrustedDomainByName(%s)" % (
+                                             lsaString.string))
+
+            local_lsa.DeleteObject(local_tdo_handle)
+            local_tdo_handle = None
+
+        # Deleting the remote TDO is best-effort: a failure is printed but
+        # does not raise, since the local side is already cleaned up.
+        if remote_tdo_info is not None:
+            try:
+                lsaString.string = remote_tdo_info.domain_name.string
+                remote_tdo_handle = \
+                    remote_lsa.OpenTrustedDomainByName(remote_policy,
+                                                       lsaString,
+                                                       security.SEC_STD_DELETE)
+            except RuntimeError as error:
+                raise self.RemoteRuntimeError(self, error, "OpenTrustedDomainByName(%s)" % (
+                                              lsaString.string))
+
+        if remote_tdo_handle is not None:
+            try:
+                remote_lsa.DeleteObject(remote_tdo_handle)
+                remote_tdo_handle = None
+                self.outf.write("RemoteTDO deleted.\n")
+            except RuntimeError as error:
+                self.outf.write("%s\n" % self.RemoteRuntimeError(self, error, "DeleteObject() failed"))
+
+        return
+
+
class cmd_domain_trust_validate(DomainTrustCommand):
    """Validate a domain trust."""

    synopsis = "%prog DOMAIN [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
        "localdcopts": LocalDCCredentialsOptions,
    }

    takes_options = [
        Option("--validate-location", type="choice", metavar="LOCATION",
               choices=["local", "both"],
               help="Where to validate the trusted domain object: 'local' or 'both'.",
               dest='validate_location',
               default="both"),
    ]

    takes_args = ["domain"]

    def run(self, domain, sambaopts=None, versionopts=None, credopts=None, localdcopts=None,
            validate_location=None):
        """Validate the trust to DOMAIN.

        On the local DC this verifies the trust secret
        (NETLOGON_CONTROL_TC_VERIFY) and forces a rediscovery of the
        trusted DC (NETLOGON_CONTROL_REDISCOVER).  Unless
        --validate-location=local was given, the same two checks are then
        repeated from the remote side against the local domain.

        Raises CommandError if the TDO does not exist or any of the
        verification steps reports a non-WERR_SUCCESS status.
        """

        # Read-only access is enough; we only query LSA information here.
        local_policy_access = lsa.LSA_POLICY_VIEW_LOCAL_INFORMATION

        local_server = self.setup_local_server(sambaopts, localdcopts)
        try:
            local_lsa = self.new_local_lsa_connection()
        except RuntimeError as error:
            raise self.LocalRuntimeError(self, error, "failed to connect lsa server")

        try:
            (local_policy, local_lsa_info) = self.get_lsa_info(local_lsa, local_policy_access)
        except RuntimeError as error:
            raise self.LocalRuntimeError(self, error, "failed to query LSA_POLICY_INFO_DNS")

        self.outf.write("LocalDomain Netbios[%s] DNS[%s] SID[%s]\n" % (
                        local_lsa_info.name.string,
                        local_lsa_info.dns_domain.string,
                        local_lsa_info.sid))

        # Look up the trusted domain object (TDO) for DOMAIN; a missing
        # TDO is reported as a user-level error rather than a raw NTSTATUS.
        try:
            lsaString = lsa.String()
            lsaString.string = domain
            local_tdo_info = \
                local_lsa.QueryTrustedDomainInfoByName(local_policy,
                                                       lsaString,
                                                       lsa.LSA_TRUSTED_DOMAIN_INFO_INFO_EX)
        except NTSTATUSError as error:
            if self.check_runtime_error(error, ntstatus.NT_STATUS_OBJECT_NAME_NOT_FOUND):
                raise CommandError("trusted domain object does not exist for domain [%s]" % domain)

            raise self.LocalRuntimeError(self, error, "QueryTrustedDomainInfoByName(INFO_EX) failed")

        self.outf.write("LocalTDO Netbios[%s] DNS[%s] SID[%s]\n" % (
                        local_tdo_info.netbios_name.string,
                        local_tdo_info.domain_name.string,
                        local_tdo_info.sid))

        try:
            local_netlogon = self.new_local_netlogon_connection()
        except RuntimeError as error:
            raise self.LocalRuntimeError(self, error, "failed to connect netlogon server")

        # Ask the local DC to verify the secure channel to the trusted
        # domain (level 2 returns a NETLOGON_INFO_2 structure).
        try:
            local_trust_verify = \
                local_netlogon.netr_LogonControl2Ex(local_server,
                                                    netlogon.NETLOGON_CONTROL_TC_VERIFY,
                                                    2,
                                                    local_tdo_info.domain_name.string)
        except RuntimeError as error:
            raise self.LocalRuntimeError(self, error, "NETLOGON_CONTROL_TC_VERIFY failed")

        # [0] is the raw WERROR code, [1] its string representation.
        local_trust_status = self._uint32(local_trust_verify.pdc_connection_status[0])
        local_conn_status = self._uint32(local_trust_verify.tc_connection_status[0])

        if local_trust_verify.flags & netlogon.NETLOGON_VERIFY_STATUS_RETURNED:
            local_validation = "LocalValidation: DC[%s] CONNECTION[%s] TRUST[%s] VERIFY_STATUS_RETURNED" % (
                local_trust_verify.trusted_dc_name,
                local_trust_verify.tc_connection_status[1],
                local_trust_verify.pdc_connection_status[1])
        else:
            local_validation = "LocalValidation: DC[%s] CONNECTION[%s] TRUST[%s]" % (
                local_trust_verify.trusted_dc_name,
                local_trust_verify.tc_connection_status[1],
                local_trust_verify.pdc_connection_status[1])

        if local_trust_status != werror.WERR_SUCCESS or local_conn_status != werror.WERR_SUCCESS:
            raise CommandError(local_validation)
        else:
            self.outf.write("OK: %s\n" % local_validation)

        # Force a rediscovery against the specific DC the verify step
        # found ("domain\\server" syntax pins the target DC).
        try:
            server = local_trust_verify.trusted_dc_name.replace('\\', '')
            domain_and_server = "%s\\%s" % (local_tdo_info.domain_name.string, server)
            local_trust_rediscover = \
                local_netlogon.netr_LogonControl2Ex(local_server,
                                                    netlogon.NETLOGON_CONTROL_REDISCOVER,
                                                    2,
                                                    domain_and_server)
        except RuntimeError as error:
            raise self.LocalRuntimeError(self, error, "NETLOGON_CONTROL_REDISCOVER failed")

        local_conn_status = self._uint32(local_trust_rediscover.tc_connection_status[0])
        local_rediscover = "LocalRediscover: DC[%s] CONNECTION[%s]" % (
            local_trust_rediscover.trusted_dc_name,
            local_trust_rediscover.tc_connection_status[1])

        if local_conn_status != werror.WERR_SUCCESS:
            raise CommandError(local_rediscover)
        else:
            self.outf.write("OK: %s\n" % local_rediscover)

        # Optionally repeat verify + rediscover from the remote side,
        # this time naming our own domain as the trust target.
        if validate_location != "local":
            try:
                remote_server = self.setup_remote_server(credopts, domain, require_pdc=False)
            except RuntimeError as error:
                raise self.RemoteRuntimeError(self, error, "failed to locate remote server")

            try:
                remote_netlogon = self.new_remote_netlogon_connection()
            except RuntimeError as error:
                raise self.RemoteRuntimeError(self, error, "failed to connect netlogon server")

            try:
                remote_trust_verify = \
                    remote_netlogon.netr_LogonControl2Ex(remote_server,
                                                         netlogon.NETLOGON_CONTROL_TC_VERIFY,
                                                         2,
                                                         local_lsa_info.dns_domain.string)
            except RuntimeError as error:
                raise self.RemoteRuntimeError(self, error, "NETLOGON_CONTROL_TC_VERIFY failed")

            remote_trust_status = self._uint32(remote_trust_verify.pdc_connection_status[0])
            remote_conn_status = self._uint32(remote_trust_verify.tc_connection_status[0])

            if remote_trust_verify.flags & netlogon.NETLOGON_VERIFY_STATUS_RETURNED:
                remote_validation = "RemoteValidation: DC[%s] CONNECTION[%s] TRUST[%s] VERIFY_STATUS_RETURNED" % (
                    remote_trust_verify.trusted_dc_name,
                    remote_trust_verify.tc_connection_status[1],
                    remote_trust_verify.pdc_connection_status[1])
            else:
                remote_validation = "RemoteValidation: DC[%s] CONNECTION[%s] TRUST[%s]" % (
                    remote_trust_verify.trusted_dc_name,
                    remote_trust_verify.tc_connection_status[1],
                    remote_trust_verify.pdc_connection_status[1])

            if remote_trust_status != werror.WERR_SUCCESS or remote_conn_status != werror.WERR_SUCCESS:
                raise CommandError(remote_validation)
            else:
                self.outf.write("OK: %s\n" % remote_validation)

            try:
                server = remote_trust_verify.trusted_dc_name.replace('\\', '')
                domain_and_server = "%s\\%s" % (local_lsa_info.dns_domain.string, server)
                remote_trust_rediscover = \
                    remote_netlogon.netr_LogonControl2Ex(remote_server,
                                                         netlogon.NETLOGON_CONTROL_REDISCOVER,
                                                         2,
                                                         domain_and_server)
            except RuntimeError as error:
                raise self.RemoteRuntimeError(self, error, "NETLOGON_CONTROL_REDISCOVER failed")

            remote_conn_status = self._uint32(remote_trust_rediscover.tc_connection_status[0])

            remote_rediscover = "RemoteRediscover: DC[%s] CONNECTION[%s]" % (
                remote_trust_rediscover.trusted_dc_name,
                remote_trust_rediscover.tc_connection_status[1])

            if remote_conn_status != werror.WERR_SUCCESS:
                raise CommandError(remote_rediscover)
            else:
                self.outf.write("OK: %s\n" % remote_rediscover)

        return
+
+
class cmd_domain_trust_namespaces(DomainTrustCommand):
    """Manage forest trust namespaces."""

    synopsis = "%prog [DOMAIN] [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "localdcopts": LocalDCCredentialsOptions,
    }

    takes_options = [
        Option("--refresh", type="choice", metavar="check|store",
               choices=["check", "store", None],
               help="List and maybe store refreshed forest trust information: 'check' or 'store'.",
               dest='refresh',
               default=None),
        Option("--enable-all", action="store_true",
               help="Try to update disabled entries, not allowed with --refresh=check.",
               dest='enable_all',
               default=False),
        Option("--enable-tln", action="append", metavar='DNSDOMAIN',
               help="Enable a top level name entry. Can be specified multiple times.",
               dest='enable_tln',
               default=[]),
        Option("--disable-tln", action="append", metavar='DNSDOMAIN',
               help="Disable a top level name entry. Can be specified multiple times.",
               dest='disable_tln',
               default=[]),
        Option("--add-tln-ex", action="append", metavar='DNSDOMAIN',
               help="Add a top level exclusion entry. Can be specified multiple times.",
               dest='add_tln_ex',
               default=[]),
        Option("--delete-tln-ex", action="append", metavar='DNSDOMAIN',
               help="Delete a top level exclusion entry. Can be specified multiple times.",
               dest='delete_tln_ex',
               default=[]),
        Option("--enable-nb", action="append", metavar='NETBIOSDOMAIN',
               help="Enable a netbios name in a domain entry. Can be specified multiple times.",
               dest='enable_nb',
               default=[]),
        Option("--disable-nb", action="append", metavar='NETBIOSDOMAIN',
               help="Disable a netbios name in a domain entry. Can be specified multiple times.",
               dest='disable_nb',
               default=[]),
        Option("--enable-sid", action="append", metavar='DOMAINSID',
               help="Enable a SID in a domain entry. Can be specified multiple times.",
               dest='enable_sid_str',
               default=[]),
        Option("--disable-sid", action="append", metavar='DOMAINSID',
               help="Disable a SID in a domain entry. Can be specified multiple times.",
               dest='disable_sid_str',
               default=[]),
        Option("--add-upn-suffix", action="append", metavar='DNSDOMAIN',
               help="Add a new uPNSuffixes attribute for the local forest. Can be specified multiple times.",
               dest='add_upn',
               default=[]),
        Option("--delete-upn-suffix", action="append", metavar='DNSDOMAIN',
               help="Delete an existing uPNSuffixes attribute of the local forest. Can be specified multiple times.",
               dest='delete_upn',
               default=[]),
        Option("--add-spn-suffix", action="append", metavar='DNSDOMAIN',
               help="Add a new msDS-SPNSuffixes attribute for the local forest. Can be specified multiple times.",
               dest='add_spn',
               default=[]),
        Option("--delete-spn-suffix", action="append", metavar='DNSDOMAIN',
               help="Delete an existing msDS-SPNSuffixes attribute of the local forest. Can be specified multiple times.",
               dest='delete_spn',
               default=[]),
    ]

    takes_args = ["domain?"]

    def run(self, domain=None, sambaopts=None, localdcopts=None, versionopts=None,
            refresh=None, enable_all=False,
            enable_tln=None, disable_tln=None, add_tln_ex=None, delete_tln_ex=None,
            enable_sid_str=None, disable_sid_str=None, enable_nb=None, disable_nb=None,
            add_upn=None, delete_upn=None, add_spn=None, delete_spn=None):
        """Show or modify forest trust namespaces.

        Without DOMAIN this operates on the local forest's own namespaces
        (uPNSuffixes/msDS-SPNSuffixes in the Partitions container).  With
        DOMAIN it operates on the forest trust information stored in the
        FOREST_TRANSITIVE trusted domain object: --refresh re-fetches the
        information from the remote forest, while the enable/disable/add/
        delete options edit individual TLN, TLN-exclusion, netbios and SID
        records.

        Raises CommandError on conflicting options, unknown entries or
        failing RPC/LDAP operations.
        """

        # "append"-type options arrive as None when unset; normalise to
        # lists so length checks and loops below work uniformly.
        enable_tln = enable_tln or []
        disable_tln = disable_tln or []
        add_tln_ex = add_tln_ex or []
        delete_tln_ex = delete_tln_ex or []
        enable_sid_str = enable_sid_str or []
        disable_sid_str = disable_sid_str or []
        enable_nb = enable_nb or []
        disable_nb = disable_nb or []
        add_upn = add_upn or []
        delete_upn = delete_upn or []
        add_spn = add_spn or []
        delete_spn = delete_spn or []

        require_update = False

        # Option sanity checks: the UPN/SPN suffix options only make sense
        # without DOMAIN, all other modifying options require DOMAIN.
        if domain is None:
            if refresh == "store":
                raise CommandError("--refresh=%s not allowed without DOMAIN" % refresh)

            if enable_all:
                raise CommandError("--enable-all not allowed without DOMAIN")

            if len(enable_tln) > 0:
                raise CommandError("--enable-tln not allowed without DOMAIN")
            if len(disable_tln) > 0:
                raise CommandError("--disable-tln not allowed without DOMAIN")

            if len(add_tln_ex) > 0:
                raise CommandError("--add-tln-ex not allowed without DOMAIN")
            if len(delete_tln_ex) > 0:
                raise CommandError("--delete-tln-ex not allowed without DOMAIN")

            if len(enable_nb) > 0:
                raise CommandError("--enable-nb not allowed without DOMAIN")
            if len(disable_nb) > 0:
                raise CommandError("--disable-nb not allowed without DOMAIN")

            if len(enable_sid_str) > 0:
                raise CommandError("--enable-sid not allowed without DOMAIN")
            if len(disable_sid_str) > 0:
                raise CommandError("--disable-sid not allowed without DOMAIN")

            if len(add_upn) > 0:
                for n in add_upn:
                    if not n.startswith("*."):
                        continue
                    raise CommandError("value[%s] specified for --add-upn-suffix should not include with '*.'" % n)
                require_update = True
            if len(delete_upn) > 0:
                for n in delete_upn:
                    if not n.startswith("*."):
                        continue
                    raise CommandError("value[%s] specified for --delete-upn-suffix should not include with '*.'" % n)
                require_update = True
            for a in add_upn:
                for d in delete_upn:
                    if a.lower() != d.lower():
                        continue
                    raise CommandError("value[%s] specified for --add-upn-suffix and --delete-upn-suffix" % a)

            if len(add_spn) > 0:
                for n in add_spn:
                    if not n.startswith("*."):
                        continue
                    raise CommandError("value[%s] specified for --add-spn-suffix should not include with '*.'" % n)
                require_update = True
            if len(delete_spn) > 0:
                for n in delete_spn:
                    if not n.startswith("*."):
                        continue
                    raise CommandError("value[%s] specified for --delete-spn-suffix should not include with '*.'" % n)
                require_update = True
            for a in add_spn:
                for d in delete_spn:
                    if a.lower() != d.lower():
                        continue
                    raise CommandError("value[%s] specified for --add-spn-suffix and --delete-spn-suffix" % a)
        else:
            if len(add_upn) > 0:
                raise CommandError("--add-upn-suffix not allowed together with DOMAIN")
            if len(delete_upn) > 0:
                raise CommandError("--delete-upn-suffix not allowed together with DOMAIN")
            if len(add_spn) > 0:
                raise CommandError("--add-spn-suffix not allowed together with DOMAIN")
            if len(delete_spn) > 0:
                raise CommandError("--delete-spn-suffix not allowed together with DOMAIN")

        # --refresh excludes the fine-grained editing options; --enable-all
        # is only valid together with --refresh=store (or without --refresh).
        if refresh is not None:
            if refresh == "store":
                require_update = True

            if enable_all and refresh != "store":
                raise CommandError("--enable-all not allowed together with --refresh=%s" % refresh)

            if len(enable_tln) > 0:
                raise CommandError("--enable-tln not allowed together with --refresh")
            if len(disable_tln) > 0:
                raise CommandError("--disable-tln not allowed together with --refresh")

            if len(add_tln_ex) > 0:
                raise CommandError("--add-tln-ex not allowed together with --refresh")
            if len(delete_tln_ex) > 0:
                raise CommandError("--delete-tln-ex not allowed together with --refresh")

            if len(enable_nb) > 0:
                raise CommandError("--enable-nb not allowed together with --refresh")
            if len(disable_nb) > 0:
                raise CommandError("--disable-nb not allowed together with --refresh")

            if len(enable_sid_str) > 0:
                raise CommandError("--enable-sid not allowed together with --refresh")
            if len(disable_sid_str) > 0:
                raise CommandError("--disable-sid not allowed together with --refresh")
        else:
            if enable_all:
                require_update = True

                if len(enable_tln) > 0:
                    raise CommandError("--enable-tln not allowed together with --enable-all")

                if len(enable_nb) > 0:
                    raise CommandError("--enable-nb not allowed together with --enable-all")

                if len(enable_sid_str) > 0:
                    raise CommandError("--enable-sid not allowed together with --enable-all")

            if len(enable_tln) > 0:
                require_update = True
            if len(disable_tln) > 0:
                require_update = True
            for e in enable_tln:
                for d in disable_tln:
                    if e.lower() != d.lower():
                        continue
                    raise CommandError("value[%s] specified for --enable-tln and --disable-tln" % e)

            if len(add_tln_ex) > 0:
                for n in add_tln_ex:
                    if not n.startswith("*."):
                        continue
                    raise CommandError("value[%s] specified for --add-tln-ex should not include with '*.'" % n)
                require_update = True
            if len(delete_tln_ex) > 0:
                for n in delete_tln_ex:
                    if not n.startswith("*."):
                        continue
                    raise CommandError("value[%s] specified for --delete-tln-ex should not include with '*.'" % n)
                require_update = True
            for a in add_tln_ex:
                for d in delete_tln_ex:
                    if a.lower() != d.lower():
                        continue
                    raise CommandError("value[%s] specified for --add-tln-ex and --delete-tln-ex" % a)

            if len(enable_nb) > 0:
                require_update = True
            if len(disable_nb) > 0:
                require_update = True
            for e in enable_nb:
                for d in disable_nb:
                    if e.upper() != d.upper():
                        continue
                    raise CommandError("value[%s] specified for --enable-nb and --disable-nb" % e)

            enable_sid = []
            for s in enable_sid_str:
                try:
                    sid = security.dom_sid(s)
                except (ValueError, TypeError):
                    raise CommandError("value[%s] specified for --enable-sid is not a valid SID" % s)
                enable_sid.append(sid)
            disable_sid = []
            for s in disable_sid_str:
                try:
                    sid = security.dom_sid(s)
                except (ValueError, TypeError):
                    raise CommandError("value[%s] specified for --disable-sid is not a valid SID" % s)
                disable_sid.append(sid)
            if len(enable_sid) > 0:
                require_update = True
            if len(disable_sid) > 0:
                require_update = True
            for e in enable_sid:
                for d in disable_sid:
                    if e != d:
                        continue
                    raise CommandError("value[%s] specified for --enable-sid and --disable-sid" % e)

        local_policy_access = lsa.LSA_POLICY_VIEW_LOCAL_INFORMATION
        if require_update:
            local_policy_access |= lsa.LSA_POLICY_TRUST_ADMIN

        local_server = self.setup_local_server(sambaopts, localdcopts)
        try:
            local_lsa = self.new_local_lsa_connection()
        except RuntimeError as error:
            raise self.LocalRuntimeError(self, error, "failed to connect lsa server")

        try:
            (local_policy, local_lsa_info) = self.get_lsa_info(local_lsa, local_policy_access)
        except RuntimeError as error:
            raise self.LocalRuntimeError(self, error, "failed to query LSA_POLICY_INFO_DNS")

        self.outf.write("LocalDomain Netbios[%s] DNS[%s] SID[%s]\n" % (
                        local_lsa_info.name.string,
                        local_lsa_info.dns_domain.string,
                        local_lsa_info.sid))

        if domain is None:
            # DOMAIN-less mode: show (and maybe update) the local forest's
            # own trust information and UPN/SPN suffixes.
            try:
                local_netlogon = self.new_local_netlogon_connection()
            except RuntimeError as error:
                raise self.LocalRuntimeError(self, error, "failed to connect netlogon server")

            try:
                local_netlogon_info = self.get_netlogon_dc_info(local_netlogon, local_server)
            except RuntimeError as error:
                raise self.LocalRuntimeError(self, error, "failed to get netlogon dc info")

            if local_netlogon_info.domain_name != local_netlogon_info.forest_name:
                raise CommandError("The local domain [%s] is not the forest root [%s]" % (
                                   local_netlogon_info.domain_name,
                                   local_netlogon_info.forest_name))

            try:
                # get all information about our own forest
                own_forest_info = local_netlogon.netr_DsRGetForestTrustInformation(local_netlogon_info.dc_unc,
                                                                                   None, 0)
            except RuntimeError as error:
                if self.check_runtime_error(error, werror.WERR_RPC_S_PROCNUM_OUT_OF_RANGE):
                    raise CommandError("LOCAL_DC[%s]: netr_DsRGetForestTrustInformation() not supported." % (
                                       local_server))

                if self.check_runtime_error(error, werror.WERR_INVALID_FUNCTION):
                    raise CommandError("LOCAL_DC[%s]: netr_DsRGetForestTrustInformation() not supported." % (
                                       local_server))

                if self.check_runtime_error(error, werror.WERR_NERR_ACFNOTLOADED):
                    raise CommandError("LOCAL_DC[%s]: netr_DsRGetForestTrustInformation() not supported." % (
                                       local_server))

                raise self.LocalRuntimeError(self, error, "netr_DsRGetForestTrustInformation() failed")

            self.outf.write("Own forest trust information...\n")
            self.write_forest_trust_info(own_forest_info,
                                         tln=local_lsa_info.dns_domain.string)

            try:
                local_samdb = self.new_local_ldap_connection()
            except RuntimeError as error:
                raise self.LocalRuntimeError(self, error, "failed to connect to SamDB")

            local_partitions_dn = "CN=Partitions,%s" % str(local_samdb.get_config_basedn())
            attrs = ['uPNSuffixes', 'msDS-SPNSuffixes']
            try:
                msgs = local_samdb.search(base=local_partitions_dn,
                                          scope=ldb.SCOPE_BASE,
                                          expression="(objectClass=crossRefContainer)",
                                          attrs=attrs)
                stored_msg = msgs[0]
            except ldb.LdbError as error:
                raise self.LocalLdbError(self, error, "failed to search partition dn")

            stored_upn_vals = []
            if 'uPNSuffixes' in stored_msg:
                stored_upn_vals.extend(stored_msg['uPNSuffixes'])

            stored_spn_vals = []
            if 'msDS-SPNSuffixes' in stored_msg:
                stored_spn_vals.extend(stored_msg['msDS-SPNSuffixes'])

            self.outf.write("Stored uPNSuffixes attributes[%d]:\n" % len(stored_upn_vals))
            for v in stored_upn_vals:
                self.outf.write("TLN: %-32s DNS[*.%s]\n" % ("", v))
            self.outf.write("Stored msDS-SPNSuffixes attributes[%d]:\n" % len(stored_spn_vals))
            for v in stored_spn_vals:
                self.outf.write("TLN: %-32s DNS[*.%s]\n" % ("", v))

            if not require_update:
                return

            # Apply --add/--delete-upn-suffix and --add/--delete-spn-suffix
            # against the stored values, rejecting no-op requests.
            replace_upn = False
            update_upn_vals = []
            update_upn_vals.extend(stored_upn_vals)

            replace_spn = False
            update_spn_vals = []
            update_spn_vals.extend(stored_spn_vals)

            for upn in add_upn:
                for v in update_upn_vals:
                    if str(v).lower() == upn.lower():
                        raise CommandError("Entry already present for "
                                           "value[%s] specified for "
                                           "--add-upn-suffix" % upn)
                update_upn_vals.append(upn)
                replace_upn = True

            for upn in delete_upn:
                idx = None
                for i, v in enumerate(update_upn_vals):
                    if str(v).lower() != upn.lower():
                        continue
                    idx = i
                    break
                if idx is None:
                    raise CommandError("Entry not found for value[%s] specified for --delete-upn-suffix" % upn)

                update_upn_vals.pop(idx)
                replace_upn = True

            for spn in add_spn:
                for v in update_spn_vals:
                    if str(v).lower() == spn.lower():
                        raise CommandError("Entry already present for "
                                           "value[%s] specified for "
                                           "--add-spn-suffix" % spn)
                update_spn_vals.append(spn)
                replace_spn = True

            for spn in delete_spn:
                idx = None
                for i, v in enumerate(update_spn_vals):
                    if str(v).lower() != spn.lower():
                        continue
                    idx = i
                    break
                if idx is None:
                    raise CommandError("Entry not found for value[%s] specified for --delete-spn-suffix" % spn)

                update_spn_vals.pop(idx)
                replace_spn = True

            self.outf.write("Update uPNSuffixes attributes[%d]:\n" % len(update_upn_vals))
            for v in update_upn_vals:
                self.outf.write("TLN: %-32s DNS[*.%s]\n" % ("", v))
            self.outf.write("Update msDS-SPNSuffixes attributes[%d]:\n" % len(update_spn_vals))
            for v in update_spn_vals:
                self.outf.write("TLN: %-32s DNS[*.%s]\n" % ("", v))

            update_msg = ldb.Message()
            update_msg.dn = stored_msg.dn

            if replace_upn:
                update_msg['uPNSuffixes'] = ldb.MessageElement(update_upn_vals,
                                                               ldb.FLAG_MOD_REPLACE,
                                                               'uPNSuffixes')
            if replace_spn:
                update_msg['msDS-SPNSuffixes'] = ldb.MessageElement(update_spn_vals,
                                                                    ldb.FLAG_MOD_REPLACE,
                                                                    'msDS-SPNSuffixes')
            try:
                local_samdb.modify(update_msg)
            except ldb.LdbError as error:
                raise self.LocalLdbError(self, error, "failed to update partition dn")

            try:
                stored_forest_info = local_netlogon.netr_DsRGetForestTrustInformation(local_netlogon_info.dc_unc,
                                                                                      None, 0)
            except RuntimeError as error:
                raise self.LocalRuntimeError(self, error, "netr_DsRGetForestTrustInformation() failed")

            self.outf.write("Stored forest trust information...\n")
            self.write_forest_trust_info(stored_forest_info,
                                         tln=local_lsa_info.dns_domain.string)
            return

        # DOMAIN mode: work on the forest trust information of the TDO.
        try:
            lsaString = lsa.String()
            lsaString.string = domain
            local_tdo_info = \
                local_lsa.QueryTrustedDomainInfoByName(local_policy,
                                                       lsaString,
                                                       lsa.LSA_TRUSTED_DOMAIN_INFO_INFO_EX)
        except NTSTATUSError as error:
            if self.check_runtime_error(error, ntstatus.NT_STATUS_OBJECT_NAME_NOT_FOUND):
                raise CommandError("trusted domain object does not exist for domain [%s]" % domain)

            raise self.LocalRuntimeError(self, error, "QueryTrustedDomainInfoByName(INFO_EX) failed")

        self.outf.write("LocalTDO Netbios[%s] DNS[%s] SID[%s]\n" % (
                        local_tdo_info.netbios_name.string,
                        local_tdo_info.domain_name.string,
                        local_tdo_info.sid))

        if not local_tdo_info.trust_attributes & lsa.LSA_TRUST_ATTRIBUTE_FOREST_TRANSITIVE:
            raise CommandError("trusted domain object for domain [%s] is not marked as FOREST_TRANSITIVE." % domain)

        if refresh is not None:
            try:
                local_netlogon = self.new_local_netlogon_connection()
            except RuntimeError as error:
                raise self.LocalRuntimeError(self, error, "failed to connect netlogon server")

            try:
                local_netlogon_info = self.get_netlogon_dc_info(local_netlogon, local_server)
            except RuntimeError as error:
                raise self.LocalRuntimeError(self, error, "failed to get netlogon dc info")

            lsa_update_check = 1
            if refresh == "store":
                netlogon_update_tdo = netlogon.DS_GFTI_UPDATE_TDO
                if enable_all:
                    # skip the collision check so disabled entries can be
                    # re-enabled in one go
                    lsa_update_check = 0
            else:
                netlogon_update_tdo = 0

            try:
                # get all information about the remote trust
                # this triggers netr_GetForestTrustInformation to the remote domain
                # and lsaRSetForestTrustInformation() locally, but new top level
                # names are disabled by default.
                fresh_forest_info = \
                    local_netlogon.netr_DsRGetForestTrustInformation(local_netlogon_info.dc_unc,
                                                                     local_tdo_info.domain_name.string,
                                                                     netlogon_update_tdo)
            except RuntimeError as error:
                raise self.LocalRuntimeError(self, error, "netr_DsRGetForestTrustInformation() failed")

            try:
                fresh_forest_collision = \
                    local_lsa.lsaRSetForestTrustInformation(local_policy,
                                                            local_tdo_info.domain_name,
                                                            lsa.LSA_FOREST_TRUST_DOMAIN_INFO,
                                                            fresh_forest_info,
                                                            lsa_update_check)
            except RuntimeError as error:
                raise self.LocalRuntimeError(self, error, "lsaRSetForestTrustInformation() failed")

            self.outf.write("Fresh forest trust information...\n")
            self.write_forest_trust_info(fresh_forest_info,
                                         tln=local_tdo_info.domain_name.string,
                                         collisions=fresh_forest_collision)

            if refresh == "store":
                try:
                    lsaString = lsa.String()
                    lsaString.string = local_tdo_info.domain_name.string
                    stored_forest_info = \
                        local_lsa.lsaRQueryForestTrustInformation(local_policy,
                                                                  lsaString,
                                                                  lsa.LSA_FOREST_TRUST_DOMAIN_INFO)
                except RuntimeError as error:
                    raise self.LocalRuntimeError(self, error, "lsaRQueryForestTrustInformation() failed")

                self.outf.write("Stored forest trust information...\n")
                self.write_forest_trust_info(stored_forest_info,
                                             tln=local_tdo_info.domain_name.string)

            return

        #
        # The none --refresh path
        #

        try:
            lsaString = lsa.String()
            lsaString.string = local_tdo_info.domain_name.string
            local_forest_info = \
                local_lsa.lsaRQueryForestTrustInformation(local_policy,
                                                          lsaString,
                                                          lsa.LSA_FOREST_TRUST_DOMAIN_INFO)
        except RuntimeError as error:
            raise self.LocalRuntimeError(self, error, "lsaRQueryForestTrustInformation() failed")

        self.outf.write("Local forest trust information...\n")
        self.write_forest_trust_info(local_forest_info,
                                     tln=local_tdo_info.domain_name.string)

        if not require_update:
            return

        # Build a mutable copy of the stored entries and edit it in place.
        entries = []
        entries.extend(local_forest_info.entries)
        update_forest_info = lsa.ForestTrustInformation()
        update_forest_info.count = len(entries)
        update_forest_info.entries = entries

        if enable_all:
            for r in update_forest_info.entries:
                if r.type != lsa.LSA_FOREST_TRUST_TOP_LEVEL_NAME:
                    continue
                if r.flags == 0:
                    continue
                r.time = 0
                r.flags &= ~lsa.LSA_TLN_DISABLED_MASK
            for r in update_forest_info.entries:
                if r.type != lsa.LSA_FOREST_TRUST_DOMAIN_INFO:
                    continue
                if r.flags == 0:
                    continue
                r.time = 0
                r.flags &= ~lsa.LSA_NB_DISABLED_MASK
                r.flags &= ~lsa.LSA_SID_DISABLED_MASK

        for tln in enable_tln:
            idx = None
            for i, r in enumerate(update_forest_info.entries):
                if r.type != lsa.LSA_FOREST_TRUST_TOP_LEVEL_NAME:
                    continue
                if r.forest_trust_data.string.lower() != tln.lower():
                    continue
                idx = i
                break
            if idx is None:
                raise CommandError("Entry not found for value[%s] specified for --enable-tln" % tln)
            if not update_forest_info.entries[idx].flags & lsa.LSA_TLN_DISABLED_MASK:
                raise CommandError("Entry found for value[%s] specified for --enable-tln is already enabled" % tln)
            update_forest_info.entries[idx].time = 0
            update_forest_info.entries[idx].flags &= ~lsa.LSA_TLN_DISABLED_MASK

        for tln in disable_tln:
            idx = None
            for i, r in enumerate(update_forest_info.entries):
                if r.type != lsa.LSA_FOREST_TRUST_TOP_LEVEL_NAME:
                    continue
                if r.forest_trust_data.string.lower() != tln.lower():
                    continue
                idx = i
                break
            if idx is None:
                raise CommandError("Entry not found for value[%s] specified for --disable-tln" % tln)
            if update_forest_info.entries[idx].flags & lsa.LSA_TLN_DISABLED_ADMIN:
                raise CommandError("Entry found for value[%s] specified for --disable-tln is already disabled" % tln)
            update_forest_info.entries[idx].time = 0
            update_forest_info.entries[idx].flags &= ~lsa.LSA_TLN_DISABLED_MASK
            update_forest_info.entries[idx].flags |= lsa.LSA_TLN_DISABLED_ADMIN

        for tln_ex in add_tln_ex:
            idx = None
            for i, r in enumerate(update_forest_info.entries):
                if r.type != lsa.LSA_FOREST_TRUST_TOP_LEVEL_NAME_EX:
                    continue
                if r.forest_trust_data.string.lower() != tln_ex.lower():
                    continue
                idx = i
                break
            if idx is not None:
                raise CommandError("Entry already present for value[%s] specified for --add-tln-ex" % tln_ex)

            # An exclusion must be a strict subdomain of an existing TLN;
            # the new record is inserted directly after its TLN parent.
            tln_dot = ".%s" % tln_ex.lower()
            idx = None
            for i, r in enumerate(update_forest_info.entries):
                if r.type != lsa.LSA_FOREST_TRUST_TOP_LEVEL_NAME:
                    continue
                r_dot = ".%s" % r.forest_trust_data.string.lower()
                if tln_dot == r_dot:
                    raise CommandError("TLN entry present for value[%s] specified for --add-tln-ex" % tln_ex)
                if not tln_dot.endswith(r_dot):
                    continue
                idx = i
                break

            if idx is None:
                raise CommandError("No TLN parent present for value[%s] specified for --add-tln-ex" % tln_ex)

            r = lsa.ForestTrustRecord()
            r.type = lsa.LSA_FOREST_TRUST_TOP_LEVEL_NAME_EX
            r.flags = 0
            r.time = 0
            r.forest_trust_data.string = tln_ex

            entries = []
            entries.extend(update_forest_info.entries)
            entries.insert(idx + 1, r)
            update_forest_info.count = len(entries)
            update_forest_info.entries = entries

        for tln_ex in delete_tln_ex:
            idx = None
            for i, r in enumerate(update_forest_info.entries):
                if r.type != lsa.LSA_FOREST_TRUST_TOP_LEVEL_NAME_EX:
                    continue
                if r.forest_trust_data.string.lower() != tln_ex.lower():
                    continue
                idx = i
                break
            if idx is None:
                raise CommandError("Entry not found for value[%s] specified for --delete-tln-ex" % tln_ex)

            entries = []
            entries.extend(update_forest_info.entries)
            entries.pop(idx)
            update_forest_info.count = len(entries)
            update_forest_info.entries = entries

        for nb in enable_nb:
            idx = None
            for i, r in enumerate(update_forest_info.entries):
                if r.type != lsa.LSA_FOREST_TRUST_DOMAIN_INFO:
                    continue
                if r.forest_trust_data.netbios_domain_name.string.upper() != nb.upper():
                    continue
                idx = i
                break
            if idx is None:
                raise CommandError("Entry not found for value[%s] specified for --enable-nb" % nb)
            if not update_forest_info.entries[idx].flags & lsa.LSA_NB_DISABLED_MASK:
                raise CommandError("Entry found for value[%s] specified for --enable-nb is already enabled" % nb)
            update_forest_info.entries[idx].time = 0
            update_forest_info.entries[idx].flags &= ~lsa.LSA_NB_DISABLED_MASK

        for nb in disable_nb:
            idx = None
            for i, r in enumerate(update_forest_info.entries):
                if r.type != lsa.LSA_FOREST_TRUST_DOMAIN_INFO:
                    continue
                if r.forest_trust_data.netbios_domain_name.string.upper() != nb.upper():
                    continue
                idx = i
                break
            if idx is None:
                # fixed: the message used to name the non-existent --delete-nb
                raise CommandError("Entry not found for value[%s] specified for --disable-nb" % nb)
            if update_forest_info.entries[idx].flags & lsa.LSA_NB_DISABLED_ADMIN:
                raise CommandError("Entry found for value[%s] specified for --disable-nb is already disabled" % nb)
            update_forest_info.entries[idx].time = 0
            update_forest_info.entries[idx].flags &= ~lsa.LSA_NB_DISABLED_MASK
            update_forest_info.entries[idx].flags |= lsa.LSA_NB_DISABLED_ADMIN

        for sid in enable_sid:
            idx = None
            for i, r in enumerate(update_forest_info.entries):
                if r.type != lsa.LSA_FOREST_TRUST_DOMAIN_INFO:
                    continue
                if r.forest_trust_data.domain_sid != sid:
                    continue
                idx = i
                break
            if idx is None:
                raise CommandError("Entry not found for value[%s] specified for --enable-sid" % sid)
            if not update_forest_info.entries[idx].flags & lsa.LSA_SID_DISABLED_MASK:
                # fixed: the message interpolated the stale loop variable 'nb'
                raise CommandError("Entry found for value[%s] specified for --enable-sid is already enabled" % sid)
            update_forest_info.entries[idx].time = 0
            update_forest_info.entries[idx].flags &= ~lsa.LSA_SID_DISABLED_MASK

        for sid in disable_sid:
            idx = None
            for i, r in enumerate(update_forest_info.entries):
                if r.type != lsa.LSA_FOREST_TRUST_DOMAIN_INFO:
                    continue
                if r.forest_trust_data.domain_sid != sid:
                    continue
                idx = i
                break
            if idx is None:
                raise CommandError("Entry not found for value[%s] specified for --delete-sid" % sid)
            if update_forest_info.entries[idx].flags & lsa.LSA_SID_DISABLED_ADMIN:
                # fixed: the message interpolated the stale loop variable 'nb'
                raise CommandError("Entry found for value[%s] specified for --disable-sid is already disabled" % sid)
            update_forest_info.entries[idx].time = 0
            update_forest_info.entries[idx].flags &= ~lsa.LSA_SID_DISABLED_MASK
            update_forest_info.entries[idx].flags |= lsa.LSA_SID_DISABLED_ADMIN

        try:
            update_forest_collision = local_lsa.lsaRSetForestTrustInformation(local_policy,
                                                                              local_tdo_info.domain_name,
                                                                              lsa.LSA_FOREST_TRUST_DOMAIN_INFO,
                                                                              update_forest_info, 0)
        except RuntimeError as error:
            raise self.LocalRuntimeError(self, error, "lsaRSetForestTrustInformation() failed")

        self.outf.write("Updated forest trust information...\n")
        self.write_forest_trust_info(update_forest_info,
                                     tln=local_tdo_info.domain_name.string,
                                     collisions=update_forest_collision)

        # Read back what the server actually stored and show it.
        try:
            lsaString = lsa.String()
            lsaString.string = local_tdo_info.domain_name.string
            stored_forest_info = local_lsa.lsaRQueryForestTrustInformation(local_policy,
                                                                           lsaString,
                                                                           lsa.LSA_FOREST_TRUST_DOMAIN_INFO)
        except RuntimeError as error:
            raise self.LocalRuntimeError(self, error, "lsaRQueryForestTrustInformation() failed")

        self.outf.write("Stored forest trust information...\n")
        self.write_forest_trust_info(stored_forest_info,
                                     tln=local_tdo_info.domain_name.string)
        return
+
+
class cmd_domain_trust(SuperCommand):
    """Domain and forest trust management."""

    # Map each sub-command name to its implementation in one literal.
    subcommands = {
        "list": cmd_domain_trust_list(),
        "show": cmd_domain_trust_show(),
        "create": cmd_domain_trust_create(),
        "modify": cmd_domain_trust_modify(),
        "delete": cmd_domain_trust_delete(),
        "validate": cmd_domain_trust_validate(),
        "namespaces": cmd_domain_trust_namespaces(),
    }
diff --git a/python/samba/netcmd/drs.py b/python/samba/netcmd/drs.py
new file mode 100644
index 0000000..c5a9f48
--- /dev/null
+++ b/python/samba/netcmd/drs.py
@@ -0,0 +1,874 @@
+# implement samba_tool drs commands
+#
+# Copyright Andrew Tridgell 2010
+# Copyright Andrew Bartlett 2017
+#
+# based on C implementation by Kamen Mazdrashki <kamen.mazdrashki@postpath.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import samba.getopt as options
+import ldb
+import logging
+from . import common
+import json
+
+from samba.auth import system_session
+from samba.netcmd import (
+ Command,
+ CommandError,
+ Option,
+ SuperCommand,
+)
+from samba.netcmd.common import attr_default
+from samba.samdb import SamDB
+from samba import drs_utils, nttime2string, dsdb
+from samba.dcerpc import drsuapi, misc
+from samba.join import join_clone
+from samba import colour
+
+from samba.uptodateness import (
+ get_partition_maps,
+ get_utdv_edges,
+ get_utdv_distances,
+ get_utdv_summary,
+ get_kcc_and_dsas,
+)
+from samba.common import get_string
+from samba.samdb import get_default_backend_store
+
+def drsuapi_connect(ctx):
+    """make a DRSUAPI connection to the server
+
+    Reads ctx.server, ctx.lp and ctx.creds, and stores the resulting
+    connection, bind handle and supported-extensions bitmap back on ctx
+    (ctx.drsuapi, ctx.drsuapi_handle, ctx.bind_supported_extensions).
+
+    :raises CommandError: wrapping the underlying failure, if the
+        DRSUAPI bind to ctx.server cannot be established.
+    """
+    try:
+        (ctx.drsuapi, ctx.drsuapi_handle, ctx.bind_supported_extensions) = drs_utils.drsuapi_connect(ctx.server, ctx.lp, ctx.creds)
+    except Exception as e:
+        raise CommandError("DRS connection to %s failed" % ctx.server, e)
+
+
+def samdb_connect(ctx):
+    """make a ldap connection to the server
+
+    Connects to ldap://ctx.server using the system session and the
+    credentials already prepared on ctx, and stores the SamDB handle
+    as ctx.samdb.
+
+    :raises CommandError: wrapping the underlying failure, if the LDAP
+        connection cannot be established.
+    """
+    try:
+        ctx.samdb = SamDB(url="ldap://%s" % ctx.server,
+                          session_info=system_session(),
+                          credentials=ctx.creds, lp=ctx.lp)
+    except Exception as e:
+        raise CommandError("LDAP connection to %s failed" % ctx.server, e)
+
+
+def drs_errmsg(werr):
+    """return "was successful" or an error string
+
+    :param werr: a (code, string) tuple as returned in DRS replication
+        results; code 0 means success.
+    """
+    (ecode, estring) = werr
+    if ecode == 0:
+        return "was successful"
+    return "failed, result %u (%s)" % (ecode, estring)
+
+
+def drs_parse_ntds_dn(ntds_dn):
+    """parse a NTDS DN returning a site and server
+
+    Expects the fixed layout
+    "CN=NTDS Settings,CN=<server>,CN=Servers,CN=<site>,CN=Sites,..."
+    and extracts the <server> and <site> RDN values positionally.
+
+    :raises RuntimeError: if the DN does not match that layout.
+    :returns: (site, server) tuple of strings.
+    """
+    a = ntds_dn.split(',')
+    if a[0] != "CN=NTDS Settings" or a[2] != "CN=Servers" or a[4] != 'CN=Sites':
+        raise RuntimeError("bad NTDS DN %s" % ntds_dn)
+    # a[1] is "CN=<server>", a[3] is "CN=<site>"; keep only the values.
+    server = a[1].split('=')[1]
+    site = a[3].split('=')[1]
+    return (site, server)
+
+
+DEFAULT_SHOWREPL_FORMAT = 'classic'
+
+
+class cmd_drs_showrepl(Command):
+    """Show replication status."""
+
+    synopsis = "%prog [<DC>] [options]"
+
+    takes_optiongroups = {
+        "sambaopts": options.SambaOptions,
+        "versionopts": options.VersionOptions,
+        "credopts": options.CredentialsOptions,
+    }
+
+    # All format options share dest='format'; the last one given wins.
+    takes_options = [
+        Option("--json", help="replication details in JSON format",
+               dest='format', action='store_const', const='json'),
+        Option("--summary", help=("summarize overall DRS health as seen "
+                                  "from this server"),
+               dest='format', action='store_const', const='summary'),
+        Option("--pull-summary", help=("Have we successfully replicated "
+                                       "from all relevant servers?"),
+               dest='format', action='store_const', const='pull_summary'),
+        Option("--notify-summary", action='store_const',
+               const='notify_summary', dest='format',
+               help=("Have we successfully notified all relevant servers of "
+                     "local changes, and did they say they successfully "
+                     "replicated?")),
+        Option("--classic", help="print local replication details",
+               dest='format', action='store_const', const='classic',
+               default=DEFAULT_SHOWREPL_FORMAT),
+        Option("-v", "--verbose", help="Be verbose", action="store_true"),
+    ]
+
+    takes_args = ["DC?"]
+
+    def parse_neighbour(self, n):
+        """Convert an ldb neighbour object into a python dictionary"""
+        dsa_objectguid = str(n.source_dsa_obj_guid)
+        d = {
+            'NC dn': n.naming_context_dn,
+            "DSA objectGUID": dsa_objectguid,
+            "last attempt time": nttime2string(n.last_attempt),
+            "last attempt message": drs_errmsg(n.result_last_attempt),
+            "consecutive failures": n.consecutive_sync_failures,
+            "last success": nttime2string(n.last_success),
+            "NTDS DN": str(n.source_dsa_obj_dn),
+            'is deleted': False
+        }
+
+        # Probe whether the source DSA object still exists; a missing
+        # object marks the neighbour as deleted rather than failing.
+        try:
+            self.samdb.search(base="<GUID=%s>" % dsa_objectguid,
+                              scope=ldb.SCOPE_BASE,
+                              attrs=[])
+        except ldb.LdbError as e:
+            (errno, _) = e.args
+            if errno == ldb.ERR_NO_SUCH_OBJECT:
+                d['is deleted'] = True
+            else:
+                raise
+        # A malformed NTDS DN just means we omit the friendly "DSA" key.
+        try:
+            (site, server) = drs_parse_ntds_dn(n.source_dsa_obj_dn)
+            d["DSA"] = "%s\\%s" % (site, server)
+        except RuntimeError:
+            pass
+        return d
+
+    def print_neighbour(self, d):
+        """print one set of neighbour information"""
+        self.message("%s" % d['NC dn'])
+        if 'DSA' in d:
+            self.message("\t%s via RPC" % d['DSA'])
+        else:
+            self.message("\tNTDS DN: %s" % d['NTDS DN'])
+        self.message("\t\tDSA object GUID: %s" % d['DSA objectGUID'])
+        self.message("\t\tLast attempt @ %s %s" % (d['last attempt time'],
+                                                   d['last attempt message']))
+        self.message("\t\t%u consecutive failure(s)." %
+                     d['consecutive failures'])
+        self.message("\t\tLast success @ %s" % d['last success'])
+        self.message("")
+
+    def get_neighbours(self, info_type):
+        """Fetch replication neighbours of the given DRSUAPI info type
+        and return them as a list of parse_neighbour() dictionaries."""
+        req1 = drsuapi.DsReplicaGetInfoRequest1()
+        req1.info_type = info_type
+        try:
+            (info_type, info) = self.drsuapi.DsReplicaGetInfo(
+                self.drsuapi_handle, 1, req1)
+        except Exception as e:
+            raise CommandError("DsReplicaGetInfo of type %u failed" % info_type, e)
+
+        reps = [self.parse_neighbour(n) for n in info.array]
+        return reps
+
+    def run(self, DC=None, sambaopts=None,
+            credopts=None, versionopts=None,
+            format=DEFAULT_SHOWREPL_FORMAT,
+            verbose=False):
+        self.lp = sambaopts.get_loadparm()
+        if DC is None:
+            DC = common.netcmd_dnsname(self.lp)
+        self.server = DC
+        self.creds = credopts.get_credentials(self.lp, fallback_machine=True)
+        self.verbose = verbose
+
+        # Dispatch table mapping the selected --format value to the
+        # method that renders the output.
+        output_function = {
+            'summary': self.summary_output,
+            'notify_summary': self.notify_summary_output,
+            'pull_summary': self.pull_summary_output,
+            'json': self.json_output,
+            'classic': self.classic_output,
+        }.get(format)
+        if output_function is None:
+            raise CommandError("unknown showrepl format %s" % format)
+
+        return output_function()
+
+    def json_output(self):
+        data = self.get_local_repl_data()
+        # site/server are only used by the classic header line, not the
+        # JSON representation.
+        del data['site']
+        del data['server']
+        json.dump(data, self.outf, indent=2)
+
+    def summary_output_handler(self, typeof_output):
+        """Print a short message if everything seems fine, but print details
+        of any links that seem broken."""
+        failing_repsto = []
+        failing_repsfrom = []
+
+        local_data = self.get_local_repl_data()
+
+        # pull_summary skips outbound (repsTo) checks ...
+        if typeof_output != "pull_summary":
+            for rep in local_data['repsTo']:
+                if rep['is deleted']:
+                    continue
+                if rep["consecutive failures"] != 0 or rep["last success"] == 0:
+                    failing_repsto.append(rep)
+
+        # ... and notify_summary skips inbound (repsFrom) checks.
+        if typeof_output != "notify_summary":
+            for rep in local_data['repsFrom']:
+                if rep['is deleted']:
+                    continue
+                if rep["consecutive failures"] != 0 or rep["last success"] == 0:
+                    failing_repsfrom.append(rep)
+
+        if failing_repsto or failing_repsfrom:
+            self.message(colour.c_RED("There are failing connections"))
+            if failing_repsto:
+                self.message(colour.c_RED("Failing outbound connections:"))
+                for rep in failing_repsto:
+                    self.print_neighbour(rep)
+            if failing_repsfrom:
+                self.message(colour.c_RED("Failing inbound connection:"))
+                for rep in failing_repsfrom:
+                    self.print_neighbour(rep)
+
+            # Non-zero exit status signals failing links to the caller.
+            return 1
+
+        self.message(colour.c_GREEN("[ALL GOOD]"))
+
+    def summary_output(self):
+        return self.summary_output_handler("summary")
+
+    def notify_summary_output(self):
+        return self.summary_output_handler("notify_summary")
+
+    def pull_summary_output(self):
+        return self.summary_output_handler("pull_summary")
+
+    def get_local_repl_data(self):
+        """Collect DSA details, repsFrom/repsTo neighbours and nTDSConnection
+        objects for this server and return them as one dictionary."""
+        drsuapi_connect(self)
+        samdb_connect(self)
+
+        # show domain information
+        ntds_dn = self.samdb.get_dsServiceName()
+
+        (site, server) = drs_parse_ntds_dn(ntds_dn)
+        try:
+            ntds = self.samdb.search(base=ntds_dn, scope=ldb.SCOPE_BASE, attrs=['options', 'objectGUID', 'invocationId'])
+        except Exception as e:
+            # NOTE(review): the caught exception 'e' is not chained into the
+            # CommandError, so the root cause is lost — consider passing it on.
+            raise CommandError("Failed to search NTDS DN %s" % ntds_dn)
+
+        dsa_details = {
+            "options": int(attr_default(ntds[0], "options", 0)),
+            "objectGUID": get_string(self.samdb.schema_format_value(
+                "objectGUID", ntds[0]["objectGUID"][0])),
+            # invocationId is formatted via the "objectGUID" schema syntax,
+            # as both attributes are GUID-valued.
+            "invocationId": get_string(self.samdb.schema_format_value(
+                "objectGUID", ntds[0]["invocationId"][0]))
+        }
+
+        conn = self.samdb.search(base=ntds_dn, expression="(objectClass=nTDSConnection)")
+        repsfrom = self.get_neighbours(drsuapi.DRSUAPI_DS_REPLICA_INFO_NEIGHBORS)
+        repsto = self.get_neighbours(drsuapi.DRSUAPI_DS_REPLICA_INFO_REPSTO)
+
+        conn_details = []
+        for c in conn:
+            c_rdn, sep, c_server_dn = str(c['fromServer'][0]).partition(',')
+            d = {
+                'name': str(c['name']),
+                'remote DN': str(c['fromServer'][0]),
+                'options': int(attr_default(c, 'options', 0)),
+                'enabled': (get_string(attr_default(c, 'enabledConnection',
+                                                    'TRUE')).upper() == 'TRUE')
+            }
+
+            conn_details.append(d)
+            # Resolve the peer's DNS name; a vanished server object marks
+            # the connection deleted, a missing attribute is just ignored.
+            try:
+                c_server_res = self.samdb.search(base=c_server_dn,
+                                                 scope=ldb.SCOPE_BASE,
+                                                 attrs=["dnsHostName"])
+                d['dns name'] = str(c_server_res[0]["dnsHostName"][0])
+            except ldb.LdbError as e:
+                (errno, _) = e.args
+                if errno == ldb.ERR_NO_SUCH_OBJECT:
+                    d['is deleted'] = True
+            except (KeyError, IndexError):
+                pass
+
+            # mS-DS-ReplicatesNCReason values look like "B:8:<reason>:<NC>";
+            # keep (NC, reason-bitmask) pairs.
+            d['replicates NC'] = []
+            for r in c.get('mS-DS-ReplicatesNCReason', []):
+                a = str(r).split(':')
+                d['replicates NC'].append((a[3], int(a[2])))
+
+        return {
+            'dsa': dsa_details,
+            'repsFrom': repsfrom,
+            'repsTo': repsto,
+            'NTDSConnections': conn_details,
+            'site': site,
+            'server': server
+        }
+
+    def classic_output(self):
+        """Render the traditional repadmin-style text report."""
+        data = self.get_local_repl_data()
+        dsa_details = data['dsa']
+        repsfrom = data['repsFrom']
+        repsto = data['repsTo']
+        conn_details = data['NTDSConnections']
+        site = data['site']
+        server = data['server']
+
+        self.message("%s\\%s" % (site, server))
+        self.message("DSA Options: 0x%08x" % dsa_details["options"])
+        self.message("DSA object GUID: %s" % dsa_details["objectGUID"])
+        self.message("DSA invocationId: %s\n" % dsa_details["invocationId"])
+
+        self.message("==== INBOUND NEIGHBORS ====\n")
+        for n in repsfrom:
+            self.print_neighbour(n)
+
+        self.message("==== OUTBOUND NEIGHBORS ====\n")
+        for n in repsto:
+            self.print_neighbour(n)
+
+        # Names of NTDSCONN_* reason flags to decode each connection's
+        # mS-DS-ReplicatesNCReason bitmask below.
+        reasons = ['NTDSCONN_KCC_GC_TOPOLOGY',
+                   'NTDSCONN_KCC_RING_TOPOLOGY',
+                   'NTDSCONN_KCC_MINIMIZE_HOPS_TOPOLOGY',
+                   'NTDSCONN_KCC_STALE_SERVERS_TOPOLOGY',
+                   'NTDSCONN_KCC_OSCILLATING_CONNECTION_TOPOLOGY',
+                   'NTDSCONN_KCC_INTERSITE_GC_TOPOLOGY',
+                   'NTDSCONN_KCC_INTERSITE_TOPOLOGY',
+                   'NTDSCONN_KCC_SERVER_FAILOVER_TOPOLOGY',
+                   'NTDSCONN_KCC_SITE_FAILOVER_TOPOLOGY',
+                   'NTDSCONN_KCC_REDUNDANT_SERVER_TOPOLOGY']
+
+        self.message("==== KCC CONNECTION OBJECTS ====\n")
+        for d in conn_details:
+            self.message("Connection --")
+            if d.get('is deleted'):
+                self.message("\tWARNING: Connection to DELETED server!")
+
+            self.message("\tConnection name: %s" % d['name'])
+            self.message("\tEnabled        : %s" % str(d['enabled']).upper())
+            self.message("\tServer DNS name : %s" % d.get('dns name'))
+            self.message("\tServer DN name  : %s" % d['remote DN'])
+            self.message("\t\tTransportType: RPC")
+            self.message("\t\toptions: 0x%08X" % d['options'])
+
+            if d['replicates NC']:
+                for nc, reason in d['replicates NC']:
+                    self.message("\t\tReplicatesNC: %s" % nc)
+                    self.message("\t\tReason: 0x%08x" % reason)
+                    for s in reasons:
+                        if getattr(dsdb, s, 0) & reason:
+                            self.message("\t\t\t%s" % s)
+            else:
+                self.message("Warning: No NC replicated for Connection!")
+
+
+class cmd_drs_kcc(Command):
+    """Trigger knowledge consistency center run."""
+
+    synopsis = "%prog [<DC>] [options]"
+
+    takes_optiongroups = {
+        "sambaopts": options.SambaOptions,
+        "versionopts": options.VersionOptions,
+        "credopts": options.CredentialsOptions,
+    }
+
+    takes_args = ["DC?"]
+
+    def run(self, DC=None, sambaopts=None,
+            credopts=None, versionopts=None):
+
+        self.lp = sambaopts.get_loadparm()
+        # Default to the local DC name when no target is given.
+        if DC is None:
+            DC = common.netcmd_dnsname(self.lp)
+        self.server = DC
+
+        self.creds = credopts.get_credentials(self.lp, fallback_machine=True)
+
+        drsuapi_connect(self)
+
+        # An empty DsExecuteKCC1 request asks the target to run its KCC once.
+        req1 = drsuapi.DsExecuteKCC1()
+        try:
+            self.drsuapi.DsExecuteKCC(self.drsuapi_handle, 1, req1)
+        except Exception as e:
+            raise CommandError("DsExecuteKCC failed", e)
+        self.message("Consistency check on %s successful." % DC)
+
+
+class cmd_drs_replicate(Command):
+    """Replicate a naming context between two DCs."""
+
+    synopsis = "%prog <destinationDC> <sourceDC> <NC> [options]"
+
+    takes_optiongroups = {
+        "sambaopts": options.SambaOptions,
+        "versionopts": options.VersionOptions,
+        "credopts": options.CredentialsOptions,
+    }
+
+    takes_args = ["DEST_DC", "SOURCE_DC", "NC"]
+
+    takes_options = [
+        Option("--add-ref", help="use ADD_REF to add to repsTo on source", action="store_true"),
+        Option("--sync-forced", help="use SYNC_FORCED to force inbound replication", action="store_true"),
+        Option("--sync-all", help="use SYNC_ALL to replicate from all DCs", action="store_true"),
+        Option("--full-sync", help="resync all objects", action="store_true"),
+        Option("--local", help="pull changes directly into the local database (destination DC is ignored)", action="store_true"),
+        Option("--local-online", help="pull changes into the local database (destination DC is ignored) as a normal online replication", action="store_true"),
+        Option("--async-op", help="use ASYNC_OP for the replication", action="store_true"),
+        Option("--single-object", help="Replicate only the object specified, instead of the whole Naming Context (only with --local)", action="store_true"),
+    ]
+
+    def drs_local_replicate(self, SOURCE_DC, NC, full_sync=False,
+                            single_object=False,
+                            sync_forced=False):
+        """replicate from a source DC to the local SAM
+
+        Pulls the naming context NC from SOURCE_DC directly into the
+        local sam.ldb, bypassing the destination DC's replication task.
+        """
+
+        self.server = SOURCE_DC
+        drsuapi_connect(self)
+
+        # Override the default flag LDB_FLG_DONT_CREATE_DB
+        self.local_samdb = SamDB(session_info=system_session(), url=None,
+                                 credentials=self.creds, lp=self.lp,
+                                 flags=0)
+
+        self.samdb = SamDB(url="ldap://%s" % self.server,
+                           session_info=system_session(),
+                           credentials=self.creds, lp=self.lp)
+
+        # work out the source and destination GUIDs
+        res = self.local_samdb.search(base="", scope=ldb.SCOPE_BASE,
+                                      attrs=["dsServiceName"])
+        self.ntds_dn = res[0]["dsServiceName"][0]
+
+        res = self.local_samdb.search(base=self.ntds_dn, scope=ldb.SCOPE_BASE,
+                                      attrs=["objectGUID"])
+        self.ntds_guid = misc.GUID(
+            self.samdb.schema_format_value("objectGUID",
+                                           res[0]["objectGUID"][0]))
+
+        source_dsa_invocation_id = misc.GUID(self.samdb.get_invocation_id())
+        dest_dsa_invocation_id = misc.GUID(self.local_samdb.get_invocation_id())
+        destination_dsa_guid = self.ntds_guid
+
+        exop = drsuapi.DRSUAPI_EXOP_NONE
+
+        # --single-object implies a full (re)fetch of just that object.
+        if single_object:
+            exop = drsuapi.DRSUAPI_EXOP_REPL_OBJ
+            full_sync = True
+
+        # NOTE(review): the transaction is opened on the remote self.samdb
+        # while repl.replicate() writes into self.local_samdb — confirm this
+        # is intended (a transaction on the local DB would seem more natural).
+        self.samdb.transaction_start()
+        repl = drs_utils.drs_Replicate("ncacn_ip_tcp:%s[seal]" % self.server,
+                                       self.lp,
+                                       self.creds, self.local_samdb,
+                                       dest_dsa_invocation_id)
+
+        # Work out if we are an RODC, so that a forced local replicate
+        # with the admin pw does not sync passwords
+        rodc = self.local_samdb.am_rodc()
+        try:
+            (num_objects, num_links) = repl.replicate(NC,
+                                                      source_dsa_invocation_id,
+                                                      destination_dsa_guid,
+                                                      rodc=rodc,
+                                                      full_sync=full_sync,
+                                                      exop=exop,
+                                                      sync_forced=sync_forced)
+        except Exception as e:
+            # NOTE(review): the open transaction is neither committed nor
+            # cancelled on this error path — verify whether a
+            # transaction_cancel() is needed before re-raising.
+            raise CommandError("Error replicating DN %s" % NC, e)
+        self.samdb.transaction_commit()
+
+        if full_sync:
+            self.message("Full Replication of all %d objects and %d links "
+                         "from %s to %s was successful." %
+                         (num_objects, num_links, SOURCE_DC,
+                          self.local_samdb.url))
+        else:
+            self.message("Incremental replication of %d objects and %d links "
+                         "from %s to %s was successful." %
+                         (num_objects, num_links, SOURCE_DC,
+                          self.local_samdb.url))
+
+    def run(self, DEST_DC, SOURCE_DC, NC,
+            add_ref=False, sync_forced=False, sync_all=False, full_sync=False,
+            local=False, local_online=False, async_op=False, single_object=False,
+            sambaopts=None, credopts=None, versionopts=None):
+
+        self.server = DEST_DC
+        self.lp = sambaopts.get_loadparm()
+
+        self.creds = credopts.get_credentials(self.lp, fallback_machine=True)
+
+        # --local pulls straight into the local database and ignores DEST_DC.
+        if local:
+            self.drs_local_replicate(SOURCE_DC, NC, full_sync=full_sync,
+                                     single_object=single_object,
+                                     sync_forced=sync_forced)
+            return
+
+        # --local-online goes through the local dreplsrv task via irpc
+        # instead of making a network DRSUAPI connection.
+        if local_online:
+            server_bind = drsuapi.drsuapi("irpc:dreplsrv", lp_ctx=self.lp)
+            server_bind_handle = misc.policy_handle()
+        else:
+            drsuapi_connect(self)
+            server_bind = self.drsuapi
+            server_bind_handle = self.drsuapi_handle
+
+        if not async_op:
+            # Give the sync replication 5 minutes time
+            server_bind.request_timeout = 5 * 60
+
+        samdb_connect(self)
+
+        # we need to find the NTDS GUID of the source DC
+        msg = self.samdb.search(base=self.samdb.get_config_basedn(),
+                                expression="(&(objectCategory=server)(|(name=%s)(dNSHostName=%s)))" % (
+                                    ldb.binary_encode(SOURCE_DC),
+                                    ldb.binary_encode(SOURCE_DC)),
+                                attrs=[])
+        if len(msg) == 0:
+            raise CommandError("Failed to find source DC %s" % SOURCE_DC)
+        server_dn = msg[0]['dn']
+
+        # The nTDSDSA (or RODC nTDSDSARO) object lives directly below the
+        # server object and carries the DSA GUID and options.
+        msg = self.samdb.search(base=server_dn, scope=ldb.SCOPE_ONELEVEL,
+                                expression="(|(objectCategory=nTDSDSA)(objectCategory=nTDSDSARO))",
+                                attrs=['objectGUID', 'options'])
+        if len(msg) == 0:
+            raise CommandError("Failed to find source NTDS DN %s" % SOURCE_DC)
+        source_dsa_guid = msg[0]['objectGUID'][0]
+        dsa_options = int(attr_default(msg, 'options', 0))
+
+        # Translate the command-line switches into DRSUAPI_DRS_* flags.
+        req_options = 0
+        if not (dsa_options & dsdb.DS_NTDSDSA_OPT_DISABLE_OUTBOUND_REPL):
+            req_options |= drsuapi.DRSUAPI_DRS_WRIT_REP
+        if add_ref:
+            req_options |= drsuapi.DRSUAPI_DRS_ADD_REF
+        if sync_forced:
+            req_options |= drsuapi.DRSUAPI_DRS_SYNC_FORCED
+        if sync_all:
+            req_options |= drsuapi.DRSUAPI_DRS_SYNC_ALL
+        if full_sync:
+            req_options |= drsuapi.DRSUAPI_DRS_FULL_SYNC_NOW
+        if async_op:
+            req_options |= drsuapi.DRSUAPI_DRS_ASYNC_OP
+
+        try:
+            drs_utils.sendDsReplicaSync(server_bind, server_bind_handle, source_dsa_guid, NC, req_options)
+        except drs_utils.drsException as estr:
+            raise CommandError("DsReplicaSync failed", estr)
+        if async_op:
+            self.message("Replicate from %s to %s was started." % (SOURCE_DC, DEST_DC))
+        else:
+            self.message("Replicate from %s to %s was successful." % (SOURCE_DC, DEST_DC))
+
+
+class cmd_drs_bind(Command):
+    """Show DRS capabilities of a server."""
+
+    synopsis = "%prog [<DC>] [options]"
+
+    takes_optiongroups = {
+        "sambaopts": options.SambaOptions,
+        "versionopts": options.VersionOptions,
+        "credopts": options.CredentialsOptions,
+    }
+
+    takes_args = ["DC?"]
+
+    def run(self, DC=None, sambaopts=None,
+            credopts=None, versionopts=None):
+
+        self.lp = sambaopts.get_loadparm()
+        if DC is None:
+            DC = common.netcmd_dnsname(self.lp)
+        self.server = DC
+        self.creds = credopts.get_credentials(self.lp, fallback_machine=True)
+
+        drsuapi_connect(self)
+
+        # Issue an explicit DsBind advertising DsBindInfo28 (length 28)
+        # so the reply reveals which extensions the server supports.
+        bind_info = drsuapi.DsBindInfoCtr()
+        bind_info.length = 28
+        bind_info.info = drsuapi.DsBindInfo28()
+        (info, handle) = self.drsuapi.DsBind(misc.GUID(drsuapi.DRSUAPI_DS_BIND_GUID), bind_info)
+
+        # (constant-name, display-label) pairs for the base extensions
+        # bitmap; missing constants default to 0 via getattr below.
+        optmap = [
+            ("DRSUAPI_SUPPORTED_EXTENSION_BASE", "DRS_EXT_BASE"),
+            ("DRSUAPI_SUPPORTED_EXTENSION_ASYNC_REPLICATION", "DRS_EXT_ASYNCREPL"),
+            ("DRSUAPI_SUPPORTED_EXTENSION_REMOVEAPI", "DRS_EXT_REMOVEAPI"),
+            ("DRSUAPI_SUPPORTED_EXTENSION_MOVEREQ_V2", "DRS_EXT_MOVEREQ_V2"),
+            ("DRSUAPI_SUPPORTED_EXTENSION_GETCHG_COMPRESS", "DRS_EXT_GETCHG_DEFLATE"),
+            ("DRSUAPI_SUPPORTED_EXTENSION_DCINFO_V1", "DRS_EXT_DCINFO_V1"),
+            ("DRSUAPI_SUPPORTED_EXTENSION_RESTORE_USN_OPTIMIZATION", "DRS_EXT_RESTORE_USN_OPTIMIZATION"),
+            ("DRSUAPI_SUPPORTED_EXTENSION_ADDENTRY", "DRS_EXT_ADDENTRY"),
+            ("DRSUAPI_SUPPORTED_EXTENSION_KCC_EXECUTE", "DRS_EXT_KCC_EXECUTE"),
+            ("DRSUAPI_SUPPORTED_EXTENSION_ADDENTRY_V2", "DRS_EXT_ADDENTRY_V2"),
+            ("DRSUAPI_SUPPORTED_EXTENSION_LINKED_VALUE_REPLICATION", "DRS_EXT_LINKED_VALUE_REPLICATION"),
+            ("DRSUAPI_SUPPORTED_EXTENSION_DCINFO_V2", "DRS_EXT_DCINFO_V2"),
+            ("DRSUAPI_SUPPORTED_EXTENSION_INSTANCE_TYPE_NOT_REQ_ON_MOD", "DRS_EXT_INSTANCE_TYPE_NOT_REQ_ON_MOD"),
+            ("DRSUAPI_SUPPORTED_EXTENSION_CRYPTO_BIND", "DRS_EXT_CRYPTO_BIND"),
+            ("DRSUAPI_SUPPORTED_EXTENSION_GET_REPL_INFO", "DRS_EXT_GET_REPL_INFO"),
+            ("DRSUAPI_SUPPORTED_EXTENSION_STRONG_ENCRYPTION", "DRS_EXT_STRONG_ENCRYPTION"),
+            ("DRSUAPI_SUPPORTED_EXTENSION_DCINFO_V01", "DRS_EXT_DCINFO_VFFFFFFFF"),
+            ("DRSUAPI_SUPPORTED_EXTENSION_TRANSITIVE_MEMBERSHIP", "DRS_EXT_TRANSITIVE_MEMBERSHIP"),
+            ("DRSUAPI_SUPPORTED_EXTENSION_ADD_SID_HISTORY", "DRS_EXT_ADD_SID_HISTORY"),
+            ("DRSUAPI_SUPPORTED_EXTENSION_POST_BETA3", "DRS_EXT_POST_BETA3"),
+            ("DRSUAPI_SUPPORTED_EXTENSION_GETCHGREQ_V5", "DRS_EXT_GETCHGREQ_V5"),
+            ("DRSUAPI_SUPPORTED_EXTENSION_GET_MEMBERSHIPS2", "DRS_EXT_GETMEMBERSHIPS2"),
+            ("DRSUAPI_SUPPORTED_EXTENSION_GETCHGREQ_V6", "DRS_EXT_GETCHGREQ_V6"),
+            ("DRSUAPI_SUPPORTED_EXTENSION_NONDOMAIN_NCS", "DRS_EXT_NONDOMAIN_NCS"),
+            ("DRSUAPI_SUPPORTED_EXTENSION_GETCHGREQ_V8", "DRS_EXT_GETCHGREQ_V8"),
+            ("DRSUAPI_SUPPORTED_EXTENSION_GETCHGREPLY_V5", "DRS_EXT_GETCHGREPLY_V5"),
+            ("DRSUAPI_SUPPORTED_EXTENSION_GETCHGREPLY_V6", "DRS_EXT_GETCHGREPLY_V6"),
+            ("DRSUAPI_SUPPORTED_EXTENSION_ADDENTRYREPLY_V3", "DRS_EXT_WHISTLER_BETA3"),
+            ("DRSUAPI_SUPPORTED_EXTENSION_GETCHGREPLY_V7", "DRS_EXT_WHISTLER_BETA3"),
+            ("DRSUAPI_SUPPORTED_EXTENSION_VERIFY_OBJECT", "DRS_EXT_WHISTLER_BETA3"),
+            ("DRSUAPI_SUPPORTED_EXTENSION_XPRESS_COMPRESS", "DRS_EXT_W2K3_DEFLATE"),
+            ("DRSUAPI_SUPPORTED_EXTENSION_GETCHGREQ_V10", "DRS_EXT_GETCHGREQ_V10"),
+            ("DRSUAPI_SUPPORTED_EXTENSION_RESERVED_PART2", "DRS_EXT_RESERVED_FOR_WIN2K_OR_DOTNET_PART2"),
+            ("DRSUAPI_SUPPORTED_EXTENSION_RESERVED_PART3", "DRS_EXT_RESERVED_FOR_WIN2K_OR_DOTNET_PART3")
+        ]
+
+        # Extended-extensions bitmap, only present in DsBindInfo48 replies.
+        optmap_ext = [
+            ("DRSUAPI_SUPPORTED_EXTENSION_ADAM", "DRS_EXT_ADAM"),
+            ("DRSUAPI_SUPPORTED_EXTENSION_LH_BETA2", "DRS_EXT_LH_BETA2"),
+            ("DRSUAPI_SUPPORTED_EXTENSION_RECYCLE_BIN", "DRS_EXT_RECYCLE_BIN")]
+
+        self.message("Bind to %s succeeded." % DC)
+        self.message("Extensions supported:")
+        # NOTE(review): the loop variable 'str' shadows the builtin str;
+        # harmless within this loop body, but worth renaming.
+        for (opt, str) in optmap:
+            optval = getattr(drsuapi, opt, 0)
+            if info.info.supported_extensions & optval:
+                yesno = "Yes"
+            else:
+                yesno = "No "
+            self.message("    %-60s: %s (%s)" % (opt, yesno, str))
+
+        if isinstance(info.info, drsuapi.DsBindInfo48):
+            self.message("\nExtended Extensions supported:")
+            for (opt, str) in optmap_ext:
+                optval = getattr(drsuapi, opt, 0)
+                if info.info.supported_extensions_ext & optval:
+                    yesno = "Yes"
+                else:
+                    yesno = "No "
+                self.message("    %-60s: %s (%s)" % (opt, yesno, str))
+
+        self.message("\nSite GUID: %s" % info.info.site_guid)
+        self.message("Repl epoch: %u" % info.info.repl_epoch)
+        if isinstance(info.info, drsuapi.DsBindInfo48):
+            self.message("Forest GUID: %s" % info.info.config_dn_guid)
+
+
+class cmd_drs_options(Command):
+    """Query or change 'options' for NTDS Settings object of a Domain Controller."""
+
+    synopsis = "%prog [<DC>] [options]"
+
+    takes_optiongroups = {
+        "sambaopts": options.SambaOptions,
+        "versionopts": options.VersionOptions,
+        "credopts": options.CredentialsOptions,
+    }
+
+    takes_args = ["DC?"]
+
+    takes_options = [
+        Option("--dsa-option", help="DSA option to enable/disable", type="str",
+               metavar="{+|-}IS_GC | {+|-}DISABLE_INBOUND_REPL | {+|-}DISABLE_OUTBOUND_REPL | {+|-}DISABLE_NTDSCONN_XLATE"),
+    ]
+
+    # Mapping of option names accepted on the command line to the bit
+    # they occupy in the NTDS Settings "options" attribute.
+    option_map = {"IS_GC": 0x00000001,
+                  "DISABLE_INBOUND_REPL": 0x00000002,
+                  "DISABLE_OUTBOUND_REPL": 0x00000004,
+                  "DISABLE_NTDSCONN_XLATE": 0x00000008}
+
+    def run(self, DC=None, dsa_option=None,
+            sambaopts=None, credopts=None, versionopts=None):
+
+        self.lp = sambaopts.get_loadparm()
+        if DC is None:
+            DC = common.netcmd_dnsname(self.lp)
+        self.server = DC
+        self.creds = credopts.get_credentials(self.lp, fallback_machine=True)
+
+        samdb_connect(self)
+
+        ntds_dn = self.samdb.get_dsServiceName()
+        res = self.samdb.search(base=ntds_dn, scope=ldb.SCOPE_BASE, attrs=["options"])
+        dsa_opts = int(res[0]["options"][0])
+
+        # print out current DSA options
+        cur_opts = [x for x in self.option_map if self.option_map[x] & dsa_opts]
+        self.message("Current DSA options: " + ", ".join(cur_opts))
+
+        # modify options
+        if dsa_option:
+            # The argument must be "+NAME" (set) or "-NAME" (clear).
+            if dsa_option[:1] not in ("+", "-"):
+                raise CommandError("Unknown option %s" % dsa_option)
+            flag = dsa_option[1:]
+            if flag not in self.option_map.keys():
+                raise CommandError("Unknown option %s" % dsa_option)
+            if dsa_option[:1] == "+":
+                dsa_opts |= self.option_map[flag]
+            else:
+                dsa_opts &= ~self.option_map[flag]
+            # save new options
+            m = ldb.Message()
+            m.dn = ldb.Dn(self.samdb, ntds_dn)
+            m["options"] = ldb.MessageElement(str(dsa_opts), ldb.FLAG_MOD_REPLACE, "options")
+            self.samdb.modify(m)
+            # print out new DSA options
+            cur_opts = [x for x in self.option_map if self.option_map[x] & dsa_opts]
+            self.message("New DSA options: " + ", ".join(cur_opts))
+
+
+class cmd_drs_clone_dc_database(Command):
+    """Replicate an initial clone of domain, but DO NOT JOIN it."""
+
+    synopsis = "%prog <dnsdomain> [options]"
+
+    takes_optiongroups = {
+        "sambaopts": options.SambaOptions,
+        "versionopts": options.VersionOptions,
+        "credopts": options.CredentialsOptions,
+    }
+
+    takes_options = [
+        Option("--server", help="DC to join", type=str),
+        Option("--targetdir", help="where to store provision (required)", type=str),
+        Option("-q", "--quiet", help="Be quiet", action="store_true"),
+        Option("--include-secrets", help="Also replicate secret values", action="store_true"),
+        Option("--backend-store", type="choice", metavar="BACKENDSTORE",
+               choices=["tdb", "mdb"],
+               help="Specify the database backend to be used "
+               "(default is %s)" % get_default_backend_store()),
+        Option("--backend-store-size", type="bytes", metavar="SIZE",
+               help="Specify the size of the backend database, currently" +
+                    "only supported by lmdb backends (default is 8 Gb).")
+    ]
+
+    takes_args = ["domain"]
+
+    def run(self, domain, sambaopts=None, credopts=None,
+            versionopts=None, server=None, targetdir=None,
+            quiet=False, verbose=False, include_secrets=False,
+            backend_store=None, backend_store_size=None):
+        lp = sambaopts.get_loadparm()
+        creds = credopts.get_credentials(lp)
+
+        logger = self.get_logger(verbose=verbose, quiet=quiet)
+
+        if targetdir is None:
+            raise CommandError("--targetdir option must be specified")
+
+        # join_clone() does the provision + replication; this command
+        # deliberately never joins the clone to the domain.
+        join_clone(logger=logger, server=server, creds=creds, lp=lp,
+                   domain=domain, dns_backend='SAMBA_INTERNAL',
+                   targetdir=targetdir, include_secrets=include_secrets,
+                   backend_store=backend_store,
+                   backend_store_size=backend_store_size)
+
+
+class cmd_drs_uptodateness(Command):
+    """Show uptodateness status"""
+
+    synopsis = "%prog [options]"
+
+    takes_optiongroups = {
+        "sambaopts": options.SambaOptions,
+        "versionopts": options.VersionOptions,
+        "credopts": options.CredentialsOptions,
+    }
+
+    takes_options = [
+        Option("-H", "--URL", metavar="URL", dest="H",
+               help="LDB URL for database or target server"),
+        Option("-p", "--partition",
+               help="restrict to this partition"),
+        Option("--json", action='store_true',
+               help="Print data in json format"),
+        Option("--maximum", action='store_true',
+               help="Print maximum out-of-date-ness only"),
+        Option("--median", action='store_true',
+               help="Print median out-of-date-ness only"),
+        Option("--full", action='store_true',
+               help="Print full out-of-date-ness data"),
+    ]
+
+    def format_as_json(self, partitions_summaries):
+        """Serialize per-partition summaries as an indented JSON string."""
+        return json.dumps(partitions_summaries, indent=2)
+
+    def format_as_text(self, partitions_summaries):
+        """Render per-partition summaries as one aligned text line each."""
+        lines = []
+        for part_name, summary in partitions_summaries.items():
+            items = ['%s: %s' % (k, v) for k, v in summary.items()]
+            line = '%-15s %s' % (part_name, ' '.join(items))
+            lines.append(line)
+        return '\n'.join(lines)
+
+    # NOTE(review): the 'json' parameter (bool, from --json) shadows the
+    # module-level 'import json' inside run(); safe here because run() only
+    # uses the module via self.format_as_json(), but fragile.
+    def run(self, H=None, partition=None,
+            json=False, maximum=False, median=False, full=False,
+            sambaopts=None, credopts=None, versionopts=None,
+            quiet=False, verbose=False):
+
+        lp = sambaopts.get_loadparm()
+        creds = credopts.get_credentials(lp, fallback_machine=True)
+        local_kcc, dsas = get_kcc_and_dsas(H, lp, creds)
+        samdb = local_kcc.samdb
+        short_partitions, _ = get_partition_maps(samdb)
+        if partition:
+            if partition in short_partitions:
+                part_dn = short_partitions[partition]
+                # narrow down to specified partition only
+                short_partitions = {partition: part_dn}
+            else:
+                raise CommandError("unknown partition %s" % partition)
+
+        # --maximum/--median restrict which statistics the summary keeps.
+        filters = []
+        if maximum:
+            filters.append('maximum')
+        if median:
+            filters.append('median')
+
+        partitions_distances = {}
+        partitions_summaries = {}
+        for part_name, part_dn in short_partitions.items():
+            utdv_edges = get_utdv_edges(local_kcc, dsas, part_dn, lp, creds)
+            distances = get_utdv_distances(utdv_edges, dsas)
+            summary = get_utdv_summary(distances, filters=filters)
+            partitions_distances[part_name] = distances
+            partitions_summaries[part_name] = summary
+
+        if full:
+            # always print json format
+            output = self.format_as_json(partitions_distances)
+        else:
+            if json:
+                output = self.format_as_json(partitions_summaries)
+            else:
+                output = self.format_as_text(partitions_summaries)
+
+        print(output, file=self.outf)
+
+
+class cmd_drs(SuperCommand):
+    """Directory Replication Services (DRS) management."""
+
+    # Registry of subcommands exposed as "samba-tool drs <name>".
+    subcommands = {}
+    subcommands["bind"] = cmd_drs_bind()
+    subcommands["kcc"] = cmd_drs_kcc()
+    subcommands["replicate"] = cmd_drs_replicate()
+    subcommands["showrepl"] = cmd_drs_showrepl()
+    subcommands["options"] = cmd_drs_options()
+    subcommands["clone-dc-database"] = cmd_drs_clone_dc_database()
+    subcommands["uptodateness"] = cmd_drs_uptodateness()
diff --git a/python/samba/netcmd/dsacl.py b/python/samba/netcmd/dsacl.py
new file mode 100644
index 0000000..527c534
--- /dev/null
+++ b/python/samba/netcmd/dsacl.py
@@ -0,0 +1,217 @@
+# Manipulate ACLs on directory objects
+#
+# Copyright (C) Nadezhda Ivanova <nivanova@samba.org> 2010
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import samba.getopt as options
+from samba import sd_utils
+from samba.dcerpc import security
+from samba.samdb import SamDB
+from samba.ndr import ndr_unpack, ndr_pack
+from samba.dcerpc.security import (
+ GUID_DRS_ALLOCATE_RIDS, GUID_DRS_CHANGE_DOMAIN_MASTER,
+ GUID_DRS_CHANGE_INFR_MASTER, GUID_DRS_CHANGE_PDC,
+ GUID_DRS_CHANGE_RID_MASTER, GUID_DRS_CHANGE_SCHEMA_MASTER,
+ GUID_DRS_GET_CHANGES, GUID_DRS_GET_ALL_CHANGES,
+ GUID_DRS_GET_FILTERED_ATTRIBUTES, GUID_DRS_MANAGE_TOPOLOGY,
+ GUID_DRS_MONITOR_TOPOLOGY, GUID_DRS_REPL_SYNCRONIZE,
+ GUID_DRS_RO_REPL_SECRET_SYNC)
+
+
+import ldb
+from ldb import SCOPE_BASE
+import re
+
+from samba.auth import system_session
+from samba.netcmd import (
+ Command,
+ CommandError,
+ SuperCommand,
+ Option,
+)
+
+class cmd_dsacl_base(Command):
+    """Base class for DSACL commands.
+
+    Provides the option groups shared by all dsacl sub-commands and a
+    helper for printing an object's security descriptor as SDDL.
+    """
+
+    synopsis = "%prog [options]"
+
+    takes_optiongroups = {
+        "sambaopts": options.SambaOptions,
+        "credopts": options.CredentialsOptions,
+        "versionopts": options.VersionOptions,
+    }
+
+    def print_acl(self, sd_helper, object_dn, prefix=''):
+        """Write the SDDL security descriptor of object_dn to self.outf.
+
+        :param sd_helper: sd_utils.SDUtils instance bound to a SamDB
+        :param object_dn: DN of the object whose descriptor to print
+        :param prefix: label prefix for the header line (e.g. 'old ')
+        """
+        desc_sddl = sd_helper.get_sd_as_sddl(object_dn)
+        self.outf.write("%sdescriptor for %s:\n" % (prefix, object_dn))
+        self.outf.write(desc_sddl + "\n")
+
+
+class cmd_dsacl_set(cmd_dsacl_base):
+ """Modify access list on a directory object."""
+
+ car_help = """ The access control right to allow or deny """
+
+ takes_options = [
+ Option("-H", "--URL", help="LDB URL for database or target server",
+ type=str, metavar="URL", dest="H"),
+ Option("--car", type="choice", choices=["change-rid",
+ "change-pdc",
+ "change-infrastructure",
+ "change-schema",
+ "change-naming",
+ "allocate_rids",
+ "get-changes",
+ "get-changes-all",
+ "get-changes-filtered",
+ "topology-manage",
+ "topology-monitor",
+ "repl-sync",
+ "ro-repl-secret-sync"],
+ help=car_help),
+ Option("--action", type="choice", choices=["allow", "deny"],
+ help="""Deny or allow access"""),
+ Option("--objectdn", help="DN of the object whose SD to modify",
+ type="string"),
+ Option("--trusteedn", help="DN of the entity that gets access",
+ type="string"),
+ Option("--sddl", help="An ACE or group of ACEs to be added on the object",
+ type="string"),
+ ]
+
+ def find_trustee_sid(self, samdb, trusteedn):
+ res = samdb.search(base=trusteedn, expression="(objectClass=*)",
+ scope=SCOPE_BASE)
+ assert(len(res) == 1)
+ return ndr_unpack(security.dom_sid, res[0]["objectSid"][0])
+
+ def add_ace(self, sd_helper, object_dn, new_ace):
+ """Add new ace explicitly."""
+ ai,ii = sd_helper.dacl_prepend_aces(object_dn, new_ace)
+ for ace in ii:
+ sddl = ace.as_sddl(sd_helper.domain_sid)
+ self.outf.write("WARNING: ignored INHERITED_ACE (%s).\n" % sddl)
+ for ace in ai:
+ sddl = ace.as_sddl(sd_helper.domain_sid)
+ self.outf.write("WARNING: (%s) was already found in the current security descriptor.\n" % sddl)
+
+ def run(self, car, action, objectdn, trusteedn, sddl,
+ H=None, credopts=None, sambaopts=None, versionopts=None):
+ lp = sambaopts.get_loadparm()
+ creds = credopts.get_credentials(lp)
+
+ if sddl is None and (car is None or action is None
+ or objectdn is None or trusteedn is None):
+ return self.usage()
+
+ samdb = SamDB(url=H, session_info=system_session(),
+ credentials=creds, lp=lp)
+ sd_helper = sd_utils.SDUtils(samdb)
+ cars = {'change-rid': GUID_DRS_CHANGE_RID_MASTER,
+ 'change-pdc': GUID_DRS_CHANGE_PDC,
+ 'change-infrastructure': GUID_DRS_CHANGE_INFR_MASTER,
+ 'change-schema': GUID_DRS_CHANGE_SCHEMA_MASTER,
+ 'change-naming': GUID_DRS_CHANGE_DOMAIN_MASTER,
+ 'allocate_rids': GUID_DRS_ALLOCATE_RIDS,
+ 'get-changes': GUID_DRS_GET_CHANGES,
+ 'get-changes-all': GUID_DRS_GET_ALL_CHANGES,
+ 'get-changes-filtered': GUID_DRS_GET_FILTERED_ATTRIBUTES,
+ 'topology-manage': GUID_DRS_MANAGE_TOPOLOGY,
+ 'topology-monitor': GUID_DRS_MONITOR_TOPOLOGY,
+ 'repl-sync': GUID_DRS_REPL_SYNCRONIZE,
+ 'ro-repl-secret-sync': GUID_DRS_RO_REPL_SECRET_SYNC,
+ }
+ sid = self.find_trustee_sid(samdb, trusteedn)
+ if sddl:
+ new_ace = sddl
+ elif action == "allow":
+ new_ace = "(OA;;CR;%s;;%s)" % (cars[car], str(sid))
+ elif action == "deny":
+ new_ace = "(OD;;CR;%s;;%s)" % (cars[car], str(sid))
+ else:
+ raise CommandError("Wrong argument '%s'!" % action)
+
+ self.print_acl(sd_helper, objectdn, prefix='old ')
+ self.add_ace(sd_helper, objectdn, new_ace)
+ self.print_acl(sd_helper, objectdn, prefix='new ')
+
+
+class cmd_dsacl_get(cmd_dsacl_base):
+ """Print access list on a directory object."""
+
+ takes_options = [
+ Option("-H", "--URL", help="LDB URL for database or target server",
+ type=str, metavar="URL", dest="H"),
+ Option("--objectdn", help="DN of the object whose SD to modify",
+ type="string"),
+ ]
+
+ def run(self, objectdn,
+ H=None, credopts=None, sambaopts=None, versionopts=None):
+ lp = sambaopts.get_loadparm()
+ creds = credopts.get_credentials(lp)
+
+ samdb = SamDB(url=H, session_info=system_session(),
+ credentials=creds, lp=lp)
+ sd_helper = sd_utils.SDUtils(samdb)
+ self.print_acl(sd_helper, objectdn)
+
+
+class cmd_dsacl_delete(cmd_dsacl_base):
+ """Delete an access list entry on a directory object."""
+
+ takes_options = [
+ Option("-H", "--URL", help="LDB URL for database or target server",
+ type=str, metavar="URL", dest="H"),
+ Option("--objectdn", help="DN of the object whose SD to modify",
+ type="string"),
+ Option("--sddl", help="An ACE or group of ACEs to be deleted from the object",
+ type="string"),
+ ]
+
+ def run(self, objectdn, sddl, H=None, credopts=None, sambaopts=None, versionopts=None):
+ lp = sambaopts.get_loadparm()
+ creds = credopts.get_credentials(lp)
+
+ if sddl is None or objectdn is None:
+ return self.usage()
+
+ samdb = SamDB(url=H, session_info=system_session(),
+ credentials=creds, lp=lp)
+ sd_helper = sd_utils.SDUtils(samdb)
+
+ self.print_acl(sd_helper, objectdn, prefix='old ')
+ self.delete_ace(sd_helper, objectdn, sddl)
+ self.print_acl(sd_helper, objectdn, prefix='new ')
+
+ def delete_ace(self, sd_helper, object_dn, delete_aces):
+ """Delete ace explicitly."""
+ di,ii = sd_helper.dacl_delete_aces(object_dn, delete_aces)
+ for ace in ii:
+ sddl = ace.as_sddl(sd_helper.domain_sid)
+ self.outf.write("WARNING: ignored INHERITED_ACE (%s).\n" % sddl)
+ for ace in di:
+ sddl = ace.as_sddl(sd_helper.domain_sid)
+ self.outf.write("WARNING: (%s) was not found in the current security descriptor.\n" % sddl)
+
+
+class cmd_dsacl(SuperCommand):
+ """DS ACLs manipulation."""
+
+ subcommands = {}
+ subcommands["set"] = cmd_dsacl_set()
+ subcommands["get"] = cmd_dsacl_get()
+ subcommands["delete"] = cmd_dsacl_delete()
diff --git a/python/samba/netcmd/encoders.py b/python/samba/netcmd/encoders.py
new file mode 100644
index 0000000..7d32b68
--- /dev/null
+++ b/python/samba/netcmd/encoders.py
@@ -0,0 +1,49 @@
+# Unix SMB/CIFS implementation.
+#
+# encoders: JSONEncoder class for dealing with object fields.
+#
+# Copyright (C) Catalyst.Net Ltd. 2023
+#
+# Written by Rob van der Linde <rob@catalyst.net.nz>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import json
+from datetime import datetime
+from decimal import Decimal
+from enum import Enum
+
+from ldb import Dn
+
+
+class JSONEncoder(json.JSONEncoder):
+ """Custom JSON encoder class to help out with some data types.
+
+ For example, the json module has no idea how to encode a Dn object to str.
+ Another common object that is handled is Decimal types.
+
+ In addition, any objects that have a __json__ method will get called.
+ """
+
+ def default(self, obj):
+ if isinstance(obj, (Decimal, Dn)):
+ return str(obj)
+ elif isinstance(obj, Enum):
+ return str(obj.value)
+ elif isinstance(obj, datetime):
+ return obj.isoformat()
+ elif getattr(obj, "__json__", None) and callable(obj.__json__):
+ return obj.__json__()
+ return obj
diff --git a/python/samba/netcmd/forest.py b/python/samba/netcmd/forest.py
new file mode 100644
index 0000000..4a5293c
--- /dev/null
+++ b/python/samba/netcmd/forest.py
@@ -0,0 +1,167 @@
+# domain management
+#
+# Copyright William Brown <william@blackhats.net.au> 2018
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import ldb
+import samba.getopt as options
+from samba.auth import system_session
+from samba.samdb import SamDB
+from samba.netcmd import (
+ Command,
+ CommandError,
+ SuperCommand,
+ Option
+)
+
+
+class cmd_forest_show(Command):
+    """Display forest settings.
+
+    These settings control the behaviour of all domain controllers in this
+    forest. This displays those settings from the replicated configuration
+    partition.
+
+    This is a template base class: concrete subclasses must define
+    'objectdn' (the settings object, relative to the domain DN) and
+    'attributes' (the list of attribute names to display) -- see
+    cmd_forest_show_directory_service.
+    """
+
+    synopsis = "%prog [options]"
+
+    takes_optiongroups = {
+        "sambaopts": options.SambaOptions,
+        "versionopts": options.VersionOptions,
+        "credopts": options.CredentialsOptions,
+    }
+
+    takes_options = [
+        Option("-H", "--URL", help="LDB URL for database or target server",
+               type=str, metavar="URL", dest="H"),
+    ]
+
+    def run(self, H=None, credopts=None, sambaopts=None, versionopts=None):
+        lp = sambaopts.get_loadparm()
+        creds = credopts.get_credentials(lp)
+
+        samdb = SamDB(url=H, session_info=system_session(),
+                      credentials=creds, lp=lp)
+
+        domain_dn = samdb.domain_dn()
+        # self.objectdn is provided by the concrete subclass
+        object_dn = "%s,%s" % (self.objectdn, domain_dn)
+
+        # Show all the settings we know how to set in the forest object!
+        res = samdb.search(base=object_dn, scope=ldb.SCOPE_BASE,
+                           attrs=self.attributes)
+
+        # Now we just display these attributes. The value is that
+        # we make them a bit prettier and human accessible.
+        # There should only be one response!
+        res_object = res[0]
+
+        self.outf.write("Settings for %s\n" % object_dn)
+        for attr in self.attributes:
+            try:
+                self.outf.write("%s: %s\n" % (attr, res_object[attr][0]))
+            except KeyError:
+                # attribute not set on the object
+                self.outf.write("%s: <NO VALUE>\n" % attr)
+
+
+class cmd_forest_set(Command):
+    """Modify forest settings.
+
+    This will alter the setting specified to value.
+
+    This is a template base class: concrete subclasses must define
+    'objectdn' (the settings object, relative to the domain DN) and
+    'attribute' (the attribute name to replace) -- see
+    cmd_forest_set_directory_service_dsheuristics.
+    """
+
+    # Placeholders; concrete subclasses override these.
+    attribute = None
+    objectdn = None
+
+    synopsis = "%prog value [options]"
+
+    takes_optiongroups = {
+        "sambaopts": options.SambaOptions,
+        "versionopts": options.VersionOptions,
+        "credopts": options.CredentialsOptions,
+    }
+
+    takes_options = [
+        Option("-H", "--URL", help="LDB URL for database or target server",
+               type=str, metavar="URL", dest="H"),
+    ]
+
+    takes_args = ["value"]
+
+    def run(self, value, H=None, credopts=None, sambaopts=None, versionopts=None):
+        lp = sambaopts.get_loadparm()
+        creds = credopts.get_credentials(lp)
+
+        samdb = SamDB(url=H, session_info=system_session(),
+                      credentials=creds, lp=lp)
+
+        domain_dn = samdb.domain_dn()
+        object_dn = "%s,%s" % (self.objectdn, domain_dn)
+
+        # Create the modification
+        m = ldb.Message()
+        m.dn = ldb.Dn(samdb, object_dn)
+        # FLAG_MOD_REPLACE overwrites any existing value of the attribute
+        m[self.attribute] = ldb.MessageElement(
+            value, ldb.FLAG_MOD_REPLACE, self.attribute)
+
+        samdb.modify(m)
+        self.outf.write("set %s: %s\n" % (self.attribute, value))
+
+
+# Then you override it for each setting name:
+
+class cmd_forest_show_directory_service(cmd_forest_show):
+    """Display Directory Service settings for the forest.
+
+    These settings control how the Directory Service behaves on all domain
+    controllers in the forest.
+    """
+    # Settings object, relative to the domain DN (joined in cmd_forest_show.run)
+    objectdn = "CN=Directory Service,CN=Windows NT,CN=Services,CN=Configuration"
+    # Attributes displayed by the inherited run()
+    attributes = ['dsheuristics']
+
+
+class cmd_forest_set_directory_service_dsheuristics(cmd_forest_set):
+    """Set the value of dsheuristics on the Directory Service.
+
+    This value alters the behaviour of the Directory Service on all domain
+    controllers in the forest. Documentation related to this parameter can be
+    found here: https://msdn.microsoft.com/en-us/library/cc223560.aspx
+
+    In summary each "character" of the number-string, controls a setting.
+    A common setting is to set the value "2" in the 7th character. This controls
+    anonymous search behaviour.
+
+    Example: dsheuristics 0000002
+
+    This would allow anonymous LDAP searches to the domain (you may still need
+    to alter access controls to allow this).
+    """
+    # Settings object, relative to the domain DN (joined in cmd_forest_set.run)
+    objectdn = "CN=Directory Service,CN=Windows NT,CN=Services,CN=Configuration"
+    # Attribute replaced by the inherited run()
+    attribute = 'dsheuristics'
+
+
+class cmd_forest_directory_service(SuperCommand):
+ """Forest configuration partition management."""
+
+ subcommands = {}
+ subcommands["show"] = cmd_forest_show_directory_service()
+ subcommands["dsheuristics"] = cmd_forest_set_directory_service_dsheuristics()
+
+
+class cmd_forest(SuperCommand):
+ """Forest management."""
+
+ subcommands = {}
+ subcommands["directory_service"] = cmd_forest_directory_service()
diff --git a/python/samba/netcmd/fsmo.py b/python/samba/netcmd/fsmo.py
new file mode 100644
index 0000000..643d0ae
--- /dev/null
+++ b/python/samba/netcmd/fsmo.py
@@ -0,0 +1,535 @@
+# Changes a FSMO role owner
+#
+# Copyright Nadezhda Ivanova 2009
+# Copyright Jelmer Vernooij 2009
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import samba
+import samba.getopt as options
+import ldb
+from ldb import LdbError
+from samba.dcerpc import drsuapi, misc
+from samba.auth import system_session
+import samba.drs_utils
+from samba.netcmd import (
+ Command,
+ CommandError,
+ SuperCommand,
+ Option,
+)
+from samba.samdb import SamDB
+
+
+def get_fsmo_roleowner(samdb, roledn, role):
+ """Gets the owner of an FSMO role
+
+ :param roledn: The DN of the FSMO role
+ :param role: The FSMO role
+ """
+ try:
+ res = samdb.search(roledn,
+ scope=ldb.SCOPE_BASE, attrs=["fSMORoleOwner"])
+ except LdbError as e7:
+ (num, msg) = e7.args
+ if num == ldb.ERR_NO_SUCH_OBJECT:
+ raise CommandError("The '%s' role is not present in this domain" % role)
+ raise
+
+ if 'fSMORoleOwner' in res[0]:
+ master_owner = (ldb.Dn(samdb, res[0]["fSMORoleOwner"][0].decode('utf8')))
+ else:
+ master_owner = None
+
+ return master_owner
+
+
+def transfer_dns_role(outf, sambaopts, credopts, role, samdb):
+ """Transfer dns FSMO role. """
+
+ if role == "domaindns":
+ domain_dn = samdb.domain_dn()
+ role_object = "CN=Infrastructure,DC=DomainDnsZones," + domain_dn
+ elif role == "forestdns":
+ forest_dn = samba.dn_from_dns_name(samdb.forest_dns_name())
+ role_object = "CN=Infrastructure,DC=ForestDnsZones," + forest_dn
+
+ new_host_dns_name = samdb.host_dns_name()
+
+ res = samdb.search(role_object,
+ attrs=["fSMORoleOwner"],
+ scope=ldb.SCOPE_BASE,
+ controls=["extended_dn:1:1"])
+
+ if 'fSMORoleOwner' in res[0]:
+ try:
+ master_guid = str(misc.GUID(ldb.Dn(samdb,
+ res[0]['fSMORoleOwner'][0].decode('utf8'))
+ .get_extended_component('GUID')))
+ master_owner = str(ldb.Dn(samdb, res[0]['fSMORoleOwner'][0].decode('utf8')))
+ except LdbError as e3:
+ (num, msg) = e3.args
+ raise CommandError("No GUID found in naming master DN %s : %s \n" %
+ (res[0]['fSMORoleOwner'][0], msg))
+ else:
+ outf.write("* The '%s' role does not have an FSMO roleowner\n" % role)
+ return False
+
+ if role == "domaindns":
+ master_dns_name = '%s._msdcs.%s' % (master_guid,
+ samdb.domain_dns_name())
+ new_dns_name = '%s._msdcs.%s' % (samdb.get_ntds_GUID(),
+ samdb.domain_dns_name())
+ elif role == "forestdns":
+ master_dns_name = '%s._msdcs.%s' % (master_guid,
+ samdb.forest_dns_name())
+ new_dns_name = '%s._msdcs.%s' % (samdb.get_ntds_GUID(),
+ samdb.forest_dns_name())
+
+ new_owner = samdb.get_dsServiceName()
+
+ if master_dns_name != new_dns_name:
+ lp = sambaopts.get_loadparm()
+ creds = credopts.get_credentials(lp, fallback_machine=True)
+ samdb = SamDB(url="ldap://%s" % (master_dns_name),
+ session_info=system_session(),
+ credentials=creds, lp=lp)
+
+ m = ldb.Message()
+ m.dn = ldb.Dn(samdb, role_object)
+ m["fSMORoleOwner_Del"] = ldb.MessageElement(master_owner,
+ ldb.FLAG_MOD_DELETE,
+ "fSMORoleOwner")
+ m["fSMORoleOwner_Add"] = ldb.MessageElement(new_owner,
+ ldb.FLAG_MOD_ADD,
+ "fSMORoleOwner")
+ try:
+ samdb.modify(m)
+ except LdbError as e5:
+ (num, msg) = e5.args
+ raise CommandError("Failed to add role '%s': %s" % (role, msg))
+
+ try:
+ connection = samba.drs_utils.drsuapi_connect(new_host_dns_name,
+ lp, creds)
+ except samba.drs_utils.drsException as e:
+ raise CommandError("Drsuapi Connect failed", e)
+
+ try:
+ drsuapi_connection = connection[0]
+ drsuapi_handle = connection[1]
+ req_options = drsuapi.DRSUAPI_DRS_WRIT_REP
+ NC = role_object[18:]
+ samba.drs_utils.sendDsReplicaSync(drsuapi_connection,
+ drsuapi_handle,
+ master_guid,
+ NC, req_options)
+ except samba.drs_utils.drsException as estr:
+ raise CommandError("Replication failed", estr)
+
+ outf.write("FSMO transfer of '%s' role successful\n" % role)
+ return True
+ else:
+ outf.write("This DC already has the '%s' FSMO role\n" % role)
+ return False
+
+
+def transfer_role(outf, role, samdb):
+    """Transfer standard FSMO role.
+
+    The transfer is requested by writing the matching "become*"
+    operational attribute to the rootDSE of the DC that should take
+    over the role.
+
+    :param outf: output stream for progress messages
+    :param role: one of "rid", "pdc", "naming", "infrastructure", "schema"
+    :param samdb: SamDB connected to the DC that should take the role
+    :return: True if the role was transferred, False otherwise
+    :raises CommandError: on an invalid role or a failed modify
+    """
+
+    domain_dn = samdb.domain_dn()
+    rid_dn = "CN=RID Manager$,CN=System," + domain_dn
+    naming_dn = "CN=Partitions,%s" % samdb.get_config_basedn()
+    infrastructure_dn = "CN=Infrastructure," + domain_dn
+    schema_dn = str(samdb.get_schema_basedn())
+    new_owner = ldb.Dn(samdb, samdb.get_dsServiceName())
+    m = ldb.Message()
+    # The empty DN targets the rootDSE, where the become* attributes live.
+    m.dn = ldb.Dn(samdb, "")
+    if role == "rid":
+        master_owner = get_fsmo_roleowner(samdb, rid_dn, role)
+        m["becomeRidMaster"] = ldb.MessageElement(
+            "1", ldb.FLAG_MOD_REPLACE,
+            "becomeRidMaster")
+    elif role == "pdc":
+        master_owner = get_fsmo_roleowner(samdb, domain_dn, role)
+
+        # becomePdc takes the domain SID as its value, not "1".
+        res = samdb.search(domain_dn,
+                           scope=ldb.SCOPE_BASE, attrs=["objectSid"])
+        assert len(res) == 1
+        sid = res[0]["objectSid"][0]
+        m["becomePdc"] = ldb.MessageElement(
+            sid, ldb.FLAG_MOD_REPLACE,
+            "becomePdc")
+    elif role == "naming":
+        master_owner = get_fsmo_roleowner(samdb, naming_dn, role)
+        m["becomeDomainMaster"] = ldb.MessageElement(
+            "1", ldb.FLAG_MOD_REPLACE,
+            "becomeDomainMaster")
+    elif role == "infrastructure":
+        master_owner = get_fsmo_roleowner(samdb, infrastructure_dn, role)
+        m["becomeInfrastructureMaster"] = ldb.MessageElement(
+            "1", ldb.FLAG_MOD_REPLACE,
+            "becomeInfrastructureMaster")
+    elif role == "schema":
+        master_owner = get_fsmo_roleowner(samdb, schema_dn, role)
+        m["becomeSchemaMaster"] = ldb.MessageElement(
+            "1", ldb.FLAG_MOD_REPLACE,
+            "becomeSchemaMaster")
+    else:
+        raise CommandError("Invalid FSMO role.")
+
+    if master_owner is None:
+        outf.write("Cannot transfer, no DC assigned to the %s role. Try 'seize' instead\n" % role)
+        return False
+
+    if master_owner != new_owner:
+        try:
+            samdb.modify(m)
+        except LdbError as e6:
+            (num, msg) = e6.args
+            raise CommandError("Transfer of '%s' role failed: %s" %
+                               (role, msg))
+
+        outf.write("FSMO transfer of '%s' role successful\n" % role)
+        return True
+    else:
+        outf.write("This DC already has the '%s' FSMO role\n" % role)
+        return False
+
+
+class cmd_fsmo_seize(Command):
+ """Seize the role."""
+
+ synopsis = "%prog [options]"
+
+ takes_optiongroups = {
+ "sambaopts": options.SambaOptions,
+ "credopts": options.CredentialsOptions,
+ "versionopts": options.VersionOptions,
+ }
+
+ takes_options = [
+ Option("-H", "--URL", help="LDB URL for database or target server",
+ type=str, metavar="URL", dest="H"),
+ Option("--force",
+ help="Force seizing of role without attempting to transfer.",
+ action="store_true"),
+ Option("--role", type="choice", choices=["rid", "pdc", "infrastructure",
+ "schema", "naming", "domaindns", "forestdns", "all"],
+ help="""The FSMO role to seize or transfer.\n
+rid=RidAllocationMasterRole\n
+schema=SchemaMasterRole\n
+pdc=PdcEmulationMasterRole\n
+naming=DomainNamingMasterRole\n
+infrastructure=InfrastructureMasterRole\n
+domaindns=DomainDnsZonesMasterRole\n
+forestdns=ForestDnsZonesMasterRole\n
+all=all of the above\n
+You must provide an Admin user and password."""),
+ ]
+
+ takes_args = []
+
+ def seize_role(self, role, samdb, force):
+ """Seize standard fsmo role. """
+
+ serviceName = samdb.get_dsServiceName()
+ domain_dn = samdb.domain_dn()
+ self.infrastructure_dn = "CN=Infrastructure," + domain_dn
+ self.naming_dn = "CN=Partitions,%s" % samdb.get_config_basedn()
+ self.schema_dn = str(samdb.get_schema_basedn())
+ self.rid_dn = "CN=RID Manager$,CN=System," + domain_dn
+
+ m = ldb.Message()
+ if role == "rid":
+ m.dn = ldb.Dn(samdb, self.rid_dn)
+ elif role == "pdc":
+ m.dn = ldb.Dn(samdb, domain_dn)
+ elif role == "naming":
+ m.dn = ldb.Dn(samdb, self.naming_dn)
+ elif role == "infrastructure":
+ m.dn = ldb.Dn(samdb, self.infrastructure_dn)
+ elif role == "schema":
+ m.dn = ldb.Dn(samdb, self.schema_dn)
+ else:
+ raise CommandError("Invalid FSMO role.")
+ # first try to transfer to avoid problem if the owner is still active
+ seize = False
+ master_owner = get_fsmo_roleowner(samdb, m.dn, role)
+ # if there is a different owner
+ if master_owner is not None:
+ # if there is a different owner
+ if master_owner != serviceName:
+ # if --force isn't given, attempt transfer
+ if force is None:
+ self.message("Attempting transfer...")
+ try:
+ transfer_role(self.outf, role, samdb)
+ except:
+ # transfer failed, use the big axe...
+ seize = True
+ self.message("Transfer unsuccessful, seizing...")
+ else:
+ self.message("Transfer successful, not seizing role")
+ return True
+ else:
+ self.outf.write("This DC already has the '%s' FSMO role\n" %
+ role)
+ return False
+ else:
+ seize = True
+
+ if force is not None or seize:
+ self.message("Seizing %s FSMO role..." % role)
+ m["fSMORoleOwner"] = ldb.MessageElement(
+ serviceName, ldb.FLAG_MOD_REPLACE,
+ "fSMORoleOwner")
+
+ samdb.transaction_start()
+ try:
+ samdb.modify(m)
+ if role == "rid":
+ # We may need to allocate the initial RID Set
+ samdb.create_own_rid_set()
+
+ except LdbError as e1:
+ (num, msg) = e1.args
+ if role == "rid" and num == ldb.ERR_ENTRY_ALREADY_EXISTS:
+
+ # Try again without the RID Set allocation
+ # (normal). We have to manage the transaction as
+ # we do not have nested transactions and creating
+ # a RID set touches multiple objects. :-(
+ samdb.transaction_cancel()
+ samdb.transaction_start()
+ try:
+ samdb.modify(m)
+ except LdbError as e:
+ (num, msg) = e.args
+ samdb.transaction_cancel()
+ raise CommandError("Failed to seize '%s' role: %s" %
+ (role, msg))
+
+ else:
+ samdb.transaction_cancel()
+ raise CommandError("Failed to seize '%s' role: %s" %
+ (role, msg))
+ samdb.transaction_commit()
+ self.outf.write("FSMO seize of '%s' role successful\n" % role)
+
+ return True
+
+ def seize_dns_role(self, role, samdb, credopts, sambaopts,
+ versionopts, force):
+ """Seize DNS FSMO role. """
+
+ serviceName = samdb.get_dsServiceName()
+ domain_dn = samdb.domain_dn()
+ forest_dn = samba.dn_from_dns_name(samdb.forest_dns_name())
+ self.domaindns_dn = "CN=Infrastructure,DC=DomainDnsZones," + domain_dn
+ self.forestdns_dn = "CN=Infrastructure,DC=ForestDnsZones," + forest_dn
+
+ m = ldb.Message()
+ if role == "domaindns":
+ m.dn = ldb.Dn(samdb, self.domaindns_dn)
+ elif role == "forestdns":
+ m.dn = ldb.Dn(samdb, self.forestdns_dn)
+ else:
+ raise CommandError("Invalid FSMO role.")
+ # first try to transfer to avoid problem if the owner is still active
+ seize = False
+ master_owner = get_fsmo_roleowner(samdb, m.dn, role)
+ if master_owner is not None:
+ # if there is a different owner
+ if master_owner != serviceName:
+ # if --force isn't given, attempt transfer
+ if force is None:
+ self.message("Attempting transfer...")
+ try:
+ transfer_dns_role(self.outf, sambaopts, credopts, role,
+ samdb)
+ except:
+ # transfer failed, use the big axe...
+ seize = True
+ self.message("Transfer unsuccessful, seizing...")
+ else:
+ self.message("Transfer successful, not seizing role\n")
+ return True
+ else:
+ self.outf.write("This DC already has the '%s' FSMO role\n" %
+ role)
+ return False
+ else:
+ seize = True
+
+ if force is not None or seize:
+ self.message("Seizing %s FSMO role..." % role)
+ m["fSMORoleOwner"] = ldb.MessageElement(
+ serviceName, ldb.FLAG_MOD_REPLACE,
+ "fSMORoleOwner")
+ try:
+ samdb.modify(m)
+ except LdbError as e2:
+ (num, msg) = e2.args
+ raise CommandError("Failed to seize '%s' role: %s" %
+ (role, msg))
+ self.outf.write("FSMO seize of '%s' role successful\n" % role)
+ return True
+
+ def run(self, force=None, H=None, role=None,
+ credopts=None, sambaopts=None, versionopts=None):
+
+ lp = sambaopts.get_loadparm()
+ creds = credopts.get_credentials(lp, fallback_machine=True)
+
+ samdb = SamDB(url=H, session_info=system_session(),
+ credentials=creds, lp=lp)
+
+ if role == "all":
+ self.seize_role("rid", samdb, force)
+ self.seize_role("pdc", samdb, force)
+ self.seize_role("naming", samdb, force)
+ self.seize_role("infrastructure", samdb, force)
+ self.seize_role("schema", samdb, force)
+ self.seize_dns_role("domaindns", samdb, credopts, sambaopts,
+ versionopts, force)
+ self.seize_dns_role("forestdns", samdb, credopts, sambaopts,
+ versionopts, force)
+ else:
+ if role == "domaindns" or role == "forestdns":
+ self.seize_dns_role(role, samdb, credopts, sambaopts,
+ versionopts, force)
+ else:
+ self.seize_role(role, samdb, force)
+
+
+class cmd_fsmo_show(Command):
+ """Show the roles."""
+
+ synopsis = "%prog [options]"
+
+ takes_optiongroups = {
+ "sambaopts": options.SambaOptions,
+ "credopts": options.CredentialsOptions,
+ "versionopts": options.VersionOptions,
+ }
+
+ takes_options = [
+ Option("-H", "--URL", help="LDB URL for database or target server",
+ type=str, metavar="URL", dest="H"),
+ ]
+
+ takes_args = []
+
+ def run(self, H=None, credopts=None, sambaopts=None, versionopts=None):
+ lp = sambaopts.get_loadparm()
+ creds = credopts.get_credentials(lp, fallback_machine=True)
+
+ samdb = SamDB(url=H, session_info=system_session(),
+ credentials=creds, lp=lp)
+
+ domain_dn = samdb.domain_dn()
+ forest_dn = samba.dn_from_dns_name(samdb.forest_dns_name())
+ infrastructure_dn = "CN=Infrastructure," + domain_dn
+ naming_dn = "CN=Partitions,%s" % samdb.get_config_basedn()
+ schema_dn = samdb.get_schema_basedn()
+ rid_dn = "CN=RID Manager$,CN=System," + domain_dn
+ domaindns_dn = "CN=Infrastructure,DC=DomainDnsZones," + domain_dn
+ forestdns_dn = "CN=Infrastructure,DC=ForestDnsZones," + forest_dn
+
+ masters = [(schema_dn, "schema", "SchemaMasterRole"),
+ (infrastructure_dn, "infrastructure", "InfrastructureMasterRole"),
+ (rid_dn, "rid", "RidAllocationMasterRole"),
+ (domain_dn, "pdc", "PdcEmulationMasterRole"),
+ (naming_dn, "naming", "DomainNamingMasterRole"),
+ (domaindns_dn, "domaindns", "DomainDnsZonesMasterRole"),
+ (forestdns_dn, "forestdns", "ForestDnsZonesMasterRole"),
+ ]
+
+ for master in masters:
+ (dn, short_name, long_name) = master
+ try:
+ master = get_fsmo_roleowner(samdb, dn, short_name)
+ if master is not None:
+ self.message("%s owner: %s" % (long_name, str(master)))
+ else:
+ self.message("%s has no current owner" % (long_name))
+ except CommandError as e:
+ self.message("%s: * %s" % (long_name, e.message))
+
+
+class cmd_fsmo_transfer(Command):
+    """Transfer the role."""
+
+    synopsis = "%prog [options]"
+
+    takes_optiongroups = {
+        "sambaopts": options.SambaOptions,
+        "credopts": options.CredentialsOptions,
+        "versionopts": options.VersionOptions,
+    }
+
+    takes_options = [
+        Option("-H", "--URL", help="LDB URL for database or target server",
+               type=str, metavar="URL", dest="H"),
+        Option("--role", type="choice", choices=["rid", "pdc", "infrastructure",
+               "schema", "naming", "domaindns", "forestdns", "all"],
+               help="""The FSMO role to seize or transfer.\n
+rid=RidAllocationMasterRole\n
+schema=SchemaMasterRole\n
+pdc=PdcEmulationMasterRole\n
+naming=DomainNamingMasterRole\n
+infrastructure=InfrastructureMasterRole\n
+domaindns=DomainDnsZonesMasterRole\n
+forestdns=ForestDnsZonesMasterRole\n
+all=all of the above\n
+You must provide an Admin user and password."""),
+    ]
+
+    takes_args = []
+
+    # NOTE(review): 'force' is accepted but never used here and there is no
+    # matching --force option; it looks copied from cmd_fsmo_seize.run --
+    # confirm before removing it.
+    def run(self, force=None, H=None, role=None,
+            credopts=None, sambaopts=None, versionopts=None):
+        """Transfer the requested FSMO role(s) to this DC."""
+
+        lp = sambaopts.get_loadparm()
+        creds = credopts.get_credentials(lp, fallback_machine=True)
+
+        samdb = SamDB(url=H, session_info=system_session(),
+                      credentials=creds, lp=lp)
+
+        if role == "all":
+            transfer_role(self.outf, "rid", samdb)
+            transfer_role(self.outf, "pdc", samdb)
+            transfer_role(self.outf, "naming", samdb)
+            transfer_role(self.outf, "infrastructure", samdb)
+            transfer_role(self.outf, "schema", samdb)
+            transfer_dns_role(self.outf, sambaopts, credopts,
+                              "domaindns", samdb)
+            transfer_dns_role(self.outf, sambaopts, credopts, "forestdns",
+                              samdb)
+        else:
+            if role == "domaindns" or role == "forestdns":
+                transfer_dns_role(self.outf, sambaopts, credopts, role, samdb)
+            else:
+                transfer_role(self.outf, role, samdb)
+
+
+class cmd_fsmo(SuperCommand):
+ """Flexible Single Master Operations (FSMO) roles management."""
+
+ subcommands = {}
+ subcommands["seize"] = cmd_fsmo_seize()
+ subcommands["show"] = cmd_fsmo_show()
+ subcommands["transfer"] = cmd_fsmo_transfer()
diff --git a/python/samba/netcmd/gpcommon.py b/python/samba/netcmd/gpcommon.py
new file mode 100644
index 0000000..b8ac09e
--- /dev/null
+++ b/python/samba/netcmd/gpcommon.py
@@ -0,0 +1,55 @@
+# Samba common group policy functions
+#
+# Copyright Andrew Tridgell 2010
+# Copyright Amitay Isaacs 2011-2012 <amitay@gmail.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+import ldb
+from samba.credentials import SMB_SIGNING_REQUIRED
+from samba.samba3 import param as s3param
+from samba.samba3 import libsmb_samba_internal as libsmb
+from samba.netcmd import CommandError
+
def get_gpo_dn(samdb, gpo):
    """Return the DN of GPO *gpo* under CN=Policies,CN=System."""
    gpo_dn = samdb.get_default_basedn()
    gpo_dn.add_child(ldb.Dn(samdb, "CN=Policies,CN=System"))
    gpo_dn.add_child(ldb.Dn(samdb, "CN=%s" % gpo))
    return gpo_dn
+
def create_directory_hier(conn, remotedir):
    """Create *remotedir* (and any missing parents) over the SMB connection.

    Accepts forward- or backslash-separated paths.  Empty components
    (produced by leading or doubled separators) are skipped, so we never
    attempt to chkpath/mkdir the bare separator itself.

    :param conn: SMB connection (libsmb.Conn-like object)
    :param remotedir: path relative to the share root
    """
    path = ""
    for component in remotedir.replace('/', '\\').split('\\'):
        if not component:
            # skip empty components from leading/doubled separators
            continue
        path = path + '\\' + component
        if not conn.chkpath(path):
            conn.mkdir(path)
+
def smb_connection(dc_hostname, service, lp, creds):
    """Open a signed SMB connection to *service* on *dc_hostname*.

    Signing is forced for the connection; the credentials' previous
    signing state is restored afterwards — including on failure, which
    the original version did not do.

    :raises CommandError: if the SMB connection cannot be established
    """
    # Force signing for the smb connection
    saved_signing_state = creds.get_smb_signing()
    creds.set_smb_signing(SMB_SIGNING_REQUIRED)
    try:
        # the SMB bindings rely on having a s3 loadparm
        s3_lp = s3param.get_context()
        s3_lp.load(lp.configfile)
        conn = libsmb.Conn(dc_hostname, service, lp=s3_lp, creds=creds)
    except Exception as e:
        # keep the original cause so the user can see why it failed
        raise CommandError("Error connecting to '%s' using SMB" % dc_hostname,
                           e)
    finally:
        # Reset signing state even if the connection attempt raised
        creds.set_smb_signing(saved_signing_state)
    return conn
diff --git a/python/samba/netcmd/gpo.py b/python/samba/netcmd/gpo.py
new file mode 100644
index 0000000..ba55b2e
--- /dev/null
+++ b/python/samba/netcmd/gpo.py
@@ -0,0 +1,4513 @@
+# implement samba_tool gpo commands
+#
+# Copyright Andrew Tridgell 2010
+# Copyright Amitay Isaacs 2011-2012 <amitay@gmail.com>
+#
+# based on C implementation by Guenther Deschner and Wilco Baan Hofman
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+import os
+import sys
+import samba.getopt as options
+import ldb
+import re
+import xml.etree.ElementTree as ET
+import shutil
+import tempfile
+
+from samba.auth import system_session
+from samba.netcmd import (
+ Command,
+ CommandError,
+ Option,
+ SuperCommand,
+)
+from samba.samdb import SamDB
+from samba import dsdb
+from samba.dcerpc import security
+from samba.ndr import ndr_unpack, ndr_pack
+from samba.dcerpc import preg
+import samba.security
+import samba.auth
+from samba.auth import AUTH_SESSION_INFO_DEFAULT_GROUPS, AUTH_SESSION_INFO_AUTHENTICATED, AUTH_SESSION_INFO_SIMPLE_PRIVILEGES
+from samba.netcmd.common import netcmd_finddc
+from samba import policy
+from samba.samba3 import libsmb_samba_internal as libsmb
+from samba import NTSTATUSError
+import uuid
+from samba.ntacls import dsacl2fsacl
+from samba.dcerpc import nbt
+from samba.net import Net
+from samba.gp_parse import GPParser, GPNoParserException, GPGeneralizeException
+from samba.gp_parse.gp_pol import GPPolParser
+from samba.gp_parse.gp_ini import (
+ GPIniParser,
+ GPTIniParser,
+ GPFDeploy1IniParser,
+ GPScriptsIniParser
+)
+from samba.gp_parse.gp_csv import GPAuditCsvParser
+from samba.gp_parse.gp_inf import GptTmplInfParser
+from samba.gp_parse.gp_aas import GPAasParser
+from samba import param
+from samba.netcmd.common import attr_default
+from samba.common import get_bytes, get_string
+from configparser import ConfigParser
+from io import StringIO, BytesIO
+from samba.gp.vgp_files_ext import calc_mode, stat_from_mode
+import hashlib
+import json
+from samba.registry import str_regtype
+from samba.ntstatus import (
+ NT_STATUS_OBJECT_NAME_INVALID,
+ NT_STATUS_OBJECT_NAME_NOT_FOUND,
+ NT_STATUS_OBJECT_PATH_NOT_FOUND,
+ NT_STATUS_OBJECT_NAME_COLLISION,
+ NT_STATUS_ACCESS_DENIED
+)
+from samba.netcmd.gpcommon import (
+ create_directory_hier,
+ smb_connection,
+ get_gpo_dn
+)
+from samba.policies import RegistryGroupPolicies
+from samba.dcerpc.misc import REG_MULTI_SZ
+from samba.gp.gpclass import register_gp_extension, list_gp_extensions, \
+ unregister_gp_extension
+
+
def gpo_flags_string(value):
    """Return a human-readable string for the GPO flags *value*."""
    flag_names = policy.get_gpo_flags(value)
    return ' '.join(flag_names) if flag_names else 'NONE'
+
+
def gplink_options_string(value):
    """Return a human-readable string for the gPLink options *value*."""
    option_names = policy.get_gplink_options(value)
    return ' '.join(option_names) if option_names else 'NONE'
+
+
def parse_gplink(gplink):
    """Parse a gPLink attribute value into a list of dicts.

    Each entry of the result has the form
    ``{'dn': <link target DN>, 'options': <int>}``.

    :raises RuntimeError: on a malformed gPLink entry
    """
    entries = []

    if not gplink.strip():
        return entries

    for chunk in gplink.split(']'):
        if not chunk:
            continue
        parts = chunk.split(';')
        if len(parts) != 2 or not parts[0].startswith("[LDAP://"):
            raise RuntimeError("Badly formed gPLink '%s'" % chunk)
        # strip the leading "[LDAP://" to leave the bare DN
        entries.append({'dn': parts[0][8:], 'options': int(parts[1])})
    return entries
+
+
def encode_gplink(gplist):
    """Encode a list of dn/options dicts into a gPLink attribute string."""
    parts = ["[LDAP://%s;%d]" % (entry['dn'], entry['options'])
             for entry in gplist]
    return "".join(parts)
+
+
def dc_url(lp, creds, url=None, dc=None):
    """Return an LDAP URL for a usable DC.

    An explicit *url* wins; otherwise the URL is built from *dc*, and
    if neither is given a writable DC is located via netcmd_finddc.

    :raises RuntimeError: if no DC for the domain can be found
    """
    if url is not None:
        return url
    if dc is None:
        try:
            dc = netcmd_finddc(lp, creds)
        except Exception as e:
            raise RuntimeError("Could not find a DC for domain", e)
    return 'ldap://' + dc
+
+
def get_gpo_info(samdb, gpo=None, displayname=None, dn=None,
                 sd_flags=(security.SECINFO_OWNER |
                           security.SECINFO_GROUP |
                           security.SECINFO_DACL |
                           security.SECINFO_SACL)):
    """Get GPO information using gpo, displayname or dn.

    With no selector all GPOs under CN=Policies,CN=System are returned.
    If several selectors are given, *displayname* overrides *gpo* in the
    search filter, and *dn* additionally switches to a SCOPE_BASE search
    on that DN.

    :param samdb: SamDB/ldb connection
    :param gpo: GPO name (GUID string) to match against 'name'
    :param displayname: value to match against 'displayName'
    :param dn: exact GPO DN to read
    :param sd_flags: which parts of nTSecurityDescriptor to request
    :return: ldb search result (may contain several messages)
    :raises CommandError: if the search fails
    """
    policies_dn = samdb.get_default_basedn()
    policies_dn.add_child(ldb.Dn(samdb, "CN=Policies,CN=System"))

    # Defaults: enumerate every GPO directly under the policies container
    base_dn = policies_dn
    search_expr = "(objectClass=groupPolicyContainer)"
    search_scope = ldb.SCOPE_ONELEVEL

    if gpo is not None:
        search_expr = "(&(objectClass=groupPolicyContainer)(name=%s))" % ldb.binary_encode(gpo)

    if displayname is not None:
        search_expr = "(&(objectClass=groupPolicyContainer)(displayname=%s))" % ldb.binary_encode(displayname)

    if dn is not None:
        base_dn = dn
        search_scope = ldb.SCOPE_BASE

    try:
        # sd_flags control restricts which parts of the security
        # descriptor are returned (and hence the access required).
        msg = samdb.search(base=base_dn, scope=search_scope,
                           expression=search_expr,
                           attrs=['nTSecurityDescriptor',
                                  'versionNumber',
                                  'flags',
                                  'name',
                                  'displayName',
                                  'gPCFileSysPath',
                                  'gPCMachineExtensionNames',
                                  'gPCUserExtensionNames'],
                           controls=['sd_flags:1:%d' % sd_flags])
    except Exception as e:
        if gpo is not None:
            mesg = "Cannot get information for GPO %s" % gpo
        else:
            mesg = "Cannot get information for GPOs"
        raise CommandError(mesg, e)

    return msg
+
+
def get_gpo_containers(samdb, gpo):
    """Return the containers whose gPLink attribute references *gpo*.

    :raises CommandError: if the search fails
    """
    # substring match: the GPO GUID appears inside the gPLink value
    expression = "(&(objectClass=*)(gPLink=*%s*))" % gpo
    try:
        result = samdb.search(expression=expression, attrs=['gPLink'])
    except Exception as e:
        raise CommandError("Could not find container(s) with GPO %s" % gpo, e)

    return result
+
+
def del_gpo_link(samdb, container_dn, gpo):
    """Delete the GPO link for *gpo* from *container_dn*.

    :raises CommandError: if the container does not exist, has no
        gPLink attribute, or is not linked to *gpo*
    """
    # Check if valid Container DN and get existing GPlinks
    try:
        msg = samdb.search(base=container_dn, scope=ldb.SCOPE_BASE,
                           expression="(objectClass=*)",
                           attrs=['gPLink'])[0]
    except Exception as e:
        raise CommandError("Container '%s' does not exist" % container_dn, e)

    found = False
    gpo_dn = str(get_gpo_dn(samdb, gpo))
    if 'gPLink' in msg:
        gplist = parse_gplink(str(msg['gPLink'][0]))
        for g in gplist:
            # DN comparison must be case-insensitive
            if g['dn'].lower() == gpo_dn.lower():
                gplist.remove(g)
                found = True
                break
    else:
        raise CommandError("No GPO(s) linked to this container")

    if not found:
        raise CommandError("GPO '%s' not linked to this container" % gpo)

    m = ldb.Message()
    m.dn = container_dn
    if gplist:
        # Links remain: replace gPLink with the re-encoded remainder
        gplink_str = encode_gplink(gplist)
        m['r0'] = ldb.MessageElement(gplink_str, ldb.FLAG_MOD_REPLACE, 'gPLink')
    else:
        # Last link removed: delete the gPLink attribute entirely
        m['d0'] = ldb.MessageElement(msg['gPLink'][0], ldb.FLAG_MOD_DELETE, 'gPLink')
    try:
        samdb.modify(m)
    except Exception as e:
        raise CommandError("Error removing GPO from container", e)
+
+
def parse_unc(unc):
    """Parse a UNC string into [hostname, service, filepath].

    Accepts both backslash (\\\\host\\svc\\path) and forward slash
    (//host/svc/path) forms.

    :raises ValueError: if *unc* is not a valid UNC string
    """
    for prefix, sep in (('\\\\', '\\'), ('//', '/')):
        if unc.startswith(prefix):
            parts = unc[2:].split(sep, 2)
            if len(parts) == 3:
                return parts
            break

    raise ValueError("Invalid UNC string: %s" % unc)
+
+
def find_parser(name, flags=re.IGNORECASE):
    """Return the GP parser instance for the policy file *name*.

    Specific filenames are matched first; generic extension patterns
    follow, and anything unrecognised falls back to the binary-copy
    GPParser.
    """
    dispatch = [
        (r'fdeploy1\.ini$', GPFDeploy1IniParser),
        (r'audit\.csv$', GPAuditCsvParser),
        (r'GptTmpl\.inf$', GptTmplInfParser),
        (r'GPT\.INI$', GPTIniParser),
        (r'scripts\.ini$', GPScriptsIniParser),
        (r'psscripts\.ini$', GPScriptsIniParser),
        # GPE.INI does not appear in the protocol specifications!
        #
        # It appears to be a legacy file used to maintain
        # gPCUserExtensionNames and gPCMachineExtensionNames.
        # We should just copy the file as binary.
        (r'GPE\.INI$', GPParser),
        (r'.*\.ini$', GPIniParser),
        (r'.*\.pol$', GPPolParser),
        (r'.*\.aas$', GPAasParser),
    ]
    for pattern, parser_class in dispatch:
        if re.match(pattern, name, flags=flags):
            return parser_class()

    return GPParser()
+
+
def backup_directory_remote_to_local(conn, remotedir, localdir):
    """Backup the SYSVOL tree *remotedir* from SMB into *localdir*.

    Every remote file is saved verbatim under its name plus the
    '.SAMBABACKUP' suffix, and additionally generalised to an XML
    representation (<name>.xml) via the matching GP parser.
    """
    SUFFIX = '.SAMBABACKUP'
    if not os.path.isdir(localdir):
        os.mkdir(localdir)
    # Iterative depth-first walk using parallel remote/local stacks
    r_dirs = [ remotedir ]
    l_dirs = [ localdir ]
    while r_dirs:
        r_dir = r_dirs.pop()
        l_dir = l_dirs.pop()

        # attr_flags (module level) includes system/hidden entries so
        # nothing is skipped; sort for a deterministic backup order
        dirlist = conn.list(r_dir, attribs=attr_flags)
        dirlist.sort(key=lambda x : x['name'])
        for e in dirlist:
            r_name = r_dir + '\\' + e['name']
            l_name = os.path.join(l_dir, e['name'])

            if e['attrib'] & libsmb.FILE_ATTRIBUTE_DIRECTORY:
                r_dirs.append(r_name)
                l_dirs.append(l_name)
                os.mkdir(l_name)
            else:
                data = conn.loadfile(r_name)
                with open(l_name + SUFFIX, 'wb') as f:
                    f.write(data)

                # Also produce the generalised XML form for restore
                parser = find_parser(e['name'])
                parser.parse(data)
                parser.write_xml(l_name + '.xml')
+
+
# Attribute mask used when listing SYSVOL directories: include system,
# hidden and archive files as well as directories so nothing is skipped.
attr_flags = libsmb.FILE_ATTRIBUTE_SYSTEM | \
    libsmb.FILE_ATTRIBUTE_DIRECTORY | \
    libsmb.FILE_ATTRIBUTE_ARCHIVE | \
    libsmb.FILE_ATTRIBUTE_HIDDEN
+
+
def copy_directory_remote_to_local(conn, remotedir, localdir):
    """Recursively copy *remotedir* from the SMB connection to *localdir*.

    Listings are processed in name order so the copy is deterministic.
    """
    if not os.path.isdir(localdir):
        os.mkdir(localdir)
    # Iterative depth-first walk using parallel remote/local stacks
    r_dirs = [remotedir]
    l_dirs = [localdir]
    while r_dirs:
        r_dir = r_dirs.pop()
        l_dir = l_dirs.pop()

        dirlist = conn.list(r_dir, attribs=attr_flags)
        dirlist.sort(key=lambda x: x['name'])
        for e in dirlist:
            r_name = r_dir + '\\' + e['name']
            l_name = os.path.join(l_dir, e['name'])

            if e['attrib'] & libsmb.FILE_ATTRIBUTE_DIRECTORY:
                r_dirs.append(r_name)
                l_dirs.append(l_name)
                os.mkdir(l_name)
            else:
                data = conn.loadfile(r_name)
                # use a context manager so the file handle is always
                # closed (the original leaked it via open().write())
                with open(l_name, 'wb') as f:
                    f.write(data)
+
+
def copy_directory_local_to_remote(conn, localdir, remotedir,
                                   ignore_existing_dir=False,
                                   keep_existing_files=False):
    """Recursively copy *localdir* onto the SMB connection at *remotedir*.

    :param ignore_existing_dir: do not fail when a remote directory
        already exists
    :param keep_existing_files: skip files that already exist remotely
    """
    if not conn.chkpath(remotedir):
        conn.mkdir(remotedir)
    # Iterative depth-first walk using parallel local/remote stacks
    l_dirs = [localdir]
    r_dirs = [remotedir]
    while l_dirs:
        l_dir = l_dirs.pop()
        r_dir = r_dirs.pop()

        dirlist = os.listdir(l_dir)
        dirlist.sort()
        for e in dirlist:
            l_name = os.path.join(l_dir, e)
            r_name = r_dir + '\\' + e

            if os.path.isdir(l_name):
                l_dirs.append(l_name)
                r_dirs.append(r_name)
                try:
                    conn.mkdir(r_name)
                except NTSTATUSError:
                    if not ignore_existing_dir:
                        raise
            else:
                if keep_existing_files:
                    # probe the remote file; an NTSTATUSError means it
                    # does not exist, so fall through and upload it
                    try:
                        conn.loadfile(r_name)
                        continue
                    except NTSTATUSError:
                        pass

                # use a context manager so the file handle is always
                # closed (the original leaked it via open().read())
                with open(l_name, 'rb') as f:
                    data = f.read()
                conn.savefile(r_name, data)
+
+
class GPOCommand(Command):
    """Base class for GPO commands sharing tmpdir and LDAP helpers."""

    def construct_tmpdir(self, tmpdir, gpo):
        """Ensure that the temporary directory structure used in fetch,
        backup, create, and restore is consistent.

        If --tmpdir is used the named directory must be present, which may
        contain a 'policy' subdirectory, but 'policy' must not itself have
        a subdirectory with the gpo name. The policy and gpo directories
        will be created.

        If --tmpdir is not used, a temporary directory is securely created.

        :return: tuple of (tmpdir, gpodir)
        :raises CommandError: on a missing tmpdir, an existing gpo
            directory, or failure to create the gpo directory
        """
        if tmpdir is None:
            tmpdir = tempfile.mkdtemp()
            print("Using temporary directory %s (use --tmpdir to change)" % tmpdir,
                  file=self.outf)

        if not os.path.isdir(tmpdir):
            raise CommandError("Temporary directory '%s' does not exist" % tmpdir)

        localdir = os.path.join(tmpdir, "policy")
        if not os.path.isdir(localdir):
            os.mkdir(localdir)

        gpodir = os.path.join(localdir, gpo)
        if os.path.isdir(gpodir):
            # refuse to clobber a previous run's output
            raise CommandError(
                "GPO directory '%s' already exists, refusing to overwrite" % gpodir)

        try:
            os.mkdir(gpodir)
        except (IOError, OSError) as e:
            # fixed typo in the original message ("teporary")
            raise CommandError("Error creating temporary GPO directory", e)

        return tmpdir, gpodir

    def samdb_connect(self):
        """Make an LDAP connection to self.url using self.creds/self.lp."""
        try:
            self.samdb = SamDB(url=self.url,
                               session_info=system_session(),
                               credentials=self.creds, lp=self.lp)
        except Exception as e:
            raise CommandError("LDAP connection to %s failed " % self.url, e)
+
+
class cmd_listall(GPOCommand):
    """List all GPOs."""

    synopsis = "%prog [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server", type=str,
               metavar="URL", dest="H")
    ]

    def run(self, H=None, sambaopts=None, credopts=None, versionopts=None):
        """Connect (locating a writable DC when -H is absent) and print
        one summary paragraph per GPO."""
        self.lp = sambaopts.get_loadparm()
        self.creds = credopts.get_credentials(self.lp, fallback_machine=True)

        self.url = dc_url(self.lp, self.creds, H)

        self.samdb_connect()

        # No selector: enumerate every GPO
        msg = get_gpo_info(self.samdb, None)

        for m in msg:
            self.outf.write("GPO : %s\n" % m['name'][0])
            self.outf.write("display name : %s\n" % m['displayName'][0])
            self.outf.write("path : %s\n" % m['gPCFileSysPath'][0])
            self.outf.write("dn : %s\n" % m.dn)
            self.outf.write("version : %s\n" % attr_default(m, 'versionNumber', '0'))
            self.outf.write("flags : %s\n" % gpo_flags_string(int(attr_default(m, 'flags', 0))))
            self.outf.write("\n")
+
+
class cmd_list(GPOCommand):
    """List GPOs for an account."""

    synopsis = "%prog <username|machinename> [options]"

    takes_args = ['accountname']
    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server",
               type=str, metavar="URL", dest="H")
    ]

    def run(self, accountname, H=None, sambaopts=None, credopts=None, versionopts=None):
        """Walk the account's container ancestry and print every GPO
        that applies to it (honouring link options, GPO flags and the
        account's access rights)."""
        self.lp = sambaopts.get_loadparm()
        self.creds = credopts.get_credentials(self.lp, fallback_machine=True)

        self.url = dc_url(self.lp, self.creds, H)

        self.samdb_connect()

        try:
            # Match either a user (samAccountName) or a computer account
            # (samAccountName with a trailing '$')
            msg = self.samdb.search(expression='(&(|(samAccountName=%s)(samAccountName=%s$))(objectClass=User))' %
                                    (ldb.binary_encode(accountname), ldb.binary_encode(accountname)))
            user_dn = msg[0].dn
        except Exception:
            raise CommandError("Failed to find account %s" % accountname)

        # check if its a computer account
        try:
            msg = self.samdb.search(base=user_dn, scope=ldb.SCOPE_BASE, attrs=['objectClass'])[0]
            is_computer = 'computer' in msg['objectClass']
        except Exception:
            raise CommandError("Failed to find objectClass for %s" % accountname)

        session_info_flags = (AUTH_SESSION_INFO_DEFAULT_GROUPS |
                              AUTH_SESSION_INFO_AUTHENTICATED)

        # When connecting to a remote server, don't look up the local privilege DB
        if self.url is not None and self.url.startswith('ldap'):
            session_info_flags |= AUTH_SESSION_INFO_SIMPLE_PRIVILEGES

        # Build a security token for the account so we can evaluate the
        # DACL on each GPO
        session = samba.auth.user_session(self.samdb, lp_ctx=self.lp, dn=user_dn,
                                          session_info_flags=session_info_flags)

        token = session.security_token

        gpos = []

        # Walk from the account's parent up to the domain root; a
        # container with GPO_BLOCK_INHERITANCE stops non-enforced links
        # above it from applying
        inherit = True
        dn = ldb.Dn(self.samdb, str(user_dn)).parent()
        while True:
            msg = self.samdb.search(base=dn, scope=ldb.SCOPE_BASE, attrs=['gPLink', 'gPOptions'])[0]
            if 'gPLink' in msg:
                glist = parse_gplink(str(msg['gPLink'][0]))
                for g in glist:
                    # Enforced links apply even when inheritance is blocked
                    if not inherit and not (g['options'] & dsdb.GPLINK_OPT_ENFORCE):
                        continue
                    if g['options'] & dsdb.GPLINK_OPT_DISABLE:
                        continue

                    try:
                        sd_flags = (security.SECINFO_OWNER |
                                    security.SECINFO_GROUP |
                                    security.SECINFO_DACL)
                        gmsg = self.samdb.search(base=g['dn'], scope=ldb.SCOPE_BASE,
                                                 attrs=['name', 'displayName', 'flags',
                                                        'nTSecurityDescriptor'],
                                                 controls=['sd_flags:1:%d' % sd_flags])
                        secdesc_ndr = gmsg[0]['nTSecurityDescriptor'][0]
                        secdesc = ndr_unpack(security.descriptor, secdesc_ndr)
                    except Exception:
                        self.outf.write("Failed to fetch gpo object with nTSecurityDescriptor %s\n" %
                                        g['dn'])
                        continue

                    # The GPO only applies if the account can read it
                    try:
                        samba.security.access_check(secdesc, token,
                                                    security.SEC_STD_READ_CONTROL |
                                                    security.SEC_ADS_LIST |
                                                    security.SEC_ADS_READ_PROP)
                    except RuntimeError:
                        self.outf.write("Failed access check on %s\n" % msg.dn)
                        continue

                    # check the flags on the GPO
                    flags = int(attr_default(gmsg[0], 'flags', 0))
                    if is_computer and (flags & dsdb.GPO_FLAG_MACHINE_DISABLE):
                        continue
                    if not is_computer and (flags & dsdb.GPO_FLAG_USER_DISABLE):
                        continue
                    gpos.append((gmsg[0]['displayName'][0], gmsg[0]['name'][0]))

            # check if this blocks inheritance
            gpoptions = int(attr_default(msg, 'gPOptions', 0))
            if gpoptions & dsdb.GPO_BLOCK_INHERITANCE:
                inherit = False

            if dn == self.samdb.get_default_basedn():
                break
            dn = dn.parent()

        if is_computer:
            msg_str = 'computer'
        else:
            msg_str = 'user'

        self.outf.write("GPOs for %s %s\n" % (msg_str, accountname))
        for g in gpos:
            self.outf.write(" %s %s\n" % (g[0], g[1]))
+
+
class cmd_show(GPOCommand):
    """Show information for a GPO."""

    synopsis = "%prog <gpo> [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_args = ['gpo']

    takes_options = [
        Option("-H", help="LDB URL for database or target server", type=str)
    ]

    def run(self, gpo, H=None, sambaopts=None, credopts=None, versionopts=None):
        """Print the GPO's LDAP attributes, its ACL as SDDL, and the
        Registry.pol policies read from SYSVOL over SMB."""
        self.lp = sambaopts.get_loadparm()
        self.creds = credopts.get_credentials(self.lp, fallback_machine=True)

        # We need to know writable DC to setup SMB connection
        if H and H.startswith('ldap://'):
            dc_hostname = H[7:]
            self.url = H
        else:
            dc_hostname = netcmd_finddc(self.lp, self.creds)
            self.url = dc_url(self.lp, self.creds, dc=dc_hostname)

        self.samdb_connect()

        try:
            msg = get_gpo_info(self.samdb, gpo)[0]
        except Exception:
            raise CommandError("GPO '%s' does not exist" % gpo)

        try:
            secdesc_ndr = msg['nTSecurityDescriptor'][0]
            secdesc = ndr_unpack(security.descriptor, secdesc_ndr)
            secdesc_sddl = secdesc.as_sddl()
        except Exception:
            # Descriptor missing or unreadable: don't fail the command
            secdesc_sddl = "<hidden>"

        self.outf.write("GPO : %s\n" % msg['name'][0])
        self.outf.write("display name : %s\n" % msg['displayName'][0])
        self.outf.write("path : %s\n" % msg['gPCFileSysPath'][0])
        if 'gPCMachineExtensionNames' in msg:
            self.outf.write("Machine Exts : %s\n" % msg['gPCMachineExtensionNames'][0])
        if 'gPCUserExtensionNames' in msg:
            self.outf.write("User Exts : %s\n" % msg['gPCUserExtensionNames'][0])
        self.outf.write("dn : %s\n" % msg.dn)
        self.outf.write("version : %s\n" % attr_default(msg, 'versionNumber', '0'))
        self.outf.write("flags : %s\n" % gpo_flags_string(int(attr_default(msg, 'flags', 0))))
        self.outf.write("ACL : %s\n" % secdesc_sddl)

        # SMB connect to DC
        conn = smb_connection(dc_hostname,
                              'sysvol',
                              lp=self.lp,
                              creds=self.creds)

        realm = self.lp.get('realm')
        # '%s' placeholder is filled with MACHINE/USER below
        pol_file = '\\'.join([realm.lower(), 'Policies', gpo,
                              '%s\\Registry.pol'])
        policy_defs = []
        for policy_class in ['MACHINE', 'USER']:
            try:
                pol_data = ndr_unpack(preg.file,
                                      conn.loadfile(pol_file % policy_class))
            except NTSTATUSError as e:
                if e.args[0] in [NT_STATUS_OBJECT_NAME_INVALID,
                                 NT_STATUS_OBJECT_NAME_NOT_FOUND,
                                 NT_STATUS_OBJECT_PATH_NOT_FOUND]:
                    continue  # The file doesn't exist, so there is nothing to list
                if e.args[0] == NT_STATUS_ACCESS_DENIED:
                    raise CommandError("The authenticated user does "
                                       "not have sufficient privileges")
                raise

            for entry in pol_data.entries:
                # '**delvals.' is a deletion marker, not a real value
                if entry.valuename == "**delvals.":
                    continue
                defs = {}
                defs['keyname'] = entry.keyname
                defs['valuename'] = entry.valuename
                defs['class'] = policy_class
                defs['type'] = str_regtype(entry.type)
                defs['data'] = entry.data
                # Bytes aren't JSON serializable
                if type(defs['data']) == bytes:
                    if entry.type == REG_MULTI_SZ:
                        data = defs['data'].decode('utf-16-le')
                        defs['data'] = data.rstrip('\x00').split('\x00')
                    else:
                        defs['data'] = list(defs['data'])
                policy_defs.append(defs)
        self.outf.write("Policies :\n")
        json.dump(policy_defs, self.outf, indent=4)
        self.outf.write("\n")
+
+
class cmd_load(GPOCommand):
    """Load policies onto a GPO.

    Reads json from standard input until EOF, unless a json formatted
    file is provided via --content.

    Example json_input:
    [
        {
            "keyname": "Software\\Policies\\Mozilla\\Firefox\\Homepage",
            "valuename": "StartPage",
            "class": "USER",
            "type": "REG_SZ",
            "data": "homepage"
        },
        {
            "keyname": "Software\\Policies\\Mozilla\\Firefox\\Homepage",
            "valuename": "URL",
            "class": "USER",
            "type": "REG_SZ",
            "data": "google.com"
        },
        {
            "keyname": "Software\\Microsoft\\Internet Explorer\\Toolbar",
            "valuename": "IEToolbar",
            "class": "USER",
            "type": "REG_BINARY",
            "data": [0]
        },
        {
            "keyname": "Software\\Policies\\Microsoft\\InputPersonalization",
            "valuename": "RestrictImplicitTextCollection",
            "class": "USER",
            "type": "REG_DWORD",
            "data": 1
        }
    ]

    Valid class attributes: MACHINE|USER|BOTH
    Data arrays are interpreted as bytes.

    The --machine-ext-name and --user-ext-name options are multi-value inputs
    which respectively set the gPCMachineExtensionNames and gPCUserExtensionNames
    ldap attributes on the GPO. These attributes must be set to the correct GUID
    names for Windows Group Policy to work correctly. These GUIDs represent
    the client side extensions to apply on the machine. Linux Group Policy does
    not enforce this constraint.
    {35378EAC-683F-11D2-A89A-00C04FBBCFA2} is provided by default, which
    enables most Registry policies.
    """

    synopsis = "%prog <gpo> [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_args = ['gpo']

    # NOTE(review): action="append" with a non-empty default list means
    # user-supplied --machine-ext-name/--user-ext-name values are appended
    # to the default GUID rather than replacing it — confirm this is the
    # intended optparse behaviour here.
    takes_options = [
        Option("-H", help="LDB URL for database or target server", type=str),
        Option("--content", help="JSON file of policy inputs", type=str),
        Option("--machine-ext-name",
               action="append", dest="machine_exts",
               default=['{35378EAC-683F-11D2-A89A-00C04FBBCFA2}'],
               help="A machine extension name to add to gPCMachineExtensionNames"),
        Option("--user-ext-name",
               action="append", dest="user_exts",
               default=['{35378EAC-683F-11D2-A89A-00C04FBBCFA2}'],
               help="A user extension name to add to gPCUserExtensionNames"),
        Option("--replace", action='store_true', default=False,
               help="Replace the existing Group Policies, rather than merging")
    ]

    def run(self, gpo, H=None, content=None,
            machine_exts=None,
            user_exts=None,
            replace=False, sambaopts=None, credopts=None, versionopts=None):
        """Read policy definitions (stdin or --content), register the
        extension GUIDs on the GPO, then merge or replace its policies."""
        # Re-apply the default GUID when called directly with None
        if machine_exts is None:
            machine_exts = ['{35378EAC-683F-11D2-A89A-00C04FBBCFA2}']
        if user_exts is None:
            user_exts = ['{35378EAC-683F-11D2-A89A-00C04FBBCFA2}']
        if content is None:
            policy_defs = json.loads(sys.stdin.read())
        elif os.path.exists(content):
            with open(content, 'rb') as r:
                policy_defs = json.load(r)
        else:
            raise CommandError("The JSON content file does not exist")

        self.lp = sambaopts.get_loadparm()
        self.creds = credopts.get_credentials(self.lp, fallback_machine=True)
        self.url = dc_url(self.lp, self.creds, H)
        self.samdb_connect()
        reg = RegistryGroupPolicies(gpo, self.lp, self.creds, self.samdb, H)
        for ext_name in machine_exts:
            reg.register_extension_name(ext_name, 'gPCMachineExtensionNames')
        for ext_name in user_exts:
            reg.register_extension_name(ext_name, 'gPCUserExtensionNames')
        try:
            if replace:
                reg.replace_s(policy_defs)
            else:
                reg.merge_s(policy_defs)
        except NTSTATUSError as e:
            if e.args[0] == NT_STATUS_ACCESS_DENIED:
                raise CommandError("The authenticated user does "
                                   "not have sufficient privileges")
            else:
                raise
+
+
class cmd_remove(GPOCommand):
    """Remove policies from a GPO.

    Reads json from standard input until EOF, unless a json formatted
    file is provided via --content.

    Example json_input:
    [
        {
            "keyname": "Software\\Policies\\Mozilla\\Firefox\\Homepage",
            "valuename": "StartPage",
            "class": "USER",
        },
        {
            "keyname": "Software\\Policies\\Mozilla\\Firefox\\Homepage",
            "valuename": "URL",
            "class": "USER",
        },
        {
            "keyname": "Software\\Microsoft\\Internet Explorer\\Toolbar",
            "valuename": "IEToolbar",
            "class": "USER"
        },
        {
            "keyname": "Software\\Policies\\Microsoft\\InputPersonalization",
            "valuename": "RestrictImplicitTextCollection",
            "class": "USER"
        }
    ]

    Valid class attributes: MACHINE|USER|BOTH
    """

    synopsis = "%prog <gpo> [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_args = ['gpo']

    takes_options = [
        Option("-H", help="LDB URL for database or target server", type=str),
        Option("--content", help="JSON file of policy inputs", type=str),
        Option("--machine-ext-name",
               action="append", default=[], dest="machine_exts",
               help="A machine extension name to remove from gPCMachineExtensionNames"),
        Option("--user-ext-name",
               action="append", default=[], dest="user_exts",
               help="A user extension name to remove from gPCUserExtensionNames")
    ]

    def run(self, gpo, H=None, content=None, machine_exts=None, user_exts=None,
            sambaopts=None, credopts=None, versionopts=None):
        """Read policy definitions (stdin or --content), unregister the
        given extension GUIDs, then remove the policies from the GPO."""
        if machine_exts is None:
            machine_exts = []
        if user_exts is None:
            user_exts = []
        if content is None:
            policy_defs = json.loads(sys.stdin.read())
        elif os.path.exists(content):
            with open(content, 'rb') as r:
                policy_defs = json.load(r)
        else:
            raise CommandError("The JSON content file does not exist")

        self.lp = sambaopts.get_loadparm()
        self.creds = credopts.get_credentials(self.lp, fallback_machine=True)
        self.url = dc_url(self.lp, self.creds, H)
        self.samdb_connect()
        reg = RegistryGroupPolicies(gpo, self.lp, self.creds, self.samdb, H)
        for ext_name in machine_exts:
            reg.unregister_extension_name(ext_name, 'gPCMachineExtensionNames')
        for ext_name in user_exts:
            reg.unregister_extension_name(ext_name, 'gPCUserExtensionNames')
        try:
            reg.remove_s(policy_defs)
        except NTSTATUSError as e:
            if e.args[0] == NT_STATUS_ACCESS_DENIED:
                raise CommandError("The authenticated user does "
                                   "not have sufficient privileges")
            else:
                raise
+
+
class cmd_getlink(GPOCommand):
    """List GPO Links for a container."""

    synopsis = "%prog <container_dn> [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_args = ['container_dn']

    takes_options = [
        Option("-H", help="LDB URL for database or target server", type=str)
    ]

    def run(self, container_dn, H=None, sambaopts=None, credopts=None,
            versionopts=None):
        """Print name, display name and link options for every GPO
        linked to *container_dn*."""
        self.lp = sambaopts.get_loadparm()
        self.creds = credopts.get_credentials(self.lp, fallback_machine=True)

        self.url = dc_url(self.lp, self.creds, H)

        self.samdb_connect()

        try:
            msg = self.samdb.search(base=container_dn, scope=ldb.SCOPE_BASE,
                                    expression="(objectClass=*)",
                                    attrs=['gPLink'])[0]
        except Exception:
            raise CommandError("Container '%s' does not exist" % container_dn)

        if 'gPLink' in msg and msg['gPLink']:
            self.outf.write("GPO(s) linked to DN %s\n" % container_dn)
            gplist = parse_gplink(str(msg['gPLink'][0]))
            for g in gplist:
                # look up each linked GPO by DN for its name/displayName
                msg = get_gpo_info(self.samdb, dn=g['dn'])
                self.outf.write(" GPO : %s\n" % msg[0]['name'][0])
                self.outf.write(" Name : %s\n" % msg[0]['displayName'][0])
                self.outf.write(" Options : %s\n" % gplink_options_string(g['options']))
                self.outf.write("\n")
        else:
            self.outf.write("No GPO(s) linked to DN=%s\n" % container_dn)
+
+
class cmd_setlink(GPOCommand):
    """Add or update a GPO link to a container."""

    synopsis = "%prog <container_dn> <gpo> [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_args = ['container_dn', 'gpo']

    takes_options = [
        Option("-H", help="LDB URL for database or target server", type=str),
        Option("--disable", dest="disabled", default=False, action='store_true',
               help="Disable policy"),
        Option("--enforce", dest="enforced", default=False, action='store_true',
               help="Enforce policy")
    ]

    def run(self, container_dn, gpo, H=None, disabled=False, enforced=False,
            sambaopts=None, credopts=None, versionopts=None):
        """Link *gpo* to *container_dn*, optionally disabled/enforced,
        then print the resulting link list."""
        self.lp = sambaopts.get_loadparm()
        self.creds = credopts.get_credentials(self.lp, fallback_machine=True)

        self.url = dc_url(self.lp, self.creds, H)

        self.samdb_connect()

        gplink_options = 0
        if disabled:
            gplink_options |= dsdb.GPLINK_OPT_DISABLE
        if enforced:
            gplink_options |= dsdb.GPLINK_OPT_ENFORCE

        # Check if valid GPO DN
        try:
            get_gpo_info(self.samdb, gpo=gpo)[0]
        except Exception:
            raise CommandError("GPO '%s' does not exist" % gpo)
        gpo_dn = str(get_gpo_dn(self.samdb, gpo))

        # Check if valid Container DN
        try:
            msg = self.samdb.search(base=container_dn, scope=ldb.SCOPE_BASE,
                                    expression="(objectClass=*)",
                                    attrs=['gPLink'])[0]
        except Exception:
            raise CommandError("Container '%s' does not exist" % container_dn)

        # Update existing GPlinks or Add new one
        existing_gplink = False
        if 'gPLink' in msg:
            gplist = parse_gplink(str(msg['gPLink'][0]))
            existing_gplink = True
            found = False
            for g in gplist:
                if g['dn'].lower() == gpo_dn.lower():
                    # NOTE(review): this assignment is dead — a matching
                    # link always raises below, so --disable/--enforce
                    # cannot update an existing link. Confirm intended.
                    g['options'] = gplink_options
                    found = True
                    break
            if found:
                raise CommandError("GPO '%s' already linked to this container" % gpo)
            else:
                # New links get the highest precedence (front of list)
                gplist.insert(0, {'dn': gpo_dn, 'options': gplink_options})
        else:
            gplist = []
            gplist.append({'dn': gpo_dn, 'options': gplink_options})

        gplink_str = encode_gplink(gplist)

        m = ldb.Message()
        m.dn = ldb.Dn(self.samdb, container_dn)

        if existing_gplink:
            m['new_value'] = ldb.MessageElement(gplink_str, ldb.FLAG_MOD_REPLACE, 'gPLink')
        else:
            m['new_value'] = ldb.MessageElement(gplink_str, ldb.FLAG_MOD_ADD, 'gPLink')

        try:
            self.samdb.modify(m)
        except Exception as e:
            raise CommandError("Error adding GPO Link", e)

        self.outf.write("Added/Updated GPO link\n")
        cmd_getlink().run(container_dn, H, sambaopts, credopts, versionopts)
+
+
class cmd_dellink(GPOCommand):
    """Delete GPO link from a container."""

    synopsis = "%prog <container_dn> <gpo> [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_args = ['container', 'gpo']

    takes_options = [
        Option("-H", help="LDB URL for database or target server", type=str),
    ]

    def run(self, container, gpo, H=None, sambaopts=None, credopts=None,
            versionopts=None):
        """Remove the gPLink entry for *gpo* from *container*.

        Raises CommandError if the GPO does not exist.  On success the
        container's remaining links are printed via cmd_getlink.
        """
        self.lp = sambaopts.get_loadparm()
        self.creds = credopts.get_credentials(self.lp, fallback_machine=True)

        self.url = dc_url(self.lp, self.creds, H)

        self.samdb_connect()

        # Check if valid GPO; only the existence check matters, the result
        # itself is discarded.
        try:
            get_gpo_info(self.samdb, gpo=gpo)[0]
        except Exception:
            raise CommandError("GPO '%s' does not exist" % gpo)

        container_dn = ldb.Dn(self.samdb, container)
        del_gpo_link(self.samdb, container_dn, gpo)
        self.outf.write("Deleted GPO link.\n")
        # Show the links still attached to this container.
        cmd_getlink().run(container_dn, H, sambaopts, credopts, versionopts)
+
+
class cmd_listcontainers(GPOCommand):
    """List all linked containers for a GPO."""

    synopsis = "%prog <gpo> [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_args = ['gpo']

    takes_options = [
        Option("-H", help="LDB URL for database or target server", type=str)
    ]

    def run(self, gpo, H=None, sambaopts=None, credopts=None,
            versionopts=None):
        """Print the DN of every container whose gPLink references *gpo*."""
        self.lp = sambaopts.get_loadparm()
        self.creds = credopts.get_credentials(self.lp, fallback_machine=True)

        self.url = dc_url(self.lp, self.creds, H)

        self.samdb_connect()

        # get_gpo_containers returns the LDAP entries linking to this GPO.
        msg = get_gpo_containers(self.samdb, gpo)
        if len(msg):
            self.outf.write("Container(s) using GPO %s\n" % gpo)
            for m in msg:
                self.outf.write("    DN: %s\n" % m['dn'])
        else:
            self.outf.write("No Containers using GPO %s\n" % gpo)
+
+
class cmd_getinheritance(GPOCommand):
    """Get inheritance flag for a container."""

    synopsis = "%prog <container_dn> [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_args = ['container_dn']

    takes_options = [
        Option("-H", help="LDB URL for database or target server", type=str)
    ]

    def run(self, container_dn, H=None, sambaopts=None, credopts=None,
            versionopts=None):
        """Report whether *container_dn* blocks or inherits GPOs.

        Reads the container's gPOptions attribute; a missing attribute
        defaults to 0 (inherit).
        """
        self.lp = sambaopts.get_loadparm()
        self.creds = credopts.get_credentials(self.lp, fallback_machine=True)

        self.url = dc_url(self.lp, self.creds, H)

        self.samdb_connect()

        try:
            msg = self.samdb.search(base=container_dn, scope=ldb.SCOPE_BASE,
                                    expression="(objectClass=*)",
                                    attrs=['gPOptions'])[0]
        except Exception:
            raise CommandError("Container '%s' does not exist" % container_dn)

        # gPOptions is optional; absent means the default (inherit).
        inheritance = 0
        if 'gPOptions' in msg:
            inheritance = int(msg['gPOptions'][0])

        if inheritance == dsdb.GPO_BLOCK_INHERITANCE:
            self.outf.write("Container has GPO_BLOCK_INHERITANCE\n")
        else:
            self.outf.write("Container has GPO_INHERIT\n")
+
+
class cmd_setinheritance(GPOCommand):
    """Set inheritance flag on a container."""

    synopsis = "%prog <container_dn> <block|inherit> [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_args = ['container_dn', 'inherit_state']

    takes_options = [
        Option("-H", help="LDB URL for database or target server", type=str)
    ]

    def run(self, container_dn, inherit_state, H=None, sambaopts=None, credopts=None,
            versionopts=None):
        """Write the gPOptions attribute of *container_dn*.

        *inherit_state* must be 'block' or 'inherit' (case-insensitive).
        """
        # Validate the state argument before touching the directory.
        if inherit_state.lower() == 'block':
            inheritance = dsdb.GPO_BLOCK_INHERITANCE
        elif inherit_state.lower() == 'inherit':
            inheritance = dsdb.GPO_INHERIT
        else:
            raise CommandError("Unknown inheritance state (%s)" % inherit_state)

        self.lp = sambaopts.get_loadparm()
        self.creds = credopts.get_credentials(self.lp, fallback_machine=True)

        self.url = dc_url(self.lp, self.creds, H)

        self.samdb_connect()
        try:
            msg = self.samdb.search(base=container_dn, scope=ldb.SCOPE_BASE,
                                    expression="(objectClass=*)",
                                    attrs=['gPOptions'])[0]
        except Exception:
            raise CommandError("Container '%s' does not exist" % container_dn)

        m = ldb.Message()
        m.dn = ldb.Dn(self.samdb, container_dn)

        # REPLACE if the attribute already exists, ADD otherwise.
        if 'gPOptions' in msg:
            m['new_value'] = ldb.MessageElement(str(inheritance), ldb.FLAG_MOD_REPLACE, 'gPOptions')
        else:
            m['new_value'] = ldb.MessageElement(str(inheritance), ldb.FLAG_MOD_ADD, 'gPOptions')

        try:
            self.samdb.modify(m)
        except Exception as e:
            raise CommandError("Error setting inheritance state %s" % inherit_state, e)
+
+
class cmd_fetch(GPOCommand):
    """Download a GPO."""

    synopsis = "%prog <gpo> [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_args = ['gpo']

    takes_options = [
        Option("-H", help="LDB URL for database or target server", type=str),
        Option("--tmpdir", help="Temporary directory for copying policy files", type=str)
    ]

    def run(self, gpo, H=None, tmpdir=None, sambaopts=None, credopts=None, versionopts=None):
        """Copy the GPO's sysvol directory from the DC to a local directory.

        The local target is created under *tmpdir* (or a system temp
        directory) via construct_tmpdir.
        """
        self.lp = sambaopts.get_loadparm()
        self.creds = credopts.get_credentials(self.lp, fallback_machine=True)

        # We need to know writable DC to setup SMB connection
        if H and H.startswith('ldap://'):
            dc_hostname = H[7:]
            self.url = H
        else:
            dc_hostname = netcmd_finddc(self.lp, self.creds)
            self.url = dc_url(self.lp, self.creds, dc=dc_hostname)

        self.samdb_connect()
        try:
            msg = get_gpo_info(self.samdb, gpo)[0]
        except Exception:
            raise CommandError("GPO '%s' does not exist" % gpo)

        # verify UNC path
        unc = str(msg['gPCFileSysPath'][0])
        try:
            [dom_name, service, sharepath] = parse_unc(unc)
        except ValueError:
            raise CommandError("Invalid GPO path (%s)" % unc)

        # SMB connect to DC
        conn = smb_connection(dc_hostname, service, lp=self.lp,
                              creds=self.creds)

        # Copy GPT
        tmpdir, gpodir = self.construct_tmpdir(tmpdir, gpo)

        try:
            copy_directory_remote_to_local(conn, sharepath, gpodir)
        except Exception as e:
            # FIXME: Catch more specific exception
            raise CommandError("Error copying GPO from DC", e)
        self.outf.write('GPO copied to %s\n' % gpodir)
+
+
class cmd_backup(GPOCommand):
    """Backup a GPO."""

    synopsis = "%prog <gpo> [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_args = ['gpo']

    takes_options = [
        Option("-H", help="LDB URL for database or target server", type=str),
        Option("--tmpdir", help="Temporary directory for copying policy files", type=str),
        Option("--generalize", help="Generalize XML entities to restore",
               default=False, action='store_true'),
        Option("--entities", help="File to export defining XML entities for the restore",
               dest='ent_file', type=str)
    ]

    def run(self, gpo, H=None, tmpdir=None, generalize=False, sambaopts=None,
            credopts=None, versionopts=None, ent_file=None):
        """Back up the GPO's sysvol tree to a local directory.

        With --generalize, XML policy files are rewritten with entity
        placeholders so the backup can be restored into another domain;
        the generated entities can optionally be exported to *ent_file*.
        """
        self.lp = sambaopts.get_loadparm()
        self.creds = credopts.get_credentials(self.lp, fallback_machine=True)

        # We need to know writable DC to setup SMB connection
        if H and H.startswith('ldap://'):
            dc_hostname = H[7:]
            self.url = H
        else:
            dc_hostname = netcmd_finddc(self.lp, self.creds)
            self.url = dc_url(self.lp, self.creds, dc=dc_hostname)

        self.samdb_connect()
        try:
            msg = get_gpo_info(self.samdb, gpo)[0]
        except Exception:
            raise CommandError("GPO '%s' does not exist" % gpo)

        # verify UNC path
        unc = str(msg['gPCFileSysPath'][0])
        try:
            [dom_name, service, sharepath] = parse_unc(unc)
        except ValueError:
            raise CommandError("Invalid GPO path (%s)" % unc)

        # SMB connect to DC
        conn = smb_connection(dc_hostname, service, lp=self.lp,
                              creds=self.creds)

        # Copy GPT
        tmpdir, gpodir = self.construct_tmpdir(tmpdir, gpo)

        try:
            backup_directory_remote_to_local(conn, sharepath, gpodir)
        except Exception as e:
            # FIXME: Catch more specific exception
            raise CommandError("Error copying GPO from DC", e)

        self.outf.write('GPO copied to %s\n' % gpodir)

        if generalize:
            self.outf.write('\nAttempting to generalize XML entities:\n')
            entities = cmd_backup.generalize_xml_entities(self.outf, gpodir,
                                                          gpodir)
            import operator
            # Sort by entity name so the exported file is deterministic.
            ents = "".join('<!ENTITY {} "{}\n">'.format(ent[1].strip('&;'), ent[0]) \
                           for ent in sorted(entities.items(), key=operator.itemgetter(1)))

            if ent_file:
                with open(ent_file, 'w') as f:
                    f.write(ents)
                self.outf.write('Entities successfully written to %s\n' %
                                ent_file)
            else:
                self.outf.write('\nEntities:\n')
                self.outf.write(ents)

        # Backup the enabled GPO extension names
        for ext in ('gPCMachineExtensionNames', 'gPCUserExtensionNames'):
            if ext in msg:
                with open(os.path.join(gpodir, ext + '.SAMBAEXT'), 'wb') as f:
                    f.write(msg[ext][0])

    @staticmethod
    def generalize_xml_entities(outf, sourcedir, targetdir):
        """Walk *sourcedir*, generalizing each .xml file into *targetdir*.

        Returns the accumulated {value: entity-name} mapping produced by
        the per-file parsers.  Non-xml files are copied verbatim.
        """
        entities = {}

        if not os.path.exists(targetdir):
            os.mkdir(targetdir)

        # Iterative depth-first traversal: l_dirs/r_dirs are parallel
        # stacks of source and target directories.
        l_dirs = [ sourcedir ]
        r_dirs = [ targetdir ]
        while l_dirs:
            l_dir = l_dirs.pop()
            r_dir = r_dirs.pop()

            dirlist = os.listdir(l_dir)
            dirlist.sort()
            for e in dirlist:
                l_name = os.path.join(l_dir, e)
                r_name = os.path.join(r_dir, e)

                if os.path.isdir(l_name):
                    l_dirs.append(l_name)
                    r_dirs.append(r_name)
                    if not os.path.exists(r_name):
                        os.mkdir(r_name)
                else:
                    if l_name.endswith('.xml'):
                        # Restore the xml file if possible

                        # Get the filename to find the parser
                        to_parse = os.path.basename(l_name)[:-4]

                        parser = find_parser(to_parse)
                        try:
                            with open(l_name, 'r') as ltemp:
                                data = ltemp.read()

                            concrete_xml = ET.fromstring(data)
                            found_entities = parser.generalize_xml(concrete_xml, r_name, entities)
                        except GPGeneralizeException:
                            outf.write('SKIPPING: Generalizing failed for %s\n' % to_parse)

                    else:
                        # No need to generalize non-xml files.
                        #
                        # TODO This could be improved with xml files stored in
                        # the renamed backup file (with custom extension) by
                        # inlining them into the exported backups.
                        if not os.path.samefile(l_name, r_name):
                            shutil.copy2(l_name, r_name)

        return entities
+
+
class cmd_create(GPOCommand):
    """Create an empty GPO."""

    synopsis = "%prog <displayname> [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_args = ['displayname']

    takes_options = [
        Option("-H", help="LDB URL for database or target server", type=str),
        Option("--tmpdir", help="Temporary directory for copying policy files", type=str)
    ]

    def run(self, displayname, H=None, tmpdir=None, sambaopts=None, credopts=None,
            versionopts=None):
        """Create a new, empty GPO with display name *displayname*.

        Creates the LDAP objects (groupPolicyContainer plus its User and
        Machine sub-containers) in one transaction, builds the GPT
        directory tree locally and uploads it to sysvol over SMB with an
        ACL derived from the directory object's security descriptor.

        Raises CommandError if a GPO with the same display name already
        exists or if file/LDAP operations fail.
        """
        self.lp = sambaopts.get_loadparm()
        self.creds = credopts.get_credentials(self.lp, fallback_machine=True)

        net = Net(creds=self.creds, lp=self.lp)

        # We need to know writable DC to setup SMB connection
        if H and H.startswith('ldap://'):
            dc_hostname = H[7:]
            self.url = H
            flags = (nbt.NBT_SERVER_LDAP |
                     nbt.NBT_SERVER_DS |
                     nbt.NBT_SERVER_WRITABLE)
            cldap_ret = net.finddc(address=dc_hostname, flags=flags)
        else:
            flags = (nbt.NBT_SERVER_LDAP |
                     nbt.NBT_SERVER_DS |
                     nbt.NBT_SERVER_WRITABLE)
            cldap_ret = net.finddc(domain=self.lp.get('realm'), flags=flags)
            dc_hostname = cldap_ret.pdc_dns_name
            self.url = dc_url(self.lp, self.creds, dc=dc_hostname)

        self.samdb_connect()

        msg = get_gpo_info(self.samdb, displayname=displayname)
        # BUG FIX: 'msg' is a list of search results, so 'msg.count' is the
        # unbound list.count method; the old 'msg.count > 0' comparison raised
        # TypeError on Python 3 and never detected duplicates.  Use len().
        if len(msg) > 0:
            raise CommandError("A GPO already existing with name '%s'" % displayname)

        # Create new GUID
        guid = str(uuid.uuid4())
        gpo = "{%s}" % guid.upper()

        self.gpo_name = gpo

        realm = cldap_ret.dns_domain
        unc_path = "\\\\%s\\sysvol\\%s\\Policies\\%s" % (realm, realm, gpo)

        # Create GPT
        self.tmpdir, gpodir = self.construct_tmpdir(tmpdir, gpo)
        self.gpodir = gpodir

        try:
            os.mkdir(os.path.join(gpodir, "Machine"))
            os.mkdir(os.path.join(gpodir, "User"))
            gpt_contents = "[General]\r\nVersion=0\r\n"
            # Use a context manager so the file handle is closed promptly
            # (the original relied on garbage collection to close it).
            with open(os.path.join(gpodir, "GPT.INI"), "w") as f:
                f.write(gpt_contents)
        except Exception as e:
            raise CommandError("Error Creating GPO files", e)

        # Connect to DC over SMB
        [dom_name, service, sharepath] = parse_unc(unc_path)
        self.sharepath = sharepath
        conn = smb_connection(dc_hostname, service, lp=self.lp,
                              creds=self.creds)

        self.conn = conn

        self.samdb_transaction = None
        self.samdb.transaction_start()
        try:
            # Add cn=<guid>
            gpo_dn = get_gpo_dn(self.samdb, gpo)

            m = ldb.Message()
            m.dn = gpo_dn
            m['a01'] = ldb.MessageElement("groupPolicyContainer", ldb.FLAG_MOD_ADD, "objectClass")
            self.samdb.add(m)

            # Add cn=User,cn=<guid>
            m = ldb.Message()
            m.dn = ldb.Dn(self.samdb, "CN=User,%s" % str(gpo_dn))
            m['a01'] = ldb.MessageElement("container", ldb.FLAG_MOD_ADD, "objectClass")
            self.samdb.add(m)

            # Add cn=Machine,cn=<guid>
            m = ldb.Message()
            m.dn = ldb.Dn(self.samdb, "CN=Machine,%s" % str(gpo_dn))
            m['a01'] = ldb.MessageElement("container", ldb.FLAG_MOD_ADD, "objectClass")
            self.samdb.add(m)

            # Get new security descriptor
            ds_sd_flags = (security.SECINFO_OWNER |
                           security.SECINFO_GROUP |
                           security.SECINFO_DACL)
            msg = get_gpo_info(self.samdb, gpo=gpo, sd_flags=ds_sd_flags)[0]
            ds_sd_ndr = msg['nTSecurityDescriptor'][0]
            ds_sd = ndr_unpack(security.descriptor, ds_sd_ndr).as_sddl()

            # Create a file system security descriptor
            domain_sid = security.dom_sid(self.samdb.get_domain_sid())
            sddl = dsacl2fsacl(ds_sd, domain_sid)
            fs_sd = security.descriptor.from_sddl(sddl, domain_sid)

            # Copy GPO directory
            create_directory_hier(conn, sharepath)

            # Set ACL
            sio = (security.SECINFO_OWNER |
                   security.SECINFO_GROUP |
                   security.SECINFO_DACL |
                   security.SECINFO_PROTECTED_DACL)
            conn.set_acl(sharepath, fs_sd, sio)

            # Copy GPO files over SMB
            copy_directory_local_to_remote(conn, gpodir, sharepath)

            m = ldb.Message()
            m.dn = gpo_dn
            m['a02'] = ldb.MessageElement(displayname, ldb.FLAG_MOD_REPLACE, "displayName")
            m['a03'] = ldb.MessageElement(unc_path, ldb.FLAG_MOD_REPLACE, "gPCFileSysPath")
            m['a05'] = ldb.MessageElement("0", ldb.FLAG_MOD_REPLACE, "versionNumber")
            m['a07'] = ldb.MessageElement("2", ldb.FLAG_MOD_REPLACE, "gpcFunctionalityVersion")
            m['a04'] = ldb.MessageElement("0", ldb.FLAG_MOD_REPLACE, "flags")
            # permissive_modify keeps REPLACE of missing attributes from failing.
            controls = ["permissive_modify:0"]
            self.samdb.modify(m, controls=controls)
        except Exception:
            self.samdb.transaction_cancel()
            raise
        else:
            self.samdb.transaction_commit()

        if tmpdir is None:
            # Without --tmpdir, we created one in /tmp/. It must go.
            shutil.rmtree(self.tmpdir)

        self.outf.write("GPO '%s' created as %s\n" % (displayname, gpo))
+
+
class cmd_restore(cmd_create):
    """Restore a GPO to a new container."""

    synopsis = "%prog <displayname> <backup location> [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_args = ['displayname', 'backup']

    takes_options = [
        Option("-H", help="LDB URL for database or target server", type=str),
        Option("--tmpdir", help="Temporary directory for copying policy files", type=str),
        Option("--entities", help="File defining XML entities to insert into DOCTYPE header", type=str),
        Option("--restore-metadata", help="Keep the old GPT.INI file and associated version number",
               default=False, action="store_true")
    ]

    def restore_from_backup_to_local_dir(self, sourcedir, targetdir, dtd_header=''):
        """Rebuild GPO files from a backup tree into *targetdir*.

        .xml backup files are re-parsed with *dtd_header* prepended (for
        entity substitution) and written back in their binary form; on
        parser failure the saved .SAMBABACKUP copy is restored instead.
        """
        SUFFIX = '.SAMBABACKUP'

        if not os.path.exists(targetdir):
            os.mkdir(targetdir)

        # Parallel stacks of source/target directories for an iterative walk.
        l_dirs = [ sourcedir ]
        r_dirs = [ targetdir ]
        while l_dirs:
            l_dir = l_dirs.pop()
            r_dir = r_dirs.pop()

            dirlist = os.listdir(l_dir)
            dirlist.sort()
            for e in dirlist:
                l_name = os.path.join(l_dir, e)
                r_name = os.path.join(r_dir, e)

                if os.path.isdir(l_name):
                    l_dirs.append(l_name)
                    r_dirs.append(r_name)
                    if not os.path.exists(r_name):
                        os.mkdir(r_name)
                else:
                    if l_name.endswith('.xml'):
                        # Restore the xml file if possible

                        # Get the filename to find the parser
                        to_parse = os.path.basename(l_name)[:-4]

                        parser = find_parser(to_parse)
                        try:
                            with open(l_name, 'r') as ltemp:
                                data = ltemp.read()
                                xml_head = '<?xml version="1.0" encoding="utf-8"?>'

                                if data.startswith(xml_head):
                                    # It appears that sometimes the DTD rejects
                                    # the xml header being after it.
                                    data = data[len(xml_head):]

                                    # Load the XML file with the DTD (entity) header
                                    parser.load_xml(ET.fromstring(xml_head + dtd_header + data))
                                else:
                                    parser.load_xml(ET.fromstring(dtd_header + data))

                                # Write out the substituted files in the output
                                # location, ready to copy over.
                                parser.write_binary(r_name[:-4])

                        except GPNoParserException:
                            # In the failure case, we fallback
                            original_file = l_name[:-4] + SUFFIX
                            shutil.copy2(original_file, r_name[:-4])

                            self.outf.write('WARNING: No such parser for %s\n' % to_parse)
                            self.outf.write('WARNING: Falling back to simple copy-restore.\n')
                        except:
                            import traceback
                            traceback.print_exc()

                            # In the failure case, we fallback
                            original_file = l_name[:-4] + SUFFIX
                            shutil.copy2(original_file, r_name[:-4])

                            self.outf.write('WARNING: Error during parsing for %s\n' % l_name)
                            self.outf.write('WARNING: Falling back to simple copy-restore.\n')

    def run(self, displayname, backup, H=None, tmpdir=None, entities=None, sambaopts=None, credopts=None,
            versionopts=None, restore_metadata=None):
        """Create a fresh GPO (via cmd_create.run) and fill it from *backup*.

        On any failure after creation, the partially-restored GPO is
        deleted with cmd_del before re-raising as CommandError.
        """
        dtd_header = ''

        if not os.path.exists(backup):
            raise CommandError("Backup directory does not exist %s" % backup)

        if entities is not None:
            # DOCTYPE name is meant to match root element, but ElementTree does
            # not seem to care, so this seems to be enough.

            dtd_header = '<!DOCTYPE foobar [\n'

            if not os.path.exists(entities):
                raise CommandError("Entities file does not exist %s" %
                                   entities)
            with open(entities, 'r') as entities_file:
                entities_content = entities_file.read()

            # Do a basic regex test of the entities file format
            if re.match(r'(\s*<!ENTITY\s*[a-zA-Z0-9_]+\s*.*?>)+\s*\Z',
                        entities_content, flags=re.MULTILINE) is None:
                raise CommandError("Entities file does not appear to "
                                   "conform to format\n"
                                   'e.g. <!ENTITY entity "value">')
            dtd_header += entities_content.strip()

            dtd_header += '\n]>\n'

        # Creates the empty GPO and sets self.gpo_name/self.gpodir/
        # self.conn/self.sharepath for use below.
        super().run(displayname, H, tmpdir, sambaopts, credopts, versionopts)

        try:
            if tmpdir is None:
                # Create GPT
                self.tmpdir, gpodir = self.construct_tmpdir(tmpdir, self.gpo_name)
                self.gpodir = gpodir

            # Iterate over backup files and restore with DTD
            self.restore_from_backup_to_local_dir(backup, self.gpodir,
                                                  dtd_header)

            keep_new_files = not restore_metadata

            # Copy GPO files over SMB
            copy_directory_local_to_remote(self.conn, self.gpodir,
                                           self.sharepath,
                                           ignore_existing_dir=True,
                                           keep_existing_files=keep_new_files)

            gpo_dn = get_gpo_dn(self.samdb, self.gpo_name)

            # Restore the enabled extensions
            for ext in ('gPCMachineExtensionNames', 'gPCUserExtensionNames'):
                ext_file = os.path.join(backup, ext + '.SAMBAEXT')
                if os.path.exists(ext_file):
                    with open(ext_file, 'rb') as f:
                        data = f.read()

                    m = ldb.Message()
                    m.dn = gpo_dn
                    m[ext] = ldb.MessageElement(data, ldb.FLAG_MOD_REPLACE,
                                                ext)

                    self.samdb.modify(m)

            if tmpdir is None:
                # Without --tmpdir, we created one in /tmp/. It must go.
                shutil.rmtree(self.tmpdir)

        except Exception as e:
            import traceback
            traceback.print_exc()
            self.outf.write(str(e) + '\n')

            self.outf.write("Failed to restore GPO -- deleting...\n")
            cmd = cmd_del()
            cmd.run(self.gpo_name, H, sambaopts, credopts, versionopts)

            raise CommandError("Failed to restore: %s" % e)
+
+
class cmd_del(GPOCommand):
    """Delete a GPO."""

    synopsis = "%prog <gpo> [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_args = ['gpo']

    takes_options = [
        Option("-H", help="LDB URL for database or target server", type=str),
    ]

    def run(self, gpo, H=None, sambaopts=None, credopts=None,
            versionopts=None):
        """Delete *gpo*: remove its container links, its LDAP objects (in
        one transaction) and its sysvol files over SMB.
        """
        self.lp = sambaopts.get_loadparm()
        self.creds = credopts.get_credentials(self.lp, fallback_machine=True)

        # We need to know writable DC to setup SMB connection
        if H and H.startswith('ldap://'):
            dc_hostname = H[7:]
            self.url = H
        else:
            dc_hostname = netcmd_finddc(self.lp, self.creds)
            self.url = dc_url(self.lp, self.creds, dc=dc_hostname)

        self.samdb_connect()

        # Check if valid GPO
        try:
            msg = get_gpo_info(self.samdb, gpo=gpo)[0]
            unc_path = str(msg['gPCFileSysPath'][0])
        except Exception:
            raise CommandError("GPO '%s' does not exist" % gpo)

        # Connect to DC over SMB
        [dom_name, service, sharepath] = parse_unc(unc_path)
        conn = smb_connection(dc_hostname, service, lp=self.lp,
                              creds=self.creds)

        self.samdb.transaction_start()
        try:
            # Check for existing links
            msg = get_gpo_containers(self.samdb, gpo)

            if len(msg):
                self.outf.write("GPO %s is linked to containers\n" % gpo)
                for m in msg:
                    del_gpo_link(self.samdb, m['dn'], gpo)
                    self.outf.write("    Removed link from %s.\n" % m['dn'])

            # Remove LDAP entries
            gpo_dn = get_gpo_dn(self.samdb, gpo)
            # Children must be deleted before the parent container.
            self.samdb.delete(ldb.Dn(self.samdb, "CN=User,%s" % str(gpo_dn)))
            self.samdb.delete(ldb.Dn(self.samdb, "CN=Machine,%s" % str(gpo_dn)))
            self.samdb.delete(gpo_dn)

            # Remove GPO files
            conn.deltree(sharepath)

        except Exception:
            self.samdb.transaction_cancel()
            raise
        else:
            self.samdb.transaction_commit()

        self.outf.write("GPO %s deleted.\n" % gpo)
+
+
class cmd_aclcheck(GPOCommand):
    """Check all GPOs have matching LDAP and DS ACLs."""

    synopsis = "%prog [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server", type=str,
               metavar="URL", dest="H")
    ]

    def run(self, H=None, sambaopts=None, credopts=None, versionopts=None):
        """Compare each GPO's filesystem ACL against the ACL derived from
        its directory object; raise CommandError on the first mismatch.
        """
        self.lp = sambaopts.get_loadparm()
        self.creds = credopts.get_credentials(self.lp, fallback_machine=True)

        self.url = dc_url(self.lp, self.creds, H)

        # We need to know writable DC to setup SMB connection
        if H and H.startswith('ldap://'):
            dc_hostname = H[7:]
            self.url = H
        else:
            dc_hostname = netcmd_finddc(self.lp, self.creds)
            self.url = dc_url(self.lp, self.creds, dc=dc_hostname)

        self.samdb_connect()

        # All GPOs (gpo=None means no filter).
        msg = get_gpo_info(self.samdb, None)

        for m in msg:
            # verify UNC path
            unc = str(m['gPCFileSysPath'][0])
            try:
                [dom_name, service, sharepath] = parse_unc(unc)
            except ValueError:
                raise CommandError("Invalid GPO path (%s)" % unc)

            # SMB connect to DC
            conn = smb_connection(dc_hostname, service, lp=self.lp,
                                  creds=self.creds)

            fs_sd = conn.get_acl(sharepath, security.SECINFO_OWNER | security.SECINFO_GROUP | security.SECINFO_DACL, security.SEC_FLAG_MAXIMUM_ALLOWED)

            if 'nTSecurityDescriptor' not in m:
                raise CommandError("Could not read nTSecurityDescriptor. "
                                   "This requires an Administrator account")

            ds_sd_ndr = m['nTSecurityDescriptor'][0]
            ds_sd = ndr_unpack(security.descriptor, ds_sd_ndr).as_sddl()

            # Create a file system security descriptor
            domain_sid = security.dom_sid(self.samdb.get_domain_sid())
            expected_fs_sddl = dsacl2fsacl(ds_sd, domain_sid)

            if (fs_sd.as_sddl(domain_sid) != expected_fs_sddl):
                raise CommandError("Invalid GPO ACL %s on path (%s), should be %s" % (fs_sd.as_sddl(domain_sid), sharepath, expected_fs_sddl))
+
class cmd_admxload(Command):
    """Loads samba admx files to sysvol"""

    synopsis = "%prog [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server", type=str,
               metavar="URL", dest="H"),
        Option("--admx-dir", help="Directory where admx templates are stored",
               type=str, default=os.path.join(param.data_dir(), 'samba/admx'))
    ]

    def run(self, H=None, sambaopts=None, credopts=None, versionopts=None,
            admx_dir=None):
        """Upload every file under *admx_dir* into the domain's sysvol
        PolicyDefinitions Central Store, preserving the directory layout.
        """
        self.lp = sambaopts.get_loadparm()
        self.creds = credopts.get_credentials(self.lp, fallback_machine=True)

        # We need to know writable DC to setup SMB connection
        if H and H.startswith('ldap://'):
            dc_hostname = H[7:]
            self.url = H
        else:
            dc_hostname = netcmd_finddc(self.lp, self.creds)
            self.url = dc_url(self.lp, self.creds, dc=dc_hostname)

        # SMB connect to DC
        conn = smb_connection(dc_hostname,
                              'sysvol',
                              lp=self.lp,
                              creds=self.creds)

        smb_dir = '\\'.join([self.lp.get('realm').lower(),
                             'Policies', 'PolicyDefinitions'])
        try:
            conn.mkdir(smb_dir)
        except NTSTATUSError as e:
            if e.args[0] == NT_STATUS_ACCESS_DENIED:
                raise CommandError("The authenticated user does "
                                   "not have sufficient privileges")
            # NAME_COLLISION just means the directory already exists.
            elif e.args[0] != NT_STATUS_OBJECT_NAME_COLLISION:
                raise

        for dirname, dirs, files in os.walk(admx_dir):
            for fname in files:
                # Mirror the local path (relative to admx_dir) on the share.
                path_in_admx = dirname.replace(admx_dir, '')
                full_path = os.path.join(dirname, fname)
                sub_dir = '\\'.join([smb_dir, path_in_admx]).replace('/', '\\')
                smb_path = '\\'.join([sub_dir, fname])
                try:
                    create_directory_hier(conn, sub_dir)
                except NTSTATUSError as e:
                    if e.args[0] == NT_STATUS_ACCESS_DENIED:
                        raise CommandError("The authenticated user does "
                                           "not have sufficient privileges")
                    elif e.args[0] != NT_STATUS_OBJECT_NAME_COLLISION:
                        raise
                with open(full_path, 'rb') as f:
                    try:
                        conn.savefile(smb_path, f.read())
                    except NTSTATUSError as e:
                        if e.args[0] == NT_STATUS_ACCESS_DENIED:
                            raise CommandError("The authenticated user does "
                                               "not have sufficient privileges")
        self.outf.write('Installing ADMX templates to the Central Store '
                        'prevents Windows from displaying its own templates '
                        'in the Group Policy Management Console. You will '
                        'need to install these templates '
                        'from https://www.microsoft.com/en-us/download/102157 '
                        'to continue using Windows Administrative Templates.\n')
+
class cmd_add_sudoers(GPOCommand):
    """Adds a Samba Sudoers Group Policy to the sysvol

This command adds a sudo rule to the sysvol for applying to winbind clients.

The command argument indicates the final field in the sudo rule.
The user argument indicates the user specified in the parentheses.
The users and groups arguments are comma separated lists, which are combined to
form the first field in the sudo rule.
The --passwd argument specifies whether the sudo entry will require a password
be specified. The default is False, meaning the NOPASSWD field will be
specified in the sudo entry.

Example:
samba-tool gpo manage sudoers add {31B2F340-016D-11D2-945F-00C04FB984F9} ALL ALL fakeu fakeg

The example command will generate the following sudoers entry:
fakeu,fakeg% ALL=(ALL) NOPASSWD: ALL
    """

    synopsis = "%prog <gpo> <command> <user> <users> [groups] [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server", type=str,
               metavar="URL", dest="H"),
        Option("--passwd", action='store_true', default=False,
               help="Specify to indicate that sudo entry must provide a password")
    ]

    takes_args = ["gpo", "command", "user", "users", "groups?"]

    def run(self, gpo, command, user, users, groups=None, passwd=None,
            H=None, sambaopts=None, credopts=None, versionopts=None):
        """Append a sudoers entry to the GPO's VGP manifest.xml on sysvol.

        Creates the manifest (and its directory hierarchy) if it does not
        exist yet, then bumps the GPT.INI version so clients re-apply.
        """
        self.lp = sambaopts.get_loadparm()
        self.creds = credopts.get_credentials(self.lp, fallback_machine=True)

        # We need to know writable DC to setup SMB connection
        if H and H.startswith('ldap://'):
            dc_hostname = H[7:]
            self.url = H
        else:
            dc_hostname = netcmd_finddc(self.lp, self.creds)
            self.url = dc_url(self.lp, self.creds, dc=dc_hostname)

        # SMB connect to DC
        conn = smb_connection(dc_hostname,
                              'sysvol',
                              lp=self.lp,
                              creds=self.creds)

        self.samdb_connect()
        reg = RegistryGroupPolicies(gpo, self.lp, self.creds, self.samdb, H)

        realm = self.lp.get('realm')
        vgp_dir = '\\'.join([realm.lower(), 'Policies', gpo,
                             'MACHINE\\VGP\\VTLA\\Sudo',
                             'SudoersConfiguration'])
        vgp_xml = '\\'.join([vgp_dir, 'manifest.xml'])
        try:
            xml_data = ET.ElementTree(ET.fromstring(conn.loadfile(vgp_xml)))
            policysetting = xml_data.getroot().find('policysetting')
            data = policysetting.find('data')
        except NTSTATUSError as e:
            if e.args[0] in [NT_STATUS_OBJECT_NAME_INVALID,
                             NT_STATUS_OBJECT_NAME_NOT_FOUND,
                             NT_STATUS_OBJECT_PATH_NOT_FOUND]:
                # The file doesn't exist, so create the xml structure
                xml_data = ET.ElementTree(ET.Element('vgppolicy'))
                policysetting = ET.SubElement(xml_data.getroot(),
                                              'policysetting')
                pv = ET.SubElement(policysetting, 'version')
                pv.text = '1'
                name = ET.SubElement(policysetting, 'name')
                name.text = 'Sudo Policy'
                description = ET.SubElement(policysetting, 'description')
                description.text = 'Sudoers File Configuration Policy'
                apply_mode = ET.SubElement(policysetting, 'apply_mode')
                apply_mode.text = 'merge'
                data = ET.SubElement(policysetting, 'data')
                load_plugin = ET.SubElement(data, 'load_plugin')
                load_plugin.text = 'true'
            elif e.args[0] == NT_STATUS_ACCESS_DENIED:
                raise CommandError("The authenticated user does "
                                   "not have sufficient privileges")
            else:
                raise

        sudoers_entry = ET.SubElement(data, 'sudoers_entry')
        # A <password> element marks the rule as requiring a password;
        # its absence produces NOPASSWD when the rule is rendered.
        if passwd:
            ET.SubElement(sudoers_entry, 'password')
        command_elm = ET.SubElement(sudoers_entry, 'command')
        command_elm.text = command
        user_elm = ET.SubElement(sudoers_entry, 'user')
        user_elm.text = user
        listelement = ET.SubElement(sudoers_entry, 'listelement')
        for u in users.split(','):
            principal = ET.SubElement(listelement, 'principal')
            principal.text = u
            principal.attrib['type'] = 'user'
        if groups is not None:
            # BUG FIX: the docstring documents *groups* as a comma separated
            # list (matching *users* above), but this previously used
            # str.split() which splits on whitespace.
            for g in groups.split(','):
                principal = ET.SubElement(listelement, 'principal')
                principal.text = g
                principal.attrib['type'] = 'group'

        out = BytesIO()
        xml_data.write(out, encoding='UTF-8', xml_declaration=True)
        out.seek(0)
        try:
            create_directory_hier(conn, vgp_dir)
            conn.savefile(vgp_xml, out.read())
            # Bump the machine policy version so clients notice the change.
            reg.increment_gpt_ini(machine_changed=True)
        except NTSTATUSError as e:
            if e.args[0] == NT_STATUS_ACCESS_DENIED:
                raise CommandError("The authenticated user does "
                                   "not have sufficient privileges")
            raise
+
class cmd_list_sudoers(Command):
    """List Samba Sudoers Group Policy from the sysvol

This command lists sudo rules from the sysvol that will be applied to winbind clients.

Example:
samba-tool gpo manage sudoers list {31B2F340-016D-11D2-945F-00C04FB984F9}
    """

    synopsis = "%prog <gpo> [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server", type=str,
               metavar="URL", dest="H"),
    ]

    takes_args = ["gpo"]

    def run(self, gpo, H=None, sambaopts=None, credopts=None, versionopts=None):
        # Load server configuration and credentials so we can reach sysvol.
        self.lp = sambaopts.get_loadparm()
        self.creds = credopts.get_credentials(self.lp, fallback_machine=True)

        # We need to know writable DC to setup SMB connection
        if H and H.startswith('ldap://'):
            dc_hostname = H[7:]
            self.url = H
        else:
            dc_hostname = netcmd_finddc(self.lp, self.creds)
            self.url = dc_url(self.lp, self.creds, dc=dc_hostname)

        # SMB connect to DC
        conn = smb_connection(dc_hostname,
                              'sysvol',
                              lp=self.lp,
                              creds=self.creds)

        # First source: VGP sudoers manifest stored under the GPO's
        # MACHINE\VGP\VTLA\Sudo path on the sysvol share.
        realm = self.lp.get('realm')
        vgp_xml = '\\'.join([realm.lower(), 'Policies', gpo,
                             'MACHINE\\VGP\\VTLA\\Sudo',
                             'SudoersConfiguration\\manifest.xml'])
        try:
            xml_data = ET.fromstring(conn.loadfile(vgp_xml))
        except NTSTATUSError as e:
            if e.args[0] in [NT_STATUS_OBJECT_NAME_INVALID,
                             NT_STATUS_OBJECT_NAME_NOT_FOUND,
                             NT_STATUS_OBJECT_PATH_NOT_FOUND]:
                # The file doesn't exist, so there is nothing to list
                xml_data = None
            elif e.args[0] == NT_STATUS_ACCESS_DENIED:
                raise CommandError("The authenticated user does "
                                   "not have sufficient privileges")
            else:
                raise

        if xml_data is not None:
            # Render each VGP entry in sudoers syntax:
            #   "<principals> ALL=(<runas user>)[ NOPASSWD:] <command>"
            policy = xml_data.find('policysetting')
            data = policy.find('data')
            for entry in data.findall('sudoers_entry'):
                command = entry.find('command').text
                user = entry.find('user').text
                listelements = entry.findall('listelement')
                principals = []
                for listelement in listelements:
                    principals.extend(listelement.findall('principal'))
                if len(principals) > 0:
                    # NOTE(review): group principals render as 'name%' here;
                    # sudoers group syntax is usually '%name' — confirm this
                    # matches the format produced by the add command.
                    uname = ','.join([u.text if u.attrib['type'] == 'user' \
                        else '%s%%' % u.text for u in principals])
                else:
                    uname = 'ALL'
                # A missing <password> element means no password is required.
                nopassword = entry.find('password') is None
                np_entry = ' NOPASSWD:' if nopassword else ''
                p = '%s ALL=(%s)%s %s' % (uname, user, np_entry, command)
                self.outf.write('%s\n' % p)

        # Second source: rules set via the GPME, stored as registry values
        # in the GPO's MACHINE\Registry.pol file.
        pol_file = '\\'.join([realm.lower(), 'Policies', gpo,
                              'MACHINE\\Registry.pol'])
        try:
            pol_data = ndr_unpack(preg.file, conn.loadfile(pol_file))
        except NTSTATUSError as e:
            if e.args[0] in [NT_STATUS_OBJECT_NAME_INVALID,
                             NT_STATUS_OBJECT_NAME_NOT_FOUND,
                             NT_STATUS_OBJECT_PATH_NOT_FOUND]:
                return # The file doesn't exist, so there is nothing to list
            if e.args[0] == NT_STATUS_ACCESS_DENIED:
                raise CommandError("The authenticated user does "
                                   "not have sufficient privileges")
            raise

        # Also list the policies set from the GPME
        keyname = b'Software\\Policies\\Samba\\Unix Settings\\Sudo Rights'
        for entry in pol_data.entries:
            # Only print entries under the sudo key with non-blank data.
            if get_bytes(entry.keyname) == keyname and \
                    get_string(entry.data).strip():
                self.outf.write('%s\n' % entry.data)
+
class cmd_remove_sudoers(GPOCommand):
    """Removes a Samba Sudoers Group Policy from the sysvol

This command removes a sudo rule from the sysvol from applying to winbind clients.

Example:
samba-tool gpo manage sudoers remove {31B2F340-016D-11D2-945F-00C04FB984F9} 'fakeu ALL=(ALL) NOPASSWD: ALL'
    """

    synopsis = "%prog <gpo> <entry> [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server", type=str,
               metavar="URL", dest="H"),
    ]

    takes_args = ["gpo", "entry"]

    def run(self, gpo, entry, H=None, sambaopts=None, credopts=None, versionopts=None):
        # Load server configuration and credentials so we can reach sysvol.
        self.lp = sambaopts.get_loadparm()
        self.creds = credopts.get_credentials(self.lp, fallback_machine=True)

        # We need to know writable DC to setup SMB connection
        if H and H.startswith('ldap://'):
            dc_hostname = H[7:]
            self.url = H
        else:
            dc_hostname = netcmd_finddc(self.lp, self.creds)
            self.url = dc_url(self.lp, self.creds, dc=dc_hostname)

        # SMB connect to DC
        conn = smb_connection(dc_hostname,
                              'sysvol',
                              lp=self.lp,
                              creds=self.creds)

        self.samdb_connect()
        reg = RegistryGroupPolicies(gpo, self.lp, self.creds, self.samdb, H)

        # The rule may live in either the VGP manifest or Registry.pol; load
        # both, treating a missing file as "no rules of that kind".
        realm = self.lp.get('realm')
        vgp_dir = '\\'.join([realm.lower(), 'Policies', gpo,
                             'MACHINE\\VGP\\VTLA\\Sudo',
                             'SudoersConfiguration'])
        vgp_xml = '\\'.join([vgp_dir, 'manifest.xml'])
        try:
            xml_data = ET.ElementTree(ET.fromstring(conn.loadfile(vgp_xml)))
            policysetting = xml_data.getroot().find('policysetting')
            data = policysetting.find('data')
        except NTSTATUSError as e:
            if e.args[0] in [NT_STATUS_OBJECT_NAME_INVALID,
                             NT_STATUS_OBJECT_NAME_NOT_FOUND,
                             NT_STATUS_OBJECT_PATH_NOT_FOUND]:
                data = None
            elif e.args[0] == NT_STATUS_ACCESS_DENIED:
                raise CommandError("The authenticated user does "
                                   "not have sufficient privileges")
            else:
                raise

        pol_file = '\\'.join([realm.lower(), 'Policies', gpo,
                              'MACHINE\\Registry.pol'])
        try:
            pol_data = ndr_unpack(preg.file, conn.loadfile(pol_file))
        except NTSTATUSError as e:
            if e.args[0] in [NT_STATUS_OBJECT_NAME_INVALID,
                             NT_STATUS_OBJECT_NAME_NOT_FOUND,
                             NT_STATUS_OBJECT_PATH_NOT_FOUND]:
                pol_data = None
            elif e.args[0] == NT_STATUS_ACCESS_DENIED:
                raise CommandError("The authenticated user does "
                                   "not have sufficient privileges")
            else:
                raise

        # Reconstruct each VGP entry in the same sudoers-line format the
        # list command prints, so the user-supplied entry can be matched.
        entries = {}
        # BUGFIX: truth-testing an ElementTree Element is deprecated and is
        # False for an element with no children; compare against None
        # explicitly instead of `if data`.
        for e in data.findall('sudoers_entry') if data is not None else []:
            command = e.find('command').text
            user = e.find('user').text
            listelements = e.findall('listelement')
            principals = []
            for listelement in listelements:
                principals.extend(listelement.findall('principal'))
            if len(principals) > 0:
                uname = ','.join([u.text if u.attrib['type'] == 'user' \
                    else '%s%%' % u.text for u in principals])
            else:
                uname = 'ALL'
            nopassword = e.find('password') is None
            np_entry = ' NOPASSWD:' if nopassword else ''
            p = '%s ALL=(%s)%s %s' % (uname, user, np_entry, command)
            entries[p] = e

        if entry in entries.keys():
            # Remove the matching VGP entry and write the manifest back.
            data.remove(entries[entry])

            out = BytesIO()
            xml_data.write(out, encoding='UTF-8', xml_declaration=True)
            out.seek(0)
            try:
                create_directory_hier(conn, vgp_dir)
                conn.savefile(vgp_xml, out.read())
                reg.increment_gpt_ini(machine_changed=True)
            except NTSTATUSError as e:
                if e.args[0] == NT_STATUS_ACCESS_DENIED:
                    raise CommandError("The authenticated user does "
                                       "not have sufficient privileges")
                raise
        elif entry in ([e.data for e in pol_data.entries] if pol_data else []):
            # Rule came from the GPME: drop the matching registry value and
            # save the repacked Registry.pol.
            entries = [e for e in pol_data.entries if e.data != entry]
            pol_data.num_entries = len(entries)
            pol_data.entries = entries

            try:
                conn.savefile(pol_file, ndr_pack(pol_data))
                reg.increment_gpt_ini(machine_changed=True)
            except NTSTATUSError as e:
                if e.args[0] == NT_STATUS_ACCESS_DENIED:
                    raise CommandError("The authenticated user does "
                                       "not have sufficient privileges")
                raise
        else:
            raise CommandError("Cannot remove '%s' because it does not exist" %
                                entry)
+
class cmd_sudoers(SuperCommand):
    """Manage Sudoers Group Policy Objects"""
    # Map each subcommand verb to its implementing Command instance.
    subcommands = {
        "add": cmd_add_sudoers(),
        "list": cmd_list_sudoers(),
        "remove": cmd_remove_sudoers(),
    }
+
class cmd_set_security(GPOCommand):
    """Set Samba Security Group Policy to the sysvol

This command sets a security setting to the sysvol for applying to winbind
clients. Not providing a value will unset the policy.
These settings only apply to the ADDC.

Example:
samba-tool gpo manage security set {31B2F340-016D-11D2-945F-00C04FB984F9} MaxTicketAge 10

Possible policies:
MaxTicketAge            Maximum lifetime for user ticket
                        Defined in hours

MaxServiceAge           Maximum lifetime for service ticket
                        Defined in minutes

MaxRenewAge             Maximum lifetime for user ticket renewal
                        Defined in minutes

MinimumPasswordAge      Minimum password age
                        Defined in days

MaximumPasswordAge      Maximum password age
                        Defined in days

MinimumPasswordLength   Minimum password length
                        Defined in characters

PasswordComplexity      Password must meet complexity requirements
                        1 is Enabled, 0 is Disabled
    """

    synopsis = "%prog <gpo> [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server", type=str,
               metavar="URL", dest="H"),
    ]

    takes_args = ["gpo", "policy", "value?"]

    def run(self, gpo, policy, value=None, H=None, sambaopts=None,
            credopts=None, versionopts=None):
        # Load server configuration and credentials so we can reach sysvol.
        self.lp = sambaopts.get_loadparm()
        self.creds = credopts.get_credentials(self.lp, fallback_machine=True)

        # We need to know writable DC to setup SMB connection
        if H and H.startswith('ldap://'):
            dc_hostname = H[7:]
            self.url = H
        else:
            dc_hostname = netcmd_finddc(self.lp, self.creds)
            self.url = dc_url(self.lp, self.creds, dc=dc_hostname)

        # SMB connect to DC
        conn = smb_connection(dc_hostname,
                              'sysvol',
                              lp=self.lp,
                              creds=self.creds)

        self.samdb_connect()
        reg = RegistryGroupPolicies(gpo, self.lp, self.creds, self.samdb, H)

        # Security settings live in the GptTmpl.inf INI file under SecEdit.
        realm = self.lp.get('realm')
        inf_dir = '\\'.join([realm.lower(), 'Policies', gpo,
                             'MACHINE\\Microsoft\\Windows NT\\SecEdit'])
        inf_file = '\\'.join([inf_dir, 'GptTmpl.inf'])
        try:
            inf_data = ConfigParser(interpolation=None)
            # Keys in GptTmpl.inf are case-sensitive; disable the default
            # lower-casing of option names.
            inf_data.optionxform=str
            raw = conn.loadfile(inf_file)
            try:
                inf_data.read_file(StringIO(raw.decode()))
            except UnicodeDecodeError:
                # Windows tools commonly write this file as UTF-16.
                inf_data.read_file(StringIO(raw.decode('utf-16')))
        except NTSTATUSError as e:
            if e.args[0] == NT_STATUS_ACCESS_DENIED:
                raise CommandError("The authenticated user does "
                                   "not have sufficient privileges")
            # A missing file is fine: we start from an empty parser.
            if e.args[0] not in [NT_STATUS_OBJECT_NAME_INVALID,
                                 NT_STATUS_OBJECT_NAME_NOT_FOUND,
                                 NT_STATUS_OBJECT_PATH_NOT_FOUND]:
                raise

        # Which INI section each supported policy belongs to.
        section_map = { 'MaxTicketAge' : 'Kerberos Policy',
                        'MaxServiceAge' : 'Kerberos Policy',
                        'MaxRenewAge' : 'Kerberos Policy',
                        'MinimumPasswordAge' : 'System Access',
                        'MaximumPasswordAge' : 'System Access',
                        'MinimumPasswordLength' : 'System Access',
                        'PasswordComplexity' : 'System Access'
                      }

        # BUGFIX: an unknown policy previously escaped as a raw KeyError
        # traceback; report it as a proper CommandError instead.
        if policy not in section_map:
            raise CommandError("Invalid policy '%s'. Valid policies are: %s" %
                               (policy, ', '.join(section_map.keys())))

        section = section_map[policy]
        if not inf_data.has_section(section):
            inf_data.add_section(section)
        if value is not None:
            inf_data.set(section, policy, value)
        else:
            # No value means unset: drop the option, and the section too
            # if it is now empty.
            inf_data.remove_option(section, policy)
            if len(inf_data.options(section)) == 0:
                inf_data.remove_section(section)

        out = StringIO()
        inf_data.write(out)
        try:
            create_directory_hier(conn, inf_dir)
            conn.savefile(inf_file, get_bytes(out.getvalue()))
            reg.increment_gpt_ini(machine_changed=True)
        except NTSTATUSError as e:
            if e.args[0] == NT_STATUS_ACCESS_DENIED:
                raise CommandError("The authenticated user does "
                                   "not have sufficient privileges")
            else:
                raise
+
class cmd_list_security(Command):
    """List Samba Security Group Policy from the sysvol

This command lists security settings from the sysvol that will be applied to winbind clients.
These settings only apply to the ADDC.

Example:
samba-tool gpo manage security list {31B2F340-016D-11D2-945F-00C04FB984F9}
    """

    synopsis = "%prog <gpo> [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server", type=str,
               metavar="URL", dest="H"),
    ]

    takes_args = ["gpo"]

    def run(self, gpo, H=None, sambaopts=None, credopts=None, versionopts=None):
        # Load server configuration and credentials so we can reach sysvol.
        self.lp = sambaopts.get_loadparm()
        self.creds = credopts.get_credentials(self.lp, fallback_machine=True)

        # We need to know writable DC to setup SMB connection
        if H and H.startswith('ldap://'):
            dc_hostname = H[7:]
            self.url = H
        else:
            dc_hostname = netcmd_finddc(self.lp, self.creds)
            self.url = dc_url(self.lp, self.creds, dc=dc_hostname)

        # SMB connect to DC
        conn = smb_connection(dc_hostname,
                              'sysvol',
                              lp=self.lp,
                              creds=self.creds)

        # Security settings live in the GptTmpl.inf INI file under SecEdit.
        realm = self.lp.get('realm')
        inf_file = '\\'.join([realm.lower(), 'Policies', gpo,
                              'MACHINE\\Microsoft\\Windows NT\\SecEdit\\GptTmpl.inf'])
        try:
            inf_data = ConfigParser(interpolation=None)
            # Keys in GptTmpl.inf are case-sensitive; disable the default
            # lower-casing of option names.
            inf_data.optionxform=str
            raw = conn.loadfile(inf_file)
            try:
                inf_data.read_file(StringIO(raw.decode()))
            except UnicodeDecodeError:
                # Windows tools commonly write this file as UTF-16.
                inf_data.read_file(StringIO(raw.decode('utf-16')))
        except NTSTATUSError as e:
            if e.args[0] in [NT_STATUS_OBJECT_NAME_INVALID,
                             NT_STATUS_OBJECT_NAME_NOT_FOUND,
                             NT_STATUS_OBJECT_PATH_NOT_FOUND]:
                return # The file doesn't exist, so there is nothing to list
            if e.args[0] == NT_STATUS_ACCESS_DENIED:
                raise CommandError("The authenticated user does "
                                   "not have sufficient privileges")
            raise

        # Only print the sections this tool manages; skip everything else.
        for section in inf_data.sections():
            if section not in ['Kerberos Policy', 'System Access']:
                continue
            for key, value in inf_data.items(section):
                self.outf.write('%s = %s\n' % (key, value))
+
class cmd_security(SuperCommand):
    """Manage Security Group Policy Objects"""
    # Map each subcommand verb to its implementing Command instance.
    subcommands = {
        "set": cmd_set_security(),
        "list": cmd_list_security(),
    }
+
class cmd_list_smb_conf(Command):
    """List Samba smb.conf Group Policy from the sysvol

This command lists smb.conf settings from the sysvol that will be applied to winbind clients.

Example:
samba-tool gpo manage smb_conf list {31B2F340-016D-11D2-945F-00C04FB984F9}
    """

    synopsis = "%prog <gpo> [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server", type=str,
               metavar="URL", dest="H"),
    ]

    takes_args = ["gpo"]

    def run(self, gpo, H=None, sambaopts=None, credopts=None, versionopts=None):
        # Load server configuration and credentials so we can reach sysvol.
        self.lp = sambaopts.get_loadparm()
        self.creds = credopts.get_credentials(self.lp, fallback_machine=True)

        # We need to know writable DC to setup SMB connection
        if H and H.startswith('ldap://'):
            dc_hostname = H[7:]
            self.url = H
        else:
            dc_hostname = netcmd_finddc(self.lp, self.creds)
            self.url = dc_url(self.lp, self.creds, dc=dc_hostname)

        # SMB connect to DC
        conn = smb_connection(dc_hostname,
                              'sysvol',
                              lp=self.lp,
                              creds=self.creds)

        # smb.conf policies are stored as registry values in Registry.pol.
        realm = self.lp.get('realm')
        pol_file = '\\'.join([realm.lower(), 'Policies', gpo,
                              'MACHINE\\Registry.pol'])
        try:
            pol_data = ndr_unpack(preg.file, conn.loadfile(pol_file))
        except NTSTATUSError as e:
            if e.args[0] in [NT_STATUS_OBJECT_NAME_INVALID,
                             NT_STATUS_OBJECT_NAME_NOT_FOUND,
                             NT_STATUS_OBJECT_PATH_NOT_FOUND]:
                return # The file doesn't exist, so there is nothing to list
            if e.args[0] == NT_STATUS_ACCESS_DENIED:
                raise CommandError("The authenticated user does "
                                   "not have sufficient privileges")
            raise

        keyname = b'Software\\Policies\\Samba\\smb_conf'
        # Round-trip each value through a scratch LoadParm so it is printed
        # in canonical smb.conf form rather than raw registry form.
        lp = param.LoadParm()
        for entry in pol_data.entries:
            if get_bytes(entry.keyname) == keyname:
                lp.set(entry.valuename, str(entry.data))
                val = lp.get(entry.valuename)
                self.outf.write('%s = %s\n' % (entry.valuename, val))
+
class cmd_set_smb_conf(GPOCommand):
    """Sets a Samba smb.conf Group Policy to the sysvol

This command sets an smb.conf setting to the sysvol for applying to winbind
clients. Not providing a value will unset the policy.

Example:
samba-tool gpo manage smb_conf set {31B2F340-016D-11D2-945F-00C04FB984F9} 'apply gpo policies' yes
    """

    synopsis = "%prog <gpo> <entry> [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server", type=str,
               metavar="URL", dest="H"),
    ]

    takes_args = ["gpo", "setting", "value?"]

    def run(self, gpo, setting, value=None, H=None, sambaopts=None, credopts=None,
            versionopts=None):
        # Load server configuration and credentials so we can reach sysvol.
        self.lp = sambaopts.get_loadparm()
        self.creds = credopts.get_credentials(self.lp, fallback_machine=True)

        # We need to know writable DC to setup SMB connection
        if H and H.startswith('ldap://'):
            dc_hostname = H[7:]
            self.url = H
        else:
            dc_hostname = netcmd_finddc(self.lp, self.creds)
            self.url = dc_url(self.lp, self.creds, dc=dc_hostname)

        # SMB connect to DC
        conn = smb_connection(dc_hostname,
                              'sysvol',
                              lp=self.lp,
                              creds=self.creds)

        self.samdb_connect()
        reg = RegistryGroupPolicies(gpo, self.lp, self.creds, self.samdb, H)

        # smb.conf policies are stored as registry values in Registry.pol.
        realm = self.lp.get('realm')
        pol_dir = '\\'.join([realm.lower(), 'Policies', gpo, 'MACHINE'])
        pol_file = '\\'.join([pol_dir, 'Registry.pol'])
        try:
            pol_data = ndr_unpack(preg.file, conn.loadfile(pol_file))
        except NTSTATUSError as e:
            if e.args[0] in [NT_STATUS_OBJECT_NAME_INVALID,
                             NT_STATUS_OBJECT_NAME_NOT_FOUND,
                             NT_STATUS_OBJECT_PATH_NOT_FOUND]:
                pol_data = preg.file() # The file doesn't exist
            elif e.args[0] == NT_STATUS_ACCESS_DENIED:
                raise CommandError("The authenticated user does "
                                   "not have sufficient privileges")
            else:
                raise

        if value is None:
            # No value means unset: drop any entry with this valuename.
            # NOTE(review): this compares the str `setting` against
            # e.valuename from the unpacked file — assumes unpacked entries
            # expose str valuenames; confirm against the preg NDR binding.
            if setting not in [e.valuename for e in pol_data.entries]:
                raise CommandError("Cannot remove '%s' because it does "
                                   "not exist" % setting)
            entries = [e for e in pol_data.entries \
                    if e.valuename != setting]
            pol_data.entries = entries
            pol_data.num_entries = len(entries)
        else:
            # Pick a registry type from the value's shape: booleans and
            # integers become type 4 (REG_DWORD), everything else type 1
            # (REG_SZ).
            if get_string(value).lower() in ['yes', 'true', '1']:
                etype = 4
                val = 1
            elif get_string(value).lower() in ['no', 'false', '0']:
                etype = 4
                val = 0
            elif get_string(value).isnumeric():
                etype = 4
                val = int(get_string(value))
            else:
                etype = 1
                val = get_bytes(value)
            e = preg.entry()
            e.keyname = b'Software\\Policies\\Samba\\smb_conf'
            e.valuename = get_bytes(setting)
            e.type = etype
            e.data = val
            # NOTE(review): the new entry is appended without removing any
            # existing entry of the same valuename — confirm duplicates are
            # acceptable to the consumer.
            entries = list(pol_data.entries)
            entries.append(e)
            pol_data.entries = entries
            pol_data.num_entries = len(entries)

        try:
            create_directory_hier(conn, pol_dir)
            conn.savefile(pol_file, ndr_pack(pol_data))
            reg.increment_gpt_ini(machine_changed=True)
        except NTSTATUSError as e:
            if e.args[0] == NT_STATUS_ACCESS_DENIED:
                raise CommandError("The authenticated user does "
                                   "not have sufficient privileges")
            raise
+
class cmd_smb_conf(SuperCommand):
    """Manage smb.conf Group Policy Objects"""
    # Map each subcommand verb to its implementing Command instance.
    subcommands = {
        "list": cmd_list_smb_conf(),
        "set": cmd_set_smb_conf(),
    }
+
class cmd_list_symlink(Command):
    """List VGP Symbolic Link Group Policy from the sysvol

This command lists symlink settings from the sysvol that will be applied to winbind clients.

Example:
samba-tool gpo manage symlink list {31B2F340-016D-11D2-945F-00C04FB984F9}
    """

    synopsis = "%prog <gpo> [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server", type=str,
               metavar="URL", dest="H"),
    ]

    takes_args = ["gpo"]

    def run(self, gpo, H=None, sambaopts=None, credopts=None, versionopts=None):
        # Load server configuration and credentials so we can reach sysvol.
        self.lp = sambaopts.get_loadparm()
        self.creds = credopts.get_credentials(self.lp, fallback_machine=True)

        # We need to know writable DC to setup SMB connection
        if H and H.startswith('ldap://'):
            dc_hostname = H[7:]
            self.url = H
        else:
            dc_hostname = netcmd_finddc(self.lp, self.creds)
            self.url = dc_url(self.lp, self.creds, dc=dc_hostname)

        # SMB connect to DC
        conn = smb_connection(dc_hostname,
                              'sysvol',
                              lp=self.lp,
                              creds=self.creds)

        # Symlink policies live in the VGP manifest under Unix\Symlink.
        realm = self.lp.get('realm')
        vgp_xml = '\\'.join([realm.lower(), 'Policies', gpo,
                             'MACHINE\\VGP\\VTLA\\Unix',
                             'Symlink\\manifest.xml'])
        try:
            xml_data = ET.fromstring(conn.loadfile(vgp_xml))
        except NTSTATUSError as e:
            if e.args[0] in [NT_STATUS_OBJECT_NAME_INVALID,
                             NT_STATUS_OBJECT_NAME_NOT_FOUND,
                             NT_STATUS_OBJECT_PATH_NOT_FOUND]:
                return # The file doesn't exist, so there is nothing to list
            if e.args[0] == NT_STATUS_ACCESS_DENIED:
                raise CommandError("The authenticated user does "
                                   "not have sufficient privileges")
            raise

        # Print each source/target pair in `ln -s` form.
        policy = xml_data.find('policysetting')
        data = policy.find('data')
        for file_properties in data.findall('file_properties'):
            source = file_properties.find('source')
            target = file_properties.find('target')
            self.outf.write('ln -s %s %s\n' % (source.text, target.text))
+
class cmd_add_symlink(GPOCommand):
    """Adds a VGP Symbolic Link Group Policy to the sysvol

This command adds a symlink setting to the sysvol that will be applied to winbind clients.

Example:
samba-tool gpo manage symlink add {31B2F340-016D-11D2-945F-00C04FB984F9} /tmp/source /tmp/target
    """

    synopsis = "%prog <gpo> <source> <target> [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server", type=str,
               metavar="URL", dest="H"),
    ]

    takes_args = ["gpo", "source", "target"]

    def run(self, gpo, source, target, H=None, sambaopts=None, credopts=None,
            versionopts=None):
        # Load server configuration and credentials so we can reach sysvol.
        self.lp = sambaopts.get_loadparm()
        self.creds = credopts.get_credentials(self.lp, fallback_machine=True)

        # We need to know writable DC to setup SMB connection
        if H and H.startswith('ldap://'):
            dc_hostname = H[7:]
            self.url = H
        else:
            dc_hostname = netcmd_finddc(self.lp, self.creds)
            self.url = dc_url(self.lp, self.creds, dc=dc_hostname)

        # SMB connect to DC
        conn = smb_connection(dc_hostname,
                              'sysvol',
                              lp=self.lp,
                              creds=self.creds)

        self.samdb_connect()
        reg = RegistryGroupPolicies(gpo, self.lp, self.creds, self.samdb, H)

        # Load the existing symlink manifest, or build a fresh skeleton if
        # this GPO has none yet.
        realm = self.lp.get('realm')
        vgp_dir = '\\'.join([realm.lower(), 'Policies', gpo,
                             'MACHINE\\VGP\\VTLA\\Unix\\Symlink'])
        vgp_xml = '\\'.join([vgp_dir, 'manifest.xml'])
        try:
            xml_data = ET.ElementTree(ET.fromstring(conn.loadfile(vgp_xml)))
            policy = xml_data.getroot().find('policysetting')
            data = policy.find('data')
        except NTSTATUSError as e:
            if e.args[0] in [NT_STATUS_OBJECT_NAME_INVALID,
                             NT_STATUS_OBJECT_NAME_NOT_FOUND,
                             NT_STATUS_OBJECT_PATH_NOT_FOUND]:
                # The file doesn't exist, so create the xml structure
                xml_data = ET.ElementTree(ET.Element('vgppolicy'))
                policysetting = ET.SubElement(xml_data.getroot(),
                                              'policysetting')
                pv = ET.SubElement(policysetting, 'version')
                pv.text = '1'
                name = ET.SubElement(policysetting, 'name')
                name.text = 'Symlink Policy'
                description = ET.SubElement(policysetting, 'description')
                description.text = 'Specifies symbolic link data'
                data = ET.SubElement(policysetting, 'data')
            elif e.args[0] == NT_STATUS_ACCESS_DENIED:
                raise CommandError("The authenticated user does "
                                   "not have sufficient privileges")
            else:
                raise

        # Append the new source/target pair to the manifest.
        file_properties = ET.SubElement(data, 'file_properties')
        source_elm = ET.SubElement(file_properties, 'source')
        source_elm.text = source
        target_elm = ET.SubElement(file_properties, 'target')
        target_elm.text = target

        # Serialize and write the manifest back to sysvol, then bump the
        # GPT.ini version so clients pick up the change.
        out = BytesIO()
        xml_data.write(out, encoding='UTF-8', xml_declaration=True)
        out.seek(0)
        try:
            create_directory_hier(conn, vgp_dir)
            conn.savefile(vgp_xml, out.read())
            reg.increment_gpt_ini(machine_changed=True)
        except NTSTATUSError as e:
            if e.args[0] == NT_STATUS_ACCESS_DENIED:
                raise CommandError("The authenticated user does "
                                   "not have sufficient privileges")
            raise
+
class cmd_remove_symlink(GPOCommand):
    """Removes a VGP Symbolic Link Group Policy from the sysvol

This command removes a symlink setting from the sysvol from applying to winbind
clients.

Example:
samba-tool gpo manage symlink remove {31B2F340-016D-11D2-945F-00C04FB984F9} /tmp/source /tmp/target
    """

    synopsis = "%prog <gpo> <source> <target> [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server", type=str,
               metavar="URL", dest="H"),
    ]

    takes_args = ["gpo", "source", "target"]

    def run(self, gpo, source, target, H=None, sambaopts=None, credopts=None,
            versionopts=None):
        # Load server configuration and credentials so we can reach sysvol.
        self.lp = sambaopts.get_loadparm()
        self.creds = credopts.get_credentials(self.lp, fallback_machine=True)

        # We need to know writable DC to setup SMB connection
        if H and H.startswith('ldap://'):
            dc_hostname = H[7:]
            self.url = H
        else:
            dc_hostname = netcmd_finddc(self.lp, self.creds)
            self.url = dc_url(self.lp, self.creds, dc=dc_hostname)

        # SMB connect to DC
        conn = smb_connection(dc_hostname,
                              'sysvol',
                              lp=self.lp,
                              creds=self.creds)

        self.samdb_connect()
        reg = RegistryGroupPolicies(gpo, self.lp, self.creds, self.samdb, H)

        # A missing manifest means there is no such link to remove.
        realm = self.lp.get('realm')
        vgp_dir = '\\'.join([realm.lower(), 'Policies', gpo,
                             'MACHINE\\VGP\\VTLA\\Unix\\Symlink'])
        vgp_xml = '\\'.join([vgp_dir, 'manifest.xml'])
        try:
            xml_data = ET.ElementTree(ET.fromstring(conn.loadfile(vgp_xml)))
            policy = xml_data.getroot().find('policysetting')
            data = policy.find('data')
        except NTSTATUSError as e:
            if e.args[0] in [NT_STATUS_OBJECT_NAME_INVALID,
                             NT_STATUS_OBJECT_NAME_NOT_FOUND,
                             NT_STATUS_OBJECT_PATH_NOT_FOUND]:
                # BUGFIX: the format arguments must be a tuple; the original
                # `% source, target` applied % to source alone (TypeError:
                # not enough arguments) and passed target as a second
                # CommandError argument.
                raise CommandError("Cannot remove link from '%s' to '%s' "
                                   "because it does not exist" %
                                   (source, target))
            elif e.args[0] == NT_STATUS_ACCESS_DENIED:
                raise CommandError("The authenticated user does "
                                   "not have sufficient privileges")
            else:
                raise

        # Remove the first entry matching both source and target; the
        # for/else raises if no entry matched.
        for file_properties in data.findall('file_properties'):
            source_elm = file_properties.find('source')
            target_elm = file_properties.find('target')
            if source_elm.text == source and target_elm.text == target:
                data.remove(file_properties)
                break
        else:
            # BUGFIX: same tuple fix as above.
            raise CommandError("Cannot remove link from '%s' to '%s' "
                               "because it does not exist" %
                               (source, target))


        # Serialize and write the manifest back to sysvol, then bump the
        # GPT.ini version so clients pick up the change.
        out = BytesIO()
        xml_data.write(out, encoding='UTF-8', xml_declaration=True)
        out.seek(0)
        try:
            create_directory_hier(conn, vgp_dir)
            conn.savefile(vgp_xml, out.read())
            reg.increment_gpt_ini(machine_changed=True)
        except NTSTATUSError as e:
            if e.args[0] == NT_STATUS_ACCESS_DENIED:
                raise CommandError("The authenticated user does "
                                   "not have sufficient privileges")
            raise
+
class cmd_symlink(SuperCommand):
    """Manage symlink Group Policy Objects"""
    # Map each subcommand verb to its implementing Command instance.
    subcommands = {
        "list": cmd_list_symlink(),
        "add": cmd_add_symlink(),
        "remove": cmd_remove_symlink(),
    }
+
class cmd_list_files(Command):
    """List VGP Files Group Policy from the sysvol

This command lists files which will be copied from the sysvol and applied to winbind clients.

Example:
samba-tool gpo manage files list {31B2F340-016D-11D2-945F-00C04FB984F9}
    """

    synopsis = "%prog <gpo> [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server", type=str,
               metavar="URL", dest="H"),
    ]

    takes_args = ["gpo"]

    def run(self, gpo, H=None, sambaopts=None, credopts=None, versionopts=None):
        # Load server configuration and credentials so we can reach sysvol.
        self.lp = sambaopts.get_loadparm()
        self.creds = credopts.get_credentials(self.lp, fallback_machine=True)

        # We need to know writable DC to setup SMB connection
        if H and H.startswith('ldap://'):
            dc_hostname = H[7:]
            self.url = H
        else:
            dc_hostname = netcmd_finddc(self.lp, self.creds)
            self.url = dc_url(self.lp, self.creds, dc=dc_hostname)

        # SMB connect to DC
        conn = smb_connection(dc_hostname,
                              'sysvol',
                              lp=self.lp,
                              creds=self.creds)

        # File policies live in the VGP manifest under Unix\Files.
        realm = self.lp.get('realm')
        vgp_xml = '\\'.join([realm.lower(), 'Policies', gpo,
                             'MACHINE\\VGP\\VTLA\\Unix',
                             'Files\\manifest.xml'])
        try:
            xml_data = ET.fromstring(conn.loadfile(vgp_xml))
        except NTSTATUSError as e:
            if e.args[0] in [NT_STATUS_OBJECT_NAME_INVALID,
                             NT_STATUS_OBJECT_NAME_NOT_FOUND,
                             NT_STATUS_OBJECT_PATH_NOT_FOUND]:
                return # The file doesn't exist, so there is nothing to list
            if e.args[0] == NT_STATUS_ACCESS_DENIED:
                raise CommandError("The authenticated user does "
                                   "not have sufficient privileges")
            raise

        # Print one `ls -l`-style line per file entry.
        policy = xml_data.find('policysetting')
        data = policy.find('data')
        for entry in data.findall('file_properties'):
            source = entry.find('source').text
            target = entry.find('target').text
            user = entry.find('user').text
            group = entry.find('group').text
            # calc_mode/stat_from_mode are module-level helpers that turn the
            # <permissions> elements into a mode and its rwx string form.
            mode = calc_mode(entry)
            p = '%s\t%s\t%s\t%s -> %s' % \
                    (stat_from_mode(mode), user, group, target, source)
            self.outf.write('%s\n' % p)
+
class cmd_add_files(GPOCommand):
    """Add VGP Files Group Policy to the sysvol

This command adds files which will be copied from the sysvol and applied to winbind clients.

Example:
samba-tool gpo manage files add {31B2F340-016D-11D2-945F-00C04FB984F9} ./source.txt /usr/share/doc/target.txt root root 600
    """

    synopsis = "%prog <gpo> <source> <target> <user> <group> <mode> [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server", type=str,
               metavar="URL", dest="H"),
    ]

    takes_args = ["gpo", "source", "target", "user", "group", "mode"]

    def run(self, gpo, source, target, user, group, mode, H=None,
            sambaopts=None, credopts=None, versionopts=None):
        # Load server configuration and credentials so we can reach sysvol.
        self.lp = sambaopts.get_loadparm()
        self.creds = credopts.get_credentials(self.lp, fallback_machine=True)

        if not os.path.exists(source):
            raise CommandError("Source '%s' does not exist" % source)

        # ROBUSTNESS: validate and parse the octal mode once up front, so a
        # bad mode is reported cleanly instead of as a ValueError later.
        try:
            mode_bits = int(mode, 8)
        except ValueError:
            raise CommandError("Mode '%s' is not a valid octal mode" % mode)

        # We need to know writable DC to setup SMB connection
        if H and H.startswith('ldap://'):
            dc_hostname = H[7:]
            self.url = H
        else:
            dc_hostname = netcmd_finddc(self.lp, self.creds)
            self.url = dc_url(self.lp, self.creds, dc=dc_hostname)

        # SMB connect to DC
        conn = smb_connection(dc_hostname,
                              'sysvol',
                              lp=self.lp,
                              creds=self.creds)

        self.samdb_connect()
        reg = RegistryGroupPolicies(gpo, self.lp, self.creds, self.samdb, H)

        # Load the existing files manifest, or build a fresh skeleton if
        # this GPO has none yet.
        realm = self.lp.get('realm')
        vgp_dir = '\\'.join([realm.lower(), 'Policies', gpo,
                             'MACHINE\\VGP\\VTLA\\Unix\\Files'])
        vgp_xml = '\\'.join([vgp_dir, 'manifest.xml'])
        try:
            xml_data = ET.ElementTree(ET.fromstring(conn.loadfile(vgp_xml)))
            policy = xml_data.getroot().find('policysetting')
            data = policy.find('data')
        except NTSTATUSError as e:
            if e.args[0] in [NT_STATUS_OBJECT_NAME_INVALID,
                             NT_STATUS_OBJECT_NAME_NOT_FOUND,
                             NT_STATUS_OBJECT_PATH_NOT_FOUND]:
                # The file doesn't exist, so create the xml structure
                xml_data = ET.ElementTree(ET.Element('vgppolicy'))
                policysetting = ET.SubElement(xml_data.getroot(),
                                              'policysetting')
                pv = ET.SubElement(policysetting, 'version')
                pv.text = '1'
                name = ET.SubElement(policysetting, 'name')
                name.text = 'Files'
                description = ET.SubElement(policysetting, 'description')
                description.text = 'Represents file data to set/copy on clients'
                data = ET.SubElement(policysetting, 'data')
            elif e.args[0] == NT_STATUS_ACCESS_DENIED:
                raise CommandError("The authenticated user does "
                                   "not have sufficient privileges")
            else:
                raise

        # Describe the new file: source (basename on sysvol), target path,
        # ownership, and one <permissions> element per rwx class.
        file_properties = ET.SubElement(data, 'file_properties')
        source_elm = ET.SubElement(file_properties, 'source')
        source_elm.text = os.path.basename(source)
        target_elm = ET.SubElement(file_properties, 'target')
        target_elm.text = target
        user_elm = ET.SubElement(file_properties, 'user')
        user_elm.text = user
        group_elm = ET.SubElement(file_properties, 'group')
        group_elm.text = group
        for ptype, shift in [('user', 6), ('group', 3), ('other', 0)]:
            permissions = ET.SubElement(file_properties, 'permissions')
            permissions.set('type', ptype)
            if mode_bits & (0o4 << shift):
                ET.SubElement(permissions, 'read')
            if mode_bits & (0o2 << shift):
                ET.SubElement(permissions, 'write')
            if mode_bits & (0o1 << shift):
                ET.SubElement(permissions, 'execute')

        # Serialize the manifest and upload both it and the source file,
        # then bump the GPT.ini version so clients pick up the change.
        out = BytesIO()
        xml_data.write(out, encoding='UTF-8', xml_declaration=True)
        out.seek(0)
        # BUGFIX: read the source via a context manager; the original
        # `open(source, 'rb').read()` leaked the file handle.
        with open(source, 'rb') as source_file:
            source_data = source_file.read()
        sysvol_source = '\\'.join([vgp_dir, os.path.basename(source)])
        try:
            create_directory_hier(conn, vgp_dir)
            conn.savefile(vgp_xml, out.read())
            conn.savefile(sysvol_source, source_data)
            reg.increment_gpt_ini(machine_changed=True)
        except NTSTATUSError as e:
            if e.args[0] == NT_STATUS_ACCESS_DENIED:
                raise CommandError("The authenticated user does "
                                   "not have sufficient privileges")
            raise
+
class cmd_remove_files(GPOCommand):
    """Remove VGP Files Group Policy from the sysvol

This command removes files which would be copied from the sysvol and applied to winbind clients.

Example:
samba-tool gpo manage files remove {31B2F340-016D-11D2-945F-00C04FB984F9} /usr/share/doc/target.txt
    """

    synopsis = "%prog <gpo> <target> [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server", type=str,
               metavar="URL", dest="H"),
    ]

    takes_args = ["gpo", "target"]

    def run(self, gpo, target, H=None, sambaopts=None, credopts=None,
            versionopts=None):
        # Load smb.conf settings and credentials (falling back to the
        # machine account) for the SMB and SAM connections below.
        self.lp = sambaopts.get_loadparm()
        self.creds = credopts.get_credentials(self.lp, fallback_machine=True)

        # We need to know writable DC to setup SMB connection
        if H and H.startswith('ldap://'):
            dc_hostname = H[7:]
            self.url = H
        else:
            dc_hostname = netcmd_finddc(self.lp, self.creds)
            self.url = dc_url(self.lp, self.creds, dc=dc_hostname)

        # SMB connect to DC
        conn = smb_connection(dc_hostname,
                              'sysvol',
                              lp=self.lp,
                              creds=self.creds)

        # samdb connection is needed so the GPT.INI version can be bumped.
        self.samdb_connect()
        reg = RegistryGroupPolicies(gpo, self.lp, self.creds, self.samdb, H)

        realm = self.lp.get('realm')
        vgp_dir = '\\'.join([realm.lower(), 'Policies', gpo,
                             'MACHINE\\VGP\\VTLA\\Unix\\Files'])
        vgp_xml = '\\'.join([vgp_dir, 'manifest.xml'])
        try:
            # A missing manifest means there is nothing to remove.
            xml_data = ET.ElementTree(ET.fromstring(conn.loadfile(vgp_xml)))
            policy = xml_data.getroot().find('policysetting')
            data = policy.find('data')
        except NTSTATUSError as e:
            if e.args[0] in [NT_STATUS_OBJECT_NAME_INVALID,
                             NT_STATUS_OBJECT_NAME_NOT_FOUND,
                             NT_STATUS_OBJECT_PATH_NOT_FOUND]:
                raise CommandError("Cannot remove file '%s' "
                                   "because it does not exist" % target)
            elif e.args[0] == NT_STATUS_ACCESS_DENIED:
                raise CommandError("The authenticated user does "
                                   "not have sufficient privileges")
            else:
                raise

        # Drop the first entry whose <target> matches, and delete the
        # staged source file from the sysvol alongside it.
        for file_properties in data.findall('file_properties'):
            source_elm = file_properties.find('source')
            target_elm = file_properties.find('target')
            if target_elm.text == target:
                source = '\\'.join([vgp_dir, source_elm.text])
                conn.unlink(source)
                data.remove(file_properties)
                break
        else:
            # for/else: no entry matched the requested target.
            raise CommandError("Cannot remove file '%s' "
                               "because it does not exist" % target)


        # Serialize the updated manifest back to the sysvol and bump the
        # machine policy version so clients re-apply.
        out = BytesIO()
        xml_data.write(out, encoding='UTF-8', xml_declaration=True)
        out.seek(0)
        try:
            create_directory_hier(conn, vgp_dir)
            conn.savefile(vgp_xml, out.read())
            reg.increment_gpt_ini(machine_changed=True)
        except NTSTATUSError as e:
            if e.args[0] == NT_STATUS_ACCESS_DENIED:
                raise CommandError("The authenticated user does "
                                   "not have sufficient privileges")
            raise
+
class cmd_files(SuperCommand):
    """Manage Files Group Policy Objects"""
    subcommands = {
        "list": cmd_list_files(),
        "add": cmd_add_files(),
        "remove": cmd_remove_files(),
    }
+
class cmd_list_openssh(Command):
    """List VGP OpenSSH Group Policy from the sysvol

This command lists openssh options from the sysvol that will be applied to winbind clients.

Example:
samba-tool gpo manage openssh list {31B2F340-016D-11D2-945F-00C04FB984F9}
    """

    synopsis = "%prog <gpo> [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server", type=str,
               metavar="URL", dest="H"),
    ]

    takes_args = ["gpo"]

    def run(self, gpo, H=None, sambaopts=None, credopts=None, versionopts=None):
        self.lp = sambaopts.get_loadparm()
        self.creds = credopts.get_credentials(self.lp, fallback_machine=True)

        # We need to know writable DC to setup SMB connection
        if H and H.startswith('ldap://'):
            dc_hostname = H[7:]
            self.url = H
        else:
            dc_hostname = netcmd_finddc(self.lp, self.creds)
            self.url = dc_url(self.lp, self.creds, dc=dc_hostname)

        # SMB connect to DC
        conn = smb_connection(dc_hostname,
                              'sysvol',
                              lp=self.lp,
                              creds=self.creds)

        realm = self.lp.get('realm')
        vgp_xml = '\\'.join([realm.lower(), 'Policies', gpo,
                             'MACHINE\\VGP\\VTLA\\SshCfg',
                             'SshD\\manifest.xml'])
        try:
            xml_data = ET.fromstring(conn.loadfile(vgp_xml))
        except NTSTATUSError as e:
            if e.args[0] in [NT_STATUS_OBJECT_NAME_INVALID,
                             NT_STATUS_OBJECT_NAME_NOT_FOUND,
                             NT_STATUS_OBJECT_PATH_NOT_FOUND]:
                return # The file doesn't exist, so there is nothing to list
            if e.args[0] == NT_STATUS_ACCESS_DENIED:
                raise CommandError("The authenticated user does "
                                   "not have sufficient privileges")
            raise

        policy = xml_data.find('policysetting')
        data = policy.find('data')
        configfile = data.find('configfile')
        for configsection in configfile.findall('configsection'):
            # Only sections with an empty <sectionname> are Samba-managed
            # sshd settings; named sections are skipped.
            if configsection.find('sectionname').text:
                continue
            # Emit "key value" pairs, one per line, sshd_config style.
            for kv in configsection.findall('keyvaluepair'):
                self.outf.write('%s %s\n' % (kv.find('key').text,
                                             kv.find('value').text))
+
class cmd_set_openssh(GPOCommand):
    """Sets a VGP OpenSSH Group Policy to the sysvol

This command sets an openssh setting to the sysvol for applying to winbind
clients. Not providing a value will unset the policy.

Example:
samba-tool gpo manage openssh set {31B2F340-016D-11D2-945F-00C04FB984F9} KerberosAuthentication Yes
    """

    synopsis = "%prog <gpo> <setting> [value] [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server", type=str,
               metavar="URL", dest="H"),
    ]

    takes_args = ["gpo", "setting", "value?"]

    def run(self, gpo, setting, value=None, H=None, sambaopts=None,
            credopts=None, versionopts=None):
        self.lp = sambaopts.get_loadparm()
        self.creds = credopts.get_credentials(self.lp, fallback_machine=True)

        # We need to know writable DC to setup SMB connection
        if H and H.startswith('ldap://'):
            dc_hostname = H[7:]
            self.url = H
        else:
            dc_hostname = netcmd_finddc(self.lp, self.creds)
            self.url = dc_url(self.lp, self.creds, dc=dc_hostname)

        # SMB connect to DC
        conn = smb_connection(dc_hostname,
                              'sysvol',
                              lp=self.lp,
                              creds=self.creds)

        self.samdb_connect()
        reg = RegistryGroupPolicies(gpo, self.lp, self.creds, self.samdb, H)

        realm = self.lp.get('realm')
        vgp_dir = '\\'.join([realm.lower(), 'Policies', gpo,
                             'MACHINE\\VGP\\VTLA\\SshCfg\\SshD'])
        vgp_xml = '\\'.join([vgp_dir, 'manifest.xml'])
        try:
            xml_data = ET.ElementTree(ET.fromstring(conn.loadfile(vgp_xml)))
            policy = xml_data.getroot().find('policysetting')
            data = policy.find('data')
            configfile = data.find('configfile')
        except NTSTATUSError as e:
            if e.args[0] in [NT_STATUS_OBJECT_NAME_INVALID,
                             NT_STATUS_OBJECT_NAME_NOT_FOUND,
                             NT_STATUS_OBJECT_PATH_NOT_FOUND]:
                # The file doesn't exist, so create the xml structure
                xml_data = ET.ElementTree(ET.Element('vgppolicy'))
                policysetting = ET.SubElement(xml_data.getroot(),
                                              'policysetting')
                pv = ET.SubElement(policysetting, 'version')
                pv.text = '1'
                name = ET.SubElement(policysetting, 'name')
                name.text = 'Configuration File'
                description = ET.SubElement(policysetting, 'description')
                description.text = 'Represents Unix configuration file settings'
                apply_mode = ET.SubElement(policysetting, 'apply_mode')
                apply_mode.text = 'merge'
                data = ET.SubElement(policysetting, 'data')
                configfile = ET.SubElement(data, 'configfile')
                configsection = ET.SubElement(configfile, 'configsection')
                ET.SubElement(configsection, 'sectionname')
            elif e.args[0] == NT_STATUS_ACCESS_DENIED:
                raise CommandError("The authenticated user does "
                                   "not have sufficient privileges")
            else:
                raise

        if value is not None:
            # Set (or update in place) the key in every Samba-managed
            # (unnamed) configsection.
            for configsection in configfile.findall('configsection'):
                if configsection.find('sectionname').text:
                    continue  # Ignore Quest SSH settings
                settings = {}
                for kv in configsection.findall('keyvaluepair'):
                    # Index by the key's *text*, not the Element object;
                    # indexing by Element meant an existing setting was
                    # never matched and a duplicate keyvaluepair was
                    # appended on every invocation.
                    settings[kv.find('key').text] = kv
                if setting in settings.keys():
                    # Update the <value> child, which is what the list
                    # command reads back.
                    settings[setting].find('value').text = value
                else:
                    keyvaluepair = ET.SubElement(configsection, 'keyvaluepair')
                    key = ET.SubElement(keyvaluepair, 'key')
                    key.text = setting
                    dvalue = ET.SubElement(keyvaluepair, 'value')
                    dvalue.text = value
        else:
            # No value given: unset the policy by removing the key.
            for configsection in configfile.findall('configsection'):
                if configsection.find('sectionname').text:
                    continue  # Ignore Quest SSH settings
                settings = {}
                for kv in configsection.findall('keyvaluepair'):
                    settings[kv.find('key').text] = kv
                if setting in settings.keys():
                    configsection.remove(settings[setting])
                else:
                    raise CommandError("Cannot remove '%s' because it does " \
                                       "not exist" % setting)

        out = BytesIO()
        xml_data.write(out, encoding='UTF-8', xml_declaration=True)
        out.seek(0)
        try:
            create_directory_hier(conn, vgp_dir)
            conn.savefile(vgp_xml, out.read())
            reg.increment_gpt_ini(machine_changed=True)
        except NTSTATUSError as e:
            if e.args[0] == NT_STATUS_ACCESS_DENIED:
                raise CommandError("The authenticated user does "
                                   "not have sufficient privileges")
            raise
+
class cmd_openssh(SuperCommand):
    """Manage OpenSSH Group Policy Objects"""
    subcommands = {
        "list": cmd_list_openssh(),
        "set": cmd_set_openssh(),
    }
+
class cmd_list_startup(Command):
    """List VGP Startup Script Group Policy from the sysvol

This command lists the startup script policies currently set on the sysvol.

Example:
samba-tool gpo manage scripts startup list {31B2F340-016D-11D2-945F-00C04FB984F9}
    """

    synopsis = "%prog <gpo> [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server", type=str,
               metavar="URL", dest="H"),
    ]

    takes_args = ["gpo"]

    def run(self, gpo, H=None, sambaopts=None, credopts=None, versionopts=None):
        self.lp = sambaopts.get_loadparm()
        self.creds = credopts.get_credentials(self.lp, fallback_machine=True)

        # We need to know writable DC to setup SMB connection
        if H and H.startswith('ldap://'):
            dc_hostname = H[7:]
            self.url = H
        else:
            dc_hostname = netcmd_finddc(self.lp, self.creds)
            self.url = dc_url(self.lp, self.creds, dc=dc_hostname)

        # SMB connect to DC
        conn = smb_connection(dc_hostname,
                              'sysvol',
                              lp=self.lp,
                              creds=self.creds)

        realm = self.lp.get('realm')
        vgp_xml = '\\'.join([realm.lower(), 'Policies', gpo,
                             'MACHINE\\VGP\\VTLA\\Unix',
                             'Scripts\\Startup\\manifest.xml'])
        try:
            xml_data = ET.fromstring(conn.loadfile(vgp_xml))
        except NTSTATUSError as e:
            if e.args[0] in [NT_STATUS_OBJECT_NAME_INVALID,
                             NT_STATUS_OBJECT_NAME_NOT_FOUND,
                             NT_STATUS_OBJECT_PATH_NOT_FOUND]:
                return # The file doesn't exist, so there is nothing to list
            if e.args[0] == NT_STATUS_ACCESS_DENIED:
                raise CommandError("The authenticated user does "
                                   "not have sufficient privileges")
            raise

        policy = xml_data.find('policysetting')
        data = policy.find('data')
        # Print one crontab-style "@reboot user script args" line per
        # registered startup script.
        for listelement in data.findall('listelement'):
            script = listelement.find('script')
            script_path = '\\'.join(['\\', realm.lower(), 'Policies', gpo,
                                     'MACHINE\\VGP\\VTLA\\Unix\\Scripts',
                                     'Startup', script.text])
            parameters = listelement.find('parameters')
            run_as = listelement.find('run_as')
            # <run_as> and <parameters> are optional; default to root and
            # no arguments respectively.
            if run_as is not None:
                run_as = run_as.text
            else:
                run_as = 'root'
            if parameters is not None:
                parameters = parameters.text
            else:
                parameters = ''
            self.outf.write('@reboot %s %s %s\n' % (run_as, script_path,
                                                    parameters))
+
class cmd_add_startup(GPOCommand):
    """Adds VGP Startup Script Group Policy to the sysvol

This command adds a startup script policy to the sysvol.

Example:
samba-tool gpo manage scripts startup add {31B2F340-016D-11D2-945F-00C04FB984F9} test_script.sh '\\-n \\-p all'
    """

    synopsis = "%prog <gpo> <script> [args] [run_as] [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server", type=str,
               metavar="URL", dest="H"),
        Option("--run-once", dest="run_once", default=False, action='store_true',
               help="Whether to run the script only once"),
    ]

    takes_args = ["gpo", "script", "args?", "run_as?"]

    def run(self, gpo, script, args=None, run_as=None, run_once=None,
            H=None, sambaopts=None, credopts=None, versionopts=None):
        self.lp = sambaopts.get_loadparm()
        self.creds = credopts.get_credentials(self.lp, fallback_machine=True)

        if not os.path.exists(script):
            raise CommandError("Script '%s' does not exist" % script)

        # We need to know writable DC to setup SMB connection
        if H and H.startswith('ldap://'):
            dc_hostname = H[7:]
            self.url = H
        else:
            dc_hostname = netcmd_finddc(self.lp, self.creds)
            self.url = dc_url(self.lp, self.creds, dc=dc_hostname)

        # SMB connect to DC
        conn = smb_connection(dc_hostname,
                              'sysvol',
                              lp=self.lp,
                              creds=self.creds)

        self.samdb_connect()
        reg = RegistryGroupPolicies(gpo, self.lp, self.creds, self.samdb, H)

        realm = self.lp.get('realm')
        vgp_dir = '\\'.join([realm.lower(), 'Policies', gpo,
                             'MACHINE\\VGP\\VTLA\\Unix\\Scripts\\Startup'])
        vgp_xml = '\\'.join([vgp_dir, 'manifest.xml'])
        try:
            xml_data = ET.ElementTree(ET.fromstring(conn.loadfile(vgp_xml)))
            policy = xml_data.getroot().find('policysetting')
            data = policy.find('data')
        except NTSTATUSError as e:
            if e.args[0] in [NT_STATUS_OBJECT_NAME_INVALID,
                             NT_STATUS_OBJECT_NAME_NOT_FOUND,
                             NT_STATUS_OBJECT_PATH_NOT_FOUND]:
                # The file doesn't exist, so create the xml structure
                xml_data = ET.ElementTree(ET.Element('vgppolicy'))
                policysetting = ET.SubElement(xml_data.getroot(),
                                              'policysetting')
                pv = ET.SubElement(policysetting, 'version')
                pv.text = '1'
                name = ET.SubElement(policysetting, 'name')
                name.text = 'Unix Scripts'
                description = ET.SubElement(policysetting, 'description')
                description.text = \
                    'Represents Unix scripts to run on Group Policy clients'
                data = ET.SubElement(policysetting, 'data')
            elif e.args[0] == NT_STATUS_ACCESS_DENIED:
                raise CommandError("The authenticated user does "
                                   "not have sufficient privileges")
            else:
                raise

        # Read the script once, with a context manager so the handle is
        # closed deterministically.
        with open(script, 'rb') as f:
            script_data = f.read()
        listelement = ET.SubElement(data, 'listelement')
        script_elm = ET.SubElement(listelement, 'script')
        script_elm.text = os.path.basename(script)
        # MD5 is the checksum the VGP manifest format requires, not a
        # security measure; renamed from 'hash' to avoid shadowing the
        # builtin.
        hash_elm = ET.SubElement(listelement, 'hash')
        hash_elm.text = hashlib.md5(script_data).hexdigest().upper()
        if args is not None:
            parameters = ET.SubElement(listelement, 'parameters')
            # Strip shell quoting and the '\-' escaping used to sneak
            # option-like arguments past the samba-tool parser.
            parameters.text = args.strip('"').strip("'").replace('\\-', '-')
        if run_as is not None:
            run_as_elm = ET.SubElement(listelement, 'run_as')
            run_as_elm.text = run_as
        if run_once:
            ET.SubElement(listelement, 'run_once')

        out = BytesIO()
        xml_data.write(out, encoding='UTF-8', xml_declaration=True)
        out.seek(0)
        sysvol_script = '\\'.join([vgp_dir, os.path.basename(script)])
        try:
            create_directory_hier(conn, vgp_dir)
            conn.savefile(vgp_xml, out.read())
            conn.savefile(sysvol_script, script_data)
            reg.increment_gpt_ini(machine_changed=True)
        except NTSTATUSError as e:
            if e.args[0] == NT_STATUS_ACCESS_DENIED:
                raise CommandError("The authenticated user does "
                                   "not have sufficient privileges")
            raise
+
class cmd_remove_startup(GPOCommand):
    """Removes VGP Startup Script Group Policy from the sysvol

This command removes a startup script policy from the sysvol.

Example:
samba-tool gpo manage scripts startup remove {31B2F340-016D-11D2-945F-00C04FB984F9} test_script.sh
    """

    synopsis = "%prog <gpo> <script> [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server", type=str,
               metavar="URL", dest="H"),
    ]

    takes_args = ["gpo", "script"]

    def run(self, gpo, script, H=None, sambaopts=None, credopts=None,
            versionopts=None):
        self.lp = sambaopts.get_loadparm()
        self.creds = credopts.get_credentials(self.lp, fallback_machine=True)

        # We need to know writable DC to setup SMB connection
        if H and H.startswith('ldap://'):
            dc_hostname = H[7:]
            self.url = H
        else:
            dc_hostname = netcmd_finddc(self.lp, self.creds)
            self.url = dc_url(self.lp, self.creds, dc=dc_hostname)

        # SMB connect to DC
        conn = smb_connection(dc_hostname,
                              'sysvol',
                              lp=self.lp,
                              creds=self.creds)

        # samdb connection is needed so the GPT.INI version can be bumped.
        self.samdb_connect()
        reg = RegistryGroupPolicies(gpo, self.lp, self.creds, self.samdb, H)

        realm = self.lp.get('realm')
        vgp_dir = '\\'.join([realm.lower(), 'Policies', gpo,
                             'MACHINE\\VGP\\VTLA\\Unix\\Scripts\\Startup'])
        vgp_xml = '\\'.join([vgp_dir, 'manifest.xml'])
        try:
            # A missing manifest means there is nothing to remove.
            xml_data = ET.ElementTree(ET.fromstring(conn.loadfile(vgp_xml)))
            policy = xml_data.getroot().find('policysetting')
            data = policy.find('data')
        except NTSTATUSError as e:
            if e.args[0] in [NT_STATUS_OBJECT_NAME_INVALID,
                             NT_STATUS_OBJECT_NAME_NOT_FOUND,
                             NT_STATUS_OBJECT_PATH_NOT_FOUND]:
                raise CommandError("Cannot remove script '%s' "
                                   "because it does not exist" % script)
            elif e.args[0] == NT_STATUS_ACCESS_DENIED:
                raise CommandError("The authenticated user does "
                                   "not have sufficient privileges")
            else:
                raise

        for listelement in data.findall('listelement'):
            script_elm = listelement.find('script')
            # Normalize backslashes so a Windows-style path argument still
            # matches the stored basename.
            if script_elm.text == os.path.basename(script.replace('\\', '/')):
                data.remove(listelement)
                break
        else:
            # for/else: no entry matched the requested script.
            raise CommandError("Cannot remove script '%s' "
                               "because it does not exist" % script)

        # Write the updated manifest back and bump the machine policy
        # version so clients re-apply.
        out = BytesIO()
        xml_data.write(out, encoding='UTF-8', xml_declaration=True)
        out.seek(0)
        try:
            create_directory_hier(conn, vgp_dir)
            conn.savefile(vgp_xml, out.read())
            reg.increment_gpt_ini(machine_changed=True)
        except NTSTATUSError as e:
            if e.args[0] == NT_STATUS_ACCESS_DENIED:
                raise CommandError("The authenticated user does "
                                   "not have sufficient privileges")
            raise
+
class cmd_startup(SuperCommand):
    """Manage Startup Scripts Group Policy Objects"""
    subcommands = {
        "list": cmd_list_startup(),
        "add": cmd_add_startup(),
        "remove": cmd_remove_startup(),
    }
+
class cmd_scripts(SuperCommand):
    """Manage Scripts Group Policy Objects"""
    subcommands = {"startup": cmd_startup()}
+
class cmd_list_motd(Command):
    """List VGP MOTD Group Policy from the sysvol

This command lists the Message of the Day from the sysvol that will be applied
to winbind clients.

Example:
samba-tool gpo manage motd list {31B2F340-016D-11D2-945F-00C04FB984F9}
    """

    synopsis = "%prog <gpo> [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server", type=str,
               metavar="URL", dest="H"),
    ]

    takes_args = ["gpo"]

    def run(self, gpo, H=None, sambaopts=None, credopts=None, versionopts=None):
        self.lp = sambaopts.get_loadparm()
        self.creds = credopts.get_credentials(self.lp, fallback_machine=True)

        # We need to know writable DC to setup SMB connection
        if H and H.startswith('ldap://'):
            dc_hostname = H[7:]
            self.url = H
        else:
            dc_hostname = netcmd_finddc(self.lp, self.creds)
            self.url = dc_url(self.lp, self.creds, dc=dc_hostname)

        # SMB connect to DC
        conn = smb_connection(dc_hostname,
                              'sysvol',
                              lp=self.lp,
                              creds=self.creds)

        realm = self.lp.get('realm')
        vgp_xml = '\\'.join([realm.lower(), 'Policies', gpo,
                             'MACHINE\\VGP\\VTLA\\Unix',
                             'MOTD\\manifest.xml'])
        try:
            xml_data = ET.fromstring(conn.loadfile(vgp_xml))
        except NTSTATUSError as e:
            if e.args[0] in [NT_STATUS_OBJECT_NAME_INVALID,
                             NT_STATUS_OBJECT_NAME_NOT_FOUND,
                             NT_STATUS_OBJECT_PATH_NOT_FOUND]:
                return # The file doesn't exist, so there is nothing to list
            if e.args[0] == NT_STATUS_ACCESS_DENIED:
                raise CommandError("The authenticated user does "
                                   "not have sufficient privileges")
            raise

        # The message body lives in policysetting/data/text.
        policy = xml_data.find('policysetting')
        data = policy.find('data')
        text = data.find('text')
        self.outf.write(text.text)
+
class cmd_set_motd(GPOCommand):
    """Sets a VGP MOTD Group Policy to the sysvol

This command sets the Message of the Day to the sysvol for applying to winbind
clients. Not providing a value will unset the policy.

Example:
samba-tool gpo manage motd set {31B2F340-016D-11D2-945F-00C04FB984F9} "Message for today"
    """

    synopsis = "%prog <gpo> [value] [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server", type=str,
               metavar="URL", dest="H"),
    ]

    takes_args = ["gpo", "value?"]

    def run(self, gpo, value=None, H=None, sambaopts=None, credopts=None,
            versionopts=None):
        self.lp = sambaopts.get_loadparm()
        self.creds = credopts.get_credentials(self.lp, fallback_machine=True)

        # We need to know writable DC to setup SMB connection
        if H and H.startswith('ldap://'):
            dc_hostname = H[7:]
            self.url = H
        else:
            dc_hostname = netcmd_finddc(self.lp, self.creds)
            self.url = dc_url(self.lp, self.creds, dc=dc_hostname)

        # SMB connect to DC
        conn = smb_connection(dc_hostname,
                              'sysvol',
                              lp=self.lp,
                              creds=self.creds)

        self.samdb_connect()
        reg = RegistryGroupPolicies(gpo, self.lp, self.creds, self.samdb, H)

        realm = self.lp.get('realm')
        vgp_dir = '\\'.join([realm.lower(), 'Policies', gpo,
                             'MACHINE\\VGP\\VTLA\\Unix\\MOTD'])
        vgp_xml = '\\'.join([vgp_dir, 'manifest.xml'])

        # No value means unset: remove the manifest entirely.
        if value is None:
            conn.unlink(vgp_xml)
            reg.increment_gpt_ini(machine_changed=True)
            return

        try:
            # Wrap the parsed root in an ElementTree so xml_data.write()
            # below works, and locate the existing <data> node; the
            # previous code kept a bare Element and left 'data' unbound
            # on this path, raising NameError whenever the manifest
            # already existed.
            xml_data = ET.ElementTree(ET.fromstring(conn.loadfile(vgp_xml)))
            policysetting = xml_data.getroot().find('policysetting')
            data = policysetting.find('data')
        except NTSTATUSError as e:
            if e.args[0] in [NT_STATUS_OBJECT_NAME_INVALID,
                             NT_STATUS_OBJECT_NAME_NOT_FOUND,
                             NT_STATUS_OBJECT_PATH_NOT_FOUND]:
                # The file doesn't exist, so create the xml structure
                xml_data = ET.ElementTree(ET.Element('vgppolicy'))
                policysetting = ET.SubElement(xml_data.getroot(),
                                              'policysetting')
                pv = ET.SubElement(policysetting, 'version')
                pv.text = '1'
                name = ET.SubElement(policysetting, 'name')
                name.text = 'Text File'
                description = ET.SubElement(policysetting, 'description')
                description.text = 'Represents a Generic Text File'
                apply_mode = ET.SubElement(policysetting, 'apply_mode')
                apply_mode.text = 'replace'
                data = ET.SubElement(policysetting, 'data')
                filename = ET.SubElement(data, 'filename')
                filename.text = 'motd'
            elif e.args[0] == NT_STATUS_ACCESS_DENIED:
                raise CommandError("The authenticated user does "
                                   "not have sufficient privileges")
            else:
                raise

        # Update the existing <text> node in place instead of appending a
        # duplicate on every invocation.
        text = data.find('text')
        if text is None:
            text = ET.SubElement(data, 'text')
        text.text = value

        out = BytesIO()
        xml_data.write(out, encoding='UTF-8', xml_declaration=True)
        out.seek(0)
        try:
            create_directory_hier(conn, vgp_dir)
            conn.savefile(vgp_xml, out.read())
            reg.increment_gpt_ini(machine_changed=True)
        except NTSTATUSError as e:
            if e.args[0] == NT_STATUS_ACCESS_DENIED:
                raise CommandError("The authenticated user does "
                                   "not have sufficient privileges")
            raise
+
class cmd_motd(SuperCommand):
    """Manage Message of the Day Group Policy Objects"""
    subcommands = {
        "list": cmd_list_motd(),
        "set": cmd_set_motd(),
    }
+
class cmd_list_issue(Command):
    """List VGP Issue Group Policy from the sysvol

This command lists the Prelogin Message from the sysvol that will be applied
to winbind clients.

Example:
samba-tool gpo manage issue list {31B2F340-016D-11D2-945F-00C04FB984F9}
    """

    synopsis = "%prog <gpo> [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server", type=str,
               metavar="URL", dest="H"),
    ]

    takes_args = ["gpo"]

    def run(self, gpo, H=None, sambaopts=None, credopts=None, versionopts=None):
        self.lp = sambaopts.get_loadparm()
        self.creds = credopts.get_credentials(self.lp, fallback_machine=True)

        # We need to know writable DC to setup SMB connection
        if H and H.startswith('ldap://'):
            dc_hostname = H[7:]
            self.url = H
        else:
            dc_hostname = netcmd_finddc(self.lp, self.creds)
            self.url = dc_url(self.lp, self.creds, dc=dc_hostname)

        # SMB connect to DC
        conn = smb_connection(dc_hostname,
                              'sysvol',
                              lp=self.lp,
                              creds=self.creds)

        realm = self.lp.get('realm')
        vgp_xml = '\\'.join([realm.lower(), 'Policies', gpo,
                             'MACHINE\\VGP\\VTLA\\Unix',
                             'Issue\\manifest.xml'])
        try:
            xml_data = ET.fromstring(conn.loadfile(vgp_xml))
        except NTSTATUSError as e:
            if e.args[0] in [NT_STATUS_OBJECT_NAME_INVALID,
                             NT_STATUS_OBJECT_NAME_NOT_FOUND,
                             NT_STATUS_OBJECT_PATH_NOT_FOUND]:
                return # The file doesn't exist, so there is nothing to list
            if e.args[0] == NT_STATUS_ACCESS_DENIED:
                raise CommandError("The authenticated user does "
                                   "not have sufficient privileges")
            raise

        # The message body lives in policysetting/data/text.
        policy = xml_data.find('policysetting')
        data = policy.find('data')
        text = data.find('text')
        self.outf.write(text.text)
+
class cmd_set_issue(GPOCommand):
    """Sets a VGP Issue Group Policy to the sysvol

This command sets the Prelogin Message to the sysvol for applying to winbind
clients. Not providing a value will unset the policy.

Example:
samba-tool gpo manage issue set {31B2F340-016D-11D2-945F-00C04FB984F9} "Welcome to Samba!"
    """

    synopsis = "%prog <gpo> [value] [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server", type=str,
               metavar="URL", dest="H"),
    ]

    takes_args = ["gpo", "value?"]

    def run(self, gpo, value=None, H=None, sambaopts=None, credopts=None,
            versionopts=None):
        self.lp = sambaopts.get_loadparm()
        self.creds = credopts.get_credentials(self.lp, fallback_machine=True)

        # We need to know writable DC to setup SMB connection
        if H and H.startswith('ldap://'):
            dc_hostname = H[7:]
            self.url = H
        else:
            dc_hostname = netcmd_finddc(self.lp, self.creds)
            self.url = dc_url(self.lp, self.creds, dc=dc_hostname)

        # SMB connect to DC
        conn = smb_connection(dc_hostname,
                              'sysvol',
                              lp=self.lp,
                              creds=self.creds)

        self.samdb_connect()
        reg = RegistryGroupPolicies(gpo, self.lp, self.creds, self.samdb, H)

        realm = self.lp.get('realm')
        vgp_dir = '\\'.join([realm.lower(), 'Policies', gpo,
                             'MACHINE\\VGP\\VTLA\\Unix\\Issue'])
        vgp_xml = '\\'.join([vgp_dir, 'manifest.xml'])

        # No value means unset: remove the manifest entirely.
        if value is None:
            conn.unlink(vgp_xml)
            reg.increment_gpt_ini(machine_changed=True)
            return

        try:
            # Wrap the parsed root in an ElementTree so xml_data.write()
            # below works, and locate the existing <data> node; the
            # previous code kept a bare Element and left 'data' unbound
            # on this path, raising NameError whenever the manifest
            # already existed.
            xml_data = ET.ElementTree(ET.fromstring(conn.loadfile(vgp_xml)))
            policysetting = xml_data.getroot().find('policysetting')
            data = policysetting.find('data')
        except NTSTATUSError as e:
            if e.args[0] in [NT_STATUS_OBJECT_NAME_INVALID,
                             NT_STATUS_OBJECT_NAME_NOT_FOUND,
                             NT_STATUS_OBJECT_PATH_NOT_FOUND]:
                # The file doesn't exist, so create the xml structure
                xml_data = ET.ElementTree(ET.Element('vgppolicy'))
                policysetting = ET.SubElement(xml_data.getroot(),
                                              'policysetting')
                pv = ET.SubElement(policysetting, 'version')
                pv.text = '1'
                name = ET.SubElement(policysetting, 'name')
                name.text = 'Text File'
                description = ET.SubElement(policysetting, 'description')
                description.text = 'Represents a Generic Text File'
                apply_mode = ET.SubElement(policysetting, 'apply_mode')
                apply_mode.text = 'replace'
                data = ET.SubElement(policysetting, 'data')
                filename = ET.SubElement(data, 'filename')
                filename.text = 'issue'
            elif e.args[0] == NT_STATUS_ACCESS_DENIED:
                raise CommandError("The authenticated user does "
                                   "not have sufficient privileges")
            else:
                raise

        # Update the existing <text> node in place instead of appending a
        # duplicate on every invocation.
        text = data.find('text')
        if text is None:
            text = ET.SubElement(data, 'text')
        text.text = value

        out = BytesIO()
        xml_data.write(out, encoding='UTF-8', xml_declaration=True)
        out.seek(0)
        try:
            create_directory_hier(conn, vgp_dir)
            conn.savefile(vgp_xml, out.read())
            reg.increment_gpt_ini(machine_changed=True)
        except NTSTATUSError as e:
            if e.args[0] == NT_STATUS_ACCESS_DENIED:
                raise CommandError("The authenticated user does "
                                   "not have sufficient privileges")
            raise
+
class cmd_issue(SuperCommand):
    """Manage Issue Group Policy Objects"""
    subcommands = {
        "list": cmd_list_issue(),
        "set": cmd_set_issue(),
    }
+
class cmd_list_access(Command):
    """List VGP Host Access Group Policy from the sysvol

This command lists host access rules from the sysvol that will be applied to winbind clients.

Example:
samba-tool gpo manage access list {31B2F340-016D-11D2-945F-00C04FB984F9}
    """

    synopsis = "%prog <gpo> [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server", type=str,
               metavar="URL", dest="H"),
    ]

    takes_args = ["gpo"]

    def run(self, gpo, H=None, sambaopts=None, credopts=None, versionopts=None):
        self.lp = sambaopts.get_loadparm()
        self.creds = credopts.get_credentials(self.lp, fallback_machine=True)

        # We need to know writable DC to setup SMB connection
        if H and H.startswith('ldap://'):
            dc_hostname = H[7:]
            self.url = H
        else:
            dc_hostname = netcmd_finddc(self.lp, self.creds)
            self.url = dc_url(self.lp, self.creds, dc=dc_hostname)

        # SMB connect to DC
        conn = smb_connection(dc_hostname,
                              'sysvol',
                              lp=self.lp,
                              creds=self.creds)

        # Allow and Deny rules live in two separate manifests; each one is
        # optional, so a missing file is simply skipped.
        realm = self.lp.get('realm')
        vgp_xml = '\\'.join([realm.lower(), 'Policies', gpo,
                             'MACHINE\\VGP\\VTLA\\VAS',
                             'HostAccessControl\\Allow\\manifest.xml'])
        try:
            allow = ET.fromstring(conn.loadfile(vgp_xml))
        except NTSTATUSError as e:
            if e.args[0] in [NT_STATUS_OBJECT_NAME_INVALID,
                             NT_STATUS_OBJECT_NAME_NOT_FOUND,
                             NT_STATUS_OBJECT_PATH_NOT_FOUND]:
                allow = None # The file doesn't exist, ignore it
            elif e.args[0] == NT_STATUS_ACCESS_DENIED:
                raise CommandError("The authenticated user does "
                                   "not have sufficient privileges")
            else:
                raise

        # Emit allow rules in access.conf style: "+:DOMAIN\name:ALL".
        if allow is not None:
            policy = allow.find('policysetting')
            data = policy.find('data')
            for listelement in data.findall('listelement'):
                adobject = listelement.find('adobject')
                name = adobject.find('name')
                domain = adobject.find('domain')
                self.outf.write('+:%s\\%s:ALL\n' % (domain.text, name.text))

        vgp_xml = '\\'.join([realm.lower(), 'Policies', gpo,
                             'MACHINE\\VGP\\VTLA\\VAS',
                             'HostAccessControl\\Deny\\manifest.xml'])
        try:
            deny = ET.fromstring(conn.loadfile(vgp_xml))
        except NTSTATUSError as e:
            if e.args[0] in [NT_STATUS_OBJECT_NAME_INVALID,
                             NT_STATUS_OBJECT_NAME_NOT_FOUND,
                             NT_STATUS_OBJECT_PATH_NOT_FOUND]:
                deny = None # The file doesn't exist, ignore it
            elif e.args[0] == NT_STATUS_ACCESS_DENIED:
                raise CommandError("The authenticated user does "
                                   "not have sufficient privileges")
            else:
                raise

        # Emit deny rules in access.conf style: "-:DOMAIN\name:ALL".
        if deny is not None:
            policy = deny.find('policysetting')
            data = policy.find('data')
            for listelement in data.findall('listelement'):
                adobject = listelement.find('adobject')
                name = adobject.find('name')
                domain = adobject.find('domain')
                self.outf.write('-:%s\\%s:ALL\n' % (domain.text, name.text))
+
+class cmd_add_access(GPOCommand):
+    """Adds a VGP Host Access Group Policy to the sysvol
+
+This command adds a host access setting to the sysvol for applying to winbind
+clients. Any time an allow entry is detected by the client, an implicit deny
+ALL will be assumed.
+
+Example:
+samba-tool gpo manage access add {31B2F340-016D-11D2-945F-00C04FB984F9} allow goodguy example.com
+    """
+
+    synopsis = "%prog <gpo> <allow/deny> <cn> <domain> [options]"
+
+    takes_optiongroups = {
+        "sambaopts": options.SambaOptions,
+        "versionopts": options.VersionOptions,
+        "credopts": options.CredentialsOptions,
+    }
+
+    takes_options = [
+        Option("-H", "--URL", help="LDB URL for database or target server", type=str,
+               metavar="URL", dest="H"),
+    ]
+
+    takes_args = ["gpo", "etype", "cn", "domain"]
+
+    def run(self, gpo, etype, cn, domain, H=None, sambaopts=None,
+            credopts=None, versionopts=None):
+        self.lp = sambaopts.get_loadparm()
+        self.creds = credopts.get_credentials(self.lp, fallback_machine=True)
+
+        # We need to know writable DC to setup SMB connection
+        if H and H.startswith('ldap://'):
+            # Strip the 'ldap://' scheme to get the bare hostname.
+            dc_hostname = H[7:]
+            self.url = H
+        else:
+            dc_hostname = netcmd_finddc(self.lp, self.creds)
+            self.url = dc_url(self.lp, self.creds, dc=dc_hostname)
+
+        # SMB connect to DC
+        conn = smb_connection(dc_hostname,
+                              'sysvol',
+                              lp=self.lp,
+                              creds=self.creds)
+
+        self.samdb_connect()
+        # Used at the end to bump the GPT.INI version once the manifest
+        # has been written back.
+        reg = RegistryGroupPolicies(gpo, self.lp, self.creds, self.samdb, H)
+
+        realm = self.lp.get('realm')
+        # Allow and deny entries are kept in separate VGP subdirectories.
+        if etype == 'allow':
+            vgp_dir = '\\'.join([realm.lower(), 'Policies', gpo,
+                                 'MACHINE\\VGP\\VTLA\\VAS',
+                                 'HostAccessControl\\Allow'])
+        elif etype == 'deny':
+            vgp_dir = '\\'.join([realm.lower(), 'Policies', gpo,
+                                 'MACHINE\\VGP\\VTLA\\VAS',
+                                 'HostAccessControl\\Deny'])
+        else:
+            raise CommandError("The entry type must be either 'allow' or "
+                               "'deny'. Unknown type '%s'" % etype)
+        vgp_xml = '\\'.join([vgp_dir, 'manifest.xml'])
+        # Parse the existing manifest, or build a fresh skeleton if absent.
+        try:
+            xml_data = ET.ElementTree(ET.fromstring(conn.loadfile(vgp_xml)))
+            policy = xml_data.getroot().find('policysetting')
+            data = policy.find('data')
+        except NTSTATUSError as e:
+            if e.args[0] in [NT_STATUS_OBJECT_NAME_INVALID,
+                             NT_STATUS_OBJECT_NAME_NOT_FOUND,
+                             NT_STATUS_OBJECT_PATH_NOT_FOUND]:
+                # The file doesn't exist, so create the xml structure
+                xml_data = ET.ElementTree(ET.Element('vgppolicy'))
+                policysetting = ET.SubElement(xml_data.getroot(),
+                                              'policysetting')
+                pv = ET.SubElement(policysetting, 'version')
+                pv.text = '1'
+                name = ET.SubElement(policysetting, 'name')
+                name.text = 'Host Access Control'
+                description = ET.SubElement(policysetting, 'description')
+                description.text = 'Represents host access control data (pam_access)'
+                apply_mode = ET.SubElement(policysetting, 'apply_mode')
+                apply_mode.text = 'merge'
+                data = ET.SubElement(policysetting, 'data')
+            elif e.args[0] == NT_STATUS_ACCESS_DENIED:
+                raise CommandError("The authenticated user does "
+                                   "not have sufficient privileges")
+            else:
+                raise
+
+        # Resolve <cn> against the domain given on the command line.
+        # NOTE(review): <domain> is passed as the dc= locator argument --
+        # confirm dc_url accepts a domain name here, not only a DC hostname.
+        url = dc_url(self.lp, self.creds, dc=domain)
+        samdb = SamDB(url=url, session_info=system_session(),
+                      credentials=self.creds, lp=self.lp)
+
+        res = samdb.search(base=samdb.domain_dn(),
+                           scope=ldb.SCOPE_SUBTREE,
+                           expression="(cn=%s)" % cn,
+                           attrs=['userPrincipalName',
+                                  'samaccountname',
+                                  'objectClass'])
+        if len(res) == 0:
+            raise CommandError('Unable to find user or group "%s"' % cn)
+
+        # The last objectClass value is the most specific class of the object.
+        objectclass = get_string(res[0]['objectClass'][-1])
+        if objectclass not in ['user', 'group']:
+            raise CommandError('%s is not a user or group' % cn)
+
+        listelement = ET.SubElement(data, 'listelement')
+        etype = ET.SubElement(listelement, 'type')
+        etype.text = objectclass.upper()
+        entry = ET.SubElement(listelement, 'entry')
+        entry.text = '%s\\%s' % (samdb.domain_netbios_name(),
+                                 get_string(res[0]['samaccountname'][-1]))
+        if objectclass == 'group':
+            # Groups additionally record which attribute names their members.
+            groupattr = ET.SubElement(data, 'groupattr')
+            groupattr.text = 'samAccountName'
+        adobject = ET.SubElement(listelement, 'adobject')
+        name = ET.SubElement(adobject, 'name')
+        name.text = get_string(res[0]['samaccountname'][-1])
+        domain_elm = ET.SubElement(adobject, 'domain')
+        domain_elm.text = domain
+        etype = ET.SubElement(adobject, 'type')
+        etype.text = objectclass
+
+        # Serialize and upload the manifest, then bump the GPT.INI version
+        # so clients pick up the changed policy.
+        out = BytesIO()
+        xml_data.write(out, encoding='UTF-8', xml_declaration=True)
+        out.seek(0)
+        try:
+            create_directory_hier(conn, vgp_dir)
+            conn.savefile(vgp_xml, out.read())
+            reg.increment_gpt_ini(machine_changed=True)
+        except NTSTATUSError as e:
+            if e.args[0] == NT_STATUS_ACCESS_DENIED:
+                raise CommandError("The authenticated user does "
+                                   "not have sufficient privileges")
+            raise
+
+class cmd_remove_access(GPOCommand):
+    """Remove a VGP Host Access Group Policy from the sysvol
+
+This command removes a host access setting from the sysvol for applying to
+winbind clients.
+
+Example:
+samba-tool gpo manage access remove {31B2F340-016D-11D2-945F-00C04FB984F9} allow goodguy example.com
+    """
+
+    synopsis = "%prog <gpo> <allow/deny> <name> <domain> [options]"
+
+    takes_optiongroups = {
+        "sambaopts": options.SambaOptions,
+        "versionopts": options.VersionOptions,
+        "credopts": options.CredentialsOptions,
+    }
+
+    takes_options = [
+        Option("-H", "--URL", help="LDB URL for database or target server", type=str,
+               metavar="URL", dest="H"),
+    ]
+
+    takes_args = ["gpo", "etype", "name", "domain"]
+
+    def run(self, gpo, etype, name, domain, H=None, sambaopts=None,
+            credopts=None, versionopts=None):
+        self.lp = sambaopts.get_loadparm()
+        self.creds = credopts.get_credentials(self.lp, fallback_machine=True)
+
+        # We need to know writable DC to setup SMB connection
+        if H and H.startswith('ldap://'):
+            # Strip the 'ldap://' scheme to get the bare hostname.
+            dc_hostname = H[7:]
+            self.url = H
+        else:
+            dc_hostname = netcmd_finddc(self.lp, self.creds)
+            self.url = dc_url(self.lp, self.creds, dc=dc_hostname)
+
+        # SMB connect to DC
+        conn = smb_connection(dc_hostname,
+                              'sysvol',
+                              lp=self.lp,
+                              creds=self.creds)
+
+        self.samdb_connect()
+        # Used at the end to bump the GPT.INI version once the manifest
+        # has been written back.
+        reg = RegistryGroupPolicies(gpo, self.lp, self.creds, self.samdb, H)
+
+        realm = self.lp.get('realm')
+        # Allow and deny entries are kept in separate VGP subdirectories.
+        if etype == 'allow':
+            vgp_dir = '\\'.join([realm.lower(), 'Policies', gpo,
+                                 'MACHINE\\VGP\\VTLA\\VAS',
+                                 'HostAccessControl\\Allow'])
+        elif etype == 'deny':
+            vgp_dir = '\\'.join([realm.lower(), 'Policies', gpo,
+                                 'MACHINE\\VGP\\VTLA\\VAS',
+                                 'HostAccessControl\\Deny'])
+        else:
+            raise CommandError("The entry type must be either 'allow' or "
+                               "'deny'. Unknown type '%s'" % etype)
+        vgp_xml = '\\'.join([vgp_dir, 'manifest.xml'])
+        # Unlike the add command, a missing manifest is an error here:
+        # there is nothing to remove from.
+        try:
+            xml_data = ET.ElementTree(ET.fromstring(conn.loadfile(vgp_xml)))
+            policy = xml_data.getroot().find('policysetting')
+            data = policy.find('data')
+        except NTSTATUSError as e:
+            if e.args[0] in [NT_STATUS_OBJECT_NAME_INVALID,
+                             NT_STATUS_OBJECT_NAME_NOT_FOUND,
+                             NT_STATUS_OBJECT_PATH_NOT_FOUND]:
+                raise CommandError("Cannot remove %s entry because it does "
+                                   "not exist" % etype)
+            elif e.args[0] == NT_STATUS_ACCESS_DENIED:
+                raise CommandError("The authenticated user does "
+                                   "not have sufficient privileges")
+            else:
+                raise
+
+        # Remove the first entry whose name AND domain both match; the
+        # for/else raises when no entry matched.
+        for listelement in data.findall('listelement'):
+            adobject = listelement.find('adobject')
+            name_elm = adobject.find('name')
+            domain_elm = adobject.find('domain')
+            if name_elm is not None and name_elm.text == name and \
+                domain_elm is not None and domain_elm.text == domain:
+                data.remove(listelement)
+                break
+        else:
+            raise CommandError("Cannot remove %s entry because it does "
+                               "not exist" % etype)
+
+        # Serialize and upload the manifest, then bump the GPT.INI version
+        # so clients pick up the changed policy.
+        out = BytesIO()
+        xml_data.write(out, encoding='UTF-8', xml_declaration=True)
+        out.seek(0)
+        try:
+            create_directory_hier(conn, vgp_dir)
+            conn.savefile(vgp_xml, out.read())
+            reg.increment_gpt_ini(machine_changed=True)
+        except NTSTATUSError as e:
+            if e.args[0] == NT_STATUS_ACCESS_DENIED:
+                raise CommandError("The authenticated user does "
+                                   "not have sufficient privileges")
+            raise
+ raise
+
+class cmd_cse_register(Command):
+    """Register a Client Side Extension (CSE) on the current host
+
+This command takes a CSE filename as an argument, and registers it for
+applying policy on the current host. This is not necessary for CSEs which
+are distributed with the current version of Samba, but is useful for installing
+experimental CSEs or custom built CSEs.
+The <cse_file> argument MUST be a permanent location for the CSE. The register
+command does not copy the file to some other directory. The samba-gpupdate
+command will execute the CSE from the exact location specified from this
+command.
+
+Example:
+samba-tool gpo cse register ./gp_chromium_ext.py gp_chromium_ext --machine
+    """
+
+    synopsis = "%prog <cse_file> <cse_name> [options]"
+
+    takes_optiongroups = {
+        "sambaopts": options.SambaOptions,
+        "versionopts": options.VersionOptions,
+    }
+
+    takes_options = [
+        Option("--machine", default=False, action='store_true',
+               help="Whether to register the CSE as Machine policy"),
+        Option("--user", default=False, action='store_true',
+               help="Whether to register the CSE as User policy"),
+    ]
+
+    takes_args = ["cse_file", "cse_name"]
+
+    def run(self, cse_file, cse_name, machine=False, user=False,
+            sambaopts=None, versionopts=None):
+        self.lp = sambaopts.get_loadparm()
+
+        # A CSE must apply to at least one of the two policy classes.
+        if machine == False and user == False:
+            raise CommandError("Either --machine or --user must be selected")
+
+        # Each registration gets a freshly generated GUID identifying the CSE.
+        ext_guid = "{%s}" % str(uuid.uuid4())
+        # Register the absolute path: per the docstring above, samba-gpupdate
+        # executes the CSE from this exact location; the file is not copied.
+        ext_path = os.path.realpath(cse_file)
+        ret = register_gp_extension(ext_guid, cse_name, ext_path,
+                                    smb_conf=self.lp.configfile,
+                                    machine=machine, user=user)
+        if not ret:
+            raise CommandError('Failed to register CSE "%s"' % cse_name)
+
+class cmd_cse_list(Command):
+    """List the registered Client Side Extensions (CSEs) on the current host
+
+This command lists the currently registered CSEs on the host.
+
+Example:
+samba-tool gpo cse list
+    """
+
+    synopsis = "%prog [options]"
+
+    takes_optiongroups = {
+        "sambaopts": options.SambaOptions,
+        "versionopts": options.VersionOptions,
+    }
+
+    def run(self, sambaopts=None, versionopts=None):
+        self.lp = sambaopts.get_loadparm()
+
+        cses = list_gp_extensions(self.lp.configfile)
+        # Print one multi-line record per registered extension, keyed by
+        # its unique GUID (the handle used by `cse unregister`).
+        for guid, gp_ext in cses.items():
+            self.outf.write("UniqueGUID : %s\n" % guid)
+            self.outf.write("FileName : %s\n" % gp_ext['DllName'])
+            self.outf.write("ProcessGroupPolicy : %s\n" % \
+                            gp_ext['ProcessGroupPolicy'])
+            self.outf.write("MachinePolicy : %s\n" % \
+                            str(gp_ext['MachinePolicy']))
+            self.outf.write("UserPolicy : %s\n\n" % \
+                            str(gp_ext['UserPolicy']))
+
+class cmd_cse_unregister(Command):
+    """Unregister a Client Side Extension (CSE) from the current host
+
+This command takes a unique GUID as an argument (representing a registered
+CSE), and unregisters it for applying policy on the current host. Use the
+`samba-tool gpo cse list` command to determine the unique GUIDs of CSEs.
+
+Example:
+samba-tool gpo cse unregister {3F60F344-92BF-11ED-A1EB-0242AC120002}
+    """
+
+    synopsis = "%prog <guid> [options]"
+
+    takes_optiongroups = {
+        "sambaopts": options.SambaOptions,
+        "versionopts": options.VersionOptions,
+    }
+
+    takes_args = ["guid"]
+
+    def run(self, guid, sambaopts=None, versionopts=None):
+        self.lp = sambaopts.get_loadparm()
+
+        # unregister_gp_extension returns a falsy value on failure
+        # (e.g. an unknown GUID).
+        ret = unregister_gp_extension(guid, self.lp.configfile)
+        if not ret:
+            raise CommandError('Failed to unregister CSE "%s"' % guid)
+
+class cmd_cse(SuperCommand):
+    """Manage Client Side Extensions"""
+    # Dispatch table: subcommand name -> handler instance.
+    subcommands = {}
+    subcommands["register"] = cmd_cse_register()
+    subcommands["list"] = cmd_cse_list()
+    subcommands["unregister"] = cmd_cse_unregister()
+
+class cmd_access(SuperCommand):
+    """Manage Host Access Group Policy Objects"""
+    # Dispatch table: subcommand name -> handler instance.
+    subcommands = {}
+    subcommands["list"] = cmd_list_access()
+    subcommands["add"] = cmd_add_access()
+    subcommands["remove"] = cmd_remove_access()
+
+class cmd_manage(SuperCommand):
+    """Manage Group Policy Objects"""
+    # Dispatch table: one entry per manageable policy area.
+    subcommands = {}
+    subcommands["sudoers"] = cmd_sudoers()
+    subcommands["security"] = cmd_security()
+    subcommands["smb_conf"] = cmd_smb_conf()
+    subcommands["symlink"] = cmd_symlink()
+    subcommands["files"] = cmd_files()
+    subcommands["openssh"] = cmd_openssh()
+    subcommands["scripts"] = cmd_scripts()
+    subcommands["motd"] = cmd_motd()
+    subcommands["issue"] = cmd_issue()
+    subcommands["access"] = cmd_access()
+
+class cmd_gpo(SuperCommand):
+    """Group Policy Object (GPO) management."""
+
+    # Top-level dispatch table for `samba-tool gpo`.
+    subcommands = {}
+    subcommands["listall"] = cmd_listall()
+    subcommands["list"] = cmd_list()
+    subcommands["show"] = cmd_show()
+    subcommands["load"] = cmd_load()
+    subcommands["remove"] = cmd_remove()
+    subcommands["getlink"] = cmd_getlink()
+    subcommands["setlink"] = cmd_setlink()
+    subcommands["dellink"] = cmd_dellink()
+    subcommands["listcontainers"] = cmd_listcontainers()
+    subcommands["getinheritance"] = cmd_getinheritance()
+    subcommands["setinheritance"] = cmd_setinheritance()
+    subcommands["fetch"] = cmd_fetch()
+    subcommands["create"] = cmd_create()
+    subcommands["del"] = cmd_del()
+    subcommands["aclcheck"] = cmd_aclcheck()
+    subcommands["backup"] = cmd_backup()
+    subcommands["restore"] = cmd_restore()
+    subcommands["admxload"] = cmd_admxload()
+    subcommands["manage"] = cmd_manage()
+    subcommands["cse"] = cmd_cse()
diff --git a/python/samba/netcmd/group.py b/python/samba/netcmd/group.py
new file mode 100644
index 0000000..a705560
--- /dev/null
+++ b/python/samba/netcmd/group.py
@@ -0,0 +1,1416 @@
+# Copyright Jelmer Vernooij 2008
+#
+# Based on the original in EJS:
+# Copyright Andrew Tridgell 2005
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import samba.getopt as options
+from samba.netcmd import Command, SuperCommand, CommandError, Option
+import ldb
+from samba.ndr import ndr_pack, ndr_unpack
+from samba.dcerpc import security
+
+from samba.auth import system_session
+from samba.samdb import SamDB
+from samba.dsdb import (
+ ATYPE_SECURITY_GLOBAL_GROUP,
+ DS_GUID_USERS_CONTAINER,
+ GTYPE_SECURITY_BUILTIN_LOCAL_GROUP,
+ GTYPE_SECURITY_DOMAIN_LOCAL_GROUP,
+ GTYPE_SECURITY_GLOBAL_GROUP,
+ GTYPE_SECURITY_UNIVERSAL_GROUP,
+ GTYPE_DISTRIBUTION_DOMAIN_LOCAL_GROUP,
+ GTYPE_DISTRIBUTION_GLOBAL_GROUP,
+ GTYPE_DISTRIBUTION_UNIVERSAL_GROUP,
+ SYSTEM_FLAG_DISALLOW_DELETE,
+ SYSTEM_FLAG_DOMAIN_DISALLOW_MOVE,
+ SYSTEM_FLAG_DOMAIN_DISALLOW_RENAME,
+ UF_ACCOUNTDISABLE,
+)
+from collections import defaultdict
+from subprocess import check_call, CalledProcessError
+from samba.common import get_bytes, normalise_int32
+import os
+import tempfile
+from . import common
+
+# Map group scope names to the matching security groupType constants.
+security_group = dict({"Builtin": GTYPE_SECURITY_BUILTIN_LOCAL_GROUP,
+                       "Domain": GTYPE_SECURITY_DOMAIN_LOCAL_GROUP,
+                       "Global": GTYPE_SECURITY_GLOBAL_GROUP,
+                       "Universal": GTYPE_SECURITY_UNIVERSAL_GROUP})
+# Map group scope names to the matching distribution groupType constants.
+distribution_group = dict({"Domain": GTYPE_DISTRIBUTION_DOMAIN_LOCAL_GROUP,
+                           "Global": GTYPE_DISTRIBUTION_GLOBAL_GROUP,
+                           "Universal": GTYPE_DISTRIBUTION_UNIVERSAL_GROUP})
+
+
+class cmd_group_add(Command):
+    """Creates a new AD group.
+
+This command adds a new Active Directory group. The groupname specified on the command is a unique sAMAccountName.
+
+An Active Directory group may contain user and computer accounts as well as other groups. An administrator adds a new group and adds members to that group so they can be managed as a single entity. This helps to simplify security and system administration.
+
+Groups may also be used to establish email distribution lists, using --group-type=Distribution.
+
+Groups are located in domains in organizational units (OUs). The group's scope is a characteristic of the group that designates the extent to which the group is applied within the domain tree or forest.
+
+The group location (OU), type (security or distribution) and scope may all be specified on the samba-tool command when the group is created.
+
+The command may be run from the root userid or another authorized userid. The
+-H or --URL= option can be used to execute the command on a remote server.
+
+Example1:
+samba-tool group add Group1 -H ldap://samba.samdom.example.com --description='Simple group'
+
+Example1 adds a new group with the name Group1 added to the Users container on a remote LDAP server. The -U parameter is used to pass the userid and password of a user that exists on the remote server and is authorized to issue the command on that server. It defaults to the security type and global scope.
+
+Example2:
+sudo samba-tool group add Group2 --group-type=Distribution
+
+Example2 adds a new distribution group to the local server. The command is run under root using the sudo command.
+
+Example3:
+samba-tool group add Group3 --nis-domain=samdom --gid-number=12345
+
+Example3 adds a new RFC2307 enabled group for NIS domain samdom and GID 12345 (both options are required to enable this feature).
+"""
+
+    synopsis = "%prog <groupname> [options]"
+
+    takes_optiongroups = {
+        "sambaopts": options.SambaOptions,
+        "versionopts": options.VersionOptions,
+        "credopts": options.CredentialsOptions,
+    }
+
+    takes_options = [
+        Option("-H", "--URL", help="LDB URL for database or target server", type=str,
+               metavar="URL", dest="H"),
+        # NOTE(review): the help text below says "new user object"; this is
+        # the group command, so it presumably should read "group object".
+        Option("--groupou",
+               help="Alternative location (without domainDN counterpart) to default CN=Users in which new user object will be created",
+               type=str),
+        Option("--group-scope", type="choice", choices=["Domain", "Global", "Universal"],
+               help="Group scope (Domain | Global | Universal)"),
+        Option("--group-type", type="choice", choices=["Security", "Distribution"],
+               help="Group type (Security | Distribution)"),
+        Option("--description", help="Group's description", type=str),
+        Option("--mail-address", help="Group's email address", type=str),
+        Option("--notes", help="Group's notes", type=str),
+        Option("--gid-number", help="Group's Unix/RFC2307 GID number", type=int),
+        Option("--nis-domain", help="SFU30 NIS Domain", type=str),
+        Option("--special", help="Add a special predefined group", action="store_true", default=False),
+    ]
+
+    takes_args = ["groupname"]
+
+    def run(self, groupname, credopts=None, sambaopts=None,
+            versionopts=None, H=None, groupou=None, group_scope=None,
+            group_type=None, description=None, mail_address=None, notes=None, gid_number=None, nis_domain=None,
+            special=False):
+
+        # Default to a security group with Global scope when unspecified.
+        if (group_type or "Security") == "Security":
+            gtype = security_group.get(group_scope, GTYPE_SECURITY_GLOBAL_GROUP)
+        else:
+            gtype = distribution_group.get(group_scope, GTYPE_DISTRIBUTION_GLOBAL_GROUP)
+
+        if (gid_number is None and nis_domain is not None) or (gid_number is not None and nis_domain is None):
+            raise CommandError('Both --gid-number and --nis-domain have to be set for a RFC2307-enabled group. Operation cancelled.')
+
+        lp = sambaopts.get_loadparm()
+        creds = credopts.get_credentials(lp, fallback_machine=True)
+
+        try:
+            samdb = SamDB(url=H, session_info=system_session(),
+                          credentials=creds, lp=lp)
+        except Exception as e:
+            # FIXME: catch more specific exception
+            raise CommandError(f'Failed to add group "{groupname}"', e)
+
+        # --special creates one of the well-known groups with a fixed RID;
+        # it is mutually exclusive with all of the customisation options.
+        if special:
+            invalid_option = None
+            if group_scope is not None:
+                invalid_option = 'group-scope'
+            elif group_type is not None:
+                invalid_option = 'group-type'
+            elif description is not None:
+                invalid_option = 'description'
+            elif mail_address is not None:
+                invalid_option = 'mail-address'
+            elif notes is not None:
+                invalid_option = 'notes'
+            elif gid_number is not None:
+                invalid_option = 'gid-number'
+            elif nis_domain is not None:
+                invalid_option = 'nis-domain'
+
+            if invalid_option is not None:
+                raise CommandError(f'Superfluous option --{invalid_option} '
+                                   f'specified with --special')
+
+            if not samdb.am_pdc():
+                raise CommandError('Adding special groups is only permitted '
+                                   'against the PDC!')
+
+            # Well-known groups creatable via --special, keyed by
+            # lower-cased name: (name, groupType, RID, description).
+            special_groups = {
+                # On Windows, this group is added automatically when the PDC
+                # role is held by a DC running Windows Server 2012 R2 or later.
+                # https://docs.microsoft.com/en-us/windows-server/security/credentials-protection-and-management/protected-users-security-group#BKMK_Requirements
+                'Protected Users'.lower(): (
+                    'Protected Users',
+                    GTYPE_SECURITY_GLOBAL_GROUP,
+                    security.DOMAIN_RID_PROTECTED_USERS,
+                    'Members of this group are afforded additional '
+                    'protections against authentication security threats'),
+            }
+
+            special_group = special_groups.get(groupname.lower())
+            if special_group is None:
+                raise CommandError(f'Unknown special group "{groupname}".')
+
+            groupname, gtype, rid, description = special_group
+            group_type = normalise_int32(gtype)
+
+            group_dn = samdb.get_default_basedn()
+
+            # Well-known domain group: SID is the domain SID plus a fixed RID.
+            if gtype == GTYPE_SECURITY_GLOBAL_GROUP:
+                object_sid = security.dom_sid(
+                    f'{samdb.get_domain_sid()}-{rid}')
+                system_flags = None
+
+                if not groupou:
+                    group_dn = samdb.get_wellknown_dn(group_dn,
+                                                      DS_GUID_USERS_CONTAINER)
+
+            # Builtin group: fixed SID under S-1-5-32 and protected against
+            # move, rename and delete by system flags.
+            elif gtype == GTYPE_SECURITY_BUILTIN_LOCAL_GROUP:
+                object_sid = security.dom_sid(f'S-1-5-32-{rid}')
+                system_flags = (SYSTEM_FLAG_DOMAIN_DISALLOW_MOVE |
+                                SYSTEM_FLAG_DOMAIN_DISALLOW_RENAME |
+                                SYSTEM_FLAG_DISALLOW_DELETE)
+
+                if not groupou:
+                    try:
+                        group_dn.add_child('CN=Builtin')
+                    except ldb.LdbError:
+                        raise RuntimeError('Error getting Builtin objects DN')
+            else:
+                raise RuntimeError(f'Unknown group type {gtype}')
+
+            if groupou:
+                try:
+                    group_dn.add_child(groupou)
+                except ldb.LdbError:
+                    raise CommandError(f'Invalid group OU "{groupou}"')
+
+            try:
+                group_dn.add_child(f'CN={groupname}')
+            except ldb.LdbError:
+                raise CommandError(f'Invalid group name "{groupname}"')
+
+            msg = {
+                'dn': group_dn,
+                'sAMAccountName': groupname,
+                'objectClass': 'group',
+                'groupType': group_type,
+                'description': description,
+                'objectSid': ndr_pack(object_sid),
+                'isCriticalSystemObject': 'TRUE',
+            }
+
+            if system_flags is not None:
+                msg['systemFlags'] = system_flags
+
+            # The relax control permits writing system-only attributes
+            # such as objectSid and isCriticalSystemObject.
+            try:
+                samdb.add(msg, controls=['relax:0'])
+            except ldb.LdbError as e:
+                num, estr = e.args
+                # Constraint violation: an object with this SID already
+                # exists somewhere else; report where.
+                if num == ldb.ERR_CONSTRAINT_VIOLATION:
+                    try:
+                        res = samdb.search(
+                            expression=f'(objectSid={object_sid})',
+                            attrs=['sAMAccountName'])
+                    except ldb.LdbError:
+                        raise CommandError(
+                            f'Failed to add group "{groupname}"', e)
+
+                    if len(res) != 1:
+                        raise CommandError(
+                            f'Failed to add group "{groupname}"', e)
+
+                    name = res[0].get('sAMAccountName', idx=0)
+                    if name:
+                        with_name = f' with name "{name}"'
+                    else:
+                        with_name = ''
+
+                    raise CommandError(
+                        f'Failed to add group "{groupname}" - Special group '
+                        f'already exists{with_name} at "{res[0].dn}".')
+
+                # The target DN already exists; inspect what is there to
+                # produce a precise diagnostic.
+                elif num == ldb.ERR_ENTRY_ALREADY_EXISTS:
+                    try:
+                        res = samdb.search(base=group_dn,
+                                           scope=ldb.SCOPE_BASE,
+                                           attrs=['sAMAccountName',
+                                                  'objectSid',
+                                                  'groupType'])
+                    except ldb.LdbError:
+                        try:
+                            res = samdb.search(
+                                expression=f'(sAMAccountName={groupname})',
+                                attrs=['sAMAccountName',
+                                       'objectSid',
+                                       'groupType'])
+                        except ldb.LdbError:
+                            raise CommandError(
+                                f'Failed to add group "{groupname}"', e)
+
+                        if len(res) != 1:
+                            raise CommandError(
+                                f'Failed to add group "{groupname}"', e)
+
+                    got_name = res[0].get('sAMAccountName', idx=0)
+                    if got_name:
+                        named = f'named "{got_name}"'
+                    else:
+                        named = 'with no name'
+
+                    got_group_type = res[0].get('groupType',
+                                                idx=0).decode('utf-8')
+                    if group_type != got_group_type:
+                        raise CommandError(
+                            f'Failed to add group "{groupname}" - An object '
+                            f'{named} at "{res[0].dn}" already exists, but it '
+                            f'is not a security group. Rename or remove this '
+                            f'existing object before attempting to add this '
+                            f'special group.')
+
+                    sid = res[0].get('objectSid', idx=0)
+                    if sid is None:
+                        raise CommandError(
+                            f'Failed to add group "{groupname}" - A security '
+                            f'group {named} at "{res[0].dn}" already exists, '
+                            f'but it lacks a SID. Rename or remove this '
+                            f'existing object before attempting to add this '
+                            f'special group.')
+                    else:
+                        sid = ndr_unpack(security.dom_sid, sid)
+                        if sid == object_sid:
+                            raise CommandError(
+                                f'Failed to add group "{groupname}" - The '
+                                f'security group {named} at "{res[0].dn}" '
+                                f'already exists.')
+                        else:
+                            raise CommandError(
+                                f'Failed to add group "{groupname}" - A '
+                                f'security group {named} at "{res[0].dn}" '
+                                f'already exists, but it has the wrong SID, '
+                                f'and will not function as expected. Rename '
+                                f'or remove this existing object before '
+                                f'attempting to add this special group.')
+                else:
+                    raise CommandError(f'Failed to add group "{groupname}"', e)
+            else:
+                self.outf.write(f'Added group {groupname}\n')
+
+            return
+
+        # Non-special path: let SamDB create an ordinary group.
+        try:
+            samdb.newgroup(groupname, groupou=groupou, grouptype=gtype,
+                           description=description, mailaddress=mail_address, notes=notes,
+                           gidnumber=gid_number, nisdomain=nis_domain)
+        except Exception as e:
+            # FIXME: catch more specific exception
+            raise CommandError('Failed to add group "%s"' % groupname, e)
+        self.outf.write("Added group %s\n" % groupname)
+
+
+class cmd_group_delete(Command):
+    """Deletes an AD group.
+
+The command deletes an existing AD group from the Active Directory domain. The groupname specified on the command is the sAMAccountName.
+
+Deleting a group is a permanent operation. When a group is deleted, all permissions and rights that users in the group had inherited from the group account are deleted as well.
+
+The command may be run from the root userid or another authorized userid. The -H or --URL option can be used to execute the command on a remote server.
+
+Example1:
+samba-tool group delete Group1 -H ldap://samba.samdom.example.com -Uadministrator%passw0rd
+
+Example1 shows how to delete an AD group from a remote LDAP server. The -U parameter is used to pass the userid and password of a user that exists on the remote server and is authorized to issue the command on that server.
+
+Example2:
+sudo samba-tool group delete Group2
+
+Example2 deletes group Group2 from the local server. The command is run under root using the sudo command.
+"""
+
+    synopsis = "%prog <groupname> [options]"
+
+    takes_optiongroups = {
+        "sambaopts": options.SambaOptions,
+        "versionopts": options.VersionOptions,
+        "credopts": options.CredentialsOptions,
+    }
+
+    takes_options = [
+        Option("-H", "--URL", help="LDB URL for database or target server", type=str,
+               metavar="URL", dest="H"),
+    ]
+
+    takes_args = ["groupname"]
+
+    def run(self, groupname, credopts=None, sambaopts=None, versionopts=None, H=None):
+
+        lp = sambaopts.get_loadparm()
+        creds = credopts.get_credentials(lp, fallback_machine=True)
+        samdb = SamDB(url=H, session_info=system_session(),
+                      credentials=creds, lp=lp)
+
+        # Match on sAMAccountName, restricted to group objects;
+        # binary_encode guards the filter against special characters.
+        filter = ("(&(sAMAccountName=%s)(objectClass=group))" %
+                  ldb.binary_encode(groupname))
+
+        try:
+            res = samdb.search(base=samdb.domain_dn(),
+                               scope=ldb.SCOPE_SUBTREE,
+                               expression=filter,
+                               attrs=["dn"])
+            # res[0] raises IndexError when no group matched the filter.
+            group_dn = res[0].dn
+        except IndexError:
+            raise CommandError('Unable to find group "%s"' % (groupname))
+
+        try:
+            samdb.delete(group_dn)
+        except Exception as e:
+            # FIXME: catch more specific exception
+            raise CommandError('Failed to remove group "%s"' % groupname, e)
+        self.outf.write("Deleted group %s\n" % groupname)
+
+
+class cmd_group_add_members(Command):
+    """Add members to an AD group.
+
+This command adds one or more members to an existing Active Directory group. The command accepts one or more group member names separated by commas. A group member may be a user or computer account or another Active Directory group.
+
+When a member is added to a group the member may inherit permissions and rights from the group. Likewise, when permission or rights of a group are changed, the changes may reflect in the members through inheritance.
+
+The member names specified on the command must be the sAMAccountName.
+
+Example1:
+samba-tool group addmembers supergroup Group1,Group2,User1 -H ldap://samba.samdom.example.com -Uadministrator%passw0rd
+
+Example1 shows how to add two groups, Group1 and Group2 and one user account, User1, to the existing AD group named supergroup. The command will be run on a remote server specified with the -H. The -U parameter is used to pass the userid and password of a user authorized to issue the command on the remote server.
+
+Example2:
+sudo samba-tool group addmembers supergroup User2
+
+Example2 shows how to add a single user account, User2, to the supergroup AD group. It uses the sudo command to run as root when issuing the command.
+"""
+
+    # NOTE(review): the synopsis below has an unbalanced ']' after
+    # <listofmembers>; it presumably should be plain "<listofmembers>".
+    synopsis = "%prog <groupname> (<listofmembers>]|--member-dn=<member-dn>) [options]"
+
+    takes_optiongroups = {
+        "sambaopts": options.SambaOptions,
+        "versionopts": options.VersionOptions,
+        "credopts": options.CredentialsOptions,
+    }
+
+    takes_options = [
+        Option("-H", "--URL", help="LDB URL for database or target server", type=str,
+               metavar="URL", dest="H"),
+        Option("--member-dn",
+               help=("DN of the new group member to be added.\n"
+                     "The --object-types option will be ignored."),
+               type=str,
+               action="append"),
+        Option("--object-types",
+               help=("Comma separated list of object types.\n"
+                     "The types are used to filter the search for the "
+                     "specified members.\n"
+                     "Valid values are: user, group, computer, serviceaccount, "
+                     "contact and all.\n"
+                     "Default: user,group,computer"),
+               default="user,group,computer",
+               type=str),
+        Option("--member-base-dn",
+               help=("Base DN for group member search.\n"
+                     "Default is the domain DN."),
+               type=str),
+    ]
+
+    takes_args = ["groupname", "listofmembers?"]
+
+    def run(self,
+            groupname,
+            listofmembers=None,
+            credopts=None,
+            sambaopts=None,
+            versionopts=None,
+            H=None,
+            member_base_dn=None,
+            member_dn=None,
+            object_types="user,group,computer"):
+
+        lp = sambaopts.get_loadparm()
+        creds = credopts.get_credentials(lp, fallback_machine=True)
+
+        # At least one source of members is required.
+        if member_dn is None and listofmembers is None:
+            self.usage()
+            raise CommandError(
+                'Either listofmembers or --member-dn must be specified.')
+
+        try:
+            samdb = SamDB(url=H, session_info=system_session(),
+                          credentials=creds, lp=lp)
+            # Combine repeated --member-dn values with the comma-separated
+            # positional list; both may be given at once.
+            groupmembers = []
+            if member_dn is not None:
+                groupmembers += member_dn
+            if listofmembers is not None:
+                groupmembers += listofmembers.split(',')
+            group_member_types = object_types.split(',')
+
+            if member_base_dn is not None:
+                member_base_dn = samdb.normalize_dn_in_domain(member_base_dn)
+
+            samdb.add_remove_group_members(groupname, groupmembers,
+                                           add_members_operation=True,
+                                           member_types=group_member_types,
+                                           member_base_dn=member_base_dn)
+        except Exception as e:
+            # FIXME: catch more specific exception
+            raise CommandError('Failed to add members %r to group "%s" - %s' % (
+                groupmembers, groupname, e))
+        self.outf.write("Added members to group %s\n" % groupname)
+
+
+class cmd_group_remove_members(Command):
+    """Remove members from an AD group.
+
+This command removes one or more members from an existing Active Directory group. The command accepts one or more group member names separated by commas. A group member may be a user or computer account or another Active Directory group that is a member of the group specified on the command.
+
+When a member is removed from a group, inherited permissions and rights will no longer apply to the member.
+
+Example1:
+samba-tool group removemembers supergroup Group1 -H ldap://samba.samdom.example.com -Uadministrator%passw0rd
+
+Example1 shows how to remove Group1 from supergroup. The command will run on the remote server specified on the -H parameter. The -U parameter is used to pass the userid and password of a user authorized to issue the command on the remote server.
+
+Example2:
+sudo samba-tool group removemembers supergroup User1
+
+Example2 shows how to remove a single user account, User1, from the supergroup AD group. It uses the sudo command to run as root when issuing the command.
+"""
+
+    # NOTE(review): the synopsis below has an unbalanced ']' after
+    # <listofmembers>; it presumably should be plain "<listofmembers>".
+    synopsis = "%prog <groupname> (<listofmembers>]|--member-dn=<member-dn>) [options]"
+
+    takes_optiongroups = {
+        "sambaopts": options.SambaOptions,
+        "versionopts": options.VersionOptions,
+        "credopts": options.CredentialsOptions,
+    }
+
+    takes_options = [
+        Option("-H", "--URL", help="LDB URL for database or target server", type=str,
+               metavar="URL", dest="H"),
+        Option("--member-dn",
+               help=("DN of the group member to be removed.\n"
+                     "The --object-types option will be ignored."),
+               type=str,
+               action="append"),
+        Option("--object-types",
+               help=("Comma separated list of object types.\n"
+                     "The types are used to filter the search for the "
+                     "specified members.\n"
+                     "Valid values are: user, group, computer, serviceaccount, "
+                     "contact and all.\n"
+                     "Default: user,group,computer"),
+               default="user,group,computer",
+               type=str),
+        Option("--member-base-dn",
+               help=("Base DN for group member search.\n"
+                     "Default is the domain DN."),
+               type=str),
+    ]
+
+    takes_args = ["groupname", "listofmembers?"]
+
+    def run(self,
+            groupname,
+            listofmembers=None,
+            credopts=None,
+            sambaopts=None,
+            versionopts=None,
+            H=None,
+            member_base_dn=None,
+            member_dn=None,
+            object_types="user,group,computer"):
+
+        lp = sambaopts.get_loadparm()
+        creds = credopts.get_credentials(lp, fallback_machine=True)
+
+        # At least one source of members is required.
+        if member_dn is None and listofmembers is None:
+            self.usage()
+            raise CommandError(
+                'Either listofmembers or --member-dn must be specified.')
+
+        try:
+            samdb = SamDB(url=H, session_info=system_session(),
+                          credentials=creds, lp=lp)
+            # Combine repeated --member-dn values with the comma-separated
+            # positional list; both may be given at once.
+            groupmembers = []
+            if member_dn is not None:
+                groupmembers += member_dn
+            if listofmembers is not None:
+                groupmembers += listofmembers.split(',')
+            group_member_types = object_types.split(',')
+
+            if member_base_dn is not None:
+                member_base_dn = samdb.normalize_dn_in_domain(member_base_dn)
+
+            samdb.add_remove_group_members(groupname,
+                                           groupmembers,
+                                           add_members_operation=False,
+                                           member_types=group_member_types,
+                                           member_base_dn=member_base_dn)
+        except Exception as e:
+            # FIXME: Catch more specific exception
+            raise CommandError('Failed to remove members %r from group "%s"' % (listofmembers, groupname), e)
+        self.outf.write("Removed members from group %s\n" % groupname)
+
+
+class cmd_group_list(Command):
+    """List all groups."""
+
+    synopsis = "%prog [options]"
+
+    takes_options = [
+        Option("-H", "--URL", help="LDB URL for database or target server", type=str,
+               metavar="URL", dest="H"),
+        Option("-v", "--verbose",
+               help="Verbose output, showing group type and group scope.",
+               action="store_true"),
+        Option("-b", "--base-dn",
+               help="Specify base DN to use.",
+               type=str),
+        Option("--full-dn", dest="full_dn",
+               default=False,
+               action='store_true',
+               help="Display DN instead of the sAMAccountName."),
+    ]
+
+    takes_optiongroups = {
+        "sambaopts": options.SambaOptions,
+        "credopts": options.CredentialsOptions,
+        "versionopts": options.VersionOptions,
+    }
+
+    def run(self,
+            sambaopts=None,
+            credopts=None,
+            versionopts=None,
+            H=None,
+            verbose=False,
+            base_dn=None,
+            full_dn=False):
+        # Print one group per line; with --verbose also print the group
+        # type/scope and the direct member count.
+        lp = sambaopts.get_loadparm()
+        creds = credopts.get_credentials(lp, fallback_machine=True)
+
+        samdb = SamDB(url=H, session_info=system_session(),
+                      credentials=creds, lp=lp)
+        attrs=["samaccountname"]
+
+        # groupType and member are only needed for the verbose listing.
+        if verbose:
+            attrs += ["grouptype", "member"]
+        domain_dn = samdb.domain_dn()
+        if base_dn:
+            domain_dn = samdb.normalize_dn_in_domain(base_dn)
+        res = samdb.search(domain_dn, scope=ldb.SCOPE_SUBTREE,
+                           expression=("(objectClass=group)"),
+                           attrs=attrs)
+        if (len(res) == 0):
+            return
+
+        if verbose:
+            # NOTE(review): --full-dn is ignored in verbose mode; only the
+            # sAMAccountName column is printed here.
+            self.outf.write("Group Name Group Type Group Scope Members\n")
+            self.outf.write("--------------------------------------------------------------------------------\n")
+
+            for msg in res:
+                self.outf.write("%-44s" % msg.get("samaccountname", idx=0))
+                # groupType is stored as a signed 32-bit value; mask it to
+                # its unsigned form so it can be compared against the
+                # well-known security_group/distribution_group constants.
+                hgtype = hex(int("%s" % msg["grouptype"]) & 0x00000000FFFFFFFF)
+                if (hgtype == hex(int(security_group.get("Builtin")))):
+                    self.outf.write("Security Builtin ")
+                elif (hgtype == hex(int(security_group.get("Domain")))):
+                    self.outf.write("Security Domain ")
+                elif (hgtype == hex(int(security_group.get("Global")))):
+                    self.outf.write("Security Global ")
+                elif (hgtype == hex(int(security_group.get("Universal")))):
+                    self.outf.write("Security Universal")
+                elif (hgtype == hex(int(distribution_group.get("Global")))):
+                    self.outf.write("Distribution Global ")
+                elif (hgtype == hex(int(distribution_group.get("Domain")))):
+                    self.outf.write("Distribution Domain ")
+                elif (hgtype == hex(int(distribution_group.get("Universal")))):
+                    self.outf.write("Distribution Universal")
+                else:
+                    # Unknown group type: leave the column blank.
+                    self.outf.write(" ")
+                # Direct members only; nested membership is not expanded.
+                num_members = len(msg.get("member", default=[]))
+                self.outf.write(" %6u\n" % num_members)
+        else:
+            for msg in res:
+                if full_dn:
+                    self.outf.write("%s\n" % msg.get("dn"))
+                    continue
+
+                self.outf.write("%s\n" % msg.get("samaccountname", idx=0))
+
+
+class cmd_group_list_members(Command):
+ """List all members of an AD group.
+
+This command lists members from an existing Active Directory group. The command accepts one group name.
+
+Example1:
+samba-tool group listmembers \"Domain Users\" -H ldap://samba.samdom.example.com -Uadministrator%passw0rd
+"""
+
+ synopsis = "%prog <groupname> [options]"
+
+ takes_options = [
+ Option("-H", "--URL", help="LDB URL for database or target server", type=str,
+ metavar="URL", dest="H"),
+ Option("--hide-expired",
+ help="Do not list expired group members",
+ default=False,
+ action='store_true'),
+ Option("--hide-disabled",
+ default=False,
+ action='store_true',
+ help="Do not list disabled group members"),
+ Option("--full-dn", dest="full_dn",
+ default=False,
+ action='store_true',
+ help="Display DN instead of the sAMAccountName.")
+ ]
+
+ takes_optiongroups = {
+ "sambaopts": options.SambaOptions,
+ "credopts": options.CredentialsOptions,
+ "versionopts": options.VersionOptions,
+ }
+
+ takes_args = ["groupname"]
+
+ def run(self,
+ groupname,
+ credopts=None,
+ sambaopts=None,
+ versionopts=None,
+ H=None,
+ hide_expired=False,
+ hide_disabled=False,
+ full_dn=False):
+ lp = sambaopts.get_loadparm()
+ creds = credopts.get_credentials(lp, fallback_machine=True)
+
+ try:
+ samdb = SamDB(url=H, session_info=system_session(),
+ credentials=creds, lp=lp)
+
+ search_filter = ("(&(objectClass=group)(sAMAccountName=%s))" %
+ ldb.binary_encode(groupname))
+ try:
+ res = samdb.search(samdb.domain_dn(), scope=ldb.SCOPE_SUBTREE,
+ expression=(search_filter),
+ attrs=["objectSid"])
+ group_sid_binary = res[0].get('objectSid', idx=0)
+ except IndexError:
+ raise CommandError('Unable to find group "%s"' % (groupname))
+
+ group_sid = ndr_unpack(security.dom_sid, group_sid_binary)
+ (group_dom_sid, rid) = group_sid.split()
+ group_sid_dn = "<SID=%s>" % (group_sid)
+
+ filter_expires = ""
+ if hide_expired is True:
+ current_nttime = samdb.get_nttime()
+ filter_expires = ("(|"
+ "(!(accountExpires=*))"
+ "(accountExpires=0)"
+ "(accountExpires>=%u)"
+ ")" % (current_nttime))
+
+ filter_disabled = ""
+ if hide_disabled is True:
+ filter_disabled = "(!(userAccountControl:%s:=%u))" % (
+ ldb.OID_COMPARATOR_AND, UF_ACCOUNTDISABLE)
+
+ filter = "(&(|(primaryGroupID=%s)(memberOf=%s))%s%s)" % (
+ rid, group_sid_dn, filter_disabled, filter_expires)
+
+ res = samdb.search(samdb.domain_dn(), scope=ldb.SCOPE_SUBTREE,
+ expression=filter,
+ attrs=["samAccountName", "cn"])
+
+ if (len(res) == 0):
+ return
+
+ for msg in res:
+ if full_dn:
+ self.outf.write("%s\n" % msg.get("dn"))
+ continue
+
+ member_name = msg.get("samAccountName", idx=0)
+ if member_name is None:
+ member_name = msg.get("cn", idx=0)
+ self.outf.write("%s\n" % member_name)
+
+ except Exception as e:
+ raise CommandError('Failed to list members of "%s" group - %s' %
+ (groupname, e))
+
+
+class cmd_group_move(Command):
+ """Move a group to an organizational unit/container.
+
+ This command moves a group object into the specified organizational unit
+ or container.
+ The groupname specified on the command is the sAMAccountName.
+ The name of the organizational unit or container can be specified as a
+ full DN or without the domainDN component.
+
+ The command may be run from the root userid or another authorized userid.
+
+ The -H or --URL= option can be used to execute the command against a remote
+ server.
+
+ Example1:
+ samba-tool group move Group1 'OU=OrgUnit,DC=samdom.DC=example,DC=com' \\
+ -H ldap://samba.samdom.example.com -U administrator
+
+ Example1 shows how to move a group Group1 into the 'OrgUnit' organizational
+ unit on a remote LDAP server.
+
+ The -H parameter is used to specify the remote target server.
+
+ Example2:
+ samba-tool group move Group1 CN=Users
+
+ Example2 shows how to move a group Group1 back into the CN=Users container
+ on the local server.
+ """
+
+ synopsis = "%prog <groupname> <new_parent_dn> [options]"
+
+ takes_options = [
+ Option("-H", "--URL", help="LDB URL for database or target server",
+ type=str, metavar="URL", dest="H"),
+ ]
+
+ takes_args = ["groupname", "new_parent_dn"]
+ takes_optiongroups = {
+ "sambaopts": options.SambaOptions,
+ "credopts": options.CredentialsOptions,
+ "versionopts": options.VersionOptions,
+ }
+
+ def run(self, groupname, new_parent_dn, credopts=None, sambaopts=None,
+ versionopts=None, H=None):
+ lp = sambaopts.get_loadparm()
+ creds = credopts.get_credentials(lp, fallback_machine=True)
+ samdb = SamDB(url=H, session_info=system_session(),
+ credentials=creds, lp=lp)
+ domain_dn = ldb.Dn(samdb, samdb.domain_dn())
+
+ filter = ("(&(sAMAccountName=%s)(objectClass=group))" %
+ ldb.binary_encode(groupname))
+ try:
+ res = samdb.search(base=domain_dn,
+ expression=filter,
+ scope=ldb.SCOPE_SUBTREE)
+ group_dn = res[0].dn
+ except IndexError:
+ raise CommandError('Unable to find group "%s"' % (groupname))
+
+ try:
+ full_new_parent_dn = samdb.normalize_dn_in_domain(new_parent_dn)
+ except Exception as e:
+ raise CommandError('Invalid new_parent_dn "%s": %s' %
+ (new_parent_dn, e.message))
+
+ full_new_group_dn = ldb.Dn(samdb, str(group_dn))
+ full_new_group_dn.remove_base_components(len(group_dn) - 1)
+ full_new_group_dn.add_base(full_new_parent_dn)
+
+ try:
+ samdb.rename(group_dn, full_new_group_dn)
+ except Exception as e:
+ raise CommandError('Failed to move group "%s"' % groupname, e)
+ self.outf.write('Moved group "%s" into "%s"\n' %
+ (groupname, full_new_parent_dn))
+
+
+class cmd_group_show(Command):
+ """Display a group AD object.
+
+This command displays a group object and it's attributes in the Active
+Directory domain.
+The group name specified on the command is the sAMAccountName of the group.
+
+The command may be run from the root userid or another authorized userid.
+
+The -H or --URL= option can be used to execute the command against a remote
+server.
+
+Example1:
+samba-tool group show Group1 -H ldap://samba.samdom.example.com \\
+ -U administrator --password=passw1rd
+
+Example1 shows how to display a group's attributes in the domain against a
+remote LDAP server.
+
+The -H parameter is used to specify the remote target server.
+
+Example2:
+samba-tool group show Group2
+
+Example2 shows how to display a group's attributes in the domain against a local
+LDAP server.
+
+Example3:
+samba-tool group show Group3 --attributes=member,objectGUID
+
+Example3 shows how to display a groups objectGUID and member attributes.
+"""
+ synopsis = "%prog <group name> [options]"
+
+ takes_options = [
+ Option("-H", "--URL", help="LDB URL for database or target server",
+ type=str, metavar="URL", dest="H"),
+ Option("--attributes",
+ help=("Comma separated list of attributes, "
+ "which will be printed."),
+ type=str, dest="group_attrs"),
+ ]
+
+ takes_args = ["groupname"]
+ takes_optiongroups = {
+ "sambaopts": options.SambaOptions,
+ "credopts": options.CredentialsOptions,
+ "versionopts": options.VersionOptions,
+ }
+
+ def run(self, groupname, credopts=None, sambaopts=None, versionopts=None,
+ H=None, group_attrs=None):
+
+ lp = sambaopts.get_loadparm()
+ creds = credopts.get_credentials(lp, fallback_machine=True)
+ samdb = SamDB(url=H, session_info=system_session(),
+ credentials=creds, lp=lp)
+
+ attrs = None
+ if group_attrs:
+ attrs = group_attrs.split(",")
+
+ filter = ("(&(objectCategory=group)(sAMAccountName=%s))" %
+ ldb.binary_encode(groupname))
+
+ domaindn = samdb.domain_dn()
+
+ try:
+ res = samdb.search(base=domaindn, expression=filter,
+ scope=ldb.SCOPE_SUBTREE, attrs=attrs)
+ user_dn = res[0].dn
+ except IndexError:
+ raise CommandError('Unable to find group "%s"' % (groupname))
+
+ for msg in res:
+ group_ldif = common.get_ldif_for_editor(samdb, msg)
+ self.outf.write(group_ldif)
+
+
+class cmd_group_stats(Command):
+ """Summary statistics about group memberships."""
+
+ synopsis = "%prog [options]"
+
+ takes_options = [
+ Option("-H", "--URL", help="LDB URL for database or target server", type=str,
+ metavar="URL", dest="H"),
+ ]
+
+ takes_optiongroups = {
+ "sambaopts": options.SambaOptions,
+ "credopts": options.CredentialsOptions,
+ "versionopts": options.VersionOptions,
+ }
+
+ def num_in_range(self, range_min, range_max, group_freqs):
+ total_count = 0
+ for members, count in group_freqs.items():
+ if range_min <= members and members <= range_max:
+ total_count += count
+
+ return total_count
+
+ def run(self, sambaopts=None, credopts=None, versionopts=None, H=None):
+ lp = sambaopts.get_loadparm()
+ creds = credopts.get_credentials(lp, fallback_machine=True)
+
+ samdb = SamDB(url=H, session_info=system_session(),
+ credentials=creds, lp=lp)
+
+ domain_dn = samdb.domain_dn()
+ res = samdb.search(domain_dn, scope=ldb.SCOPE_SUBTREE,
+ expression=("(objectClass=group)"),
+ attrs=["samaccountname", "member"])
+
+ # first count up how many members each group has
+ group_assignments = {}
+ total_memberships = 0
+
+ for msg in res:
+ name = str(msg.get("samaccountname"))
+ num_members = len(msg.get("member", default=[]))
+ group_assignments[name] = num_members
+ total_memberships += num_members
+
+ num_groups = res.count
+ self.outf.write("Group membership statistics*\n")
+ self.outf.write("-------------------------------------------------\n")
+ self.outf.write("Total groups: {0}\n".format(num_groups))
+ self.outf.write("Total memberships: {0}\n".format(total_memberships))
+ average = total_memberships / float(num_groups)
+ self.outf.write("Average members per group: %.2f\n" % average)
+
+ # find the max and median memberships (note that some default groups
+ # always have zero members, so displaying the min is not very helpful)
+ group_names = list(group_assignments.keys())
+ group_members = list(group_assignments.values())
+ idx = group_members.index(max(group_members))
+ max_members = group_members[idx]
+ self.outf.write("Max members: {0} ({1})\n".format(max_members,
+ group_names[idx]))
+ group_members.sort()
+ midpoint = num_groups // 2
+ median = group_members[midpoint]
+ if num_groups % 2 == 0:
+ median = (median + group_members[midpoint - 1]) / 2
+ self.outf.write("Median members per group: {0}\n\n".format(median))
+
+ # convert this to the frequency of group membership, i.e. how many
+ # groups have 5 members, how many have 6 members, etc
+ group_freqs = defaultdict(int)
+ for group, num_members in group_assignments.items():
+ group_freqs[num_members] += 1
+
+ # now squash this down even further, so that we just display the number
+ # of groups that fall into one of the following membership bands
+ bands = [(0, 1), (2, 4), (5, 9), (10, 14), (15, 19), (20, 24),
+ (25, 29), (30, 39), (40, 49), (50, 59), (60, 69), (70, 79),
+ (80, 89), (90, 99), (100, 149), (150, 199), (200, 249),
+ (250, 299), (300, 399), (400, 499), (500, 999), (1000, 1999),
+ (2000, 2999), (3000, 3999), (4000, 4999), (5000, 9999),
+ (10000, max_members)]
+
+ self.outf.write("Members Number of Groups\n")
+ self.outf.write("-------------------------------------------------\n")
+
+ for band in bands:
+ band_start = band[0]
+ band_end = band[1]
+ if band_start > max_members:
+ break
+
+ num_groups = self.num_in_range(band_start, band_end, group_freqs)
+
+ if num_groups != 0:
+ band_str = "{0}-{1}".format(band_start, band_end)
+ self.outf.write("%13s %u\n" % (band_str, num_groups))
+
+ self.outf.write("\n* Note this does not include nested group memberships\n")
+
+
+class cmd_group_edit(Command):
+ """Modify Group AD object.
+
+ This command will allow editing of a group account in the Active Directory
+ domain. You will then be able to add or change attributes and their values.
+
+ The groupname specified on the command is the sAMAccountName.
+
+ The command may be run from the root userid or another authorized userid.
+
+ The -H or --URL= option can be used to execute the command against a remote
+ server.
+
+ Example1:
+ samba-tool group edit Group1 -H ldap://samba.samdom.example.com \\
+ -U administrator --password=passw1rd
+
+ Example1 shows how to edit a groups attributes in the domain against a
+ remote LDAP server.
+
+ The -H parameter is used to specify the remote target server.
+
+ Example2:
+ samba-tool group edit Group2
+
+ Example2 shows how to edit a groups attributes in the domain against a local
+ server.
+
+ Example3:
+ samba-tool group edit Group3 --editor=nano
+
+ Example3 shows how to edit a groups attributes in the domain against a local
+ server using the 'nano' editor.
+ """
+ synopsis = "%prog <groupname> [options]"
+
+ takes_options = [
+ Option("-H", "--URL", help="LDB URL for database or target server",
+ type=str, metavar="URL", dest="H"),
+ Option("--editor", help="Editor to use instead of the system default,"
+ " or 'vi' if no system default is set.", type=str),
+ ]
+
+ takes_args = ["groupname"]
+ takes_optiongroups = {
+ "sambaopts": options.SambaOptions,
+ "credopts": options.CredentialsOptions,
+ "versionopts": options.VersionOptions,
+ }
+
+ def run(self, groupname, credopts=None, sambaopts=None, versionopts=None,
+ H=None, editor=None):
+ lp = sambaopts.get_loadparm()
+ creds = credopts.get_credentials(lp, fallback_machine=True)
+ samdb = SamDB(url=H, session_info=system_session(),
+ credentials=creds, lp=lp)
+
+ filter = ("(&(sAMAccountName=%s)(objectClass=group))" %
+ ldb.binary_encode(groupname))
+
+ domaindn = samdb.domain_dn()
+
+ try:
+ res = samdb.search(base=domaindn,
+ expression=filter,
+ scope=ldb.SCOPE_SUBTREE)
+ group_dn = res[0].dn
+ except IndexError:
+ raise CommandError('Unable to find group "%s"' % (groupname))
+
+ if len(res) != 1:
+ raise CommandError('Invalid number of results: for "%s": %d' %
+ ((groupname), len(res)))
+
+ msg = res[0]
+ result_ldif = common.get_ldif_for_editor(samdb, msg)
+
+ if editor is None:
+ editor = os.environ.get('EDITOR')
+ if editor is None:
+ editor = 'vi'
+
+ with tempfile.NamedTemporaryFile(suffix=".tmp") as t_file:
+ t_file.write(get_bytes(result_ldif))
+ t_file.flush()
+ try:
+ check_call([editor, t_file.name])
+ except CalledProcessError as e:
+ raise CalledProcessError("ERROR: ", e)
+ with open(t_file.name) as edited_file:
+ edited_message = edited_file.read()
+
+ msgs_edited = samdb.parse_ldif(edited_message)
+ msg_edited = next(msgs_edited)[1]
+
+ res_msg_diff = samdb.msg_diff(msg, msg_edited)
+ if len(res_msg_diff) == 0:
+ self.outf.write("Nothing to do\n")
+ return
+
+ try:
+ samdb.modify(res_msg_diff)
+ except Exception as e:
+ raise CommandError("Failed to modify group '%s': " % groupname, e)
+
+ self.outf.write("Modified group '%s' successfully\n" % groupname)
+
+
+class cmd_group_add_unix_attrs(Command):
+ """Add RFC2307 attributes to a group.
+
+This command adds Unix attributes to a group account in the Active
+Directory domain.
+The groupname specified on the command is the sAMaccountName.
+
+Unix (RFC2307) attributes will be added to the group account.
+
+Add 'idmap_ldb:use rfc2307 = Yes' to smb.conf to use these attributes for
+UID/GID mapping.
+
+The command may be run from the root userid or another authorized userid.
+The -H or --URL= option can be used to execute the command against a
+remote server.
+
+Example1:
+samba-tool group addunixattrs Group1 10000
+
+Example1 shows how to add RFC2307 attributes to a domain enabled group
+account.
+
+The groups Unix ID will be set to '10000', provided this ID isn't already
+in use.
+
+"""
+ synopsis = "%prog <groupname> <gidnumber> [options]"
+
+ takes_options = [
+ Option("-H", "--URL", help="LDB URL for database or target server",
+ type=str, metavar="URL", dest="H"),
+ ]
+
+ takes_args = ["groupname", "gidnumber"]
+
+ takes_optiongroups = {
+ "sambaopts": options.SambaOptions,
+ "credopts": options.CredentialsOptions,
+ "versionopts": options.VersionOptions,
+ }
+
+ def run(self, groupname, gidnumber, credopts=None, sambaopts=None,
+ versionopts=None, H=None):
+
+ lp = sambaopts.get_loadparm()
+ creds = credopts.get_credentials(lp)
+
+ samdb = SamDB(url=H, session_info=system_session(),
+ credentials=creds, lp=lp)
+
+ domaindn = samdb.domain_dn()
+
+ # Check group exists and doesn't have a gidNumber
+ filter = "(samaccountname={})".format(ldb.binary_encode(groupname))
+ res = samdb.search(domaindn,
+ scope=ldb.SCOPE_SUBTREE,
+ expression=filter)
+ if (len(res) == 0):
+ raise CommandError("Unable to find group '{}'".format(groupname))
+
+ group_dn = res[0].dn
+
+ if "gidNumber" in res[0]:
+ raise CommandError("Group {} is a Unix group.".format(groupname))
+
+ # Check if supplied gidnumber isn't already being used
+ filter = "(&(objectClass=group)(gidNumber={}))".format(gidnumber)
+ res = samdb.search(domaindn,
+ scope=ldb.SCOPE_SUBTREE,
+ expression=filter)
+ if (len(res) != 0):
+ raise CommandError('gidNumber {} already used.'.format(gidnumber))
+
+ if not lp.get("idmap_ldb:use rfc2307"):
+ self.outf.write("You are setting a Unix/RFC2307 GID. "
+ "You may want to set 'idmap_ldb:use rfc2307 = Yes'"
+ " in smb.conf to use the attributes for "
+ "XID/SID-mapping.\n")
+
+ group_mod = """
+dn: {0}
+changetype: modify
+add: gidNumber
+gidNumber: {1}
+""".format(group_dn, gidnumber)
+
+ try:
+ samdb.modify_ldif(group_mod)
+ except ldb.LdbError as e:
+ raise CommandError("Failed to modify group '{0}': {1}"
+ .format(groupname, e))
+
+ self.outf.write("Modified Group '{}' successfully\n".format(groupname))
+
+
+class cmd_group_rename(Command):
+    """Rename a group and related attributes.
+
+    This command allows to set the group's name related attributes. The
+    group's CN will be renamed automatically.
+
+    The group's CN will be the sAMAccountName.
+    Use the --force-new-cn option to specify the new CN manually and the
+    --reset-cn to reset this change.
+
+    Use an empty attribute value to remove the specified attribute.
+
+    The groupname specified on the command is the sAMAccountName.
+
+    The command may be run locally from the root userid or another authorized
+    userid.
+
+    The -H or --URL= option can be used to execute the command against a remote
+    server.
+
+    Example1:
+    samba-tool group rename employees --samaccountname=staff
+
+    Example1 shows how to change the samaccountname of a group 'employees' to
+    'staff'. The CN of the group employees will also be changed to 'staff',
+    if the previous CN was the previous sAMAccountName.
+
+    Example2:
+    samba-tool group rename employees --mail-address='staff@company.com' \\
+    -H ldap://samba.samdom.example.com -U administrator
+
+    Example2 shows how to rename the mail address of a group 'employees' to
+    'staff@company.com'.
+    The -H parameter is used to specify the remote target server.
+    """
+
+    synopsis = "%prog <groupname> [options]"
+
+    takes_options = [
+        Option("-H", "--URL",
+               help="LDB URL for database or target server",
+               type=str, metavar="URL", dest="H"),
+        # NOTE(review): "(RND)" in this help text looks like a typo for
+        # "(RDN)" -- help strings are runtime output, so left unchanged here.
+        Option("--force-new-cn",
+               help="Specify a new CN (RND) instead of using the sAMAccountName.",
+               type=str),
+        Option("--reset-cn",
+               help="Set the CN (RDN) to the sAMAccountName. Use this option "
+                    "to reset the changes made with the --force-new-cn option.",
+               action="store_true"),
+        Option("--mail-address",
+               help="New mail address",
+               type=str),
+        Option("--samaccountname",
+               help="New account name (sAMAccountName/logon name)",
+               type=str)
+    ]
+
+    takes_args = ["groupname"]
+    takes_optiongroups = {
+        "sambaopts": options.SambaOptions,
+        "credopts": options.CredentialsOptions,
+        "versionopts": options.VersionOptions,
+    }
+
+    def run(self, groupname, credopts=None, sambaopts=None, versionopts=None,
+            H=None, mail_address=None, samaccountname=None, force_new_cn=None,
+            reset_cn=None):
+        # illegal options
+        if force_new_cn and reset_cn:
+            raise CommandError("It is not allowed to specify --force-new-cn "
+                               "together with --reset-cn.")
+        # An empty string would mean "delete the attribute", which is not
+        # allowed for the protected naming attributes below.
+        if force_new_cn == "":
+            raise CommandError("Failed to rename group - delete protected "
+                               "attribute 'CN'")
+        if samaccountname == "":
+            raise CommandError("Failed to rename group - delete protected "
+                               "attribute 'sAMAccountName'")
+
+        lp = sambaopts.get_loadparm()
+        creds = credopts.get_credentials(lp, fallback_machine=True)
+        samdb = SamDB(url=H, session_info=system_session(),
+                      credentials=creds, lp=lp)
+        domain_dn = ldb.Dn(samdb, samdb.domain_dn())
+
+        # Find the group by its current sAMAccountName.
+        filter = ("(&(objectClass=group)(samaccountname=%s))" %
+                  ldb.binary_encode(groupname))
+        try:
+            res = samdb.search(base=domain_dn,
+                               scope=ldb.SCOPE_SUBTREE,
+                               expression=filter,
+                               attrs=["sAMAccountName",
+                                      "cn",
+                                      "mail"]
+                               )
+            old_group = res[0]
+            group_dn = old_group.dn
+        except IndexError:
+            raise CommandError('Unable to find group "%s"' % (groupname))
+
+        group_parent_dn = group_dn.parent()
+        old_cn = old_group["cn"][0]
+
+        # get the actual and the new group cn and the new dn
+        if force_new_cn is not None:
+            new_cn = force_new_cn
+        elif samaccountname is not None:
+            new_cn = samaccountname
+        else:
+            new_cn = old_group["sAMAccountName"]
+
+        # CN must change, if the new CN is different and the old CN is the
+        # standard CN or the change is forced with force-new-cn or reset-cn
+        expected_cn = old_group["sAMAccountName"]
+        must_change_cn = str(old_cn) != str(new_cn) and \
+            (str(old_cn) == str(expected_cn) or \
+             reset_cn or bool(force_new_cn))
+
+        # New DN: new RDN under the group's current parent container.
+        new_group_dn = ldb.Dn(samdb, "CN=%s" % new_cn)
+        new_group_dn.add_base(group_parent_dn)
+
+        # format given attributes
+        # prepare_attr_replace() only adds an element when the new value
+        # differs from the old one, so group_attrs stays empty for no-ops.
+        group_attrs = ldb.Message()
+        group_attrs.dn = group_dn
+        samdb.prepare_attr_replace(group_attrs, old_group, "sAMAccountName",
+                                   samaccountname)
+        samdb.prepare_attr_replace(group_attrs, old_group, "mail", mail_address)
+
+        group_attributes_changed = len(group_attrs) > 0
+
+        # update the group with formatted attributes
+        # The modify and the rename must succeed or fail together, hence the
+        # explicit transaction.
+        samdb.transaction_start()
+        try:
+            if group_attributes_changed:
+                samdb.modify(group_attrs)
+            if must_change_cn:
+                samdb.rename(group_dn, new_group_dn)
+        except Exception as e:
+            samdb.transaction_cancel()
+            # NOTE(review): the exception is passed as a second positional
+            # CommandError argument instead of being formatted into the
+            # message -- verify that CommandError reports it.
+            raise CommandError('Failed to rename group "%s"' % groupname, e)
+        samdb.transaction_commit()
+
+        if must_change_cn:
+            self.outf.write('Renamed CN of group "%s" from "%s" to "%s" '
+                            'successfully\n' % (groupname, old_cn, new_cn))
+
+        if group_attributes_changed:
+            self.outf.write('Following attributes of group "%s" have been '
+                            'changed successfully:\n' % (groupname))
+            for attr in group_attrs.keys():
+                if attr == "dn":
+                    continue
+                # An empty element means the attribute was deleted.
+                self.outf.write('%s: %s\n' % (attr, group_attrs[attr]
+                                if group_attrs[attr] else '[removed]'))
+
+class cmd_group(SuperCommand):
+ """Group management."""
+
+ subcommands = {}
+ subcommands["add"] = cmd_group_add()
+ subcommands["create"] = cmd_group_add()
+ subcommands["delete"] = cmd_group_delete()
+ subcommands["edit"] = cmd_group_edit()
+ subcommands["addmembers"] = cmd_group_add_members()
+ subcommands["removemembers"] = cmd_group_remove_members()
+ subcommands["list"] = cmd_group_list()
+ subcommands["listmembers"] = cmd_group_list_members()
+ subcommands["move"] = cmd_group_move()
+ subcommands["show"] = cmd_group_show()
+ subcommands["stats"] = cmd_group_stats()
+ subcommands["addunixattrs"] = cmd_group_add_unix_attrs()
+ subcommands["rename"] = cmd_group_rename()
diff --git a/python/samba/netcmd/ldapcmp.py b/python/samba/netcmd/ldapcmp.py
new file mode 100644
index 0000000..ff7d8be
--- /dev/null
+++ b/python/samba/netcmd/ldapcmp.py
@@ -0,0 +1,984 @@
+# Unix SMB/CIFS implementation.
+# A command to compare differences of objects and attributes between
+# two LDAP servers both running at the same time. It generally compares
+# one of the three partitions DOMAIN, CONFIGURATION or SCHEMA. Users
+# that have to be provided should be able to read objects in any of the
+# above partitions.
+
+# Copyright (C) Zahari Zahariev <zahari.zahariev@postpath.com> 2009, 2010
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import os
+import re
+import sys
+
+import samba
+import samba.getopt as options
+from samba import Ldb
+from samba.ndr import ndr_unpack
+from samba.dcerpc import security
+from ldb import SCOPE_SUBTREE, SCOPE_ONELEVEL, SCOPE_BASE, ERR_NO_SUCH_OBJECT, LdbError
+from samba.netcmd import (
+ Command,
+ CommandError,
+ Option,
+)
+
# Matches an LDAP ranged-result attribute name such as "member;range=0-1499",
# capturing (attribute, low, high); high is "*" for the final chunk.
RE_RANGED_RESULT = re.compile(r"^([^;]+);range=(\d+)-(\d+|\*)$")
+
+
class LDAPBase(object):
    """A connection to one LDAP/AD server plus cached naming-context data.

    Wraps a samba Ldb connection and precomputes the base/config/schema DNs,
    the NetBIOS domain name, DC server names and a SID->sAMAccountName map
    that the comparison machinery relies on.
    """

    def __init__(self, host, creds, lp,
                 two=False, quiet=False, descriptor=False, sort_aces=False, verbose=False,
                 view="section", base="", scope="SUB",
                 outf=sys.stdout, errf=sys.stderr, skip_missing_dn=True):
        """Connect to *host* (URL, local DB file path, or bare hostname)."""
        ldb_options = []
        samdb_url = host
        if "://" not in host:
            # Bare argument: a local file means a TDB database, otherwise
            # treat it as an LDAP server name.
            if os.path.isfile(host):
                samdb_url = "tdb://%s" % host
            else:
                samdb_url = "ldap://%s" % host
        # use 'paged_search' module when connecting remotely
        if samdb_url.lower().startswith("ldap://"):
            ldb_options = ["modules:paged_searches"]
        self.outf = outf
        self.errf = errf
        self.ldb = Ldb(url=samdb_url,
                       credentials=creds,
                       lp=lp,
                       options=ldb_options)
        self.search_base = base
        self.search_scope = scope
        self.two_domains = two
        self.quiet = quiet
        self.descriptor = descriptor
        self.sort_aces = sort_aces
        self.view = view
        self.verbose = verbose
        self.host = host
        self.skip_missing_dn = skip_missing_dn
        self.base_dn = str(self.ldb.get_default_basedn())
        self.root_dn = str(self.ldb.get_root_basedn())
        self.config_dn = str(self.ldb.get_config_basedn())
        self.schema_dn = str(self.ldb.get_schema_basedn())
        self.domain_netbios = self.find_netbios()
        self.server_names = self.find_servers()
        # "DC=samba,DC=org" -> "samba.org"
        self.domain_name = re.sub("[Dd][Cc]=", "", self.base_dn).replace(",", ".")
        self.domain_sid = self.find_domain_sid()
        self.get_sid_map()
        #
        # Log the domain-controller-specific place-holders that are used
        # when comparing the content of two DCs.
        if self.two_domains and not self.quiet:
            self.outf.write("\n* Place-holders for %s:\n" % self.host)
            self.outf.write(4 * " " + "${DOMAIN_DN} => %s\n" %
                            self.base_dn)
            self.outf.write(4 * " " + "${DOMAIN_NETBIOS} => %s\n" %
                            self.domain_netbios)
            self.outf.write(4 * " " + "${SERVER_NAME} => %s\n" %
                            self.server_names)
            self.outf.write(4 * " " + "${DOMAIN_NAME} => %s\n" %
                            self.domain_name)

    def find_domain_sid(self):
        """Return the domain SID (security.dom_sid) of the default base DN."""
        res = self.ldb.search(base=self.base_dn, expression="(objectClass=*)", scope=SCOPE_BASE)
        return ndr_unpack(security.dom_sid, res[0]["objectSid"][0])

    def find_servers(self):
        """Return the CN of every DC found under OU=Domain Controllers."""
        res = self.ldb.search(base="OU=Domain Controllers,%s" % self.base_dn,
                              scope=SCOPE_SUBTREE, expression="(objectClass=computer)", attrs=["cn"])
        assert len(res) > 0
        return [str(x["cn"][0]) for x in res]

    def find_netbios(self):
        """Return the NetBIOS domain name from the Partitions container.

        Raises CommandError when the search is rejected for lack of
        authentication or when no nETBIOSName entry can be found.
        """
        try:
            res = self.ldb.search(base="CN=Partitions,%s" % self.config_dn,
                                  scope=SCOPE_SUBTREE, attrs=["nETBIOSName"])
        except LdbError as e:
            # BUGFIX: an LdbError cannot be unpacked directly on Python 3;
            # use e.args (as object_exists() below already does).
            (enum, estr) = e.args
            if estr in ["Operation unavailable without authentication"]:
                raise CommandError(estr, e)
            # BUGFIX: previously fell through with 'res' unbound, turning any
            # other search failure into a NameError; propagate it instead.
            raise

        if len(res) == 0:
            raise CommandError("Could not find netbios name")

        for x in res:
            if "nETBIOSName" in x:
                return x["nETBIOSName"][0].decode()

    def object_exists(self, object_dn):
        """Return True when *object_dn* exists (base-scope search succeeds)."""
        res = None
        try:
            res = self.ldb.search(base=object_dn, scope=SCOPE_BASE)
        except LdbError as e2:
            (enum, estr) = e2.args
            if enum == ERR_NO_SUCH_OBJECT:
                return False
            raise
        return len(res) == 1

    def delete_force(self, object_dn):
        """Delete *object_dn*, tolerating it already being absent."""
        try:
            self.ldb.delete(object_dn)
        # BUGFIX: was 'except Ldb.LdbError' — the Ldb class has no LdbError
        # attribute, so the handler itself raised AttributeError. LdbError is
        # imported from the ldb module at the top of this file.
        except LdbError as e:
            assert "No such object" in str(e)

    def get_attribute_name(self, key):
        """Return the real attribute name.

        Resolves ranged results, e.g. "member;range=0-1499" -> "member".
        """
        m = RE_RANGED_RESULT.match(key)
        if m is None:
            return key

        return m.group(1)

    def get_attribute_values(self, object_dn, key, vals):
        """Return a list with all values of the attribute *key* on *object_dn*.

        Resolves ranged results (e.g. member;range=0-1499) by issuing
        follow-up searches until the server answers with a "*" upper bound.
        """
        m = RE_RANGED_RESULT.match(key)
        if m is None:
            # no range, just return the values
            return vals

        attr = m.group(1)
        hi = int(m.group(3))

        # get additional values in a loop
        # until we get a response with '*' at the end
        while True:

            n = "%s;range=%d-*" % (attr, hi + 1)
            res = self.ldb.search(base=object_dn, scope=SCOPE_BASE, attrs=[n])
            assert len(res) == 1
            res = dict(res[0])
            del res["dn"]

            fm = None
            fvals = None

            # find the ranged answer that matches the attribute we asked for
            for key in res:
                m = RE_RANGED_RESULT.match(key)

                if m is None:
                    continue

                if m.group(1) != attr:
                    continue

                fm = m
                fvals = list(res[key])
                break

            if fm is None:
                break

            vals.extend(fvals)
            if fm.group(3) == "*":
                # if we got "*" we're done
                break

            # the next chunk must continue exactly where the last one ended
            assert int(fm.group(2)) == hi + 1
            hi = int(fm.group(3))

        return vals

    def get_attributes(self, object_dn):
        """Return a dict of all default-visible attributes of *object_dn*,
        with ranged results resolved and each value list sorted."""
        res = self.ldb.search(base=object_dn, scope=SCOPE_BASE, attrs=["*"])
        assert len(res) == 1
        res = dict(res[0])
        # 'Dn' element is not iterable and we have it as 'distinguishedName'
        del res["dn"]

        attributes = {}
        for key, vals in res.items():
            name = self.get_attribute_name(key)
            # sort vals and return a list, help to compare
            vals = sorted(vals)
            attributes[name] = self.get_attribute_values(object_dn, key, vals)

        return attributes

    def get_descriptor_sddl(self, object_dn):
        """Return the nTSecurityDescriptor of *object_dn* as an SDDL string."""
        res = self.ldb.search(base=object_dn, scope=SCOPE_BASE, attrs=["nTSecurityDescriptor"])
        desc = res[0]["nTSecurityDescriptor"][0]
        desc = ndr_unpack(security.descriptor, desc)
        return desc.as_sddl(self.domain_sid)

    def guid_as_string(self, guid_blob):
        """Translate binary representation of schemaIDGUID to standard string representation.

        @guid_blob: binary schemaIDGUID
        """
        # NOTE(review): this iterates "%s" % blob with ord() per character,
        # which looks Python-2 era; on Python 3 a bytes blob would render as
        # "b'...'" and mis-convert. Confirm whether any caller still uses it.
        blob = "%s" % guid_blob
        stops = [4, 2, 2, 2, 6]
        index = 0
        res = ""
        x = 0
        while x < len(stops):
            tmp = ""
            y = 0
            while y < stops[x]:
                c = hex(ord(blob[index])).replace("0x", "")
                c = [None, "0" + c, c][len(c)]
                if 2 * index < len(blob):
                    tmp = c + tmp
                else:
                    tmp += c
                index += 1
                y += 1
            res += tmp + " "
            x += 1
        assert index == len(blob)
        return res.strip().replace(" ", "-")

    def get_sid_map(self):
        """Build self.sid_map: SID string -> sAMAccountName for every object
        in the domain partition that carries an objectSid."""
        self.sid_map = {}
        res = self.ldb.search(base=self.base_dn,
                              expression="(objectSid=*)", scope=SCOPE_SUBTREE, attrs=["objectSid", "sAMAccountName"])
        for item in res:
            try:
                self.sid_map["%s" % ndr_unpack(security.dom_sid, item["objectSid"][0])] = str(item["sAMAccountName"][0])
            except KeyError:
                # objects without sAMAccountName are simply not mapped
                pass
+
+
class Descriptor(object):
    """The DACL of one object's security descriptor, ready for comparison."""

    def __init__(self, connection, dn, outf=sys.stdout, errf=sys.stderr):
        """Fetch the SDDL of *dn* through *connection* and split out its DACL."""
        self.con = connection
        self.dn = dn
        self.outf = outf
        self.errf = errf
        self.sddl = connection.get_descriptor_sddl(dn)
        self.dacl_list = self.extract_dacl()
        if connection.sort_aces:
            self.dacl_list.sort()

    def extract_dacl(self):
        """Extract the DACL as a list of ACE strings (brackets included)."""
        pattern = r"D:(.*?)(\(.*?\))S:" if "S:" in self.sddl else r"D:(.*?)(\(.*\))"
        try:
            dacl_part = re.search(pattern, self.sddl).group(2)
        except AttributeError:
            # no DACL present in the SDDL string
            return []
        return re.findall(r"(\(.*?\))", dacl_part)

    def fix_sid(self, ace):
        """Return *ace* with every SID the connection knows about replaced by
        its account name; unknown SIDs are left untouched."""
        text = "%s" % ace
        for sid in re.findall("S-[-0-9]+", text):
            name = self.con.sid_map.get(sid)
            if name is not None:
                text = text.replace(sid, name)
        return text

    def diff_1(self, other):
        """Positional ("collision" view) comparison of the two DACLs.

        Returns (identical, report): ACEs are compared index by index,
        '|' marking matches and '*' marking collisions.
        """
        report = ""
        if len(self.dacl_list) != len(other.dacl_list):
            report += 4 * " " + "Difference in ACE count:\n"
            report += 8 * " " + "=> %s\n" % len(self.dacl_list)
            report += 8 * " " + "=> %s\n" % len(other.dacl_list)
        #
        identical = True
        for idx in range(max(len(self.dacl_list), len(other.dacl_list))):
            mine = "%s" % self.dacl_list[idx] if idx < len(self.dacl_list) else ""
            theirs = "%s" % other.dacl_list[idx] if idx < len(other.dacl_list) else ""
            mine_fixed = "%s" % self.fix_sid(mine)
            theirs_fixed = "%s" % other.fix_sid(theirs)
            if mine_fixed == theirs_fixed:
                report += "%60s | %s\n" % (mine_fixed, theirs_fixed)
            else:
                report += "%60s * %s\n" % (mine_fixed, theirs_fixed)
                identical = False
        return (identical, report)

    def diff_2(self, other):
        """Set-based ("section" view) comparison of the two DACLs.

        Returns (identical, report): lists ACEs unique to either side and,
        in verbose mode, the ACEs present in both.
        """
        report = ""
        if len(self.dacl_list) != len(other.dacl_list):
            report += 4 * " " + "Difference in ACE count:\n"
            report += 8 * " " + "=> %s\n" % len(self.dacl_list)
            report += 8 * " " + "=> %s\n" % len(other.dacl_list)
        #
        mine_fixed = [self.fix_sid(ace) for ace in self.dacl_list]
        theirs_fixed = [other.fix_sid(ace) for ace in other.dacl_list]

        only_mine = sorted(ace for ace in mine_fixed if ace not in theirs_fixed)
        only_theirs = sorted(ace for ace in theirs_fixed if ace not in mine_fixed)
        shared = [ace for ace in mine_fixed if ace in theirs_fixed]
        shared += [ace for ace in theirs_fixed if ace in mine_fixed]

        if len(only_mine) > 0:
            report += 4 * " " + "ACEs found only in %s:\n" % self.con.host
            for ace in only_mine:
                report += 8 * " " + ace + "\n"
        #
        if len(only_theirs) > 0:
            report += 4 * " " + "ACEs found only in %s:\n" % other.con.host
            for ace in only_theirs:
                report += 8 * " " + ace + "\n"
        #
        if self.con.verbose:
            report += 4 * " " + "ACEs found in both:\n"
            for ace in sorted(set(shared)):
                report += 8 * " " + ace + "\n"
        return (only_mine == [] and only_theirs == [], report)
+
+
class LDAPObject(object):
    """One LDAP object (by DN) with its attributes, plus comparison logic.

    Equality (==) compares either the nTSecurityDescriptor (when the
    connection was opened with --sd) or the full attribute set against
    another LDAPObject, leaving a printable report in self.screen_output
    on both objects.
    """

    def __init__(self, connection, dn, summary, filter_list,
                 outf=sys.stdout, errf=sys.stderr):
        self.outf = outf
        self.errf = errf
        self.con = connection
        self.two_domains = self.con.two_domains
        self.quiet = self.con.quiet
        self.verbose = self.con.verbose
        self.summary = summary
        # Expand the ${...} place-holder aliases back to this DC's values.
        self.dn = dn.replace("${DOMAIN_DN}", self.con.base_dn)
        self.dn = self.dn.replace("CN=${DOMAIN_NETBIOS}", "CN=%s" % self.con.domain_netbios)
        for x in self.con.server_names:
            self.dn = self.dn.replace("CN=${SERVER_NAME}", "CN=%s" % x)
        self.attributes = self.con.get_attributes(self.dn)
        # One domain - two domain controllers
        #
        # Some attributes are defined as FLAG_ATTR_NOT_REPLICATED
        #
        # The following list was generated by
        # egrep '^systemFlags: |^ldapDisplayName: |^linkID: ' \
        #        source4/setup/ad-schema/MS-AD_Schema_2K8_R2_Attributes.txt | \
        #        grep -B1 FLAG_ATTR_NOT_REPLICATED | \
        #        grep ldapDisplayName | \
        #        cut -d ' ' -f2
        self.non_replicated_attributes = [
            "badPasswordTime",
            "badPwdCount",
            "dSCorePropagationData",
            "lastLogoff",
            "lastLogon",
            "logonCount",
            "modifiedCount",
            "msDS-Cached-Membership",
            "msDS-Cached-Membership-Time-Stamp",
            "msDS-EnabledFeatureBL",
            "msDS-ExecuteScriptPassword",
            "msDS-NcType",
            "msDS-ReplicationEpoch",
            "msDS-RetiredReplNCSignatures",
            "msDS-USNLastSyncSuccess",
            # "distinguishedName", # This is implicitly replicated
            # "objectGUID", # This is implicitly replicated
            "partialAttributeDeletionList",
            "partialAttributeSet",
            "pekList",
            "prefixMap",
            "replPropertyMetaData",
            "replUpToDateVector",
            "repsFrom",
            "repsTo",
            "rIDNextRID",
            "rIDPreviousAllocationPool",
            "schemaUpdate",
            "serverState",
            "subRefs",
            "uSNChanged",
            "uSNCreated",
            "uSNLastObjRem",
            "whenChanged",  # This is implicitly replicated, but may diverge on updates of non-replicated attributes
        ]
        self.ignore_attributes = self.non_replicated_attributes
        self.ignore_attributes += ["msExchServer1HighestUSN"]
        if filter_list:
            self.ignore_attributes += filter_list

        self.dn_attributes = []
        self.domain_attributes = []
        self.servername_attributes = []
        self.netbios_attributes = []
        self.other_attributes = []
        # Two domains - two domain controllers

        if self.two_domains:
            self.ignore_attributes += [
                "objectCategory", "objectGUID", "objectSid", "whenCreated",
                "whenChanged", "pwdLastSet", "uSNCreated", "creationTime",
                "modifiedCount", "priorSetTime", "rIDManagerReference",
                "gPLink", "ipsecNFAReference", "fRSPrimaryMember",
                "fSMORoleOwner", "masteredBy", "ipsecOwnersReference",
                "wellKnownObjects", "otherWellKnownObjects", "badPwdCount",
                "ipsecISAKMPReference", "ipsecFilterReference",
                "msDs-masteredBy", "lastSetTime",
                "ipsecNegotiationPolicyReference", "subRefs", "gPCFileSysPath",
                "accountExpires", "invocationId",
                "operatingSystem", "operatingSystemVersion",
                "oEMInformation", "schemaInfo",
                # After Exchange preps
                "targetAddress", "msExchMailboxGuid", "siteFolderGUID"]
            #
            # Attributes that contain the unique DN tail part e.g. 'DC=samba,DC=org'
            self.dn_attributes = [
                "distinguishedName", "defaultObjectCategory", "member", "memberOf", "siteList", "nCName",
                "homeMDB", "homeMTA", "interSiteTopologyGenerator", "serverReference",
                "msDS-HasInstantiatedNCs", "hasMasterNCs", "msDS-hasMasterNCs", "msDS-HasDomainNCs", "dMDLocation",
                "msDS-IsDomainFor", "rIDSetReferences", "serverReferenceBL",
                # After Exchange preps
                "msExchHomeRoutingGroup", "msExchResponsibleMTAServer", "siteFolderServer", "msExchRoutingMasterDN",
                "msExchRoutingGroupMembersBL", "homeMDBBL", "msExchHomePublicMDB", "msExchOwningServer", "templateRoots",
                "addressBookRoots", "msExchPolicyRoots", "globalAddressList", "msExchOwningPFTree",
                "msExchResponsibleMTAServerBL", "msExchOwningPFTreeBL",
                # After 2012 R2 functional preparation
                "msDS-MembersOfResourcePropertyListBL",
                "msDS-ValueTypeReference",
                "msDS-MembersOfResourcePropertyList",
                "msDS-ValueTypeReferenceBL",
                "msDS-ClaimTypeAppliesToClass",
            ]
            self.dn_attributes = [x.upper() for x in self.dn_attributes]
            #
            # Attributes that contain the Domain name e.g. 'samba.org'
            self.domain_attributes = [
                "proxyAddresses", "mail", "userPrincipalName", "msExchSmtpFullyQualifiedDomainName",
                "dnsHostName", "networkAddress", "dnsRoot", "servicePrincipalName", ]
            self.domain_attributes = [x.upper() for x in self.domain_attributes]
            #
            # May contain DOMAIN_NETBIOS and SERVER_NAME
            self.servername_attributes = ["distinguishedName", "name", "CN", "sAMAccountName", "dNSHostName",
                                          "servicePrincipalName", "rIDSetReferences", "serverReference", "serverReferenceBL",
                                          "msDS-IsDomainFor", "interSiteTopologyGenerator", ]
            self.servername_attributes = [x.upper() for x in self.servername_attributes]
            #
            self.netbios_attributes = ["servicePrincipalName", "CN", "distinguishedName", "nETBIOSName", "name", ]
            self.netbios_attributes = [x.upper() for x in self.netbios_attributes]
            #
            self.other_attributes = ["name", "DC", ]
            self.other_attributes = [x.upper() for x in self.other_attributes]
        #
        self.ignore_attributes = set([x.upper() for x in self.ignore_attributes])

    def log(self, msg):
        """
        Log on the screen if there is no --quiet option set
        """
        if not self.quiet:
            self.outf.write(msg + "\n")

    def fix_dn(self, s):
        """Replace this DC's base DN tail with the ${DOMAIN_DN} place-holder."""
        res = "%s" % s
        if not self.two_domains:
            return res
        if res.upper().endswith(self.con.base_dn.upper()):
            res = res[:len(res) - len(self.con.base_dn)] + "${DOMAIN_DN}"
        return res

    def fix_domain_name(self, s):
        """Replace the DNS domain name (any case) with ${DOMAIN_NAME}."""
        res = "%s" % s
        if not self.two_domains:
            return res
        res = res.replace(self.con.domain_name.lower(), self.con.domain_name.upper())
        res = res.replace(self.con.domain_name.upper(), "${DOMAIN_NAME}")
        return res

    def fix_domain_netbios(self, s):
        """Replace the NetBIOS domain name (any case) with ${DOMAIN_NETBIOS}."""
        res = "%s" % s
        if not self.two_domains:
            return res
        res = res.replace(self.con.domain_netbios.lower(), self.con.domain_netbios.upper())
        res = res.replace(self.con.domain_netbios.upper(), "${DOMAIN_NETBIOS}")
        return res

    def fix_server_name(self, s):
        """Replace the DC's server name with ${SERVER_NAME}.

        Only applied when the domain has exactly one DC; with several DCs the
        substitution would be ambiguous, so the value is left alone.
        """
        res = "%s" % s
        if not self.two_domains or len(self.con.server_names) > 1:
            return res
        for x in self.con.server_names:
            res = res.upper().replace(x, "${SERVER_NAME}")
        return res

    def __eq__(self, other):
        if self.con.descriptor:
            return self.cmp_desc(other)
        return self.cmp_attrs(other)

    def cmp_desc(self, other):
        """Compare security descriptors; report mode depends on --view."""
        d1 = Descriptor(self.con, self.dn, outf=self.outf, errf=self.errf)
        d2 = Descriptor(other.con, other.dn, outf=self.outf, errf=self.errf)
        if self.con.view == "section":
            res = d1.diff_2(d2)
        elif self.con.view == "collision":
            res = d1.diff_1(d2)
        else:
            raise ValueError(f"Unknown --view option value: {self.con.view}")
        #
        self.screen_output = res[1]
        other.screen_output = res[1]
        #
        return res[0]

    def cmp_attrs(self, other):
        """Compare the full attribute sets of self and other.

        Returns True when no difference remains after place-holder
        normalisation; differing attribute names are accumulated into the
        shared summary dicts, and the report goes to screen_output.
        """
        res = ""
        self.df_value_attrs = []

        self_attrs = set([attr.upper() for attr in self.attributes])
        other_attrs = set([attr.upper() for attr in other.attributes])

        self_unique_attrs = self_attrs - other_attrs - other.ignore_attributes
        if self_unique_attrs:
            # BUGFIX: the header was missing its newline, so it ran into the
            # first attribute name on the same output line.
            res += 4 * " " + "Attributes found only in %s:\n" % self.con.host
            for x in self_unique_attrs:
                res += 8 * " " + x + "\n"

        other_unique_attrs = other_attrs - self_attrs - self.ignore_attributes
        if other_unique_attrs:
            # BUGFIX: same missing newline as above.
            res += 4 * " " + "Attributes found only in %s:\n" % other.con.host
            for x in other_unique_attrs:
                res += 8 * " " + x + "\n"

        # NOTE(review): self_unique_attrs and other_unique_attrs are disjoint
        # by construction, so this intersection is always empty; kept as-is to
        # preserve behaviour.
        missing_attrs = self_unique_attrs & other_unique_attrs
        title = 4 * " " + "Difference in attribute values:"
        for x in self.attributes:
            if x.upper() in self.ignore_attributes or x.upper() in missing_attrs:
                continue
            ours = self.attributes[x]
            theirs = other.attributes.get(x)

            if isinstance(ours, list) and isinstance(theirs, list):
                ours = sorted(ours)
                theirs = sorted(theirs)

            if ours != theirs:
                p = None
                q = None
                m = None
                n = None
                # First check if the difference can be fixed by shunting the first part
                # of the DomainHostName e.g. 'mysamba4.test.local' => 'mysamba4'
                if x.upper() in self.other_attributes:
                    p = [self.con.domain_name.split(".")[0] == j for j in ours]
                    q = [other.con.domain_name.split(".")[0] == j for j in theirs]
                    if p == q:
                        continue
                # Attribute values that are list that contain DN based values that may differ
                # (each later stage refines the previous stage's p/q output,
                # falling back to the raw values when p/q are still unset)
                elif x.upper() in self.dn_attributes:
                    m = ours
                    n = theirs
                    p = [self.fix_dn(j) for j in m]
                    q = [other.fix_dn(j) for j in n]
                    if p == q:
                        continue
                # Attributes that contain the Domain name in them
                if x.upper() in self.domain_attributes:
                    m = p
                    n = q
                    if not p and not q:
                        m = ours
                        n = theirs
                    p = [self.fix_domain_name(j) for j in m]
                    q = [other.fix_domain_name(j) for j in n]
                    if p == q:
                        continue
                #
                if x.upper() in self.servername_attributes:
                    # Attributes with SERVER_NAME
                    m = p
                    n = q
                    if not p and not q:
                        m = ours
                        n = theirs
                    p = [self.fix_server_name(j) for j in m]
                    q = [other.fix_server_name(j) for j in n]
                    if p == q:
                        continue
                #
                if x.upper() in self.netbios_attributes:
                    # Attributes with NETBIOS Domain name
                    m = p
                    n = q
                    if not p and not q:
                        m = ours
                        n = theirs
                    p = [self.fix_domain_netbios(j) for j in m]
                    q = [other.fix_domain_netbios(j) for j in n]
                    if p == q:
                        continue
                #
                if title:
                    res += title + "\n"
                    title = None
                if p and q:
                    res += 8 * " " + x + " => \n%s\n%s" % (p, q) + "\n"
                else:
                    res += 8 * " " + x + " => \n%s\n%s" % (ours, theirs) + "\n"
                self.df_value_attrs.append(x)
        #
        if missing_attrs:
            assert self_unique_attrs != other_unique_attrs
            self.summary["unique_attrs"] += list(self_unique_attrs)
            self.summary["df_value_attrs"] += self.df_value_attrs
            other.summary["unique_attrs"] += list(other_unique_attrs)
            other.summary["df_value_attrs"] += self.df_value_attrs  # they are the same
        #
        self.screen_output = res
        other.screen_output = res
        #
        return res == ""
+
+
class LDAPBundle(object):
    """A set of DNs from one server (one naming context) plus the machinery
    to diff it object-by-object against a bundle from another server."""

    def __init__(self, connection, context, dn_list=None, filter_list=None,
                 outf=sys.stdout, errf=sys.stderr):
        self.outf = outf
        self.errf = errf
        self.con = connection
        self.two_domains = self.con.two_domains
        self.quiet = self.con.quiet
        self.verbose = self.con.verbose
        self.search_base = self.con.search_base
        self.search_scope = self.con.search_scope
        self.skip_missing_dn = self.con.skip_missing_dn
        # Aggregated findings, merged into by LDAPObject comparisons.
        self.summary = {}
        self.summary["unique_attrs"] = []
        self.summary["df_value_attrs"] = []
        self.summary["known_ignored_dn"] = []
        self.summary["abnormal_ignored_dn"] = []
        self.filter_list = filter_list
        if dn_list:
            # Caller supplied an explicit DN list; context is ignored.
            self.dn_list = dn_list
        elif context.upper() in ["DOMAIN", "CONFIGURATION", "SCHEMA", "DNSDOMAIN", "DNSFOREST"]:
            self.context = context.upper()
            self.dn_list = self.get_dn_list(context)
        else:
            raise Exception("Unknown initialization data for LDAPBundle().")
        # In two-domain mode, rewrite each DN's domain-specific components
        # into ${...} place-holders so DNs from both servers can be matched.
        counter = 0
        while counter < len(self.dn_list) and self.two_domains:
            # Use alias reference
            tmp = self.dn_list[counter]
            tmp = tmp[:len(tmp) - len(self.con.base_dn)] + "${DOMAIN_DN}"
            tmp = tmp.replace("CN=%s" % self.con.domain_netbios, "CN=${DOMAIN_NETBIOS}")
            if len(self.con.server_names) == 1:
                for x in self.con.server_names:
                    tmp = tmp.replace("CN=%s" % x, "CN=${SERVER_NAME}")
            self.dn_list[counter] = tmp
            counter += 1
        # De-duplicate (aliasing can collapse DNs) and keep a stable order.
        self.dn_list = list(set(self.dn_list))
        self.dn_list = sorted(self.dn_list)
        self.size = len(self.dn_list)

    def log(self, msg):
        """
        Log on the screen if there is no --quiet option set
        """
        if not self.quiet:
            self.outf.write(msg + "\n")

    def update_size(self):
        # Re-sync size/order after external mutation of dn_list.
        self.size = len(self.dn_list)
        self.dn_list = sorted(self.dn_list)

    def diff(self, other):
        """Compare every DN common to both bundles; return True when all match.

        Side effects: writes progress/report lines via log() and replaces
        self.summary / other.summary with the accumulated findings.
        """
        res = True
        if self.size != other.size:
            self.log("\n* DN lists have different size: %s != %s" % (self.size, other.size))
            if not self.skip_missing_dn:
                res = False

        self_dns = set([q.upper() for q in self.dn_list])
        other_dns = set([q.upper() for q in other.dn_list])

        #
        # This is the case where we want to explicitly compare two objects with different DNs.
        # It does not matter if they are in the same DC, in two DC in one domain or in two
        # different domains.
        if self.search_scope != SCOPE_BASE and not self.skip_missing_dn:

            self_only = self_dns - other_dns  # missing in other
            if self_only:
                res = False
                self.log("\n* DNs found only in %s:" % self.con.host)
                for x in sorted(self_only):
                    self.log(4 * " " + x)

            other_only = other_dns - self_dns  # missing in self
            if other_only:
                res = False
                self.log("\n* DNs found only in %s:" % other.con.host)
                for x in sorted(other_only):
                    self.log(4 * " " + x)

        common_dns = self_dns & other_dns
        self.log("\n* Objects to be compared: %d" % len(common_dns))

        for dn in common_dns:

            try:
                object1 = LDAPObject(connection=self.con,
                                     dn=dn,
                                     summary=self.summary,
                                     filter_list=self.filter_list,
                                     outf=self.outf, errf=self.errf)
            except LdbError as e:
                # Object vanished or is unreadable on this side: report and
                # carry on with the remaining DNs.
                self.log("LdbError for dn %s: %s" % (dn, e))
                continue

            try:
                object2 = LDAPObject(connection=other.con,
                                     dn=dn,
                                     summary=other.summary,
                                     filter_list=self.filter_list,
                                     outf=self.outf, errf=self.errf)
            except LdbError as e:
                self.log("LdbError for dn %s: %s" % (dn, e))
                continue

            # LDAPObject.__eq__ does the real comparison and fills in
            # screen_output on both objects.
            if object1 == object2:
                if self.con.verbose:
                    self.log("\nComparing:")
                    self.log("'%s' [%s]" % (object1.dn, object1.con.host))
                    self.log("'%s' [%s]" % (object2.dn, object2.con.host))
                    self.log(4 * " " + "OK")
            else:
                self.log("\nComparing:")
                self.log("'%s' [%s]" % (object1.dn, object1.con.host))
                self.log("'%s' [%s]" % (object2.dn, object2.con.host))
                self.log(object1.screen_output)
                self.log(4 * " " + "FAILED")
                res = False
            self.summary = object1.summary
            other.summary = object2.summary

        return res

    def get_dn_list(self, context):
        """ Query LDAP server about the DNs of certain naming self.con.ext Domain (or Default), Configuration, Schema.
            Parse all DNs and filter those that are 'strange' or abnormal.
        """
        # context was validated by __init__, so search_base is always bound.
        if context.upper() == "DOMAIN":
            search_base = self.con.base_dn
        elif context.upper() == "CONFIGURATION":
            search_base = self.con.config_dn
        elif context.upper() == "SCHEMA":
            search_base = self.con.schema_dn
        elif context.upper() == "DNSDOMAIN":
            search_base = "DC=DomainDnsZones,%s" % self.con.base_dn
        elif context.upper() == "DNSFOREST":
            search_base = "DC=ForestDnsZones,%s" % self.con.root_dn

        dn_list = []
        if not self.search_base:
            self.search_base = search_base
        # Note: converts self.search_scope in place from the user-facing
        # string ("SUB"/"BASE"/"ONE") to the ldb SCOPE_* constant.
        self.search_scope = self.search_scope.upper()
        if self.search_scope == "SUB":
            self.search_scope = SCOPE_SUBTREE
        elif self.search_scope == "BASE":
            self.search_scope = SCOPE_BASE
        elif self.search_scope == "ONE":
            self.search_scope = SCOPE_ONELEVEL
        else:
            raise ValueError("Wrong 'scope' given. Choose from: SUB, ONE, BASE")
        try:
            res = self.con.ldb.search(base=self.search_base, scope=self.search_scope, attrs=["dn"])
        except LdbError as e3:
            (enum, estr) = e3.args
            self.outf.write("Failed search of base=%s\n" % self.search_base)
            raise
        for x in res:
            dn_list.append(x["dn"].get_linearized())
        return dn_list

    def print_summary(self):
        """Write the accumulated per-attribute findings and reset them."""
        self.summary["unique_attrs"] = list(set(self.summary["unique_attrs"]))
        self.summary["df_value_attrs"] = list(set(self.summary["df_value_attrs"]))
        #
        if self.summary["unique_attrs"]:
            self.log("\nAttributes found only in %s:" % self.con.host)
            self.log("".join([str("\n" + 4 * " " + x) for x in self.summary["unique_attrs"]]))
        #
        if self.summary["df_value_attrs"]:
            self.log("\nAttributes with different values:")
            self.log("".join([str("\n" + 4 * " " + x) for x in self.summary["df_value_attrs"]]))
            self.summary["df_value_attrs"] = []
+
+
class cmd_ldapcmp(Command):
    """Compare two ldap databases."""
    synopsis = "%prog <URL1> <URL2> (domain|configuration|schema|dnsdomain|dnsforest) [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        # Double credentials: one set per server being compared.
        "credopts": options.CredentialsOptionsDouble,
    }

    takes_args = ["URL1", "URL2", "context1?", "context2?", "context3?", "context4?", "context5?"]

    takes_options = [
        Option("-w", "--two", dest="two", action="store_true", default=False,
               help="Hosts are in two different domains"),
        Option("-q", "--quiet", dest="quiet", action="store_true", default=False,
               help="Do not print anything but relay on just exit code"),
        Option("-v", "--verbose", dest="verbose", action="store_true", default=False,
               help="Print all DN pairs that have been compared"),
        Option("--sd", dest="descriptor", action="store_true", default=False,
               help="Compare nTSecurityDescriptor attributes only"),
        Option("--sort-aces", dest="sort_aces", action="store_true", default=False,
               help="Sort ACEs before comparison of nTSecurityDescriptor attribute"),
        Option("--view", dest="view", default="section", choices=["section", "collision"],
               help="Display mode for nTSecurityDescriptor results. Possible values: section or collision."),
        Option("--base", dest="base", default="",
               help="Pass search base that will build DN list for the first DC."),
        Option("--base2", dest="base2", default="",
               help="Pass search base that will build DN list for the second DC. Used when --two or when compare two different DNs."),
        Option("--scope", dest="scope", default="SUB", choices=["SUB", "ONE", "BASE"],
               help="Pass search scope that builds DN list. Options: SUB, ONE, BASE"),
        Option("--filter", dest="filter", default="",
               help="List of comma separated attributes to ignore in the comparison"),
        Option("--skip-missing-dn", dest="skip_missing_dn", action="store_true", default=False,
               help="Skip report and failure due to missing DNs in one server or another"),
    ]

    def run(self, URL1, URL2,
            context1=None, context2=None, context3=None, context4=None, context5=None,
            two=False, quiet=False, verbose=False, descriptor=False, sort_aces=False,
            view="section", base="", base2="", scope="SUB", filter="",
            credopts=None, sambaopts=None, versionopts=None, skip_missing_dn=False):
        # NOTE: the option name forces the parameter 'filter', shadowing the
        # builtin inside this method.

        lp = sambaopts.get_loadparm()

        using_ldap = URL1.startswith("ldap") or URL2.startswith("ldap")

        # Credentials are only needed for remote (LDAP) access; local TDB
        # files are opened without them.
        if using_ldap:
            creds = credopts.get_credentials(lp, fallback_machine=True)
        else:
            creds = None
        creds2 = credopts.get_credentials2(lp, guess=False)
        if creds2.is_anonymous():
            # No second credential set given: reuse the first one.
            creds2 = creds
        else:
            creds2.set_domain("")
            creds2.set_workstation("")
        if using_ldap and not creds.authentication_requested():
            raise CommandError("You must supply at least one username/password pair")

        # make a list of contexts to compare in
        contexts = []
        if context1 is None:
            if base and base2:
                # If search bases are specified context is defaulted to
                # DOMAIN so the given search bases can be verified.
                contexts = ["DOMAIN"]
            else:
                # if no argument given, we compare all contexts
                contexts = ["DOMAIN", "CONFIGURATION", "SCHEMA", "DNSDOMAIN", "DNSFOREST"]
        else:
            for c in [context1, context2, context3, context4, context5]:
                if c is None:
                    continue
                if not c.upper() in ["DOMAIN", "CONFIGURATION", "SCHEMA", "DNSDOMAIN", "DNSFOREST"]:
                    raise CommandError("Incorrect argument: %s" % c)
                contexts.append(c.upper())

        if verbose and quiet:
            raise CommandError("You cannot set --verbose and --quiet together")
        if (not base and base2) or (base and not base2):
            raise CommandError("You need to specify both --base and --base2 at the same time")

        con1 = LDAPBase(URL1, creds, lp,
                        two=two, quiet=quiet, descriptor=descriptor, sort_aces=sort_aces,
                        verbose=verbose, view=view, base=base, scope=scope,
                        outf=self.outf, errf=self.errf, skip_missing_dn=skip_missing_dn)
        assert len(con1.base_dn) > 0

        con2 = LDAPBase(URL2, creds2, lp,
                        two=two, quiet=quiet, descriptor=descriptor, sort_aces=sort_aces,
                        verbose=verbose, view=view, base=base2, scope=scope,
                        outf=self.outf, errf=self.errf, skip_missing_dn=skip_missing_dn)
        assert len(con2.base_dn) > 0

        # NOTE(review): an empty --filter yields [''], which harmlessly adds
        # the empty string to the ignore list.
        filter_list = filter.split(",")

        status = 0
        for context in contexts:
            if not quiet:
                self.outf.write("\n* Comparing [%s] context...\n" % context)

            b1 = LDAPBundle(con1, context=context, filter_list=filter_list,
                            outf=self.outf, errf=self.errf)
            b2 = LDAPBundle(con2, context=context, filter_list=filter_list,
                            outf=self.outf, errf=self.errf)

            if b1.diff(b2):
                if not quiet:
                    self.outf.write("\n* Result for [%s]: SUCCESS\n" %
                                    context)
            else:
                if not quiet:
                    self.outf.write("\n* Result for [%s]: FAILURE\n" % context)
                    if not descriptor:
                        assert len(b1.summary["df_value_attrs"]) == len(b2.summary["df_value_attrs"])
                        # Both bundles carry the same attribute diff list;
                        # clear one copy so it is only printed once.
                        b2.summary["df_value_attrs"] = []
                        self.outf.write("\nSUMMARY\n")
                        self.outf.write("---------\n")
                        b1.print_summary()
                        b2.print_summary()
                # mark exit status as FAILURE if a least one comparison failed
                status = -1
        if status != 0:
            raise CommandError("Compare failed: %d" % status)
diff --git a/python/samba/netcmd/main.py b/python/samba/netcmd/main.py
new file mode 100644
index 0000000..f1a0afb
--- /dev/null
+++ b/python/samba/netcmd/main.py
@@ -0,0 +1,98 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2011
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""The main samba-tool command implementation."""
+
+from samba import getopt as options
+
+from samba.netcmd import SuperCommand
+
+
+class cache_loader(dict):
+ """
+ We only load subcommand tools if they are actually used.
+ This significantly reduces the amount of time spent starting up
+ samba-tool
+ """
+ def __getitem__(self, attr):
+ item = dict.__getitem__(self, attr)
+ if item is None:
+ package = 'nettime' if attr == 'time' else attr
+ self[attr] = getattr(__import__('samba.netcmd.%s' % package,
+ fromlist=['cmd_%s' % attr]),
+ 'cmd_%s' % attr)()
+ return dict.__getitem__(self, attr)
+
+ def get(self, attr, default=None):
+ try:
+ return self[attr]
+ except KeyError:
+ return default
+
+ def items(self):
+ for key in self:
+ yield (key, self[key])
+
+
+class cmd_sambatool(SuperCommand):
+ """Main samba administration tool."""
+
+ takes_optiongroups = {
+ "versionopts": options.VersionOptions,
+ }
+
+ subcommands = cache_loader()
+
+ subcommands["computer"] = None
+ subcommands["contact"] = None
+ subcommands["dbcheck"] = None
+ subcommands["delegation"] = None
+ subcommands["dns"] = None
+ subcommands["domain"] = None
+ subcommands["drs"] = None
+ subcommands["dsacl"] = None
+ subcommands["forest"] = None
+ subcommands["fsmo"] = None
+ subcommands["gpo"] = None
+ subcommands["group"] = None
+ subcommands["ldapcmp"] = None
+ subcommands["ntacl"] = None
+ subcommands["rodc"] = None
+ subcommands["schema"] = None
+ subcommands["shell"] = None
+ subcommands["sites"] = None
+ subcommands["spn"] = None
+ subcommands["testparm"] = None
+ subcommands["time"] = None
+ subcommands["user"] = None
+ subcommands["ou"] = None
+ subcommands["processes"] = None
+ subcommands["visualize"] = None
+
+
+def samba_tool(*args, **kwargs):
+ """A single function that runs samba-tool, returning an error code on
+ error, and None on success."""
+ try:
+ cmd, argv = cmd_sambatool()._resolve("samba-tool", *args, **kwargs)
+ ret = cmd._run(*argv)
+ except SystemExit as e:
+ ret = e.code
+ except Exception as e:
+ cmd.show_command_error(e)
+ ret = 1
+ return ret
diff --git a/python/samba/netcmd/nettime.py b/python/samba/netcmd/nettime.py
new file mode 100644
index 0000000..8d78279
--- /dev/null
+++ b/python/samba/netcmd/nettime.py
@@ -0,0 +1,60 @@
+# time
+#
+# Copyright Jelmer Vernooij 2010 <jelmer@samba.org>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import samba.getopt as options
+from . import common
+from samba.net import Net
+
+from samba.netcmd import (
+ Command,
+)
+
+
+class cmd_time(Command):
+ """Retrieve the time on a server.
+
+This command returns the date and time of the Active Directory server specified on the command. The server name specified may be the local server or a remote server. If the servername is not specified, the command returns the time and date of the local AD server.
+
+Example1:
+samba-tool time samdom.example.com
+
+Example1 returns the date and time of the server samdom.example.com.
+
+Example2:
+samba-tool time
+
+Example2 return the date and time of the local server.
+"""
+ synopsis = "%prog [server-name] [options]"
+
+ takes_optiongroups = {
+ "sambaopts": options.SambaOptions,
+ "credopts": options.CredentialsOptions,
+ "versionopts": options.VersionOptions,
+ }
+
+ takes_args = ["server_name?"]
+
+ def run(self, server_name=None, credopts=None, sambaopts=None,
+ versionopts=None):
+ lp = sambaopts.get_loadparm()
+ creds = credopts.get_credentials(lp, fallback_machine=True)
+ net = Net(creds, lp, server=credopts.ipaddress)
+ if server_name is None:
+ server_name = common.netcmd_dnsname(lp)
+ self.outf.write(net.time(server_name) + "\n")
diff --git a/python/samba/netcmd/ntacl.py b/python/samba/netcmd/ntacl.py
new file mode 100644
index 0000000..34675c7
--- /dev/null
+++ b/python/samba/netcmd/ntacl.py
@@ -0,0 +1,503 @@
+# Manipulate file NT ACLs
+#
+# Copyright Matthieu Patou 2010 <mat@matws.net>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import optparse
+import os
+
+import samba.getopt as options
+from samba import provision
+from samba.auth import system_session
+from samba.auth_util import system_session_unix
+from samba.credentials import DONT_USE_KERBEROS
+from samba.dcerpc import security, idmap
+from samba.ndr import ndr_print
+from samba.ntacls import setntacl, getntacl, getdosinfo
+from samba.samba3 import param as s3param, passdb
+from samba.samdb import SamDB
+
+from . import Command, CommandError, SuperCommand, Option
+
+
+def get_local_domain_sid(lp):
+ is_ad_dc = False
+ server_role = lp.server_role()
+ if server_role == "ROLE_ACTIVE_DIRECTORY_DC":
+ is_ad_dc = True
+
+ s3conf = s3param.get_context()
+ s3conf.load(lp.configfile)
+
+ if is_ad_dc:
+ try:
+ samdb = SamDB(session_info=system_session(),
+ lp=lp)
+ except Exception as e:
+ raise CommandError("Unable to open samdb:", e)
+ # ensure we are using the right samba_dsdb passdb backend, no
+ # matter what
+ s3conf.set("passdb backend", "samba_dsdb:%s" % samdb.url)
+
+ try:
+ if is_ad_dc:
+ domain_sid = security.dom_sid(samdb.domain_sid)
+ else:
+ domain_sid = passdb.get_domain_sid()
+ except:
+ raise CommandError("Unable to read domain SID from configuration "
+ "files")
+ return domain_sid
+
+
+class cmd_ntacl_set(Command):
+ """Set ACLs on a file."""
+
+ synopsis = "%prog <acl> <path> [options]"
+
+ takes_optiongroups = {
+ "sambaopts": options.SambaOptions,
+ "credopts": options.CredentialsOptions,
+ "versionopts": options.VersionOptions,
+ }
+
+ takes_options = [
+ # --quiet is not used at all...
+ Option("-q", "--quiet", help=optparse.SUPPRESS_HELP, action="store_true"),
+ Option("-v", "--verbose", help="Be verbose", action="store_true"),
+ Option("--xattr-backend", type="choice", help="xattr backend type (native fs or tdb)",
+ choices=["native", "tdb"]),
+ Option("--eadb-file", help="Name of the tdb file where attributes are stored", type="string"),
+ Option("--use-ntvfs", help="Set the ACLs directly to the TDB or xattr for use with the ntvfs file server", action="store_true"),
+ Option("--use-s3fs", help="Set the ACLs for use with the default s3fs file server via the VFS layer", action="store_true"),
+ Option("--recursive", help="Set the ACLs for directories and their contents recursively", action="store_true"),
+ Option("--follow-symlinks", help="Follow symlinks", action="store_true"),
+ Option("--service", help="Name of the smb.conf service to use when applying the ACLs", type="string")
+ ]
+
+ takes_args = ["acl", "path"]
+
+ def run(self, acl, path, use_ntvfs=False, use_s3fs=False,
+ quiet=False, verbose=False, xattr_backend=None, eadb_file=None,
+ credopts=None, sambaopts=None, versionopts=None,
+ recursive=False, follow_symlinks=False, service=None):
+ logger = self.get_logger()
+ lp = sambaopts.get_loadparm()
+ domain_sid = get_local_domain_sid(lp)
+
+ if not use_ntvfs and not use_s3fs:
+ use_ntvfs = "smb" in lp.get("server services")
+ elif use_s3fs:
+ use_ntvfs = False
+
+ def _setntacl_path(_path):
+ if not follow_symlinks and os.path.islink(_path):
+ if recursive:
+ self.outf.write("ignored symlink: %s\n" % _path)
+ return
+ raise CommandError("symlink: %s: requires --follow-symlinks" % (_path))
+
+ if verbose:
+ if os.path.islink(_path):
+ self.outf.write("symlink: %s\n" % _path)
+ elif os.path.isdir(_path):
+ self.outf.write("dir: %s\n" % _path)
+ else:
+ self.outf.write("file: %s\n" % _path)
+ try:
+ setntacl(lp,
+ _path,
+ acl,
+ str(domain_sid),
+ system_session_unix(),
+ xattr_backend,
+ eadb_file,
+ use_ntvfs=use_ntvfs,
+ service=service)
+ except Exception as e:
+ raise CommandError("Could not set acl for %s: %s" % (_path, e))
+
+ _setntacl_path(path)
+
+ if recursive and os.path.isdir(path):
+ for root, dirs, files in os.walk(path, followlinks=follow_symlinks):
+ for name in files:
+ _setntacl_path(os.path.join(root, name))
+ for name in dirs:
+ _setntacl_path(os.path.join(root, name))
+
+ if use_ntvfs:
+ logger.warning("Please note that POSIX permissions have NOT been changed, only the stored NT ACL")
+
+
+class cmd_dosinfo_get(Command):
+ """Get DOS info of a file from xattr."""
+ synopsis = "%prog <file> [options]"
+
+ takes_optiongroups = {
+ "sambaopts": options.SambaOptions,
+ "credopts": options.CredentialsOptions,
+ "versionopts": options.VersionOptions,
+ }
+
+ takes_args = ["file"]
+
+ def run(self, file, credopts=None, sambaopts=None, versionopts=None):
+ lp = sambaopts.get_loadparm()
+ s3conf = s3param.get_context()
+ s3conf.load(lp.configfile)
+
+ dosinfo = getdosinfo(lp, file)
+ if dosinfo:
+ self.outf.write(ndr_print(dosinfo))
+
+
+class cmd_ntacl_get(Command):
+ """Get ACLs of a file."""
+ synopsis = "%prog <file> [options]"
+
+ takes_optiongroups = {
+ "sambaopts": options.SambaOptions,
+ "credopts": options.CredentialsOptions,
+ "versionopts": options.VersionOptions,
+ }
+
+ takes_options = [
+ Option("--as-sddl", help="Output ACL in the SDDL format", action="store_true"),
+ Option("--xattr-backend", type="choice", help="xattr backend type (native fs or tdb)",
+ choices=["native", "tdb"]),
+ Option("--eadb-file", help="Name of the tdb file where attributes are stored", type="string"),
+ Option("--use-ntvfs", help="Get the ACLs directly from the TDB or xattr used with the ntvfs file server", action="store_true"),
+ Option("--use-s3fs", help="Get the ACLs for use via the VFS layer used by the default s3fs file server", action="store_true"),
+ Option("--service", help="Name of the smb.conf service to use when getting the ACLs", type="string")
+ ]
+
+ takes_args = ["file"]
+
+ def run(self, file, use_ntvfs=False, use_s3fs=False,
+ as_sddl=False, xattr_backend=None, eadb_file=None,
+ credopts=None, sambaopts=None, versionopts=None,
+ service=None):
+ lp = sambaopts.get_loadparm()
+ domain_sid = get_local_domain_sid(lp)
+
+ if not use_ntvfs and not use_s3fs:
+ use_ntvfs = "smb" in lp.get("server services")
+ elif use_s3fs:
+ use_ntvfs = False
+
+ acl = getntacl(lp,
+ file,
+ system_session_unix(),
+ xattr_backend,
+ eadb_file,
+ direct_db_access=use_ntvfs,
+ service=service)
+ if as_sddl:
+ self.outf.write(acl.as_sddl(domain_sid) + "\n")
+ else:
+ self.outf.write(ndr_print(acl))
+
+
+class cmd_ntacl_changedomsid(Command):
+ """Change the domain SID for ACLs"""
+ synopsis = "%prog <Orig-Domain-SID> <New-Domain-SID> <file> [options]"
+
+ takes_optiongroups = {
+ "sambaopts": options.SambaOptions,
+ }
+
+ takes_options = [
+ Option(
+ "--service",
+ help="Name of the smb.conf service to use",
+ type="string"),
+ Option(
+ "--use-ntvfs",
+ help=("Set the ACLs directly to the TDB or xattr for use with the "
+ "ntvfs file server"),
+ action="store_true"),
+ Option(
+ "--use-s3fs",
+ help=("Set the ACLs for use with the default s3fs file server via "
+ "the VFS layer"),
+ action="store_true"),
+ Option(
+ "--eadb-file",
+ help="Name of the tdb file where attributes are stored",
+ type="string"),
+ Option(
+ "--xattr-backend",
+ type="choice",
+ help="xattr backend type (native fs or tdb)",
+ choices=["native", "tdb"]),
+ Option(
+ "-r",
+ "--recursive",
+ help="Set the ACLs for directories and their contents recursively",
+ action="store_true"),
+ Option(
+ "--follow-symlinks",
+ help="Follow symlinks",
+ action="store_true"),
+ Option(
+ "-v",
+ "--verbose",
+ help="Be verbose",
+ action="store_true"),
+ ]
+
+ takes_args = ["old_domain_sid", "new_domain_sid", "path"]
+
+ def run(self,
+ old_domain_sid_str,
+ new_domain_sid_str,
+ path,
+ use_ntvfs=False,
+ use_s3fs=False,
+ service=None,
+ xattr_backend=None,
+ eadb_file=None,
+ sambaopts=None,
+ recursive=False,
+ follow_symlinks=False,
+ verbose=False):
+ logger = self.get_logger()
+ lp = sambaopts.get_loadparm()
+ domain_sid = get_local_domain_sid(lp)
+
+ if not use_ntvfs and not use_s3fs:
+ use_ntvfs = "smb" in lp.get("server services")
+ elif use_s3fs:
+ use_ntvfs = False
+
+ if not use_ntvfs and not service:
+ raise CommandError(
+ "Must provide a share name with --service=<share>")
+
+ try:
+ old_domain_sid = security.dom_sid(old_domain_sid_str)
+ except Exception as e:
+ raise CommandError("Could not parse old sid %s: %s" %
+ (old_domain_sid_str, e))
+
+ try:
+ new_domain_sid = security.dom_sid(new_domain_sid_str)
+ except Exception as e:
+ raise CommandError("Could not parse old sid %s: %s" %
+ (new_domain_sid_str, e))
+
+ def changedom_sids(_path):
+ if not follow_symlinks and os.path.islink(_path):
+ if recursive:
+ self.outf.write("ignored symlink: %s\n" % _path)
+ return
+ raise CommandError("symlink: %s: requires --follow-symlinks" % (_path))
+
+ if verbose:
+ if os.path.islink(_path):
+ self.outf.write("symlink: %s\n" % _path)
+ elif os.path.isdir(_path):
+ self.outf.write("dir: %s\n" % _path)
+ else:
+ self.outf.write("file: %s\n" % _path)
+
+ try:
+ acl = getntacl(lp,
+ _path,
+ system_session_unix(),
+ xattr_backend,
+ eadb_file,
+ direct_db_access=use_ntvfs,
+ service=service)
+ except Exception as e:
+ raise CommandError("Could not get acl for %s: %s" % (_path, e))
+
+ orig_sddl = acl.as_sddl(domain_sid)
+ if verbose:
+ self.outf.write("before:\n%s\n" % orig_sddl)
+
+ def replace_domain_sid(sid):
+ (dom, rid) = sid.split()
+ if dom == old_domain_sid:
+ return security.dom_sid("%s-%i" % (new_domain_sid, rid))
+ return sid
+
+ acl.owner_sid = replace_domain_sid(acl.owner_sid)
+ acl.group_sid = replace_domain_sid(acl.group_sid)
+
+ if acl.sacl:
+ for ace in acl.sacl.aces:
+ ace.trustee = replace_domain_sid(ace.trustee)
+ if acl.dacl:
+ for ace in acl.dacl.aces:
+ ace.trustee = replace_domain_sid(ace.trustee)
+
+ new_sddl = acl.as_sddl(domain_sid)
+ if verbose:
+ self.outf.write("after:\n%s\n" % new_sddl)
+
+ if orig_sddl == new_sddl:
+ if verbose:
+ self.outf.write("nothing to do\n")
+ return True
+
+ try:
+ setntacl(lp,
+ _path,
+ acl,
+ new_domain_sid,
+ system_session_unix(),
+ xattr_backend,
+ eadb_file,
+ use_ntvfs=use_ntvfs,
+ service=service)
+ except Exception as e:
+ raise CommandError("Could not set acl for %s: %s" % (_path, e))
+
+ def recursive_changedom_sids(_path):
+ for root, dirs, files in os.walk(_path, followlinks=follow_symlinks):
+ for f in files:
+ changedom_sids(os.path.join(root, f))
+
+ for d in dirs:
+ changedom_sids(os.path.join(root, d))
+
+ changedom_sids(path)
+ if recursive and os.path.isdir(path):
+ recursive_changedom_sids(path)
+
+ if use_ntvfs:
+ logger.warning("Please note that POSIX permissions have NOT been "
+ "changed, only the stored NT ACL.")
+
+
+class cmd_ntacl_sysvolreset(Command):
+ """Reset sysvol ACLs to defaults (including correct ACLs on GPOs)."""
+ synopsis = "%prog <file> [options]"
+
+ takes_optiongroups = {
+ "sambaopts": options.SambaOptions,
+ "credopts": options.CredentialsOptions,
+ "versionopts": options.VersionOptions,
+ }
+
+ takes_options = [
+ Option("--use-ntvfs", help="Set the ACLs for use with the ntvfs file server", action="store_true"),
+ Option("--use-s3fs", help="Set the ACLs for use with the default s3fs file server", action="store_true")
+ ]
+
+ def run(self, use_ntvfs=False, use_s3fs=False,
+ credopts=None, sambaopts=None, versionopts=None):
+ lp = sambaopts.get_loadparm()
+ creds = credopts.get_credentials(lp)
+ creds.set_kerberos_state(DONT_USE_KERBEROS)
+ logger = self.get_logger()
+
+ sysvol = lp.get("path", "sysvol")
+ try:
+ samdb = SamDB(session_info=system_session(),
+ lp=lp)
+ except Exception as e:
+ raise CommandError("Unable to open samdb:", e)
+
+ if not use_ntvfs and not use_s3fs:
+ use_ntvfs = "smb" in lp.get("server services")
+ elif use_s3fs:
+ use_ntvfs = False
+
+ domain_sid = security.dom_sid(samdb.domain_sid)
+
+ s3conf = s3param.get_context()
+ s3conf.load(lp.configfile)
+ # ensure we are using the right samba_dsdb passdb backend, no matter what
+ s3conf.set("passdb backend", "samba_dsdb:%s" % samdb.url)
+
+ LA_sid = security.dom_sid(str(domain_sid)
+ + "-" + str(security.DOMAIN_RID_ADMINISTRATOR))
+ BA_sid = security.dom_sid(security.SID_BUILTIN_ADMINISTRATORS)
+
+ s4_passdb = passdb.PDB(s3conf.get("passdb backend"))
+
+ # These assertions correct for current ad_dc selftest
+ # configuration. When other environments have a broad range of
+ # groups mapped via passdb, we can relax some of these checks
+ (LA_uid, LA_type) = s4_passdb.sid_to_id(LA_sid)
+ if (LA_type != idmap.ID_TYPE_UID and LA_type != idmap.ID_TYPE_BOTH):
+ raise CommandError("SID %s is not mapped to a UID" % LA_sid)
+ (BA_gid, BA_type) = s4_passdb.sid_to_id(BA_sid)
+ if (BA_type != idmap.ID_TYPE_GID and BA_type != idmap.ID_TYPE_BOTH):
+ raise CommandError("SID %s is not mapped to a GID" % BA_sid)
+
+ if use_ntvfs:
+ logger.warning("Please note that POSIX permissions have NOT been changed, only the stored NT ACL")
+
+ try:
+ provision.setsysvolacl(samdb, sysvol,
+ LA_uid, BA_gid, domain_sid,
+ lp.get("realm").lower(), samdb.domain_dn(),
+ lp, use_ntvfs=use_ntvfs)
+ except OSError as e:
+ if not e.filename:
+ raise
+ raise CommandError(f"Could not access {e.filename}: {e.strerror}", e)
+
+
+class cmd_ntacl_sysvolcheck(Command):
+ """Check sysvol ACLs match defaults (including correct ACLs on GPOs)."""
+ synopsis = "%prog <file> [options]"
+
+ takes_optiongroups = {
+ "sambaopts": options.SambaOptions,
+ "credopts": options.CredentialsOptions,
+ "versionopts": options.VersionOptions,
+ }
+
+ def run(self, credopts=None, sambaopts=None, versionopts=None):
+ lp = sambaopts.get_loadparm()
+ creds = credopts.get_credentials(lp)
+ creds.set_kerberos_state(DONT_USE_KERBEROS)
+
+ netlogon = lp.get("path", "netlogon")
+ sysvol = lp.get("path", "sysvol")
+ try:
+ samdb = SamDB(session_info=system_session(), lp=lp)
+ except Exception as e:
+ raise CommandError("Unable to open samdb:", e)
+
+ domain_sid = security.dom_sid(samdb.domain_sid)
+
+ try:
+ provision.checksysvolacl(samdb, netlogon, sysvol,
+ domain_sid,
+ lp.get("realm").lower(), samdb.domain_dn(),
+ lp)
+ except OSError as e:
+ if not e.filename:
+ raise
+ raise CommandError(f"Could not access {e.filename}: {e.strerror}", e)
+
+
+class cmd_ntacl(SuperCommand):
+ """NT ACLs manipulation."""
+
+ subcommands = {}
+ subcommands["set"] = cmd_ntacl_set()
+ subcommands["get"] = cmd_ntacl_get()
+ subcommands["changedomsid"] = cmd_ntacl_changedomsid()
+ subcommands["sysvolreset"] = cmd_ntacl_sysvolreset()
+ subcommands["sysvolcheck"] = cmd_ntacl_sysvolcheck()
+ subcommands["getdosinfo"] = cmd_dosinfo_get()
diff --git a/python/samba/netcmd/ou.py b/python/samba/netcmd/ou.py
new file mode 100644
index 0000000..71f61e4
--- /dev/null
+++ b/python/samba/netcmd/ou.py
@@ -0,0 +1,411 @@
+# implement samba-tool ou commands
+#
+# Copyright Bjoern Baumbach 2018-2019 <bb@samba.org>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import samba.getopt as options
+import ldb
+
+from samba.auth import system_session
+from samba.netcmd import (
+ Command,
+ CommandError,
+ Option,
+ SuperCommand,
+)
+from samba.samdb import SamDB
+from operator import attrgetter
+
+
+class cmd_rename(Command):
+ """Rename an organizational unit.
+
+ The name of the organizational units can be specified as a full DN
+ or without the domainDN component.
+
+ Examples:
+ samba-tool ou rename 'OU=OrgUnit,DC=samdom,DC=example,DC=com' \\
+ 'OU=NewNameOfOrgUnit,DC=samdom,DC=example,DC=com'
+ samba-tool ou rename 'OU=OrgUnit' 'OU=NewNameOfOrgUnit'
+
+ The examples show how an administrator would rename an ou 'OrgUnit'
+ to 'NewNameOfOrgUnit'. The new DN would be
+ 'OU=NewNameOfOrgUnit,DC=samdom,DC=example,DC=com'
+ """
+
+ synopsis = "%prog <old_ou_dn> <new_ou_dn> [options]"
+
+ takes_options = [
+ Option("-H", "--URL", help="LDB URL for database or target server",
+ type=str, metavar="URL", dest="H"),
+ ]
+
+ takes_args = ["old_ou_dn", "new_ou_dn"]
+ takes_optiongroups = {
+ "sambaopts": options.SambaOptions,
+ "credopts": options.CredentialsOptions,
+ "versionopts": options.VersionOptions,
+ }
+
+ def run(self, old_ou_dn, new_ou_dn, credopts=None, sambaopts=None,
+ versionopts=None, H=None):
+ lp = sambaopts.get_loadparm()
+ creds = credopts.get_credentials(lp, fallback_machine=True)
+ samdb = SamDB(url=H, session_info=system_session(),
+ credentials=creds, lp=lp)
+
+ try:
+ full_old_ou_dn = samdb.normalize_dn_in_domain(old_ou_dn)
+ except Exception as e:
+ raise CommandError('Invalid old_ou_dn "%s": %s' %
+ (old_ou_dn, e))
+ try:
+ full_new_ou_dn = samdb.normalize_dn_in_domain(new_ou_dn)
+ except Exception as e:
+ raise CommandError('Invalid new_ou_dn "%s": %s' %
+ (new_ou_dn, e))
+
+ try:
+ res = samdb.search(base=full_old_ou_dn,
+ expression="(objectclass=organizationalUnit)",
+ scope=ldb.SCOPE_BASE, attrs=[])
+ if len(res) == 0:
+ self.outf.write('Unable to find ou "%s"\n' % old_ou_dn)
+ return
+
+ samdb.rename(full_old_ou_dn, full_new_ou_dn)
+ except Exception as e:
+ raise CommandError('Failed to rename ou "%s"' % full_old_ou_dn, e)
+ self.outf.write('Renamed ou "%s" to "%s"\n' % (full_old_ou_dn,
+ full_new_ou_dn))
+
+
+class cmd_move(Command):
+ """Move an organizational unit.
+
+ The name of the organizational units can be specified as a full DN
+ or without the domainDN component.
+
+ Examples:
+ samba-tool ou move 'OU=OrgUnit,DC=samdom,DC=example,DC=com' \\
+ 'OU=NewParentOfOrgUnit,DC=samdom,DC=example,DC=com'
+ samba-tool ou rename 'OU=OrgUnit' 'OU=NewParentOfOrgUnit'
+
+ The examples show how an administrator would move an ou 'OrgUnit'
+ into the ou 'NewParentOfOrgUnit'. The ou 'OrgUnit' would become
+ a child of the 'NewParentOfOrgUnit' ou. The new DN would be
+ 'OU=OrgUnit,OU=NewParentOfOrgUnit,DC=samdom,DC=example,DC=com'
+ """
+
+ synopsis = "%prog <old_ou_dn> <new_parent_dn> [options]"
+
+ takes_options = [
+ Option("-H", "--URL", help="LDB URL for database or target server",
+ type=str, metavar="URL", dest="H"),
+ ]
+
+ takes_args = ["old_ou_dn", "new_parent_dn"]
+ takes_optiongroups = {
+ "sambaopts": options.SambaOptions,
+ "credopts": options.CredentialsOptions,
+ "versionopts": options.VersionOptions,
+ }
+
+ def run(self, old_ou_dn, new_parent_dn, credopts=None, sambaopts=None,
+ versionopts=None, H=None):
+ lp = sambaopts.get_loadparm()
+ creds = credopts.get_credentials(lp, fallback_machine=True)
+ samdb = SamDB(url=H, session_info=system_session(),
+ credentials=creds, lp=lp)
+
+ try:
+ full_old_ou_dn = samdb.normalize_dn_in_domain(old_ou_dn)
+ except Exception as e:
+ raise CommandError('Invalid old_ou_dn "%s": %s' %
+ (old_ou_dn, e))
+ try:
+ full_new_parent_dn = samdb.normalize_dn_in_domain(new_parent_dn)
+ except Exception as e:
+ raise CommandError('Invalid new_parent_dn "%s": %s' %
+ (new_parent_dn, e))
+
+ full_new_ou_dn = ldb.Dn(samdb, str(full_old_ou_dn))
+ full_new_ou_dn.remove_base_components(len(full_old_ou_dn) - 1)
+ full_new_ou_dn.add_base(full_new_parent_dn)
+
+ try:
+ res = samdb.search(base=full_old_ou_dn,
+ expression="(objectclass=organizationalUnit)",
+ scope=ldb.SCOPE_BASE, attrs=[])
+ if len(res) == 0:
+ self.outf.write('Unable to find ou "%s"\n' % full_old_ou_dn)
+ return
+ samdb.rename(full_old_ou_dn, full_new_ou_dn)
+ except Exception as e:
+ raise CommandError('Failed to move ou "%s"' % full_old_ou_dn, e)
+ self.outf.write('Moved ou "%s" into "%s"\n' %
+ (full_old_ou_dn, full_new_parent_dn))
+
+
+class cmd_add(Command):
+ """Add a new organizational unit.
+
+ The name of the new ou can be specified as a full DN or without the
+ domainDN component.
+
+ Examples:
+ samba-tool ou add 'OU=OrgUnit'
+ samba-tool ou add 'OU=SubOU,OU=OrgUnit,DC=samdom,DC=example,DC=com'
+
+ The examples show how an administrator would add a new ou 'OrgUnit'
+ and a new ou 'SubOU' as a child of the ou 'OrgUnit'.
+ """
+
+ synopsis = "%prog <ou_dn> [options]"
+
+ takes_options = [
+ Option("-H", "--URL", help="LDB URL for database or target server",
+ type=str, metavar="URL", dest="H"),
+ Option("--description", help="OU's description",
+ type=str, dest="description"),
+ ]
+
+ takes_args = ["ou_dn"]
+ takes_optiongroups = {
+ "sambaopts": options.SambaOptions,
+ "credopts": options.CredentialsOptions,
+ "versionopts": options.VersionOptions,
+ }
+
+ def run(self, ou_dn, credopts=None, sambaopts=None, versionopts=None,
+ H=None, description=None):
+ lp = sambaopts.get_loadparm()
+ creds = credopts.get_credentials(lp, fallback_machine=True)
+ samdb = SamDB(url=H, session_info=system_session(),
+ credentials=creds, lp=lp)
+
+ try:
+ full_ou_dn = samdb.normalize_dn_in_domain(ou_dn)
+ except Exception as e:
+ raise CommandError('Invalid ou_dn "%s": %s' % (ou_dn, e))
+
+ try:
+ samdb.create_ou(full_ou_dn, description=description)
+ except Exception as e:
+ raise CommandError('Failed to add ou "%s"' % full_ou_dn, e)
+
+ self.outf.write('Added ou "%s"\n' % full_ou_dn)
+
+
+class cmd_listobjects(Command):
+ """List all objects in an organizational unit.
+
+ The name of the organizational unit can be specified as a full DN
+ or without the domainDN component.
+
+ Examples:
+ samba-tool ou listobjects 'OU=OrgUnit,DC=samdom,DC=example,DC=com'
+ samba-tool ou listobjects 'OU=OrgUnit'
+
+ The examples show how an administrator would list all child objects
+ of the ou 'OrgUnit'.
+ """
+ synopsis = "%prog <ou_dn> [options]"
+
+ takes_options = [
+ Option("-H", "--URL", help="LDB URL for database or target server",
+ type=str, metavar="URL", dest="H"),
+ Option("--full-dn", dest="full_dn", default=False, action='store_true',
+ help="Display DNs including the base DN."),
+ Option("-r", "--recursive", dest="recursive", default=False,
+ action='store_true', help="List objects recursively."),
+ ]
+
+ takes_args = ["ou_dn"]
+ takes_optiongroups = {
+ "sambaopts": options.SambaOptions,
+ "credopts": options.CredentialsOptions,
+ "versionopts": options.VersionOptions,
+ }
+
+ def run(self, ou_dn, credopts=None, sambaopts=None, versionopts=None,
+ H=None, full_dn=False, recursive=False):
+ lp = sambaopts.get_loadparm()
+ creds = credopts.get_credentials(lp, fallback_machine=True)
+ samdb = SamDB(url=H, session_info=system_session(),
+ credentials=creds, lp=lp)
+ domain_dn = ldb.Dn(samdb, samdb.domain_dn())
+
+ try:
+ full_ou_dn = samdb.normalize_dn_in_domain(ou_dn)
+ except Exception as e:
+ raise CommandError('Invalid ou_dn "%s": %s' % (ou_dn, e))
+
+ minchildren = 0
+ scope = ldb.SCOPE_ONELEVEL
+ if recursive:
+ minchildren = 1
+ scope = ldb.SCOPE_SUBTREE
+
+ try:
+ children = samdb.search(base=full_ou_dn,
+ expression="(objectclass=*)",
+ scope=scope, attrs=[])
+ if len(children) <= minchildren:
+ self.outf.write('ou "%s" is empty\n' % ou_dn)
+ return
+
+ for child in sorted(children, key=attrgetter('dn')):
+ if child.dn == full_ou_dn:
+ continue
+ if not full_dn:
+ child.dn.remove_base_components(len(domain_dn))
+ self.outf.write("%s\n" % child.dn)
+
+ except Exception as e:
+ raise CommandError('Failed to list contents of ou "%s"' %
+ full_ou_dn, e)
+
+
+class cmd_list(Command):
+ """List all organizational units.
+
+ Example:
+ samba-tool ou listobjects
+
+ The example shows how an administrator would list all organizational
+ units.
+ """
+
+ synopsis = "%prog [options]"
+
+ takes_options = [
+ Option("-H", "--URL", help="LDB URL for database or target server",
+ type=str, metavar="URL", dest="H"),
+ Option("-b", "--base-dn",
+ help="Specify base DN to use.",
+ type=str),
+ Option("--full-dn", dest="full_dn", default=False, action='store_true',
+ help="Display DNs including the base DN."),
+ ]
+
+ takes_optiongroups = {
+ "sambaopts": options.SambaOptions,
+ "credopts": options.CredentialsOptions,
+ "versionopts": options.VersionOptions,
+ }
+
+ def run(self,
+ sambaopts=None,
+ credopts=None,
+ versionopts=None,
+ H=None,
+ base_dn=None,
+ full_dn=False):
+ lp = sambaopts.get_loadparm()
+ creds = credopts.get_credentials(lp, fallback_machine=True)
+ samdb = SamDB(url=H, session_info=system_session(),
+ credentials=creds, lp=lp)
+
+ search_dn = ldb.Dn(samdb, samdb.domain_dn())
+ if base_dn:
+ search_dn = samdb.normalize_dn_in_domain(base_dn)
+
+ res = samdb.search(search_dn,
+ scope=ldb.SCOPE_SUBTREE,
+ expression="(objectClass=organizationalUnit)",
+ attrs=[])
+ if (len(res) == 0):
+ return
+
+ for msg in sorted(res, key=attrgetter('dn')):
+ if not full_dn:
+ domain_dn = ldb.Dn(samdb, samdb.domain_dn())
+ msg.dn.remove_base_components(len(domain_dn))
+ self.outf.write("%s\n" % str(msg.dn))
+
+
+class cmd_delete(Command):
+ """Delete an organizational unit.
+
+ The name of the organizational unit can be specified as a full DN
+ or without the domainDN component.
+
+ Examples:
+ samba-tool ou delete 'OU=OrgUnit,DC=samdom,DC=example,DC=com'
+ samba-tool ou delete 'OU=OrgUnit'
+
+ The examples show how an administrator would delete the ou 'OrgUnit'.
+ """
+
+ synopsis = "%prog <ou_dn> [options]"
+
+ takes_options = [
+ Option("-H", "--URL", help="LDB URL for database or target server",
+ type=str, metavar="URL", dest="H"),
+ Option("--force-subtree-delete", dest="force_subtree_delete",
+ default=False, action='store_true',
+ help="Delete organizational unit and all children recursively"),
+ ]
+
+ takes_args = ["ou_dn"]
+ takes_optiongroups = {
+ "sambaopts": options.SambaOptions,
+ "credopts": options.CredentialsOptions,
+ "versionopts": options.VersionOptions,
+ }
+
+ def run(self, ou_dn, credopts=None, sambaopts=None, versionopts=None,
+ H=None, force_subtree_delete=False):
+ lp = sambaopts.get_loadparm()
+ creds = credopts.get_credentials(lp, fallback_machine=True)
+ samdb = SamDB(url=H, session_info=system_session(),
+ credentials=creds, lp=lp)
+
+ try:
+ full_ou_dn = samdb.normalize_dn_in_domain(ou_dn)
+ except Exception as e:
+ raise CommandError('Invalid ou_dn "%s": %s' % (ou_dn, e))
+
+ controls = []
+ if force_subtree_delete:
+ controls = ["tree_delete:1"]
+
+ try:
+ res = samdb.search(base=full_ou_dn,
+ expression="(objectclass=organizationalUnit)",
+ scope=ldb.SCOPE_BASE, attrs=[])
+ if len(res) == 0:
+ self.outf.write('Unable to find ou "%s"\n' % ou_dn)
+ return
+ samdb.delete(full_ou_dn, controls)
+ except Exception as e:
+ raise CommandError('Failed to delete ou "%s"' % full_ou_dn, e)
+
+ self.outf.write('Deleted ou "%s"\n' % full_ou_dn)
+
+
+class cmd_ou(SuperCommand):
+ """Organizational Units (OU) management."""
+
+ subcommands = {}
+ subcommands["add"] = cmd_add()
+ subcommands["create"] = cmd_add()
+ subcommands["delete"] = cmd_delete()
+ subcommands["move"] = cmd_move()
+ subcommands["rename"] = cmd_rename()
+ subcommands["list"] = cmd_list()
+ subcommands["listobjects"] = cmd_listobjects()
diff --git a/python/samba/netcmd/processes.py b/python/samba/netcmd/processes.py
new file mode 100644
index 0000000..12a05a6
--- /dev/null
+++ b/python/samba/netcmd/processes.py
@@ -0,0 +1,142 @@
+# Unix SMB/CIFS implementation.
+# List processes (to aid debugging on systems without setproctitle)
+# Copyright (C) 2010-2011 Jelmer Vernooij <jelmer@samba.org>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
# This module implements 'samba-tool processes': it queries the irpc
# messaging layer for the names and PIDs of the registered Samba
# services and prints them, optionally filtered by service name
# (--name) or by process id (--pid).
#
+#
+
+import samba
+import samba.getopt as options
+from samba.netcmd import Command, CommandError, Option
+from samba.messaging import Messaging
+
+
class cmd_processes(Command):
    """List processes (to aid debugging on systems without setproctitle)."""

    synopsis = "%prog [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions
    }

    takes_options = [
        Option("--name", type=str,
               help="Return only processes associated with one particular name"),
        Option("--pid", type=int,
               help="Return only names associated with one particular PID"),
    ]

    takes_args = []

    def get_service_data(self, msg_ctx):
        """Get details of the samba services registered in irpc.

        The prefork process model registers names in the form
        prefork-master-<service> and prefork-worker-<service>-<instance>;
        this routine uses that convention to identify pre-fork master and
        worker processes.

        :param msg_ctx: Messaging context to query
        :return: a tuple (filtered, masters, workers) where
            filtered - list of services with the prefork-* entries removed
            masters  - dict keyed on service name of prefork master PIDs
            workers  - dict keyed on service name, each value a dict of
                       (instance, pid) tuples keyed on instance number
        """
        services = msg_ctx.irpc_all_servers()
        filtered = []
        masters = {}
        workers = {}
        for service in services:
            if service.name.startswith("prefork-master"):
                ns = service.name.split("-")
                name = ns[2] + "_server"
                masters[name] = service.ids[0].pid
            elif service.name.startswith("prefork-worker"):
                ns = service.name.split("-")
                name = ns[2] + "_server"
                instance = int(ns[3])
                pid = service.ids[0].pid
                if name not in workers:
                    workers[name] = {}
                workers[name][instance] = (instance, pid)
            else:
                filtered.append(service)
        return (filtered, masters, workers)

    def run(self, sambaopts, versionopts, section_name=None,
            name=None, pid=None):
        """List irpc-registered service names and PIDs.

        With --name, print the PIDs registered under that name; with
        --pid, print the names registered by that process; otherwise
        print a table of all services, grouping each prefork master with
        its workers.  section_name is unused but kept for compatibility.
        """
        # Loading the config has side effects (e.g. locating the private
        # dir for the messaging sockets), so keep it even though the
        # loadparm context itself is not referenced below.
        lp = sambaopts.get_loadparm()

        msg_ctx = Messaging()

        if name is not None:
            try:
                ids = msg_ctx.irpc_servers_byname(name)
            except KeyError:
                # Unknown service name: print nothing rather than fail.
                ids = []

            for server_id in ids:
                self.outf.write("%d\n" % server_id.pid)
        elif pid is not None:
            # Use distinct loop variables so the 'name'/'pid' parameters
            # are not shadowed (the original reused 'name' here).
            for service in msg_ctx.irpc_all_servers():
                for server_id in service.ids:
                    if server_id.pid == int(pid):
                        self.outf.write("%s\n" % service.name)
        else:
            seen = {}     # Service entries already printed; names can be
                          # registered multiple times against a process
                          # but should only be displayed once.
            prefork = {}  # Master PIDs already printed, so each prefork
                          # master/worker group is shown only once.
            (services, masters, workers) = self.get_service_data(msg_ctx)
            self.outf.write(" Service: PID\n")
            self.outf.write("--------------------------------------\n")

            for service in sorted(services, key=lambda x: x.name):
                if service.name in masters:
                    # Print the master process followed by all of its
                    # worker processes, in instance order (the workers
                    # dict preserves registration order, which is not
                    # necessarily instance order).
                    master_pid = masters[service.name]
                    if master_pid not in prefork:
                        prefork[master_pid] = True
                        self.outf.write("%-26s %6d\n" %
                                        (service.name, master_pid))
                        if service.name in workers:
                            ws = workers[service.name]
                            for w in sorted(ws):
                                (instance, wpid) = ws[w]
                                sn = "{0}(worker {1})".format(
                                    service.name, instance)
                                self.outf.write("%-26s %6d\n" % (sn, wpid))
                else:
                    for server_id in service.ids:
                        if (service.name, server_id.pid) not in seen:
                            self.outf.write("%-26s %6d\n"
                                            % (service.name, server_id.pid))
                            seen[(service.name, server_id.pid)] = True
diff --git a/python/samba/netcmd/pso.py b/python/samba/netcmd/pso.py
new file mode 100644
index 0000000..d260e3b
--- /dev/null
+++ b/python/samba/netcmd/pso.py
@@ -0,0 +1,794 @@
+# Manages Password Settings Objects
+#
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2018
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+import samba.getopt as options
+import ldb
+from samba.samdb import SamDB
+from samba.netcmd import (Command, CommandError, Option, SuperCommand)
+from samba.dcerpc.samr import (DOMAIN_PASSWORD_COMPLEX,
+ DOMAIN_PASSWORD_STORE_CLEARTEXT)
+from samba.auth import system_session
+from samba.netcmd.common import (NEVER_TIMESTAMP,
+ timestamp_to_mins,
+ timestamp_to_days)
+
+
def pso_container(samdb):
    """Return the DN of the domain's Password Settings Container."""
    container_rdns = "CN=Password Settings Container,CN=System"
    return "%s,%s" % (container_rdns, samdb.domain_dn())
+
+
def mins_to_timestamp(mins):
    """Converts a value in minutes to -100 nanosecond units.

    AD stores durations as a negated count of 100-nanosecond ticks;
    one minute is 60 * 10**7 ticks.

    :param mins: duration in minutes
    :return: the negated tick count as a string, e.g. 1 -> "-600000000"
    """
    # Use the integer 10 ** 7 (the original used the float 1e7) so that
    # integer inputs convert exactly, with no floating-point rounding
    # for very large durations.
    timestamp = -int(mins * 60 * (10 ** 7))
    return str(timestamp)
+
+
def days_to_timestamp(days):
    """Converts a value in days to -100 nanosecond units.

    :param days: duration in days
    :return: the negated 100ns tick count as a string (see
        mins_to_timestamp)
    """
    # mins_to_timestamp already returns a string, so the extra str()
    # wrapper the original applied here was redundant.
    return mins_to_timestamp(days * 60 * 24)
+
+
def show_pso_by_dn(outf, samdb, dn, show_applies_to=True):
    """Displays the password settings for a PSO specified by DN

    :param outf: output stream the report is written to
    :param samdb: SamDB connection used for the lookup
    :param dn: DN of the PSO to display
    :param show_applies_to: if True, also list the groups/users the PSO
        directly applies to (msDS-PSOAppliesTo)
    """

    # map from the boolean LDB value to the CLI string the user sees
    on_off_str = {"TRUE": "on", "FALSE": "off"}

    pso_attrs = ['name', 'msDS-PasswordSettingsPrecedence',
                 'msDS-PasswordReversibleEncryptionEnabled',
                 'msDS-PasswordHistoryLength', 'msDS-MinimumPasswordLength',
                 'msDS-PasswordComplexityEnabled', 'msDS-MinimumPasswordAge',
                 'msDS-MaximumPasswordAge', 'msDS-LockoutObservationWindow',
                 'msDS-LockoutThreshold', 'msDS-LockoutDuration',
                 'msDS-PSOAppliesTo']

    res = samdb.search(dn, scope=ldb.SCOPE_BASE, attrs=pso_attrs)
    pso_res = res[0]
    outf.write("Password information for PSO '%s'\n" % pso_res['name'])
    outf.write("\n")

    outf.write("Precedence (lowest is best): %s\n" %
               pso_res['msDS-PasswordSettingsPrecedence'])
    bool_str = str(pso_res['msDS-PasswordComplexityEnabled'])
    outf.write("Password complexity: %s\n" % on_off_str[bool_str])
    bool_str = str(pso_res['msDS-PasswordReversibleEncryptionEnabled'])
    outf.write("Store plaintext passwords: %s\n" % on_off_str[bool_str])
    outf.write("Password history length: %s\n" %
               pso_res['msDS-PasswordHistoryLength'])
    outf.write("Minimum password length: %s\n" %
               pso_res['msDS-MinimumPasswordLength'])
    # ages/durations are stored as negative 100ns ticks; convert them
    # back to days/minutes for display
    outf.write("Minimum password age (days): %d\n" %
               timestamp_to_days(pso_res['msDS-MinimumPasswordAge'][0]))
    outf.write("Maximum password age (days): %d\n" %
               timestamp_to_days(pso_res['msDS-MaximumPasswordAge'][0]))
    outf.write("Account lockout duration (mins): %d\n" %
               timestamp_to_mins(pso_res['msDS-LockoutDuration'][0]))
    outf.write("Account lockout threshold (attempts): %s\n" %
               pso_res['msDS-LockoutThreshold'])
    outf.write("Reset account lockout after (mins): %d\n" %
               timestamp_to_mins(pso_res['msDS-LockoutObservationWindow'][0]))

    if show_applies_to:
        if 'msDS-PSOAppliesTo' in pso_res:
            outf.write("\nPSO applies directly to %d groups/users:\n" %
                       len(pso_res['msDS-PSOAppliesTo']))
            for dn in pso_res['msDS-PSOAppliesTo']:
                outf.write(" %s\n" % dn)
        else:
            outf.write("\nNote: PSO does not apply to any users or groups.\n")
+
+
def check_pso_valid(samdb, pso_dn, name):
    """Gracefully bail out if we can't view/modify the PSO specified"""
    # A base-scope search raises ERR_NO_SUCH_OBJECT when the PSO is
    # absent; translate that into a friendly CommandError.
    try:
        res = samdb.search(pso_dn, scope=ldb.SCOPE_BASE,
                           attrs=['msDS-PasswordSettingsPrecedence'])
    except ldb.LdbError as e:
        if e.args[0] != ldb.ERR_NO_SUCH_OBJECT:
            raise
        raise CommandError("Unable to find PSO '%s'" % name)

    # Non-admin users get a successful search result but with no
    # attributes, so a missing precedence value means no permission.
    if 'msDS-PasswordSettingsPrecedence' not in res[0]:
        raise CommandError("You may not have permission to view/modify PSOs")
+
+
def show_pso_for_user(outf, samdb, username):
    """Displays the password settings for a specific user

    :param outf: output stream the report is written to
    :param samdb: SamDB connection used for the lookups
    :param username: sAMAccountName of the user to report on
    """

    search_filter = "(&(sAMAccountName=%s)(objectClass=user))" % username

    res = samdb.search(samdb.domain_dn(), scope=ldb.SCOPE_SUBTREE,
                       expression=search_filter,
                       attrs=['msDS-ResultantPSO', 'msDS-PSOApplied'])

    if len(res) == 0:
        outf.write("User '%s' not found.\n" % username)
    elif 'msDS-ResultantPSO' not in res[0]:
        outf.write("No PSO applies to user '%s'. "
                   "The default domain settings apply.\n" % username)
        outf.write("Refer to 'samba-tool domain passwordsettings show'.\n")
    else:
        # sanity-check user has permissions to view PSO details (non-admin
        # users can view msDS-ResultantPSO, but not the actual PSO details)
        check_pso_valid(samdb, res[0]['msDS-ResultantPSO'][0], "???")
        outf.write("The following PSO settings apply to user '%s'.\n\n" %
                   username)
        show_pso_by_dn(outf, samdb, res[0]['msDS-ResultantPSO'][0],
                       show_applies_to=False)
        # PSOs that apply directly to a user don't necessarily have the best
        # precedence, which could be a little confusing for PSO management
        if 'msDS-PSOApplied' in res[0]:
            outf.write("\nNote: PSO applies directly to user "
                       "(any group PSOs are overridden)\n")
        else:
            outf.write("\nPSO applies to user via group membership.\n")
+
+
def msg_add_attr(msg, attr_name, value, ldb_oper):
    """Set attr_name on the LDB message with the given FLAG_MOD_*
    operation (helper for make_pso_ldb_msg)."""
    msg[attr_name] = ldb.MessageElement(value, ldb_oper, attr_name)
+
+
def make_pso_ldb_msg(outf, samdb, pso_dn, create, lockout_threshold=None,
                     complexity=None, precedence=None, store_plaintext=None,
                     history_length=None, min_pwd_length=None,
                     min_pwd_age=None, max_pwd_age=None, lockout_duration=None,
                     reset_lockout_after=None):
    """Packs the given PSO settings into an LDB message

    Only settings that are not None are added to the message.  Ages and
    durations are converted from days/minutes into the negated 100ns
    tick format AD uses.

    :param outf: unused here; kept for interface compatibility
    :param samdb: SamDB connection (used only to construct the DN)
    :param pso_dn: DN string of the PSO being created/modified
    :param create: if True build an add-message (FLAG_MOD_ADD, including
        the objectClass); otherwise a modify-message (FLAG_MOD_REPLACE)
    :return: the populated ldb.Message
    """

    m = ldb.Message()
    m.dn = ldb.Dn(samdb, pso_dn)

    if create:
        ldb_oper = ldb.FLAG_MOD_ADD
        # NOTE(review): the message key is "msDS-objectClass" but the
        # element itself is named "objectClass"; the element name is what
        # matters to ldb — confirm the key mismatch is intentional.
        m["msDS-objectClass"] = ldb.MessageElement("msDS-PasswordSettings",
                                                   ldb_oper, "objectClass")
    else:
        ldb_oper = ldb.FLAG_MOD_REPLACE

    if precedence is not None:
        msg_add_attr(m, "msDS-PasswordSettingsPrecedence", str(precedence),
                     ldb_oper)

    if complexity is not None:
        # CLI "on"/"off" maps to the LDB boolean strings "TRUE"/"FALSE"
        bool_str = "TRUE" if complexity == "on" else "FALSE"
        msg_add_attr(m, "msDS-PasswordComplexityEnabled", bool_str, ldb_oper)

    if store_plaintext is not None:
        bool_str = "TRUE" if store_plaintext == "on" else "FALSE"
        msg_add_attr(m, "msDS-PasswordReversibleEncryptionEnabled",
                     bool_str, ldb_oper)

    if history_length is not None:
        msg_add_attr(m, "msDS-PasswordHistoryLength", str(history_length),
                     ldb_oper)

    if min_pwd_length is not None:
        msg_add_attr(m, "msDS-MinimumPasswordLength", str(min_pwd_length),
                     ldb_oper)

    if min_pwd_age is not None:
        min_pwd_age_ticks = days_to_timestamp(min_pwd_age)
        msg_add_attr(m, "msDS-MinimumPasswordAge", min_pwd_age_ticks,
                     ldb_oper)

    if max_pwd_age is not None:
        # Windows won't let you set max-pwd-age to zero. Here we take zero to
        # mean 'never expire' and use the timestamp corresponding to 'never'
        if max_pwd_age == 0:
            max_pwd_age_ticks = str(NEVER_TIMESTAMP)
        else:
            max_pwd_age_ticks = days_to_timestamp(max_pwd_age)
        msg_add_attr(m, "msDS-MaximumPasswordAge", max_pwd_age_ticks, ldb_oper)

    if lockout_duration is not None:
        lockout_duration_ticks = mins_to_timestamp(lockout_duration)
        msg_add_attr(m, "msDS-LockoutDuration", lockout_duration_ticks,
                     ldb_oper)

    if lockout_threshold is not None:
        msg_add_attr(m, "msDS-LockoutThreshold", str(lockout_threshold),
                     ldb_oper)

    if reset_lockout_after is not None:
        msg_add_attr(m, "msDS-LockoutObservationWindow",
                     mins_to_timestamp(reset_lockout_after), ldb_oper)

    return m
+
+
def check_pso_constraints(min_pwd_length=None, history_length=None,
                          min_pwd_age=None, max_pwd_age=None):
    """Checks PSO settings fall within valid ranges"""

    # Valid ranges come from section 3.1.1.5.2.2 (Constraints) of the
    # MS-ADTS specification.
    if history_length is not None and history_length > 1024:
        raise CommandError("Bad password history length: "
                           "valid range is 0 to 1024")

    if min_pwd_length is not None and min_pwd_length > 255:
        raise CommandError("Bad minimum password length: "
                           "valid range is 0 to 255")

    if min_pwd_age is None or max_pwd_age is None:
        return

    # a max-age of zero is a special case meaning 'never expire'
    if max_pwd_age != 0 and min_pwd_age >= max_pwd_age:
        raise CommandError("Minimum password age must be less than "
                           "maximum age")
+
+
# The same args are used for both the 'pso create' and 'pso set'
# commands.  Note: num_options_in_args() matches these option strings
# against the raw CLI args by sub-string.
pwd_settings_options = [
    Option("--complexity", type="choice", choices=["on", "off"],
           help="The password complexity (on | off)."),
    Option("--store-plaintext", type="choice", choices=["on", "off"],
           help="Store plaintext passwords where account have "
                "'store passwords with reversible encryption' set (on | off)."),
    Option("--history-length",
           help="The password history length (<integer>).", type=int),
    Option("--min-pwd-length",
           help="The minimum password length (<integer>).", type=int),
    Option("--min-pwd-age",
           help=("The minimum password age (<integer in days>). "
                 "Default is domain setting."), type=int),
    Option("--max-pwd-age",
           help=("The maximum password age (<integer in days>). "
                 "Default is domain setting."), type=int),
    Option("--account-lockout-duration", type=int,
           help=("The length of time an account is locked out after exceeding "
                 "the limit on bad password attempts (<integer in mins>). "
                 "Default is domain setting")),
    Option("--account-lockout-threshold", type=int,
           help=("The number of bad password attempts allowed before locking "
                 "out the account (<integer>). Default is domain setting.")),
    Option("--reset-account-lockout-after",
           help=("After this time is elapsed, the recorded number of attempts "
                 "restarts from zero (<integer in mins>). "
                 "Default is domain setting."), type=int)]
+
+
def num_options_in_args(options, args):
    """
    Returns the number of options specified that are present in the args.
    (There can be other args besides just the ones we're interested in, which
    is why argc on its own is not enough)
    """
    # An option counts once for every CLI argument that contains its
    # string form as a sub-string.
    return sum(1 for opt in options
               for arg in args
               if str(opt) in arg)
+
+
class cmd_domain_pwdsettings_pso_create(Command):
    """Creates a new Password Settings Object (PSO).

    PSOs are a way to tailor different password settings (lockout policy,
    minimum password length, etc) for specific users or groups.

    The psoname is a unique name for the new Password Settings Object.
    When multiple PSOs apply to a user, the precedence determines which PSO
    will take effect. The PSO with the lowest precedence will take effect.

    For most arguments, the default value (if unspecified) is the current
    domain passwordsettings value. To see these values, enter the command
    'samba-tool domain passwordsettings show'.

    To apply the new PSO to user(s) or group(s), enter the command
    'samba-tool domain passwordsettings pso apply'.
    """

    synopsis = "%prog <psoname> <precedence> [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_options = pwd_settings_options + [
        Option("-H", "--URL", help="LDB URL for database or target server",
               metavar="URL", dest="H", type=str)
    ]
    takes_args = ["psoname", "precedence"]

    def run(self, psoname, precedence, H=None, min_pwd_age=None,
            max_pwd_age=None, complexity=None, store_plaintext=None,
            history_length=None, min_pwd_length=None,
            account_lockout_duration=None, account_lockout_threshold=None,
            reset_account_lockout_after=None, credopts=None, sambaopts=None,
            versionopts=None):
        """Create the PSO, defaulting unset options to domain settings."""
        lp = sambaopts.get_loadparm()
        creds = credopts.get_credentials(lp)

        samdb = SamDB(url=H, session_info=system_session(),
                      credentials=creds, lp=lp)

        # precedence arrives as a string positional argument
        try:
            precedence = int(precedence)
        except ValueError:
            raise CommandError("The PSO's precedence should be "
                               "a numerical value. Try --help")

        # sanity-check that the PSO doesn't already exist
        pso_dn = "CN=%s,%s" % (psoname, pso_container(samdb))
        try:
            res = samdb.search(pso_dn, scope=ldb.SCOPE_BASE)
        except ldb.LdbError as e:
            if e.args[0] == ldb.ERR_NO_SUCH_OBJECT:
                pass
            else:
                raise
        else:
            raise CommandError("PSO '%s' already exists" % psoname)

        # we expect the user to specify at least one password-policy setting,
        # otherwise there's no point in creating a PSO
        num_pwd_args = num_options_in_args(pwd_settings_options, self.raw_argv)
        if num_pwd_args == 0:
            raise CommandError("Please specify at least one password policy "
                               "setting. Try --help")

        # it's unlikely that the user will specify all 9 password policy
        # settings on the CLI - current domain password-settings as the default
        # values for unspecified arguments
        if num_pwd_args < len(pwd_settings_options):
            self.message("Not all password policy options "
                         "have been specified.")
            self.message("For unspecified options, the current domain password"
                         " settings will be used as the default values.")

        # lookup the current domain password-settings
        res = samdb.search(samdb.domain_dn(), scope=ldb.SCOPE_BASE,
                           attrs=["pwdProperties", "pwdHistoryLength", "minPwdLength",
                                  "minPwdAge", "maxPwdAge", "lockoutDuration",
                                  "lockoutThreshold", "lockOutObservationWindow"])
        assert(len(res) == 1)

        # use the domain settings for any missing arguments
        pwd_props = int(res[0]["pwdProperties"][0])
        if complexity is None:
            prop_flag = DOMAIN_PASSWORD_COMPLEX
            complexity = "on" if pwd_props & prop_flag else "off"

        if store_plaintext is None:
            prop_flag = DOMAIN_PASSWORD_STORE_CLEARTEXT
            store_plaintext = "on" if pwd_props & prop_flag else "off"

        if history_length is None:
            history_length = int(res[0]["pwdHistoryLength"][0])

        if min_pwd_length is None:
            min_pwd_length = int(res[0]["minPwdLength"][0])

        if min_pwd_age is None:
            min_pwd_age = timestamp_to_days(res[0]["minPwdAge"][0])

        if max_pwd_age is None:
            max_pwd_age = timestamp_to_days(res[0]["maxPwdAge"][0])

        if account_lockout_duration is None:
            account_lockout_duration = \
                timestamp_to_mins(res[0]["lockoutDuration"][0])

        if account_lockout_threshold is None:
            account_lockout_threshold = int(res[0]["lockoutThreshold"][0])

        if reset_account_lockout_after is None:
            reset_account_lockout_after = \
                timestamp_to_mins(res[0]["lockOutObservationWindow"][0])

        # reject out-of-range values before touching the database
        check_pso_constraints(max_pwd_age=max_pwd_age, min_pwd_age=min_pwd_age,
                              history_length=history_length,
                              min_pwd_length=min_pwd_length)

        # pack the settings into an LDB message
        m = make_pso_ldb_msg(self.outf, samdb, pso_dn, create=True,
                             complexity=complexity, precedence=precedence,
                             store_plaintext=store_plaintext,
                             history_length=history_length,
                             min_pwd_length=min_pwd_length,
                             min_pwd_age=min_pwd_age, max_pwd_age=max_pwd_age,
                             lockout_duration=account_lockout_duration,
                             lockout_threshold=account_lockout_threshold,
                             reset_lockout_after=reset_account_lockout_after)

        # create the new PSO
        try:
            samdb.add(m)
            self.message("PSO successfully created: %s" % pso_dn)
            # display the new PSO's settings
            show_pso_by_dn(self.outf, samdb, pso_dn, show_applies_to=False)
        except ldb.LdbError as e:
            (num, msg) = e.args
            if num == ldb.ERR_INSUFFICIENT_ACCESS_RIGHTS:
                raise CommandError("Administrator permissions are needed "
                                   "to create a PSO.")
            else:
                raise CommandError("Failed to create PSO '%s': %s" % (pso_dn,
                                                                      msg))
+
+
class cmd_domain_pwdsettings_pso_set(Command):
    """Modifies a Password Settings Object (PSO)."""

    synopsis = "%prog <psoname> [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_options = pwd_settings_options + [
        Option("--precedence", type=int,
               help=("This PSO's precedence relative to other PSOs. "
                     "Lower precedence is better (<integer>).")),
        Option("-H", "--URL", help="LDB URL for database or target server",
               type=str, metavar="URL", dest="H"),
    ]
    takes_args = ["psoname"]

    def run(self, psoname, H=None, precedence=None, min_pwd_age=None,
            max_pwd_age=None, complexity=None, store_plaintext=None,
            history_length=None, min_pwd_length=None,
            account_lockout_duration=None, account_lockout_threshold=None,
            reset_account_lockout_after=None, credopts=None, sambaopts=None,
            versionopts=None):
        """Apply the requested setting changes to an existing PSO."""
        lp = sambaopts.get_loadparm()
        creds = credopts.get_credentials(lp)

        samdb = SamDB(url=H, session_info=system_session(),
                      credentials=creds, lp=lp)

        # sanity-check the PSO exists
        pso_dn = "CN=%s,%s" % (psoname, pso_container(samdb))
        check_pso_valid(samdb, pso_dn, psoname)

        # we expect the user to specify at least one password-policy setting
        num_pwd_args = num_options_in_args(pwd_settings_options, self.raw_argv)
        if num_pwd_args == 0 and precedence is None:
            raise CommandError("Please specify at least one password policy "
                               "setting. Try --help")

        if min_pwd_age is not None or max_pwd_age is not None:
            # if we're modifying either the max or min pwd-age, check the max
            # is always larger. We may have to fetch the PSO's setting to
            # verify this
            res = samdb.search(pso_dn, scope=ldb.SCOPE_BASE,
                               attrs=['msDS-MinimumPasswordAge',
                                      'msDS-MaximumPasswordAge'])
            if min_pwd_age is None:
                min_pwd_ticks = res[0]['msDS-MinimumPasswordAge'][0]
                min_pwd_age = timestamp_to_days(min_pwd_ticks)

            if max_pwd_age is None:
                max_pwd_ticks = res[0]['msDS-MaximumPasswordAge'][0]
                max_pwd_age = timestamp_to_days(max_pwd_ticks)

        check_pso_constraints(max_pwd_age=max_pwd_age, min_pwd_age=min_pwd_age,
                              history_length=history_length,
                              min_pwd_length=min_pwd_length)

        # pack the settings into an LDB message
        m = make_pso_ldb_msg(self.outf, samdb, pso_dn, create=False,
                             complexity=complexity, precedence=precedence,
                             store_plaintext=store_plaintext,
                             history_length=history_length,
                             min_pwd_length=min_pwd_length,
                             min_pwd_age=min_pwd_age, max_pwd_age=max_pwd_age,
                             lockout_duration=account_lockout_duration,
                             lockout_threshold=account_lockout_threshold,
                             reset_lockout_after=reset_account_lockout_after)

        # update the PSO
        try:
            samdb.modify(m)
            self.message("Successfully updated PSO: %s" % pso_dn)
            # display the new PSO's settings
            show_pso_by_dn(self.outf, samdb, pso_dn, show_applies_to=False)
        except ldb.LdbError as e:
            (num, msg) = e.args
            raise CommandError("Failed to update PSO '%s': %s" % (pso_dn, msg))
+
+
class cmd_domain_pwdsettings_pso_delete(Command):
    """Deletes a Password Settings Object (PSO)."""

    synopsis = "%prog <psoname> [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server",
               metavar="URL", dest="H", type=str)
    ]
    takes_args = ["psoname"]

    def run(self, psoname, H=None, credopts=None, sambaopts=None,
            versionopts=None):
        """Remove the named PSO from the Password Settings Container."""
        lp_ctx = sambaopts.get_loadparm()
        user_creds = credopts.get_credentials(lp_ctx)

        sam_db = SamDB(url=H, session_info=system_session(),
                      credentials=user_creds, lp=lp_ctx)

        dn = "CN=%s,%s" % (psoname, pso_container(sam_db))
        # Bail out early if the PSO is missing or not visible to us.
        check_pso_valid(sam_db, dn, psoname)

        sam_db.delete(dn)
        self.message("Deleted PSO %s" % psoname)
+
+
def pso_key(a):
    """Sort key for PSO entries: the integer precedence value."""
    return int(a['msDS-PasswordSettingsPrecedence'][0])
+
+
class cmd_domain_pwdsettings_pso_list(Command):
    """Lists all Password Settings Objects (PSOs)."""

    synopsis = "%prog [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server",
               metavar="URL", dest="H", type=str)
    ]

    def run(self, H=None, credopts=None, sambaopts=None, versionopts=None):
        """Print all PSOs in precedence order (lowest/best first)."""
        lp = sambaopts.get_loadparm()
        creds = credopts.get_credentials(lp)

        samdb = SamDB(url=H, session_info=system_session(),
                      credentials=creds, lp=lp)

        res = samdb.search(pso_container(samdb), scope=ldb.SCOPE_SUBTREE,
                           attrs=['name', 'msDS-PasswordSettingsPrecedence'],
                           expression="(objectClass=msDS-PasswordSettings)")

        # an unprivileged search against Windows returns nothing here. On Samba
        # we get the PSO names, but not their attributes
        if len(res) == 0 or 'msDS-PasswordSettingsPrecedence' not in res[0]:
            self.outf.write("No PSOs are present, or you don't have permission"
                            " to view them.\n")
            return

        # sort the PSOs so they're displayed in order of precedence
        pso_list = sorted(res, key=pso_key)

        self.outf.write("Precedence | PSO name\n")
        self.outf.write("--------------------------------------------------\n")

        for pso in pso_list:
            precedence = pso['msDS-PasswordSettingsPrecedence']
            self.outf.write("%-10s | %s\n" % (precedence, pso['name']))
+
+
class cmd_domain_pwdsettings_pso_show(Command):
    """Display a Password Settings Object's details."""

    synopsis = "%prog <psoname> [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server",
               metavar="URL", dest="H", type=str)
    ]
    takes_args = ["psoname"]

    def run(self, psoname, H=None, credopts=None, sambaopts=None,
            versionopts=None):
        """Look up the PSO by name and print all of its settings."""
        lp_ctx = sambaopts.get_loadparm()
        user_creds = credopts.get_credentials(lp_ctx)

        sam_db = SamDB(url=H, session_info=system_session(),
                      credentials=user_creds, lp=lp_ctx)

        dn = "CN=%s,%s" % (psoname, pso_container(sam_db))
        # Verifies both that the PSO exists and that we may read it.
        check_pso_valid(sam_db, dn, psoname)
        show_pso_by_dn(self.outf, sam_db, dn)
+
+
class cmd_domain_pwdsettings_pso_show_user(Command):
    """Displays the Password Settings that apply to a user."""

    synopsis = "%prog <username> [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server",
               metavar="URL", dest="H", type=str)
    ]
    takes_args = ["username"]

    def run(self, username, H=None, credopts=None, sambaopts=None,
            versionopts=None):
        """Report the resultant PSO (if any) for the named user."""
        lp_ctx = sambaopts.get_loadparm()
        user_creds = credopts.get_credentials(lp_ctx)

        sam_db = SamDB(url=H, session_info=system_session(),
                      credentials=user_creds, lp=lp_ctx)

        show_pso_for_user(self.outf, sam_db, username)
+
+
class cmd_domain_pwdsettings_pso_apply(Command):
    """Applies a PSO's password policy to a user or group.

    When a PSO is applied to a group, it will apply to all users (and groups)
    that are members of that group. If a PSO applies directly to a user, it
    will override any group membership PSOs for that user.

    When multiple PSOs apply to a user, either directly or through group
    membership, the PSO with the lowest precedence will take effect.
    """

    synopsis = "%prog <psoname> <user-or-group-name> [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server",
               metavar="URL", dest="H", type=str)
    ]
    takes_args = ["psoname", "user_or_group"]

    def run(self, psoname, user_or_group, H=None, credopts=None,
            sambaopts=None, versionopts=None):
        """Add the target's DN to the PSO's msDS-PSOAppliesTo attribute."""
        lp = sambaopts.get_loadparm()
        creds = credopts.get_credentials(lp)

        samdb = SamDB(url=H, session_info=system_session(),
                      credentials=creds, lp=lp)

        pso_dn = "CN=%s,%s" % (psoname, pso_container(samdb))
        # sanity-check the PSO exists
        check_pso_valid(samdb, pso_dn, psoname)

        # lookup the user/group by account-name to gets its DN
        search_filter = "(sAMAccountName=%s)" % user_or_group
        res = samdb.search(samdb.domain_dn(), scope=ldb.SCOPE_SUBTREE,
                           expression=search_filter)

        if len(res) == 0:
            raise CommandError("The specified user or group '%s' was not found"
                               % user_or_group)

        # modify the PSO to apply to the user/group specified
        target_dn = str(res[0].dn)
        m = ldb.Message()
        m.dn = ldb.Dn(samdb, pso_dn)
        m["msDS-PSOAppliesTo"] = ldb.MessageElement(target_dn,
                                                    ldb.FLAG_MOD_ADD,
                                                    "msDS-PSOAppliesTo")
        try:
            samdb.modify(m)
        except ldb.LdbError as e:
            (num, msg) = e.args
            # most likely error - PSO already applies to that user/group
            if num == ldb.ERR_ATTRIBUTE_OR_VALUE_EXISTS:
                raise CommandError("PSO '%s' already applies to '%s'"
                                   % (psoname, user_or_group))
            else:
                raise CommandError("Failed to update PSO '%s': %s" % (psoname,
                                                                      msg))

        self.message("PSO '%s' applied to '%s'" % (psoname, user_or_group))
+
+
class cmd_domain_pwdsettings_pso_unapply(Command):
    """Updates a PSO to no longer apply to a user or group."""

    synopsis = "%prog <psoname> <user-or-group-name> [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server",
               metavar="URL", dest="H", type=str),
    ]
    takes_args = ["psoname", "user_or_group"]

    def run(self, psoname, user_or_group, H=None, credopts=None,
            sambaopts=None, versionopts=None):
        """Remove the target's DN from the PSO's msDS-PSOAppliesTo."""
        lp = sambaopts.get_loadparm()
        creds = credopts.get_credentials(lp)

        samdb = SamDB(url=H, session_info=system_session(),
                      credentials=creds, lp=lp)

        pso_dn = "CN=%s,%s" % (psoname, pso_container(samdb))
        # sanity-check the PSO exists
        check_pso_valid(samdb, pso_dn, psoname)

        # lookup the user/group by account-name to gets its DN
        search_filter = "(sAMAccountName=%s)" % user_or_group
        res = samdb.search(samdb.domain_dn(), scope=ldb.SCOPE_SUBTREE,
                           expression=search_filter)

        if len(res) == 0:
            raise CommandError("The specified user or group '%s' was not found"
                               % user_or_group)

        # modify the PSO to apply to the user/group specified
        target_dn = str(res[0].dn)
        m = ldb.Message()
        m.dn = ldb.Dn(samdb, pso_dn)
        m["msDS-PSOAppliesTo"] = ldb.MessageElement(target_dn,
                                                    ldb.FLAG_MOD_DELETE,
                                                    "msDS-PSOAppliesTo")
        try:
            samdb.modify(m)
        except ldb.LdbError as e:
            (num, msg) = e.args
            # most likely error - PSO doesn't apply to that user/group
            if num == ldb.ERR_NO_SUCH_ATTRIBUTE:
                raise CommandError("PSO '%s' doesn't apply to '%s'"
                                   % (psoname, user_or_group))
            else:
                raise CommandError("Failed to update PSO '%s': %s" % (psoname,
                                                                      msg))
        self.message("PSO '%s' no longer applies to '%s'" % (psoname,
                                                             user_or_group))
+
+
class cmd_domain_passwordsettings_pso(SuperCommand):
    """Manage fine-grained Password Settings Objects (PSOs)."""

    # Map of sub-command name to handler instance.
    subcommands = {
        "apply": cmd_domain_pwdsettings_pso_apply(),
        "create": cmd_domain_pwdsettings_pso_create(),
        "delete": cmd_domain_pwdsettings_pso_delete(),
        "list": cmd_domain_pwdsettings_pso_list(),
        "set": cmd_domain_pwdsettings_pso_set(),
        "show": cmd_domain_pwdsettings_pso_show(),
        "show-user": cmd_domain_pwdsettings_pso_show_user(),
        "unapply": cmd_domain_pwdsettings_pso_unapply(),
    }
diff --git a/python/samba/netcmd/rodc.py b/python/samba/netcmd/rodc.py
new file mode 100644
index 0000000..08a1415
--- /dev/null
+++ b/python/samba/netcmd/rodc.py
@@ -0,0 +1,163 @@
+# rodc related commands
+#
+# Copyright Andrew Tridgell 2010
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from samba.netcmd import Command, CommandError, Option, SuperCommand
+import samba.getopt as options
+from samba.samdb import SamDB
+from samba.auth import system_session
+import ldb
+from samba.dcerpc import misc, drsuapi
+from samba.drs_utils import drs_Replicate
+import sys
+
+
class RODCException(Exception):
    """Base class for errors raised by the RODC preload commands.

    Stores the human-readable message in ``value`` and renders as
    ``<ClassName>: <value>`` so subclasses are distinguishable in output.
    """

    def __init__(self, value):
        # Chain to Exception.__init__ so e.args is populated like any
        # other exception (the original left args empty).
        super().__init__(value)
        self.value = value

    def __str__(self):
        return "%s: %s" % (self.__class__.__name__, self.value)
+
+
class NamingError(RODCException):
    """Raised when an account (SID, DN or name) cannot be resolved to a unique DN."""
    pass
+
+
class ReplicationError(RODCException):
    """Raised when replicating an account's secrets to the RODC fails."""
    pass
+
+
class cmd_rodc_preload(Command):
    """Preload accounts for an RODC. Multiple accounts may be requested."""

    synopsis = "%prog (<SID>|<DN>|<accountname>)+ ... [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_options = [
        Option("--server", help="DC to use", type=str),
        Option("--file", help="Read account list from a file, or - for stdin (one per line)", type=str),
        Option("--ignore-errors", help="When preloading multiple accounts, skip any failing accounts", action="store_true"),
    ]

    takes_args = ["account*"]

    def get_dn(self, samdb, account):
        """Resolve *account* (SID, DN or sAMAccountName) to a user DN.

        :raises NamingError: if the account does not match exactly one user.
        """
        # we accept the account in SID, accountname or DN form
        if account[0:2] == 'S-':
            res = samdb.search(base="<SID=%s>" % account,
                               expression="objectclass=user",
                               scope=ldb.SCOPE_BASE, attrs=[])
        elif account.find('=') >= 0:
            res = samdb.search(base=account,
                               expression="objectclass=user",
                               scope=ldb.SCOPE_BASE, attrs=[])
        else:
            res = samdb.search(expression="(&(samAccountName=%s)(objectclass=user))" % ldb.binary_encode(account),
                               scope=ldb.SCOPE_SUBTREE, attrs=[])
        if len(res) != 1:
            raise NamingError("Failed to find account '%s'" % account)
        return str(res[0]["dn"])

    def run(self, *accounts, **kwargs):
        """Replicate the secrets of each requested account to the local RODC.

        Accounts come from the positional arguments or, if --file is given,
        one per line from that file (or stdin for "-").  With --ignore-errors
        failures are collected and reported at the end instead of aborting.
        """
        sambaopts = kwargs.get("sambaopts")
        credopts = kwargs.get("credopts")
        server = kwargs.get("server")
        accounts_file = kwargs.get("file")
        ignore_errors = kwargs.get("ignore_errors")

        if server is None:
            raise Exception("You must supply a server")

        if accounts_file is not None:
            accounts = []
            if accounts_file == "-":
                for line in sys.stdin:
                    accounts.append(line.strip())
            else:
                # use a context manager so the file is closed even on error
                # (the original leaked the file handle)
                with open(accounts_file, 'r') as f:
                    for line in f:
                        accounts.append(line.strip())

        lp = sambaopts.get_loadparm()

        creds = credopts.get_credentials(lp, fallback_machine=True)

        # connect to the remote and local SAMs
        samdb = SamDB(url="ldap://%s" % server,
                      session_info=system_session(),
                      credentials=creds, lp=lp)

        local_samdb = SamDB(url=None, session_info=system_session(),
                            credentials=creds, lp=lp)

        destination_dsa_guid = misc.GUID(local_samdb.get_ntds_GUID())

        binding_options = "seal"
        if lp.log_level() >= 9:
            binding_options += ",print"
        repl = drs_Replicate("ncacn_ip_tcp:%s[%s]" % (server, binding_options),
                             lp, creds,
                             local_samdb, destination_dsa_guid)

        # The source DSA's invocation-id does not change per account, so
        # look it up once rather than once per requested account
        # (the original re-queried it inside the loop).
        dc_ntds_dn = samdb.get_dsServiceName()
        res = samdb.search(base=dc_ntds_dn, scope=ldb.SCOPE_BASE, attrs=["invocationId"])
        source_dsa_invocation_id = misc.GUID(local_samdb.schema_format_value("objectGUID", res[0]["invocationId"][0]))

        errors = []
        for account in accounts:
            try:
                dn = self.get_dn(samdb, account)
            except RODCException as e:
                if not ignore_errors:
                    raise CommandError(str(e))
                errors.append(e)
                continue

            self.outf.write("Replicating DN %s\n" % dn)

            # wrap each replication in its own transaction so one failing
            # account does not roll back earlier successes
            local_samdb.transaction_start()
            try:
                repl.replicate(dn, source_dsa_invocation_id, destination_dsa_guid,
                               exop=drsuapi.DRSUAPI_EXOP_REPL_SECRET, rodc=True)
            except Exception:
                local_samdb.transaction_cancel()
                if not ignore_errors:
                    raise CommandError("Error replicating DN %s" % dn)
                errors.append(ReplicationError("Error replicating DN %s" % dn))
                continue

            local_samdb.transaction_commit()

        if len(errors) > 0:
            self.message("\nPreload encountered problematic users:")
            for error in errors:
                self.message("    %s" % error)
+
+
class cmd_rodc(SuperCommand):
    """Read-Only Domain Controller (RODC) management."""

    # Single sub-command: preload account secrets onto the RODC.
    subcommands = {
        "preload": cmd_rodc_preload(),
    }
diff --git a/python/samba/netcmd/schema.py b/python/samba/netcmd/schema.py
new file mode 100644
index 0000000..e665e83
--- /dev/null
+++ b/python/samba/netcmd/schema.py
@@ -0,0 +1,319 @@
+# Manipulate ACLs on directory objects
+#
+# Copyright (C) William Brown <william@blackhats.net.au> 2018
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import ldb
+import samba.getopt as options
+from samba.ms_schema import bitFields
+from samba.auth import system_session
+from samba.samdb import SamDB
+from samba.netcmd import (
+ Command,
+ CommandError,
+ SuperCommand,
+ Option
+)
+
+
class cmd_schema_attribute_modify(Command):
    """Modify attribute settings in the schema partition.

    This commands allows minor modifications to attributes in the schema. Active
    Directory does not allow many changes to schema, but important modifications
    are related to indexing. This command overwrites the value of searchflags,
    so be sure to view the current content before making changes.

    Example1:
    samba-tool schema attribute modify uid \\
        --searchflags="fATTINDEX,fPRESERVEONDELETE"

    This alters the uid attribute to be indexed and to be preserved when
    converted to a tombstone.

    Important search flag values are:

    fATTINDEX: create an equality index for this attribute.
    fPDNTATTINDEX: create a container index for this attribute (ie OU).
    fANR: specify that this attribute is a member of the ambiguous name
    resolution set.
    fPRESERVEONDELETE: indicate that the value of this attribute should be
    preserved when the object is converted to a tombstone (deleted).
    fCOPY: hint to clients that this attribute should be copied.
    fTUPLEINDEX: create a tuple index for this attribute. This is used in
    substring queries.
    fSUBTREEATTINDEX: create a browsing index for this attribute. VLV searches
    require this.
    fCONFIDENTIAL: indicate that the attribute is confidential and requires
    special access checks.
    fNEVERVALUEAUDIT: indicate that changes to this value should NOT be audited.
    fRODCFILTEREDATTRIBUTE: indicate that this value should not be replicated to
    RODCs.
    fEXTENDEDLINKTRACKING: indicate to the DC to perform extra link tracking.
    fBASEONLY: indicate that this attribute should only be displayed when the
    search scope of the query is SCOPE_BASE or a single object result.
    fPARTITIONSECRET: indicate that this attribute is a partition secret and
    requires special access checks.

    The authoritative source of this information is the MS-ADTS.
    """
    synopsis = "%prog attribute [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_options = [
        Option("--searchflags", help="Search Flags for the attribute", type=str),
        Option("-H", "--URL", help="LDB URL for database or target server",
               type=str, metavar="URL", dest="H"),
    ]

    takes_args = ["attribute"]

    def run(self, attribute, H=None, credopts=None, sambaopts=None,
            versionopts=None, searchflags=None):

        if searchflags is None:
            raise CommandError('A value to modify must be provided.')

        # Parse the comma-separated flag names into the integer bit value
        # stored in searchFlags.  (The None-check above guarantees we have
        # something to parse; the original re-tested for None redundantly.)
        searchflags_int = 0
        flags = searchflags.split(',')
        # We have to normalise all the values. To achieve this predictably
        # we title case (Fattrindex), then swapcase (fATTINDEX)
        flags = [x.capitalize().swapcase() for x in flags]
        for flag in flags:
            if flag not in bitFields['searchflags'].keys():
                raise CommandError("Unknown flag '%s', please see --help" % flag)
            # bitFields stores the MSB-first bit position, so convert it
            # to a shift from the least-significant bit.
            bit_loc = 31 - bitFields['searchflags'][flag]
            # Now apply the bit.
            searchflags_int = searchflags_int | (1 << bit_loc)

        lp = sambaopts.get_loadparm()
        creds = credopts.get_credentials(lp)

        samdb = SamDB(url=H, session_info=system_session(),
                      credentials=creds, lp=lp)

        schema_dn = samdb.schema_dn()
        # For now we make assumptions about the CN
        attr_dn = 'cn=%s,%s' % (attribute, schema_dn)

        m = ldb.Message()
        m.dn = ldb.Dn(samdb, attr_dn)
        m['searchFlags'] = ldb.MessageElement(
            str(searchflags_int), ldb.FLAG_MOD_REPLACE, 'searchFlags')

        samdb.modify(m)
        # Ask the DC to reload the schema cache immediately.
        samdb.set_schema_update_now()
        # Bug fix: the original omitted the trailing newline.
        self.outf.write("modified %s\n" % attr_dn)
+
+
class cmd_schema_attribute_show(Command):
    """Show details about an attribute from the schema.

    Schema attribute definitions define and control the behaviour of directory
    attributes on objects. This displays the details of a single attribute.
    """
    synopsis = "%prog attribute [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server",
               type=str, metavar="URL", dest="H"),
    ]

    takes_args = ["attribute"]

    def run(self, attribute, H=None, credopts=None, sambaopts=None, versionopts=None):
        # Connect to the target (or local) sam.ldb as SYSTEM.
        lp = sambaopts.get_loadparm()
        creds = credopts.get_credentials(lp)

        samdb = SamDB(url=H, session_info=system_session(),
                      credentials=creds, lp=lp)

        schema_dn = samdb.schema_dn()

        # Match the attribute by any of its three naming attributes.
        filt = '(&(objectClass=attributeSchema)(|(lDAPDisplayName={0})(cn={0})(name={0})))'.format(attribute)

        res = samdb.search(base=schema_dn, scope=ldb.SCOPE_SUBTREE,
                           expression=filt)

        if len(res) == 0:
            raise CommandError('No schema objects matched "%s"' % attribute)
        if len(res) > 1:
            raise CommandError('Multiple schema objects matched "%s": this is a serious issue you should report!' % attribute)

        # Get the content of searchFlags (if any) and manipulate them to
        # show our friendly names.

        # WARNING: If you are reading this in the future trying to change an
        # ldb message dynamically, and wondering why you get an operations
        # error, it's related to talloc references.
        #
        # When you create *any* python reference, IE:
        # flags = res[0]['attr']
        # this creates a talloc_reference that may live forever due to pythons
        # memory management model. However, when you create this reference it
        # blocks talloc_realloc from functions in msg.add(element).
        #
        # As a result, you MUST avoid ALL new variable references UNTIL you have
        # modified the message as required, even if it makes your code more
        # verbose.

        if 'searchFlags' in res[0].keys():
            flags_i = None
            try:
                # See above
                flags_i = int(str(res[0]['searchFlags']))
            except ValueError:
                raise CommandError('Invalid schemaFlags value "%s": this is a serious issue you should report!' % res[0]['searchFlags'])
            # Work out what keys we have.
            out = []
            for flag in bitFields['searchflags'].keys():
                # bitFields positions are MSB-first, hence the 31 - n shift.
                if flags_i & (1 << (31 - bitFields['searchflags'][flag])) != 0:
                    out.append(flag)
            if len(out) > 0:
                # Add the decoded names as a synthetic attribute for display.
                res[0].add(ldb.MessageElement(out, ldb.FLAG_MOD_ADD, 'searchFlagsDecoded'))

        # Emit the (possibly augmented) entry as LDIF.
        user_ldif = samdb.write_ldif(res[0], ldb.CHANGETYPE_NONE)
        self.outf.write(user_ldif)
+
+
class cmd_schema_attribute_show_oc(Command):
    """Show what objectclasses MAY or MUST contain an attribute.

    This is useful to determine "if I need uid, what objectclasses could be
    applied to achieve this."
    """
    synopsis = "%prog attribute [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server",
               type=str, metavar="URL", dest="H"),
    ]

    takes_args = ["attribute"]

    def run(self, attribute, H=None, credopts=None, sambaopts=None, versionopts=None):
        lp = sambaopts.get_loadparm()
        creds = credopts.get_credentials(lp)

        samdb = SamDB(url=H, session_info=system_session(),
                      credentials=creds, lp=lp)

        schema_dn = samdb.schema_dn()

        # Classes that optionally carry the attribute ...
        optional_filter = '(&(objectClass=classSchema)' \
                          '(|(mayContain={0})(systemMayContain={0})))'.format(attribute)
        # ... and classes that require it.
        required_filter = '(&(objectClass=classSchema)' \
                          '(|(mustContain={0})(systemMustContain={0})))'.format(attribute)

        optional_classes = samdb.search(base=schema_dn, scope=ldb.SCOPE_SUBTREE,
                                        expression=optional_filter, attrs=['cn'])
        required_classes = samdb.search(base=schema_dn, scope=ldb.SCOPE_SUBTREE,
                                        expression=required_filter, attrs=['cn'])

        self.outf.write('--- MAY contain ---\n')
        for entry in optional_classes:
            self.outf.write('%s\n' % entry['cn'][0])

        self.outf.write('--- MUST contain ---\n')
        for entry in required_classes:
            self.outf.write('%s\n' % entry['cn'][0])
+
+
class cmd_schema_objectclass_show(Command):
    """Show details about an objectClass from the schema.

    Schema objectClass definitions define and control the behaviour of directory
    objects including what attributes they may contain. This displays the
    details of an objectClass.
    """
    synopsis = "%prog objectclass [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server",
               type=str, metavar="URL", dest="H"),
    ]

    takes_args = ["objectclass"]

    def run(self, objectclass, H=None, credopts=None, sambaopts=None, versionopts=None):
        lp = sambaopts.get_loadparm()
        creds = credopts.get_credentials(lp)

        samdb = SamDB(url=H, session_info=system_session(),
                      credentials=creds, lp=lp)

        # Match by lDAPDisplayName, cn or name within the schema partition.
        search_expr = '(&(objectClass=classSchema)' \
                      '(|(lDAPDisplayName={0})(cn={0})(name={0})))'.format(objectclass)

        matches = samdb.search(base=samdb.schema_dn(),
                               scope=ldb.SCOPE_SUBTREE,
                               expression=search_expr)

        # Dump every matching class definition as LDIF.
        for entry in matches:
            self.outf.write(samdb.write_ldif(entry, ldb.CHANGETYPE_NONE))
+
+
class cmd_schema_attribute(SuperCommand):
    """Query and manage attributes in the schema partition."""

    subcommands = {
        "modify": cmd_schema_attribute_modify(),
        "show": cmd_schema_attribute_show(),
        "show_oc": cmd_schema_attribute_show_oc(),
    }
+
+
class cmd_schema_objectclass(SuperCommand):
    """Query and manage objectclasses in the schema partition."""

    subcommands = {
        "show": cmd_schema_objectclass_show(),
    }
+
+
class cmd_schema(SuperCommand):
    """Schema querying and management."""

    subcommands = {
        "attribute": cmd_schema_attribute(),
        "objectclass": cmd_schema_objectclass(),
    }
diff --git a/python/samba/netcmd/shell.py b/python/samba/netcmd/shell.py
new file mode 100644
index 0000000..31619eb
--- /dev/null
+++ b/python/samba/netcmd/shell.py
@@ -0,0 +1,74 @@
+# Unix SMB/CIFS implementation.
+#
+# Interactive Python shell for SAMBA
+#
+# Copyright (C) Catalyst.Net Ltd. 2023
+#
+# Written by Rob van der Linde <rob@catalyst.net.nz>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import code
+import readline
+import rlcompleter
+
+import ldb
+
+import samba.getopt as options
+from samba import version
+from samba.netcmd import Command
+from samba.netcmd.domain.models import MODELS
+
+
class cmd_shell(Command):
    """Open a SAMBA Python shell."""

    synopsis = "%prog -H [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "credopts": options.CredentialsOptions,
        "hostopts": options.HostOptions,
    }

    def run(self, sambaopts=None, credopts=None, hostopts=None):
        # Connect to the database/server selected by the host options.
        samdb = self.ldb_connect(hostopts, sambaopts, credopts)

        # Build the interactive namespace: this module's globals plus the
        # live connection, the ldb module and every domain model class.
        context = globals()
        context.update({
            "samdb": samdb,
            "ldb": ldb,
        })
        context.update(MODELS)

        banner = rf"""
  _____         __  __ ____
 / ____|  /\   |  \/  |  _ \   /\
| (___   /  \  | \  / | |_) | /  \
 \___ \ / /\ \ | |\/| |  _ < / /\ \
 ____) / ____ \| |  | | |_) / ____ \
|_____/_/    \_\_|  |_|____/_/    \_\
 v{version}

Variables:

samdb = {samdb}
"""
        # List each model name so the user knows what is pre-loaded.
        for name, model in MODELS.items():
            banner += f"{name} = {model}\n"

        # Enable tab-completion over the shell's namespace, then drop
        # into an interactive console using that namespace as locals.
        readline.parse_and_bind("tab: complete")
        readline.set_completer(rlcompleter.Completer(context).complete)
        code.InteractiveConsole(locals=context).interact(banner=banner)
diff --git a/python/samba/netcmd/sites.py b/python/samba/netcmd/sites.py
new file mode 100644
index 0000000..52565d5
--- /dev/null
+++ b/python/samba/netcmd/sites.py
@@ -0,0 +1,348 @@
+# sites management
+#
+# Copyright Matthieu Patou <mat@matws.net> 2011
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from samba import sites, subnets
+import samba.getopt as options
+from samba.netcmd import (
+ Command,
+ CommandError,
+ SuperCommand,
+ Option,
+)
+from samba.netcmd.domain.models import Site, Subnet
+from samba.netcmd.domain.models.exceptions import ModelError
+
+
class cmd_sites_list(Command):
    """List sites."""

    synopsis = "%prog [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
        "hostopts": options.HostOptions,
    }

    takes_options = [
        Option("--json", help="Output results in JSON format.",
               dest="output_format", action="store_const", const="json"),
    ]

    def run(self, hostopts=None, sambaopts=None, credopts=None,
            versionopts=None, output_format=None):

        samdb = self.ldb_connect(hostopts, sambaopts, credopts)

        # Map each site's cn to its dict representation.
        try:
            sites_by_cn = {}
            for site in Site.query(samdb):
                sites_by_cn[site.cn] = site.as_dict()
        except ModelError as e:
            raise CommandError(e)

        if output_format == "json":
            # JSON output carries the full detail of every site.
            self.print_json(sites_by_cn)
        else:
            # Plain output: one site name per line.
            for cn in sites_by_cn:
                self.outf.write(f"{cn}\n")
+
+
class cmd_sites_view(Command):
    """View one site."""

    synopsis = "%prog <site> [options]"

    takes_args = ["sitename"]

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
        "hostopts": options.HostOptions,
    }

    def run(self, sitename, hostopts=None, sambaopts=None, credopts=None,
            versionopts=None):

        samdb = self.ldb_connect(hostopts, sambaopts, credopts)

        # Look the site up by cn; a lookup failure is a command error.
        try:
            site = Site.get(samdb, cn=sitename)
        except ModelError as e:
            raise CommandError(e)

        if site is None:
            raise CommandError(f"Site {sitename} not found.")

        # Print the full site record as JSON.
        self.print_json(site.as_dict())
+
+
class cmd_sites_create(Command):
    """Create a new site."""

    synopsis = "%prog <site> [options]"

    takes_args = ["sitename"]

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
        "hostopts": options.HostOptions,
    }

    def run(self, sitename, hostopts=None, sambaopts=None, credopts=None,
            versionopts=None):
        samdb = self.ldb_connect(hostopts, sambaopts, credopts)

        # Create the site inside a transaction so a failure leaves
        # the directory untouched.
        samdb.transaction_start()
        try:
            sites.create_site(samdb, samdb.get_config_basedn(), sitename)
        except sites.SiteAlreadyExistsException as e:
            samdb.transaction_cancel()
            raise CommandError("Error while creating site %s, error: %s" %
                               (sitename, str(e)))
        else:
            samdb.transaction_commit()

        self.outf.write("Site %s created !\n" % sitename)
+
+
class cmd_sites_delete(Command):
    """Delete an existing site."""

    synopsis = "%prog <site> [options]"

    takes_args = ["sitename"]

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
        "hostopts": options.HostOptions,
    }

    def run(self, sitename, hostopts=None, sambaopts=None, credopts=None,
            versionopts=None):
        samdb = self.ldb_connect(hostopts, sambaopts, credopts)

        # Remove the site transactionally; roll back on any site error.
        samdb.transaction_start()
        try:
            sites.delete_site(samdb, samdb.get_config_basedn(), sitename)
        except sites.SiteException as e:
            samdb.transaction_cancel()
            raise CommandError(
                "Error while removing site %s, error: %s" % (sitename, str(e)))
        else:
            samdb.transaction_commit()

        self.outf.write("Site %s removed!\n" % sitename)
+
+
class cmd_sites_subnet_list(Command):
    """List subnets."""

    synopsis = "%prog <site> [options]"

    takes_args = ["sitename"]

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
        "hostopts": options.HostOptions,
    }

    takes_options = [
        Option("--json", help="Output results in JSON format.",
               dest="output_format", action="store_const", const="json"),
    ]

    def run(self, sitename, hostopts=None, sambaopts=None, credopts=None,
            versionopts=None, output_format=None):

        samdb = self.ldb_connect(hostopts, sambaopts, credopts)

        # The site must exist before we can list its subnets.
        try:
            site = Site.get(samdb, cn=sitename)
        except ModelError as e:
            raise CommandError(e)

        if site is None:
            raise CommandError(f"Site {sitename} not found.")

        # Collect the site's subnets keyed by cn.
        try:
            subnets_by_cn = {}
            for subnet in Subnet.query(samdb, site_object=str(site.dn)):
                subnets_by_cn[subnet.cn] = subnet.as_dict()
        except ModelError as e:
            raise CommandError(e)

        if output_format == "json":
            # JSON output carries the full detail of every subnet.
            self.print_json(subnets_by_cn)
        else:
            # Plain output: one subnet name per line.
            for cn in subnets_by_cn:
                self.outf.write(f"{cn}\n")
+
+
class cmd_sites_subnet_view(Command):
    """View subnet details."""

    synopsis = "%prog <subnet> [options]"

    takes_args = ["subnetname"]

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
        "hostopts": options.HostOptions,
    }

    def run(self, subnetname, hostopts=None, sambaopts=None, credopts=None,
            versionopts=None):

        samdb = self.ldb_connect(hostopts, sambaopts, credopts)

        # Look the subnet up by cn; a lookup failure is a command error.
        try:
            subnet = Subnet.get(samdb, cn=subnetname)
        except ModelError as e:
            raise CommandError(e)

        if subnet is None:
            raise CommandError(f"Subnet {subnetname} not found.")

        # Print the full subnet record as JSON.
        self.print_json(subnet.as_dict())
+
+
class cmd_sites_subnet_create(Command):
    """Create a new subnet."""
    synopsis = "%prog <subnet> <site-of-subnet> [options]"
    takes_args = ["subnetname", "site_of_subnet"]

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
        "hostopts": options.HostOptions,
    }

    def run(self, subnetname, site_of_subnet, hostopts=None, sambaopts=None,
            credopts=None, versionopts=None):
        samdb = self.ldb_connect(hostopts, sambaopts, credopts)

        # Create the subnet under its site inside a transaction so a
        # failure leaves the directory untouched.
        samdb.transaction_start()
        try:
            subnets.create_subnet(samdb, samdb.get_config_basedn(), subnetname,
                                  site_of_subnet)
        except subnets.SubnetException as e:
            samdb.transaction_cancel()
            raise CommandError("Error while creating subnet %s: %s" %
                               (subnetname, e))
        else:
            samdb.transaction_commit()

        self.outf.write("Subnet %s created !\n" % subnetname)
+
+
class cmd_sites_subnet_delete(Command):
    """Delete an existing subnet."""

    synopsis = "%prog <subnet> [options]"

    takes_args = ["subnetname"]

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
        "hostopts": options.HostOptions,
    }

    def run(self, subnetname, hostopts=None, sambaopts=None, credopts=None,
            versionopts=None):
        samdb = self.ldb_connect(hostopts, sambaopts, credopts)

        # Remove the subnet transactionally; roll back on failure.
        samdb.transaction_start()
        try:
            subnets.delete_subnet(samdb, samdb.get_config_basedn(), subnetname)
        except subnets.SubnetException as e:
            samdb.transaction_cancel()
            raise CommandError("Error while removing subnet %s, error: %s" %
                               (subnetname, e))
        else:
            samdb.transaction_commit()

        self.outf.write("Subnet %s removed!\n" % subnetname)
+
+
class cmd_sites_subnet_set_site(Command):
    """Assign a subnet to a site."""
    synopsis = "%prog <subnet> <site-of-subnet> [options]"
    takes_args = ["subnetname", "site_of_subnet"]

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
        "hostopts": options.HostOptions,
    }

    def run(self, subnetname, site_of_subnet, hostopts=None, sambaopts=None,
            credopts=None, versionopts=None):
        samdb = self.ldb_connect(hostopts, sambaopts, credopts)

        # Reassign the subnet inside a transaction so a failure leaves
        # the directory untouched.
        samdb.transaction_start()
        try:
            subnets.set_subnet_site(samdb, samdb.get_config_basedn(),
                                    subnetname, site_of_subnet)
            samdb.transaction_commit()
        except subnets.SubnetException as e:
            samdb.transaction_cancel()
            raise CommandError("Error assigning subnet %s to site %s: %s" %
                               (subnetname, site_of_subnet, e))

        # Consistency: the sibling commands write to self.outf directly
        # rather than going through print(); output is identical.
        self.outf.write("Subnet %s shifted to site %s\n" %
                        (subnetname, site_of_subnet))
+
+
class cmd_sites_subnet(SuperCommand):
    """Subnet management subcommands."""

    subcommands = {}
    subcommands["create"] = cmd_sites_subnet_create()
    subcommands["remove"] = cmd_sites_subnet_delete()
    subcommands["list"] = cmd_sites_subnet_list()
    subcommands["view"] = cmd_sites_subnet_view()
    subcommands["set-site"] = cmd_sites_subnet_set_site()
+
+
class cmd_sites(SuperCommand):
    """Sites management."""

    subcommands = {
        "list": cmd_sites_list(),
        "view": cmd_sites_view(),
        "create": cmd_sites_create(),
        "remove": cmd_sites_delete(),
        "subnet": cmd_sites_subnet(),
    }
diff --git a/python/samba/netcmd/spn.py b/python/samba/netcmd/spn.py
new file mode 100644
index 0000000..ab79e9c
--- /dev/null
+++ b/python/samba/netcmd/spn.py
@@ -0,0 +1,210 @@
+# spn management
+#
+# Copyright Matthieu Patou mat@samba.org 2010
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import samba.getopt as options
+import ldb
+from samba.samdb import SamDB
+from samba.auth import system_session
+from samba.netcmd.common import _get_user_realm_domain
+from samba.netcmd import (
+ Command,
+ CommandError,
+ SuperCommand,
+ Option
+)
+
+
class cmd_spn_list(Command):
    """List spns of a given user."""

    synopsis = "%prog <user> [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "credopts": options.CredentialsOptions,
        "versionopts": options.VersionOptions,
    }
    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server",
               type=str, metavar="URL", dest="H"),
    ]

    takes_args = ["user"]

    def run(self, user, H=None,
            credopts=None,
            sambaopts=None,
            versionopts=None):
        lp = sambaopts.get_loadparm()
        creds = credopts.get_credentials(lp)
        sam = SamDB(H, session_info=system_session(),
                    credentials=creds, lp=lp)
        # TODO once I understand how, use the domain info to naildown
        # to the correct domain
        (cleaneduser, realm, domain) = _get_user_realm_domain(user, sam)
        self.outf.write(cleaneduser + "\n")

        # Find the account and report its servicePrincipalName values.
        matches = sam.search(
            expression="samaccountname=%s" % ldb.binary_encode(cleaneduser),
            scope=ldb.SCOPE_SUBTREE, attrs=["servicePrincipalName"])
        if not matches:
            raise CommandError("User %s not found" % user)

        spns = matches[0].get("servicePrincipalName")
        if spns is None:
            self.outf.write("User %s has no servicePrincipalName\n" %
                            matches[0].dn)
        else:
            self.outf.write(
                "User %s has the following servicePrincipalName: \n" %
                matches[0].dn)
            for spn in spns:
                self.outf.write("\t %s\n" % spn)
+
+
class cmd_spn_add(Command):
    """Create a new spn."""

    synopsis = "%prog <name> <user> [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "credopts": options.CredentialsOptions,
        "versionopts": options.VersionOptions,
    }
    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server",
               type=str, metavar="URL", dest="H"),
    ]
    takes_args = ["name", "user"]

    def run(self, name, user, H=None,
            credopts=None,
            sambaopts=None,
            versionopts=None):
        """Add SPN *name* to *user*, refusing duplicates domain-wide.

        :raises CommandError: if the SPN already belongs to any account,
            already belongs to this user, or the user does not exist.
        """
        lp = sambaopts.get_loadparm()
        creds = credopts.get_credentials(lp)
        sam = SamDB(H, session_info=system_session(),
                    credentials=creds, lp=lp)

        # SPNs must be unique across the domain: refuse if any object
        # already carries this value.
        res = sam.search(
            expression="servicePrincipalName=%s" % ldb.binary_encode(name),
            scope=ldb.SCOPE_SUBTREE)
        if len(res) != 0:
            raise CommandError("Service principal %s already"
                               " affected to another user" % name)

        (cleaneduser, realm, domain) = _get_user_realm_domain(user, sam)
        res = sam.search(
            expression="samaccountname=%s" % ldb.binary_encode(cleaneduser),
            scope=ldb.SCOPE_SUBTREE, attrs=["servicePrincipalName"])
        if len(res) == 0:
            raise CommandError("User %s not found" % user)

        # (The original evaluated "res[0].dn" here as a bare statement --
        # a no-op; removed.)
        msg = ldb.Message()
        spns = res[0].get("servicePrincipalName")
        tab = []
        found = False
        flag = ldb.FLAG_MOD_ADD
        if spns is not None:
            # Preserve existing values and REPLACE the whole attribute,
            # noting whether the requested SPN is already present.
            for e in spns:
                if str(e) == name:
                    found = True
                tab.append(str(e))
            flag = ldb.FLAG_MOD_REPLACE
        tab.append(name)
        msg.dn = res[0].dn
        msg["servicePrincipalName"] = ldb.MessageElement(tab, flag,
                                                         "servicePrincipalName")
        if not found:
            sam.modify(msg)
        else:
            raise CommandError("Service principal %s already"
                               " affected to %s" % (name, user))
+
+
class cmd_spn_delete(Command):
    """Delete a spn."""

    synopsis = "%prog <name> [user] [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "credopts": options.CredentialsOptions,
        "versionopts": options.VersionOptions,
    }
    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server",
               type=str, metavar="URL", dest="H"),
    ]

    takes_args = ["name", "user?"]

    def run(self, name, user=None, H=None, credopts=None, sambaopts=None,
            versionopts=None):
        """Remove SPN *name*, optionally scoped to a specific *user*.

        If several accounts carry the SPN, *user* must be given to
        disambiguate; otherwise the list of holders is reported.
        """
        lp = sambaopts.get_loadparm()
        creds = credopts.get_credentials(lp)
        sam = SamDB(H, session_info=system_session(),
                    credentials=creds, lp=lp)
        res = sam.search(
            expression="servicePrincipalName=%s" % ldb.binary_encode(name),
            scope=ldb.SCOPE_SUBTREE,
            attrs=["servicePrincipalName", "samAccountName"])
        if len(res) > 0:
            result = None
            if user is not None:
                # Bug fix: _get_user_realm_domain requires the SamDB
                # connection (as in the list/add commands); the original
                # omitted it and raised TypeError here.
                (cleaneduser, realm, domain) = _get_user_realm_domain(user,
                                                                      sam)
                # Pick the holder whose account name matches the user.
                for elem in res:
                    if str(elem["samAccountName"]).lower() == cleaneduser:
                        result = elem
                if result is None:
                    raise CommandError("Unable to find user %s with"
                                       " spn %s" % (user, name))
            else:
                if len(res) != 1:
                    listUser = ""
                    for r in res:
                        listUser = "%s\n%s" % (listUser, str(r.dn))
                    raise CommandError("More than one user has the spn %s "
                                       "and no specific user was specified, list of users"
                                       " with this spn:%s" % (name, listUser))
                else:
                    result = res[0]

            # Rewrite the attribute with every value except the one
            # being removed.
            msg = ldb.Message()
            spns = result.get("servicePrincipalName")
            tab = []
            if spns is not None:
                for e in spns:
                    if str(e) != name:
                        tab.append(str(e))
                flag = ldb.FLAG_MOD_REPLACE
                msg.dn = result.dn
                msg["servicePrincipalName"] = ldb.MessageElement(tab, flag,
                                                                 "servicePrincipalName")
                sam.modify(msg)
        else:
            raise CommandError("Service principal %s not affected" % name)
+
+
class cmd_spn(SuperCommand):
    """Service Principal Name (SPN) management."""

    subcommands = {
        "add": cmd_spn_add(),
        "list": cmd_spn_list(),
        "delete": cmd_spn_delete(),
    }
diff --git a/python/samba/netcmd/testparm.py b/python/samba/netcmd/testparm.py
new file mode 100644
index 0000000..41dbb4b
--- /dev/null
+++ b/python/samba/netcmd/testparm.py
@@ -0,0 +1,236 @@
+# Unix SMB/CIFS implementation.
+# Test validity of smb.conf
+# Copyright (C) 2010-2011 Jelmer Vernooij <jelmer@samba.org>
+#
+# Based on the original in C:
+# Copyright (C) Karl Auer 1993, 1994-1998
+# Extensively modified by Andrew Tridgell, 1995
+# Converted to popt by Jelmer Vernooij (jelmer@nl.linux.org), 2002
+# Updated for Samba4 by Andrew Bartlett <abartlet@samba.org> 2006
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Testbed for loadparm.c/params.c
+#
+# This module simply loads a specified configuration file and
+# if successful, dumps it's contents to stdout. Note that the
+# operation is performed with DEBUGLEVEL at 3.
+#
+# Useful for a quick 'syntax check' of a configuration file.
+#
+
+import os
+import sys
+
+import samba
+import samba.getopt as options
+from samba.netcmd import Command, CommandError, Option
+
+
+class cmd_testparm(Command):
+ """Syntax check the configuration file."""
+
+ synopsis = "%prog [options]"
+
+ takes_optiongroups = {
+ "sambaopts": options.SambaOptions,
+ "versionopts": options.VersionOptions
+ }
+
+ takes_options = [
+ Option("--section-name", type=str,
+ help="Limit testparm to a named section"),
+ Option("--parameter-name", type=str,
+ help="Limit testparm to a named parameter"),
+ Option("--client-name", type=str,
+ help="Client DNS name for 'hosts allow' checking "
+ "(should match reverse lookup)"),
+ Option("--client-ip", type=str,
+ help="Client IP address for 'hosts allow' checking"),
+ Option("--suppress-prompt", action="store_true", default=False,
+ help="Suppress prompt for enter"),
+ Option("-v", "--verbose", action="store_true",
+ default=False, help="Show default options too"),
+ # We need support for smb.conf macros before this will work again
+ Option("--server", type=str, help="Set %L macro to servername"),
+ # These are harder to do with the new code structure
+ Option("--show-all-parameters", action="store_true", default=False,
+ help="Show the parameters, type, possible values")
+ ]
+
+ takes_args = []
+
+ def run(self, sambaopts, versionopts, section_name=None,
+ parameter_name=None, client_ip=None, client_name=None,
+ verbose=False, suppress_prompt=None, show_all_parameters=False,
+ server=None):
+ if server:
+ raise NotImplementedError("--server not yet implemented")
+ if show_all_parameters:
+ raise NotImplementedError("--show-all-parameters not yet implemented")
+ if client_name is not None and client_ip is None:
+ raise CommandError("Both a DNS name and an IP address are "
+ "required for the host access check")
+
+ try:
+ lp = sambaopts.get_loadparm()
+ except RuntimeError as err:
+ raise CommandError(err)
+
+ # We need this to force the output
+ samba.set_debug_level(2)
+
+ logger = self.get_logger("testparm")
+
+ logger.info("Loaded smb config files from %s", lp.configfile)
+ logger.info("Loaded services file OK.")
+
+ valid = self.do_global_checks(lp, logger)
+ valid = valid and self.do_share_checks(lp, logger)
+ if client_name is not None and client_ip is not None:
+ self.check_client_access(lp, logger, client_name, client_ip)
+ else:
+ if section_name is not None or parameter_name is not None:
+ if parameter_name is None:
+ try:
+ section = lp[section_name]
+ except KeyError:
+ if section_name in ['global', 'globals']:
+ lp.dump_globals()
+ else:
+ raise CommandError(f"Unknown section {section_name}")
+ else:
+ section.dump(lp.default_service, verbose)
+ else:
+ try:
+ lp.dump_a_parameter(parameter_name, section_name)
+ except RuntimeError as e:
+ raise CommandError(e)
+ else:
+ if not suppress_prompt:
+ self.outf.write("Press enter to see a dump of your service definitions\n")
+ sys.stdin.readline()
+ lp.dump(verbose)
+ if valid:
+ return
+ else:
+ raise CommandError("Invalid smb.conf")
+
+ def do_global_checks(self, lp, logger):
+ valid = True
+
+ netbios_name = lp.get("netbios name")
+ if not samba.valid_netbios_name(netbios_name):
+ logger.error("netbios name %s is not a valid netbios name",
+ netbios_name)
+ valid = False
+
+ workgroup = lp.get("workgroup")
+ if not samba.valid_netbios_name(workgroup):
+ logger.error("workgroup name %s is not a valid netbios name",
+ workgroup)
+ valid = False
+
+ lockdir = lp.get("lockdir")
+
+ if not os.path.isdir(lockdir):
+ logger.error("lock directory %s does not exist", lockdir)
+ valid = False
+
+ piddir = lp.get("pid directory")
+
+ if not os.path.isdir(piddir):
+ logger.error("pid directory %s does not exist", piddir)
+ valid = False
+
+ winbind_separator = lp.get("winbind separator")
+
+ if len(winbind_separator) != 1:
+ logger.error("the 'winbind separator' parameter must be a single "
+ "character.")
+ valid = False
+
+ if winbind_separator == '+':
+ logger.error(
+ "'winbind separator = +' might cause problems with group "
+ "membership.")
+ valid = False
+
+ role = lp.get("server role")
+
+ if role in ["active directory domain controller", "domain controller", "dc"]:
+ charset = lp.get("unix charset").upper()
+ if charset not in ["UTF-8", "UTF8"]:
+ logger.warning(
+ "When acting as Active Directory domain controller, "
+ "unix charset is expected to be UTF-8.")
+ vfsobjects = lp.get("vfs objects")
+ if vfsobjects:
+ for entry in ['dfs_samba4', 'acl_xattr']:
+ if entry not in vfsobjects:
+ logger.warning(
+ "When acting as Active Directory domain controller, " +
+ entry + " should be in vfs objects.")
+
+ return valid
+
+ def allow_access(self, deny_list, allow_list, cname, caddr):
+ raise NotImplementedError(self.allow_access)
+
+ def do_share_checks(self, lp, logger):
+ valid = True
+ for s in lp.services():
+ if len(s) > 12:
+ logger.warning(
+ "You have some share names that are longer than 12 "
+ "characters. These may not be accessible to some older "
+ "clients. (Eg. Windows9x, WindowsMe, and not listed in "
+ "smbclient in Samba 3.0.)")
+ break
+
+ for s in lp.services():
+ deny_list = lp.get("hosts deny", s)
+ allow_list = lp.get("hosts allow", s)
+ if deny_list:
+ for entry in deny_list:
+ if "*" in entry or "?" in entry:
+ logger.error("Invalid character (* or ?) in hosts deny "
+ "list (%s) for service %s.", entry, s)
+ valid = False
+
+ if allow_list:
+ for entry in allow_list:
+ if "*" in entry or "?" in entry:
+ logger.error("Invalid character (* or ?) in hosts allow "
+ "list (%s) for service %s.", entry, s)
+ valid = False
+ return valid
+
+ def check_client_access(self, lp, logger, cname, caddr):
+ # this is totally ugly, a real `quick' hack
+ for s in lp.services():
+ if (self.allow_access(lp.get("hosts deny"), lp.get("hosts allow"), cname,
+ caddr) and
+ self.allow_access(lp.get("hosts deny", s), lp.get("hosts allow", s),
+ cname, caddr)):
+ logger.info("Allow connection from %s (%s) to %s", cname, caddr, s)
+ else:
+ logger.info("Deny connection from %s (%s) to %s", cname, caddr, s)
+
+## FIXME: We need support for smb.conf macros before this will work again
+##
+## if (new_local_machine) {
+## set_local_machine_name(new_local_machine, True)
+## }
+#
diff --git a/python/samba/netcmd/user/__init__.py b/python/samba/netcmd/user/__init__.py
new file mode 100644
index 0000000..fab657c
--- /dev/null
+++ b/python/samba/netcmd/user/__init__.py
@@ -0,0 +1,70 @@
+# user management
+#
+# Copyright Jelmer Vernooij 2010 <jelmer@samba.org>
+# Copyright Theresa Halloran 2011 <theresahalloran@gmail.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from samba.netcmd import SuperCommand
+
+from .add import cmd_user_add
+from .add_unix_attrs import cmd_user_add_unix_attrs
+from .auth import cmd_user_auth
+from .delete import cmd_user_delete
+from .disable import cmd_user_disable
+from .edit import cmd_user_edit
+from .enable import cmd_user_enable
+from .getgroups import cmd_user_getgroups
+from .list import cmd_user_list
+from .move import cmd_user_move
+from .password import cmd_user_password
+from .readpasswords import (cmd_user_getpassword,
+ cmd_user_show,
+ cmd_user_syncpasswords,
+ cmd_user_get_kerberos_ticket)
+from .rename import cmd_user_rename
+from .sensitive import cmd_user_sensitive
+from .setexpiry import cmd_user_setexpiry
+from .setpassword import cmd_user_setpassword
+from .setprimarygroup import cmd_user_setprimarygroup
+from .unlock import cmd_user_unlock
+
+
+class cmd_user(SuperCommand):
+ """User management."""
+
+ subcommands = {}
+ subcommands["auth"] = cmd_user_auth()
+ subcommands["add"] = cmd_user_add()
+ subcommands["create"] = cmd_user_add()
+ subcommands["delete"] = cmd_user_delete()
+ subcommands["disable"] = cmd_user_disable()
+ subcommands["enable"] = cmd_user_enable()
+ subcommands["list"] = cmd_user_list()
+ subcommands["setexpiry"] = cmd_user_setexpiry()
+ subcommands["password"] = cmd_user_password()
+ subcommands["getgroups"] = cmd_user_getgroups()
+ subcommands["setprimarygroup"] = cmd_user_setprimarygroup()
+ subcommands["setpassword"] = cmd_user_setpassword()
+ subcommands["getpassword"] = cmd_user_getpassword()
+ subcommands["get-kerberos-ticket"] = cmd_user_get_kerberos_ticket()
+ subcommands["syncpasswords"] = cmd_user_syncpasswords()
+ subcommands["edit"] = cmd_user_edit()
+ subcommands["show"] = cmd_user_show()
+ subcommands["move"] = cmd_user_move()
+ subcommands["rename"] = cmd_user_rename()
+ subcommands["unlock"] = cmd_user_unlock()
+ subcommands["addunixattrs"] = cmd_user_add_unix_attrs()
+ subcommands["sensitive"] = cmd_user_sensitive()
diff --git a/python/samba/netcmd/user/add.py b/python/samba/netcmd/user/add.py
new file mode 100644
index 0000000..be4e3cc
--- /dev/null
+++ b/python/samba/netcmd/user/add.py
@@ -0,0 +1,209 @@
+# user management
+#
+# add user
+#
+# Copyright Jelmer Vernooij 2010 <jelmer@samba.org>
+# Copyright Theresa Halloran 2011 <theresahalloran@gmail.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import pwd
+from getpass import getpass
+
+import samba.getopt as options
+from samba import generate_random_password
+from samba.auth import system_session
+from samba.netcmd import Command, CommandError, Option
+from samba.samdb import SamDB
+
+
+class cmd_user_add(Command):
+ """Add a new user.
+
+This command adds a new user account to the Active Directory domain. The username specified on the command is the sAMAccountName.
+
+User accounts may represent physical entities, such as people or may be used as service accounts for applications. User accounts are also referred to as security principals and are assigned a security identifier (SID).
+
+A user account enables a user to logon to a computer and domain with an identity that can be authenticated. To maximize security, each user should have their own unique user account and password. A user's access to domain resources is based on permissions assigned to the user account.
+
+Unix (RFC2307) attributes may be added to the user account. Attributes taken from NSS are obtained on the local machine. Explicitly given values override values obtained from NSS. Configure 'idmap_ldb:use rfc2307 = Yes' to use these attributes for UID/GID mapping.
+
+The command may be run from the root userid or another authorized userid. The -H or --URL= option can be used to execute the command against a remote server.
+
+Example1:
+samba-tool user add User1 passw0rd --given-name=John --surname=Smith --must-change-at-next-login -H ldap://samba.samdom.example.com -Uadministrator%passw1rd
+
+Example1 shows how to add a new user to the domain against a remote LDAP server. The -H parameter is used to specify the remote target server. The -U option is used to pass the userid and password authorized to issue the command remotely.
+
+Example2:
+sudo samba-tool user add User2 passw2rd --given-name=Jane --surname=Doe --must-change-at-next-login
+
+Example2 shows how to add a new user to the domain against the local server. sudo is used so a user may run the command as root. In this example, after User2 is created, he/she will be forced to change their password when they logon.
+
+Example3:
+samba-tool user add User3 passw3rd --userou='OU=OrgUnit'
+
+Example3 shows how to add a new user in the OrgUnit organizational unit.
+
+Example4:
+samba-tool user add User4 passw4rd --rfc2307-from-nss --gecos 'some text'
+
+Example4 shows how to add a new user with Unix UID, GID and login-shell set from the local NSS and GECOS set to 'some text'.
+
+Example5:
+samba-tool user add User5 passw5rd --nis-domain=samdom --unix-home=/home/User5 \\
+ --uid-number=10005 --login-shell=/bin/false --gid-number=10000
+
+Example5 shows how to add a new RFC2307/NIS domain enabled user account. If
+--nis-domain is set, then the other four parameters are mandatory.
+
+"""
+ synopsis = "%prog <username> [<password>] [options]"
+
+ takes_options = [
+ Option("-H", "--URL", help="LDB URL for database or target server", type=str,
+ metavar="URL", dest="H"),
+ Option("--must-change-at-next-login",
+ help="Force password to be changed on next login",
+ action="store_true"),
+ Option("--random-password",
+ help="Generate random password",
+ action="store_true"),
+ Option("--smartcard-required",
+ help="Require a smartcard for interactive logons",
+ action="store_true"),
+ Option("--use-username-as-cn",
+ help="Force use of username as user's CN",
+ action="store_true"),
+ Option("--userou",
+ help="DN of alternative location (without domainDN counterpart) to default CN=Users in which new user object will be created. E. g. 'OU=<OU name>'",
+ type=str),
+ Option("--surname", help="User's surname", type=str),
+ Option("--given-name", help="User's given name", type=str),
+ Option("--initials", help="User's initials", type=str),
+ Option("--profile-path", help="User's profile path", type=str),
+ Option("--script-path", help="User's logon script path", type=str),
+ Option("--home-drive", help="User's home drive letter", type=str),
+ Option("--home-directory", help="User's home directory path", type=str),
+ Option("--job-title", help="User's job title", type=str),
+ Option("--department", help="User's department", type=str),
+ Option("--company", help="User's company", type=str),
+ Option("--description", help="User's description", type=str),
+ Option("--mail-address", help="User's email address", type=str),
+ Option("--internet-address", help="User's home page", type=str),
+ Option("--telephone-number", help="User's phone number", type=str),
+ Option("--physical-delivery-office", help="User's office location", type=str),
+ Option("--rfc2307-from-nss",
+ help="Copy Unix user attributes from NSS (will be overridden by explicit UID/GID/GECOS/shell)",
+ action="store_true"),
+ Option("--nis-domain", help="User's Unix/RFC2307 NIS domain", type=str),
+ Option("--unix-home", help="User's Unix/RFC2307 home directory",
+ type=str),
+ Option("--uid", help="User's Unix/RFC2307 username", type=str),
+ Option("--uid-number", help="User's Unix/RFC2307 numeric UID", type=int),
+ Option("--gid-number", help="User's Unix/RFC2307 primary GID number", type=int),
+ Option("--gecos", help="User's Unix/RFC2307 GECOS field", type=str),
+ Option("--login-shell", help="User's Unix/RFC2307 login shell", type=str),
+ ]
+
+ takes_args = ["username", "password?"]
+
+ takes_optiongroups = {
+ "sambaopts": options.SambaOptions,
+ "credopts": options.CredentialsOptions,
+ "versionopts": options.VersionOptions,
+ }
+
+ def run(self, username, password=None, credopts=None, sambaopts=None,
+ versionopts=None, H=None, must_change_at_next_login=False,
+ random_password=False, use_username_as_cn=False, userou=None,
+ surname=None, given_name=None, initials=None, profile_path=None,
+ script_path=None, home_drive=None, home_directory=None,
+ job_title=None, department=None, company=None, description=None,
+ mail_address=None, internet_address=None, telephone_number=None,
+ physical_delivery_office=None, rfc2307_from_nss=False,
+ nis_domain=None, unix_home=None, uid=None, uid_number=None,
+ gid_number=None, gecos=None, login_shell=None,
+ smartcard_required=False):
+
+ if smartcard_required:
+ if password is not None and password != '':
+ raise CommandError('It is not allowed to specify '
+ '--newpassword '
+ 'together with --smartcard-required.')
+ if must_change_at_next_login:
+ raise CommandError('It is not allowed to specify '
+ '--must-change-at-next-login '
+ 'together with --smartcard-required.')
+
+ if random_password and not smartcard_required:
+ password = generate_random_password(128, 255)
+
+ while True:
+ if smartcard_required:
+ break
+ if password is not None and password != '':
+ break
+ password = getpass("New Password: ")
+ passwordverify = getpass("Retype Password: ")
+ if not password == passwordverify:
+ password = None
+ self.outf.write("Sorry, passwords do not match.\n")
+
+ if rfc2307_from_nss:
+ pwent = pwd.getpwnam(username)
+ if uid is None:
+ uid = username
+ if uid_number is None:
+ uid_number = pwent[2]
+ if gid_number is None:
+ gid_number = pwent[3]
+ if gecos is None:
+ gecos = pwent[4]
+ if login_shell is None:
+ login_shell = pwent[6]
+
+ lp = sambaopts.get_loadparm()
+ creds = credopts.get_credentials(lp)
+
+ if uid_number or gid_number:
+ if not lp.get("idmap_ldb:use rfc2307"):
+ self.outf.write("You are setting a Unix/RFC2307 UID or GID. You may want to set 'idmap_ldb:use rfc2307 = Yes' to use those attributes for XID/SID-mapping.\n")
+
+ if nis_domain is not None:
+ if None in (uid_number, login_shell, unix_home, gid_number):
+ raise CommandError('Missing parameters. To enable NIS features, '
+ 'the following options have to be given: '
+ '--nis-domain=, --uidNumber=, --login-shell='
+ ', --unix-home=, --gid-number= Operation '
+ 'cancelled.')
+
+ try:
+ samdb = SamDB(url=H, session_info=system_session(),
+ credentials=creds, lp=lp)
+ samdb.newuser(username, password, force_password_change_at_next_login_req=must_change_at_next_login,
+ useusernameascn=use_username_as_cn, userou=userou, surname=surname, givenname=given_name, initials=initials,
+ profilepath=profile_path, homedrive=home_drive, scriptpath=script_path, homedirectory=home_directory,
+ jobtitle=job_title, department=department, company=company, description=description,
+ mailaddress=mail_address, internetaddress=internet_address,
+ telephonenumber=telephone_number, physicaldeliveryoffice=physical_delivery_office,
+ nisdomain=nis_domain, unixhome=unix_home, uid=uid,
+ uidnumber=uid_number, gidnumber=gid_number,
+ gecos=gecos, loginshell=login_shell,
+ smartcard_required=smartcard_required)
+ except Exception as e:
+ raise CommandError("Failed to add user '%s': " % username, e)
+
+ self.outf.write("User '%s' added successfully\n" % username)
diff --git a/python/samba/netcmd/user/add_unix_attrs.py b/python/samba/netcmd/user/add_unix_attrs.py
new file mode 100644
index 0000000..1b4a363
--- /dev/null
+++ b/python/samba/netcmd/user/add_unix_attrs.py
@@ -0,0 +1,244 @@
+# user management
+#
+# user add_unix_attrs command
+#
+# Copyright Jelmer Vernooij 2010 <jelmer@samba.org>
+# Copyright Theresa Halloran 2011 <theresahalloran@gmail.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import samba.getopt as options
+from samba import ldb
+from samba.auth import system_session
+from samba.netcmd import Command, CommandError, Option
+from samba.samdb import SamDB
+
+
+class cmd_user_add_unix_attrs(Command):
+ """Add RFC2307 attributes to a user.
+
+This command adds Unix attributes to a user account in the Active
+Directory domain.
+
+The username specified on the command is the sAMAccountName.
+
+You must supply a unique uidNumber.
+
+Unix (RFC2307) attributes will be added to the user account.
+
+If you supply a gidNumber with '--gid-number', this will be used for the
+user's Unix 'gidNumber' attribute.
+
+If '--gid-number' is not supplied, the user's Unix gidNumber will be set to the
+one found in 'Domain Users', this means Domain Users must have a gidNumber
+attribute.
+
+If '--unix-home' is not supplied, the user's Unix home directory will be
+set to /home/DOMAIN/username
+
+If '--login-shell' is not supplied, the user's Unix login shell will be
+set to '/bin/sh'
+
+If '--gecos' is not supplied, the user's Unix gecos field will be set to the
+user's 'CN'
+
+Add 'idmap_ldb:use rfc2307 = Yes' to the smb.conf on DCs, to use these
+attributes for UID/GID mapping.
+
+The command may be run from the root userid or another authorised userid.
+The -H or --URL= option can be used to execute the command against a
+remote server.
+
+Example1:
+samba-tool user addunixattrs User1 10001
+
+Example1 shows how to add RFC2307 attributes to a domain enabled user
+account, Domain Users will be set as the user's gidNumber.
+
+The users Unix ID will be set to '10001', provided this ID isn't already
+in use.
+
+Example2:
+samba-tool user addunixattrs User2 10002 --gid-number=10001 \
+--unix-home=/home/User2
+
+Example2 shows how to add RFC2307 attributes to a domain enabled user
+account.
+
+The users Unix ID will be set to '10002', provided this ID isn't already
+in use.
+
+The users gidNumber attribute will be set to '10001'
+
+The users Unix home directory will be set to '/home/user2'
+
+Example3:
+samba-tool user addunixattrs User3 10003 --gid-number=10001 \
+--login-shell=/bin/false --gecos='User3 test'
+
+Example3 shows how to add RFC2307 attributes to a domain enabled user
+account.
+
+The users Unix ID will be set to '10003', provided this ID isn't already
+in use.
+
+The users gidNumber attribute will be set to '10001'
+
+The users Unix login shell will be set to '/bin/false'
+
+The users gecos field will be set to 'User3 test'
+
+Example4:
+samba-tool user addunixattrs User4 10004 --gid-number=10001 \
+--unix-home=/home/User4 --login-shell=/bin/bash --gecos='User4 test'
+
+Example4 shows how to add RFC2307 attributes to a domain enabled user
+account.
+
+The users Unix ID will be set to '10004', provided this ID isn't already
+in use.
+
+The users gidNumber attribute will be set to '10001'
+
+The users Unix home directory will be set to '/home/User4'
+
+The users Unix login shell will be set to '/bin/bash'
+
+The users gecos field will be set to 'User4 test'
+
+"""
+
+ synopsis = "%prog <username> <uid-number> [options]"
+
+ takes_options = [
+ Option("-H", "--URL", help="LDB URL for database or target server",
+ type=str, metavar="URL", dest="H"),
+ Option("--gid-number", help="User's Unix/RFC2307 GID", type=str),
+ Option("--unix-home", help="User's Unix/RFC2307 home directory",
+ type=str),
+ Option("--login-shell", help="User's Unix/RFC2307 login shell",
+ type=str),
+ Option("--gecos", help="User's Unix/RFC2307 GECOS field", type=str),
+ Option("--uid", help="User's Unix/RFC2307 username", type=str),
+ ]
+
+ takes_args = ["username", "uid-number"]
+
+ takes_optiongroups = {
+ "sambaopts": options.SambaOptions,
+ "credopts": options.CredentialsOptions,
+ "versionopts": options.VersionOptions,
+ }
+
+ def run(self, username, uid_number, credopts=None, sambaopts=None,
+ versionopts=None, H=None, gid_number=None, unix_home=None,
+ login_shell=None, gecos=None, uid=None):
+
+ lp = sambaopts.get_loadparm()
+ creds = credopts.get_credentials(lp)
+
+ samdb = SamDB(url=H, session_info=system_session(),
+ credentials=creds, lp=lp)
+
+ domaindn = samdb.domain_dn()
+
+ # Check that uidNumber supplied isn't already in use
+ filter = ("(&(objectClass=person)(uidNumber={}))"
+ .format(uid_number))
+ res = samdb.search(domaindn,
+ scope=ldb.SCOPE_SUBTREE,
+ expression=filter)
+ if (len(res) != 0):
+ raise CommandError("uidNumber {} is already being used."
+ .format(uid_number))
+
+ # Check user exists and doesn't have a uidNumber
+ filter = "(samaccountname={})".format(ldb.binary_encode(username))
+ res = samdb.search(domaindn,
+ scope=ldb.SCOPE_SUBTREE,
+ expression=filter)
+ if (len(res) == 0):
+ raise CommandError("Unable to find user '{}'".format(username))
+
+ user_dn = res[0].dn
+
+ if "uidNumber" in res[0]:
+ raise CommandError("User {} is already a Unix user."
+ .format(username))
+
+ if gecos is None:
+ gecos = res[0]["cn"][0]
+
+ if uid is None:
+ uid = res[0]["cn"][0]
+
+ if gid_number is None:
+ search_filter = ("(samaccountname={})"
+ .format(ldb.binary_encode('Domain Users')))
+ try:
+ res = samdb.search(domaindn,
+ scope=ldb.SCOPE_SUBTREE,
+ expression=search_filter)
+ for msg in res:
+ gid_number = msg.get('gidNumber')
+ except IndexError:
+ raise CommandError('Domain Users does not have a'
+ ' gidNumber attribute')
+
+ if login_shell is None:
+ login_shell = "/bin/sh"
+
+ if unix_home is None:
+ # obtain nETBIOS Domain Name
+ unix_domain = samdb.domain_netbios_name()
+ if unix_domain is None:
+ raise CommandError('Unable to find Unix domain')
+
+ tmpl = lp.get('template homedir')
+ unix_home = tmpl.replace('%D', unix_domain).replace('%U', username)
+
+ if not lp.get("idmap_ldb:use rfc2307"):
+ self.outf.write("You are setting a Unix/RFC2307 UID & GID. "
+ "You may want to set 'idmap_ldb:use rfc2307 = Yes'"
+ " in smb.conf to use the attributes for "
+ "XID/SID-mapping.\n")
+
+ user_mod = """
+dn: {0}
+changetype: modify
+add: uidNumber
+uidNumber: {1}
+add: gidnumber
+gidNumber: {2}
+add: gecos
+gecos: {3}
+add: uid
+uid: {4}
+add: loginshell
+loginShell: {5}
+add: unixHomeDirectory
+unixHomeDirectory: {6}
+""".format(user_dn, uid_number, gid_number, gecos, uid, login_shell, unix_home)
+
+ samdb.transaction_start()
+ try:
+ samdb.modify_ldif(user_mod)
+ except ldb.LdbError as e:
+ raise CommandError("Failed to modify user '{0}': {1}"
+ .format(username, e))
+ else:
+ samdb.transaction_commit()
+ self.outf.write("Modified User '{}' successfully\n"
+ .format(username))
diff --git a/python/samba/netcmd/user/auth/__init__.py b/python/samba/netcmd/user/auth/__init__.py
new file mode 100644
index 0000000..79dd128
--- /dev/null
+++ b/python/samba/netcmd/user/auth/__init__.py
@@ -0,0 +1,35 @@
+# Unix SMB/CIFS implementation.
+#
+# manage assigned authentication policies and silos on a user
+#
+# Copyright (C) Catalyst.Net Ltd. 2023
+#
+# Written by Rob van der Linde <rob@catalyst.net.nz>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from samba.netcmd import SuperCommand
+
+from .policy import cmd_user_auth_policy
+from .silo import cmd_user_auth_silo
+
+
+class cmd_user_auth(SuperCommand):
+ """Manage authentication policies and silos on a user."""
+
+ subcommands = {
+ "policy": cmd_user_auth_policy(),
+ "silo": cmd_user_auth_silo(),
+ }
diff --git a/python/samba/netcmd/user/auth/policy.py b/python/samba/netcmd/user/auth/policy.py
new file mode 100644
index 0000000..9c30370
--- /dev/null
+++ b/python/samba/netcmd/user/auth/policy.py
@@ -0,0 +1,170 @@
+# Unix SMB/CIFS implementation.
+#
+# manage assigned authentication policies on a user
+#
+# Copyright (C) Catalyst.Net Ltd. 2023
+#
+# Written by Rob van der Linde <rob@catalyst.net.nz>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import samba.getopt as options
+from samba.netcmd import Command, CommandError, Option, SuperCommand
+from samba.netcmd.domain.models import AuthenticationPolicy, User
+from samba.netcmd.domain.models.exceptions import ModelError
+
+
class cmd_user_auth_policy_assign(Command):
    """Set the assigned authentication policy on a user."""

    synopsis = "%prog <username> [options]"

    takes_args = ["username"]

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "credopts": options.CredentialsOptions,
        "hostopts": options.HostOptions,
    }

    takes_options = [
        Option("--policy", help="Authentication policy name.",
               action="store", dest="policy_name", type=str, required=True),
    ]

    def run(self, username, hostopts=None, sambaopts=None, credopts=None,
            policy_name=None):
        """Assign the named authentication policy to *username*.

        Raises CommandError if the user or policy cannot be found, or if
        saving the modified user entry fails.
        """
        ldb = self.ldb_connect(hostopts, sambaopts, credopts)

        # Look up both objects first; ModelError covers lookup failures.
        try:
            user = User.find(ldb, username)
            policy = AuthenticationPolicy.get(ldb, name=policy_name)
        except ModelError as e:
            raise CommandError(e)

        # User and policy exist.
        if user is None:
            raise CommandError(f"User {username} not found.")
        if policy is None:
            raise CommandError(f"Authentication policy {policy_name} not found.")

        # Set assigned policy (stored as the policy DN on the user).
        user.assigned_policy = policy.dn

        try:
            user.save(ldb)
        except ModelError as e:
            raise CommandError(f"Set assigned authentication policy failed: {e}")

        print(f"User {username} assigned to authentication policy {policy}",
              file=self.outf)
+
+
class cmd_user_auth_policy_remove(Command):
    """Remove the assigned authentication policy on a user."""

    synopsis = "%prog <username> [options]"

    takes_args = ["username"]

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "credopts": options.CredentialsOptions,
        "hostopts": options.HostOptions,
    }

    def run(self, username, hostopts=None, sambaopts=None, credopts=None):
        """Clear the assigned authentication policy of *username*.

        Raises CommandError if the user is not found or the save fails.
        """
        ldb = self.ldb_connect(hostopts, sambaopts, credopts)

        try:
            user = User.find(ldb, username)
        except ModelError as e:
            raise CommandError(e)

        # User exists
        if user is None:
            raise CommandError(f"User {username} not found.")

        # Get previous policy for display.
        # NOTE(review): when no policy was assigned, the final message prints
        # "... removed from authentication policy None" — confirm intended.
        if user.assigned_policy:
            try:
                policy = AuthenticationPolicy.get(ldb, dn=user.assigned_policy)
            except ModelError as e:
                raise CommandError(e)
        else:
            policy = None

        # Unset assigned authentication policy
        user.assigned_policy = None

        try:
            user.save(ldb)
        except ModelError as e:
            raise CommandError(f"Remove assigned authentication policy failed: {e}")

        print(f"User {username} removed from authentication policy {policy}",
              file=self.outf)
+
+
class cmd_user_auth_policy_view(Command):
    """View the current assigned authentication policy on a user."""

    synopsis = "%prog <username> [options]"

    takes_args = ["username"]

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "credopts": options.CredentialsOptions,
        "hostopts": options.HostOptions,
    }

    def run(self, username, hostopts=None, sambaopts=None, credopts=None):
        """Print the authentication policy assigned to *username*, if any."""
        ldb = self.ldb_connect(hostopts, sambaopts, credopts)

        try:
            user = User.find(ldb, username)

            # Check user exists before fetching policy.
            # (CommandError is not a ModelError, so it propagates out of
            # this try block unchanged.)
            if user is None:
                raise CommandError(f"User {username} not found.")

            # Only fetch the policy if one is assigned.
            if user.assigned_policy:
                policy = AuthenticationPolicy.get(ldb, dn=user.assigned_policy)
            else:
                policy = None

        except ModelError as e:
            raise CommandError(e)

        if policy:
            print(f"User {username} assigned to authentication policy {policy}",
                  file=self.outf)
        else:
            print(f"User {username} has no assigned authentication policy.",
                  file=self.outf)
+
+
class cmd_user_auth_policy(SuperCommand):
    """Manage authentication policies on a user."""

    # Dispatch table for "samba-tool user auth policy <subcommand>".
    subcommands = {
        "assign": cmd_user_auth_policy_assign(),
        "remove": cmd_user_auth_policy_remove(),
        "view": cmd_user_auth_policy_view(),
    }
diff --git a/python/samba/netcmd/user/auth/silo.py b/python/samba/netcmd/user/auth/silo.py
new file mode 100644
index 0000000..992f63c
--- /dev/null
+++ b/python/samba/netcmd/user/auth/silo.py
@@ -0,0 +1,189 @@
+# Unix SMB/CIFS implementation.
+#
+# manage assigned authentication silos on a user
+#
+# Copyright (C) Catalyst.Net Ltd. 2023
+#
+# Written by Rob van der Linde <rob@catalyst.net.nz>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import samba.getopt as options
+from samba.netcmd import Command, CommandError, Option, SuperCommand
+from samba.netcmd.domain.models import AuthenticationSilo, User
+from samba.netcmd.domain.models.exceptions import ModelError
+
+
class cmd_user_auth_silo_assign(Command):
    """Set the assigned authentication silo on a user."""

    synopsis = "%prog <username> [options]"

    takes_args = ["username"]

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "credopts": options.CredentialsOptions,
        "hostopts": options.HostOptions,
    }

    takes_options = [
        Option("--silo", help="Authentication silo name.",
               action="store", dest="silo_name", type=str, required=True),
    ]

    def run(self, username, hostopts=None, sambaopts=None, credopts=None,
            silo_name=None):
        """Assign the named authentication silo to *username*.

        Raises CommandError if the user or silo cannot be found, or if
        saving the modified user entry fails.
        """
        ldb = self.ldb_connect(hostopts, sambaopts, credopts)

        try:
            user = User.find(ldb, username)
            silo = AuthenticationSilo.get(ldb, name=silo_name)
        except ModelError as e:
            raise CommandError(e)

        # User and silo exist.
        if user is None:
            raise CommandError(f"User {username} not found.")
        if silo is None:
            raise CommandError(f"Authentication silo {silo_name} not found.")

        # Set assigned silo (stored as the silo DN on the user).
        user.assigned_silo = silo.dn

        try:
            user.save(ldb)
        except ModelError as e:
            raise CommandError(f"Set assigned authentication silo failed: {e}")

        # Display silo member status: "granted" when the user is also in the
        # silo's member list, "revoked" otherwise.
        if user.dn in silo.members:
            status = "granted"
        else:
            status = "revoked"

        print(f"User {username} assigned to authentication silo {silo} ({status})",
              file=self.outf)
+
+
class cmd_user_auth_silo_remove(Command):
    """Remove the assigned authentication silo on a user."""

    synopsis = "%prog <username> [options]"

    takes_args = ["username"]

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "credopts": options.CredentialsOptions,
        "hostopts": options.HostOptions,
    }

    def run(self, username, hostopts=None, sambaopts=None, credopts=None):
        """Clear the assigned authentication silo of *username*.

        Raises CommandError if the user is not found or the save fails.
        """
        ldb = self.ldb_connect(hostopts, sambaopts, credopts)

        try:
            user = User.find(ldb, username)
        except ModelError as e:
            raise CommandError(e)

        # User exists
        if user is None:
            raise CommandError(f"User {username} not found.")

        # Get previous silo for display.
        # NOTE(review): when no silo was assigned, the final message prints
        # "... removed from authentication silo None" — confirm intended.
        if user.assigned_silo:
            try:
                silo = AuthenticationSilo.get(ldb, dn=user.assigned_silo)
            except ModelError as e:
                raise CommandError(e)
        else:
            silo = None

        # Unset assigned authentication silo
        user.assigned_silo = None

        try:
            user.save(ldb)
        except ModelError as e:
            raise CommandError(f"Remove assigned authentication silo failed: {e}")

        # Display silo member status: "granted" when the user is still in the
        # previous silo's member list, "revoked" otherwise (or no silo).
        if silo and user.dn in silo.members:
            status = "granted"
        else:
            status = "revoked"

        print(f"User {username} removed from authentication silo {silo} ({status})",
              file=self.outf)
+
+
class cmd_user_auth_silo_view(Command):
    """View the current assigned authentication silo on a user."""

    synopsis = "%prog <username> [options]"

    takes_args = ["username"]

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "credopts": options.CredentialsOptions,
        "hostopts": options.HostOptions,
    }

    def run(self, username, hostopts=None, sambaopts=None, credopts=None):
        """Print the authentication silo assigned to *username*, if any."""
        ldb = self.ldb_connect(hostopts, sambaopts, credopts)

        try:
            user = User.find(ldb, username)

            # Check user exists before fetching silo.
            if user is None:
                raise CommandError(f"User {username} not found.")

            # Only fetch the silo if one is assigned.
            if user.assigned_silo:
                silo = AuthenticationSilo.get(ldb, dn=user.assigned_silo)
            else:
                silo = None

        except ModelError as e:
            raise CommandError(e)

        # Display silo member status: "granted" when the user is in the
        # silo's member list, "revoked" otherwise (or no silo).
        if silo and user.dn in silo.members:
            status = "granted"
        else:
            status = "revoked"

        if silo:
            print(f"User {username} assigned to authentication silo {silo} ({status})",
                  file=self.outf)
        else:
            print(f"User {username} has no assigned authentication silo.",
                  file=self.outf)
+
+
class cmd_user_auth_silo(SuperCommand):
    """Manage authentication silos on a user."""

    # Dispatch table for "samba-tool user auth silo <subcommand>".
    subcommands = {
        "assign": cmd_user_auth_silo_assign(),
        "remove": cmd_user_auth_silo_remove(),
        "view": cmd_user_auth_silo_view(),
    }
diff --git a/python/samba/netcmd/user/delete.py b/python/samba/netcmd/user/delete.py
new file mode 100644
index 0000000..f8858b0
--- /dev/null
+++ b/python/samba/netcmd/user/delete.py
@@ -0,0 +1,87 @@
+# user management
+#
+# delete user
+#
+# Copyright Jelmer Vernooij 2010 <jelmer@samba.org>
+# Copyright Theresa Halloran 2011 <theresahalloran@gmail.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import samba.getopt as options
+from samba import ldb
+from samba.auth import system_session
+from samba.netcmd import Command, CommandError, Option
+from samba.samdb import SamDB
+
+
class cmd_user_delete(Command):
    """Delete a user.

This command deletes a user account from the Active Directory domain. The username specified on the command is the sAMAccountName.

Once the account is deleted, all permissions and memberships associated with that account are deleted. If a new user account is added with the same name as a previously deleted account name, the new user does not have the previous permissions. The new account user will be assigned a new security identifier (SID) and permissions and memberships will have to be added.

The command may be run from the root userid or another authorized userid. The -H or --URL= option can be used to execute the command against a remote server.

Example1:
samba-tool user delete User1 -H ldap://samba.samdom.example.com --username=administrator --password=passw1rd

Example1 shows how to delete a user in the domain against a remote LDAP server. The -H parameter is used to specify the remote target server. The --username= and --password= options are used to pass the username and password of a user that exists on the remote server and is authorized to issue the command on that server.

Example2:
sudo samba-tool user delete User2

Example2 shows how to delete a user in the domain against the local server. sudo is used so a user may run the command as root.

"""
    synopsis = "%prog <username> [options]"

    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server", type=str,
               metavar="URL", dest="H"),
    ]

    takes_args = ["username"]
    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "credopts": options.CredentialsOptions,
        "versionopts": options.VersionOptions,
    }

    def run(self, username, credopts=None, sambaopts=None, versionopts=None,
            H=None):
        """Delete the user whose sAMAccountName is *username*.

        Raises CommandError if the user cannot be found or deleted.
        """
        lp = sambaopts.get_loadparm()
        creds = credopts.get_credentials(lp, fallback_machine=True)

        samdb = SamDB(url=H, session_info=system_session(),
                      credentials=creds, lp=lp)

        # 805306368 (0x30000000) is the sAMAccountType of normal user
        # accounts (ATYPE_NORMAL_ACCOUNT), so machine/trust accounts are
        # excluded from the match.
        filter = ("(&(sAMAccountName=%s)(sAMAccountType=805306368))" %
                  ldb.binary_encode(username))

        try:
            res = samdb.search(base=samdb.domain_dn(),
                               scope=ldb.SCOPE_SUBTREE,
                               expression=filter,
                               attrs=["dn"])
            # res[0] raises IndexError when no account matched.
            user_dn = res[0].dn
        except IndexError:
            raise CommandError('Unable to find user "%s"' % (username))

        try:
            samdb.delete(user_dn)
        except Exception as e:
            raise CommandError('Failed to remove user "%s"' % username, e)
        self.outf.write("Deleted user %s\n" % username)
diff --git a/python/samba/netcmd/user/disable.py b/python/samba/netcmd/user/disable.py
new file mode 100644
index 0000000..5042eea
--- /dev/null
+++ b/python/samba/netcmd/user/disable.py
@@ -0,0 +1,64 @@
+# user management
+#
+# disable user
+#
+# Copyright Jelmer Vernooij 2010 <jelmer@samba.org>
+# Copyright Theresa Halloran 2011 <theresahalloran@gmail.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import samba.getopt as options
+from samba import ldb
+from samba.auth import system_session
+from samba.netcmd import Command, CommandError, Option
+from samba.samdb import SamDB
+
+
class cmd_user_disable(Command):
    """Disable a user.

The username specified on the command is the sAMAccountName. The account
may also be selected with the --filter option instead of a username.
"""

    synopsis = "%prog (<username>|--filter <filter>) [options]"

    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server", type=str,
               metavar="URL", dest="H"),
        # Bug fix: the help text previously read "LDAP Filter to set
        # password on" — copy-pasted from the setpassword command.
        Option("--filter", help="LDAP filter to select the user to disable", type=str),
    ]

    takes_args = ["username?"]

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "credopts": options.CredentialsOptions,
        "versionopts": options.VersionOptions,
    }

    def run(self, username=None, sambaopts=None, credopts=None,
            versionopts=None, filter=None, H=None):
        """Disable the account matched by *username* or *filter*.

        Raises CommandError if neither selector is given, or if the
        disable operation fails.
        """
        if username is None and filter is None:
            raise CommandError("Either the username or '--filter' must be specified!")

        # Derive a filter from the username when none was given explicitly.
        if filter is None:
            filter = "(&(objectClass=user)(sAMAccountName=%s))" % (ldb.binary_encode(username))

        lp = sambaopts.get_loadparm()
        creds = credopts.get_credentials(lp, fallback_machine=True)

        samdb = SamDB(url=H, session_info=system_session(),
                      credentials=creds, lp=lp)
        try:
            samdb.disable_account(filter)
        except Exception as msg:
            raise CommandError("Failed to disable user '%s': %s" % (username or filter, msg))
diff --git a/python/samba/netcmd/user/edit.py b/python/samba/netcmd/user/edit.py
new file mode 100644
index 0000000..4a850f4
--- /dev/null
+++ b/python/samba/netcmd/user/edit.py
@@ -0,0 +1,136 @@
+# user management
+#
+# user edit command
+#
+# Copyright Jelmer Vernooij 2010 <jelmer@samba.org>
+# Copyright Theresa Halloran 2011 <theresahalloran@gmail.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import os
+from subprocess import CalledProcessError, check_call
+
+import samba.getopt as options
+from samba import dsdb, ldb
+from samba.auth import system_session
+from samba.common import get_bytes
+from samba.netcmd import Command, CommandError, Option, common
+from samba.samdb import SamDB
+
+
class cmd_user_edit(Command):
    """Modify User AD object.

This command will allow editing of a user account in the Active Directory
domain. You will then be able to add or change attributes and their values.

The username specified on the command is the sAMAccountName.

The command may be run from the root userid or another authorized userid.

The -H or --URL= option can be used to execute the command against a remote
server.

Example1:
samba-tool user edit User1 -H ldap://samba.samdom.example.com \\
    -U administrator --password=passw1rd

Example1 shows how to edit a users attributes in the domain against a remote
LDAP server.

The -H parameter is used to specify the remote target server.

Example2:
samba-tool user edit User2

Example2 shows how to edit a users attributes in the domain against a local
LDAP server.

Example3:
samba-tool user edit User3 --editor=nano

Example3 shows how to edit a users attributes in the domain against a local
LDAP server using the 'nano' editor.

"""
    synopsis = "%prog <username> [options]"

    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server",
               type=str, metavar="URL", dest="H"),
        Option("--editor", help="Editor to use instead of the system default,"
               " or 'vi' if no system default is set.", type=str),
    ]

    takes_args = ["username"]
    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "credopts": options.CredentialsOptions,
        "versionopts": options.VersionOptions,
    }

    def run(self, username, credopts=None, sambaopts=None, versionopts=None,
            H=None, editor=None):
        """Open the user's LDIF in an editor and apply the resulting diff.

        Raises CommandError if the user is not found, the editor exits
        with an error, or the modification is rejected by the server.
        """
        import tempfile

        lp = sambaopts.get_loadparm()
        creds = credopts.get_credentials(lp, fallback_machine=True)
        samdb = SamDB(url=H, session_info=system_session(),
                      credentials=creds, lp=lp)

        filter = ("(&(sAMAccountType=%d)(sAMAccountName=%s))" %
                  (dsdb.ATYPE_NORMAL_ACCOUNT, ldb.binary_encode(username)))

        domaindn = samdb.domain_dn()

        try:
            res = samdb.search(base=domaindn,
                               expression=filter,
                               scope=ldb.SCOPE_SUBTREE)
            # res[0] raises IndexError when no account matched.
            user_dn = res[0].dn
        except IndexError:
            raise CommandError('Unable to find user "%s"' % (username))

        for msg in res:
            result_ldif = common.get_ldif_for_editor(samdb, msg)

            # Fall back to $EDITOR, then 'vi', when no editor was given.
            if editor is None:
                editor = os.environ.get('EDITOR')
                if editor is None:
                    editor = 'vi'

            with tempfile.NamedTemporaryFile(suffix=".tmp") as t_file:
                t_file.write(get_bytes(result_ldif))
                t_file.flush()
                try:
                    check_call([editor, t_file.name])
                except CalledProcessError as e:
                    # Bug fix: the previous code did
                    #     raise CalledProcessError("ERROR: ", e)
                    # which misuses the CalledProcessError(returncode, cmd)
                    # constructor and produced a meaningless exception.
                    # Report a proper CommandError instead, matching the
                    # error style of the rest of this module.
                    raise CommandError("Failed to edit user '%s': %s"
                                       % (username, e))
                with open(t_file.name) as edited_file:
                    edited_message = edited_file.read()

            msgs_edited = samdb.parse_ldif(edited_message)
            msg_edited = next(msgs_edited)[1]

            # Apply only the difference between the original and the
            # edited entry.
            res_msg_diff = samdb.msg_diff(msg, msg_edited)
            if len(res_msg_diff) == 0:
                self.outf.write("Nothing to do\n")
                return

            try:
                samdb.modify(res_msg_diff)
            except Exception as e:
                raise CommandError("Failed to modify user '%s': " % username, e)

        self.outf.write("Modified User '%s' successfully\n" % username)
diff --git a/python/samba/netcmd/user/enable.py b/python/samba/netcmd/user/enable.py
new file mode 100644
index 0000000..158ddbe
--- /dev/null
+++ b/python/samba/netcmd/user/enable.py
@@ -0,0 +1,94 @@
+# user management
+#
+# enable user
+#
+# Copyright Jelmer Vernooij 2010 <jelmer@samba.org>
+# Copyright Theresa Halloran 2011 <theresahalloran@gmail.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import samba.getopt as options
+from samba import ldb
+from samba.auth import system_session
+from samba.netcmd import Command, CommandError, Option
+from samba.samdb import SamDB
+
+
class cmd_user_enable(Command):
    """Enable a user.

This command enables a user account for logon to an Active Directory domain. The username specified on the command is the sAMAccountName. The username may also be specified using the --filter option.

There are many reasons why an account may become disabled. These include:
- If a user exceeds the account policy for logon attempts
- If an administrator disables the account
- If the account expires

The samba-tool user enable command allows an administrator to enable an account which has become disabled.

Additionally, the enable function allows an administrator to have a set of created user accounts defined and setup with default permissions that can be easily enabled for use.

The command may be run from the root userid or another authorized userid. The -H or --URL= option can be used to execute the command against a remote server.

Example1:
samba-tool user enable Testuser1 --URL=ldap://samba.samdom.example.com --username=administrator --password=passw1rd

Example1 shows how to enable a user in the domain against a remote LDAP server. The --URL parameter is used to specify the remote target server. The --username= and --password= options are used to pass the username and password of a user that exists on the remote server and is authorized to update that server.

Example2:
sudo samba-tool user enable Testuser2

Example2 shows how to enable user Testuser2 for use in the domain on the local server. sudo is used so a user may run the command as root.

"""
    # Doc fix above: Example2 previously read "su samba-tool ..." while the
    # explanation says sudo is used — it now shows sudo.

    synopsis = "%prog (<username>|--filter <filter>) [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server", type=str,
               metavar="URL", dest="H"),
        # Bug fix: the help text previously read "LDAP Filter to set
        # password on" — copy-pasted from the setpassword command.
        Option("--filter", help="LDAP filter to select the user to enable", type=str),
    ]

    takes_args = ["username?"]

    def run(self, username=None, sambaopts=None, credopts=None,
            versionopts=None, filter=None, H=None):
        """Enable the account matched by *username* or *filter*.

        Raises CommandError if neither selector is given, or if the
        enable operation fails.
        """
        if username is None and filter is None:
            raise CommandError("Either the username or '--filter' must be specified!")

        # Derive a filter from the username when none was given explicitly.
        if filter is None:
            filter = "(&(objectClass=user)(sAMAccountName=%s))" % (ldb.binary_encode(username))

        lp = sambaopts.get_loadparm()
        creds = credopts.get_credentials(lp, fallback_machine=True)

        samdb = SamDB(url=H, session_info=system_session(),
                      credentials=creds, lp=lp)
        try:
            samdb.enable_account(filter)
        except Exception as msg:
            raise CommandError("Failed to enable user '%s': %s" % (username or filter, msg))
        self.outf.write("Enabled user '%s'\n" % (username or filter))
class cmd_user_getgroups(Command):
    """Get the direct group memberships of a user account.

The username specified on the command is the sAMAccountName."""
    synopsis = "%prog <username> [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server",
               type=str, metavar="URL", dest="H"),
        Option("--full-dn", dest="full_dn",
               default=False,
               action='store_true',
               help="Display DN instead of the sAMAccountName."),
    ]

    takes_args = ["username"]

    def run(self,
            username,
            credopts=None,
            sambaopts=None,
            versionopts=None,
            H=None,
            full_dn=False):
        """Print the user's primary group followed by all direct memberships.

        Raises CommandError if the user, the primary group, or any group
        referenced by memberOf cannot be found.
        """
        lp = sambaopts.get_loadparm()
        creds = credopts.get_credentials(lp)

        samdb = SamDB(url=H, session_info=system_session(),
                      credentials=creds, lp=lp)

        filter = ("(&(sAMAccountName=%s)(objectClass=user))" %
                  ldb.binary_encode(username))
        try:
            res = samdb.search(base=samdb.domain_dn(),
                               expression=filter,
                               scope=ldb.SCOPE_SUBTREE,
                               attrs=["objectSid",
                                      "memberOf",
                                      "primaryGroupID"])
            user_sid_binary = res[0].get('objectSid', idx=0)
            user_sid = ndr_unpack(security.dom_sid, user_sid_binary)
            (user_dom_sid, user_rid) = user_sid.split()
            # NOTE(review): user_sid_dn appears to be unused below.
            user_sid_dn = "<SID=%s>" % user_sid
            user_pgid = int(res[0].get('primaryGroupID', idx=0))
            # memberOf holds only the direct memberships; the primary
            # group is handled separately below.
            user_groups = res[0].get('memberOf')
            if user_groups is None:
                user_groups = []
        except IndexError:
            raise CommandError("Unable to find user '%s'" % (username))

        # The primary group does not appear in memberOf; reconstruct its
        # SID from the domain SID plus the primaryGroupID RID.
        primarygroup_sid_dn = "<SID=%s-%u>" % (user_dom_sid, user_pgid)

        filter = "(objectClass=group)"
        try:
            res = samdb.search(base=primarygroup_sid_dn,
                               expression=filter,
                               scope=ldb.SCOPE_BASE,
                               attrs=['sAMAccountName'])
            primary_group_dn = str(res[0].dn)
            primary_group_name = res[0].get('sAMAccountName')
        except IndexError:
            raise CommandError("Unable to find primary group '%s'" % (primarygroup_sid_dn))

        if full_dn:
            self.outf.write("%s\n" % primary_group_dn)
            for group_dn in user_groups:
                self.outf.write("%s\n" % group_dn)
            return

        # Resolve each membership DN to its sAMAccountName.
        group_names = []
        for gdn in user_groups:
            try:
                res = samdb.search(base=gdn,
                                   expression=filter,
                                   scope=ldb.SCOPE_BASE,
                                   attrs=['sAMAccountName'])
                group_names.extend(res[0].get('sAMAccountName'))
            except IndexError:
                raise CommandError("Unable to find group '%s'" % (gdn))

        self.outf.write("%s\n" % primary_group_name)
        for group_name in group_names:
            self.outf.write("%s\n" % group_name)
diff --git a/python/samba/netcmd/user/list.py b/python/samba/netcmd/user/list.py
new file mode 100644
index 0000000..10605ca
--- /dev/null
+++ b/python/samba/netcmd/user/list.py
@@ -0,0 +1,108 @@
+# user management
+#
+# list users
+#
+# Copyright Jelmer Vernooij 2010 <jelmer@samba.org>
+# Copyright Theresa Halloran 2011 <theresahalloran@gmail.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import samba.getopt as options
+from samba import dsdb, ldb
+from samba.auth import system_session
+from samba.netcmd import Command, Option
+from samba.samdb import SamDB
+
+
class cmd_user_list(Command):
    """List all users."""

    synopsis = "%prog [options]"

    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server", type=str,
               metavar="URL", dest="H"),
        Option("--hide-expired",
               help="Do not list expired user accounts",
               default=False,
               action='store_true'),
        Option("--hide-disabled",
               default=False,
               action='store_true',
               help="Do not list disabled user accounts"),
        Option("-b", "--base-dn",
               help="Specify base DN to use",
               type=str),
        Option("--full-dn", dest="full_dn",
               default=False,
               action='store_true',
               help="Display DN instead of the sAMAccountName.")
    ]

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "credopts": options.CredentialsOptions,
        "versionopts": options.VersionOptions,
    }

    def run(self,
            sambaopts=None,
            credopts=None,
            versionopts=None,
            H=None,
            hide_expired=False,
            hide_disabled=False,
            base_dn=None,
            full_dn=False):
        """Print one line per user account, optionally hiding expired or
        disabled accounts and optionally showing full DNs."""
        lp = sambaopts.get_loadparm()
        creds = credopts.get_credentials(lp, fallback_machine=True)

        samdb = SamDB(url=H, session_info=system_session(),
                      credentials=creds, lp=lp)

        # Search below the requested base DN (normalized into the domain)
        # or the whole domain when none was given.
        if base_dn:
            search_dn = samdb.normalize_dn_in_domain(base_dn)
        else:
            search_dn = samdb.domain_dn()

        # Optional clause excluding accounts whose expiry time has passed
        # (accountExpires=0 means "never expires").
        expires_clause = ""
        if hide_expired:
            expires_clause = "(|(accountExpires=0)(accountExpires>=%u))" % (
                samdb.get_nttime())

        # Optional clause excluding accounts with the disabled UAC bit set.
        disabled_clause = ""
        if hide_disabled:
            disabled_clause = "(!(userAccountControl:%s:=%u))" % (
                ldb.OID_COMPARATOR_AND, dsdb.UF_ACCOUNTDISABLE)

        expression = "(&(objectClass=user)(userAccountControl:%s:=%u)%s%s)" % (
            ldb.OID_COMPARATOR_AND,
            dsdb.UF_NORMAL_ACCOUNT,
            disabled_clause,
            expires_clause)

        matches = samdb.search(search_dn,
                               scope=ldb.SCOPE_SUBTREE,
                               expression=expression,
                               attrs=["samaccountname"])

        for entry in matches:
            if full_dn:
                self.outf.write("%s\n" % entry.get("dn"))
            else:
                self.outf.write("%s\n" % entry.get("samaccountname", idx=0))
diff --git a/python/samba/netcmd/user/move.py b/python/samba/netcmd/user/move.py
new file mode 100644
index 0000000..bcb0f9e
--- /dev/null
+++ b/python/samba/netcmd/user/move.py
@@ -0,0 +1,106 @@
+# user management
+#
+# user move command
+#
+# Copyright Jelmer Vernooij 2010 <jelmer@samba.org>
+# Copyright Theresa Halloran 2011 <theresahalloran@gmail.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import samba.getopt as options
+from samba import dsdb, ldb
+from samba.auth import system_session
+from samba.netcmd import Command, CommandError, Option
+from samba.samdb import SamDB
+
+
class cmd_user_move(Command):
    """Move a user to an organizational unit/container.

    This command moves a user account into the specified organizational unit
    or container.
    The username specified on the command is the sAMAccountName.
    The name of the organizational unit or container can be specified as a
    full DN or without the domainDN component.

    The command may be run from the root userid or another authorized userid.

    The -H or --URL= option can be used to execute the command against a remote
    server.

    Example1:
    samba-tool user move User1 'OU=OrgUnit,DC=samdom,DC=example,DC=com' \\
        -H ldap://samba.samdom.example.com -U administrator

    Example1 shows how to move a user User1 into the 'OrgUnit' organizational
    unit on a remote LDAP server.

    The -H parameter is used to specify the remote target server.

    Example2:
    samba-tool user move User1 CN=Users

    Example2 shows how to move a user User1 back into the CN=Users container
    on the local server.
    """

    synopsis = "%prog <username> <new_parent_dn> [options]"

    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server",
               type=str, metavar="URL", dest="H"),
    ]

    takes_args = ["username", "new_parent_dn"]
    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "credopts": options.CredentialsOptions,
        "versionopts": options.VersionOptions,
    }

    def run(self, username, new_parent_dn, credopts=None, sambaopts=None,
            versionopts=None, H=None):
        lp = sambaopts.get_loadparm()
        creds = credopts.get_credentials(lp, fallback_machine=True)
        samdb = SamDB(url=H, session_info=system_session(),
                      credentials=creds, lp=lp)
        domain_dn = ldb.Dn(samdb, samdb.domain_dn())

        # Locate the account anywhere in the domain via its sAMAccountName.
        search_filter = ("(&(sAMAccountType=%d)(sAMAccountName=%s))" %
                         (dsdb.ATYPE_NORMAL_ACCOUNT,
                          ldb.binary_encode(username)))
        try:
            res = samdb.search(base=domain_dn,
                               expression=search_filter,
                               scope=ldb.SCOPE_SUBTREE)
            user_dn = res[0].dn
        except IndexError:
            raise CommandError('Unable to find user "%s"' % (username))

        try:
            full_new_parent_dn = samdb.normalize_dn_in_domain(new_parent_dn)
        except Exception as e:
            raise CommandError('Invalid new_parent_dn "%s": %s' %
                               (new_parent_dn, e))

        # Keep only the RDN (first component) and re-parent it under the
        # target container.
        new_user_dn = ldb.Dn(samdb, str(user_dn))
        new_user_dn.remove_base_components(len(user_dn) - 1)
        new_user_dn.add_base(full_new_parent_dn)

        try:
            samdb.rename(user_dn, new_user_dn)
        except Exception as e:
            raise CommandError('Failed to move user "%s"' % username, e)
        self.outf.write('Moved user "%s" into "%s"\n' %
                        (username, full_new_parent_dn))
diff --git a/python/samba/netcmd/user/password.py b/python/samba/netcmd/user/password.py
new file mode 100644
index 0000000..3b26b62
--- /dev/null
+++ b/python/samba/netcmd/user/password.py
@@ -0,0 +1,73 @@
+# user management
+#
+# user password
+#
+# Copyright Jelmer Vernooij 2010 <jelmer@samba.org>
+# Copyright Theresa Halloran 2011 <theresahalloran@gmail.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from getpass import getpass
+
+import samba.getopt as options
+from samba.net import Net
+from samba.netcmd import Command, CommandError, Option
+
+
class cmd_user_password(Command):
    """Change password for a user account (the one provided in authentication).
"""

    synopsis = "%prog [options]"

    takes_options = [
        Option("--newpassword", help="New password", type=str),
    ]

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "credopts": options.CredentialsOptions,
        "versionopts": options.VersionOptions,
    }

    def run(self, credopts=None, sambaopts=None, versionopts=None,
            newpassword=None):

        lp = sambaopts.get_loadparm()
        creds = credopts.get_credentials(lp)

        # get old password now, to get the password prompts in the right order
        old_password = creds.get_password()

        net = Net(creds, lp, server=credopts.ipaddress)

        # Prompt (twice) until we have a non-empty, confirmed password,
        # unless one was already supplied via --newpassword.
        password = newpassword
        while not password:
            password = getpass("New Password: ")
            confirmation = getpass("Retype Password: ")
            if password != confirmation:
                password = None
                self.outf.write("Sorry, passwords do not match.\n")

        try:
            if not isinstance(password, str):
                password = password.decode('utf8')
            net.change_password(password)
        except Exception as msg:
            # FIXME: catch more specific exception
            raise CommandError("Failed to change password : %s" % msg)
        self.outf.write("Changed password OK\n")
diff --git a/python/samba/netcmd/user/readpasswords/__init__.py b/python/samba/netcmd/user/readpasswords/__init__.py
new file mode 100644
index 0000000..75ba313
--- /dev/null
+++ b/python/samba/netcmd/user/readpasswords/__init__.py
@@ -0,0 +1,25 @@
+# user management
+#
+# user readpasswords commands
+#
+# Copyright Jelmer Vernooij 2010 <jelmer@samba.org>
+# Copyright Theresa Halloran 2011 <theresahalloran@gmail.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from .getpassword import cmd_user_getpassword
+from .show import cmd_user_show
+from .syncpasswords import cmd_user_syncpasswords
+from .get_kerberos_ticket import cmd_user_get_kerberos_ticket
diff --git a/python/samba/netcmd/user/readpasswords/common.py b/python/samba/netcmd/user/readpasswords/common.py
new file mode 100644
index 0000000..6d44881
--- /dev/null
+++ b/python/samba/netcmd/user/readpasswords/common.py
@@ -0,0 +1,907 @@
+# user management
+#
+# common code
+#
+# Copyright Jelmer Vernooij 2010 <jelmer@samba.org>
+# Copyright Theresa Halloran 2011 <theresahalloran@gmail.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import base64
+import builtins
+import binascii
+import errno
+import io
+import os
+
+import ldb
+from samba import credentials, nttime2float
+from samba.auth import system_session
+from samba.common import get_bytes, get_string
+from samba.dcerpc import drsblobs, security, gmsa
+from samba.ndr import ndr_unpack
+from samba.netcmd import Command, CommandError
+from samba.samdb import SamDB
+
+
+# python[3]-gpgme is abandoned since ubuntu 1804 and debian 9
+# have to use python[3]-gpg instead
+# The API is different, need to adapt.
+
def _gpgme_decrypt(encrypted_bytes):
    """
    Use python[3]-gpgme to decrypt GPG.
    """
    context = gpgme.Context()
    context.armor = True  # input is ASCII-armored
    plaintext = io.BytesIO()
    context.decrypt(io.BytesIO(encrypted_bytes), plaintext)
    return plaintext.getvalue()
+
+
def _gpg_decrypt(encrypted_bytes):
    """
    Use python[3]-gpg to decrypt GPG.
    """
    ctx = gpg.Context(armor=True)
    ciphertext = gpg.Data(string=encrypted_bytes)
    # decrypt() returns (plaintext, result, verify_result); we only
    # need the plaintext.
    plaintext, _result, _verify = ctx.decrypt(ciphertext)
    return plaintext
+
+
# Pick whichever GPG binding is installed; python[3]-gpgme is tried
# first, then python[3]-gpg.  gpg_decrypt stays None if neither imports.
gpg_decrypt = None

if not gpg_decrypt:
    try:
        import gpgme
        gpg_decrypt = _gpgme_decrypt
    except ImportError:
        pass

if not gpg_decrypt:
    try:
        import gpg
        gpg_decrypt = _gpg_decrypt
    except ImportError:
        pass

# Help text for the --decrypt-samba-gpg option reflects whether SambaGPG
# decryption is available in this environment.
if gpg_decrypt:
    decrypt_samba_gpg_help = ("Decrypt the SambaGPG password as "
                              "cleartext source")
else:
    decrypt_samba_gpg_help = ("Decrypt the SambaGPG password not supported, "
                              "python[3]-gpgme or python[3]-gpg required")
+
+
# Virtual attributes that cannot be served in this environment; each
# entry carries a "reason" string explaining why.
disabled_virtual_attributes = {
}

# Virtual password attributes supported by GetPasswordCommand.  The
# optional "flags" are extra ldb schema flags applied when the attribute
# is injected into the samdb schema (see inject_virtual_attributes).
virtual_attributes = {
    "virtualClearTextUTF8": {
        "flags": ldb.ATTR_FLAG_FORCE_BASE64_LDIF,
    },
    "virtualClearTextUTF16": {
        "flags": ldb.ATTR_FLAG_FORCE_BASE64_LDIF,
    },
    "virtualSambaGPG": {
        "flags": ldb.ATTR_FLAG_FORCE_BASE64_LDIF,
    },
    "unicodePwd": {
        "flags": ldb.ATTR_FLAG_FORCE_BASE64_LDIF,
    },
}
+
+
def get_crypt_value(alg, utf8pw, rounds=0):
    """Hash utf8pw with crypt(3) using scheme $5$ (SHA256) or $6$ (SHA512).

    Raises NotImplementedError when the platform crypt(3) does not
    support the requested scheme.
    """
    # Expected hash length (excluding the salt prefix) per scheme.
    hash_lengths = {"5": 43, "6": 86}
    assert alg in hash_lengths

    # The salt needs to be in [A-Za-z0-9./]
    # base64 is close enough and as we had 16
    # random bytes but only need 16 characters
    # we can ignore the possible == at the end
    # of the base64 string
    # we just need to replace '+' by '.'
    raw_salt = os.urandom(16)
    b64salt = base64.b64encode(raw_salt)[0:16].replace(b'+', b'.').decode('utf8')

    if rounds != 0:
        crypt_salt = "$%s$rounds=%s$%s$" % (alg, rounds, b64salt)
    else:
        crypt_salt = "$%s$%s$" % (alg, b64salt)

    crypt_value = crypt.crypt(utf8pw, crypt_salt)
    if crypt_value is None:
        raise NotImplementedError("crypt.crypt(%s) returned None" % (crypt_salt))

    expected_len = len(crypt_salt) + hash_lengths[alg]
    if len(crypt_value) != expected_len:
        raise NotImplementedError("crypt.crypt(%s) returned a value with length %d, expected length is %d" % (
            crypt_salt, len(crypt_value), expected_len))
    return crypt_value
+
+
# Probe for SHA1 support; virtualSSHA needs it.
try:
    import hashlib
    hashlib.sha1()
    virtual_attributes["virtualSSHA"] = {
    }
except ImportError as e:
    reason = "hashlib.sha1()"
    reason += " required"
    disabled_virtual_attributes["virtualSSHA"] = {
        "reason": reason,
    }

# Probe crypt(3) support for the $5$ (SHA256) and $6$ (SHA512) schemes;
# only register the matching virtualCryptSHA* attribute when usable.
for (alg, attr) in [("5", "virtualCryptSHA256"), ("6", "virtualCryptSHA512")]:
    try:
        import crypt
        get_crypt_value(alg, "")
        virtual_attributes[attr] = {
        }
    except ImportError as e:
        reason = "crypt"
        reason += " required"
        disabled_virtual_attributes[attr] = {
            "reason": reason,
        }
    except NotImplementedError as e:
        reason = "modern '$%s$' salt in crypt(3) required" % (alg)
        disabled_virtual_attributes[attr] = {
            "reason": reason,
        }

# Add the wDigest virtual attributes, virtualWDigest01 to virtualWDigest29
for x in range(1, 30):
    virtual_attributes["virtualWDigest%02d" % x] = {}

# Add Kerberos virtual attributes
virtual_attributes["virtualKerberosSalt"] = {}

virtual_attributes_help = "The attributes to display (comma separated). "
virtual_attributes_help += "Possible supported virtual attributes: %s" % ", ".join(sorted(virtual_attributes.keys()))
if len(disabled_virtual_attributes) != 0:
    # Fix: a separating space was missing, so this sentence ran directly
    # into the last attribute name of the previous one in --help output.
    virtual_attributes_help += " Unsupported virtual attributes: %s" % ", ".join(sorted(disabled_virtual_attributes.keys()))
+
+
class GetPasswordCommand(Command):
    """Base class for commands that read password attributes.

    Provides helpers to connect to a SAM database with SYSTEM
    privileges and to compute the virtual password attributes
    (virtualClearText*, virtualSSHA, virtualCryptSHA*, virtualWDigestNN,
    virtualKerberosSalt, virtualSambaGPG, unicodePwd) for one account.
    """

    def __init__(self):
        super().__init__()
        # loadparm context; subclasses set this before calling
        # connect_for_passwords().
        self.lp = None

    def inject_virtual_attributes(self, samdb):
        """Register the virtual attributes as hidden schema attributes
        so they can appear in search results on this connection."""
        # We use sort here in order to have a predictable processing order
        # this might not be strictly needed, but also doesn't hurt here
        for a in sorted(virtual_attributes.keys()):
            flags = ldb.ATTR_FLAG_HIDDEN | virtual_attributes[a].get("flags", 0)
            samdb.schema_attribute_add(a, flags, ldb.SYNTAX_OCTET_STRING)

    def connect_for_passwords(self, url,
                              creds=None,
                              require_ldapi=True,
                              verbose=False):
        """Open a SamDB connection able to read password attributes.

        :param url: LDB URL; an ldapi:// URL is connected anonymously so
            the privileged socket grants SYSTEM access.
        :param creds: credentials used for non-ldapi URLs.
        :param require_ldapi: when True, only ldapi:// (or a local
            default) is accepted and the connection is verified to run
            as SID_NT_SYSTEM.
        :raises CommandError: on an unsuitable URL or insufficient
            privileges.
        """

        # using anonymous here, results in no authentication
        # which means we can get system privileges via
        # the privileged ldapi socket
        anon_creds = credentials.Credentials()
        anon_creds.set_anonymous()

        if url is None:
            # Fix: previously `url.lower()` raised AttributeError when url
            # was None and require_ldapi was set; raise the intended error.
            if require_ldapi:
                raise CommandError("--url requires an ldapi:// url for this command")
        elif url.lower().startswith("ldapi://"):
            creds = anon_creds
        elif require_ldapi:
            raise CommandError("--url requires an ldapi:// url for this command")

        if verbose:
            self.outf.write("Connecting to '%s'\n" % url)

        samdb = SamDB(url=url, session_info=system_session(),
                      credentials=creds, lp=self.lp)

        if require_ldapi or url is None:
            try:
                #
                # Make sure we're connected as SYSTEM
                #
                res = samdb.search(base='', scope=ldb.SCOPE_BASE, attrs=["tokenGroups"])
                assert len(res) == 1
                sids = res[0].get("tokenGroups")
                assert len(sids) == 1
                sid = ndr_unpack(security.dom_sid, sids[0])
                assert str(sid) == security.SID_NT_SYSTEM
            except Exception as msg:
                raise CommandError("You need to specify an URL that gives privileges as SID_NT_SYSTEM(%s)" %
                                   (security.SID_NT_SYSTEM))

        self.inject_virtual_attributes(samdb)

        return samdb

    def get_account_attributes(self, samdb, username, basedn, filter, scope,
                               attrs, decrypt, support_pw_attrs=True):
        """Search for exactly one account and return it with the
        requested attributes, computing any requested virtual password
        attributes and ";format=" time conversions.

        :param filter: LDAP filter selecting the account (note: the
            parameter name shadows the builtin but is part of the
            established interface).
        :param decrypt: when True, try to decrypt Primary:SambaGPG into
            a cleartext password (requires python gpg bindings).
        :raises CommandError: when zero or multiple accounts match.
        """

        def get_option(opts, name):
            # Return the value of a "name=value" entry in opts, or None.
            if not opts:
                return None
            for o in opts:
                if o.lower().startswith("%s=" % name.lower()):
                    (key, _, val) = o.partition('=')
                    return val
            return None

        def get_virtual_attr_definition(attr):
            # Case-insensitive lookup in the supported virtual attributes.
            for van in sorted(virtual_attributes.keys()):
                if van.lower() != attr.lower():
                    continue
                return virtual_attributes[van]
            return None

        formats = [
            "GeneralizedTime",
            "UnixTime",
            "TimeSpec",
        ]

        def get_virtual_format_definition(opts):
            # Map a ";format=X" option to its canonical format name.
            formatname = get_option(opts, "format")
            if formatname is None:
                return None
            for fm in formats:
                if fm.lower() != formatname.lower():
                    continue
                return fm
            return None

        def parse_raw_attr(raw_attr, is_hidden=False):
            # Split "attr;opt1;opt2" into its parts and classify it.
            (attr, _, fullopts) = raw_attr.partition(';')
            if fullopts:
                opts = fullopts.split(';')
            else:
                opts = []
            a = {}
            a["raw_attr"] = raw_attr
            a["attr"] = attr
            a["opts"] = opts
            a["vattr"] = get_virtual_attr_definition(attr)
            a["vformat"] = get_virtual_format_definition(opts)
            a["is_hidden"] = is_hidden
            return a

        raw_attrs = attrs[:]
        has_wildcard_attr = "*" in raw_attrs
        has_virtual_attrs = False
        requested_attrs = []
        implicit_attrs = []

        for raw_attr in raw_attrs:
            a = parse_raw_attr(raw_attr)
            requested_attrs.append(a)

        search_attrs = []
        has_virtual_attrs = False
        for a in requested_attrs:
            if a["vattr"] is not None:
                has_virtual_attrs = True
                continue
            if a["vformat"] is not None:
                # also add it as implicit attr,
                # where we just do
                # search_attrs.append(a["attr"])
                # later on
                implicit_attrs.append(a)
                continue
            if a["raw_attr"] in search_attrs:
                continue
            search_attrs.append(a["raw_attr"])

        if not has_wildcard_attr:
            required_attrs = [
                "sAMAccountName",
                "userPrincipalName"
            ]
            for required_attr in required_attrs:
                a = parse_raw_attr(required_attr)
                implicit_attrs.append(a)

        if has_virtual_attrs:
            if support_pw_attrs:
                # The hidden password blobs needed to compute the
                # virtual attributes.
                required_attrs = [
                    "supplementalCredentials",
                    "unicodePwd",
                    "msDS-ManagedPassword",
                ]
                for required_attr in required_attrs:
                    a = parse_raw_attr(required_attr, is_hidden=True)
                    implicit_attrs.append(a)

        for a in implicit_attrs:
            if a["attr"] in search_attrs:
                continue
            search_attrs.append(a["attr"])

        if scope == ldb.SCOPE_BASE:
            search_controls = ["show_deleted:1", "show_recycled:1"]
        else:
            search_controls = []
        try:
            res = samdb.search(base=basedn, expression=filter,
                               scope=scope, attrs=search_attrs,
                               controls=search_controls)
            if len(res) == 0:
                raise Exception('Unable to find user "%s"' % (username or filter))
            if len(res) > 1:
                raise Exception('Matched %u multiple users with filter "%s"' % (len(res), filter))
        except Exception as msg:
            # FIXME: catch more specific exception
            raise CommandError("Failed to get password for user '%s': %s" % (username or filter, msg))
        obj = res[0]

        calculated = {}

        sc = None
        unicodePwd = None
        if "supplementalCredentials" in obj:
            sc_blob = obj["supplementalCredentials"][0]
            sc = ndr_unpack(drsblobs.supplementalCredentialsBlob, sc_blob)
        if "unicodePwd" in obj:
            unicodePwd = obj["unicodePwd"][0]
        if "msDS-ManagedPassword" in obj:
            # unpack a GMSA managed password as if we could read the
            # hidden password attributes.
            managed_password = obj["msDS-ManagedPassword"][0]
            unpacked_managed_password = ndr_unpack(gmsa.MANAGEDPASSWORD_BLOB,
                                                   managed_password)
            calculated["Primary:CLEARTEXT"] = \
                unpacked_managed_password.passwords.current
            calculated["OLDCLEARTEXT"] = \
                unpacked_managed_password.passwords.previous

        account_name = str(obj["sAMAccountName"][0])
        if "userPrincipalName" in obj:
            account_upn = str(obj["userPrincipalName"][0])
        else:
            realm = samdb.domain_dns_name()
            account_upn = "%s@%s" % (account_name, realm.lower())

        def get_package(name, min_idx=0):
            # Return the decoded supplementalCredentials package `name`,
            # skipping the first min_idx packages (negative counts from
            # the end); calculated values take precedence.
            if name in calculated:
                return calculated[name]
            if sc is None:
                return None
            if min_idx < 0:
                min_idx = len(sc.sub.packages) + min_idx
            idx = 0
            for p in sc.sub.packages:
                idx += 1
                if idx <= min_idx:
                    continue
                if name != p.name:
                    continue

                return binascii.a2b_hex(p.data)
            return None

        def get_cleartext(attr_opts):
            # Current cleartext, or the previous one with ";previous=1".
            param = get_option(attr_opts, "previous")
            if param:
                if param != "1":
                    raise CommandError(
                        f"Invalid attribute parameter ;previous={param}, "
                        "only supported value is previous=1")
                return calculated.get("OLDCLEARTEXT")
            else:
                return get_package("Primary:CLEARTEXT")

        def get_kerberos_ctr():
            primary_krb5 = get_package("Primary:Kerberos-Newer-Keys")
            if primary_krb5 is None:
                primary_krb5 = get_package("Primary:Kerberos")
            if primary_krb5 is None:
                return (0, None)
            krb5_blob = ndr_unpack(drsblobs.package_PrimaryKerberosBlob,
                                   primary_krb5)
            return (krb5_blob.version, krb5_blob.ctr)

        aes256_key = None
        kerberos_salt = None

        (krb5_v, krb5_ctr) = get_kerberos_ctr()
        if krb5_v in [3, 4]:
            kerberos_salt = krb5_ctr.salt.string

            if krb5_ctr.keys:
                def is_aes256(k):
                    # keytype 18 is AES256-CTS-HMAC-SHA1-96
                    return k.keytype == 18
                aes256_key = next(builtins.filter(is_aes256, krb5_ctr.keys),
                                  None)

        if decrypt:
            #
            # Samba adds 'Primary:SambaGPG' at the end.
            # When Windows sets the password it keeps
            # 'Primary:SambaGPG' and rotates it to
            # the beginning. So we can only use the value,
            # if it is the last one.
            #
            # In order to get more protection we verify
            # the nthash of the decrypted utf16 password
            # against the stored nthash in unicodePwd if
            # available, otherwise against the first 16
            # bytes of the AES256 key.
            #
            sgv = get_package("Primary:SambaGPG", min_idx=-1)
            if sgv is not None:
                try:
                    cv = gpg_decrypt(sgv)
                    #
                    # We only use the password if it matches
                    # the current nthash stored in the unicodePwd
                    # attribute, or the current AES256 key.
                    #
                    tmp = credentials.Credentials()
                    tmp.set_anonymous()
                    tmp.set_utf16_password(cv)

                    decrypted = None
                    current_hash = None

                    if unicodePwd is not None:
                        decrypted = tmp.get_nt_hash()
                        current_hash = unicodePwd
                    elif aes256_key is not None and kerberos_salt is not None:
                        decrypted = tmp.get_aes256_key(kerberos_salt)
                        current_hash = aes256_key.value

                    if current_hash is not None and current_hash == decrypted:
                        calculated["Primary:CLEARTEXT"] = cv

                except Exception as e:
                    self.outf.write(
                        "WARNING: '%s': SambaGPG can't be decrypted "
                        "into CLEARTEXT: %s\n" % (
                            username or account_name, e))

        def get_utf8(a, b, username):
            # Convert a UTF-16 password blob to UTF-8 via Credentials.
            creds_for_charcnv = credentials.Credentials()
            creds_for_charcnv.set_anonymous()
            creds_for_charcnv.set_utf16_password(get_bytes(b))

            # This can't fail due to character conversion issues as it
            # includes a built-in fallback (UTF16_MUNGED) matching
            # exactly what we need.
            return creds_for_charcnv.get_password().encode()

        # Extract the WDigest hash for the value specified by i.
        # Builds an htdigest compatible value
        DIGEST = "Digest"

        def get_wDigest(i, primary_wdigest, account_name, account_upn,
                        domain, dns_domain):
            if i == 1:
                user = account_name
                realm = domain
            elif i == 2:
                user = account_name.lower()
                realm = domain.lower()
            elif i == 3:
                user = account_name.upper()
                realm = domain.upper()
            elif i == 4:
                user = account_name
                realm = domain.upper()
            elif i == 5:
                user = account_name
                realm = domain.lower()
            elif i == 6:
                user = account_name.upper()
                realm = domain.lower()
            elif i == 7:
                user = account_name.lower()
                realm = domain.upper()
            elif i == 8:
                user = account_name
                realm = dns_domain.lower()
            elif i == 9:
                user = account_name.lower()
                realm = dns_domain.lower()
            elif i == 10:
                user = account_name.upper()
                realm = dns_domain.upper()
            elif i == 11:
                user = account_name
                realm = dns_domain.upper()
            elif i == 12:
                user = account_name
                realm = dns_domain.lower()
            elif i == 13:
                user = account_name.upper()
                realm = dns_domain.lower()
            elif i == 14:
                user = account_name.lower()
                realm = dns_domain.upper()
            elif i == 15:
                user = account_upn
                realm = ""
            elif i == 16:
                user = account_upn.lower()
                realm = ""
            elif i == 17:
                user = account_upn.upper()
                realm = ""
            elif i == 18:
                user = "%s\\%s" % (domain, account_name)
                realm = ""
            elif i == 19:
                user = "%s\\%s" % (domain.lower(), account_name.lower())
                realm = ""
            elif i == 20:
                user = "%s\\%s" % (domain.upper(), account_name.upper())
                realm = ""
            elif i == 21:
                user = account_name
                realm = DIGEST
            elif i == 22:
                user = account_name.lower()
                realm = DIGEST
            elif i == 23:
                user = account_name.upper()
                realm = DIGEST
            elif i == 24:
                user = account_upn
                realm = DIGEST
            elif i == 25:
                user = account_upn.lower()
                realm = DIGEST
            elif i == 26:
                user = account_upn.upper()
                realm = DIGEST
            elif i == 27:
                user = "%s\\%s" % (domain, account_name)
                realm = DIGEST
            elif i == 28:
                # Differs from spec, see tests
                user = "%s\\%s" % (domain.lower(), account_name.lower())
                realm = DIGEST
            elif i == 29:
                # Differs from spec, see tests
                user = "%s\\%s" % (domain.upper(), account_name.upper())
                realm = DIGEST
            else:
                user = ""

            digests = ndr_unpack(drsblobs.package_PrimaryWDigestBlob,
                                 primary_wdigest)
            try:
                digest = binascii.hexlify(bytearray(digests.hashes[i - 1].hash))
                return "%s:%s:%s" % (user, realm, get_string(digest))
            except IndexError:
                return None

        # get the value for a virtualCrypt attribute.
        # look for an exact match on algorithm and rounds in supplemental creds
        # if not found calculate using Primary:CLEARTEXT
        # if no Primary:CLEARTEXT return the first supplementalCredential
        # that matches the algorithm.
        def get_virtual_crypt_value(a, algorithm, rounds, username, account_name):
            sv = None
            fb = None
            b = get_package("Primary:userPassword")
            if b is not None:
                (sv, fb) = get_userPassword_hash(b, algorithm, rounds)
            if sv is None:
                # No exact match on algorithm and number of rounds
                # try and calculate one from the Primary:CLEARTEXT
                # NOTE: attr_opts is the enclosing dispatch-loop variable;
                # this helper is only called from inside that loop.
                b = get_cleartext(attr_opts)
                if b is not None:
                    u8 = get_utf8(a, b, username or account_name)
                    if u8 is not None:
                        # in py2 using get_bytes should ensure u8 is unmodified
                        # in py3 it will be decoded
                        sv = get_crypt_value(str(algorithm), get_string(u8), rounds)
            if sv is None:
                # Unable to calculate a hash with the specified
                # number of rounds, fall back to the first hash using
                # the specified algorithm
                sv = fb
            if sv is None:
                return None
            return "{CRYPT}" + sv

        def get_userPassword_hash(blob, algorithm, rounds):
            up = ndr_unpack(drsblobs.package_PrimaryUserPasswordBlob, blob)
            SCHEME = "{CRYPT}"

            # Check that the NT hash or AES256 key have not been changed
            # without updating the user password hashes. This indicates that
            # password has been changed without updating the supplemental
            # credentials.
            if unicodePwd is not None:
                current_hash = unicodePwd
            elif aes256_key is not None:
                current_hash = aes256_key.value[:16]
            else:
                return None, None

            if current_hash != bytearray(up.current_nt_hash.hash):
                return None, None

            scheme_prefix = "$%d$" % algorithm
            prefix = scheme_prefix
            if rounds > 0:
                prefix = "$%d$rounds=%d" % (algorithm, rounds)
            scheme_match = None

            for h in up.hashes:
                # in PY2 this should just do nothing and in PY3 if bytes
                # it will decode them
                h_value = get_string(h.value)
                if (scheme_match is None and
                    h.scheme == SCHEME and
                    h_value.startswith(scheme_prefix)):
                    scheme_match = h_value
                if h.scheme == SCHEME and h_value.startswith(prefix):
                    return (h_value, scheme_match)

            # No match on the number of rounds, return the value of the
            # first matching scheme
            return (None, scheme_match)

        # Extract the rounds value from the options of a virtualCrypt attribute
        # i.e. options = "rounds=20;other=ignored;" will return 20
        # if the rounds option is not found or the value is not a number, 0 is returned
        # which indicates that the default number of rounds should be used.
        def get_rounds(opts):
            val = get_option(opts, "rounds")
            if val is None:
                return 0
            try:
                return int(val)
            except ValueError:
                return 0

        def get_unicode_pwd_hash(pwd):
            # We can't read unicodePwd directly, but we can regenerate
            # it from msDS-ManagedPassword
            tmp = credentials.Credentials()
            tmp.set_anonymous()
            tmp.set_utf16_password(pwd)
            return tmp.get_nt_hash()

        # We use sort here in order to have a predictable processing order
        for a in sorted(virtual_attributes.keys()):
            vattr = None
            for ra in requested_attrs:
                if ra["vattr"] is None:
                    continue
                if ra["attr"].lower() != a.lower():
                    continue
                vattr = ra
                break
            if vattr is None:
                continue
            attr_opts = vattr["opts"]

            if a == "virtualClearTextUTF8":
                b = get_cleartext(attr_opts)
                if b is None:
                    continue
                u8 = get_utf8(a, b, username or account_name)
                if u8 is None:
                    continue
                v = u8
            elif a == "virtualClearTextUTF16":
                v = get_cleartext(attr_opts)
                if v is None:
                    continue
            elif a == "virtualSSHA":
                b = get_cleartext(attr_opts)
                if b is None:
                    continue
                u8 = get_utf8(a, b, username or account_name)
                if u8 is None:
                    continue
                salt = os.urandom(4)
                h = hashlib.sha1()
                h.update(u8)
                h.update(salt)
                bv = h.digest() + salt
                v = "{SSHA}" + base64.b64encode(bv).decode('utf8')
            elif a == "virtualCryptSHA256":
                rounds = get_rounds(attr_opts)
                x = get_virtual_crypt_value(a, 5, rounds, username, account_name)
                if x is None:
                    continue
                v = x
            elif a == "virtualCryptSHA512":
                rounds = get_rounds(attr_opts)
                x = get_virtual_crypt_value(a, 6, rounds, username, account_name)
                if x is None:
                    continue
                v = x
            elif a == "virtualSambaGPG":
                # Samba adds 'Primary:SambaGPG' at the end.
                # When Windows sets the password it keeps
                # 'Primary:SambaGPG' and rotates it to
                # the beginning. So we can only use the value,
                # if it is the last one.
                v = get_package("Primary:SambaGPG", min_idx=-1)
                if v is None:
                    continue
            elif a == "virtualKerberosSalt":
                v = kerberos_salt
                if v is None:
                    continue
            elif a == "unicodePwd" and unicodePwd is None:
                if "Primary:CLEARTEXT" in calculated and not get_option(attr_opts, "previous"):
                    v = get_unicode_pwd_hash(calculated["Primary:CLEARTEXT"])
                elif "OLDCLEARTEXT" in calculated and get_option(attr_opts, "previous"):
                    v = get_unicode_pwd_hash(calculated["OLDCLEARTEXT"])
                else:
                    continue
            elif a.startswith("virtualWDigest"):
                primary_wdigest = get_package("Primary:WDigest")
                if primary_wdigest is None:
                    continue
                x = a[len("virtualWDigest"):]
                try:
                    i = int(x)
                except ValueError:
                    continue
                domain = samdb.domain_netbios_name()
                dns_domain = samdb.domain_dns_name()
                v = get_wDigest(i, primary_wdigest, account_name, account_upn, domain, dns_domain)
                if v is None:
                    continue
            else:
                continue
            obj[a] = ldb.MessageElement(v, ldb.FLAG_MOD_REPLACE, vattr["raw_attr"])

        def get_src_attrname(srcattrg):
            # Case-insensitive lookup of an attribute name present on obj.
            srcattrl = srcattrg.lower()
            srcattr = None
            for k in obj.keys():
                if srcattrl != k.lower():
                    continue
                srcattr = k
                break
            return srcattr

        def get_src_time_float(srcattr):
            # Interpret the attribute either as a GeneralizedTime string
            # or as an NTTIME integer; return seconds since the epoch.
            if srcattr not in obj:
                return None
            vstr = str(obj[srcattr][0])
            if vstr.endswith(".0Z"):
                vut = ldb.string_to_time(vstr)
                vfl = float(vut)
                return vfl

            try:
                vnt = int(vstr)
            except ValueError as e:
                return None
            # 0 or 9223372036854775807 mean no value too
            if vnt == 0:
                return None
            if vnt >= 0x7FFFFFFFFFFFFFFF:
                return None
            vfl = nttime2float(vnt)
            return vfl

        def get_generalizedtime(srcattr):
            vfl = get_src_time_float(srcattr)
            if vfl is None:
                return None
            vut = int(vfl)
            try:
                v = "%s" % ldb.timestring(vut)
            except OSError as e:
                # out-of-range for the platform's time formatting
                if e.errno == errno.EOVERFLOW:
                    return None
                raise
            return v

        def get_unixepoch(srcattr):
            vfl = get_src_time_float(srcattr)
            if vfl is None:
                return None
            vut = int(vfl)
            v = "%d" % vut
            return v

        def get_timespec(srcattr):
            vfl = get_src_time_float(srcattr)
            if vfl is None:
                return None
            v = "%.9f" % vfl
            return v

        # Produce the ";format=" converted values that were requested.
        generated_formats = {}
        for fm in formats:
            for ra in requested_attrs:
                if ra["vformat"] is None:
                    continue
                if ra["vformat"] != fm:
                    continue
                srcattr = get_src_attrname(ra["attr"])
                if srcattr is None:
                    continue
                an = "%s;format=%s" % (srcattr, fm)
                if an in generated_formats:
                    continue
                generated_formats[an] = fm

                v = None
                if fm == "GeneralizedTime":
                    v = get_generalizedtime(srcattr)
                elif fm == "UnixTime":
                    v = get_unixepoch(srcattr)
                elif fm == "TimeSpec":
                    v = get_timespec(srcattr)
                if v is None:
                    continue
                obj[an] = ldb.MessageElement(v, ldb.FLAG_MOD_REPLACE, an)

        # Now filter out implicit attributes.  Iterate a snapshot of the
        # keys since entries may be deleted from obj while looping.
        for delname in list(obj.keys()):
            keep = False
            for ra in requested_attrs:
                if delname.lower() != ra["raw_attr"].lower():
                    continue
                keep = True
                break
            if keep:
                continue

            dattr = None
            for ia in implicit_attrs:
                if delname.lower() != ia["attr"].lower():
                    continue
                dattr = ia
                break
            if dattr is None:
                continue

            if has_wildcard_attr and not dattr["is_hidden"]:
                continue
            del obj[delname]
        return obj

    def parse_attributes(self, attributes):
        """Parse a comma-separated --attributes value.

        Rejects disabled virtual attributes with their reason and
        canonicalises the case of supported virtual attribute names.

        :raises CommandError: when attributes is None or contains a
            disabled virtual attribute.
        """
        if attributes is None:
            raise CommandError("Please specify --attributes")
        attrs = attributes.split(',')
        password_attrs = []
        for pa in attrs:
            pa = pa.lstrip().rstrip()
            for da in disabled_virtual_attributes.keys():
                if pa.lower() == da.lower():
                    r = disabled_virtual_attributes[da]["reason"]
                    raise CommandError("Virtual attribute '%s' not supported: %s" % (
                        da, r))
            for va in virtual_attributes.keys():
                if pa.lower() == va.lower():
                    # Take the real name
                    pa = va
                    break
            password_attrs += [pa]

        return password_attrs
diff --git a/python/samba/netcmd/user/readpasswords/get_kerberos_ticket.py b/python/samba/netcmd/user/readpasswords/get_kerberos_ticket.py
new file mode 100644
index 0000000..3a8296b
--- /dev/null
+++ b/python/samba/netcmd/user/readpasswords/get_kerberos_ticket.py
@@ -0,0 +1,146 @@
+# user management
+#
+# user get-kerberos-ticket command - obtain a TGT for a database user
+#
+# Copyright Jelmer Vernooij 2010 <jelmer@samba.org>
+# Copyright Theresa Halloran 2011 <theresahalloran@gmail.com>
+# Copyright Andrew Bartlett 2023 <abartlet@samba.org>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import ldb
+import samba.getopt as options
+from samba.netcmd import CommandError, Option
+from samba.credentials import Credentials
+from .common import (
+ GetPasswordCommand,
+ gpg_decrypt,
+ decrypt_samba_gpg_help,
+)
+from samba.dcerpc import samr
+
class cmd_user_get_kerberos_ticket(GetPasswordCommand):
    """Get a Kerberos Ticket Granting Ticket as a user

This command gets a Kerberos TGT using the password for a user/computer account.

The username specified on the command is the sAMAccountName.
The username may also be specified using the --filter option.

The command must be run from the root user id or another authorized
user id. The '-H' or '--URL' option supports ldap:// for remote Group
Managed Service accounts, and ldapi:// or tdb:// can be used to
adjust the local path. tdb:// is used by default for a bare path.

The --output-krb5-ccache option should point to a location for the
credentials cache. The default is a FILE: type cache if no prefix is
specified.

The '--decrypt-samba-gpg' option triggers decryption of the
Primary:SambaGPG buffer to get the password.

Check with '--help' if this feature is available
in your environment or not (the python-gpgme package is required). Please
note that you might need to set the GNUPGHOME environment variable. If the
decryption key has a passphrase you have to make sure that the GPG_AGENT_INFO
environment variable has been set correctly and the passphrase is already
known by the gpg-agent.

Example1:
samba-tool user get-kerberos-ticket TestUser1 --output-krb5-ccache=/srv/service/krb5_ccache

Example2:
samba-tool user get-kerberos-ticket --filter='(samAccountName=TestUser3)' --output-krb5-ccache=FILE:/srv/service/krb5_ccache

    """
    synopsis = "%prog (<username>|--filter <filter>) [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
        "hostopts": options.HostOptions,
    }

    takes_options = [
        Option("--filter", help="LDAP Filter to get Kerberos ticket for (must match single account)", type=str),
        Option("--output-krb5-ccache", type=str,
               help="Location of Kerberos credentials cache to write ticket into",
               metavar="CCACHE", dest="output_krb5_ccache"),
        Option("--decrypt-samba-gpg",
               help=decrypt_samba_gpg_help,
               action="store_true", default=False, dest="decrypt_samba_gpg"),
    ]

    takes_args = ["username?"]

    def run(self, username=None, H=None, filter=None,
            attributes=None, decrypt_samba_gpg=None,
            sambaopts=None, versionopts=None, hostopts=None,
            credopts=None, output_krb5_ccache=None):
        # Obtain a TGT for the account selected by username/--filter by
        # reading its password material from the database and asking the
        # KDC for a ticket with it.
        self.lp = sambaopts.get_loadparm()

        # --decrypt-samba-gpg is only usable when the gpg support was
        # detected at import time (gpg_decrypt flag from .common).
        if decrypt_samba_gpg and not gpg_decrypt:
            raise CommandError(decrypt_samba_gpg_help)

        if filter is None and username is None:
            raise CommandError("Either the username or '--filter' must be specified!")

        if filter is None:
            # binary_encode() escapes the value for safe use in an LDAP filter.
            filter = "(&(objectClass=user)(sAMAccountName=%s))" % (ldb.binary_encode(username))

        # We need the cleartext if available, the account name for the
        # principal, and the NT hash as a fallback credential.
        password_attrs = ["virtualClearTextUTF16", "samAccountName", "unicodePwd"]

        creds = credopts.get_credentials(self.lp)
        samdb = self.connect_for_passwords(url=hostopts.H, require_ldapi=False, creds=creds)

        obj = self.get_account_attributes(samdb, username,
                                          basedn=None,
                                          filter=filter,
                                          scope=ldb.SCOPE_SUBTREE,
                                          attrs=password_attrs,
                                          decrypt=decrypt_samba_gpg)

        lp_ctx = sambaopts.get_loadparm()

        # Build fresh credentials for the target account (not the admin
        # credentials used to read the database above).
        creds = Credentials()
        creds.set_username(str(obj["samAccountName"][0]))
        creds.set_realm(samdb.domain_dns_name())

        # Prefer the cleartext (UTF-16) password; missing attributes were
        # silently omitted from obj, hence the KeyError handling.
        utf16_pw = None
        nt_pass = None
        try:
            utf16_pw = obj["virtualClearTextUTF16"][0]
            creds.set_utf16_password(utf16_pw)
        except KeyError:
            pass

        # Fall back to the NT hash (unicodePwd) if no cleartext was stored.
        if utf16_pw is None:
            try:
                nt_pass = samr.Password()
                nt_pass.hash = list(obj["unicodePwd"][0])
                creds.set_nt_hash(nt_pass)
            except KeyError:
                pass

        if nt_pass is None and utf16_pw is None:
            # Over remote LDAP only gMSA accounts expose password material,
            # so give a more actionable error in that case.
            if samdb.url.startswith("ldap://") or samdb.url.startswith("ldaps://"):
                raise CommandError("No password was available for this user. "
                                   "Only Group Managed Service accounts allow access to passwords over LDAP, "
                                   "you may need to access the sam.ldb directly on the Samba AD DC and export the file.")
            else:
                raise CommandError("No password was available for this user")
        creds.guess(lp_ctx)
        # Fetch the TGT and store it into the requested ccache
        # (presumably a FILE: cache when no prefix is given — see help text).
        creds.get_named_ccache(lp_ctx, output_krb5_ccache)
diff --git a/python/samba/netcmd/user/readpasswords/getpassword.py b/python/samba/netcmd/user/readpasswords/getpassword.py
new file mode 100644
index 0000000..f962412
--- /dev/null
+++ b/python/samba/netcmd/user/readpasswords/getpassword.py
@@ -0,0 +1,210 @@
+# user management
+#
+# user getpassword command
+#
+# Copyright Jelmer Vernooij 2010 <jelmer@samba.org>
+# Copyright Theresa Halloran 2011 <theresahalloran@gmail.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import ldb
+import samba.getopt as options
+from samba.netcmd import CommandError, Option
+
+from .common import (
+ GetPasswordCommand,
+ gpg_decrypt,
+ decrypt_samba_gpg_help,
+ virtual_attributes_help
+)
+
+
class cmd_user_getpassword(GetPasswordCommand):
    """Get the password fields of a user/computer account.

This command gets the logon password for a user/computer account.

The username specified on the command is the sAMAccountName.
The username may also be specified using the --filter option.

The command must be run from the root user id or another authorized user id.
The '-H' or '--URL' option normally only supports ldapi:// or [tdb://] and
can be used to adjust the local path. By default, tdb:// is used.
if the target account is a group managed service account, then in this
case the -H can point to a remote AD DC LDAP server.

The '--attributes' parameter takes a comma separated list of attributes,
which will be printed or given to the script specified by '--script'. If a
specified attribute is not available on an object it's silently omitted.
All attributes defined in the schema (e.g. the unicodePwd attribute holds
the NTHASH) and the following virtual attributes are possible (see --help
for which virtual attributes are supported in your environment):

   virtualClearTextUTF16: The raw cleartext as stored in the
                          'Primary:CLEARTEXT' (or 'Primary:SambaGPG'
                          with '--decrypt-samba-gpg') buffer inside the
                          supplementalCredentials attribute. This typically
                          contains valid UTF-16-LE, but may contain random
                          bytes, e.g. for computer and gMSA accounts.
                          When the account is a group managed service account,
                          and the user is permitted to access
                          msDS-ManagedPassword then the current and previous
                          password can be read over LDAP. Add ;previous=1
                          to read the previous password.

   virtualClearTextUTF8:  As virtualClearTextUTF16, but converted to UTF-8
                          (invalid UTF-16-LE is mapped in the same way as
                          Windows).

   virtualSSHA:           As virtualClearTextUTF8, but a salted SHA-1
                          checksum, useful for OpenLDAP's '{SSHA}' algorithm.

   virtualCryptSHA256:    As virtualClearTextUTF8, but a salted SHA256
                          checksum, useful for OpenLDAP's '{CRYPT}' algorithm,
                          with a $5$... salt, see crypt(3) on modern systems.
                          The number of rounds used to calculate the hash can
                          also be specified. By appending ";rounds=x" to the
                          attribute name i.e. virtualCryptSHA256;rounds=10000
                          will calculate a SHA256 hash with 10,000 rounds.
                          Non-numeric values for rounds are silently ignored.
                          The value is calculated as follows:
                          1) If a value exists in 'Primary:userPassword' with
                             the specified number of rounds it is returned.
                          2) If 'Primary:CLEARTEXT', or 'Primary:SambaGPG'
                             with '--decrypt-samba-gpg'. Calculate a hash with
                             the specified number of rounds.
                          3) Return the first CryptSHA256 value in
                             'Primary:userPassword'.


   virtualCryptSHA512:    As virtualClearTextUTF8, but a salted SHA512
                          checksum, useful for OpenLDAP's '{CRYPT}' algorithm,
                          with a $6$... salt, see crypt(3) on modern systems.
                          The number of rounds used to calculate the hash can
                          also be specified. By appending ";rounds=x" to the
                          attribute name i.e. virtualCryptSHA512;rounds=10000
                          will calculate a SHA512 hash with 10,000 rounds.
                          Non-numeric values for rounds are silently ignored.
                          The value is calculated as follows:
                          1) If a value exists in 'Primary:userPassword' with
                             the specified number of rounds it is returned.
                          2) If 'Primary:CLEARTEXT', or 'Primary:SambaGPG'
                             with '--decrypt-samba-gpg'. Calculate a hash with
                             the specified number of rounds.
                          3) Return the first CryptSHA512 value in
                             'Primary:userPassword'.

   virtualWDigestNN:      The individual hash values stored in
                          'Primary:WDigest' where NN is the hash number in
                          the range 01 to 29.
                          NOTE: As at 22-05-2017 the documentation:
                          3.1.1.8.11.3.1 WDIGEST_CREDENTIALS Construction
                          https://msdn.microsoft.com/en-us/library/cc245680.aspx
                          is incorrect.

   virtualKerberosSalt:   This results the salt string that is used to compute
                          Kerberos keys from a UTF-8 cleartext password.

   virtualSambaGPG:       The raw cleartext as stored in the
                          'Primary:SambaGPG' buffer inside the
                          supplementalCredentials attribute.
                          See the 'password hash gpg key ids' option in
                          smb.conf.

The '--decrypt-samba-gpg' option triggers decryption of the
Primary:SambaGPG buffer. Check with '--help' if this feature is available
in your environment or not (the python-gpgme package is required). Please
note that you might need to set the GNUPGHOME environment variable. If the
decryption key has a passphrase you have to make sure that the GPG_AGENT_INFO
environment variable has been set correctly and the passphrase is already
known by the gpg-agent.

Attributes with time values can take an additional format specifier, which
converts the time value into the requested format. The format can be specified
by adding ";format=formatSpecifier" to the requested attribute name, whereby
"formatSpecifier" must be a valid specifier. The syntax looks like:

  --attributes=attributeName;format=formatSpecifier

The following format specifiers are available:
  - GeneralizedTime (e.g. 20210224113259.0Z)
  - UnixTime (e.g. 1614166392)
  - TimeSpec (e.g. 161416639.267546892)

Attributes with an original NTTIME value of 0 and 9223372036854775807 are
treated as non-existing value.

Example1:
samba-tool user getpassword TestUser1 --attributes=pwdLastSet,virtualClearTextUTF8

Example2:
samba-tool user getpassword --filter=samaccountname=TestUser3 --attributes=msDS-KeyVersionNumber,unicodePwd,virtualClearTextUTF16

"""

    synopsis = "%prog (<username>|--filter <filter>) [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
        "hostopts": options.HostOptions,
    }

    takes_options = [
        Option("--filter", help="LDAP Filter to get password for (must match single account)", type=str),
        Option("--attributes", type=str,
               help=virtual_attributes_help,
               metavar="ATTRIBUTELIST", dest="attributes"),
        Option("--decrypt-samba-gpg",
               help=decrypt_samba_gpg_help,
               action="store_true", default=False, dest="decrypt_samba_gpg"),
    ]

    takes_args = ["username?"]

    def run(self, username=None, H=None, filter=None,
            attributes=None, decrypt_samba_gpg=None,
            sambaopts=None, versionopts=None, hostopts=None,
            credopts=None):
        # Print the requested (possibly virtual) password attributes of a
        # single account as LDIF on stdout.
        self.lp = sambaopts.get_loadparm()

        # --decrypt-samba-gpg is only usable when gpg support was detected
        # at import time (gpg_decrypt flag from .common).
        if decrypt_samba_gpg and not gpg_decrypt:
            raise CommandError(decrypt_samba_gpg_help)

        if filter is None and username is None:
            raise CommandError("Either the username or '--filter' must be specified!")

        if filter is None:
            # binary_encode() escapes the value for safe use in an LDAP filter.
            filter = "(&(objectClass=user)(sAMAccountName=%s))" % (ldb.binary_encode(username))

        # parse_attributes() validates the argument itself and raises
        # CommandError("Please specify --attributes") when it is None, so
        # no separate None check is needed here.
        password_attrs = self.parse_attributes(attributes)

        creds = credopts.get_credentials(self.lp)
        samdb = self.connect_for_passwords(url=hostopts.H, require_ldapi=False, creds=creds)

        obj = self.get_account_attributes(samdb, username,
                                          basedn=None,
                                          filter=filter,
                                          scope=ldb.SCOPE_SUBTREE,
                                          attrs=password_attrs,
                                          decrypt=decrypt_samba_gpg)

        # Success message goes to stderr so stdout stays clean LDIF.
        ldif = samdb.write_ldif(obj, ldb.CHANGETYPE_NONE)
        self.outf.write("%s" % ldif)
        self.errf.write("Got password OK\n")
diff --git a/python/samba/netcmd/user/readpasswords/show.py b/python/samba/netcmd/user/readpasswords/show.py
new file mode 100644
index 0000000..1cdec89
--- /dev/null
+++ b/python/samba/netcmd/user/readpasswords/show.py
@@ -0,0 +1,144 @@
+# user management
+#
+# user show command
+#
+# Copyright Jelmer Vernooij 2010 <jelmer@samba.org>
+# Copyright Theresa Halloran 2011 <theresahalloran@gmail.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import samba.getopt as options
+from samba import dsdb, ldb
+from samba.auth import system_session
+from samba.netcmd import Option, common
+from samba.samdb import SamDB
+
+from .common import GetPasswordCommand
+
+
class cmd_user_show(GetPasswordCommand):
    """Display a user AD object.

This command displays a user account and it's attributes in the Active
Directory domain.
The username specified on the command is the sAMAccountName.

The command may be run from the root userid or another authorized userid.

The -H or --URL= option can be used to execute the command against a remote
server.

The '--attributes' parameter takes a comma separated list of the requested
attributes. Without '--attributes' or with '--attributes=*' all usually
available attributes are selected.
Hidden attributes in addition to all usually available attributes can be
selected with e.g. '--attributes=*,msDS-UserPasswordExpiryTimeComputed'.
If a specified attribute is not available on a user object it's silently
omitted.

Attributes with time values can take an additional format specifier, which
converts the time value into the requested format. The format can be specified
by adding ";format=formatSpecifier" to the requested attribute name, whereby
"formatSpecifier" must be a valid specifier. The syntax looks like:

  --attributes=attributeName;format=formatSpecifier

The following format specifiers are available:
  - GeneralizedTime (e.g. 20210224113259.0Z)
  - UnixTime (e.g. 1614166392)
  - TimeSpec (e.g. 161416639.267546892)

Attributes with an original NTTIME value of 0 and 9223372036854775807 are
treated as non-existing value.

Example1:
samba-tool user show User1 -H ldap://samba.samdom.example.com \\
    -U administrator --password=passw1rd

Example1 shows how to display a users attributes in the domain against a remote
LDAP server.

The -H parameter is used to specify the remote target server.

Example2:
samba-tool user show User2

Example2 shows how to display a users attributes in the domain against a local
LDAP server.

Example3:
samba-tool user show User2 --attributes=objectSid,memberOf

Example3 shows how to display a users objectSid and memberOf attributes.

Example4:
samba-tool user show User2 \\
    --attributes='pwdLastSet;format=GeneralizedTime,pwdLastSet;format=UnixTime'

The result of Example 4 provides the pwdLastSet attribute values in the
specified format:
    dn: CN=User2,CN=Users,DC=samdom,DC=example,DC=com
    pwdLastSet;format=GeneralizedTime: 20210120105207.0Z
    pwdLastSet;format=UnixTime: 1611139927
"""
    synopsis = "%prog <username> [options]"

    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server",
               type=str, metavar="URL", dest="H"),
        Option("--attributes",
               help=("Comma separated list of attributes, "
                     "which will be printed. "
                     "Possible supported virtual attributes: "
                     "virtualGeneralizedTime, virtualUnixTime, virtualTimeSpec."),
               type=str, dest="user_attrs"),
    ]

    takes_args = ["username"]
    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "credopts": options.CredentialsOptions,
        "versionopts": options.VersionOptions,
    }

    def run(self, username, credopts=None, sambaopts=None, versionopts=None,
            H=None, user_attrs=None):
        # Look up a single normal user account and print its attributes
        # (optionally with ;format= time conversions) as editor-style LDIF.

        lp = sambaopts.get_loadparm()
        creds = credopts.get_credentials(lp, fallback_machine=True)
        samdb = SamDB(url=H, session_info=system_session(),
                      credentials=creds, lp=lp)

        # Hook up the virtual attribute machinery on this connection
        # (provided by GetPasswordCommand).
        self.inject_virtual_attributes(samdb)

        # No --attributes (or empty string) means "all usual attributes".
        if user_attrs:
            attrs = self.parse_attributes(user_attrs)
        else:
            attrs = ["*"]

        # Restrict the match to normal user accounts; binary_encode()
        # escapes the name for safe use in the LDAP filter.
        filter = ("(&(sAMAccountType=%d)(sAMAccountName=%s))" %
                  (dsdb.ATYPE_NORMAL_ACCOUNT, ldb.binary_encode(username)))

        domaindn = samdb.domain_dn()

        # decrypt=False / support_pw_attrs=False: this command displays
        # object attributes only, never password material.
        obj = self.get_account_attributes(samdb, username,
                                          basedn=domaindn,
                                          filter=filter,
                                          scope=ldb.SCOPE_SUBTREE,
                                          attrs=attrs,
                                          decrypt=False,
                                          support_pw_attrs=False)
        user_ldif = common.get_ldif_for_editor(samdb, obj)
        self.outf.write(user_ldif)
diff --git a/python/samba/netcmd/user/readpasswords/syncpasswords.py b/python/samba/netcmd/user/readpasswords/syncpasswords.py
new file mode 100644
index 0000000..a909123
--- /dev/null
+++ b/python/samba/netcmd/user/readpasswords/syncpasswords.py
@@ -0,0 +1,878 @@
+# user management
+#
+# user syncpasswords command
+#
+# Copyright Jelmer Vernooij 2010 <jelmer@samba.org>
+# Copyright Theresa Halloran 2011 <theresahalloran@gmail.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import base64
+import errno
+import fcntl
+import os
+import signal
+import time
+from subprocess import Popen, PIPE, STDOUT
+
+import ldb
+import samba.getopt as options
+from samba import Ldb, dsdb
+from samba.dcerpc import misc, security
+from samba.ndr import ndr_unpack
+from samba.common import get_bytes
+from samba.netcmd import CommandError, Option
+
+from .common import (
+ GetPasswordCommand,
+ gpg_decrypt,
+ decrypt_samba_gpg_help,
+ virtual_attributes_help
+)
+
+
+class cmd_user_syncpasswords(GetPasswordCommand):
+ """Sync the password of user accounts.
+
+This syncs logon passwords for user accounts.
+
+Note that this command should run on a single domain controller only
+(typically the PDC-emulator). However the "password hash gpg key ids"
+option should to be configured on all domain controllers.
+
+The command must be run from the root user id or another authorized user id.
+The '-H' or '--URL' option only supports ldapi:// and can be used to adjust the
+local path. By default, ldapi:// is used with the default path to the
+privileged ldapi socket.
+
+This command has three modes: "Cache Initialization", "Sync Loop Run" and
+"Sync Loop Terminate".
+
+
+Cache Initialization
+====================
+
+The first time, this command needs to be called with
+'--cache-ldb-initialize' in order to initialize its cache.
+
+The cache initialization requires '--attributes' and allows the following
+optional options: '--decrypt-samba-gpg', '--script', '--filter' or
+'-H/--URL'.
+
+The '--attributes' parameter takes a comma separated list of attributes,
+which will be printed or given to the script specified by '--script'. If a
+specified attribute is not available on an object it will be silently omitted.
+All attributes defined in the schema (e.g. the unicodePwd attribute holds
+the NTHASH) and the following virtual attributes are possible (see '--help'
+for supported virtual attributes in your environment):
+
+ virtualClearTextUTF16: The raw cleartext as stored in the
+ 'Primary:CLEARTEXT' (or 'Primary:SambaGPG'
+ with '--decrypt-samba-gpg') buffer inside of the
+ supplementalCredentials attribute. This typically
+ contains valid UTF-16-LE, but may contain random
+ bytes, e.g. for computer accounts.
+
+ virtualClearTextUTF8: As virtualClearTextUTF16, but converted to UTF-8
+ (only from valid UTF-16-LE).
+
+ virtualSSHA: As virtualClearTextUTF8, but a salted SHA-1
+ checksum, useful for OpenLDAP's '{SSHA}' algorithm.
+
+ virtualCryptSHA256: As virtualClearTextUTF8, but a salted SHA256
+ checksum, useful for OpenLDAP's '{CRYPT}' algorithm,
+ with a $5$... salt, see crypt(3) on modern systems.
+ The number of rounds used to calculate the hash can
+ also be specified. By appending ";rounds=x" to the
+ attribute name i.e. virtualCryptSHA256;rounds=10000
+ will calculate a SHA256 hash with 10,000 rounds.
+ Non numeric values for rounds are silently ignored.
+ The value is calculated as follows:
+ 1) If a value exists in 'Primary:userPassword' with
+ the specified number of rounds it is returned.
+ 2) If 'Primary:CLEARTEXT', or 'Primary:SambaGPG' with
+ '--decrypt-samba-gpg'. Calculate a hash with
+ the specified number of rounds
+ 3) Return the first CryptSHA256 value in
+ 'Primary:userPassword'.
+
+ virtualCryptSHA512: As virtualClearTextUTF8, but a salted SHA512
+ checksum, useful for OpenLDAP's '{CRYPT}' algorithm,
+ with a $6$... salt, see crypt(3) on modern systems.
+ The number of rounds used to calculate the hash can
+ also be specified. By appending ";rounds=x" to the
+ attribute name i.e. virtualCryptSHA512;rounds=10000
+ will calculate a SHA512 hash with 10,000 rounds.
+ Non numeric values for rounds are silently ignored.
+ The value is calculated as follows:
+ 1) If a value exists in 'Primary:userPassword' with
+ the specified number of rounds it is returned.
+ 2) If 'Primary:CLEARTEXT', or 'Primary:SambaGPG' with
+ '--decrypt-samba-gpg'. Calculate a hash with
+ the specified number of rounds.
+ 3) Return the first CryptSHA512 value in
+ 'Primary:userPassword'.
+
+ virtualWDigestNN: The individual hash values stored in
+ 'Primary:WDigest' where NN is the hash number in
+ the range 01 to 29.
+ NOTE: As at 22-05-2017 the documentation:
+ 3.1.1.8.11.3.1 WDIGEST_CREDENTIALS Construction
+ https://msdn.microsoft.com/en-us/library/cc245680.aspx
+ is incorrect.
+
+ virtualKerberosSalt: This results the salt string that is used to compute
+ Kerberos keys from a UTF-8 cleartext password.
+
+ virtualSambaGPG: The raw cleartext as stored in the
+ 'Primary:SambaGPG' buffer inside of the
+ supplementalCredentials attribute.
+ See the 'password hash gpg key ids' option in
+ smb.conf.
+
+The '--decrypt-samba-gpg' option triggers decryption of the
+Primary:SambaGPG buffer. Check with '--help' if this feature is available
+in your environment or not (the python-gpgme package is required). Please
+note that you might need to set the GNUPGHOME environment variable. If the
+decryption key has a passphrase you have to make sure that the GPG_AGENT_INFO
+environment variable has been set correctly and the passphrase is already
+known by the gpg-agent.
+
+The '--script' option specifies a custom script that is called whenever any
+of the dirsyncAttributes (see below) was changed. The script is called
+without any arguments. It gets the LDIF for exactly one object on STDIN.
+If the script processed the object successfully it has to respond with a
+single line starting with 'DONE-EXIT: ' followed by an optional message.
+
+Note that the script might be called without any password change, e.g. if
+the account was disabled (a userAccountControl change) or the
+sAMAccountName was changed. The objectGUID,isDeleted,isRecycled attributes
+are always returned as unique identifier of the account. It might be useful
+to also ask for non-password attributes like: objectSid, sAMAccountName,
+userPrincipalName, userAccountControl, pwdLastSet and msDS-KeyVersionNumber.
+Depending on the object, some attributes may not be present/available,
+but you always get the current state (and not a diff).
+
+If no '--script' option is specified, the LDIF will be printed on STDOUT or
+into the logfile.
+
+The default filter for the LDAP_SERVER_DIRSYNC_OID search is:
+(&(objectClass=user)(userAccountControl:1.2.840.113556.1.4.803:=512)\\
+ (!(sAMAccountName=krbtgt*)))
+This means only normal (non-krbtgt) user
+accounts are monitored. The '--filter' can modify that, e.g. if it's
+required to also sync computer accounts.
+
+
+Sync Loop Run
+=============
+
+This (default) mode runs in an endless loop waiting for password related
+changes in the active directory database. It makes use of the
+LDAP_SERVER_DIRSYNC_OID and LDAP_SERVER_NOTIFICATION_OID controls in order
+get changes in a reliable fashion. Objects are monitored for changes of the
+following dirsyncAttributes:
+
+ unicodePwd, dBCSPwd, supplementalCredentials, pwdLastSet, sAMAccountName,
+ userPrincipalName and userAccountControl.
+
+It recovers from LDAP disconnects and updates the cache in conservative way
+(in single steps after each successfully processed change). An error from
+the script (specified by '--script') will result in fatal error and this
+command will exit. But the cache state should be still valid and can be
+resumed in the next "Sync Loop Run".
+
+The '--logfile' option specifies an optional (required if '--daemon' is
+specified) logfile that takes all output of the command. The logfile is
+automatically reopened if fstat returns st_nlink == 0.
+
+The optional '--daemon' option will put the command into the background.
+
+You can stop the command without the '--daemon' option, also by hitting
+strg+c.
+
+If you specify the '--no-wait' option the command skips the
+LDAP_SERVER_NOTIFICATION_OID 'waiting' step and exit once
+all LDAP_SERVER_DIRSYNC_OID changes are consumed.
+
+Sync Loop Terminate
+===================
+
+In order to terminate an already running command (likely as daemon) the
+'--terminate' option can be used. This also requires the '--logfile' option
+to be specified.
+
+
+Example1:
+samba-tool user syncpasswords --cache-ldb-initialize \\
+ --attributes=virtualClearTextUTF8
+samba-tool user syncpasswords
+
+Example2:
+samba-tool user syncpasswords --cache-ldb-initialize \\
+ --attributes=objectGUID,objectSID,sAMAccountName,\\
+ userPrincipalName,userAccountControl,pwdLastSet,\\
+ msDS-KeyVersionNumber,virtualCryptSHA512 \\
+ --script=/path/to/my-custom-syncpasswords-script.py
+samba-tool user syncpasswords --daemon \\
+ --logfile=/var/log/samba/user-syncpasswords.log
+samba-tool user syncpasswords --terminate \\
+ --logfile=/var/log/samba/user-syncpasswords.log
+
+"""
+
+ synopsis = "%prog [--cache-ldb-initialize] [options]"
+
+ takes_optiongroups = {
+ "sambaopts": options.SambaOptions,
+ "versionopts": options.VersionOptions,
+ }
+
+ takes_options = [
+ Option("--cache-ldb-initialize",
+ help="Initialize the cache for the first time",
+ dest="cache_ldb_initialize", action="store_true"),
+ Option("--cache-ldb", help="optional LDB URL user-syncpasswords-cache.ldb", type=str,
+ metavar="CACHE-LDB-PATH", dest="cache_ldb"),
+ Option("-H", "--URL", help="optional LDB URL for a local ldapi server", type=str,
+ metavar="URL", dest="H"),
+ Option("--filter", help="optional LDAP filter to set password on", type=str,
+ metavar="LDAP-SEARCH-FILTER", dest="filter"),
+ Option("--attributes", type=str,
+ help=virtual_attributes_help,
+ metavar="ATTRIBUTELIST", dest="attributes"),
+ Option("--decrypt-samba-gpg",
+ help=decrypt_samba_gpg_help,
+ action="store_true", default=False, dest="decrypt_samba_gpg"),
+ Option("--script", help="Script that is called for each password change", type=str,
+ metavar="/path/to/syncpasswords.script", dest="script"),
+ Option("--no-wait", help="Don't block waiting for changes",
+ action="store_true", default=False, dest="nowait"),
+ Option("--logfile", type=str,
+ help="The logfile to use (required in --daemon mode).",
+ metavar="/path/to/syncpasswords.log", dest="logfile"),
+ Option("--daemon", help="daemonize after initial setup",
+ action="store_true", default=False, dest="daemon"),
+ Option("--terminate",
+ help="Send a SIGTERM to an already running (daemon) process",
+ action="store_true", default=False, dest="terminate"),
+ ]
+
    def run(self, cache_ldb_initialize=False, cache_ldb=None,
            H=None, filter=None,
            attributes=None, decrypt_samba_gpg=None,
            script=None, nowait=None, logfile=None, daemon=None, terminate=None,
            sambaopts=None, versionopts=None):
        """Run the user syncpasswords command.

        Three mutually exclusive modes, selected by the options:

        - ``--cache-ldb-initialize``: create and initialize the cache ldb
          (recording the samdb URL, dirsync filter/attributes and password
          attributes), then exit.
        - ``--terminate``: send SIGTERM to an already running (daemon)
          process identified via the pid lockfile, then exit.
        - otherwise: run a dirsync-based password sync loop, optionally
          waiting for further changes (``--no-wait`` disables waiting) and
          optionally daemonized with output redirected to ``--logfile``.
        """
        self.lp = sambaopts.get_loadparm()
        # Instance state shared by the nested helper functions below.
        self.logfile = None
        self.samdb_url = None
        self.samdb = None
        self.cache = None

        # Enforce the option matrix: some options are only valid together
        # with --cache-ldb-initialize, others only without it.
        if not cache_ldb_initialize:
            if attributes is not None:
                raise CommandError("--attributes is only allowed together with --cache-ldb-initialize")
            if decrypt_samba_gpg:
                raise CommandError("--decrypt-samba-gpg is only allowed together with --cache-ldb-initialize")
            if script is not None:
                raise CommandError("--script is only allowed together with --cache-ldb-initialize")
            if filter is not None:
                raise CommandError("--filter is only allowed together with --cache-ldb-initialize")
            if H is not None:
                raise CommandError("-H/--URL is only allowed together with --cache-ldb-initialize")
        else:
            if nowait is not False:
                raise CommandError("--no-wait is not allowed together with --cache-ldb-initialize")
            if logfile is not None:
                raise CommandError("--logfile is not allowed together with --cache-ldb-initialize")
            if daemon is not False:
                raise CommandError("--daemon is not allowed together with --cache-ldb-initialize")
            if terminate is not False:
                raise CommandError("--terminate is not allowed together with --cache-ldb-initialize")

        if nowait is True:
            if daemon is True:
                raise CommandError("--daemon is not allowed together with --no-wait")
            if terminate is not False:
                raise CommandError("--terminate is not allowed together with --no-wait")

        if terminate is True and daemon is True:
            raise CommandError("--terminate is not allowed together with --daemon")

        if daemon is True and logfile is None:
            raise CommandError("--daemon is only allowed together with --logfile")

        if terminate is True and logfile is None:
            raise CommandError("--terminate is only allowed together with --logfile")

        if script is not None:
            if not os.path.exists(script):
                raise CommandError("script[%s] does not exist!" % script)

            sync_command = "%s" % os.path.abspath(script)
        else:
            sync_command = None

        # Default dirsync filter: normal (non-krbtgt) user accounts.
        dirsync_filter = filter
        if dirsync_filter is None:
            dirsync_filter = "(&" + \
                             "(objectClass=user)" + \
                             "(userAccountControl:%s:=%u)" % (
                                 ldb.OID_COMPARATOR_AND, dsdb.UF_NORMAL_ACCOUNT) + \
                             "(!(sAMAccountName=krbtgt*))" + \
                             ")"

        # Attributes whose values must never appear in logs/output;
        # handle_object() redacts them before writing the dirsync LDIF.
        dirsync_secret_attrs = [
            "unicodePwd",
            "dBCSPwd",
            "supplementalCredentials",
        ]

        dirsync_attrs = dirsync_secret_attrs + [
            "pwdLastSet",
            "sAMAccountName",
            "userPrincipalName",
            "userAccountControl",
            "isDeleted",
            "isRecycled",
        ]

        password_attrs = None

        if cache_ldb_initialize:
            if H is None:
                H = "ldapi://%s" % os.path.abspath(self.lp.private_path("ldap_priv/ldapi"))

            if decrypt_samba_gpg and not gpg_decrypt:
                raise CommandError(decrypt_samba_gpg_help)

            password_attrs = self.parse_attributes(attributes)
            lower_attrs = [x.lower() for x in password_attrs]
            # We always return these in order to track deletions
            for a in ["objectGUID", "isDeleted", "isRecycled"]:
                if a.lower() not in lower_attrs:
                    password_attrs += [a]

        # Resolve the cache ldb location; remote URL schemes are rejected
        # because the cache must be a local tdb-backed database.
        if cache_ldb is not None:
            if cache_ldb.lower().startswith("ldapi://"):
                raise CommandError("--cache_ldb ldapi:// is not supported")
            elif cache_ldb.lower().startswith("ldap://"):
                raise CommandError("--cache_ldb ldap:// is not supported")
            elif cache_ldb.lower().startswith("ldaps://"):
                raise CommandError("--cache_ldb ldaps:// is not supported")
            elif cache_ldb.lower().startswith("tdb://"):
                pass
            else:
                if not os.path.exists(cache_ldb):
                    cache_ldb = self.lp.private_path(cache_ldb)
        else:
            cache_ldb = self.lp.private_path("user-syncpasswords-cache.ldb")

        # Pid lockfile used to detect/terminate a concurrently running copy.
        self.lockfile = "%s.pid" % cache_ldb

        def log_msg(msg):
            """Write a timestamped message to self.outf.

            When a logfile is attached (stdin/out/err are dup'ed onto it),
            a zero link count on fd 0 means the logfile was deleted or
            rotated away, so it is reopened and re-dup'ed first.
            """
            if self.logfile is not None:
                info = os.fstat(0)
                if info.st_nlink == 0:
                    logfile = self.logfile
                    self.logfile = None
                    log_msg("Closing logfile[%s] (st_nlink == 0)\n" % (logfile))
                    logfd = os.open(logfile, os.O_WRONLY | os.O_APPEND | os.O_CREAT, 0o600)
                    os.dup2(logfd, 0)
                    os.dup2(logfd, 1)
                    os.dup2(logfd, 2)
                    os.close(logfd)
                    log_msg("Reopened logfile[%s]\n" % (logfile))
                    self.logfile = logfile
            msg = "%s: pid[%d]: %s" % (
                time.ctime(),
                os.getpid(),
                msg)
            self.outf.write(msg)
            return

        def load_cache():
            """Open the cache ldb and load (or initialize) the sync state.

            On first use (cache_ldb_initialize) the KEY=USERSYNCPASSWORDS
            record is created from the current options; afterwards the
            stored record is authoritative and re-initialization is
            rejected.
            """
            cache_attrs = [
                "samdbUrl",
                "dirsyncFilter",
                "dirsyncAttribute",
                "dirsyncControl",
                "passwordAttribute",
                "decryptSambaGPG",
                "syncCommand",
                "currentPid",
            ]

            self.cache = Ldb(cache_ldb)
            self.cache_dn = ldb.Dn(self.cache, "KEY=USERSYNCPASSWORDS")
            res = self.cache.search(base=self.cache_dn, scope=ldb.SCOPE_BASE,
                                    attrs=cache_attrs)
            if len(res) == 1:
                try:
                    self.samdb_url = str(res[0]["samdbUrl"][0])
                except KeyError as e:
                    self.samdb_url = None
            else:
                self.samdb_url = None
            if self.samdb_url is None and not cache_ldb_initialize:
                raise CommandError("cache_ldb[%s] not initialized, use --cache-ldb-initialize the first time" % (
                                   cache_ldb))
            if self.samdb_url is not None and cache_ldb_initialize:
                raise CommandError("cache_ldb[%s] already initialized, --cache-ldb-initialize not allowed" % (
                                   cache_ldb))
            if self.samdb_url is None:
                # First run: persist the configuration in the cache record.
                # String values are base64-encoded (":: ") to be LDIF-safe.
                self.samdb_url = H
                self.dirsync_filter = dirsync_filter
                self.dirsync_attrs = dirsync_attrs
                self.dirsync_controls = ["dirsync:1:0:0", "extended_dn:1:0"]
                self.password_attrs = password_attrs
                self.decrypt_samba_gpg = decrypt_samba_gpg
                self.sync_command = sync_command
                add_ldif = "dn: %s\n" % self.cache_dn +\
                           "objectClass: userSyncPasswords\n" +\
                           "samdbUrl:: %s\n" % base64.b64encode(get_bytes(self.samdb_url)).decode('utf8') +\
                           "dirsyncFilter:: %s\n" % base64.b64encode(get_bytes(self.dirsync_filter)).decode('utf8') +\
                           "".join("dirsyncAttribute:: %s\n" % base64.b64encode(get_bytes(a)).decode('utf8') for a in self.dirsync_attrs) +\
                           "dirsyncControl: %s\n" % self.dirsync_controls[0] +\
                           "".join("passwordAttribute:: %s\n" % base64.b64encode(get_bytes(a)).decode('utf8') for a in self.password_attrs)
                if self.decrypt_samba_gpg:
                    add_ldif += "decryptSambaGPG: TRUE\n"
                else:
                    add_ldif += "decryptSambaGPG: FALSE\n"
                if self.sync_command is not None:
                    add_ldif += "syncCommand: %s\n" % self.sync_command
                add_ldif += "currentTime: %s\n" % ldb.timestring(int(time.time()))
                self.cache.add_ldif(add_ldif)
                self.current_pid = None
                self.outf.write("Initialized cache_ldb[%s]\n" % (cache_ldb))
                # Echo the stored record back to the user for confirmation.
                msgs = self.cache.parse_ldif(add_ldif)
                changetype, msg = next(msgs)
                ldif = self.cache.write_ldif(msg, ldb.CHANGETYPE_NONE)
                self.outf.write("%s" % ldif)
            else:
                # Subsequent runs: restore all sync state from the cache.
                self.dirsync_filter = str(res[0]["dirsyncFilter"][0])
                self.dirsync_attrs = []
                for a in res[0]["dirsyncAttribute"]:
                    self.dirsync_attrs.append(str(a))
                self.dirsync_controls = [str(res[0]["dirsyncControl"][0]), "extended_dn:1:0"]
                self.password_attrs = []
                for a in res[0]["passwordAttribute"]:
                    self.password_attrs.append(str(a))
                decrypt_string = str(res[0]["decryptSambaGPG"][0])
                assert(decrypt_string in ["TRUE", "FALSE"])
                if decrypt_string == "TRUE":
                    self.decrypt_samba_gpg = True
                else:
                    self.decrypt_samba_gpg = False
                if "syncCommand" in res[0]:
                    self.sync_command = str(res[0]["syncCommand"][0])
                else:
                    self.sync_command = None
                if "currentPid" in res[0]:
                    self.current_pid = int(res[0]["currentPid"][0])
                else:
                    self.current_pid = None
                log_msg("Using cache_ldb[%s]\n" % (cache_ldb))

            return

        def run_sync_command(dn, ldif):
            """Feed one object's LDIF to the configured --script via stdin.

            The script must answer with a line starting with "DONE-EXIT: ";
            anything else is treated as failure and raised.
            """
            log_msg("Call Popen[%s] for %s\n" % (self.sync_command, dn))
            sync_command_p = Popen(self.sync_command,
                                   stdin=PIPE,
                                   stdout=PIPE,
                                   stderr=STDOUT)

            res = sync_command_p.poll()
            assert res is None

            input = "%s" % (ldif)
            reply = sync_command_p.communicate(
                input.encode('utf-8'))[0].decode('utf-8')
            log_msg("%s\n" % (reply))
            res = sync_command_p.poll()
            if res is None:
                sync_command_p.terminate()
            res = sync_command_p.wait()

            if reply.startswith("DONE-EXIT: "):
                return

            log_msg("RESULT: %s\n" % (res))
            raise Exception("ERROR: %s - %s\n" % (res, reply))

        def handle_object(idx, dirsync_obj):
            """Process one dirsync result: fetch its password attributes
            and either print them or pass them to the sync command.

            Secret attributes are redacted before the dirsync LDIF is
            logged; the krbtgt account is skipped entirely.
            """
            binary_guid = dirsync_obj.dn.get_extended_component("GUID")
            guid = ndr_unpack(misc.GUID, binary_guid)
            binary_sid = dirsync_obj.dn.get_extended_component("SID")
            sid = ndr_unpack(security.dom_sid, binary_sid)
            domain_sid, rid = sid.split()
            if rid == security.DOMAIN_RID_KRBTGT:
                log_msg("# Dirsync[%d] SKIP: DOMAIN_RID_KRBTGT\n\n" % (idx))
                return
            for a in list(dirsync_obj.keys()):
                for h in dirsync_secret_attrs:
                    if a.lower() == h.lower():
                        del dirsync_obj[a]
                        dirsync_obj["# %s::" % a] = ["REDACTED SECRET ATTRIBUTE"]
            dirsync_ldif = self.samdb.write_ldif(dirsync_obj, ldb.CHANGETYPE_NONE)
            log_msg("# Dirsync[%d] %s %s\n%s" % (idx, guid, sid, dirsync_ldif))
            obj = self.get_account_attributes(self.samdb,
                                              username="%s" % sid,
                                              basedn="<GUID=%s>" % guid,
                                              filter="(objectClass=user)",
                                              scope=ldb.SCOPE_BASE,
                                              attrs=self.password_attrs,
                                              decrypt=self.decrypt_samba_gpg)
            ldif = self.samdb.write_ldif(obj, ldb.CHANGETYPE_NONE)
            log_msg("# Passwords[%d] %s %s\n" % (idx, guid, sid))
            if self.sync_command is None:
                self.outf.write("%s" % (ldif))
                return
            self.outf.write("# attrs=%s\n" % (sorted(obj.keys())))
            run_sync_command(obj.dn, ldif)

        def check_current_pid_conflict(terminate):
            """Check the pid lockfile for a concurrently running process.

            Returns True if another process holds the lock (its pid is
            stored in self.current_pid), False otherwise. On the False
            path (unless terminating) the function returns while holding
            a shared lock on the file.
            """
            flags = os.O_RDWR
            if not terminate:
                # Only create the lockfile when we intend to run.
                flags |= os.O_CREAT

            try:
                self.lockfd = os.open(self.lockfile, flags, 0o600)
            except IOError as e4:
                (err, msg) = e4.args
                if err == errno.ENOENT:
                    if terminate:
                        # No lockfile means nothing to terminate.
                        return False
                log_msg("check_current_pid_conflict: failed to open[%s] - %s (%d)" %
                        (self.lockfile, msg, err))
                raise

            got_exclusive = False
            try:
                fcntl.lockf(self.lockfd, fcntl.LOCK_EX | fcntl.LOCK_NB)
                got_exclusive = True
            except IOError as e5:
                (err, msg) = e5.args
                if err != errno.EACCES and err != errno.EAGAIN:
                    log_msg("check_current_pid_conflict: failed to get exclusive lock[%s] - %s (%d)" %
                            (self.lockfile, msg, err))
                    raise

            if not got_exclusive:
                # Someone else holds the lock; read their pid from the file.
                buf = os.read(self.lockfd, 64)
                self.current_pid = None
                try:
                    self.current_pid = int(buf)
                except ValueError as e:
                    pass
                if self.current_pid is not None:
                    return True

            if got_exclusive and terminate:
                # Stale lockfile from a dead process: clear it and report
                # no conflict.
                try:
                    os.ftruncate(self.lockfd, 0)
                except IOError as e2:
                    (err, msg) = e2.args
                    log_msg("check_current_pid_conflict: failed to truncate [%s] - %s (%d)" %
                            (self.lockfile, msg, err))
                    raise
                os.close(self.lockfd)
                self.lockfd = -1
                return False

            try:
                fcntl.lockf(self.lockfd, fcntl.LOCK_SH)
            except IOError as e6:
                (err, msg) = e6.args
                log_msg("check_current_pid_conflict: failed to get shared lock[%s] - %s (%d)" %
                        (self.lockfile, msg, err))

            # We leave the function with the shared lock.
            return False

        def update_pid(pid):
            """Record our pid (or clear it with None) in both the lockfile
            and the cache record's currentPid attribute."""
            if self.lockfd != -1:
                got_exclusive = False
                # Try 5 times to get the exclusive lock.
                for i in range(0, 5):
                    try:
                        fcntl.lockf(self.lockfd, fcntl.LOCK_EX | fcntl.LOCK_NB)
                        got_exclusive = True
                    except IOError as e:
                        (err, msg) = e.args
                        if err != errno.EACCES and err != errno.EAGAIN:
                            log_msg("update_pid(%r): failed to get exclusive lock[%s] - %s (%d)" %
                                    (pid, self.lockfile, msg, err))
                            raise
                    if got_exclusive:
                        break
                    time.sleep(1)
                if not got_exclusive:
                    log_msg("update_pid(%r): failed to get exclusive lock[%s]" %
                            (pid, self.lockfile))
                    raise CommandError("update_pid(%r): failed to get "
                                       "exclusive lock[%s] after 5 seconds" %
                                       (pid, self.lockfile))

                if pid is not None:
                    buf = "%d\n" % pid
                else:
                    buf = None
                try:
                    os.ftruncate(self.lockfd, 0)
                    if buf is not None:
                        os.write(self.lockfd, get_bytes(buf))
                except IOError as e3:
                    (err, msg) = e3.args
                    log_msg("check_current_pid_conflict: failed to write pid to [%s] - %s (%d)" %
                            (self.lockfile, msg, err))
                    raise
            self.current_pid = pid
            if self.current_pid is not None:
                log_msg("currentPid: %d\n" % self.current_pid)

            # Mirror the pid into the cache record (replace with nothing
            # deletes currentPid when pid is None).
            modify_ldif = "dn: %s\n" % (self.cache_dn) +\
                          "changetype: modify\n" +\
                          "replace: currentPid\n"
            if self.current_pid is not None:
                modify_ldif += "currentPid: %d\n" % (self.current_pid)
            modify_ldif += "replace: currentTime\n" +\
                           "currentTime: %s\n" % ldb.timestring(int(time.time()))
            self.cache.modify_ldif(modify_ldif)
            return

        def update_cache(res_controls):
            """Persist the dirsync cookie returned by the server so the
            next search resumes where this one left off."""
            assert len(res_controls) > 0
            # LDAP_SERVER_DIRSYNC_OID
            assert res_controls[0].oid == "1.2.840.113556.1.4.841"
            res_controls[0].critical = True
            self.dirsync_controls = [str(res_controls[0]), "extended_dn:1:0"]
            # This cookie can be extremely long
            # log_msg("dirsyncControls: %r\n" % self.dirsync_controls)

            modify_ldif = "dn: %s\n" % (self.cache_dn) +\
                          "changetype: modify\n" +\
                          "replace: dirsyncControl\n" +\
                          "dirsyncControl: %s\n" % (self.dirsync_controls[0]) +\
                          "replace: currentTime\n" +\
                          "currentTime: %s\n" % ldb.timestring(int(time.time()))
            self.cache.modify_ldif(modify_ldif)
            return

        def check_object(dirsync_obj, res_controls):
            """Return True if this object was already handled for the
            current dirsync cookie (per-SID record in the cache)."""
            assert len(res_controls) > 0
            assert res_controls[0].oid == "1.2.840.113556.1.4.841"

            binary_sid = dirsync_obj.dn.get_extended_component("SID")
            sid = ndr_unpack(security.dom_sid, binary_sid)
            dn = "KEY=%s" % sid
            lastCookie = str(res_controls[0])

            res = self.cache.search(base=dn, scope=ldb.SCOPE_BASE,
                                    expression="(lastCookie=%s)" % (
                                        ldb.binary_encode(lastCookie)),
                                    attrs=[])
            if len(res) == 1:
                return True
            return False

        def update_object(dirsync_obj, res_controls):
            """Record (add or update) the per-SID lastCookie so this object
            is not handled twice for the same dirsync cookie.

            Failures are deliberately swallowed (transaction cancelled):
            losing the marker only risks a redundant re-sync.
            """
            assert len(res_controls) > 0
            assert res_controls[0].oid == "1.2.840.113556.1.4.841"

            binary_sid = dirsync_obj.dn.get_extended_component("SID")
            sid = ndr_unpack(security.dom_sid, binary_sid)
            dn = "KEY=%s" % sid
            lastCookie = str(res_controls[0])

            self.cache.transaction_start()
            try:
                res = self.cache.search(base=dn, scope=ldb.SCOPE_BASE,
                                        expression="(objectClass=*)",
                                        attrs=["lastCookie"])
                if len(res) == 0:
                    add_ldif = "dn: %s\n" % (dn) +\
                               "objectClass: userCookie\n" +\
                               "lastCookie: %s\n" % (lastCookie) +\
                               "currentTime: %s\n" % ldb.timestring(int(time.time()))
                    self.cache.add_ldif(add_ldif)
                else:
                    modify_ldif = "dn: %s\n" % (dn) +\
                                  "changetype: modify\n" +\
                                  "replace: lastCookie\n" +\
                                  "lastCookie: %s\n" % (lastCookie) +\
                                  "replace: currentTime\n" +\
                                  "currentTime: %s\n" % ldb.timestring(int(time.time()))
                    self.cache.modify_ldif(modify_ldif)
                self.cache.transaction_commit()
            except Exception as e:
                self.cache.transaction_cancel()

            return

        def dirsync_loop():
            """Repeat the dirsync search, handling each changed object,
            until the server returns an empty result set."""
            while True:
                res = self.samdb.search(expression=str(self.dirsync_filter),
                                        scope=ldb.SCOPE_SUBTREE,
                                        attrs=self.dirsync_attrs,
                                        controls=self.dirsync_controls)
                log_msg("dirsync_loop(): results %d\n" % len(res))
                ri = 0
                for r in res:
                    done = check_object(r, res.controls)
                    if not done:
                        handle_object(ri, r)
                        update_object(r, res.controls)
                    ri += 1
                update_cache(res.controls)
                if len(res) == 0:
                    break

        def sync_loop(wait):
            """Run one dirsync pass; with wait=True keep monitoring via an
            LDAP notification search and re-run dirsync on every change."""
            notify_attrs = ["name", "uSNCreated", "uSNChanged", "objectClass"]
            notify_controls = ["notification:1", "show_recycled:1"]
            # timeout=-1: block indefinitely waiting for notifications.
            notify_handle = self.samdb.search_iterator(expression="objectClass=*",
                                                       scope=ldb.SCOPE_SUBTREE,
                                                       attrs=notify_attrs,
                                                       controls=notify_controls,
                                                       timeout=-1)

            if wait is True:
                log_msg("Resuming monitoring\n")
            else:
                log_msg("Getting changes\n")
            self.outf.write("dirsyncFilter: %s\n" % self.dirsync_filter)
            self.outf.write("dirsyncControls: %r\n" % self.dirsync_controls)
            self.outf.write("syncCommand: %s\n" % self.sync_command)
            dirsync_loop()

            if wait is not True:
                return

            for msg in notify_handle:
                if not isinstance(msg, ldb.Message):
                    self.outf.write("referral: %s\n" % msg)
                    continue
                created = msg.get("uSNCreated")[0]
                changed = msg.get("uSNChanged")[0]
                log_msg("# Notify %s uSNCreated[%s] uSNChanged[%s]\n" %
                        (msg.dn, created, changed))

                dirsync_loop()

            res = notify_handle.result()

        def daemonize():
            """Detach from the controlling terminal via the classic
            double-fork/setsid sequence; the surviving grandchild reloads
            the cache (ldb handles must not cross fork())."""
            self.samdb = None
            self.cache = None
            orig_pid = os.getpid()
            pid = os.fork()
            if pid == 0:
                os.setsid()
                pid = os.fork()
                if pid == 0:  # Actual daemon
                    pid = os.getpid()
                    log_msg("Daemonized as pid %d (from %d)\n" % (pid, orig_pid))
                    load_cache()
                    return
            # Parent and first child exit immediately.
            os._exit(0)

        # Mode 1: initialize the cache and exit.
        if cache_ldb_initialize:
            self.samdb_url = H
            self.samdb = self.connect_for_passwords(url=self.samdb_url,
                                                    verbose=True)
            load_cache()
            return

        # Redirect stdin/stdout/stderr to the logfile and close every
        # other inherited file descriptor.
        if logfile is not None:
            import resource      # Resource usage information.
            maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
            if maxfd == resource.RLIM_INFINITY:
                maxfd = 1024  # Rough guess at maximum number of open file descriptors.
            logfd = os.open(logfile, os.O_WRONLY | os.O_APPEND | os.O_CREAT, 0o600)
            self.outf.write("Using logfile[%s]\n" % logfile)
            for fd in range(0, maxfd):
                if fd == logfd:
                    continue
                try:
                    os.close(fd)
                except OSError:
                    pass
            os.dup2(logfd, 0)
            os.dup2(logfd, 1)
            os.dup2(logfd, 2)
            os.close(logfd)
            log_msg("Attached to logfile[%s]\n" % (logfile))
            self.logfile = logfile

        load_cache()
        conflict = check_current_pid_conflict(terminate)
        # Mode 2: terminate a running instance and exit.
        if terminate:
            if self.current_pid is None:
                log_msg("No process running.\n")
                return
            if not conflict:
                log_msg("Process %d is not running anymore.\n" % (
                        self.current_pid))
                update_pid(None)
                return
            log_msg("Sending SIGTERM to process %d.\n" % (
                    self.current_pid))
            os.kill(self.current_pid, signal.SIGTERM)
            return
        if conflict:
            raise CommandError("Exiting pid %d, command is already running as pid %d" % (
                               os.getpid(), self.current_pid))

        if daemon is True:
            daemonize()
        update_pid(os.getpid())

        # Mode 3: the sync loop proper. Reconnect with exponential backoff
        # (capped at retry_sleep_max) whenever the samdb connection drops.
        wait = True
        while wait is True:
            retry_sleep_min = 1
            retry_sleep_max = 600
            if nowait is True:
                wait = False
                retry_sleep = 0
            else:
                retry_sleep = retry_sleep_min

            while self.samdb is None:
                if retry_sleep != 0:
                    log_msg("Wait before connect - sleep(%d)\n" % retry_sleep)
                    time.sleep(retry_sleep)
                retry_sleep = retry_sleep * 2
                if retry_sleep >= retry_sleep_max:
                    retry_sleep = retry_sleep_max
                log_msg("Connecting to '%s'\n" % self.samdb_url)
                try:
                    self.samdb = self.connect_for_passwords(url=self.samdb_url)
                except Exception as msg:
                    self.samdb = None
                    log_msg("Connect to samdb Exception => (%s)\n" % msg)
                    if wait is not True:
                        raise

            try:
                sync_loop(wait)
            except ldb.LdbError as e7:
                (enum, estr) = e7.args
                # Drop the connection and retry via the backoff loop above.
                self.samdb = None
                log_msg("ldb.LdbError(%d) => (%s)\n" % (enum, estr))

        update_pid(None)
        return
diff --git a/python/samba/netcmd/user/rename.py b/python/samba/netcmd/user/rename.py
new file mode 100644
index 0000000..81d5de3
--- /dev/null
+++ b/python/samba/netcmd/user/rename.py
@@ -0,0 +1,249 @@
+# user management
+#
+# user rename command
+#
+# Copyright Jelmer Vernooij 2010 <jelmer@samba.org>
+# Copyright Theresa Halloran 2011 <theresahalloran@gmail.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import samba.getopt as options
+from samba import dsdb, ldb
+from samba.auth import system_session
+from samba.netcmd import Command, CommandError, Option
+from samba.samdb import SamDB
+
+
class cmd_user_rename(Command):
    """Rename a user and related attributes.

    This command allows to set the user's name related attributes. The user's
    CN will be renamed automatically.
    The user's new CN will be made up by combining the given-name, initials
    and surname. A dot ('.') will be appended to the initials automatically
    if required.
    Use the --force-new-cn option to specify the new CN manually and the
    --reset-cn option to reset this change.

    Use an empty attribute value to remove the specified attribute.

    The username specified on the command is the sAMAccountName.

    The command may be run locally from the root userid or another authorized
    userid.

    The -H or --URL= option can be used to execute the command against a remote
    server.

    Example1:
    samba-tool user rename johndoe --surname='Bloggs'

    Example1 shows how to change the surname of a user 'johndoe' to 'Bloggs' on
    the local server. The user's CN will be renamed automatically, based on
    the given name, initials and surname.

    Example2:
    samba-tool user rename johndoe --force-new-cn='John Bloggs (Sales)' \\
        --surname=Bloggs -H ldap://samba.samdom.example.com -U administrator

    Example2 shows how to rename the CN of a user 'johndoe' to 'John Bloggs (Sales)'.
    Additionally the surname ('sn' attribute) is set to 'Bloggs'.
    The -H parameter is used to specify the remote target server.
    """

    synopsis = "%prog <username> [options]"

    takes_options = [
        Option("-H", "--URL",
               help="LDB URL for database or target server",
               type=str, metavar="URL", dest="H"),
        Option("--surname",
               help="New surname",
               type=str),
        Option("--given-name",
               help="New given name",
               type=str),
        Option("--initials",
               help="New initials",
               type=str),
        Option("--force-new-cn",
               help="Specify a new CN (RDN) instead of using a combination "
                    "of the given name, initials and surname.",
               type=str, metavar="NEW_CN"),
        Option("--reset-cn",
               help="Set the CN (RDN) to the combination of the given name, "
                    "initials and surname. Use this option to reset "
                    "the changes made with the --force-new-cn option.",
               action="store_true"),
        Option("--display-name",
               help="New display name",
               type=str),
        Option("--mail-address",
               help="New email address",
               type=str),
        Option("--samaccountname",
               help="New account name (sAMAccountName/logon name)",
               type=str),
        Option("--upn",
               help="New user principal name",
               type=str),
    ]

    takes_args = ["username"]
    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "credopts": options.CredentialsOptions,
        "versionopts": options.VersionOptions,
    }

    def run(self, username, credopts=None, sambaopts=None,
            versionopts=None, H=None, surname=None, given_name=None,
            initials=None, display_name=None, mail_address=None,
            samaccountname=None, upn=None, force_new_cn=None,
            reset_cn=None):
        """Rename the user's name-related attributes and, when needed,
        its CN (RDN), inside a single transaction.

        :param username: sAMAccountName of the user to rename
        :raises CommandError: on conflicting options, unknown user,
            invalid UPN suffix, or a failed modify/rename
        """
        # illegal option combinations; an empty value would delete a
        # protected attribute, which is never allowed
        if force_new_cn and reset_cn:
            raise CommandError("It is not allowed to specify --force-new-cn "
                               "together with --reset-cn.")
        if force_new_cn == "":
            raise CommandError("Failed to rename user - delete protected "
                               "attribute 'CN'")
        if samaccountname == "":
            raise CommandError("Failed to rename user - delete protected "
                               "attribute 'sAMAccountName'")

        lp = sambaopts.get_loadparm()
        creds = credopts.get_credentials(lp, fallback_machine=True)
        samdb = SamDB(url=H, session_info=system_session(),
                      credentials=creds, lp=lp)
        domain_dn = ldb.Dn(samdb, samdb.domain_dn())

        # NOTE: renamed from 'filter' to avoid shadowing the builtin
        search_filter = ("(&(sAMAccountType=%d)(sAMAccountName=%s))" %
                         (dsdb.ATYPE_NORMAL_ACCOUNT, ldb.binary_encode(username)))
        try:
            res = samdb.search(base=domain_dn,
                               expression=search_filter,
                               scope=ldb.SCOPE_SUBTREE,
                               attrs=["sAMAccountName",
                                      "givenName",
                                      "initials",
                                      "sn",
                                      "mail",
                                      "userPrincipalName",
                                      "displayName",
                                      "cn"])
            old_user = res[0]
            user_dn = old_user.dn
        except IndexError:
            raise CommandError('Unable to find user "%s"' % (username))

        user_parent_dn = user_dn.parent()
        old_cn = old_user["cn"][0]

        # use the sAMAccountname as CN if no name is given
        new_fallback_cn = samaccountname if samaccountname is not None \
            else old_user["sAMAccountName"]

        if force_new_cn is not None:
            new_user_cn = force_new_cn
        else:
            new_user_cn = samdb.fullname_from_names(old_attrs=old_user,
                                                    given_name=given_name,
                                                    initials=initials,
                                                    surname=surname,
                                                    fallback_default=new_fallback_cn)

        # CN must change, if the new CN is different and the old CN is the
        # standard CN or the change is forced with force-new-cn or reset-cn
        expected_cn = samdb.fullname_from_names(old_attrs=old_user,
                                                fallback_default=old_user["sAMAccountName"])
        must_change_cn = str(old_cn) != str(new_user_cn) and (
            str(old_cn) == str(expected_cn) or
            reset_cn or bool(force_new_cn))

        new_user_dn = ldb.Dn(samdb, "CN=%s" % new_user_cn)
        new_user_dn.add_base(user_parent_dn)

        # validate the UPN suffix before touching the database
        if upn is not None:
            if not self.is_valid_upn(samdb, upn):
                raise CommandError('"%s" is not a valid upn. '
                                   'You can manage the upn '
                                   'suffixes with the "samba-tool domain '
                                   'trust namespaces" command.' % upn)

        # build a single modify message; prepare_attr_replace only adds an
        # element when the value actually changes (empty value => delete)
        user_attrs = ldb.Message()
        user_attrs.dn = user_dn
        samdb.prepare_attr_replace(user_attrs, old_user, "givenName", given_name)
        samdb.prepare_attr_replace(user_attrs, old_user, "initials", initials)
        samdb.prepare_attr_replace(user_attrs, old_user, "sn", surname)
        samdb.prepare_attr_replace(user_attrs, old_user, "displayName", display_name)
        samdb.prepare_attr_replace(user_attrs, old_user, "mail", mail_address)
        samdb.prepare_attr_replace(user_attrs, old_user, "sAMAccountName", samaccountname)
        samdb.prepare_attr_replace(user_attrs, old_user, "userPrincipalName", upn)

        attributes_changed = len(user_attrs) > 0

        # apply attribute changes and the rename atomically
        samdb.transaction_start()
        try:
            if attributes_changed:
                samdb.modify(user_attrs)
            if must_change_cn:
                samdb.rename(user_dn, new_user_dn)
        except Exception as e:
            samdb.transaction_cancel()
            raise CommandError('Failed to rename user "%s"' % username, e)
        samdb.transaction_commit()

        if must_change_cn:
            self.outf.write('Renamed CN of user "%s" from "%s" to "%s" '
                            'successfully\n' % (username, old_cn, new_user_cn))

        if attributes_changed:
            self.outf.write('Following attributes of user "%s" have been '
                            'changed successfully:\n' % (username))
            for attr in user_attrs.keys():
                if (attr == "dn"):
                    continue
                self.outf.write('%s: %s\n' % (attr, user_attrs[attr]
                                if user_attrs[attr] else '[removed]'))

    def is_valid_upn(self, samdb, upn):
        """Return True if *upn* has the form localpart@suffix and the
        suffix is the domain DNS name, the forest DNS name, or one of the
        uPNSuffixes stored on the Partitions container."""
        domain_dns = samdb.domain_dns_name()
        forest_dns = samdb.forest_dns_name()
        upn_suffixes = [domain_dns, forest_dns]

        config_basedn = samdb.get_config_basedn()
        partitions_dn = "CN=Partitions,%s" % config_basedn
        res = samdb.search(
            base=partitions_dn,
            scope=ldb.SCOPE_BASE,
            expression="(objectClass=crossRefContainer)",
            attrs=['uPNSuffixes'])

        if len(res) >= 1:
            msg = res[0]
            if 'uPNSuffixes' in msg:
                for s in msg['uPNSuffixes']:
                    upn_suffixes.append(str(s).lower())

        upn_split = upn.split('@')
        if len(upn_split) < 2:
            return False

        # the suffix is everything after the last '@'
        upn_suffix = upn_split[-1].lower()
        if upn_suffix not in upn_suffixes:
            return False

        return True
diff --git a/python/samba/netcmd/user/sensitive.py b/python/samba/netcmd/user/sensitive.py
new file mode 100644
index 0000000..11edb2f
--- /dev/null
+++ b/python/samba/netcmd/user/sensitive.py
@@ -0,0 +1,83 @@
+# user management
+#
+# user sensitive command
+#
+# Copyright Jelmer Vernooij 2010 <jelmer@samba.org>
+# Copyright Theresa Halloran 2011 <theresahalloran@gmail.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import samba.getopt as options
+from samba import dsdb, ldb
+from samba.auth import system_session
+from samba.netcmd import Command, CommandError, Option
+from samba.samdb import SamDB
+
+
class cmd_user_sensitive(Command):
    """Set/unset or show UF_NOT_DELEGATED for an account."""

    synopsis = "%prog <accountname> [(show|on|off)] [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "credopts": options.CredentialsOptions,
        "versionopts": options.VersionOptions,
    }

    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server", type=str,
               metavar="URL", dest="H"),
    ]

    takes_args = ["accountname", "cmd"]

    def run(self, accountname, cmd, H=None, credopts=None, sambaopts=None,
            versionopts=None):
        """Show or toggle the UF_NOT_DELEGATED bit of an account's
        userAccountControl attribute."""
        # Reject anything but the three supported subcommands up front.
        if cmd not in ("show", "on", "off"):
            raise CommandError("invalid argument: '%s' (choose from 'show', 'on', 'off')" % cmd)

        lp = sambaopts.get_loadparm()
        creds = credopts.get_credentials(lp, fallback_machine=True)
        samdb = SamDB(url=H, session_info=system_session(),
                      credentials=creds, lp=lp)

        account_filter = "sAMAccountName=%s" % ldb.binary_encode(accountname)
        flag = dsdb.UF_NOT_DELEGATED

        if cmd == "show":
            # Report the current state of the flag and return.
            matches = samdb.search(scope=ldb.SCOPE_SUBTREE,
                                   expression=account_filter,
                                   attrs=["userAccountControl"])
            if not matches:
                raise Exception("Unable to find account where '%s'" % account_filter)

            uac = int(matches[0].get("userAccountControl")[0])

            self.outf.write("Account-DN: %s\n" % str(matches[0].dn))
            self.outf.write("UF_NOT_DELEGATED: %s\n" % bool(uac & flag))

            return

        # cmd is either "on" or "off" at this point.
        try:
            samdb.toggle_userAccountFlags(account_filter, flag,
                                          flags_str="Not-Delegated",
                                          on=(cmd == "on"), strict=True)
        except Exception as err:
            raise CommandError(err)
diff --git a/python/samba/netcmd/user/setexpiry.py b/python/samba/netcmd/user/setexpiry.py
new file mode 100644
index 0000000..7f4af6e
--- /dev/null
+++ b/python/samba/netcmd/user/setexpiry.py
@@ -0,0 +1,101 @@
+# user management
+#
+# set user expiry
+#
+# Copyright Jelmer Vernooij 2010 <jelmer@samba.org>
+# Copyright Theresa Halloran 2011 <theresahalloran@gmail.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import samba.getopt as options
+from samba import ldb
+from samba.auth import system_session
+from samba.netcmd import Command, CommandError, Option
+from samba.samdb import SamDB
+
+
class cmd_user_setexpiry(Command):
    """Set the expiration of a user account.

The user can either be specified by their sAMAccountName or using the --filter option.

When a user account expires, it becomes disabled and the user is unable to logon. The administrator may issue the samba-tool user enable command to enable the account for logon. The permissions and memberships associated with the account are retained when the account is enabled.

The command may be run from the root userid or another authorized userid. The -H or --URL= option can be used to execute the command on a remote server.

Example1:
samba-tool user setexpiry User1 --days=20 --URL=ldap://samba.samdom.example.com --username=administrator --password=passw1rd

Example1 shows how to set the expiration of an account in a remote LDAP server. The --URL parameter is used to specify the remote target server. The --username= and --password= options are used to pass the username and password of a user that exists on the remote server and is authorized to update that server.

Example2:
sudo samba-tool user setexpiry User2 --noexpiry

Example2 shows how to set the account expiration of user User2 so it will never expire. The user in this example resides on the local server. sudo is used so a user may run the command as root.

Example3:
samba-tool user setexpiry --days=20 --filter='(samaccountname=User3)'

Example3 shows how to set the account expiration date to end of day 20 days from the current day. The username or sAMAccountName is specified using the --filter= parameter and the username in this example is User3.

Example4:
samba-tool user setexpiry --noexpiry User4
Example4 shows how to set the account expiration so that it will never expire. The username and sAMAccountName in this example is User4.

"""
    synopsis = "%prog (<username>|--filter <filter>) [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server", type=str,
               metavar="URL", dest="H"),
        Option("--filter", help="LDAP Filter to set password on", type=str),
        Option("--days", help="Days to expiry", type=int, default=0),
        Option("--noexpiry", help="Password does never expire", action="store_true", default=False),
    ]

    takes_args = ["username?"]

    def run(self, username=None, sambaopts=None, credopts=None,
            versionopts=None, H=None, filter=None, days=None, noexpiry=None):
        """Set (or disable) the account expiry for the matching user."""
        # One of the two selectors is mandatory.
        if username is None and filter is None:
            raise CommandError("Either the username or '--filter' must be specified!")

        lp = sambaopts.get_loadparm()
        creds = credopts.get_credentials(lp)

        # Without an explicit filter, match on the sAMAccountName.
        if filter is None:
            filter = ("(&(objectClass=user)(sAMAccountName=%s))"
                      % (ldb.binary_encode(username)))

        samdb = SamDB(url=H, session_info=system_session(),
                      credentials=creds, lp=lp)

        # The expiry is expressed in seconds from now.
        expiry_seconds = days * 24 * 3600
        try:
            samdb.setexpiry(filter, expiry_seconds, no_expiry_req=noexpiry)
        except Exception as msg:
            # FIXME: Catch more specific exception
            raise CommandError("Failed to set expiry for user '%s': %s" % (
                username or filter, msg))

        target = username or filter
        if noexpiry:
            self.outf.write("Expiry for user '%s' disabled.\n" % (target))
        else:
            self.outf.write("Expiry for user '%s' set to %u days.\n" % (
                target, days))
diff --git a/python/samba/netcmd/user/setpassword.py b/python/samba/netcmd/user/setpassword.py
new file mode 100644
index 0000000..6069a01
--- /dev/null
+++ b/python/samba/netcmd/user/setpassword.py
@@ -0,0 +1,161 @@
+# user management
+#
+# user setpassword command
+#
+# Copyright Jelmer Vernooij 2010 <jelmer@samba.org>
+# Copyright Theresa Halloran 2011 <theresahalloran@gmail.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from getpass import getpass
+
+import samba.getopt as options
+from samba import dsdb, generate_random_password, gensec, ldb
+from samba.auth import system_session
+from samba.netcmd import Command, CommandError, Option
+from samba.samdb import SamDB
+
+
class cmd_user_setpassword(Command):
    """Set or reset the password of a user account.

This command sets or resets the logon password for a user account. The username specified on the command is the sAMAccountName. The username may also be specified using the --filter option.

If the password is not specified on the command through the --newpassword parameter, the user is prompted for the password to be entered through the command line.

It is good security practice for the administrator to use the --must-change-at-next-login option which requires that when the user logs on to the account for the first time following the password change, he/she must change the password.

The command may be run from the root userid or another authorized userid. The -H or --URL= option can be used to execute the command against a remote server.

Example1:
samba-tool user setpassword TestUser1 --newpassword=passw0rd --URL=ldap://samba.samdom.example.com -Uadministrator%passw1rd

Example1 shows how to set the password of user TestUser1 on a remote LDAP server. The --URL parameter is used to specify the remote target server. The -U option is used to pass the username and password of a user that exists on the remote server and is authorized to update the server.

Example2:
sudo samba-tool user setpassword TestUser2 --newpassword=passw0rd --must-change-at-next-login

Example2 shows how an administrator would reset the TestUser2 user's password to passw0rd. The user is running under the root userid using the sudo command. In this example the user TestUser2 must change their password the next time they logon to the account.

Example3:
samba-tool user setpassword --filter=samaccountname=TestUser3 --newpassword=passw0rd

Example3 shows how an administrator would reset TestUser3 user's password to passw0rd using the --filter= option to specify the username.

"""
    synopsis = "%prog (<username>|--filter <filter>) [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server", type=str,
               metavar="URL", dest="H"),
        Option("--filter", help="LDAP Filter to set password on", type=str),
        Option("--newpassword", help="Set password", type=str),
        Option("--must-change-at-next-login",
               help="Force password to be changed on next login",
               action="store_true"),
        Option("--random-password",
               help="Generate random password",
               action="store_true"),
        Option("--smartcard-required",
               help="Require a smartcard for interactive logons",
               action="store_true"),
        Option("--clear-smartcard-required",
               help="Don't require a smartcard for interactive logons",
               action="store_true"),
    ]

    takes_args = ["username?"]

    def run(self, username=None, filter=None, credopts=None, sambaopts=None,
            versionopts=None, H=None, newpassword=None,
            must_change_at_next_login=False, random_password=False,
            smartcard_required=False, clear_smartcard_required=False):
        """Set or reset the password (or smartcard requirement) for one user.

        Raises CommandError when neither username nor --filter is given,
        when mutually exclusive options are combined, or when the update
        against the SAM database fails.
        """
        # The target account must be identified one way or the other.
        if filter is None and username is None:
            raise CommandError("Either the username or '--filter' must be specified!")

        password = newpassword

        # --smartcard-required is mutually exclusive with the options that
        # imply a conventional password: once UF_SMARTCARD_REQUIRED is set,
        # no caller-chosen password is stored and no password change can be
        # demanded at next login.
        if smartcard_required:
            if password is not None and password != '':
                raise CommandError('It is not allowed to specify '
                                   '--newpassword '
                                   'together with --smartcard-required.')
            if must_change_at_next_login:
                raise CommandError('It is not allowed to specify '
                                   '--must-change-at-next-login '
                                   'together with --smartcard-required.')
            if clear_smartcard_required:
                raise CommandError('It is not allowed to specify '
                                   '--clear-smartcard-required '
                                   'together with --smartcard-required.')

        # --random-password is effectively ignored with --smartcard-required:
        # the smartcard branch below never sets a password at all.
        if random_password and not smartcard_required:
            password = generate_random_password(128, 255)

        # Prompt interactively until a non-empty, confirmed password is
        # obtained (skipped entirely for smartcard-only accounts or when a
        # password was already supplied/generated).
        while True:
            if smartcard_required:
                break
            if password is not None and password != '':
                break
            password = getpass("New Password: ")
            passwordverify = getpass("Retype Password: ")
            if not password == passwordverify:
                password = None
                self.outf.write("Sorry, passwords do not match.\n")

        if filter is None:
            # Escape the username for safe inclusion in the LDAP filter.
            filter = "(&(objectClass=user)(sAMAccountName=%s))" % (ldb.binary_encode(username))

        lp = sambaopts.get_loadparm()
        creds = credopts.get_credentials(lp)

        # Require sealing on the connection so the new password is not
        # transmitted in the clear.
        creds.set_gensec_features(creds.get_gensec_features() | gensec.FEATURE_SEAL)

        samdb = SamDB(url=H, session_info=system_session(),
                      credentials=creds, lp=lp)

        if smartcard_required:
            # `command` tracks which step we are in so the error message
            # names the operation that actually failed.
            command = ""
            try:
                command = "Failed to set UF_SMARTCARD_REQUIRED for user '%s'" % (username or filter)
                flags = dsdb.UF_SMARTCARD_REQUIRED
                samdb.toggle_userAccountFlags(filter, flags, on=True)
                command = "Failed to enable account for user '%s'" % (username or filter)
                samdb.enable_account(filter)
            except Exception as msg:
                # FIXME: catch more specific exception
                raise CommandError("%s: %s" % (command, msg))
            self.outf.write("Added UF_SMARTCARD_REQUIRED OK\n")
        else:
            command = ""
            try:
                if clear_smartcard_required:
                    command = "Failed to remove UF_SMARTCARD_REQUIRED for user '%s'" % (username or filter)
                    flags = dsdb.UF_SMARTCARD_REQUIRED
                    samdb.toggle_userAccountFlags(filter, flags, on=False)
                command = "Failed to set password for user '%s'" % (username or filter)
                samdb.setpassword(filter, password,
                                  force_change_at_next_login=must_change_at_next_login,
                                  username=username)
            except Exception as msg:
                # FIXME: catch more specific exception
                raise CommandError("%s: %s" % (command, msg))
            self.outf.write("Changed password OK\n")
diff --git a/python/samba/netcmd/user/setprimarygroup.py b/python/samba/netcmd/user/setprimarygroup.py
new file mode 100644
index 0000000..90b957f
--- /dev/null
+++ b/python/samba/netcmd/user/setprimarygroup.py
@@ -0,0 +1,138 @@
+# user management
+#
+# user setprimarygroup command
+#
+# Copyright Jelmer Vernooij 2010 <jelmer@samba.org>
+# Copyright Theresa Halloran 2011 <theresahalloran@gmail.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import samba.getopt as options
+from samba import ldb
+from samba.auth import system_session
+from samba.dcerpc import security
+from samba.ndr import ndr_unpack
+from samba.netcmd import Command, CommandError, Option
+from samba.samdb import SamDB
+
+
class cmd_user_setprimarygroup(Command):
    """Set the primary group of a user account.

This command sets the primary group of a user account. The username specified on
the command is the sAMAccountName. The primarygroupname is the sAMAccountName
of the new primary group. The user must be a member of the group.

The command may be run from the root userid or another authorized userid. The
-H or --URL= option can be used to execute the command against a remote server.

Example1:
samba-tool user setprimarygroup TestUser1 newPrimaryGroup --URL=ldap://samba.samdom.example.com -Uadministrator%passw1rd

Example1 shows how to set the primary group for TestUser1 on a remote LDAP
server. The --URL parameter is used to specify the remote target server. The
-U option is used to pass the username and password of a user that exists on
the remote server and is authorized to update the server.
"""
    synopsis = "%prog <username> <primarygroupname> [options]"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }

    takes_options = [
        Option("-H", "--URL", help="LDB URL for database or target server", type=str,
               metavar="URL", dest="H"),
    ]

    takes_args = ["username", "primarygroupname"]

    def run(self, username, primarygroupname, credopts=None, sambaopts=None,
            versionopts=None, H=None):
        """Change the user's primaryGroupID to the RID of the named group.

        Raises CommandError when the user or group cannot be found, when the
        group is in a different domain, when the user is not a member of the
        group, or when the modify operation fails.
        """
        lp = sambaopts.get_loadparm()
        creds = credopts.get_credentials(lp)

        samdb = SamDB(url=H, session_info=system_session(),
                      credentials=creds, lp=lp)

        filter = ("(&(sAMAccountName=%s)(objectClass=user))" %
                  ldb.binary_encode(username))
        try:
            # extended_dn:1:1 makes the returned DNs (including the memberOf
            # values) carry their <SID=...>/<GUID=...> components, so group
            # SIDs can be read straight off the DN below.
            res = samdb.search(base=samdb.domain_dn(),
                               expression=filter,
                               scope=ldb.SCOPE_SUBTREE,
                               controls=["extended_dn:1:1"],
                               attrs=["objectSid",
                                      "memberOf",
                                      "primaryGroupID"])
            user_sid_binary = res[0].get('objectSid', idx=0)
            user_sid = ndr_unpack(security.dom_sid, user_sid_binary)
            # Split the SID into the domain part and the user's RID.
            (user_dom_sid, user_rid) = user_sid.split()
            # Address the user by SID so a later rename cannot break the
            # modify below.
            user_sid_dn = "<SID=%s>" % user_sid
            user_pgid = int(res[0].get('primaryGroupID', idx=0))
            user_groups = res[0].get('memberOf')
            if user_groups is None:
                user_groups = []
        except IndexError:
            # res[0] raised: the search matched nothing.
            raise CommandError("Unable to find user '%s'" % (username))

        # Collect the SIDs of all groups the user is a direct member of.
        user_group_sids = []
        for user_group in user_groups:
            user_group_dn = ldb.Dn(samdb, str(user_group))
            user_group_binary_sid = user_group_dn.get_extended_component("SID")
            user_group_sid = ndr_unpack(security.dom_sid, user_group_binary_sid)
            user_group_sids.append(user_group_sid)

        filter = ("(&(sAMAccountName=%s)(objectClass=group))" %
                  ldb.binary_encode(primarygroupname))
        try:
            res = samdb.search(base=samdb.domain_dn(),
                               expression=filter,
                               scope=ldb.SCOPE_SUBTREE,
                               attrs=["objectSid"])
            group_sid_binary = res[0].get('objectSid', idx=0)
        except IndexError:
            raise CommandError("Unable to find group '%s'" % (primarygroupname))

        primarygroup_sid = ndr_unpack(security.dom_sid, group_sid_binary)
        (primarygroup_dom_sid, primarygroup_rid) = primarygroup_sid.split()

        # primaryGroupID stores only a RID, so the group must live in the
        # same domain as the user.
        if user_dom_sid != primarygroup_dom_sid:
            raise CommandError("Group '%s' does not belong to the user's "
                               "domain" % primarygroupname)

        # The user must already be a member of the new primary group (the
        # primary group itself never appears in memberOf, hence the RID
        # comparison for the no-op case).
        if primarygroup_rid != user_pgid and primarygroup_sid not in user_group_sids:
            raise CommandError("User '%s' is not member of group '%s'" %
                               (username, primarygroupname))

        # Replace primaryGroupID via an explicit delete of the old value and
        # add of the new one in a single modify.
        setprimarygroup_ldif = """
dn: %s
changetype: modify
delete: primaryGroupID
primaryGroupID: %u
add: primaryGroupID
primaryGroupID: %u
""" % (user_sid_dn, user_pgid, primarygroup_rid)

        try:
            samdb.modify_ldif(setprimarygroup_ldif)
        except Exception as msg:
            raise CommandError("Failed to set primary group '%s' "
                               "for user '%s': %s" %
                               (primarygroupname, username, msg))
        self.outf.write("Changed primary group to '%s'\n" % primarygroupname)
diff --git a/python/samba/netcmd/user/unlock.py b/python/samba/netcmd/user/unlock.py
new file mode 100644
index 0000000..0f56f0b
--- /dev/null
+++ b/python/samba/netcmd/user/unlock.py
@@ -0,0 +1,99 @@
+# user management
+#
+# user unlock command
+#
+# Copyright Jelmer Vernooij 2010 <jelmer@samba.org>
+# Copyright Theresa Halloran 2011 <theresahalloran@gmail.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import samba.getopt as options
+from samba import ldb
+from samba.auth import system_session
+from samba.netcmd import Command, CommandError, Option
+from samba.samdb import SamDB, SamDBError
+
+
class cmd_user_unlock(Command):
    """Unlock a user account.

    This command unlocks a user account in the Active Directory domain. The
    username specified on the command is the sAMAccountName. The username may
    also be specified using the --filter option.

    The command may be run from the root userid or another authorized userid.
    The -H or --URL= option can be used to execute the command against a remote
    server.

    Example:
    samba-tool user unlock user1 -H ldap://samba.samdom.example.com \\
        --username=Administrator --password=Passw0rd

    The example shows how to unlock a user account in the domain against a
    remote LDAP server. The -H parameter is used to specify the remote target
    server. The --username= and --password= options are used to pass the
    username and password of a user that exists on the remote server and is
    authorized to issue the command on that server.
"""

    synopsis = "%prog (<username>|--filter <filter>) [options]"

    takes_options = [
        Option("-H", "--URL", type=str, metavar="URL", dest="H",
               help="LDB URL for database or target server"),
        Option("--filter", type=str,
               help="LDAP Filter to set password on"),
    ]

    takes_args = ["username?"]

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "credopts": options.CredentialsOptions,
        "versionopts": options.VersionOptions,
    }

    def run(self,
            username=None,
            sambaopts=None,
            credopts=None,
            versionopts=None,
            filter=None,
            H=None):
        # Without an explicit filter we need a username to build one from.
        if filter is None:
            if username is None:
                raise CommandError("Either the username or '--filter' must be "
                                   "specified!")
            filter = ("(&(objectClass=user)(sAMAccountName=%s))" % (
                ldb.binary_encode(username)))

        lp = sambaopts.get_loadparm()
        creds = credopts.get_credentials(lp, fallback_machine=True)

        samdb = SamDB(url=H,
                      session_info=system_session(),
                      credentials=creds,
                      lp=lp)
        try:
            samdb.unlock_account(filter)
        except (SamDBError, ldb.LdbError) as msg:
            raise CommandError("Failed to unlock user '%s': %s" % (
                username or filter, msg))
diff --git a/python/samba/netcmd/validators.py b/python/samba/netcmd/validators.py
new file mode 100644
index 0000000..5690341
--- /dev/null
+++ b/python/samba/netcmd/validators.py
@@ -0,0 +1,66 @@
+# Unix SMB/CIFS implementation.
+#
+# validators
+#
+# Copyright (C) Catalyst.Net Ltd. 2023
+#
+# Written by Rob van der Linde <rob@catalyst.net.nz>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from samba.getopt import Validator, ValidationError
+
+
class Range(Validator):
    """Checks if the value is within range min ... max."""

    def __init__(self, min=None, max=None):
        # A Range with neither bound would accept everything.
        if min is None and max is None:
            raise ValueError("Range without a min and max doesn't make sense.")

        self.min = min
        self.max = max

    def __call__(self, field, value):
        """Check if value is within the range min ... max.

        It is possible to omit min, or omit max, in which case a more
        tailored error message is returned.
        """
        lo = self.min
        hi = self.max

        if lo is not None and hi is not None:
            # Both bounds present: report the full permitted interval.
            if not (lo <= value <= hi):
                raise ValidationError(
                    f"{field} must be between {lo} and {hi}")
        elif lo is not None:
            if value < lo:
                raise ValidationError(f"{field} must be at least {lo}")
        elif hi is not None:
            if value > hi:
                raise ValidationError(
                    f"{field} cannot be greater than {hi}")
+
+
class OneOf(Validator):
    """Checks if the value is in a list of possible choices."""

    def __init__(self, choices):
        # Sorted so the error message lists the choices deterministically.
        self.choices = sorted(choices)

    def __call__(self, field, value):
        if value in self.choices:
            return
        allowed_choices = ", ".join(self.choices)
        raise ValidationError(f"{field} must be one of: {allowed_choices}")
diff --git a/python/samba/netcmd/visualize.py b/python/samba/netcmd/visualize.py
new file mode 100644
index 0000000..689d577
--- /dev/null
+++ b/python/samba/netcmd/visualize.py
@@ -0,0 +1,705 @@
+# Visualisation tools
+#
+# Copyright (C) Andrew Bartlett 2015, 2018
+#
+# by Douglas Bagnall <douglas.bagnall@catalyst.net.nz>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import sys
+from collections import defaultdict
+import subprocess
+import tempfile
+import samba.getopt as options
+from samba import dsdb
+from samba import nttime2unix
+from samba.netcmd import Command, SuperCommand, CommandError, Option
+from samba.samdb import SamDB
+from samba.graph import dot_graph
+from samba.graph import distance_matrix, COLOUR_SETS
+from samba.graph import full_matrix
+from samba.colour import is_colour_wanted
+
+from ldb import SCOPE_BASE, SCOPE_SUBTREE, LdbError
+import time
+import re
+from samba.kcc import KCC, ldif_import_export
+from samba.kcc.kcc_utils import KCCError
+from samba.uptodateness import (
+ get_partition_maps,
+ get_partition,
+ get_own_cursor,
+ get_utdv,
+ get_utdv_edges,
+ get_utdv_distances,
+ get_utdv_max_distance,
+ get_kcc_and_dsas,
+)
+
# Options shared by every `samba-tool visualize` subcommand.
COMMON_OPTIONS = [
    Option("-H", "--URL", help="LDB URL for database or target server",
           type=str, metavar="URL", dest="H"),
    Option("-o", "--output", help="write here (default stdout)",
           type=str, metavar="FILE", default=None),
    Option("--distance", help="Distance matrix graph output (default)",
           dest='format', const='distance', action='store_const'),
    Option("--utf8", help="Use utf-8 Unicode characters",
           action='store_true'),
    Option("--color-scheme", help=("use this colour scheme "
                                   "(implies --color=yes)"),
           choices=list(COLOUR_SETS.keys())),
    Option("-S", "--shorten-names",
           help="don't print long common suffixes",
           action='store_true', default=False),
    Option("-r", "--talk-to-remote", help="query other DCs' databases",
           action='store_true', default=False),
    Option("--no-key", help="omit the explanatory key",
           action='store_false', default=True, dest='key'),
]

# Extra options for the subcommands that can emit Graphviz output.
DOT_OPTIONS = [
    Option("--dot", help="Graphviz dot output", dest='format',
           const='dot', action='store_const'),
    Option("--xdot", help="attempt to call Graphviz xdot", dest='format',
           const='xdot', action='store_const'),
]

# Sentinel passed to GraphCommand.write() to request a temporary file.
# It is compared by identity (`fn is TEMP_FILE`), not by value.
TEMP_FILE = '__temp__'
+
+
class GraphCommand(Command):
    """Base class for graphing commands"""

    synopsis = "%prog [options]"
    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "versionopts": options.VersionOptions,
        "credopts": options.CredentialsOptions,
    }
    takes_options = COMMON_OPTIONS + DOT_OPTIONS
    takes_args = ()

    def get_db(self, H, sambaopts, credopts):
        """Connect to the SAM database at URL H (local or remote)."""
        lp = sambaopts.get_loadparm()
        creds = credopts.get_credentials(lp, fallback_machine=True)
        samdb = SamDB(url=H, credentials=creds, lp=lp)
        return samdb

    def write(self, s, fn=None, suffix='.dot'):
        """Decide whether we're dealing with a filename, a tempfile, or
        stdout, and write accordingly.

        :param s: the string to write
        :param fn: a destination
        :param suffix: suffix, if destination is a tempfile

        If fn is None or "-", write to stdout.
        If fn is visualize.TEMP_FILE, write to a temporary file
        Otherwise fn should be a filename to write to.

        Returns the filename written to, or None when writing to stdout.
        """
        if fn is None or fn == '-':
            # we're just using stdout (a.k.a self.outf)
            print(s, file=self.outf)
            return

        # NOTE: identity comparison — callers must pass the TEMP_FILE
        # sentinel object itself, not an equal string.
        if fn is TEMP_FILE:
            fd, fn = tempfile.mkstemp(prefix='samba-tool-visualise',
                                      suffix=suffix)
            # Re-open by name for text-mode writing; the mkstemp fd is
            # not needed once the file exists.
            f = open(fn, 'w')
            os.close(fd)
        else:
            f = open(fn, 'w')

        f.write(s)
        f.close()
        return fn

    def calc_output_format(self, format, output):
        """Heuristics to work out what output format was wanted.

        :param format: the --distance/--dot/--xdot choice, or None
        :param output: the destination filename, or None
        :return: 'dot' or 'distance'
        """
        if not format:
            # They told us nothing! We have to work it out for ourselves.
            # A *.dot output file implies dot format.
            if output and output.lower().endswith('.dot'):
                return 'dot'
            else:
                return 'distance'

        # xdot consumes dot-format text; the difference is only in how
        # it is displayed (see call_xdot).
        if format == 'xdot':
            return 'dot'

        return format

    def call_xdot(self, s, output):
        """Write the dot text to a file and display it with xdot.

        The written file is removed after xdot exits — even when a
        caller-supplied output path was used.
        """
        if output is None:
            fn = self.write(s, TEMP_FILE)
        else:
            fn = self.write(s, output)
        # Allow overriding the xdot binary location via the environment.
        xdot = os.environ.get('SAMBA_TOOL_XDOT_PATH', '/usr/bin/xdot')
        subprocess.call([xdot, fn])
        os.remove(fn)

    def calc_distance_color_scheme(self, color_scheme, output):
        """Heuristics to work out the colour scheme for distance matrices.
        Returning None means no colour, otherwise it should be a colour
        from graph.COLOUR_SETS"""
        if color_scheme is not None:
            # --color-scheme implies --color=yes for *this* purpose.
            return color_scheme

        if output in ('-', None):
            output = self.outf

        # self.requested_colour comes from the base Command's colour
        # option handling (not visible in this file).
        want_colour = is_colour_wanted(output, hint=self.requested_colour)
        if not want_colour:
            return None

        # if we got to here, we are using colour according to the
        # --color/NO_COLOR rules, but no colour scheme has been
        # specified, so we choose some defaults.
        if '256color' in os.environ.get('TERM', ''):
            return 'xterm-256color-heatmap'
        return 'ansi'
+
+
def get_dnstr_site(dn):
    """Helper function for sorting and grouping DNs by site, if
    possible.

    Returns the site name extracted from a server DN, or the DN itself
    when no site component can be found.
    """
    match = re.search(r'CN=Servers,CN=\s*([^,]+)\s*,CN=Sites', dn)
    if match is None:
        # Oh well, let it sort by DN
        return dn
    return match.group(1)
+
+
def get_dnstrlist_site(t):
    """Helper function for sorting and grouping lists of (DN, ...) tuples
    by site, if possible.

    Delegates to get_dnstr_site() on the tuple's first element.
    """
    dn = t[0]
    return get_dnstr_site(dn)
+
+
def colour_hash(x):
    """Generate a randomish but consistent darkish colour based on the
    given object."""
    from hashlib import md5
    data = str(x).encode('utf8')
    digest = md5(data).hexdigest()
    # Take 24 bits of the digest and mask off the top bit of each channel
    # so the colour stays darkish.
    rgb = int(digest[:6], base=16) & 0x7f7f7f
    return '#%06x' % rgb
+
+
class cmd_reps(GraphCommand):
    "repsFrom/repsTo from every DSA"

    takes_options = COMMON_OPTIONS + DOT_OPTIONS + [
        Option("-p", "--partition", help="restrict to this partition",
               default=None),
    ]

    def run(self, H=None, output=None, shorten_names=False,
            key=True, talk_to_remote=False,
            sambaopts=None, credopts=None, versionopts=None,
            mode='self', partition=None, color_scheme=None,
            utf8=None, format=None, xdot=False):
        """Graph the repsFrom/repsTo objects of every DSA.

        With --talk-to-remote each DC is queried directly; otherwise the
        local database's view is used with a forced local DSA per DC.
        """
        # We use the KCC libraries in readonly mode to get the
        # replication graph.
        lp = sambaopts.get_loadparm()
        creds = credopts.get_credentials(lp, fallback_machine=True)
        local_kcc, dsas = get_kcc_and_dsas(H, lp, creds)
        unix_now = local_kcc.unix_now

        # Resolve a partial partition name to its full DN (None means all).
        partition = get_partition(local_kcc.samdb, partition)

        # nc_reps is an autovivifying dictionary of dictionaries of lists.
        # nc_reps[partition]['current' | 'needed'] is a list of
        # (dsa dn string, repsFromTo object) pairs.
        nc_reps = defaultdict(lambda: defaultdict(list))

        guid_to_dnstr = {}

        # We run a new KCC for each DSA even if we aren't talking to
        # the remote, because after kcc.run (or kcc.list_dsas) the kcc
        # ends up in a messy state.
        for dsa_dn in dsas:
            kcc = KCC(unix_now, readonly=True)
            if talk_to_remote:
                res = local_kcc.samdb.search(dsa_dn,
                                             scope=SCOPE_BASE,
                                             attrs=["dNSHostName"])
                dns_name = str(res[0]["dNSHostName"][0])
                print("Attempting to contact ldap://%s (%s)" %
                      (dns_name, dsa_dn),
                      file=sys.stderr)
                try:
                    kcc.load_samdb("ldap://%s" % dns_name, lp, creds)
                except KCCError as e:
                    # Unreachable DC: report it and keep going with the rest.
                    print("Could not contact ldap://%s (%s)" % (dns_name, e),
                          file=sys.stderr)
                    continue

                kcc.run(H, lp, creds)
            else:
                kcc.load_samdb(H, lp, creds)
                kcc.run(H, lp, creds, forced_local_dsa=dsa_dn)

            # Warn when this DSA's view of the DSA set disagrees with ours.
            dsas_from_here = set(kcc.list_dsas())
            if dsas != dsas_from_here:
                print("found extra DSAs:", file=sys.stderr)
                for dsa in (dsas_from_here - dsas):
                    print(" %s" % dsa, file=sys.stderr)
                print("missing DSAs (known locally, not by %s):" % dsa_dn,
                      file=sys.stderr)
                for dsa in (dsas - dsas_from_here):
                    print(" %s" % dsa, file=sys.stderr)

            for remote_dn in dsas_from_here:
                # mode selects whose reps tables we read: each DSA's own
                # ('self') or those it holds about other DSAs ('others').
                if mode == 'others' and remote_dn == dsa_dn:
                    continue
                elif mode == 'self' and remote_dn != dsa_dn:
                    continue

                remote_dsa = kcc.get_dsa('CN=NTDS Settings,' + remote_dn)
                kcc.translate_ntdsconn(remote_dsa)
                guid_to_dnstr[str(remote_dsa.dsa_guid)] = remote_dn
                # get_reps_tables() returns two dictionaries mapping
                # dns to NCReplica objects
                c, n = remote_dsa.get_rep_tables()
                for part, rep in c.items():
                    if partition is None or part == partition:
                        nc_reps[part]['current'].append((dsa_dn, rep))
                for part, rep in n.items():
                    if partition is None or part == partition:
                        nc_reps[part]['needed'].append((dsa_dn, rep))

        all_edges = {'needed': {'to': [], 'from': []},
                     'current': {'to': [], 'from': []}}

        short_partitions, long_partitions = get_partition_maps(local_kcc.samdb)

        # Flatten the reps tables into (src, dest, partition) edge tuples,
        # translating source DSA GUIDs back to DN strings.
        for partname, part in nc_reps.items():
            for state, edgelists in all_edges.items():
                for dsa_dn, rep in part[state]:
                    short_name = long_partitions.get(partname, partname)
                    for r in rep.rep_repsFrom:
                        edgelists['from'].append(
                            (dsa_dn,
                             guid_to_dnstr[str(r.source_dsa_obj_guid)],
                             short_name))
                    for r in rep.rep_repsTo:
                        edgelists['to'].append(
                            (guid_to_dnstr[str(r.source_dsa_obj_guid)],
                             dsa_dn,
                             short_name))

        # Here we have the set of edges. From now it is a matter of
        # interpretation and presentation.

        if self.calc_output_format(format, output) == 'distance':
            color_scheme = self.calc_distance_color_scheme(color_scheme,
                                                           output)
            header_strings = {
                'from': "RepsFrom objects for %s",
                'to': "RepsTo objects for %s",
            }
            # One distance matrix per (state, direction, partition).
            for state, edgelists in all_edges.items():
                for direction, items in edgelists.items():
                    part_edges = defaultdict(list)
                    for src, dest, part in items:
                        part_edges[part].append((src, dest))
                    for part, edges in part_edges.items():
                        s = distance_matrix(None, edges,
                                            utf8=utf8,
                                            colour=color_scheme,
                                            shorten_names=shorten_names,
                                            generate_key=key,
                                            grouping_function=get_dnstr_site)

                        s = "\n%s\n%s" % (header_strings[direction] % part, s)
                        self.write(s, output)
            return

        # Dot output: encode partition/direction as colour, state as line
        # style, direction as arrowhead shape.
        edge_colours = []
        edge_styles = []
        dot_edges = []
        dot_vertices = set()
        used_colours = {}
        key_set = set()
        for state, edgelist in all_edges.items():
            for direction, items in edgelist.items():
                for src, dest, part in items:
                    # One stable colour per partition (first direction wins).
                    colour = used_colours.setdefault((part),
                                                     colour_hash((part,
                                                                  direction)))
                    linestyle = 'dotted' if state == 'needed' else 'solid'
                    arrow = 'open' if direction == 'to' else 'empty'
                    dot_vertices.add(src)
                    dot_vertices.add(dest)
                    dot_edges.append((src, dest))
                    edge_colours.append(colour)
                    style = 'style="%s"; arrowhead=%s' % (linestyle, arrow)
                    edge_styles.append(style)
                    key_set.add((part, 'reps' + direction.title(),
                                 colour, style))

        # Build the explanatory key unless --no-key was given.
        key_items = []
        if key:
            for part, direction, colour, linestyle in sorted(key_set):
                key_items.append((False,
                                  'color="%s"; %s' % (colour, linestyle),
                                  "%s %s" % (part, direction)))
            key_items.append((False,
                              'style="dotted"; arrowhead="open"',
                              "repsFromTo is needed"))
            key_items.append((False,
                              'style="solid"; arrowhead="open"',
                              "repsFromTo currently exists"))

        s = dot_graph(dot_vertices, dot_edges,
                      directed=True,
                      edge_colors=edge_colours,
                      edge_styles=edge_styles,
                      shorten_names=shorten_names,
                      key_items=key_items)

        if format == 'xdot':
            self.call_xdot(s, output)
        else:
            self.write(s, output)
+
+
class NTDSConn(object):
    """Collects observation counts for NTDS connections, so we know
    whether all DSAs agree."""

    def __init__(self, src, dest):
        self.src = src
        self.dest = dest
        # How many DSAs have reported this connection.
        self.observations = 0
        # Whether each endpoint has itself reported it.
        self.src_attests = False
        self.dest_attests = False

    def attest(self, attester):
        """Record that *attester* claims this connection exists."""
        self.observations += 1
        self.src_attests = self.src_attests or (attester == self.src)
        self.dest_attests = self.dest_attests or (attester == self.dest)
+
+
+class cmd_ntdsconn(GraphCommand):
+ "Draw the NTDSConnection graph"
+ takes_options = COMMON_OPTIONS + DOT_OPTIONS + [
+ Option("--importldif", help="graph from samba_kcc generated ldif",
+ default=None),
+ ]
+
    def import_ldif_db(self, ldif, lp):
        """Import a samba_kcc-generated LDIF dump into a temporary sam.ldb.

        Returns the database filename; the path is also remembered in
        self._tmp_fn_to_delete so run() can remove it afterwards.
        """
        d = tempfile.mkdtemp(prefix='samba-tool-visualise')
        fn = os.path.join(d, 'imported.ldb')
        self._tmp_fn_to_delete = fn
        # Called for its side effect of creating the database at fn.
        samdb = ldif_import_export.ldif_to_samdb(fn, lp, ldif)
        return fn
+
    def run(self, H=None, output=None, shorten_names=False,
            key=True, talk_to_remote=False,
            sambaopts=None, credopts=None, versionopts=None,
            color_scheme=None,
            utf8=None, format=None, importldif=None,
            xdot=False):
        """Show the nTDSConnection objects as a distance matrix or a
        dot/xdot graph.

        With talk_to_remote, every reachable DSA is queried directly so
        that disagreements between DCs about a connection's existence
        can be reported; otherwise only the single database H (or the
        imported LDIF) is consulted.
        """
        lp = sambaopts.get_loadparm()
        if importldif is None:
            creds = credopts.get_credentials(lp, fallback_machine=True)
        else:
            # An LDIF dump replaces the live database; no credentials
            # are needed to read it.
            creds = None
            H = self.import_ldif_db(importldif, lp)

        local_kcc, dsas = get_kcc_and_dsas(H, lp, creds)
        # Drop the leading RDN of the local DSA's NTDS settings DN to
        # get the server object DN used in titles below.
        local_dsa_dn = local_kcc.my_dsa_dnstr.split(',', 1)[1]
        vertices = set()
        attested_edges = []
        for dsa_dn in dsas:
            if talk_to_remote:
                # Resolve the DSA's DNS name and connect to it directly,
                # skipping (with a warning) any DC we cannot reach.
                res = local_kcc.samdb.search(dsa_dn,
                                             scope=SCOPE_BASE,
                                             attrs=["dNSHostName"])
                dns_name = res[0]["dNSHostName"][0]
                try:
                    samdb = self.get_db("ldap://%s" % dns_name, sambaopts,
                                        credopts)
                except LdbError as e:
                    print("Could not contact ldap://%s (%s)" % (dns_name, e),
                          file=sys.stderr)
                    continue

                ntds_dn = samdb.get_dsServiceName()
                dn = samdb.domain_dn()
            else:
                samdb = self.get_db(H, sambaopts, credopts)
                ntds_dn = 'CN=NTDS Settings,' + dsa_dn
                dn = dsa_dn

            res = samdb.search(ntds_dn,
                               scope=SCOPE_BASE,
                               attrs=["msDS-isRODC"])

            # NOTE(review): ldb attribute values are typically bytes on
            # Python 3, so comparing against the str 'TRUE' may never
            # match -- confirm against the ldb bindings in use.
            is_rodc = res[0]["msDS-isRODC"][0] == 'TRUE'

            vertices.add((ntds_dn, 'RODC' if is_rodc else ''))
            # XXX we could also look at schedule
            res = samdb.search(dn,
                               scope=SCOPE_SUBTREE,
                               expression="(objectClass=nTDSConnection)",
                               attrs=['fromServer'],
                               # XXX can't be critical for ldif test
                               # controls=["search_options:1:2"],
                               controls=["search_options:0:2"],
                               )

            for msg in res:
                msgdn = str(msg.dn)
                dest_dn = msgdn[msgdn.index(',') + 1:]
                # Record (source NTDS DN, destination NTDS DN) plus the
                # DC that attested to this edge's existence.
                attested_edges.append((str(msg['fromServer'][0]),
                                       dest_dn, ntds_dn))

        if importldif and H == self._tmp_fn_to_delete:
            # Remove the temporary database created by import_ldif_db().
            os.remove(H)
            os.rmdir(os.path.dirname(H))

        # now we overlay all the graphs and generate styles accordingly
        edges = {}
        for src, dest, attester in attested_edges:
            k = (src, dest)
            if k in edges:
                e = edges[k]
            else:
                e = NTDSConn(*k)
                edges[k] = e
            e.attest(attester)

        # Sorted unzip: vertices becomes a tuple of NTDS DNs, and
        # rodc_status the parallel tuple of '' / 'RODC' markers.
        vertices, rodc_status = zip(*sorted(vertices))

        if self.calc_output_format(format, output) == 'distance':
            color_scheme = self.calc_distance_color_scheme(color_scheme,
                                                           output)
            colours = COLOUR_SETS[color_scheme]
            c_header = colours.get('header', '')
            c_reset = colours.get('reset', '')

            epilog = []
            if 'RODC' in rodc_status:
                epilog.append('No outbound connections are expected from RODCs')

            if not talk_to_remote:
                # If we are not talking to remote servers, we list all
                # the connections.
                graph_edges = edges.keys()
                title = 'NTDS Connections known to %s' % local_dsa_dn

            else:
                # If we are talking to the remotes, there are
                # interesting cases we can discover. What matters most
                # is that the destination (i.e. owner) knowns about
                # the connection, but it would be worth noting if the
                # source doesn't. Another strange situation could be
                # when a DC thinks there is a connection elsewhere,
                # but the computers allegedly involved don't believe
                # it exists.
                #
                # With limited bandwidth in the table, we mark the
                # edges known to the destination, and note the other
                # cases in a list after the diagram.
                graph_edges = []
                source_denies = []
                dest_denies = []
                both_deny = []
                for e, conn in edges.items():
                    if conn.dest_attests:
                        graph_edges.append(e)
                        if not conn.src_attests:
                            source_denies.append(e)
                    elif conn.src_attests:
                        dest_denies.append(e)
                    else:
                        both_deny.append(e)

                title = 'NTDS Connections known to each destination DC'

                if both_deny:
                    epilog.append('The following connections are alleged by '
                                  'DCs other than the source and '
                                  'destination:\n')
                    for e in both_deny:
                        epilog.append(' %s -> %s\n' % e)
                if dest_denies:
                    epilog.append('The following connections are alleged by '
                                  'DCs other than the destination but '
                                  'including the source:\n')
                    for e in dest_denies:
                        epilog.append(' %s -> %s\n' % e)
                if source_denies:
                    epilog.append('The following connections '
                                  '(included in the chart) '
                                  'are not known to the source DC:\n')
                    for e in source_denies:
                        epilog.append(' %s -> %s\n' % e)

            s = distance_matrix(vertices, graph_edges,
                                utf8=utf8,
                                colour=color_scheme,
                                shorten_names=shorten_names,
                                generate_key=key,
                                grouping_function=get_dnstrlist_site,
                                row_comments=rodc_status)

            epilog = ''.join(epilog)
            if epilog:
                epilog = '\n%sNOTES%s\n%s' % (c_header,
                                              c_reset,
                                              epilog)

            self.write('\n%s\n\n%s\n%s' % (title,
                                           s,
                                           epilog), output)
            return

        # dot/xdot output: colour and style each edge according to
        # which DCs agreed that the connection exists.
        dot_edges = []
        edge_colours = []
        edge_styles = []
        edge_labels = []
        n_servers = len(dsas)
        for k, e in sorted(edges.items()):
            dot_edges.append(k)
            if e.observations == n_servers or not talk_to_remote:
                edge_colours.append('#000000')
                edge_styles.append('')
            elif e.dest_attests:
                edge_styles.append('')
                if e.src_attests:
                    edge_colours.append('#0000ff')
                else:
                    edge_colours.append('#cc00ff')
            elif e.src_attests:
                edge_colours.append('#ff0000')
                edge_styles.append('style=dashed')
            else:
                edge_colours.append('#ff0000')
                edge_styles.append('style=dotted')

        key_items = []
        if key:
            key_items.append((False,
                              'color="#000000"',
                              "NTDS Connection"))
            # Only include key entries for colours/styles actually used.
            for colour, desc in (('#0000ff', "missing from some DCs"),
                                 ('#cc00ff', "missing from source DC")):
                if colour in edge_colours:
                    key_items.append((False, 'color="%s"' % colour, desc))

            for style, desc in (('style=dashed', "unknown to destination"),
                                ('style=dotted',
                                 "unknown to source and destination")):
                if style in edge_styles:
                    key_items.append((False,
                                      'color="#ff0000; %s"' % style,
                                      desc))

        if talk_to_remote:
            title = 'NTDS Connections'
        else:
            title = 'NTDS Connections known to %s' % local_dsa_dn

        s = dot_graph(sorted(vertices), dot_edges,
                      directed=True,
                      title=title,
                      edge_colors=edge_colours,
                      edge_labels=edge_labels,
                      edge_styles=edge_styles,
                      shorten_names=shorten_names,
                      key_items=key_items)

        if format == 'xdot':
            self.call_xdot(s, output)
        else:
            self.write(s, output)
+
+
class cmd_uptodateness(GraphCommand):
    """visualize uptodateness vectors"""

    takes_options = COMMON_OPTIONS + [
        Option("-p", "--partition", help="restrict to this partition",
               default=None),
        Option("--max-digits", default=3, type=int,
               help="display this many digits of out-of-date-ness"),
    ]

    def run(self, H=None, output=None, shorten_names=False,
            key=True, talk_to_remote=False,
            sambaopts=None, credopts=None, versionopts=None,
            color_scheme=None,
            utf8=False, format=None, importldif=None,
            xdot=False, partition=None, max_digits=3):
        # Uptodateness vectors can only be compared across servers, so
        # the remote flag is mandatory.
        if not talk_to_remote:
            print("this won't work without talking to the remote servers "
                  "(use -r)", file=self.outf)
            return

        # Use the KCC libraries in read-only mode to obtain the
        # replication graph.
        lp = sambaopts.get_loadparm()
        creds = credopts.get_credentials(lp, fallback_machine=True)
        local_kcc, dsas = get_kcc_and_dsas(H, lp, creds)
        self.samdb = local_kcc.samdb
        partition = get_partition(self.samdb, partition)

        short_partitions, long_partitions = get_partition_maps(self.samdb)
        color_scheme = self.calc_distance_color_scheme(color_scheme, output)

        for part_name, part_dn in short_partitions.items():
            # Skip partitions other than the requested one (None means
            # all partitions).
            if partition is not None and partition != part_dn:
                continue

            utdv_edges = get_utdv_edges(local_kcc, dsas, part_dn, lp, creds)
            distances = get_utdv_distances(utdv_edges, dsas)
            max_distance = get_utdv_max_distance(distances)

            # Clamp the displayed digit count into [1, max_digits].
            digits = max(1, min(max_digits, len(str(max_distance))))
            c_scale = 10 ** digits

            s = full_matrix(distances,
                            utf8=utf8,
                            colour=color_scheme,
                            shorten_names=shorten_names,
                            generate_key=key,
                            grouping_function=get_dnstr_site,
                            colour_scale=c_scale,
                            digits=digits,
                            ylabel='DC',
                            xlabel='out-of-date-ness')

            self.write('\n%s\n\n%s' % (part_name, s), output)
+
+
class cmd_visualize(SuperCommand):
    """Produces graphical representations of Samba network state."""
    subcommands = {}

    # Register every cmd_* class defined above under its name with the
    # 'cmd_' prefix stripped.
    for k, v in globals().items():
        if not k.startswith('cmd_'):
            continue
        subcommands[k[4:]] = v()
diff --git a/python/samba/nt_time.py b/python/samba/nt_time.py
new file mode 100644
index 0000000..4518e90
--- /dev/null
+++ b/python/samba/nt_time.py
@@ -0,0 +1,60 @@
+#
+# NT Time utility functions.
+#
+# Copyright (C) Catalyst.Net Ltd 2023
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+#
+
+import datetime
+from typing import NewType
+
+
NtTime = NewType("NtTime", int)
NtTimeDelta = NewType("NtTimeDelta", int)


# NT time counts 100-nanosecond "ticks" since midnight, 1 January 1601 UTC.
NT_EPOCH = datetime.datetime(
    1601, 1, 1, 0, 0, 0, 0, tzinfo=datetime.timezone.utc
)
NT_TICKS_PER_μSEC = 10
NT_TICKS_PER_SEC = NT_TICKS_PER_μSEC * 10**6


def _validate_nt_time(nt_time: NtTime) -> None:
    """Reject values that are not integers in the unsigned 64-bit range."""
    if not isinstance(nt_time, int):
        raise ValueError(f"{nt_time} is not an integer")
    if nt_time < 0 or nt_time >= 2**64:
        raise ValueError(f"{nt_time} is out of range")


def nt_time_from_datetime(tm: datetime.datetime) -> NtTime:
    """Convert an aware datetime into NT time (ticks since the NT epoch)."""
    elapsed = tm - NT_EPOCH
    ticks = NtTime(round(elapsed.total_seconds() * NT_TICKS_PER_SEC))
    _validate_nt_time(ticks)
    return ticks


def datetime_from_nt_time(nt_time: NtTime) -> datetime.datetime:
    """Convert NT time into a timezone-aware (UTC) datetime."""
    _validate_nt_time(nt_time)
    offset = datetime.timedelta(microseconds=nt_time / NT_TICKS_PER_μSEC)
    return NT_EPOCH + offset


def nt_time_delta_from_datetime(dt: datetime.timedelta) -> NtTimeDelta:
    """Convert a timedelta into a count of NT ticks."""
    return NtTimeDelta(round(dt.total_seconds() * NT_TICKS_PER_SEC))


def timedelta_from_nt_time_delta(nt_time_delta: NtTimeDelta) -> datetime.timedelta:
    """Convert a count of NT ticks into a timedelta."""
    return datetime.timedelta(microseconds=nt_time_delta / NT_TICKS_PER_μSEC)
diff --git a/python/samba/ntacls.py b/python/samba/ntacls.py
new file mode 100644
index 0000000..24af056
--- /dev/null
+++ b/python/samba/ntacls.py
@@ -0,0 +1,662 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Matthieu Patou <mat@matws.net> 2009-2010
+#
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+"""NT Acls."""
+
+
+import os
+import tempfile
+import shutil
+
+import samba.xattr_native
+import samba.xattr_tdb
+import samba.posix_eadb
+from samba.samba3 import param as s3param
+from samba.dcerpc import security, xattr, idmap
+from samba.ndr import ndr_pack, ndr_unpack
+from samba.samba3 import smbd
+from samba.samba3 import libsmb_samba_internal as libsmb
+from samba.logger import get_samba_logger
+from samba import NTSTATUSError
+from samba.auth_util import system_session_unix
+from samba import safe_tarfile as tarfile
+
+
# File attributes requested when listing a share; volumes are
# deliberately not included.
SMB_FILE_ATTRIBUTE_FLAGS = libsmb.FILE_ATTRIBUTE_SYSTEM | \
                           libsmb.FILE_ATTRIBUTE_DIRECTORY | \
                           libsmb.FILE_ATTRIBUTE_ARCHIVE | \
                           libsmb.FILE_ATTRIBUTE_HIDDEN


# All four parts of a security descriptor: owner, group, DACL and SACL.
SECURITY_SECINFO_FLAGS = security.SECINFO_OWNER | \
                         security.SECINFO_GROUP | \
                         security.SECINFO_DACL | \
                         security.SECINFO_SACL
+
class XattrBackendError(Exception):
    """Error raised when an xattr backend cannot be selected or used."""
+
+
def checkset_backend(lp, backend, eadbfile):
    """Pick the xattr backend module and database file to use.

    Returns a (module, path) pair; (None, None) means the native
    filesystem xattr support is used.
    """
    if backend is None:
        # No explicit choice: honour smb.conf hints, if present.
        xattr_tdb = lp.get("xattr_tdb:file")
        if xattr_tdb is not None:
            return (samba.xattr_tdb, lp.get("xattr_tdb:file"))
        posix_eadb = lp.get("posix:eadb")
        if posix_eadb is not None:
            return (samba.posix_eadb, lp.get("posix:eadb"))
        return (None, None)
    if backend == "native":
        return (None, None)
    if backend == "eadb":
        if eadbfile is not None:
            return (samba.posix_eadb, eadbfile)
        default = os.path.abspath(os.path.join(lp.get("private dir"),
                                               "eadb.tdb"))
        return (samba.posix_eadb, default)
    if backend == "tdb":
        if eadbfile is not None:
            return (samba.xattr_tdb, eadbfile)
        state_dir = lp.get("state directory")
        return (samba.xattr_tdb,
                os.path.abspath(os.path.join(state_dir, "xattr.tdb")))
    raise XattrBackendError("Invalid xattr backend choice %s" % backend)
+
+
def getdosinfo(lp, file):
    """Return the unpacked DOSATTRIB xattr of *file*, or None if it
    cannot be read."""
    try:
        blob = samba.xattr_native.wrap_getxattr(
            file, xattr.XATTR_DOSATTRIB_NAME_S3)
    except Exception:
        # No attribute present (or unreadable): signal with None.
        return None

    return ndr_unpack(xattr.DOSATTRIB, blob)
+
+
def getntacl(lp,
             file,
             session_info,
             backend=None,
             eadbfile=None,
             direct_db_access=True,
             service=None):
    """Fetch the NT ACL of a file.

    With direct_db_access the xattr is read straight from the backend
    database (or the filesystem) and unpacked; otherwise smbd's
    get_nt_acl API resolves it through the VFS.
    """
    if not direct_db_access:
        return smbd.get_nt_acl(file,
                               SECURITY_SECINFO_FLAGS,
                               session_info,
                               service=service)

    (backend_obj, dbname) = checkset_backend(lp, backend, eadbfile)
    if dbname is not None:
        try:
            attribute = backend_obj.wrap_getxattr(dbname, file,
                                                  xattr.XATTR_NTACL_NAME)
        except Exception:
            # FIXME: Don't catch all exceptions, just those related to
            # opening xattrdb
            print("Fail to open %s" % dbname)
            attribute = samba.xattr_native.wrap_getxattr(file,
                                                         xattr.XATTR_NTACL_NAME)
    else:
        attribute = samba.xattr_native.wrap_getxattr(file,
                                                     xattr.XATTR_NTACL_NAME)

    ntacl = ndr_unpack(xattr.NTACL, attribute)
    if ntacl.version == 1:
        return ntacl.info
    if ntacl.version in (2, 3, 4):
        # Versions 2-4 wrap the descriptor in an info structure.
        return ntacl.info.sd
    # Unknown version: behave like the original if/elif chain.
    return None
+
+
def setntacl(lp, file, sddl, domsid, session_info,
             backend=None, eadbfile=None,
             use_ntvfs=True, skip_invalid_chown=False,
             passdb=None, service=None):
    """
    A wrapper for smbd set_nt_acl api.

    Args:
        lp (LoadParam): load param from conf
        file (str): a path to file or dir
        sddl (str or security.descriptor): ntacl sddl string or descriptor
        domsid (str or security.dom_sid): the domain SID
        session_info (auth_session_info): session info for authentication
        backend (str): xattr backend choice ('native', 'eadb' or 'tdb')
        eadbfile (str): explicit path of the backend database, if any
        use_ntvfs (bool): store the ACL as an NTVFS xattr instead of
            going through smbd (no posix ACL is set in that case)
        skip_invalid_chown (bool): when the descriptor's owner cannot be
            mapped to a UID, fall back instead of failing
        passdb: passdb context used to map SIDs to UIDs
        service (str): name of share service, e.g.: sysvol

    Note:
        Get `session_info` with `samba.auth.user_session`, do not use the
        `admin_session` api.

    Returns:
        None
    """

    assert(isinstance(domsid, str) or isinstance(domsid, security.dom_sid))
    if isinstance(domsid, str):
        sid = security.dom_sid(domsid)
    elif isinstance(domsid, security.dom_sid):
        sid = domsid
        domsid = str(sid)

    assert(isinstance(sddl, str) or isinstance(sddl, security.descriptor))
    if isinstance(sddl, str):
        sd = security.descriptor.from_sddl(sddl, sid)
    elif isinstance(sddl, security.descriptor):
        sd = sddl
        sddl = sd.as_sddl(sid)

    if not use_ntvfs and skip_invalid_chown:
        # Check if the owner can be resolved as a UID
        (owner_id, owner_type) = passdb.sid_to_id(sd.owner_sid)
        if ((owner_type != idmap.ID_TYPE_UID) and (owner_type != idmap.ID_TYPE_BOTH)):
            # Check if this particular owner SID was domain admins,
            # because we special-case this as mapping to
            # 'administrator' instead.
            if sd.owner_sid == security.dom_sid("%s-%d" % (domsid, security.DOMAIN_RID_ADMINS)):
                administrator = security.dom_sid("%s-%d" % (domsid, security.DOMAIN_RID_ADMINISTRATOR))
                (admin_id, admin_type) = passdb.sid_to_id(administrator)

                # Confirm we have a UID for administrator
                if ((admin_type == idmap.ID_TYPE_UID) or (admin_type == idmap.ID_TYPE_BOTH)):

                    # Set it, changing the owner to 'administrator' rather than domain admins
                    sd2 = sd
                    sd2.owner_sid = administrator

                    smbd.set_nt_acl(
                        file, SECURITY_SECINFO_FLAGS, sd2,
                        session_info,
                        service=service)

                    # and then set an NTVFS ACL (which does not set the posix ACL) to pretend the owner really was set
                    use_ntvfs = True
                else:
                    raise XattrBackendError("Unable to find UID for domain administrator %s, got id %d of type %d" % (administrator, admin_id, admin_type))
            else:
                # For all other owning users, reset the owner to root
                # and then set the ACL without changing the owner
                #
                # This won't work in test environments, as it tries a real (rather than xattr-based fake) chown

                os.chown(file, 0, 0)
                smbd.set_nt_acl(
                    file,
                    security.SECINFO_GROUP |
                    security.SECINFO_DACL |
                    security.SECINFO_SACL,
                    sd,
                    session_info,
                    service=service)

    if use_ntvfs:
        # Store the packed NTACL blob directly via the chosen xattr
        # backend (or natively when no backend database is configured).
        (backend_obj, dbname) = checkset_backend(lp, backend, eadbfile)
        ntacl = xattr.NTACL()
        ntacl.version = 1
        ntacl.info = sd
        if dbname is not None:
            try:
                backend_obj.wrap_setxattr(dbname,
                                          file, xattr.XATTR_NTACL_NAME, ndr_pack(ntacl))
            except Exception:
                # FIXME: Don't catch all exceptions, just those related to opening
                # xattrdb
                print("Fail to open %s" % dbname)
                samba.xattr_native.wrap_setxattr(file, xattr.XATTR_NTACL_NAME,
                                                 ndr_pack(ntacl))
        else:
            samba.xattr_native.wrap_setxattr(file, xattr.XATTR_NTACL_NAME,
                                             ndr_pack(ntacl))
    else:
        smbd.set_nt_acl(
            file, SECURITY_SECINFO_FLAGS, sd,
            service=service, session_info=session_info)
+
+
def ldapmask2filemask(ldm):
    """Translate the access mask of a DS ACE into a file ACE mask."""
    # Directory-service rights
    RIGHT_DS_CREATE_CHILD = 0x00000001
    RIGHT_DS_DELETE_CHILD = 0x00000002
    RIGHT_DS_LIST_CONTENTS = 0x00000004
    ACTRL_DS_SELF = 0x00000008
    RIGHT_DS_READ_PROPERTY = 0x00000010
    RIGHT_DS_WRITE_PROPERTY = 0x00000020
    RIGHT_DS_DELETE_TREE = 0x00000040
    RIGHT_DS_LIST_OBJECT = 0x00000080
    RIGHT_DS_CONTROL_ACCESS = 0x00000100
    # File-specific rights
    FILE_READ_DATA = 0x0001
    FILE_LIST_DIRECTORY = 0x0001
    FILE_WRITE_DATA = 0x0002
    FILE_ADD_FILE = 0x0002
    FILE_APPEND_DATA = 0x0004
    FILE_ADD_SUBDIRECTORY = 0x0004
    FILE_CREATE_PIPE_INSTANCE = 0x0004
    FILE_READ_EA = 0x0008
    FILE_WRITE_EA = 0x0010
    FILE_EXECUTE = 0x0020
    FILE_TRAVERSE = 0x0020
    FILE_DELETE_CHILD = 0x0040
    FILE_READ_ATTRIBUTES = 0x0080
    FILE_WRITE_ATTRIBUTES = 0x0100
    # Standard rights
    DELETE = 0x00010000
    READ_CONTROL = 0x00020000
    WRITE_DAC = 0x00040000
    WRITE_OWNER = 0x00080000
    SYNCHRONIZE = 0x00100000
    STANDARD_RIGHTS_ALL = 0x001F0000

    # Standard rights carry over unchanged.
    filemask = ldm & STANDARD_RIGHTS_ALL

    # Reading properties plus listing contents maps to full read access.
    if (ldm & RIGHT_DS_READ_PROPERTY) and (ldm & RIGHT_DS_LIST_CONTENTS):
        filemask |= (SYNCHRONIZE | FILE_LIST_DIRECTORY |
                     FILE_READ_ATTRIBUTES | FILE_READ_EA |
                     FILE_READ_DATA | FILE_EXECUTE)

    # Writing properties maps to full write access.
    if ldm & RIGHT_DS_WRITE_PROPERTY:
        filemask |= (SYNCHRONIZE | FILE_WRITE_DATA |
                     FILE_APPEND_DATA | FILE_WRITE_EA |
                     FILE_WRITE_ATTRIBUTES | FILE_ADD_FILE |
                     FILE_ADD_SUBDIRECTORY)

    if ldm & RIGHT_DS_CREATE_CHILD:
        filemask |= (FILE_ADD_SUBDIRECTORY | FILE_ADD_FILE)

    if ldm & RIGHT_DS_DELETE_CHILD:
        filemask |= FILE_DELETE_CHILD

    return filemask
+
+
def dsacl2fsacl(dssddl, sid, as_sddl=True):
    """Convert a DS ACL into the equivalent file ACL.

    This function takes the SDDL representation of a DS ACL and returns
    the representation of this ACL adapted for files. It is used for
    Policy object provision.

    Args:
        dssddl (str): SDDL of the directory-service ACL
        sid (security.dom_sid): domain SID used to resolve SID aliases
        as_sddl (bool): return SDDL text instead of a descriptor object

    Returns:
        str or security.descriptor: the file-system ACL
    """
    ref = security.descriptor.from_sddl(dssddl, sid)
    fdescr = security.descriptor()
    fdescr.owner_sid = ref.owner_sid
    fdescr.group_sid = ref.group_sid
    fdescr.type = ref.type
    fdescr.revision = ref.revision
    # Iterate the ACEs directly instead of by index (idiomatic, same
    # order and behaviour).
    for ace in ref.dacl.aces:
        if ace.type in (security.SEC_ACE_TYPE_ACCESS_ALLOWED_OBJECT,
                        security.SEC_ACE_TYPE_ACCESS_ALLOWED) and str(ace.trustee) != security.SID_BUILTIN_PREW2K:
            # if fdescr.type & security.SEC_DESC_DACL_AUTO_INHERITED:
            ace.flags = ace.flags | security.SEC_ACE_FLAG_OBJECT_INHERIT | security.SEC_ACE_FLAG_CONTAINER_INHERIT
            if str(ace.trustee) == security.SID_CREATOR_OWNER:
                # For Creator/Owner the inherit-only flag is set, as this
                # ACE only makes sense for child objects
                ace.flags = ace.flags | security.SEC_ACE_FLAG_INHERIT_ONLY
            ace.access_mask = ldapmask2filemask(ace.access_mask)
            fdescr.dacl_add(ace)

    if not as_sddl:
        return fdescr

    return fdescr.as_sddl(sid)
+
+
class SMBHelper:
    """
    A wrapper class for SMB connection

    smb_path: path with separator "\\" other than "/"
    """

    def __init__(self, smb_conn, dom_sid):
        # dom_sid is used to render security descriptors as SDDL.
        self.smb_conn = smb_conn
        self.dom_sid = dom_sid

    def get_acl(self, smb_path, as_sddl=False,
                sinfo=None, access_mask=None):
        """Fetch the security descriptor of smb_path, optionally as SDDL."""
        assert '/' not in smb_path

        ntacl_sd = self.smb_conn.get_acl(smb_path,
                                         sinfo=sinfo,
                                         access_mask=access_mask)

        return ntacl_sd.as_sddl(self.dom_sid) if as_sddl else ntacl_sd

    def set_acl(self, smb_path, ntacl_sd,
                sinfo=None, access_mask=None):
        """Set the security descriptor (object or SDDL string) on smb_path."""
        assert '/' not in smb_path

        assert(isinstance(ntacl_sd, str) or isinstance(ntacl_sd, security.descriptor))
        if isinstance(ntacl_sd, str):
            # BUGFIX: this previously read self.domain_sid, which is never
            # set (__init__ stores the SID as self.dom_sid), so passing an
            # SDDL string always raised AttributeError.
            tmp_desc = security.descriptor.from_sddl(ntacl_sd, self.dom_sid)
        elif isinstance(ntacl_sd, security.descriptor):
            tmp_desc = ntacl_sd

        self.smb_conn.set_acl(smb_path, tmp_desc,
                              sinfo=sinfo,
                              access_mask=access_mask)

    def list(self, smb_path=''):
        """
        List file and dir base names in smb_path without recursive.
        """
        assert '/' not in smb_path
        return self.smb_conn.list(smb_path, attribs=SMB_FILE_ATTRIBUTE_FLAGS)

    def is_dir(self, attrib):
        """
        Check whether the attrib value is a directory.

        attrib is from list method.
        """
        return bool(attrib & libsmb.FILE_ATTRIBUTE_DIRECTORY)

    def join(self, root, name):
        """
        Join path with '\\'
        """
        return root + '\\' + name if root else name

    def loadfile(self, smb_path):
        """Return the contents of smb_path as bytes."""
        assert '/' not in smb_path
        return self.smb_conn.loadfile(smb_path)

    def create_tree(self, tree, smb_path=''):
        """
        Create files as defined in tree

        A dict value is a directory (created recursively); any other
        value is written as the file's contents.
        """
        for name, content in tree.items():
            fullname = self.join(smb_path, name)
            if isinstance(content, dict):  # a dir
                if not self.smb_conn.chkpath(fullname):
                    self.smb_conn.mkdir(fullname)
                self.create_tree(content, smb_path=fullname)
            else:  # a file
                self.smb_conn.savefile(fullname, content)

    def get_tree(self, smb_path=''):
        """
        Get the tree structure via smb conn

        self.smb_conn.list example:

        [
          {
            'attrib': 16,
            'mtime': 1528848309,
            'name': 'dir1',
            'short_name': 'dir1',
            'size': 0L
          }, {
            'attrib': 32,
            'mtime': 1528848309,
            'name': 'file0.txt',
            'short_name': 'file0.txt',
            'size': 10L
          }
        ]
        """
        tree = {}
        for item in self.list(smb_path):
            name = item['name']
            fullname = self.join(smb_path, name)
            if self.is_dir(item['attrib']):
                tree[name] = self.get_tree(smb_path=fullname)
            else:
                tree[name] = self.loadfile(fullname)
        return tree

    def get_ntacls(self, smb_path=''):
        """
        Get ntacl for each file and dir via smb conn

        Returns a flat dict mapping full paths to SDDL strings;
        directories themselves get no entry, only their files.
        """
        ntacls = {}
        for item in self.list(smb_path):
            name = item['name']
            fullname = self.join(smb_path, name)
            if self.is_dir(item['attrib']):
                ntacls.update(self.get_ntacls(smb_path=fullname))
            else:
                ntacl_sd = self.get_acl(fullname)
                ntacls[fullname] = ntacl_sd.as_sddl(self.dom_sid)
        return ntacls

    def delete_tree(self):
        """Remove every entry at the share root, recursing into dirs."""
        for item in self.list():
            name = item['name']
            if self.is_dir(item['attrib']):
                self.smb_conn.deltree(name)
            else:
                self.smb_conn.unlink(name)
+
+
class NtaclsHelper:
    """Convenience wrapper around getntacl/setntacl for a single share."""

    def __init__(self, service, smb_conf_path, dom_sid):
        self.service = service
        self.dom_sid = dom_sid

        # Loading the config is important to help smbd find services.
        self.lp = s3param.get_context()
        self.lp.load(smb_conf_path)

        self.use_ntvfs = "smb" in self.lp.get("server services")

    def getntacl(self, path, session_info, as_sddl=False, direct_db_access=None):
        """Read the NT ACL of path, optionally rendered as SDDL."""
        if direct_db_access is None:
            direct_db_access = self.use_ntvfs

        ntacl_sd = getntacl(
            self.lp, path, session_info,
            direct_db_access=direct_db_access,
            service=self.service)

        if as_sddl:
            return ntacl_sd.as_sddl(self.dom_sid)
        return ntacl_sd

    def setntacl(self, path, ntacl_sd, session_info):
        """Write the NT ACL (descriptor object or SDDL string) to path."""
        return setntacl(self.lp, path, ntacl_sd, self.dom_sid, session_info,
                        use_ntvfs=self.use_ntvfs)
+
+
def _create_ntacl_file(dst, ntacl_sddl_str):
    """Write the SDDL string into dst's companion '<dst>.NTACL' file."""
    ntacl_path = dst + '.NTACL'
    with open(ntacl_path, 'w') as f:
        f.write(ntacl_sddl_str)
+
+
def _read_ntacl_file(src):
    """Return the SDDL stored in '<src>.NTACL', or None if absent."""
    try:
        with open(src + '.NTACL', 'r') as f:
            return f.read()
    except FileNotFoundError:
        # No companion ACL file was written for this entry.
        return None
+
+
def backup_online(smb_conn, dest_tarfile_path, dom_sid):
    """
    Backup all files and dirs with ntacl for the service behind smb_conn.

    1. Create a temp dir as container dir
    2. Backup all files with dir structure into container dir
    3. Generate file.NTACL files for each file and dir in container dir
    4. Create a tar file from container dir(without top level folder)
    5. Delete container dir

    Args:
        smb_conn: an established SMB connection to the share to back up
        dest_tarfile_path (str): destination of the gzipped tar archive
        dom_sid (str or security.dom_sid): domain SID used when
            rendering SDDL
    """

    logger = get_samba_logger()

    if isinstance(dom_sid, str):
        dom_sid = security.dom_sid(dom_sid)

    smb_helper = SMBHelper(smb_conn, dom_sid)

    remotedir = ''  # root dir

    localdir = tempfile.mkdtemp()

    # Iterative walk of the share; r_dirs/l_dirs are parallel stacks of
    # remote and local directories still to be copied.
    r_dirs = [remotedir]
    l_dirs = [localdir]

    while r_dirs:
        r_dir = r_dirs.pop()
        l_dir = l_dirs.pop()

        for e in smb_helper.list(smb_path=r_dir):
            r_name = smb_helper.join(r_dir, e['name'])
            l_name = os.path.join(l_dir, e['name'])

            if smb_helper.is_dir(e['attrib']):
                r_dirs.append(r_name)
                l_dirs.append(l_name)
                os.mkdir(l_name)
            else:
                data = smb_helper.loadfile(r_name)
                with open(l_name, 'wb') as f:
                    f.write(data)

            # get ntacl for this entry and save alongside
            try:
                ntacl_sddl_str = smb_helper.get_acl(r_name, as_sddl=True)
                _create_ntacl_file(l_name, ntacl_sddl_str)
            except NTSTATUSError as e:
                # Non-fatal: the entry is still backed up, just without
                # its ACL.
                logger.error('Failed to get the ntacl for %s: %s' %
                             (r_name, e.args[1]))
                logger.warning('The permissions for %s may not be' % r_name +
                               ' restored correctly')

    # Archive the container dir's entries at the tar root (no top-level
    # folder), then discard the container.
    with tarfile.open(name=dest_tarfile_path, mode='w:gz') as tar:
        for name in os.listdir(localdir):
            path = os.path.join(localdir, name)
            tar.add(path, arcname=name)

    shutil.rmtree(localdir)
+
+
def backup_offline(src_service_path, dest_tarfile_path, smb_conf_path, dom_sid):
    """
    Backup files and ntacls to a tarfile for a service.

    The tree under src_service_path is recreated in a temp directory via
    smbd (so DOS metadata is preserved), a companion .NTACL file is
    written next to every entry, and the temp directory's contents are
    archived into a gzipped tar at dest_tarfile_path.
    """
    # The service (share) name is the last path component.
    service = src_service_path.rstrip('/').rsplit('/', 1)[-1]
    tempdir = tempfile.mkdtemp()
    session_info = system_session_unix()

    ntacls_helper = NtaclsHelper(service, smb_conf_path, dom_sid)

    for dirpath, dirnames, filenames in os.walk(src_service_path):
        # each dir only cares about its direct children
        rel_dirpath = os.path.relpath(dirpath, start=src_service_path)
        dst_dirpath = os.path.join(tempdir, rel_dirpath)

        # create sub dirs and NTACL file
        for dirname in dirnames:
            src = os.path.join(dirpath, dirname)
            dst = os.path.join(dst_dirpath, dirname)
            # mkdir with metadata
            smbd.mkdir(dst, session_info, service)
            ntacl_sddl_str = ntacls_helper.getntacl(src, session_info, as_sddl=True)
            _create_ntacl_file(dst, ntacl_sddl_str)

        # create files and NTACL file, then copy data
        for filename in filenames:
            src = os.path.join(dirpath, filename)
            dst = os.path.join(dst_dirpath, filename)
            # create an empty file with metadata
            smbd.create_file(dst, session_info, service)
            ntacl_sddl_str = ntacls_helper.getntacl(src, session_info, as_sddl=True)
            _create_ntacl_file(dst, ntacl_sddl_str)

            # now put data in
            with open(src, 'rb') as src_file:
                data = src_file.read()
            with open(dst, 'wb') as dst_file:
                dst_file.write(data)

    # add all files in tempdir to tarfile without a top folder
    with tarfile.open(name=dest_tarfile_path, mode='w:gz') as tar:
        for name in os.listdir(tempdir):
            path = os.path.join(tempdir, name)
            tar.add(path, arcname=name)

    shutil.rmtree(tempdir)
+
+
def backup_restore(src_tarfile_path, dst_service_path, samdb_conn, smb_conf_path):
    """
    Restore files and ntacls from a tarfile to a service.

    The archive (as produced by backup_online/backup_offline) is
    extracted to a temp directory, then each entry is recreated under
    dst_service_path via smbd and its ACL reapplied from the companion
    .NTACL file; a warning is logged when an ACL file is missing.
    """
    logger = get_samba_logger()
    # The service (share) name is the last path component.
    service = dst_service_path.rstrip('/').rsplit('/', 1)[-1]
    tempdir = tempfile.mkdtemp()  # src files

    dom_sid_str = samdb_conn.get_domain_sid()
    dom_sid = security.dom_sid(dom_sid_str)

    ntacls_helper = NtaclsHelper(service, smb_conf_path, dom_sid)
    session_info = system_session_unix()

    with tarfile.open(src_tarfile_path) as f:
        f.extractall(path=tempdir)
        # e.g.: /tmp/tmpRNystY/{dir1,dir1.NTACL,...file1,file1.NTACL}

    for dirpath, dirnames, filenames in os.walk(tempdir):
        rel_dirpath = os.path.relpath(dirpath, start=tempdir)
        dst_dirpath = os.path.normpath(
            os.path.join(dst_service_path, rel_dirpath))

        for dirname in dirnames:
            # .NTACL entries are metadata, not content to restore.
            if not dirname.endswith('.NTACL'):
                src = os.path.join(dirpath, dirname)
                dst = os.path.join(dst_dirpath, dirname)
                if not os.path.isdir(dst):
                    # dst must be absolute path for smbd API
                    smbd.mkdir(dst, session_info, service)

                ntacl_sddl_str = _read_ntacl_file(src)
                if ntacl_sddl_str:
                    ntacls_helper.setntacl(dst, ntacl_sddl_str, session_info)
                else:
                    logger.warning(
                        'Failed to restore ntacl for directory %s.' % dst
                        + ' Please check the permissions are correct')

        for filename in filenames:
            # .NTACL entries are metadata, not content to restore.
            if not filename.endswith('.NTACL'):
                src = os.path.join(dirpath, filename)
                dst = os.path.join(dst_dirpath, filename)
                if not os.path.isfile(dst):
                    # dst must be absolute path for smbd API
                    smbd.create_file(dst, session_info, service)

                ntacl_sddl_str = _read_ntacl_file(src)
                if ntacl_sddl_str:
                    ntacls_helper.setntacl(dst, ntacl_sddl_str, session_info)
                else:
                    logger.warning('Failed to restore ntacl for file %s.' % dst
                                   + ' Please check the permissions are correct')

                # now put data in
                with open(src, 'rb') as src_file:
                    data = src_file.read()
                with open(dst, 'wb') as dst_file:
                    dst_file.write(data)

    shutil.rmtree(tempdir)
diff --git a/python/samba/policies.py b/python/samba/policies.py
new file mode 100644
index 0000000..4539232
--- /dev/null
+++ b/python/samba/policies.py
@@ -0,0 +1,388 @@
+# Utilities for working with policies in SYSVOL Registry.pol files
+#
+# Copyright (C) David Mulder <dmulder@samba.org> 2022
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from io import StringIO
+import ldb
+from samba.ndr import ndr_unpack, ndr_pack
+from samba.dcerpc import preg
+from samba.netcmd.common import netcmd_finddc
+from samba.netcmd.gpcommon import (
+ create_directory_hier,
+ smb_connection,
+ get_gpo_dn
+)
+from samba import NTSTATUSError
+from numbers import Number
+from samba.registry import str_regtype
+from samba.ntstatus import (
+ NT_STATUS_OBJECT_NAME_INVALID,
+ NT_STATUS_OBJECT_NAME_NOT_FOUND,
+ NT_STATUS_OBJECT_PATH_NOT_FOUND,
+ NT_STATUS_INVALID_PARAMETER
+)
+from samba.gp_parse.gp_ini import GPTIniParser
+from samba.common import get_string
+from samba.dcerpc import security
+from samba.ntacls import dsacl2fsacl
+from samba.dcerpc.misc import REG_BINARY, REG_MULTI_SZ, REG_SZ, GUID
+
# Template for a brand-new GPT.INI: a [General] section with a version of
# zero.  The 32-bit version packs the machine counter in the low 16 bits
# and the user counter in the high 16 bits (see increment_gpt_ini below).
GPT_EMPTY = \
"""
[General]
Version=0
"""
+
class RegistryGroupPolicies(object):
    """Read and modify the Registry.pol files of a single GPO on SYSVOL.

    Changes are written over SMB to a domain controller's sysvol share,
    the GPT.INI version counter is incremented, and the versionNumber
    attribute on the GPO's LDAP object is updated to match.
    """

    def __init__(self, gpo, lp, creds, samdb, host=None):
        """
        :param gpo: GPO name, e.g. '{31B2F340-016D-11D2-945F-00C04FB984F9}'
        :param lp: LoadParm object (must carry the 'realm' setting)
        :param creds: credentials for the SMB and LDAP connections
        :param samdb: SamDB connection to the domain
        :param host: optional DC to contact; an 'ldap://' URL is accepted,
            otherwise a DC is located automatically
        """
        self.gpo = gpo
        self.lp = lp
        self.creds = creds
        self.samdb = samdb
        realm = self.lp.get('realm')
        # the '%s' placeholder is later filled with 'Machine', 'User' or ''
        self.pol_dir = '\\'.join([realm.lower(), 'Policies', gpo, '%s'])
        self.pol_file = '\\'.join([self.pol_dir, 'Registry.pol'])
        self.policy_dn = get_gpo_dn(self.samdb, self.gpo)

        if host and host.startswith('ldap://'):
            dc_hostname = host[7:]
        else:
            dc_hostname = netcmd_finddc(self.lp, self.creds)

        self.conn = smb_connection(dc_hostname,
                                   'sysvol',
                                   lp=self.lp,
                                   creds=self.creds)

        # Get new security descriptor
        # NOTE(review): ds_sd_flags is computed but never used below
        ds_sd_flags = (security.SECINFO_OWNER |
                       security.SECINFO_GROUP |
                       security.SECINFO_DACL)
        msg = self.samdb.search(base=self.policy_dn, scope=ldb.SCOPE_BASE,
                                attrs=['nTSecurityDescriptor'])[0]
        ds_sd_ndr = msg['nTSecurityDescriptor'][0]
        ds_sd = ndr_unpack(security.descriptor, ds_sd_ndr).as_sddl()

        # Create a file system security descriptor, translated from the
        # directory-service ACL, to apply to files written on sysvol
        domain_sid = security.dom_sid(self.samdb.get_domain_sid())
        sddl = dsacl2fsacl(ds_sd, domain_sid)
        self.fs_sd = security.descriptor.from_sddl(sddl, domain_sid)

    def __load_registry_pol(self, pol_file):
        """Fetch and NDR-unpack a Registry.pol from sysvol.

        A missing file is not an error: an empty preg.file is returned so
        callers can populate it and write it back.
        """
        try:
            pol_data = ndr_unpack(preg.file, self.conn.loadfile(pol_file))
        except NTSTATUSError as e:
            if e.args[0] in [NT_STATUS_OBJECT_NAME_INVALID,
                             NT_STATUS_OBJECT_NAME_NOT_FOUND,
                             NT_STATUS_OBJECT_PATH_NOT_FOUND]:
                pol_data = preg.file()  # The file doesn't exist
            else:
                raise
        return pol_data

    def __save_file(self, file_dir, file_name, data):
        """Write data to sysvol, creating parent dirs and applying the GPO ACL."""
        create_directory_hier(self.conn, file_dir)
        self.conn.savefile(file_name, data)
        self.conn.set_acl(file_name, self.fs_sd)

    def __save_registry_pol(self, pol_dir, pol_file, pol_data):
        """NDR-pack pol_data and store it as a Registry.pol file."""
        self.__save_file(pol_dir, pol_file, ndr_pack(pol_data))

    def __validate_json(self, json_input, remove=False):
        """Check that json_input is a list of dicts carrying the required keys.

        :param remove: when True, the 'data' and 'type' keys are not required
        :raises SyntaxError: on any malformed entry
        """
        if type(json_input) != list:
            raise SyntaxError('JSON not formatted correctly')
        for entry in json_input:
            if type(entry) != dict:
                raise SyntaxError('JSON not formatted correctly')
            keys = ['keyname', 'valuename', 'class']
            if not remove:
                keys.extend(['data', 'type'])
            if not all([k in entry for k in keys]):
                raise SyntaxError('JSON not formatted correctly')

    def __determine_data_type(self, entry):
        """Map entry['type'] (an int or a 'REG_*' name) to a numeric reg type.

        :raises TypeError: if the name matches none of the known types
        """
        if isinstance(entry['type'], Number):
            return entry['type']
        else:
            # try every registry type name known to str_regtype
            for i in range(12):
                if str_regtype(i) == entry['type'].upper():
                    return i
            raise TypeError('Unknown type %s' % entry['type'])

    def __set_data(self, rtype, data):
        """Convert JSON-supplied data into the registry wire representation."""
        # JSON can't store bytes, and have to be set via an int array
        if rtype == REG_BINARY and type(data) == list:
            return bytes(data)
        elif rtype == REG_MULTI_SZ and type(data) == list:
            # NUL-joined, double-NUL-terminated UTF-16-LE string list
            data = ('\x00').join(data) + '\x00\x00'
            return data.encode('utf-16-le')
        elif rtype == REG_SZ and type(data) == str:
            return data.encode('utf-8')
        return data

    def __pol_replace(self, pol_data, entry):
        """Update the matching (keyname, valuename) entry, or append a new one."""
        for e in pol_data.entries:
            if e.keyname == entry['keyname'] and \
                    e.valuename == entry['valuename']:
                e.data = self.__set_data(e.type, entry['data'])
                break
        else:  # no existing entry matched: append a new one
            e = preg.entry()
            e.keyname = entry['keyname']
            e.valuename = entry['valuename']
            e.type = self.__determine_data_type(entry)
            e.data = self.__set_data(e.type, entry['data'])
            entries = list(pol_data.entries)
            entries.append(e)
            pol_data.entries = entries
            pol_data.num_entries = len(entries)

    def __pol_remove(self, pol_data, entry):
        """Drop every entry matching (keyname, valuename) from pol_data."""
        entries = []
        for e in pol_data.entries:
            if not (e.keyname == entry['keyname'] and
                    e.valuename == entry['valuename']):
                entries.append(e)
        pol_data.entries = entries
        pol_data.num_entries = len(entries)

    def increment_gpt_ini(self, machine_changed=False, user_changed=False):
        """Bump the GPT.INI version counter and the LDAP versionNumber.

        The version packs the machine counter in the low 16 bits and the
        user counter in the high 16 bits; only the halves that changed are
        incremented.  No-op when neither side changed.
        """
        if not machine_changed and not user_changed:
            return
        GPT_INI = self.pol_dir % 'GPT.INI'
        try:
            data = self.conn.loadfile(GPT_INI)
        except NTSTATUSError as e:
            if e.args[0] in [NT_STATUS_OBJECT_NAME_INVALID,
                             NT_STATUS_OBJECT_NAME_NOT_FOUND,
                             NT_STATUS_OBJECT_PATH_NOT_FOUND]:
                data = GPT_EMPTY
            else:
                raise
        parser = GPTIniParser()
        parser.parse(data)
        version = 0
        machine_version = 0
        user_version = 0
        if parser.ini_conf.has_option('General', 'Version'):
            # int() accepts the ASCII bytes produced by encode()
            version = int(parser.ini_conf.get('General',
                                              'Version').encode('utf-8'))
            machine_version = version & 0x0000FFFF
            user_version = version >> 16
        if machine_changed:
            machine_version += 1
        if user_changed:
            user_version += 1
        version = (user_version << 16) + machine_version

        # Set the new version in the GPT.INI
        if not parser.ini_conf.has_section('General'):
            parser.ini_conf.add_section('General')
        parser.ini_conf.set('General', 'Version', str(version))
        with StringIO() as out_data:
            parser.ini_conf.write(out_data)
            out_data.seek(0)
            self.__save_file(self.pol_dir % '', GPT_INI,
                             out_data.read().encode('utf-8'))

        # Set the new versionNumber on the ldap object
        m = ldb.Message()
        m.dn = self.policy_dn
        m['new_value'] = ldb.MessageElement(str(version), ldb.FLAG_MOD_REPLACE,
                                            'versionNumber')
        self.samdb.modify(m)

    def __validate_extension_registration(self, ext_name, ext_attr):
        """Validate an extension GUID and attribute name; return '{GUID}'.

        :raises SyntaxError: on a malformed GUID or unknown attribute
        """
        try:
            ext_name_guid = GUID(ext_name)
        except NTSTATUSError as e:
            if e.args[0] == NT_STATUS_INVALID_PARAMETER:
                raise SyntaxError('Extension name not formatted correctly')
            raise
        if ext_attr not in ['gPCMachineExtensionNames',
                            'gPCUserExtensionNames']:
            raise SyntaxError('Extension attribute incorrect')
        return '{%s}' % ext_name_guid

    def register_extension_name(self, ext_name, ext_attr):
        """Add an extension GUID to the GPO's gPC*ExtensionNames attribute.

        Does nothing if the GUID is already listed.
        """
        ext_name = self.__validate_extension_registration(ext_name, ext_attr)
        res = self.samdb.search(base=self.policy_dn, scope=ldb.SCOPE_BASE,
                                attrs=[ext_attr])
        if len(res) == 0 or ext_attr not in res[0]:
            ext_names = '[]'
        else:
            ext_names = get_string(res[0][ext_attr][-1])
        if ext_name not in ext_names:
            # splice the new guid in just before the closing bracket
            ext_names = '[' + ext_names.strip('[]') + ext_name + ']'
        else:
            return

        m = ldb.Message()
        m.dn = self.policy_dn
        m['new_value'] = ldb.MessageElement(ext_names, ldb.FLAG_MOD_REPLACE,
                                            ext_attr)
        self.samdb.modify(m)

    def unregister_extension_name(self, ext_name, ext_attr):
        """Remove an extension GUID from the GPO's gPC*ExtensionNames attribute.

        Does nothing if the attribute is absent or the GUID is not listed.
        """
        ext_name = self.__validate_extension_registration(ext_name, ext_attr)
        res = self.samdb.search(base=self.policy_dn, scope=ldb.SCOPE_BASE,
                                attrs=[ext_attr])
        if len(res) == 0 or ext_attr not in res[0]:
            return
        else:
            ext_names = get_string(res[0][ext_attr][-1])
        if ext_name in ext_names:
            ext_names = ext_names.replace(ext_name, '')
        else:
            return

        m = ldb.Message()
        m.dn = self.policy_dn
        m['new_value'] = ldb.MessageElement(ext_names, ldb.FLAG_MOD_REPLACE,
                                            ext_attr)
        self.samdb.modify(m)

    def remove_s(self, json_input):
        """remove_s
        json_input: JSON list of entries to remove from GPO

        Example json_input:
        [
            {
                "keyname": "Software\\Policies\\Mozilla\\Firefox\\Homepage",
                "valuename": "StartPage",
                "class": "USER",
            },
            {
                "keyname": "Software\\Policies\\Mozilla\\Firefox\\Homepage",
                "valuename": "URL",
                "class": "USER",
            },
        ]
        """
        self.__validate_json(json_input, remove=True)
        user_pol_data = self.__load_registry_pol(self.pol_file % 'User')
        machine_pol_data = self.__load_registry_pol(self.pol_file % 'Machine')

        machine_changed = False
        user_changed = False
        for entry in json_input:
            cls = entry['class'].lower()
            if cls == 'machine' or cls == 'both':
                machine_changed = True
                self.__pol_remove(machine_pol_data, entry)
            if cls == 'user' or cls == 'both':
                user_changed = True
                self.__pol_remove(user_pol_data, entry)
        # only rewrite (and re-version) the halves that actually changed
        if user_changed:
            self.__save_registry_pol(self.pol_dir % 'User',
                                     self.pol_file % 'User',
                                     user_pol_data)
        if machine_changed:
            self.__save_registry_pol(self.pol_dir % 'Machine',
                                     self.pol_file % 'Machine',
                                     machine_pol_data)
        self.increment_gpt_ini(machine_changed, user_changed)

    def merge_s(self, json_input):
        """merge_s
        json_input: JSON list of entries to merge into GPO

        Example json_input:
        [
            {
                "keyname": "Software\\Policies\\Mozilla\\Firefox\\Homepage",
                "valuename": "StartPage",
                "class": "USER",
                "type": "REG_SZ",
                "data": "homepage"
            },
            {
                "keyname": "Software\\Policies\\Mozilla\\Firefox\\Homepage",
                "valuename": "URL",
                "class": "USER",
                "type": "REG_SZ",
                "data": "google.com"
            },
        ]
        """
        self.__validate_json(json_input)
        user_pol_data = self.__load_registry_pol(self.pol_file % 'User')
        machine_pol_data = self.__load_registry_pol(self.pol_file % 'Machine')

        machine_changed = False
        user_changed = False
        for entry in json_input:
            cls = entry['class'].lower()
            if cls == 'machine' or cls == 'both':
                machine_changed = True
                self.__pol_replace(machine_pol_data, entry)
            if cls == 'user' or cls == 'both':
                user_changed = True
                self.__pol_replace(user_pol_data, entry)
        if user_changed:
            self.__save_registry_pol(self.pol_dir % 'User',
                                     self.pol_file % 'User',
                                     user_pol_data)
        if machine_changed:
            self.__save_registry_pol(self.pol_dir % 'Machine',
                                     self.pol_file % 'Machine',
                                     machine_pol_data)
        self.increment_gpt_ini(machine_changed, user_changed)

    def replace_s(self, json_input):
        """replace_s
        json_input: JSON list of entries to replace entries in GPO

        Example json_input:
        [
            {
                "keyname": "Software\\Policies\\Mozilla\\Firefox\\Homepage",
                "valuename": "StartPage",
                "class": "USER",
                "data": "homepage"
            },
            {
                "keyname": "Software\\Policies\\Mozilla\\Firefox\\Homepage",
                "valuename": "URL",
                "class": "USER",
                "data": "google.com"
            },
        ]
        """
        # NOTE(review): the example above omits the 'type' key, yet
        # __validate_json below requires it -- the example looks stale.
        self.__validate_json(json_input)
        # unlike merge_s, start from empty files: existing entries are dropped
        user_pol_data = preg.file()
        machine_pol_data = preg.file()

        machine_changed = False
        user_changed = False
        for entry in json_input:
            cls = entry['class'].lower()
            if cls == 'machine' or cls == 'both':
                machine_changed = True
                self.__pol_replace(machine_pol_data, entry)
            if cls == 'user' or cls == 'both':
                user_changed = True
                self.__pol_replace(user_pol_data, entry)
        if user_changed:
            self.__save_registry_pol(self.pol_dir % 'User',
                                     self.pol_file % 'User',
                                     user_pol_data)
        if machine_changed:
            self.__save_registry_pol(self.pol_dir % 'Machine',
                                     self.pol_file % 'Machine',
                                     machine_pol_data)
        self.increment_gpt_ini(machine_changed, user_changed)
diff --git a/python/samba/provision/__init__.py b/python/samba/provision/__init__.py
new file mode 100644
index 0000000..56ca749
--- /dev/null
+++ b/python/samba/provision/__init__.py
@@ -0,0 +1,2524 @@
+# Unix SMB/CIFS implementation.
+# backend code for provisioning a Samba AD server
+
+# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007-2012
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2008-2009
+# Copyright (C) Oliver Liebel <oliver@itc.li> 2008-2009
+#
+# Based on the original in EJS:
+# Copyright (C) Andrew Tridgell <tridge@samba.org> 2005
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Functions for setting up a Samba configuration."""
+
+__docformat__ = "restructuredText"
+
+from base64 import b64encode
+import errno
+import os
+import stat
+import re
+import pwd
+import grp
+import logging
+import time
+import uuid
+import socket
+import tempfile
+import samba.dsdb
+
+import ldb
+
+from samba.auth import system_session, admin_session
+from samba.auth_util import system_session_unix
+import samba
+from samba import auth
+from samba.samba3 import smbd, passdb
+from samba.samba3 import param as s3param
+from samba import (
+ Ldb,
+ MAX_NETBIOS_NAME_LEN,
+ check_all_substituted,
+ is_valid_netbios_char,
+ setup_file,
+ substitute_var,
+ valid_netbios_name,
+ version,
+ is_heimdal_built,
+)
+from samba.dcerpc import security, misc
+from samba.dcerpc.misc import (
+ SEC_CHAN_BDC,
+ SEC_CHAN_WKSTA,
+)
+from samba.dsdb import (
+ DS_DOMAIN_FUNCTION_2000,
+ DS_DOMAIN_FUNCTION_2008,
+ DS_DOMAIN_FUNCTION_2008_R2,
+ DS_DOMAIN_FUNCTION_2012,
+ DS_DOMAIN_FUNCTION_2012_R2,
+ DS_DOMAIN_FUNCTION_2016,
+ ENC_ALL_TYPES,
+)
+from samba.idmap import IDmapDB
+from samba.ms_display_specifiers import read_ms_ldif
+from samba.ntacls import setntacl, getntacl, dsacl2fsacl
+from samba.ndr import ndr_pack, ndr_unpack
+from samba.provision.backend import (
+ LDBBackend,
+)
+from samba.descriptor import (
+ get_deletedobjects_descriptor,
+ get_config_descriptor,
+ get_config_partitions_descriptor,
+ get_config_sites_descriptor,
+ get_config_ntds_quotas_descriptor,
+ get_config_delete_protected1_descriptor,
+ get_config_delete_protected1wd_descriptor,
+ get_config_delete_protected2_descriptor,
+ get_domain_descriptor,
+ get_domain_infrastructure_descriptor,
+ get_domain_builtin_descriptor,
+ get_domain_computers_descriptor,
+ get_domain_users_descriptor,
+ get_domain_controllers_descriptor,
+ get_domain_delete_protected1_descriptor,
+ get_domain_delete_protected2_descriptor,
+ get_managed_service_accounts_descriptor,
+)
+from samba.provision.common import (
+ setup_path,
+ setup_add_ldif,
+ setup_modify_ldif,
+ FILL_FULL,
+ FILL_SUBDOMAIN,
+ FILL_DRS
+)
+from samba.provision.sambadns import (
+ get_dnsadmins_sid,
+ setup_ad_dns,
+ create_dns_dir_keytab_link,
+ create_dns_update_list
+)
+
+import samba.param
+import samba.registry
+from samba.schema import Schema
+from samba.samdb import SamDB
+from samba.dbchecker import dbcheck
+from samba.provision.kerberos import create_kdc_conf
+from samba.samdb import get_default_backend_store
+from samba import functional_level
+
# Well-known GUIDs of the two GPOs present in every AD domain by default:
# the Default Domain Policy and the Default Domain Controllers Policy.
DEFAULT_POLICY_GUID = "31B2F340-016D-11D2-945F-00C04FB984F9"
DEFAULT_DC_POLICY_GUID = "6AC1786C-016F-11D2-945F-00C04FB984F9"
# Name of the first site created in a fresh forest
DEFAULTSITE = "Default-First-Site-Name"
# @PROVISION attribute recording which USN ranges each provision touched
LAST_PROVISION_USN_ATTRIBUTE = "lastProvisionUSN"

DEFAULT_MIN_PWD_LENGTH = 7
+
+
class ProvisionPaths(object):
    """Container for the filesystem paths used during provisioning.

    Every attribute starts out as None; provision_paths_from_lp() is the
    canonical way to populate an instance.
    """

    def __init__(self):
        # initialise every path slot to the unset state
        for attr in ('shareconf', 'hklm', 'hkcu', 'hkcr', 'hku', 'hkpd',
                     'hkpt', 'samdb', 'idmapdb', 'secrets', 'keytab',
                     'dns_keytab', 'dns', 'winsdb', 'private_dir',
                     'binddns_dir', 'state_dir'):
            setattr(self, attr, None)
+
+
class ProvisionNames(object):
    """Container for the naming information of a provision.

    Holds DNs, DNS/NetBIOS names, SIDs and related identifiers; everything
    starts unset except name_map, which begins as an empty mapping.
    """

    def __init__(self):
        # all scalar naming slots start out unset
        for attr in ('ncs', 'rootdn', 'domaindn', 'configdn', 'schemadn',
                     'dnsforestdn', 'dnsdomaindn', 'ldapmanagerdn',
                     'dnsdomain', 'realm', 'netbiosname', 'domain',
                     'hostname', 'sitename', 'smbconf', 'domainsid',
                     'forestsid', 'domainguid'):
            setattr(self, attr, None)
        self.name_map = {}
+
+
def find_provision_key_parameters(samdb, secretsdb, idmapdb, paths, smbconf,
                                  lp):
    """Get key provision parameters (realm, domain, ...) from a given provision

    :param samdb: An LDB object connected to the sam.ldb file
    :param secretsdb: An LDB object connected to the secrets.ldb file
    :param idmapdb: An LDB object connected to the idmap.ldb file
    :param paths: A list of path to provision object
    :param smbconf: Path to the smb.conf file
    :param lp: A LoadParm object
    :return: A ProvisionNames object carrying the key provision parameters
    """
    names = ProvisionNames()
    names.adminpass = None

    # NT domain, kerberos realm, root dn, domain dn, domain dns name
    names.domain = lp.get("workgroup").upper()
    names.realm = lp.get("realm")
    names.dnsdomain = names.realm.lower()
    basedn = samba.dn_from_dns_name(names.dnsdomain)
    names.realm = names.realm.upper()
    # netbiosname
    # Get the netbiosname first (could be obtained from smb.conf in theory)
    res = secretsdb.search(expression="(flatname=%s)" %
                           names.domain, base="CN=Primary Domains",
                           scope=ldb.SCOPE_SUBTREE, attrs=["sAMAccountName"])
    # machine account name minus the trailing '$'
    names.netbiosname = str(res[0]["sAMAccountName"]).replace("$", "")

    names.smbconf = smbconf

    # That's a bit simplistic but it's ok as long as we have only 3
    # partitions
    current = samdb.search(expression="(objectClass=*)",
                           base="", scope=ldb.SCOPE_BASE,
                           attrs=["defaultNamingContext",
                                  "schemaNamingContext",
                                  "configurationNamingContext",
                                  "rootDomainNamingContext",
                                  "namingContexts"])

    names.configdn = str(current[0]["configurationNamingContext"][0])
    names.schemadn = str(current[0]["schemaNamingContext"][0])
    # sanity check: smb.conf realm and the database must agree on the base DN
    if not (ldb.Dn(samdb, basedn) == (ldb.Dn(samdb,
            current[0]["defaultNamingContext"][0].decode('utf8')))):
        raise ProvisioningError(("basedn in %s (%s) and from %s (%s)"
                                 "is not the same ..." % (paths.samdb,
                                 str(current[0]["defaultNamingContext"][0].decode('utf8')),
                                 paths.smbconf, basedn)))

    names.domaindn = str(current[0]["defaultNamingContext"][0])
    names.rootdn = str(current[0]["rootDomainNamingContext"][0])
    names.ncs = current[0]["namingContexts"]
    names.dnsforestdn = None
    names.dnsdomaindn = None

    # spot the optional DNS application partitions among the naming contexts
    for i in range(0, len(names.ncs)):
        nc = str(names.ncs[i])

        dnsforestdn = "DC=ForestDnsZones,%s" % (str(names.rootdn))
        if nc == dnsforestdn:
            names.dnsforestdn = dnsforestdn
            continue

        dnsdomaindn = "DC=DomainDnsZones,%s" % (str(names.domaindn))
        if nc == dnsdomaindn:
            names.dnsdomaindn = dnsdomaindn
            continue

    # default site name
    res3 = samdb.search(expression="(objectClass=site)",
                        base="CN=Sites," + str(names.configdn),
                        scope=ldb.SCOPE_ONELEVEL, attrs=["cn"])
    names.sitename = str(res3[0]["cn"])

    # dns hostname and server dn
    res4 = samdb.search(expression="(CN=%s)" % names.netbiosname,
                        base="OU=Domain Controllers,%s" % basedn,
                        scope=ldb.SCOPE_ONELEVEL, attrs=["dNSHostName"])
    if len(res4) == 0:
        raise ProvisioningError("Unable to find DC called CN=%s under OU=Domain Controllers,%s" % (names.netbiosname, basedn))

    # short hostname: strip the domain suffix off the dNSHostName
    names.hostname = str(res4[0]["dNSHostName"]).replace("." + names.dnsdomain, "")

    server_res = samdb.search(expression="serverReference=%s" % res4[0].dn,
                              attrs=[], base=names.configdn)
    names.serverdn = str(server_res[0].dn)

    # invocation id/objectguid
    res5 = samdb.search(expression="(objectClass=*)",
                        base="CN=NTDS Settings,%s" % str(names.serverdn),
                        scope=ldb.SCOPE_BASE,
                        attrs=["invocationID", "objectGUID"])
    names.invocation = str(ndr_unpack(misc.GUID, res5[0]["invocationId"][0]))
    names.ntdsguid = str(ndr_unpack(misc.GUID, res5[0]["objectGUID"][0]))

    # domain guid/sid
    res6 = samdb.search(expression="(objectClass=*)", base=basedn,
                        scope=ldb.SCOPE_BASE, attrs=["objectGUID",
                        "objectSid", "msDS-Behavior-Version"])
    names.domainguid = str(ndr_unpack(misc.GUID, res6[0]["objectGUID"][0]))
    names.domainsid = ndr_unpack(security.dom_sid, res6[0]["objectSid"][0])
    names.forestsid = ndr_unpack(security.dom_sid, res6[0]["objectSid"][0])
    # an absent or too-low behavior version is clamped to the 2000 level
    if res6[0].get("msDS-Behavior-Version") is None or \
            int(res6[0]["msDS-Behavior-Version"][0]) < DS_DOMAIN_FUNCTION_2000:
        names.domainlevel = DS_DOMAIN_FUNCTION_2000
    else:
        names.domainlevel = int(res6[0]["msDS-Behavior-Version"][0])

    # policy guid
    res7 = samdb.search(expression="(name={%s})" % DEFAULT_POLICY_GUID,
                        base="CN=Policies,CN=System," + basedn,
                        scope=ldb.SCOPE_ONELEVEL, attrs=["cn", "displayName"])
    names.policyid = str(res7[0]["cn"]).replace("{", "").replace("}", "")
    # dc policy guid
    res8 = samdb.search(expression="(name={%s})" % DEFAULT_DC_POLICY_GUID,
                        base="CN=Policies,CN=System," + basedn,
                        scope=ldb.SCOPE_ONELEVEL,
                        attrs=["cn", "displayName"])
    if len(res8) == 1:
        names.policyid_dc = str(res8[0]["cn"]).replace("{", "").replace("}", "")
    else:
        names.policyid_dc = None

    # resolve the unix gid backing the Domain Administrator's account
    res9 = idmapdb.search(expression="(cn=%s-%s)" %
                          (str(names.domainsid), security.DOMAIN_RID_ADMINISTRATOR),
                          attrs=["xidNumber", "type"])
    if len(res9) != 1:
        raise ProvisioningError("Unable to find uid/gid for Domain Admins rid (%s-%s" % (str(names.domainsid), security.DOMAIN_RID_ADMINISTRATOR))
    if str(res9[0]["type"][0]) == "ID_TYPE_BOTH":
        names.root_gid = int(res9[0]["xidNumber"][0])
    else:
        names.root_gid = pwd.getpwuid(int(res9[0]["xidNumber"][0])).pw_gid

    # presence of the old-style 'dns' account hints at a BIND9 flatfile setup
    res10 = samdb.search(expression="(samaccountname=dns)",
                         scope=ldb.SCOPE_SUBTREE, attrs=["dn"],
                         controls=["search_options:1:2"])
    if (len(res10) > 0):
        has_legacy_dns_account = True
    else:
        has_legacy_dns_account = False

    # per-DC 'dns-<netbiosname>' account indicates the BIND9 DLZ backend
    res11 = samdb.search(expression="(samaccountname=dns-%s)" % names.netbiosname,
                         scope=ldb.SCOPE_SUBTREE, attrs=["dn"],
                         controls=["search_options:1:2"])
    if (len(res11) > 0):
        has_dns_account = True
    else:
        has_dns_account = False

    # derive the configured DNS backend from partitions + accounts found
    if names.dnsdomaindn is not None:
        if has_dns_account:
            names.dns_backend = 'BIND9_DLZ'
        else:
            names.dns_backend = 'SAMBA_INTERNAL'
    elif has_dns_account or has_legacy_dns_account:
        names.dns_backend = 'BIND9_FLATFILE'
    else:
        names.dns_backend = 'NONE'

    dns_admins_sid = get_dnsadmins_sid(samdb, names.domaindn)
    names.name_map['DnsAdmins'] = str(dns_admins_sid)

    return names
+
+
# NOTE(review): the parameter 'id' shadows the builtin; renaming it would
# break callers passing it by keyword, so it is kept as-is.
def update_provision_usn(samdb, low, high, id, replace=False):
    """Update the field provisionUSN in sam.ldb

    This field is used to track range of USN modified by provision and
    upgradeprovision.
    This value is used afterward by next provision to figure out if
    the field have been modified since last provision.

    :param samdb: An LDB object connect to sam.ldb
    :param low: The lowest USN modified by this upgrade
    :param high: The highest USN modified by this upgrade
    :param id: The invocation id of the samba's dc
    :param replace: A boolean indicating if the range should replace any
                    existing one or appended (default)
    """

    tab = []
    if not replace:
        # keep the ranges already recorded, tagging any legacy value that
        # lacks an ';invocationid' suffix with this invocation's id
        entry = samdb.search(base="@PROVISION",
                             scope=ldb.SCOPE_BASE,
                             attrs=[LAST_PROVISION_USN_ATTRIBUTE, "dn"])
        for e in entry[0][LAST_PROVISION_USN_ATTRIBUTE]:
            if not re.search(';', str(e)):
                e = "%s;%s" % (str(e), id)
            tab.append(str(e))

    # append this run's range, formatted "low-high;invocationid"
    tab.append("%s-%s;%s" % (low, high, id))
    delta = ldb.Message()
    delta.dn = ldb.Dn(samdb, "@PROVISION")
    delta[LAST_PROVISION_USN_ATTRIBUTE] = \
        ldb.MessageElement(tab,
                           ldb.FLAG_MOD_REPLACE,
                           LAST_PROVISION_USN_ATTRIBUTE)
    entry = samdb.search(expression='provisionnerID=*',
                         base="@PROVISION", scope=ldb.SCOPE_BASE,
                         attrs=["provisionnerID"])
    # record the provisioner id only if none has been stored yet
    if len(entry) == 0 or len(entry[0]) == 0:
        delta["provisionnerID"] = ldb.MessageElement(id, ldb.FLAG_MOD_ADD, "provisionnerID")
    samdb.modify(delta)
+
+
def set_provision_usn(samdb, low, high, id):
    """Set the field provisionUSN in sam.ldb
    This field is used to track range of USN modified by provision and
    upgradeprovision.
    This value is used afterward by next provision to figure out if
    the field have been modified since last provision.

    :param samdb: An LDB object connect to sam.ldb
    :param low: The lowest USN modified by this upgrade
    :param high: The highest USN modified by this upgrade
    :param id: The invocationId of the provision"""

    # single range formatted "low-high;invocationid"
    tab = []
    tab.append("%s-%s;%s" % (low, high, id))

    delta = ldb.Message()
    delta.dn = ldb.Dn(samdb, "@PROVISION")
    # FLAG_MOD_ADD (unlike update_provision_usn's REPLACE): this creates
    # the attribute on a fresh @PROVISION record
    delta[LAST_PROVISION_USN_ATTRIBUTE] = \
        ldb.MessageElement(tab,
                           ldb.FLAG_MOD_ADD,
                           LAST_PROVISION_USN_ATTRIBUTE)
    samdb.add(delta)
+
+
+def get_max_usn(samdb, basedn):
+ """ This function return the biggest USN present in the provision
+
+ :param samdb: A LDB object pointing to the sam.ldb
+ :param basedn: A string containing the base DN of the provision
+ (ie. DC=foo, DC=bar)
+ :return: The biggest USN in the provision"""
+
+ res = samdb.search(expression="objectClass=*", base=basedn,
+ scope=ldb.SCOPE_SUBTREE, attrs=["uSNChanged"],
+ controls=["search_options:1:2",
+ "server_sort:1:1:uSNChanged",
+ "paged_results:1:1"])
+ return res[0]["uSNChanged"]
+
+
def get_last_provision_usn(sam):
    """Get USNs ranges modified by a provision or an upgradeprovision

    :param sam: An LDB object pointing to the sam.ldb
    :return: a dictionary which keys are invocation id and values are an
        array of strings holding the low/high bounds of each range, or
        None when no @PROVISION tracking record exists
    """
    try:
        entry = sam.search(expression="%s=*" % LAST_PROVISION_USN_ATTRIBUTE,
                           base="@PROVISION", scope=ldb.SCOPE_BASE,
                           attrs=[LAST_PROVISION_USN_ATTRIBUTE,
                                  "provisionnerID"])
    except ldb.LdbError as e1:
        (ecode, emsg) = e1.args
        if ecode == ldb.ERR_NO_SUCH_OBJECT:
            # no provision tracking record at all
            return None
        raise
    if len(entry) == 0:
        return None

    # Collect the recorded provisioner ids; when any exist, only ranges
    # tagged with one of them are returned.
    # (locals renamed from 'range'/'id' which shadowed the builtins)
    myids = []
    if entry[0].get("provisionnerID"):
        for val in entry[0]["provisionnerID"]:
            myids.append(str(val))

    # each value looks like "low-high;invocationid" (";invocationid" is
    # optional in legacy entries, which map to the "default" key)
    ranges = {}
    for raw in entry[0][LAST_PROVISION_USN_ATTRIBUTE]:
        parts = str(raw).split(';')
        inv_id = parts[1] if len(parts) == 2 else "default"
        if myids and inv_id not in myids:
            continue
        bounds = parts[0].split('-')
        ranges.setdefault(inv_id, []).extend([bounds[0], bounds[1]])
    return ranges
+
+
class ProvisionResult(object):
    """Outcome of a completed provision run.

    Carries the server role, the path/name containers, database handles and
    the (possibly generated) admin password, and can summarise itself to a
    logger.
    """

    def __init__(self):
        # every result slot starts out unset
        for attr in ('server_role', 'paths', 'domaindn', 'lp', 'samdb',
                     'idmap', 'names', 'domainsid', 'adminpass_generated',
                     'adminpass', 'backend_result'):
            setattr(self, attr, None)

    def report_logger(self, logger):
        """Report this provision result to a logger."""
        logger.info(
            "Once the above files are installed, your Samba AD server will "
            "be ready to use")
        # only reveal the password when it was generated for the user
        if self.adminpass_generated:
            logger.info("Admin password: %s", self.adminpass)
        logger.info("Server Role: %s", self.server_role)
        logger.info("Hostname: %s", self.names.hostname)
        logger.info("NetBIOS Domain: %s", self.names.domain)
        logger.info("DNS Domain: %s", self.names.dnsdomain)
        logger.info("DOMAIN SID: %s", self.domainsid)

        # let the backend append its own report, when one is attached
        if self.backend_result:
            self.backend_result.report_logger(logger)
+
+
def findnss(nssfn, names):
    """Return the first successful NSS lookup among *names*.

    :param nssfn: NSS lookup function (raises KeyError for unknown names)
    :param names: candidate names, tried in order
    :return: value returned by nssfn for the first name it resolves
    :raises KeyError: when none of the names resolve
    """
    for candidate in names:
        try:
            return nssfn(candidate)
        except KeyError:
            continue
    raise KeyError("Unable to find user/group in %r" % names)
+
+
def findnss_uid(names):
    """Return the numeric UID of the first account name found in *names*."""
    passwd_entry = findnss(pwd.getpwnam, names)
    return passwd_entry[2]
+
+
def findnss_gid(names):
    """Return the numeric GID of the first group name found in *names*."""
    group_entry = findnss(grp.getgrnam, names)
    return group_entry[2]
+
+
def get_root_uid(root, logger):
    """Resolve the UID of the root account.

    Falls back to UID 0 (logging the reason) when none of the candidate
    names resolve.

    :param root: sequence of candidate root account names
    :param logger: logger used to report the fallback
    :return: the resolved UID, or 0
    """
    try:
        return findnss_uid(root)
    except KeyError as err:
        logger.info(err)
        logger.info("Assuming root user has UID zero")
        return 0
+
+
def provision_paths_from_lp(lp, dnsdomain):
    """Set the default paths for provisioning.

    :param lp: Loadparm context.
    :param dnsdomain: DNS Domain name
    :return: a populated ProvisionPaths instance
    """
    pp = ProvisionPaths()
    pp.private_dir = lp.get("private dir")
    pp.binddns_dir = lp.get("binddns dir")
    pp.state_dir = lp.get("state directory")

    def in_private(name):
        # shorthand for files living under the private directory
        return os.path.join(pp.private_dir, name)

    # This is stored without path prefix for the "privateKeytab" attribute in
    # "secrets_dns.ldif".
    pp.dns_keytab = "dns.keytab"
    pp.keytab = "secrets.keytab"

    pp.shareconf = in_private("share.ldb")
    pp.samdb = in_private("sam.ldb")
    pp.idmapdb = in_private("idmap.ldb")
    pp.secrets = in_private("secrets.ldb")
    pp.privilege = in_private("privilege.ldb")
    pp.dns_update_list = in_private("dns_update_list")
    pp.spn_update_list = in_private("spn_update_list")
    pp.krb5conf = in_private("krb5.conf")
    pp.kdcconf = in_private("kdc.conf")
    pp.winsdb = in_private("wins.ldb")
    pp.s4_ldapi_path = in_private("ldapi")
    pp.encrypted_secrets_key_path = in_private("encrypted_secrets.key")

    # BIND-related files live under the binddns directory
    pp.dns = os.path.join(pp.binddns_dir, "dns", dnsdomain + ".zone")
    pp.namedconf = os.path.join(pp.binddns_dir, "named.conf")
    pp.namedconf_update = os.path.join(pp.binddns_dir, "named.conf.update")
    pp.namedtxt = os.path.join(pp.binddns_dir, "named.txt")

    # registry hive files are recorded without a directory prefix
    pp.hklm = "hklm.ldb"
    pp.hkcr = "hkcr.ldb"
    pp.hkcu = "hkcu.ldb"
    pp.hku = "hku.ldb"
    pp.hkpd = "hkpd.ldb"
    pp.hkpt = "hkpt.ldb"
    pp.sysvol = lp.get("path", "sysvol")
    pp.netlogon = lp.get("path", "netlogon")
    pp.smbconf = lp.configfile
    return pp
+
+
def determine_netbios_name(hostname):
    """Derive a NetBIOS name from a hostname.

    Drops characters that are not legal in NetBIOS names, truncates to the
    maximum NetBIOS name length and upper-cases the result.
    """
    legal_chars = (ch for ch in hostname if is_valid_netbios_char(ch))
    return "".join(legal_chars)[:MAX_NETBIOS_NAME_LEN].upper()
+
+
def guess_names(lp=None, hostname=None, domain=None, dnsdomain=None,
                serverrole=None, rootdn=None, domaindn=None, configdn=None,
                schemadn=None, serverdn=None, sitename=None,
                domain_names_forced=False):
    """Guess configuration settings to use.

    Derives host, NetBIOS, domain, realm and DN values from the supplied
    loadparm context and explicit arguments, and cross-checks that the
    smb.conf values agree with any explicitly chosen names.

    :param lp: Loadparm context (consulted for "netbios name", "realm",
        "server role" and "workgroup").
    :param hostname: Host name; defaults to the local short host name.
    :param domain: NT4-style (short) domain name.
    :param dnsdomain: DNS domain name; defaults to the smb.conf realm.
    :param serverrole: Server role; defaults to smb.conf "server role".
    :param rootdn: Root DN; defaults to the domain DN.
    :param domaindn: Domain DN; derived from dnsdomain if not given.
    :param configdn: Configuration partition DN.
    :param schemadn: Schema partition DN.
    :param serverdn: DN of this server's object.
    :param sitename: Site name; defaults to DEFAULTSITE.
    :param domain_names_forced: If True, allow the short domain name to
        equal the realm (skips that sanity check).
    :return: A populated ProvisionNames instance.
    :raises InvalidNetbiosName: if a derived NetBIOS name is not valid.
    :raises ProvisioningError: if required smb.conf values are missing or
        inconsistent with the chosen names.
    """

    if hostname is None:
        hostname = socket.gethostname().split(".")[0]

    netbiosname = lp.get("netbios name")
    if netbiosname is None:
        netbiosname = determine_netbios_name(hostname)
    netbiosname = netbiosname.upper()
    if not valid_netbios_name(netbiosname):
        raise InvalidNetbiosName(netbiosname)

    if dnsdomain is None:
        dnsdomain = lp.get("realm")
        if dnsdomain is None or dnsdomain == "":
            raise ProvisioningError(
                "guess_names: 'realm' not specified in supplied %s!" %
                lp.configfile)

    dnsdomain = dnsdomain.lower()

    if serverrole is None:
        serverrole = lp.get("server role")
        if serverrole is None:
            raise ProvisioningError("guess_names: 'server role' not specified in supplied %s!" % lp.configfile)

    serverrole = serverrole.lower()

    realm = dnsdomain.upper()

    if lp.get("realm") == "":
        raise ProvisioningError("guess_names: 'realm =' was not specified in supplied %s. Please remove the smb.conf file and let provision generate it" % lp.configfile)

    if lp.get("realm").upper() != realm:
        raise ProvisioningError("guess_names: 'realm=%s' in %s must match chosen realm '%s'! Please remove the smb.conf file and let provision generate it" % (lp.get("realm").upper(), lp.configfile, realm))

    if lp.get("server role").lower() != serverrole:
        raise ProvisioningError("guess_names: 'server role=%s' in %s must match chosen server role '%s'! Please remove the smb.conf file and let provision generate it" % (lp.get("server role"), lp.configfile, serverrole))

    if serverrole == "active directory domain controller":
        if domain is None:
            # This will, for better or worse, default to 'WORKGROUP'
            domain = lp.get("workgroup")
        domain = domain.upper()

        if lp.get("workgroup").upper() != domain:
            raise ProvisioningError("guess_names: Workgroup '%s' in smb.conf must match chosen domain '%s'! Please remove the %s file and let provision generate it" % (lp.get("workgroup").upper(), domain, lp.configfile))

        if domaindn is None:
            domaindn = samba.dn_from_dns_name(dnsdomain)

        if domain == netbiosname:
            raise ProvisioningError("guess_names: Domain '%s' must not be equal to short host name '%s'!" % (domain, netbiosname))
    else:
        # Non-DC roles use the NetBIOS host name as the "domain".
        domain = netbiosname
        if domaindn is None:
            domaindn = "DC=" + netbiosname

    if not valid_netbios_name(domain):
        raise InvalidNetbiosName(domain)

    if hostname.upper() == realm:
        raise ProvisioningError("guess_names: Realm '%s' must not be equal to hostname '%s'!" % (realm, hostname))
    if netbiosname.upper() == realm:
        raise ProvisioningError("guess_names: Realm '%s' must not be equal to NetBIOS hostname '%s'!" % (realm, netbiosname))
    if domain == realm and not domain_names_forced:
        raise ProvisioningError("guess_names: Realm '%s' must not be equal to short domain name '%s'!" % (realm, domain))

    if serverrole != "active directory domain controller":
        #
        # This is the code path for a domain member
        # where we provision the database as if we were
        # on a domain controller, so we should not use
        # the same dnsdomain as the domain controllers
        # of our primary domain.
        #
        # This will be important if we start doing
        # SID/name filtering and reject the local
        # sid and names if they come from a domain
        # controller.
        #
        realm = netbiosname
        dnsdomain = netbiosname.lower()

    if rootdn is None:
        rootdn = domaindn

    if configdn is None:
        configdn = "CN=Configuration," + rootdn
    if schemadn is None:
        schemadn = "CN=Schema," + configdn

    if sitename is None:
        sitename = DEFAULTSITE

    if serverdn is None:
        serverdn = "CN=%s,CN=Servers,CN=%s,CN=Sites,%s" % (
            netbiosname, sitename, configdn)

    names = ProvisionNames()
    names.rootdn = rootdn
    names.domaindn = domaindn
    names.configdn = configdn
    names.schemadn = schemadn
    names.ldapmanagerdn = "CN=Manager," + rootdn
    names.dnsdomain = dnsdomain
    names.domain = domain
    names.realm = realm
    names.netbiosname = netbiosname
    names.hostname = hostname
    names.sitename = sitename
    names.serverdn = serverdn

    return names
+
+
def make_smbconf(smbconf, hostname, domain, realm, targetdir,
                 serverrole=None, eadb=False, use_ntvfs=False, lp=None,
                 global_param=None):
    """Create a new smb.conf file based on a couple of basic settings.

    The file is first written from the collected settings, then reloaded
    and re-dumped so that parameters matching compiled-in defaults are
    dropped from the final output.

    :param smbconf: Path of the smb.conf file to write (must not be None).
    :param hostname: Host name; defaults to the local short host name.
    :param domain: Workgroup/short domain name (must not be None).
    :param realm: Kerberos realm (must not be None).
    :param targetdir: If given, private/lock/state/cache/bind-dns
        directories are placed under this directory.
    :param serverrole: Server role; defaults to "standalone server".
    :param eadb: If True, configure an extended-attribute emulation tdb.
    :param use_ntvfs: With eadb, selects posix:eadb (NTVFS) rather than
        xattr_tdb.
    :param lp: Optional Loadparm context to reuse; a fresh one is created
        (and any existing smb.conf loaded) if not given.
    :param global_param: Optional dict of extra [globals] parameters; each
        value is a list of strings that is space-joined.
    """
    assert smbconf is not None

    if hostname is None:
        hostname = socket.gethostname().split(".")[0]

    netbiosname = determine_netbios_name(hostname)

    if serverrole is None:
        serverrole = "standalone server"

    assert domain is not None
    domain = domain.upper()

    assert realm is not None
    realm = realm.upper()

    global_settings = {
        "netbios name": netbiosname,
        "workgroup": domain,
        "realm": realm,
        "server role": serverrole,
    }

    if lp is None:
        lp = samba.param.LoadParm()
    # Load non-existent file
    if os.path.exists(smbconf):
        lp.load(smbconf)

    if global_param is not None:
        for ent in global_param:
            if global_param[ent] is not None:
                global_settings[ent] = " ".join(global_param[ent])

    if targetdir is not None:
        global_settings["private dir"] = os.path.abspath(os.path.join(targetdir, "private"))
        global_settings["lock dir"] = os.path.abspath(targetdir)
        global_settings["state directory"] = os.path.abspath(os.path.join(targetdir, "state"))
        global_settings["cache directory"] = os.path.abspath(os.path.join(targetdir, "cache"))
        global_settings["binddns dir"] = os.path.abspath(os.path.join(targetdir, "bind-dns"))

        # Also push the paths into the live lp context so the eadb/xattr
        # settings below resolve against the target directory.
        lp.set("lock dir", os.path.abspath(targetdir))
        lp.set("state directory", global_settings["state directory"])
        lp.set("cache directory", global_settings["cache directory"])
        lp.set("binddns dir", global_settings["binddns dir"])

    if eadb:
        if use_ntvfs:
            if targetdir is not None:
                privdir = os.path.join(targetdir, "private")
                lp.set("posix:eadb",
                       os.path.abspath(os.path.join(privdir, "eadb.tdb")))
            elif not lp.get("posix:eadb"):
                privdir = lp.get("private dir")
                lp.set("posix:eadb",
                       os.path.abspath(os.path.join(privdir, "eadb.tdb")))
        else:
            if targetdir is not None:
                statedir = os.path.join(targetdir, "state")
                lp.set("xattr_tdb:file",
                       os.path.abspath(os.path.join(statedir, "xattr.tdb")))
            elif not lp.get("xattr_tdb:file"):
                statedir = lp.get("state directory")
                lp.set("xattr_tdb:file",
                       os.path.abspath(os.path.join(statedir, "xattr.tdb")))

    shares = {}
    if serverrole == "active directory domain controller":
        shares["sysvol"] = os.path.join(lp.get("state directory"), "sysvol")
        shares["netlogon"] = os.path.join(shares["sysvol"], realm.lower(),
                                          "scripts")
    else:
        global_settings["passdb backend"] = "samba_dsdb"

    f = open(smbconf, 'w')
    try:
        f.write("[globals]\n")
        for key, val in global_settings.items():
            f.write("\t%s = %s\n" % (key, val))
        f.write("\n")

        for name, path in shares.items():
            f.write("[%s]\n" % name)
            f.write("\tpath = %s\n" % path)
            f.write("\tread only = no\n")
            f.write("\n")
    finally:
        f.close()
    # reload the smb.conf
    lp.load(smbconf)

    # and dump it without any values that are the default
    # this ensures that any smb.conf parameters that were set
    # on the provision/join command line are set in the resulting smb.conf
    lp.dump(False, smbconf)
+
+
def setup_name_mappings(idmap, sid, root_uid, nobody_uid,
                        users_gid):
    """setup reasonable name mappings for sam names to unix names.

    :param idmap: IDmap db object.
    :param sid: The domain sid (string form).
    :param root_uid: uid of the UNIX root user.
    :param nobody_uid: uid of the UNIX nobody user.
    :param users_gid: gid of the UNIX users group.
    """
    # S-1-5-7 is the well-known Anonymous Logon SID: map it to nobody.
    idmap.setup_name_mapping("S-1-5-7", idmap.TYPE_UID, nobody_uid)

    # RID 500 is the built-in Administrator; RID 513 is Domain Users.
    idmap.setup_name_mapping(sid + "-500", idmap.TYPE_UID, root_uid)
    idmap.setup_name_mapping(sid + "-513", idmap.TYPE_GID, users_gid)
+
+
def setup_samdb_partitions(samdb_path, logger, lp, session_info,
                           provision_backend, names, serverrole,
                           plaintext_secrets=False,
                           backend_store=None):
    """Setup the partitions for the SAM database.

    Alternatively, provision() may call this, and then populate the database.

    :note: This will wipe the Sam Database!

    :note: This function always removes the local SAM LDB file. The erase
        parameter controls whether to erase the existing data, which
        may not be stored locally but in LDAP.

    :param samdb_path: Path to the sam.ldb file; it is removed and re-created.
    :param logger: Logger for progress messages.
    :param lp: Loadparm context.
    :param session_info: Session to open the database with (must not be None).
    :param provision_backend: Backend object whose type/ldap_uri select the
        LDAP backend line written to the partitions record.
    :param names: ProvisionNames used to fill in the rootDSE.
    :param serverrole: Server role string recorded in provision_init.ldif.
    :param plaintext_secrets: If False, require the encryptedSecrets feature.
    :param backend_store: "tdb" or "mdb"; defaults to the compiled-in default.
    """
    assert session_info is not None

    # We use options=["modules:"] to stop the modules loading - we
    # just want to wipe and re-initialise the database, not start it up

    try:
        os.unlink(samdb_path)
    except OSError:
        pass

    samdb = Ldb(url=samdb_path, session_info=session_info,
                lp=lp, options=["modules:"])

    ldap_backend_line = "# No LDAP backend"
    if provision_backend.type != "ldb":
        ldap_backend_line = "ldapBackend: %s" % provision_backend.ldap_uri

    required_features = None
    if not plaintext_secrets:
        required_features = "requiredFeatures: encryptedSecrets"

    if backend_store is None:
        backend_store = get_default_backend_store()
    backend_store_line = "backendStore: %s" % backend_store

    if backend_store == "mdb":
        # lmdb databases additionally require the lmdbLevelOne feature flag.
        if required_features is not None:
            required_features += "\n"
        else:
            required_features = ""
        required_features += "requiredFeatures: lmdbLevelOne"

    if required_features is None:
        required_features = "# No required features"

    samdb.transaction_start()
    try:
        logger.info("Setting up sam.ldb partitions and settings")
        setup_add_ldif(samdb, setup_path("provision_partitions.ldif"), {
            "LDAP_BACKEND_LINE": ldap_backend_line,
            "BACKEND_STORE": backend_store_line
        })

        setup_add_ldif(samdb, setup_path("provision_init.ldif"), {
            "BACKEND_TYPE": provision_backend.type,
            "SERVER_ROLE": serverrole,
            "REQUIRED_FEATURES": required_features
        })

        logger.info("Setting up sam.ldb rootDSE")
        setup_samdb_rootdse(samdb, names)
    except:
        samdb.transaction_cancel()
        raise
    else:
        samdb.transaction_commit()
+
+
def secretsdb_self_join(secretsdb, domain,
                        netbiosname, machinepass, domainsid=None,
                        realm=None, dnsdomain=None,
                        key_version_number=1,
                        secure_channel_type=SEC_CHAN_WKSTA):
    """Add domain join-specific bits to a secrets database.

    :param secretsdb: Ldb Handle to the secrets database
    :param domain: NT4-style (flat) domain name
    :param netbiosname: NetBIOS name of this machine
    :param machinepass: Machine password
    :param domainsid: Domain SID; stored as objectSid if given
    :param realm: Kerberos realm; when set, kerberosSecret attributes
        (realm, saltPrincipal, kvno, keytab) are also written
    :param dnsdomain: DNS domain; defaults to the lower-cased realm
    :param key_version_number: Kerberos key version number (kvno)
    :param secure_channel_type: SEC_CHAN_* value describing our role
    """
    attrs = ["whenChanged",
             "secret",
             "priorSecret",
             "priorChanged",
             "krb5Keytab",
             "privateKeytab"]

    if realm is not None:
        if dnsdomain is None:
            dnsdomain = realm.lower()
        dnsname = '%s.%s' % (netbiosname.lower(), dnsdomain.lower())
    else:
        dnsname = None
    shortname = netbiosname.lower()

    # We don't need to set msg["flatname"] here, because rdn_name will handle
    # it, and it causes problems for modifies anyway
    msg = ldb.Message(ldb.Dn(secretsdb, "flatname=%s,cn=Primary Domains" % domain))
    msg["secureChannelType"] = [str(secure_channel_type)]
    msg["objectClass"] = ["top", "primaryDomain"]
    if dnsname is not None:
        # Kerberos-capable join: extend the object with kerberosSecret data.
        msg["objectClass"] = ["top", "primaryDomain", "kerberosSecret"]
        msg["realm"] = [realm]
        msg["saltPrincipal"] = ["host/%s@%s" % (dnsname, realm.upper())]
        msg["msDS-KeyVersionNumber"] = [str(key_version_number)]
        msg["privateKeytab"] = ["secrets.keytab"]

    msg["secret"] = [machinepass.encode('utf-8')]
    msg["samAccountName"] = ["%s$" % netbiosname]
    msg["secureChannelType"] = [str(secure_channel_type)]
    if domainsid is not None:
        msg["objectSid"] = [ndr_pack(domainsid)]

    # This complex expression tries to ensure that we don't have more
    # than one record for this SID, realm or netbios domain at a time,
    # but we don't delete the old record that we are about to modify,
    # because that would delete the keytab and previous password.
    res = secretsdb.search(base="cn=Primary Domains", attrs=attrs,
                           expression=("(&(|(flatname=%s)(realm=%s)(objectSid=%s))(objectclass=primaryDomain)(!(distinguishedName=%s)))" % (domain, realm, str(domainsid), str(msg.dn))),
                           scope=ldb.SCOPE_ONELEVEL)

    for del_msg in res:
        secretsdb.delete(del_msg.dn)

    res = secretsdb.search(base=msg.dn, attrs=attrs, scope=ldb.SCOPE_BASE)

    if len(res) == 1:
        # An entry already exists: keep the old secret/keytab values around
        # as the "prior" generation, then replace in place.
        msg["priorSecret"] = [res[0]["secret"][0]]
        try:
            msg["priorWhenChanged"] = [res[0]["whenChanged"][0]]
        except KeyError:
            pass

        try:
            msg["privateKeytab"] = [res[0]["privateKeytab"][0]]
        except KeyError:
            pass

        try:
            msg["krb5Keytab"] = [res[0]["krb5Keytab"][0]]
        except KeyError:
            pass

        for el in msg:
            if el != 'dn':
                msg[el].set_flags(ldb.FLAG_MOD_REPLACE)
        secretsdb.modify(msg)
        secretsdb.rename(res[0].dn, msg.dn)
    else:
        spn = ['HOST/%s' % shortname]
        if secure_channel_type == SEC_CHAN_BDC and dnsname is not None:
            # if we are a domain controller then we add servicePrincipalName
            # entries for the keytab code to update.
            spn.extend(['HOST/%s' % dnsname])
        msg["servicePrincipalName"] = spn

        secretsdb.add(msg)
+
+
def setup_secretsdb(paths, session_info, lp):
    """Setup the secrets database.

    :note: This function does not handle exceptions and transaction on purpose,
        it's up to the caller to do this job. The returned handle has an
        open transaction that the caller must commit (or cancel).

    :param paths: ProvisionPaths object holding the secrets database path
        and keytab names.
    :param session_info: Session info.
    :param lp: Loadparm context
    :return: LDB handle for the created secrets database
    """
    if os.path.exists(paths.secrets):
        os.unlink(paths.secrets)

    # Remove any stale keytabs belonging to a previous provision.
    keytab_path = os.path.join(paths.private_dir, paths.keytab)
    if os.path.exists(keytab_path):
        os.unlink(keytab_path)

    bind_dns_keytab_path = os.path.join(paths.binddns_dir, paths.dns_keytab)
    if os.path.exists(bind_dns_keytab_path):
        os.unlink(bind_dns_keytab_path)

    dns_keytab_path = os.path.join(paths.private_dir, paths.dns_keytab)
    if os.path.exists(dns_keytab_path):
        os.unlink(dns_keytab_path)

    path = paths.secrets

    # First open: wipe and seed the database with the init LDIF, then
    # re-open and load the main secrets LDIF inside a transaction that is
    # deliberately left open for the caller.
    secrets_ldb = Ldb(path, session_info=session_info, lp=lp)
    secrets_ldb.erase()
    secrets_ldb.load_ldif_file_add(setup_path("secrets_init.ldif"))
    secrets_ldb = Ldb(path, session_info=session_info, lp=lp)
    secrets_ldb.transaction_start()
    try:
        secrets_ldb.load_ldif_file_add(setup_path("secrets.ldif"))
    except:
        secrets_ldb.transaction_cancel()
        raise
    return secrets_ldb
+
+
def setup_privileges(path, session_info, lp):
    """Setup the privileges database.

    Any existing database at *path* is removed and replaced by a fresh
    one seeded from provision_privilege.ldif.

    :param path: Path to the privileges database.
    :param session_info: Session info.
    :param lp: Loadparm context
    """
    if os.path.exists(path):
        os.unlink(path)
    privilege_ldb = Ldb(path, session_info=session_info, lp=lp)
    privilege_ldb.erase()
    privilege_ldb.load_ldif_file_add(setup_path("provision_privilege.ldif"))
+
+
def setup_encrypted_secrets_key(path):
    """Setup the encrypted secrets key file.

    Any existing key file will be deleted and a new random key generated.

    :param path: Path to the secrets key file.

    """
    if os.path.exists(path):
        os.unlink(path)

    # Create the file exclusively, readable and writable only by its
    # owner; clear the umask so the requested mode is applied exactly.
    open_flags = os.O_WRONLY | os.O_CREAT | os.O_EXCL
    owner_only_mode = stat.S_IRUSR | stat.S_IWUSR

    saved_umask = os.umask(0)
    try:
        fd = os.open(path, open_flags, owner_only_mode)
    finally:
        os.umask(saved_umask)

    with os.fdopen(fd, 'wb') as key_file:
        key_file.write(samba.generate_random_bytes(16))
+
+
def setup_registry(path, session_info, lp):
    """Setup the registry.

    Mounts the given ldb file as HKEY_LOCAL_MACHINE and applies the
    provision.reg diff to populate it.

    :param path: Path to the registry database
    :param session_info: Session information
    :param lp: Loadparm context
    """
    reg = samba.registry.Registry()
    hive = samba.registry.open_ldb(path, session_info=session_info, lp_ctx=lp)
    reg.mount_hive(hive, samba.registry.HKEY_LOCAL_MACHINE)
    provision_reg = setup_path("provision.reg")
    assert os.path.exists(provision_reg)
    reg.diff_apply(provision_reg)
+
+
def setup_idmapdb(path, session_info, lp):
    """Setup the idmap database.

    Any existing database at *path* is removed and replaced by a fresh
    one seeded from idmap_init.ldif.

    :param path: path to the idmap database
    :param session_info: Session information
    :param lp: Loadparm context
    :return: IDmapDB handle for the created database
    """
    if os.path.exists(path):
        os.unlink(path)

    idmap_ldb = IDmapDB(path, session_info=session_info, lp=lp)
    idmap_ldb.erase()
    idmap_ldb.load_ldif_file_add(setup_path("idmap_init.ldif"))
    return idmap_ldb
+
+
def setup_samdb_rootdse(samdb, names):
    """Setup the SamDB rootdse.

    :param samdb: Sam Database handle
    :param names: ProvisionNames supplying the partition DNs that are
        advertised in the rootDSE
    """
    setup_add_ldif(samdb, setup_path("provision_rootdse_add.ldif"), {
        "SCHEMADN": names.schemadn,
        "DOMAINDN": names.domaindn,
        "ROOTDN": names.rootdn,
        "CONFIGDN": names.configdn,
        "SERVERDN": names.serverdn,
    })
+
+
def setup_self_join(samdb, admin_session_info, names, fill, machinepass,
                    dns_backend, dnspass, domainsid, next_rid, invocationid,
                    policyguid, policyguid_dc,
                    domainControllerFunctionality, ntdsguid=None, dc_rid=None):
    """Join a host to its own domain.

    :param samdb: SamDB handle, already connected.
    :param admin_session_info: Admin session restored after the temporary
        system session used for the cn=config serverReference modify.
    :param names: ProvisionNames with DNs and naming information.
    :param fill: FILL_FULL when provisioning a new forest; otherwise the
        config-partition self-join entries are assumed replicated in.
    :param machinepass: Machine account password.
    :param dns_backend: DNS backend name; anything other than
        SAMBA_INTERNAL adds the Samba-specific dns account entries.
    :param dnspass: Password for the dns account.
    :param domainsid: Domain SID.
    :param next_rid: Next RID to allocate; also seeds the RID pool.
    :param invocationid: Invocation ID (GUID string) of this DSA.
    :param policyguid: GUID of the default domain policy.
    :param policyguid_dc: GUID of the default domain controllers policy.
    :param domainControllerFunctionality: DC functional level number.
    :param ntdsguid: Optional fixed objectGUID for the NTDS Settings object.
    :param dc_rid: RID for this DC's machine account; defaults to next_rid.
    """
    assert isinstance(invocationid, str)
    if ntdsguid is not None:
        ntdsguid_line = "objectGUID: %s\n" % ntdsguid
    else:
        ntdsguid_line = ""

    if dc_rid is None:
        dc_rid = next_rid

    # Some clients/applications (like exchange) make use of
    # the operatingSystemVersion attribute in order to
    # find if a DC is good enough.
    #
    # So we better use a value matching a Windows DC
    # with the same domainControllerFunctionality level
    operatingSystemVersion = samba.dsdb.dc_operatingSystemVersion(domainControllerFunctionality)

    setup_add_ldif(samdb, setup_path("provision_self_join.ldif"), {
        "CONFIGDN": names.configdn,
        "SCHEMADN": names.schemadn,
        "DOMAINDN": names.domaindn,
        "SERVERDN": names.serverdn,
        "INVOCATIONID": invocationid,
        "NETBIOSNAME": names.netbiosname,
        "DNSNAME": "%s.%s" % (names.hostname, names.dnsdomain),
        "MACHINEPASS_B64": b64encode(machinepass.encode('utf-16-le')).decode('utf8'),
        "DOMAINSID": str(domainsid),
        "DCRID": str(dc_rid),
        "OPERATING_SYSTEM": "Samba-%s" % version,
        "OPERATING_SYSTEM_VERSION": operatingSystemVersion,
        "NTDSGUID": ntdsguid_line,
        "DOMAIN_CONTROLLER_FUNCTIONALITY": str(
            domainControllerFunctionality),
        "RIDALLOCATIONSTART": str(next_rid + 100),
        "RIDALLOCATIONEND": str(next_rid + 100 + 499)})

    setup_add_ldif(samdb, setup_path("provision_group_policy.ldif"), {
        "POLICYGUID": policyguid,
        "POLICYGUID_DC": policyguid_dc,
        "DNSDOMAIN": names.dnsdomain,
        "DOMAINDN": names.domaindn})

    # If we are setting up a subdomain, then this has been replicated in, so we
    # don't need to add it
    if fill == FILL_FULL:
        setup_add_ldif(samdb, setup_path("provision_self_join_config.ldif"), {
            "CONFIGDN": names.configdn,
            "SCHEMADN": names.schemadn,
            "DOMAINDN": names.domaindn,
            "SERVERDN": names.serverdn,
            "INVOCATIONID": invocationid,
            "NETBIOSNAME": names.netbiosname,
            "DNSNAME": "%s.%s" % (names.hostname, names.dnsdomain),
            "MACHINEPASS_B64": b64encode(machinepass.encode('utf-16-le')).decode('utf8'),
            "DOMAINSID": str(domainsid),
            "DCRID": str(dc_rid),
            "SAMBA_VERSION_STRING": version,
            "NTDSGUID": ntdsguid_line,
            "DOMAIN_CONTROLLER_FUNCTIONALITY": str(
                domainControllerFunctionality)})

    # Setup fSMORoleOwner entries to point at the newly created DC entry
    setup_modify_ldif(samdb,
                      setup_path("provision_self_join_modify_schema.ldif"), {
                          "SCHEMADN": names.schemadn,
                          "SERVERDN": names.serverdn,
                      },
                      controls=["provision:0", "relax:0"])
    setup_modify_ldif(samdb,
                      setup_path("provision_self_join_modify_config.ldif"), {
                          "CONFIGDN": names.configdn,
                          "DEFAULTSITE": names.sitename,
                          "NETBIOSNAME": names.netbiosname,
                          "SERVERDN": names.serverdn,
                      })

    system_session_info = system_session()
    samdb.set_session_info(system_session_info)
    # Setup fSMORoleOwner entries to point at the newly created DC entry to
    # modify a serverReference under cn=config when we are a subdomain, we must
    # be system due to ACLs
    setup_modify_ldif(samdb, setup_path("provision_self_join_modify.ldif"), {
        "DOMAINDN": names.domaindn,
        "SERVERDN": names.serverdn,
        "NETBIOSNAME": names.netbiosname,
    })

    samdb.set_session_info(admin_session_info)

    if dns_backend != "SAMBA_INTERNAL":
        # This is Samba4 specific and should be replaced by the correct
        # DNS AD-style setup
        setup_add_ldif(samdb, setup_path("provision_dns_add_samba.ldif"), {
            "DNSDOMAIN": names.dnsdomain,
            "DOMAINDN": names.domaindn,
            "DNSPASS_B64": b64encode(dnspass.encode('utf-16-le')).decode('utf8'),
            "HOSTNAME": names.hostname,
            "DNSNAME": '%s.%s' % (
                names.netbiosname.lower(), names.dnsdomain.lower())
        })
+
+
def getpolicypath(sysvolpath, dnsdomain, guid):
    """Return the physical path of policy given its guid.

    :param sysvolpath: Path to the sysvol folder
    :param dnsdomain: DNS name of the AD domain
    :param guid: The GUID of the policy
    :return: A string with the complete path to the policy folder
    """
    # Policy directories are always named with braces around the GUID.
    braced_guid = guid if guid[0] == "{" else "{%s}" % guid
    return os.path.join(sysvolpath, dnsdomain, "Policies", braced_guid)
+
+
+def create_gpo_struct(policy_path):
+ if not os.path.exists(policy_path):
+ os.makedirs(policy_path, 0o775)
+ f = open(os.path.join(policy_path, "GPT.INI"), 'w')
+ try:
+ f.write("[General]\r\nVersion=0")
+ finally:
+ f.close()
+ p = os.path.join(policy_path, "MACHINE")
+ if not os.path.exists(p):
+ os.makedirs(p, 0o775)
+ p = os.path.join(policy_path, "USER")
+ if not os.path.exists(p):
+ os.makedirs(p, 0o775)
+
+
def create_default_gpo(sysvolpath, dnsdomain, policyguid, policyguid_dc):
    """Create the default GPO for a domain

    :param sysvolpath: Physical path for the sysvol folder
    :param dnsdomain: DNS domain name of the AD domain
    :param policyguid: GUID of the default domain policy
    :param policyguid_dc: GUID of the default domain controller policy
    """
    # Both default policies get the same empty on-disk skeleton.
    for guid in (policyguid, policyguid_dc):
        create_gpo_struct(getpolicypath(sysvolpath, dnsdomain, guid))
+
+
# Default the database size to 8 GiB (used as the lmdb environment size)
DEFAULT_BACKEND_SIZE = 8 * 1024 * 1024 * 1024
+
def setup_samdb(path, session_info, provision_backend, lp, names,
                logger, serverrole, schema, am_rodc=False,
                plaintext_secrets=False, backend_store=None,
                backend_store_size=None, batch_mode=False):
    """Setup a complete SAM Database.

    :note: This will wipe the main SAM database file!

    :param path: Path to the sam.ldb file.
    :param session_info: Session to open the database with.
    :param provision_backend: Backend object passed on to the partition setup.
    :param lp: Loadparm context.
    :param names: ProvisionNames with the DNs to install.
    :param logger: Logger for progress messages.
    :param serverrole: Server role string.
    :param schema: Pre-built Schema object to load into the database.
    :param am_rodc: True when provisioning a read-only DC.
    :param plaintext_secrets: If False, require the encryptedSecrets feature.
    :param backend_store: "tdb" or "mdb".
    :param backend_store_size: lmdb environment size; defaults to
        DEFAULT_BACKEND_SIZE.
    :param batch_mode: Enable lmdb batch mode and a larger index cache for
        bulk provisioning.
    :return: The connected SamDB handle.
    """

    # Also wipes the database
    setup_samdb_partitions(path, logger=logger, lp=lp,
                           provision_backend=provision_backend, session_info=session_info,
                           names=names, serverrole=serverrole, plaintext_secrets=plaintext_secrets,
                           backend_store=backend_store)

    store_size = DEFAULT_BACKEND_SIZE
    if backend_store_size:
        store_size = backend_store_size

    options = []
    if backend_store == "mdb":
        options.append("lmdb_env_size:" + str(store_size))
        if batch_mode:
            options.append("batch_mode:1")
    if batch_mode:
        # Estimate the number of index records in the transaction_index_cache
        # Numbers chosen give the prime 202481 for the default backend size,
        # which works well for a 100,000 user database
        cache_size = int(store_size / 42423) + 1
        options.append("transaction_index_cache_size:" + str(cache_size))

    # Load the database, but don't load the global schema and don't connect
    # quite yet
    samdb = SamDB(session_info=session_info, url=None, auto_connect=False,
                  lp=lp,
                  global_schema=False, am_rodc=am_rodc, options=options)

    logger.info("Pre-loading the Samba 4 and AD schema")

    # Load the schema from the one we computed earlier
    samdb.set_schema(schema, write_indices_and_attributes=False)

    # Set the NTDS settings DN manually - in order to have it already around
    # before the provisioned tree exists and we connect
    samdb.set_ntds_settings_dn("CN=NTDS Settings,%s" % names.serverdn)

    # And now we can connect to the DB - the schema won't be loaded from the
    # DB
    try:
        samdb.connect(path, options=options)
    except ldb.LdbError as e2:
        (num, string_error) = e2.args
        if (num == ldb.ERR_INSUFFICIENT_ACCESS_RIGHTS):
            raise ProvisioningError("Permission denied connecting to %s, are you running as root?" % path)
        else:
            raise

    # But we have to give it one more kick to have it use the schema
    # during provision - it needs, now that it is connected, to write
    # the schema @ATTRIBUTES and @INDEXLIST records to the database.
    samdb.set_schema(schema, write_indices_and_attributes=True)

    return samdb
+
+
def fill_samdb(samdb, lp, names, logger, policyguid,
               policyguid_dc, fill, adminpass, krbtgtpass, machinepass, dns_backend,
               dnspass, invocationid, ntdsguid,
               dom_for_fun_level=None, schema=None, next_rid=None, dc_rid=None):
    """Populate an already-partitioned SAM database.

    Adds the domain base DN, schema, configuration data, users/computers
    containers, well-known principals and the DC's own self-join entries.

    Bug fix relative to the previous version: ``managedservice_descr`` was
    only computed under ``fill == FILL_FULL`` but used when
    ``fill == FILL_SUBDOMAIN`` as well, so a subdomain fill raised
    NameError; it is now computed in the combined branch where it is used.

    :param samdb: Connected SamDB handle (see setup_samdb).
    :param lp: Loadparm context.
    :param names: ProvisionNames; must carry domainsid/domainguid; its
        ntdsguid is filled in on success.
    :param logger: Logger for progress messages.
    :param policyguid: GUID of the default domain policy.
    :param policyguid_dc: GUID of the default domain controllers policy.
    :param fill: FILL_FULL for a new forest, FILL_SUBDOMAIN for a new
        domain in an existing forest.
    :param adminpass: Administrator password.
    :param krbtgtpass: krbtgt account password.
    :param machinepass: Machine account password.
    :param dns_backend: DNS backend name (passed to setup_self_join).
    :param dnspass: Password for the dns account.
    :param invocationid: Invocation ID (GUID string) of this DSA.
    :param ntdsguid: Optional fixed objectGUID for the NTDS Settings object.
    :param dom_for_fun_level: Domain/forest functional level; defaults to
        DS_DOMAIN_FUNCTION_2008_R2.
    :param schema: Pre-built Schema object (its LDIF fragments are loaded).
    :param next_rid: First RID to allocate; defaults to 1000.
    :param dc_rid: RID of this DC's machine account.
    :return: The samdb handle, with session switched to the admin session.
    :raises ProvisioningError: on an invalid next_rid or a functional
        level above this DC's capability.
    """

    if next_rid is None:
        next_rid = 1000

    # Provision does not make much sense values larger than 1000000000
    # as the upper range of the rIDAvailablePool is 1073741823 and
    # we don't want to create a domain that cannot allocate rids.
    if next_rid < 1000 or next_rid > 1000000000:
        error = "You want to run SAMBA 4 with a next_rid of %u, " % (next_rid)
        error += "the valid range is %u-%u. The default is %u." % (
            1000, 1000000000, 1000)
        raise ProvisioningError(error)

    domainControllerFunctionality = functional_level.dc_level_from_lp(lp)

    # ATTENTION: Do NOT change these default values without discussion with the
    # team and/or release manager. They have a big impact on the whole program!
    if dom_for_fun_level is None:
        dom_for_fun_level = DS_DOMAIN_FUNCTION_2008_R2

    if dom_for_fun_level > domainControllerFunctionality:
        level = functional_level.level_to_string(domainControllerFunctionality)
        raise ProvisioningError(f"You want to run SAMBA 4 on a domain and forest function level which itself is higher than its actual DC function level ({level}). This won't work!")

    domainFunctionality = dom_for_fun_level
    forestFunctionality = dom_for_fun_level

    # Set the NTDS settings DN manually - in order to have it already around
    # before the provisioned tree exists and we connect
    samdb.set_ntds_settings_dn("CN=NTDS Settings,%s" % names.serverdn)

    # Set the domain functionality levels onto the database.
    # Various module (the password_hash module in particular) need
    # to know what level of AD we are emulating.

    # These will be fixed into the database via the database
    # modifictions below, but we need them set from the start.
    samdb.set_opaque_integer("domainFunctionality", domainFunctionality)
    samdb.set_opaque_integer("forestFunctionality", forestFunctionality)
    samdb.set_opaque_integer("domainControllerFunctionality",
                             domainControllerFunctionality)

    samdb.set_domain_sid(str(names.domainsid))
    samdb.set_invocation_id(invocationid)

    logger.info("Adding DomainDN: %s" % names.domaindn)

    # impersonate domain admin
    admin_session_info = admin_session(lp, str(names.domainsid))
    samdb.set_session_info(admin_session_info)
    if names.domainguid is not None:
        domainguid_line = "objectGUID: %s\n-" % names.domainguid
    else:
        domainguid_line = ""

    descr = b64encode(get_domain_descriptor(names.domainsid)).decode('utf8')
    setup_add_ldif(samdb, setup_path("provision_basedn.ldif"), {
        "DOMAINDN": names.domaindn,
        "DOMAINSID": str(names.domainsid),
        "DESCRIPTOR": descr,
        "DOMAINGUID": domainguid_line
    })

    setup_modify_ldif(samdb, setup_path("provision_basedn_modify.ldif"), {
        "DOMAINDN": names.domaindn,
        "CREATTIME": str(samba.unix2nttime(int(time.time()))),
        "NEXTRID": str(next_rid),
        "DEFAULTSITE": names.sitename,
        "CONFIGDN": names.configdn,
        "POLICYGUID": policyguid,
        "DOMAIN_FUNCTIONALITY": str(domainFunctionality),
        "SAMBA_VERSION_STRING": version,
        "MIN_PWD_LENGTH": str(DEFAULT_MIN_PWD_LENGTH)
    })

    # If we are setting up a subdomain, then this has been replicated in, so we don't need to add it
    if fill == FILL_FULL:
        logger.info("Adding configuration container")
        descr = b64encode(get_config_descriptor(names.domainsid)).decode('utf8')
        setup_add_ldif(samdb, setup_path("provision_configuration_basedn.ldif"), {
            "CONFIGDN": names.configdn,
            "DESCRIPTOR": descr,
        })

        # The LDIF here was created when the Schema object was constructed
        ignore_checks_oid = "local_oid:%s:0" % samba.dsdb.DSDB_CONTROL_SKIP_DUPLICATES_CHECK_OID
        schema_controls = [
            "provision:0",
            "relax:0",
            ignore_checks_oid
        ]

        logger.info("Setting up sam.ldb schema")
        samdb.add_ldif(schema.schema_dn_add, controls=schema_controls)
        samdb.modify_ldif(schema.schema_dn_modify, controls=schema_controls)
        samdb.write_prefixes_from_schema()
        samdb.add_ldif(schema.schema_data, controls=schema_controls)
        setup_add_ldif(samdb, setup_path("aggregate_schema.ldif"),
                       {"SCHEMADN": names.schemadn},
                       controls=schema_controls)

        # Now register this container in the root of the forest
        msg = ldb.Message(ldb.Dn(samdb, names.domaindn))
        msg["subRefs"] = ldb.MessageElement(names.configdn, ldb.FLAG_MOD_ADD,
                                            "subRefs")

    deletedobjects_descr = b64encode(get_deletedobjects_descriptor(names.domainsid)).decode('utf8')

    samdb.invocation_id = invocationid

    # If we are setting up a subdomain, then this has been replicated in, so we don't need to add it
    if fill == FILL_FULL:
        logger.info("Setting up sam.ldb configuration data")

        partitions_descr = b64encode(get_config_partitions_descriptor(names.domainsid)).decode('utf8')
        sites_descr = b64encode(get_config_sites_descriptor(names.domainsid)).decode('utf8')
        ntdsquotas_descr = b64encode(get_config_ntds_quotas_descriptor(names.domainsid)).decode('utf8')
        protected1_descr = b64encode(get_config_delete_protected1_descriptor(names.domainsid)).decode('utf8')
        protected1wd_descr = b64encode(get_config_delete_protected1wd_descriptor(names.domainsid)).decode('utf8')
        protected2_descr = b64encode(get_config_delete_protected2_descriptor(names.domainsid)).decode('utf8')

        if "2008" in schema.base_schema:
            # exclude 2012-specific changes if we're using a 2008 schema
            incl_2012 = "#"
        else:
            incl_2012 = ""

        setup_add_ldif(samdb, setup_path("provision_configuration.ldif"), {
            "CONFIGDN": names.configdn,
            "NETBIOSNAME": names.netbiosname,
            "DEFAULTSITE": names.sitename,
            "DNSDOMAIN": names.dnsdomain,
            "DOMAIN": names.domain,
            "SCHEMADN": names.schemadn,
            "DOMAINDN": names.domaindn,
            "SERVERDN": names.serverdn,
            "FOREST_FUNCTIONALITY": str(forestFunctionality),
            "DOMAIN_FUNCTIONALITY": str(domainFunctionality),
            "NTDSQUOTAS_DESCRIPTOR": ntdsquotas_descr,
            "DELETEDOBJECTS_DESCRIPTOR": deletedobjects_descr,
            "LOSTANDFOUND_DESCRIPTOR": protected1wd_descr,
            "SERVICES_DESCRIPTOR": protected1_descr,
            "PHYSICALLOCATIONS_DESCRIPTOR": protected1wd_descr,
            "FORESTUPDATES_DESCRIPTOR": protected1wd_descr,
            "EXTENDEDRIGHTS_DESCRIPTOR": protected2_descr,
            "PARTITIONS_DESCRIPTOR": partitions_descr,
            "SITES_DESCRIPTOR": sites_descr,
        })

        setup_add_ldif(samdb, setup_path("extended-rights.ldif"), {
            "CONFIGDN": names.configdn,
            "INC2012": incl_2012,
        })

        logger.info("Setting up display specifiers")
        display_specifiers_ldif = read_ms_ldif(
            setup_path('display-specifiers/DisplaySpecifiers-Win2k8R2.txt'))
        display_specifiers_ldif = substitute_var(display_specifiers_ldif,
                                                 {"CONFIGDN": names.configdn})
        check_all_substituted(display_specifiers_ldif)
        samdb.add_ldif(display_specifiers_ldif)

        logger.info("Modifying display specifiers and extended rights")
        setup_modify_ldif(samdb,
                          setup_path("provision_configuration_modify.ldif"), {
                              "CONFIGDN": names.configdn,
                              "DISPLAYSPECIFIERS_DESCRIPTOR": protected2_descr
                          })

    logger.info("Adding users container")
    users_desc = b64encode(get_domain_users_descriptor(names.domainsid)).decode('utf8')
    setup_add_ldif(samdb, setup_path("provision_users_add.ldif"), {
        "DOMAINDN": names.domaindn,
        "USERS_DESCRIPTOR": users_desc
    })
    logger.info("Modifying users container")
    setup_modify_ldif(samdb, setup_path("provision_users_modify.ldif"), {
        "DOMAINDN": names.domaindn})
    logger.info("Adding computers container")
    computers_desc = b64encode(get_domain_computers_descriptor(names.domainsid)).decode('utf8')
    setup_add_ldif(samdb, setup_path("provision_computers_add.ldif"), {
        "DOMAINDN": names.domaindn,
        "COMPUTERS_DESCRIPTOR": computers_desc
    })
    logger.info("Modifying computers container")
    setup_modify_ldif(samdb,
                      setup_path("provision_computers_modify.ldif"), {
                          "DOMAINDN": names.domaindn})
    logger.info("Setting up sam.ldb data")
    infrastructure_desc = b64encode(get_domain_infrastructure_descriptor(names.domainsid)).decode('utf8')
    lostandfound_desc = b64encode(get_domain_delete_protected2_descriptor(names.domainsid)).decode('utf8')
    system_desc = b64encode(get_domain_delete_protected1_descriptor(names.domainsid)).decode('utf8')
    builtin_desc = b64encode(get_domain_builtin_descriptor(names.domainsid)).decode('utf8')
    controllers_desc = b64encode(get_domain_controllers_descriptor(names.domainsid)).decode('utf8')
    setup_add_ldif(samdb, setup_path("provision.ldif"), {
        "CREATTIME": str(samba.unix2nttime(int(time.time()))),
        "DOMAINDN": names.domaindn,
        "NETBIOSNAME": names.netbiosname,
        "DEFAULTSITE": names.sitename,
        "CONFIGDN": names.configdn,
        "SERVERDN": names.serverdn,
        "RIDAVAILABLESTART": str(next_rid + 600),
        "POLICYGUID_DC": policyguid_dc,
        "INFRASTRUCTURE_DESCRIPTOR": infrastructure_desc,
        "DELETEDOBJECTS_DESCRIPTOR": deletedobjects_descr,
        "LOSTANDFOUND_DESCRIPTOR": lostandfound_desc,
        "SYSTEM_DESCRIPTOR": system_desc,
        "BUILTIN_DESCRIPTOR": builtin_desc,
        "DOMAIN_CONTROLLERS_DESCRIPTOR": controllers_desc,
    })

    # If we are setting up a subdomain, then this has been replicated in, so we don't need to add it
    if fill == FILL_FULL:
        setup_modify_ldif(samdb,
                          setup_path("provision_configuration_references.ldif"), {
                              "CONFIGDN": names.configdn,
                              "SCHEMADN": names.schemadn})

        logger.info("Setting up well known security principals")
        protected1wd_descr = b64encode(get_config_delete_protected1wd_descriptor(names.domainsid)).decode('utf8')
        setup_add_ldif(samdb, setup_path("provision_well_known_sec_princ.ldif"), {
            "CONFIGDN": names.configdn,
            "WELLKNOWNPRINCIPALS_DESCRIPTOR": protected1wd_descr,
        }, controls=["relax:0", "provision:0"])

    if fill == FILL_FULL or fill == FILL_SUBDOMAIN:
        # Computed here (not only under FILL_FULL) so that a subdomain
        # fill does not hit a NameError on managedservice_descr.
        managedservice_descr = b64encode(get_managed_service_accounts_descriptor(names.domainsid)).decode('utf8')
        setup_modify_ldif(samdb,
                          setup_path("provision_basedn_references.ldif"), {
                              "DOMAINDN": names.domaindn,
                              "MANAGEDSERVICE_DESCRIPTOR": managedservice_descr
                          })

        logger.info("Setting up sam.ldb users and groups")
        setup_add_ldif(samdb, setup_path("provision_users.ldif"), {
            "DOMAINDN": names.domaindn,
            "DOMAINSID": str(names.domainsid),
            "ADMINPASS_B64": b64encode(adminpass.encode('utf-16-le')).decode('utf8'),
            "KRBTGTPASS_B64": b64encode(krbtgtpass.encode('utf-16-le')).decode('utf8')
        }, controls=["relax:0", "provision:0"])

        logger.info("Setting up self join")
        setup_self_join(samdb, admin_session_info, names=names, fill=fill,
                        invocationid=invocationid,
                        dns_backend=dns_backend,
                        dnspass=dnspass,
                        machinepass=machinepass,
                        domainsid=names.domainsid,
                        next_rid=next_rid,
                        dc_rid=dc_rid,
                        policyguid=policyguid,
                        policyguid_dc=policyguid_dc,
                        domainControllerFunctionality=domainControllerFunctionality,
                        ntdsguid=ntdsguid)

        ntds_dn = "CN=NTDS Settings,%s" % names.serverdn
        names.ntdsguid = samdb.searchone(basedn=ntds_dn,
                                         attribute="objectGUID", expression="", scope=ldb.SCOPE_BASE).decode('utf8')
        assert isinstance(names.ntdsguid, str)

    return samdb
+
+
# SDDL for the sysvol tree: owner LA (Administrator), group BA, protected
# DACL giving full control to Administrators (BA) and SYSTEM (SY), and
# read/execute (0x1200a9) to Server Operators (SO) and Authenticated
# Users (AU), all inheritable (OICI).
SYSVOL_ACL = "O:LAG:BAD:P(A;OICI;FA;;;BA)(A;OICI;0x1200a9;;;SO)(A;OICI;FA;;;SY)(A;OICI;0x1200a9;;;AU)"
# Same as SYSVOL_ACL plus modify rights (0x1301bf) for PA
# (Group Policy Creator Owners) on the Policies subtree.
POLICIES_ACL = "O:LAG:BAD:P(A;OICI;FA;;;BA)(A;OICI;0x1200a9;;;SO)(A;OICI;FA;;;SY)(A;OICI;0x1200a9;;;AU)(A;OICI;0x1301bf;;;PA)"
# Name of the smb.conf share that serves the sysvol tree.
SYSVOL_SERVICE = "sysvol"
+
+
def set_dir_acl(path, acl, lp, domsid, use_ntvfs, passdb, service=SYSVOL_SERVICE):
    """Apply *acl* to *path* and to every file and directory beneath it.

    :param path: Root directory of the tree to receive the ACL
    :param acl: ACL in SDDL form
    :param lp: Loadparm context
    :param domsid: Domain SID (string) used to resolve the SDDL
    :param use_ntvfs: Whether to write the ACL the NTVFS way
    :param passdb: passdb backend handle for SID to uid/gid mapping
    :param service: Share name the paths belong to (defaults to sysvol)
    """
    session_info = system_session_unix()

    def _apply(target):
        # Single place for the common setntacl() arguments.
        setntacl(lp, target, acl, domsid, session_info,
                 use_ntvfs=use_ntvfs, skip_invalid_chown=True,
                 passdb=passdb, service=service)

    _apply(path)
    # Bottom-up walk, files before subdirectories in each directory,
    # matching the original traversal order.
    for root, subdirs, filenames in os.walk(path, topdown=False):
        for entry in filenames + subdirs:
            _apply(os.path.join(root, entry))
+
+
def set_gpos_acl(sysvol, dnsdomain, domainsid, domaindn, samdb, lp, use_ntvfs, passdb):
    """Set ACL on the sysvol/<dnsname>/Policies folder and the policy
    folders beneath.

    :param sysvol: Physical path for the sysvol folder
    :param dnsdomain: The DNS name of the domain
    :param domainsid: The SID of the domain
    :param domaindn: The DN of the domain (ie. DC=...)
    :param samdb: An LDB object on the SAM db
    :param lp: an LP object
    """
    session_info = system_session_unix()

    # The Policies root itself gets the fixed POLICIES_ACL.
    root_policy_path = os.path.join(sysvol, dnsdomain, "Policies")
    setntacl(lp, root_policy_path, POLICIES_ACL, str(domainsid), session_info,
             use_ntvfs=use_ntvfs, skip_invalid_chown=True, passdb=passdb,
             service=SYSVOL_SERVICE)

    # Each GPO directory gets the filesystem translation of the security
    # descriptor stored on its AD object.
    policies_dn = "CN=Policies,CN=System,%s" % (domaindn)
    res = samdb.search(base=policies_dn,
                       attrs=["cn", "nTSecurityDescriptor"],
                       expression="", scope=ldb.SCOPE_ONELEVEL)

    for policy in res:
        sd = ndr_unpack(security.descriptor,
                        policy["nTSecurityDescriptor"][0])
        policy_path = getpolicypath(sysvol, dnsdomain, str(policy["cn"]))
        set_dir_acl(policy_path, dsacl2fsacl(sd.as_sddl(), domainsid), lp,
                    str(domainsid), use_ntvfs,
                    passdb=passdb)
+
+
def setsysvolacl(samdb, sysvol, uid, gid, domainsid, dnsdomain,
                 domaindn, lp, use_ntvfs):
    """Set the ACL for the sysvol share and the subfolders

    :param samdb: An LDB object on the SAM db
    :param sysvol: Physical path for the sysvol folder
    :param uid: The UID of the "Administrator" user
    :param gid: The GID of the "Domain administrators" group
    :param domainsid: The SID of the domain
    :param dnsdomain: The DNS name of the domain
    :param domaindn: The DN of the domain (ie. DC=...)
    :param lp: A loadparm context
    :param use_ntvfs: True to set ACLs for the NTVFS file server instead
        of the default s3fs (smbd)
    :raise ProvisioningError: If the filesystem or passdb state cannot
        support the required ACL operations
    """
    s4_passdb = None

    if not use_ntvfs:
        s3conf = s3param.get_context()
        s3conf.load(lp.configfile)

        # Probe the sysvol filesystem with a scratch file: try to set a
        # posix ACL and chown it, so provision fails early with a clear
        # message rather than partway through the ACL walk below.
        file = tempfile.NamedTemporaryFile(dir=os.path.abspath(sysvol))
        try:
            try:
                smbd.set_simple_acl(file.name, 0o755, system_session_unix(), gid)
            except OSError:
                if not smbd.have_posix_acls():
                    # This clue is only strictly correct for RPM and
                    # Debian-like Linux systems, but hopefully other users
                    # will get enough clue from it.
                    raise ProvisioningError("Samba was compiled without the posix ACL support that s3fs requires. "
                                            "Try installing libacl1-dev or libacl-devel, then re-run configure and make.")

                raise ProvisioningError("Your filesystem or build does not support posix ACLs, which s3fs requires. "
                                        "Try the mounting the filesystem with the 'acl' option.")
            try:
                # setntacl() below will need to chown files as well.
                smbd.chown(file.name, uid, gid, system_session_unix())
            except OSError:
                raise ProvisioningError("Unable to chown a file on your filesystem. "
                                        "You may not be running provision as root.")
        finally:
            file.close()

        # This will ensure that the smbd code we are running when setting ACLs
        # is initialised with the smb.conf
        s3conf = s3param.get_context()
        s3conf.load(lp.configfile)
        # ensure we are using the right samba_dsdb passdb backend, no matter what
        s3conf.set("passdb backend", "samba_dsdb:%s" % samdb.url)
        passdb.reload_static_pdb()

        # ensure that we init the samba_dsdb backend, so the domain sid is
        # marked in secrets.tdb
        s4_passdb = passdb.PDB(s3conf.get("passdb backend"))

        # now ensure everything matches correctly, to avoid weird issues
        if passdb.get_global_sam_sid() != domainsid:
            raise ProvisioningError('SID as seen by smbd [%s] does not match SID as seen by the provision script [%s]!' % (passdb.get_global_sam_sid(), domainsid))

        domain_info = s4_passdb.domain_info()
        if domain_info["dom_sid"] != domainsid:
            raise ProvisioningError('SID as seen by pdb_samba_dsdb [%s] does not match SID as seen by the provision script [%s]!' % (domain_info["dom_sid"], domainsid))

        if domain_info["dns_domain"].upper() != dnsdomain.upper():
            raise ProvisioningError('Realm as seen by pdb_samba_dsdb [%s] does not match Realm as seen by the provision script [%s]!' % (domain_info["dns_domain"].upper(), dnsdomain.upper()))

    # Remember whether chown on sysvol works, so the walk below can skip
    # chown calls that would only fail again.
    try:
        if use_ntvfs:
            os.chown(sysvol, -1, gid)
    except OSError:
        canchown = False
    else:
        canchown = True

    # use admin sid dn as user dn, since admin should own most of the files,
    # the operation will be much faster
    userdn = '<SID={}-{}>'.format(domainsid, security.DOMAIN_RID_ADMINISTRATOR)

    flags = (auth.AUTH_SESSION_INFO_DEFAULT_GROUPS |
             auth.AUTH_SESSION_INFO_AUTHENTICATED |
             auth.AUTH_SESSION_INFO_SIMPLE_PRIVILEGES)

    session_info = auth.user_session(samdb, lp_ctx=lp, dn=userdn,
                                     session_info_flags=flags)
    auth.session_info_set_unix(session_info,
                               lp_ctx=lp,
                               user_name="Administrator",
                               uid=uid,
                               gid=gid)

    def _setntacl(path):
        """A helper to reuse args"""
        return setntacl(
            lp, path, SYSVOL_ACL, str(domainsid), session_info,
            use_ntvfs=use_ntvfs, skip_invalid_chown=True, passdb=s4_passdb,
            service=SYSVOL_SERVICE)

    # Set the SYSVOL_ACL on the sysvol folder and subfolder (first level)
    _setntacl(sysvol)
    for root, dirs, files in os.walk(sysvol, topdown=False):
        for name in files:
            if use_ntvfs and canchown:
                os.chown(os.path.join(root, name), -1, gid)
            _setntacl(os.path.join(root, name))
        for name in dirs:
            if use_ntvfs and canchown:
                os.chown(os.path.join(root, name), -1, gid)
            _setntacl(os.path.join(root, name))

    # Set acls on Policy folder and policies folders
    set_gpos_acl(sysvol, dnsdomain, domainsid, domaindn, samdb, lp, use_ntvfs, passdb=s4_passdb)
+
+
def acl_type(direct_db_access):
    """Return a label for the ACL access mode: "DB" when the ACL is read
    directly from the database/xattr layer, "VFS" when it goes through
    the smbd VFS."""
    return "DB" if direct_db_access else "VFS"
+
+
def check_dir_acl(path, acl, lp, domainsid, direct_db_access):
    """Verify that *path* and everything beneath it carry exactly *acl*.

    :param path: GPO directory to check
    :param acl: Expected ACL in SDDL form
    :param lp: Loadparm context
    :param domainsid: Domain SID used to render the on-disk ACL as SDDL
    :param direct_db_access: True to read ACLs directly from the DB,
        False to go through the smbd VFS
    :raise ProvisioningError: If an ACL is missing or does not match
    """
    session_info = system_session_unix()

    def _check_one(target, what):
        # Shared check for the root directory, files and subdirectories.
        fsacl = getntacl(lp, target, session_info,
                         direct_db_access=direct_db_access,
                         service=SYSVOL_SERVICE)
        # Bug fix: the original only None-checked inside the walk loops;
        # a missing ACL on *path* itself raised AttributeError instead of
        # a ProvisioningError.
        if fsacl is None:
            raise ProvisioningError('%s ACL on GPO %s %s not found!' %
                                    (acl_type(direct_db_access), what,
                                     target))
        fsacl_sddl = fsacl.as_sddl(domainsid)
        if fsacl_sddl != acl:
            raise ProvisioningError('%s ACL on GPO %s %s %s does not match expected value %s from GPO object' % (acl_type(direct_db_access), what, target, fsacl_sddl, acl))

    _check_one(path, "directory")
    for root, dirs, files in os.walk(path, topdown=False):
        for name in files:
            _check_one(os.path.join(root, name), "file")
        for name in dirs:
            _check_one(os.path.join(root, name), "directory")
+
+
def check_gpos_acl(sysvol, dnsdomain, domainsid, domaindn, samdb, lp,
                   direct_db_access):
    """Check ACLs on the sysvol/<dnsname>/Policies folder and the policy
    folders beneath.

    :param sysvol: Physical path for the sysvol folder
    :param dnsdomain: The DNS name of the domain
    :param domainsid: The SID of the domain
    :param domaindn: The DN of the domain (ie. DC=...)
    :param samdb: An LDB object on the SAM db
    :param lp: an LP object
    :param direct_db_access: True to read ACLs directly from the DB,
        False to go through the smbd VFS
    :raise ProvisioningError: If an ACL is missing or unexpected
    """

    # Check ACL for GPO root folder
    root_policy_path = os.path.join(sysvol, dnsdomain, "Policies")
    session_info = system_session_unix()
    fsacl = getntacl(lp, root_policy_path, session_info,
                     direct_db_access=direct_db_access, service=SYSVOL_SERVICE)
    if fsacl is None:
        # Bug fix: the message hardcoded "DB" while also interpolating
        # acl_type(), yielding e.g. "DB ACL on policy root VFS ...".
        raise ProvisioningError('%s ACL on policy root %s not found!' % (acl_type(direct_db_access), root_policy_path))
    fsacl_sddl = fsacl.as_sddl(domainsid)
    if fsacl_sddl != POLICIES_ACL:
        # Bug fix: report the expected POLICIES_ACL string, not the
        # security-descriptor object that was found.
        raise ProvisioningError('%s ACL on policy root %s %s does not match expected value %s from provision' % (acl_type(direct_db_access), root_policy_path, fsacl_sddl, POLICIES_ACL))
    res = samdb.search(base="CN=Policies,CN=System,%s" %(domaindn),
                       attrs=["cn", "nTSecurityDescriptor"],
                       expression="", scope=ldb.SCOPE_ONELEVEL)

    # Every GPO directory must match the ACL stored on its AD object.
    for policy in res:
        acl = ndr_unpack(security.descriptor,
                         policy["nTSecurityDescriptor"][0]).as_sddl()
        policy_path = getpolicypath(sysvol, dnsdomain, str(policy["cn"]))
        check_dir_acl(policy_path, dsacl2fsacl(acl, domainsid), lp,
                      domainsid, direct_db_access)
+
+
def checksysvolacl(samdb, netlogon, sysvol, domainsid, dnsdomain, domaindn,
                   lp):
    """Check the ACL for the sysvol share and the subfolders

    :param samdb: An LDB object on the SAM db
    :param netlogon: Physical path for the netlogon folder
    :param sysvol: Physical path for the sysvol folder
    :param domainsid: The SID of the domain
    :param dnsdomain: The DNS name of the domain
    :param domaindn: The DN of the domain (ie. DC=...)
    :param lp: A loadparm context
    :raise ProvisioningError: If any ACL is missing or unexpected, or the
        passdb state disagrees with the provision parameters
    """

    # This will ensure that the smbd code we are running when setting ACLs is initialised with the smb.conf
    s3conf = s3param.get_context()
    s3conf.load(lp.configfile)
    # ensure we are using the right samba_dsdb passdb backend, no matter what
    s3conf.set("passdb backend", "samba_dsdb:%s" % samdb.url)
    # ensure that we init the samba_dsdb backend, so the domain sid is marked in secrets.tdb
    s4_passdb = passdb.PDB(s3conf.get("passdb backend"))

    # now ensure everything matches correctly, to avoid weird issues
    if passdb.get_global_sam_sid() != domainsid:
        raise ProvisioningError('SID as seen by smbd [%s] does not match SID as seen by the provision script [%s]!' % (passdb.get_global_sam_sid(), domainsid))

    domain_info = s4_passdb.domain_info()
    if domain_info["dom_sid"] != domainsid:
        raise ProvisioningError('SID as seen by pdb_samba_dsdb [%s] does not match SID as seen by the provision script [%s]!' % (domain_info["dom_sid"], domainsid))

    if domain_info["dns_domain"].upper() != dnsdomain.upper():
        raise ProvisioningError('Realm as seen by pdb_samba_dsdb [%s] does not match Realm as seen by the provision script [%s]!' % (domain_info["dns_domain"].upper(), dnsdomain.upper()))

    # Ensure we can read this directly, and via the smbd VFS
    session_info = system_session_unix()
    for direct_db_access in [True, False]:
        # Check the SYSVOL_ACL on the sysvol folder and subfolder (first level)
        for dir_path in [os.path.join(sysvol, dnsdomain), netlogon]:
            fsacl = getntacl(lp, dir_path, session_info, direct_db_access=direct_db_access, service=SYSVOL_SERVICE)
            if fsacl is None:
                raise ProvisioningError('%s ACL on sysvol directory %s not found!' % (acl_type(direct_db_access), dir_path))
            fsacl_sddl = fsacl.as_sddl(domainsid)
            if fsacl_sddl != SYSVOL_ACL:
                raise ProvisioningError('%s ACL on sysvol directory %s %s does not match expected value %s from provision' % (acl_type(direct_db_access), dir_path, fsacl_sddl, SYSVOL_ACL))

        # Check acls on Policy folder and policies folders
        check_gpos_acl(sysvol, dnsdomain, domainsid, domaindn, samdb, lp,
                       direct_db_access)
+
+
def interface_ips_v4(lp, all_interfaces=False):
    """Return only the IPv4 addresses among the configured interface IPs."""
    # An IPv6 address always contains a colon; an IPv4 address never does.
    return [ip for ip in samba.interface_ips(lp, all_interfaces)
            if ':' not in ip]
+
+
def interface_ips_v6(lp):
    """Return only the IPv6 addresses among the configured interface IPs."""
    # An IPv6 address always contains a colon; an IPv4 address never does.
    return [ip for ip in samba.interface_ips(lp, False) if ':' in ip]
+
+
def provision_fill(samdb, secrets_ldb, logger, names, paths,
                   schema=None,
                   samdb_fill=FILL_FULL,
                   hostip=None, hostip6=None,
                   next_rid=1000, dc_rid=None, adminpass=None, krbtgtpass=None,
                   domainguid=None, policyguid=None, policyguid_dc=None,
                   invocationid=None, machinepass=None, ntdsguid=None,
                   dns_backend=None, dnspass=None,
                   serverrole=None, dom_for_fun_level=None,
                   lp=None, use_ntvfs=False,
                   skip_sysvolacl=False):
    """Fill a freshly set-up sam.ldb with the domain data.

    Generates missing secrets/GUIDs, fills the SAM inside a transaction,
    then (outside the transaction) sets sysvol ACLs, joins secrets.ldb,
    configures DNS, records provision USNs and runs a targeted dbcheck.

    :param samdb: SamDB connection to fill
    :param secrets_ldb: Ldb on secrets.ldb, used for the self join
    :param logger: Logger used to report progress
    :param names: ProvisionNames with all naming context info
    :param paths: ProvisionPaths for sysvol etc.
    :param samdb_fill: One of the FILL_* constants
    :param skip_sysvolacl: If True, do not set ACLs on sysvol
    """
    # create/adapt the group policy GUIDs
    # Default GUID for default policy are described at
    # "How Core Group Policy Works"
    # http://technet.microsoft.com/en-us/library/cc784268%28WS.10%29.aspx
    if policyguid is None:
        policyguid = DEFAULT_POLICY_GUID
    policyguid = policyguid.upper()
    if policyguid_dc is None:
        policyguid_dc = DEFAULT_DC_POLICY_GUID
    policyguid_dc = policyguid_dc.upper()

    if invocationid is None:
        invocationid = str(uuid.uuid4())

    if krbtgtpass is None:
        # Note that the machinepass value is ignored
        # as the backend (password_hash.c) will generate its
        # own random values for the krbtgt keys
        krbtgtpass = samba.generate_random_machine_password(128, 255)
    if machinepass is None:
        machinepass = samba.generate_random_machine_password(120, 120)
    if dnspass is None:
        dnspass = samba.generate_random_password(120, 120)

    # Fill the SAM atomically; roll everything back on any error.
    samdb.transaction_start()
    try:
        samdb = fill_samdb(samdb, lp, names, logger=logger,
                           schema=schema,
                           policyguid=policyguid, policyguid_dc=policyguid_dc,
                           fill=samdb_fill, adminpass=adminpass, krbtgtpass=krbtgtpass,
                           invocationid=invocationid, machinepass=machinepass,
                           dns_backend=dns_backend, dnspass=dnspass,
                           ntdsguid=ntdsguid,
                           dom_for_fun_level=dom_for_fun_level,
                           next_rid=next_rid, dc_rid=dc_rid)

        # Set up group policies (domain policy and domain controller
        # policy)
        if serverrole == "active directory domain controller":
            create_default_gpo(paths.sysvol, names.dnsdomain, policyguid,
                               policyguid_dc)
    except:
        samdb.transaction_cancel()
        raise
    else:
        samdb.transaction_commit()

    if serverrole == "active directory domain controller":
        # Continue setting up sysvol for GPO. This appears to require being
        # outside a transaction.
        if not skip_sysvolacl:
            setsysvolacl(samdb, paths.sysvol, paths.root_uid,
                         paths.root_gid, names.domainsid, names.dnsdomain,
                         names.domaindn, lp, use_ntvfs)
        else:
            logger.info("Setting acl on sysvol skipped")

        secretsdb_self_join(secrets_ldb, domain=names.domain,
                            realm=names.realm, dnsdomain=names.dnsdomain,
                            netbiosname=names.netbiosname, domainsid=names.domainsid,
                            machinepass=machinepass, secure_channel_type=SEC_CHAN_BDC)

        # Now set up the right msDS-SupportedEncryptionTypes into the DB
        # In future, this might be determined from some configuration
        kerberos_enctypes = str(ENC_ALL_TYPES)

        try:
            msg = ldb.Message(ldb.Dn(samdb,
                                     samdb.searchone("distinguishedName",
                                                     expression="samAccountName=%s$" % names.netbiosname,
                                                     scope=ldb.SCOPE_SUBTREE).decode('utf8')))
            msg["msDS-SupportedEncryptionTypes"] = ldb.MessageElement(
                elements=kerberos_enctypes, flags=ldb.FLAG_MOD_REPLACE,
                name="msDS-SupportedEncryptionTypes")
            samdb.modify(msg)
        except ldb.LdbError as e:
            (enum, estr) = e.args
            if enum != ldb.ERR_NO_SUCH_ATTRIBUTE:
                # It might be that this attribute does not exist in this schema
                raise

        setup_ad_dns(samdb, secrets_ldb, names, paths, logger,
                     hostip=hostip, hostip6=hostip6, dns_backend=dns_backend,
                     dnspass=dnspass, os_level=dom_for_fun_level,
                     fill_level=samdb_fill)

        domainguid = samdb.searchone(basedn=samdb.get_default_basedn(),
                                     attribute="objectGUID").decode('utf8')
        assert isinstance(domainguid, str)

    # Record the USN range produced by this provision, so later tooling
    # can distinguish provision-created objects from replicated ones.
    lastProvisionUSNs = get_last_provision_usn(samdb)
    maxUSN = get_max_usn(samdb, str(names.rootdn))
    if lastProvisionUSNs is not None:
        update_provision_usn(samdb, 0, maxUSN, invocationid, 1)
    else:
        set_provision_usn(samdb, 0, maxUSN, invocationid)

    logger.info("Setting up sam.ldb rootDSE marking as synchronized")
    setup_modify_ldif(samdb, setup_path("provision_rootdse_modify.ldif"),
                      {'NTDSGUID': names.ntdsguid})

    # fix any dangling GUIDs from the provision
    logger.info("Fixing provision GUIDs")
    chk = dbcheck(samdb, samdb_schema=samdb, verbose=False, fix=True, yes=True,
                  quiet=True)
    samdb.transaction_start()
    try:
        # a small number of GUIDs are missing because of ordering issues in the
        # provision code
        for schema_obj in ['CN=Domain', 'CN=Organizational-Person', 'CN=Contact', 'CN=inetOrgPerson']:
            chk.check_database(DN="%s,%s" % (schema_obj, names.schemadn),
                               scope=ldb.SCOPE_BASE,
                               attrs=['defaultObjectCategory'])
        chk.check_database(DN="CN=IP Security,CN=System,%s" % names.domaindn,
                           scope=ldb.SCOPE_ONELEVEL,
                           attrs=['ipsecOwnersReference',
                                  'ipsecFilterReference',
                                  'ipsecISAKMPReference',
                                  'ipsecNegotiationPolicyReference',
                                  'ipsecNFAReference'])
        if chk.check_database(DN=names.schemadn, scope=ldb.SCOPE_SUBTREE,
                              attrs=['attributeId', 'governsId']) != 0:
            raise ProvisioningError("Duplicate attributeId or governsId in schema. Must be fixed manually!!")
    except:
        samdb.transaction_cancel()
        raise
    else:
        samdb.transaction_commit()
+
+
# Accepted server-role spellings (ROLE_* constants and the various
# human-readable smb.conf values) mapped to the three canonical role
# strings used throughout provisioning.
_ROLES_MAP = {
    "ROLE_STANDALONE": "standalone server",
    "ROLE_DOMAIN_MEMBER": "member server",
    "ROLE_DOMAIN_BDC": "active directory domain controller",
    "ROLE_DOMAIN_PDC": "active directory domain controller",
    "dc": "active directory domain controller",
    "member": "member server",
    "domain controller": "active directory domain controller",
    "active directory domain controller": "active directory domain controller",
    "member server": "member server",
    "standalone": "standalone server",
    "standalone server": "standalone server",
}
+
+
def sanitize_server_role(role):
    """Sanitize a server role name.

    :param role: Server role
    :raise ValueError: If the role can not be interpreted
    :return: Sanitized server role (one of "member server",
        "active directory domain controller", "standalone server")
    """
    if role not in _ROLES_MAP:
        raise ValueError(role)
    return _ROLES_MAP[role]
+
+
def provision_fake_ypserver(logger, samdb, domaindn, netbiosname, nisdomain):
    """Create AD entries for the fake ypserver.

    This is needed for being able to manipulate posix attrs via ADUC.
    """
    samdb.transaction_start()
    try:
        logger.info("Setting up fake yp server settings")
        substitutions = {
            "DOMAINDN": domaindn,
            "NETBIOSNAME": netbiosname,
            "NISDOMAIN": nisdomain,
        }
        setup_add_ldif(samdb, setup_path("ypServ30.ldif"), substitutions)
    except:
        # Roll back the whole transaction on any failure.
        samdb.transaction_cancel()
        raise
    samdb.transaction_commit()
+
+
def directory_create_or_exists(path, mode=0o755):
    """Create directory *path* with *mode* unless it already exists.

    A concurrent creation of the same directory is tolerated; any other
    OS-level failure becomes a ProvisioningError.
    """
    if os.path.exists(path):
        return
    try:
        os.mkdir(path, mode)
    except OSError as e:
        # Another process may have created it between the exists() check
        # and mkdir(); that is fine.
        if e.errno != errno.EEXIST:
            raise ProvisioningError("Failed to create directory %s: %s" % (path, e.strerror))
+
+
def determine_host_ip(logger, lp, hostip=None):
    """Choose the IPv4 address for this host.

    When *hostip* is not supplied, take the first configured IPv4
    address. Loopback is rejected; returns None when nothing usable
    was found.
    """
    if hostip is None:
        logger.info("Looking up IPv4 addresses")
        candidates = interface_ips_v4(lp)
        if candidates:
            hostip = candidates[0]
            if len(candidates) > 1:
                logger.warning("More than one IPv4 address found. Using %s",
                               hostip)
    if hostip == "127.0.0.1":
        # Loopback cannot serve remote clients.
        hostip = None
    if hostip is None:
        logger.warning("No IPv4 address will be assigned")

    return hostip
+
+
def determine_host_ip6(logger, lp, hostip6=None):
    """Choose the IPv6 address for this host.

    When *hostip6* is not supplied, take the first configured IPv6
    address; returns None when nothing was found.
    """
    if hostip6 is None:
        logger.info("Looking up IPv6 addresses")
        candidates = interface_ips_v6(lp)
        if candidates:
            hostip6 = candidates[0]
            if len(candidates) > 1:
                logger.warning("More than one IPv6 address found. Using %s", hostip6)
    if hostip6 is None:
        logger.warning("No IPv6 address will be assigned")

    return hostip6
+
+
def provision(logger, session_info, smbconf=None,
              targetdir=None, samdb_fill=FILL_FULL, realm=None, rootdn=None,
              domaindn=None, schemadn=None, configdn=None, serverdn=None,
              domain=None, hostname=None, hostip=None, hostip6=None, domainsid=None,
              next_rid=1000, dc_rid=None, adminpass=None, ldapadminpass=None,
              krbtgtpass=None, domainguid=None, policyguid=None, policyguid_dc=None,
              dns_backend=None, dns_forwarder=None, dnspass=None,
              invocationid=None, machinepass=None, ntdsguid=None,
              root=None, nobody=None, users=None,
              sitename=None, serverrole=None, dom_for_fun_level=None,
              useeadb=False, am_rodc=False, lp=None, use_ntvfs=False,
              use_rfc2307=False, skip_sysvolacl=True,
              base_schema="2019", adprep_level=DS_DOMAIN_FUNCTION_2016,
              plaintext_secrets=False, backend_store=None,
              backend_store_size=None, batch_mode=False):
    """Provision samba4

    Validates the role/schema/functional-level combination, generates any
    missing secrets, writes smb.conf, creates the supporting databases
    (share, secrets, registry, privileges, idmap, SAM), optionally fills
    the SAM (samdb_fill == FILL_FULL), runs adprep updates and writes the
    Kerberos configuration.

    :param logger: Logger used to report progress
    :param session_info: Session info for the local database connections
    :param samdb_fill: One of FILL_FULL, FILL_SUBDOMAIN or FILL_DRS
    :param serverrole: Role to provision; normalized via
        sanitize_server_role()
    :return: A ProvisionResult describing the new installation
    :raise ProvisioningError: On invalid parameters or setup failure
    :note: caution, this wipes all existing data!
    """

    try:
        serverrole = sanitize_server_role(serverrole)
    except ValueError:
        raise ProvisioningError('server role (%s) should be one of "active directory domain controller", "member server", "standalone server"' % serverrole)

    if dom_for_fun_level is None:
        dom_for_fun_level = DS_DOMAIN_FUNCTION_2008_R2

    # The base schema caps both the domain functional level and the
    # adprep level we may apply.
    if base_schema in ["2008_R2", "2008_R2_old"]:
        max_adprep_level = DS_DOMAIN_FUNCTION_2008_R2
    elif base_schema in ["2012"]:
        max_adprep_level = DS_DOMAIN_FUNCTION_2012
    elif base_schema in ["2012_R2"]:
        max_adprep_level = DS_DOMAIN_FUNCTION_2012_R2
    else:
        max_adprep_level = DS_DOMAIN_FUNCTION_2016

    if max_adprep_level < dom_for_fun_level:
        raise ProvisioningError('dom_for_fun_level[%u] incompatible with base_schema[%s]' %
                                (dom_for_fun_level, base_schema))

    if adprep_level is not None and max_adprep_level < adprep_level:
        raise ProvisioningError('base_schema[%s] incompatible with adprep_level[%u]' %
                                (base_schema, adprep_level))

    if adprep_level is not None and adprep_level < dom_for_fun_level:
        raise ProvisioningError('dom_for_fun_level[%u] incompatible with adprep_level[%u]' %
                                (dom_for_fun_level, adprep_level))

    if ldapadminpass is None:
        # Make a new, random password between Samba and its LDAP server
        ldapadminpass = samba.generate_random_password(128, 255)

    if backend_store is None:
        backend_store = get_default_backend_store()

    if domainsid is None:
        domainsid = security.random_sid()

    # Resolve the local unix identities provisioning will chown to.
    root_uid = get_root_uid([root or "root"], logger)
    nobody_uid = findnss_uid([nobody or "nobody"])
    users_gid = findnss_gid([users or "users", 'users', 'other', 'staff'])
    root_gid = pwd.getpwuid(root_uid).pw_gid

    try:
        bind_gid = findnss_gid(["bind", "named"])
    except KeyError:
        # No BIND installed; the BIND DLZ backend just won't get a group.
        bind_gid = None

    if targetdir is not None:
        smbconf = os.path.join(targetdir, "etc", "smb.conf")
    elif smbconf is None:
        smbconf = samba.param.default_path()
    if not os.path.exists(os.path.dirname(smbconf)):
        os.makedirs(os.path.dirname(smbconf))

    server_services = []
    global_param = {}
    if use_rfc2307:
        global_param["idmap_ldb:use rfc2307"] = ["yes"]

    if dns_backend != "SAMBA_INTERNAL":
        server_services.append("-dns")
    else:
        if dns_forwarder is not None:
            global_param["dns forwarder"] = [dns_forwarder]

    if use_ntvfs:
        server_services.append("+smb")
        server_services.append("-s3fs")
        global_param["dcerpc endpoint servers"] = ["+winreg", "+srvsvc"]

    if len(server_services) > 0:
        global_param["server services"] = server_services

    # only install a new smb.conf if there isn't one there already
    if os.path.exists(smbconf):
        # if Samba Team members can't figure out the weird errors
        # loading an empty smb.conf gives, then we need to be smarter.
        # Pretend it just didn't exist --abartlet
        f = open(smbconf, 'r')
        try:
            data = f.read().lstrip()
        finally:
            f.close()
        if data is None or data == "":
            make_smbconf(smbconf, hostname, domain, realm,
                         targetdir, serverrole=serverrole,
                         eadb=useeadb, use_ntvfs=use_ntvfs,
                         lp=lp, global_param=global_param)
    else:
        make_smbconf(smbconf, hostname, domain, realm, targetdir,
                     serverrole=serverrole,
                     eadb=useeadb, use_ntvfs=use_ntvfs, lp=lp, global_param=global_param)

    if lp is None:
        lp = samba.param.LoadParm()
    lp.load(smbconf)
    # Derive all naming contexts / DNs from what was supplied plus the
    # freshly loaded smb.conf.
    names = guess_names(lp=lp, hostname=hostname, domain=domain,
                        dnsdomain=realm, serverrole=serverrole, domaindn=domaindn,
                        configdn=configdn, schemadn=schemadn, serverdn=serverdn,
                        sitename=sitename, rootdn=rootdn, domain_names_forced=(samdb_fill == FILL_DRS))
    paths = provision_paths_from_lp(lp, names.dnsdomain)

    paths.bind_gid = bind_gid
    paths.root_uid = root_uid
    paths.root_gid = root_gid

    hostip = determine_host_ip(logger, lp, hostip)
    hostip6 = determine_host_ip6(logger, lp, hostip6)
    names.hostip = hostip
    names.hostip6 = hostip6
    names.domainguid = domainguid
    names.domainsid = domainsid
    names.forestsid = domainsid

    if serverrole is None:
        serverrole = lp.get("server role")

    # Create the on-disk layout before touching any databases.
    directory_create_or_exists(paths.private_dir, 0o700)
    directory_create_or_exists(paths.binddns_dir, 0o770)
    directory_create_or_exists(os.path.join(paths.private_dir, "tls"))
    directory_create_or_exists(paths.state_dir)
    if not plaintext_secrets:
        setup_encrypted_secrets_key(paths.encrypted_secrets_key_path)

    if paths.sysvol and not os.path.exists(paths.sysvol):
        os.makedirs(paths.sysvol, 0o775)

    schema = Schema(domainsid, invocationid=invocationid,
                    schemadn=names.schemadn, base_schema=base_schema)

    provision_backend = LDBBackend(paths=paths,
                                   lp=lp,
                                   names=names, logger=logger)

    provision_backend.init()
    provision_backend.start()

    # only install a new shares config db if there is none
    if not os.path.exists(paths.shareconf):
        logger.info("Setting up share.ldb")
        share_ldb = Ldb(paths.shareconf, session_info=session_info, lp=lp)
        share_ldb.load_ldif_file_add(setup_path("share.ldif"))

    logger.info("Setting up secrets.ldb")
    secrets_ldb = setup_secretsdb(paths,
                                  session_info=session_info, lp=lp)

    # Everything below runs inside the secrets.ldb transaction opened by
    # setup_secretsdb; it is cancelled on any failure.
    try:
        logger.info("Setting up the registry")
        setup_registry(paths.hklm, session_info, lp=lp)

        logger.info("Setting up the privileges database")
        setup_privileges(paths.privilege, session_info, lp=lp)

        logger.info("Setting up idmap db")
        idmap = setup_idmapdb(paths.idmapdb, session_info=session_info, lp=lp)

        setup_name_mappings(idmap, sid=str(domainsid),
                            root_uid=root_uid, nobody_uid=nobody_uid,
                            users_gid=users_gid)

        logger.info("Setting up SAM db")
        samdb = setup_samdb(paths.samdb, session_info,
                            provision_backend, lp, names, logger=logger,
                            serverrole=serverrole,
                            schema=schema, am_rodc=am_rodc,
                            plaintext_secrets=plaintext_secrets,
                            backend_store=backend_store,
                            backend_store_size=backend_store_size,
                            batch_mode=batch_mode)

        if serverrole == "active directory domain controller":
            if paths.netlogon is None:
                raise MissingShareError("netlogon", paths.smbconf)

            if paths.sysvol is None:
                raise MissingShareError("sysvol", paths.smbconf)

            if not os.path.isdir(paths.netlogon):
                os.makedirs(paths.netlogon, 0o755)

        if adminpass is None:
            adminpass = samba.generate_random_password(12, 32)
            adminpass_generated = True
        else:
            if isinstance(adminpass, bytes):
                adminpass = adminpass.decode('utf-8')
            adminpass_generated = False

        if samdb_fill == FILL_FULL:
            provision_fill(samdb, secrets_ldb, logger, names, paths,
                           schema=schema, samdb_fill=samdb_fill,
                           hostip=hostip, hostip6=hostip6,
                           next_rid=next_rid, dc_rid=dc_rid, adminpass=adminpass,
                           krbtgtpass=krbtgtpass,
                           policyguid=policyguid, policyguid_dc=policyguid_dc,
                           invocationid=invocationid, machinepass=machinepass,
                           ntdsguid=ntdsguid, dns_backend=dns_backend,
                           dnspass=dnspass, serverrole=serverrole,
                           dom_for_fun_level=dom_for_fun_level,
                           lp=lp, use_ntvfs=use_ntvfs,
                           skip_sysvolacl=skip_sysvolacl)

            if adprep_level is not None:
                # adprep needs schema updates; temporarily allow them if
                # the admin has not configured the setting explicitly.
                updates_allowed_overridden = False
                if lp.get("dsdb:schema update allowed") is None:
                    lp.set("dsdb:schema update allowed", "yes")
                    print("Temporarily overriding 'dsdb:schema update allowed' setting")
                    updates_allowed_overridden = True

                samdb.transaction_start()
                try:
                    from samba.forest_update import ForestUpdate
                    forest = ForestUpdate(samdb, fix=True)

                    forest.check_updates_iterator([11, 54, 79, 80, 81, 82, 83])
                    forest.check_updates_functional_level(adprep_level,
                                                          DS_DOMAIN_FUNCTION_2008_R2,
                                                          update_revision=True)

                    samdb.transaction_commit()
                except Exception as e:
                    samdb.transaction_cancel()
                    raise e

                samdb.transaction_start()
                try:
                    from samba.domain_update import DomainUpdate

                    DomainUpdate(samdb, fix=True).check_updates_functional_level(
                        adprep_level,
                        DS_DOMAIN_FUNCTION_2008,
                        update_revision=True,
                    )

                    samdb.transaction_commit()
                except Exception as e:
                    samdb.transaction_cancel()
                    raise e

                if updates_allowed_overridden:
                    lp.set("dsdb:schema update allowed", "no")

        if not is_heimdal_built():
            create_kdc_conf(paths.kdcconf, realm, domain, os.path.dirname(lp.get("log file")))
            logger.info("The Kerberos KDC configuration for Samba AD is "
                        "located at %s", paths.kdcconf)

        create_krb5_conf(paths.krb5conf,
                         dnsdomain=names.dnsdomain, hostname=names.hostname,
                         realm=names.realm)
        logger.info("A Kerberos configuration suitable for Samba AD has been "
                    "generated at %s", paths.krb5conf)
        logger.info("Merge the contents of this file with your system "
                    "krb5.conf or replace it with this one. Do not create a "
                    "symlink!")

        if serverrole == "active directory domain controller":
            create_dns_update_list(paths)

        backend_result = provision_backend.post_setup()
        provision_backend.shutdown()

    except:
        secrets_ldb.transaction_cancel()
        raise

    # Now commit the secrets.ldb to disk
    secrets_ldb.transaction_commit()

    # the commit creates the dns.keytab in the private directory
    create_dns_dir_keytab_link(logger, paths)

    result = ProvisionResult()
    result.server_role = serverrole
    result.domaindn = domaindn
    result.paths = paths
    result.names = names
    result.lp = lp
    result.samdb = samdb
    result.idmap = idmap
    result.domainsid = str(domainsid)

    if samdb_fill == FILL_FULL:
        result.adminpass_generated = adminpass_generated
        result.adminpass = adminpass
    else:
        result.adminpass_generated = False
        result.adminpass = None

    result.backend_result = backend_result

    if use_rfc2307:
        provision_fake_ypserver(logger=logger, samdb=samdb,
                                domaindn=names.domaindn, netbiosname=names.netbiosname,
                                nisdomain=names.domain.lower())

    return result
+
+
def provision_become_dc(smbconf=None, targetdir=None, realm=None,
                        rootdn=None, domaindn=None, schemadn=None,
                        configdn=None, serverdn=None, domain=None,
                        hostname=None, domainsid=None,
                        machinepass=None, dnspass=None,
                        dns_backend=None, sitename=None, debuglevel=1,
                        use_ntvfs=False):
    """Provision a DC whose SAM will be filled via DRS replication.

    Runs provision() with samdb_fill=FILL_DRS and the AD DC role, then
    pins the requested debug level into the returned loadparm context.
    """
    logger = logging.getLogger("provision")
    samba.set_debug_level(debuglevel)

    result = provision(logger, system_session(),
                       smbconf=smbconf, targetdir=targetdir,
                       samdb_fill=FILL_DRS, realm=realm, rootdn=rootdn,
                       domaindn=domaindn, schemadn=schemadn,
                       configdn=configdn, serverdn=serverdn, domain=domain,
                       hostname=hostname, hostip=None, domainsid=domainsid,
                       machinepass=machinepass,
                       serverrole="active directory domain controller",
                       sitename=sitename, dns_backend=dns_backend,
                       dnspass=dnspass, use_ntvfs=use_ntvfs)
    result.lp.set("debuglevel", str(debuglevel))
    return result
+
+
def create_krb5_conf(path, dnsdomain, hostname, realm):
    """Write out a file containing a valid krb5.conf file

    :param path: Path of the new krb5.conf file.
    :param dnsdomain: DNS Domain name
    :param hostname: Local hostname
    :param realm: Realm name
    """
    substitutions = {
        "DNSDOMAIN": dnsdomain,
        "HOSTNAME": hostname,
        "REALM": realm,
    }
    setup_file(setup_path("krb5.conf"), path, substitutions)
+
+
class ProvisioningError(Exception):
    """A generic provision error."""

    def __init__(self, value):
        # Pass the message to Exception so that e.args, repr() and
        # pickling behave normally; the original left args empty.
        super().__init__(value)
        self.value = value

    def __str__(self):
        return "ProvisioningError: " + self.value
+
+
class InvalidNetbiosName(Exception):
    """A specified name was not a valid NetBIOS name."""

    def __init__(self, name):
        # %r already renders quotes around the value; the previous format
        # string ('%r') wrapped it in a second pair, producing ''name''.
        super().__init__(
            "The name %r is not a valid NetBIOS name" % name)
+
+
class MissingShareError(ProvisioningError):
    """Raised when smb.conf lacks a share required for the DC role."""

    def __init__(self, name, smbconf):
        message = ("Existing smb.conf does not have a [%s] share, but you are "
                   "configuring a DC. Please remove %s or add the share manually." %
                   (name, smbconf))
        super().__init__(message)
diff --git a/python/samba/provision/backend.py b/python/samba/provision/backend.py
new file mode 100644
index 0000000..4ffe308
--- /dev/null
+++ b/python/samba/provision/backend.py
@@ -0,0 +1,87 @@
+#
+# Unix SMB/CIFS implementation.
+# backend code for provisioning a Samba4 server
+
+# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007-2008
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2008-2009
+# Copyright (C) Oliver Liebel <oliver@itc.li> 2008-2009
+#
+# Based on the original in EJS:
+# Copyright (C) Andrew Tridgell <tridge@samba.org> 2005
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Functions for setting up a Samba configuration (LDB and LDAP backends)."""
+
+import shutil
+
+class BackendResult(object):
+    """Result of a provisioning backend operation, reportable to a logger."""
+
+    def report_logger(self, logger):
+        """Report this result to a particular logger.
+
+        :param logger: logging.Logger-style object to report to.
+        """
+        raise NotImplementedError(self.report_logger)
+
+
+class ProvisionBackend(object):
+    """Abstract provisioning backend with an init/start/shutdown lifecycle.
+
+    Subclasses override the lifecycle hooks; the base class only stores the
+    provisioning context objects handed in by the caller.
+    """
+
+    def __init__(self, paths=None, lp=None,
+                 names=None, logger=None):
+        """Provision a backend for samba4"""
+        self.paths = paths
+        self.lp = lp
+        self.names = names
+        self.logger = logger
+
+        # Default backend type; subclasses may rely on or override this tag.
+        self.type = "ldb"
+
+    def init(self):
+        """Initialize the backend."""
+        raise NotImplementedError(self.init)
+
+    def start(self):
+        """Start the backend."""
+        raise NotImplementedError(self.start)
+
+    def shutdown(self):
+        """Shutdown the backend."""
+        raise NotImplementedError(self.shutdown)
+
+    def post_setup(self):
+        """Post setup.
+
+        :return: A BackendResult or None
+        """
+        raise NotImplementedError(self.post_setup)
+
+
+class LDBBackend(ProvisionBackend):
+    """Default LDB-based backend: no external service to start or stop."""
+
+    def init(self):
+
+        # Wipe the old sam.ldb databases away; the second argument
+        # (ignore_errors=True) makes this a no-op if the tree is absent.
+        shutil.rmtree(self.paths.samdb + ".d", True)
+
+    def start(self):
+        # Nothing to start for the in-process LDB backend.
+        pass
+
+    def shutdown(self):
+        # Nothing to shut down for the in-process LDB backend.
+        pass
+
+    def post_setup(self):
+        # No post-setup work; returns None (no BackendResult to report).
+        pass
+
+
diff --git a/python/samba/provision/common.py b/python/samba/provision/common.py
new file mode 100644
index 0000000..a6851b7
--- /dev/null
+++ b/python/samba/provision/common.py
@@ -0,0 +1,91 @@
+
+# Unix SMB/CIFS implementation.
+# utility functions for provisioning a Samba4 server
+
+# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007-2010
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2008-2009
+# Copyright (C) Oliver Liebel <oliver@itc.li> 2008-2009
+#
+# Based on the original in EJS:
+# Copyright (C) Andrew Tridgell <tridge@samba.org> 2005
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Functions for setting up a Samba configuration."""
+
+__docformat__ = "restructuredText"
+
+import os
+from samba import read_and_sub_file
+from samba.param import setup_dir
+
+FILL_FULL = "FULL"
+FILL_SUBDOMAIN = "SUBDOMAIN"
+FILL_NT4SYNC = "NT4SYNC"
+FILL_DRS = "DRS"
+
+
+def setup_path(file):
+    """Return an absolute path to the provision template file specified by file
+
+    :param file: Filename relative to the setup directory.
+    :return: Absolute path under samba.param.setup_dir().
+    """
+    return os.path.join(setup_dir(), file)
+
+
+def setup_add_ldif(ldb, ldif_path, subst_vars=None, controls=None):
+    """Setup a ldb in the private dir.
+
+    :param ldb: LDB file to import data into
+    :param ldif_path: Path of the LDIF file to load
+    :param subst_vars: Optional variables to substitute in LDIF.
+    :param controls: Optional list of ldb controls; defaults to ["relax:0"]
+        when None is passed.
+    """
+    if controls is None:
+        controls = ["relax:0"]
+    assert isinstance(ldif_path, str)
+    data = read_and_sub_file(ldif_path, subst_vars)
+    ldb.add_ldif(data, controls)
+
+
+def setup_modify_ldif(ldb, ldif_path, subst_vars=None, controls=None):
+    """Modify a ldb in the private dir.
+
+    :param ldb: LDB object.
+    :param ldif_path: LDIF file path.
+    :param subst_vars: Optional dictionary with substitution variables.
+    :param controls: Optional list of ldb controls; defaults to ["relax:0"]
+        when None is passed.
+    """
+    if controls is None:
+        controls = ["relax:0"]
+    data = read_and_sub_file(ldif_path, subst_vars)
+    ldb.modify_ldif(data, controls)
+
+
+def setup_ldb(ldb, ldif_path, subst_vars):
+    """Import an LDIF file into a LDB handle, optionally substituting
+    variables.
+
+    :note: Either all LDIF data will be added or none (using transactions).
+
+    :param ldb: LDB file to import into.
+    :param ldif_path: Path to the LDIF file.
+    :param subst_vars: Dictionary with substitution variables.
+    """
+    assert ldb is not None
+    ldb.transaction_start()
+    try:
+        setup_add_ldif(ldb, ldif_path, subst_vars)
+    except:
+        # Bare except is deliberate here: even non-Exception interrupts must
+        # roll the transaction back; the exception is always re-raised.
+        ldb.transaction_cancel()
+        raise
+    else:
+        ldb.transaction_commit()
diff --git a/python/samba/provision/kerberos.py b/python/samba/provision/kerberos.py
new file mode 100644
index 0000000..665c031
--- /dev/null
+++ b/python/samba/provision/kerberos.py
@@ -0,0 +1,104 @@
+# Unix SMB/CIFS implementation
+#
+# Backend code for provisioning a Samba AD server
+#
+# Copyright (c) 2015 Andreas Schneider <asn@samba.org>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from samba.provision.kerberos_implementation import (
+ kdb_modules_dir)
+from samba import is_heimdal_built
+import os
+
+
+def create_kdc_conf(kdcconf, realm, domain, logdir):
+    """Write an MIT KDC configuration (kdc.conf) for a Samba AD DC.
+
+    Does nothing when Samba was built against Heimdal, when the caller has
+    already pointed KRB5_KDC_PROFILE at a config, or when running under
+    selftest with MITKRB5 set.
+
+    :param kdcconf: Path of the kdc.conf file to write.
+    :param realm: Kerberos realm name (upper-cased before use).
+    :param domain: NT domain name (upper-cased before use).
+    :param logdir: Directory for the mit_kdc / mit_kadmin log files.
+    """
+
+    if is_heimdal_built():
+        return
+
+    # Do nothing if kdc.conf has been set
+    if 'KRB5_KDC_PROFILE' in os.environ:
+        return
+
+    # We are in selftest
+    if 'SAMBA_SELFTEST' in os.environ and 'MITKRB5' in os.environ:
+        return
+
+    assert kdcconf is not None
+
+    assert domain is not None
+    domain = domain.upper()
+
+    assert realm is not None
+    realm = realm.upper()
+
+    f = open(kdcconf, 'w')
+    try:
+        f.write("[kdcdefaults]\n")
+
+        f.write("\tkdc_ports = 88\n")
+        f.write("\tkdc_tcp_ports = 88\n")
+        f.write("\tkadmind_port = 464\n")
+        f.write("\trestrict_anonymous_to_tgt = true\n")
+        f.write("\n")
+
+        # Realm sections are emitted for the realm (upper and lower case)
+        # and for the NT domain name, so lookups under any spelling resolve.
+        f.write("[realms]\n")
+
+        f.write("\t%s = {\n" % realm)
+        f.write("\t\tmaster_key_type = aes256-cts\n")
+        f.write("\t\tdefault_principal_flags = +preauth\n")
+        f.write("\t}\n")
+        f.write("\n")
+
+        f.write("\t%s = {\n" % realm.lower())
+        f.write("\t\tmaster_key_type = aes256-cts\n")
+        f.write("\t\tdefault_principal_flags = +preauth\n")
+        f.write("\t}\n")
+        f.write("\n")
+
+        f.write("\t%s = {\n" % domain)
+        f.write("\t\tmaster_key_type = aes256-cts\n")
+        f.write("\t\tdefault_principal_flags = +preauth\n")
+        f.write("\t}\n")
+        f.write("\n")
+
+        # Point every realm spelling at the Samba KDB plugin.
+        f.write("[dbmodules]\n")
+
+        f.write("\tdb_module_dir = %s\n" % kdb_modules_dir)
+        f.write("\n")
+
+        f.write("\t%s = {\n" % realm)
+        f.write("\t\tdb_library = samba\n")
+        f.write("\t}\n")
+        f.write("\n")
+
+        f.write("\t%s = {\n" % realm.lower())
+        f.write("\t\tdb_library = samba\n")
+        f.write("\t}\n")
+        f.write("\n")
+
+        f.write("\t%s = {\n" % domain)
+        f.write("\t\tdb_library = samba\n")
+        f.write("\t}\n")
+        f.write("\n")
+
+        f.write("[logging]\n")
+
+        f.write("\tkdc = FILE:%s/mit_kdc.log\n" % logdir)
+        f.write("\tadmin_server = FILE:%s/mit_kadmin.log\n" % logdir)
+        f.write("\n")
+    finally:
+        f.close()
diff --git a/python/samba/provision/sambadns.py b/python/samba/provision/sambadns.py
new file mode 100644
index 0000000..01398bb
--- /dev/null
+++ b/python/samba/provision/sambadns.py
@@ -0,0 +1,1329 @@
+# Unix SMB/CIFS implementation.
+# backend code for provisioning DNS for a Samba4 server
+#
+# Copyright (C) Kai Blin <kai@samba.org> 2011
+# Copyright (C) Amitay Isaacs <amitay@gmail.com> 2011
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""DNS-related provisioning"""
+
+import os
+import uuid
+import shutil
+import time
+import ldb
+from base64 import b64encode
+import subprocess
+import samba
+from samba.tdb_util import tdb_copy
+from samba.mdb_util import mdb_copy
+from samba.ndr import ndr_pack, ndr_unpack
+from samba import setup_file
+from samba.dcerpc import dnsp, misc, security
+from samba.dsdb import (
+ DS_DOMAIN_FUNCTION_2000,
+ DS_DOMAIN_FUNCTION_2003,
+ DS_DOMAIN_FUNCTION_2016,
+ DS_GUID_USERS_CONTAINER
+)
+from samba.descriptor import (
+ get_deletedobjects_descriptor,
+ get_domain_descriptor,
+ get_domain_delete_protected1_descriptor,
+ get_domain_delete_protected2_descriptor,
+ get_dns_partition_descriptor,
+ get_dns_forest_microsoft_dns_descriptor,
+ get_dns_domain_microsoft_dns_descriptor
+)
+from samba.provision.common import (
+ setup_path,
+ setup_add_ldif,
+ setup_modify_ldif,
+ setup_ldb,
+ FILL_FULL,
+ FILL_SUBDOMAIN,
+)
+
+from samba.samdb import get_default_backend_store
+from samba.common import get_string
+
+def get_domainguid(samdb, domaindn):
+    """Return the objectGUID of the domain object as a string.
+
+    :param samdb: SamDB handle to search.
+    :param domaindn: DN of the domain object.
+    """
+    res = samdb.search(base=domaindn, scope=ldb.SCOPE_BASE, attrs=["objectGUID"])
+    domainguid = str(ndr_unpack(misc.GUID, res[0]["objectGUID"][0]))
+    return domainguid
+
+
+def get_dnsadmins_sid(samdb, domaindn):
+    """Return the SID of the DnsAdmins group in the domain's Users container.
+
+    :param samdb: SamDB handle to search.
+    :param domaindn: DN of the domain object.
+    :return: security.dom_sid of CN=DnsAdmins.
+    """
+    base_dn = "CN=DnsAdmins,%s" % samdb.get_wellknown_dn(ldb.Dn(samdb,
+                                                                domaindn), DS_GUID_USERS_CONTAINER)
+    res = samdb.search(base=base_dn, scope=ldb.SCOPE_BASE, attrs=["objectSid"])
+    dnsadmins_sid = ndr_unpack(security.dom_sid, res[0]["objectSid"][0])
+    return dnsadmins_sid
+
+
+# Note: these classes are not quite the same as similar looking ones
+# in ../dnsserver.py -- those ones are based on
+# dnsserver.DNS_RPC_RECORD ([MS-DNSP]2.2.2.2.5 "DNS_RPC_RECORD"),
+# these are based on dnsp.DnssrvRpcRecord ([MS-DNSP] 2.3.2.2
+# "DnsRecord").
+#
+# They are not interchangeable or mergeable. If you're talking over
+# the wire you want those other ones; these are the on-disk format.
+
+class ARecord(dnsp.DnssrvRpcRecord):
+    """On-disk dnsp A record carrying an IPv4 address string."""
+
+    def __init__(self, ip_addr, serial=1, ttl=900, rank=dnsp.DNS_RANK_ZONE):
+        super().__init__()
+        self.wType = dnsp.DNS_TYPE_A
+        self.rank = rank
+        self.dwSerial = serial
+        self.dwTtlSeconds = ttl
+        self.data = ip_addr
+
+
+class AAAARecord(dnsp.DnssrvRpcRecord):
+    """On-disk dnsp AAAA record carrying an IPv6 address string."""
+
+    def __init__(self, ip6_addr, serial=1, ttl=900, rank=dnsp.DNS_RANK_ZONE):
+        super().__init__()
+        self.wType = dnsp.DNS_TYPE_AAAA
+        self.rank = rank
+        self.dwSerial = serial
+        self.dwTtlSeconds = ttl
+        self.data = ip6_addr
+
+
+class CNAMERecord(dnsp.DnssrvRpcRecord):
+    """On-disk dnsp CNAME record carrying the canonical name string."""
+
+    def __init__(self, cname, serial=1, ttl=900, rank=dnsp.DNS_RANK_ZONE):
+        super().__init__()
+        self.wType = dnsp.DNS_TYPE_CNAME
+        self.rank = rank
+        self.dwSerial = serial
+        self.dwTtlSeconds = ttl
+        self.data = cname
+
+
+class NSRecord(dnsp.DnssrvRpcRecord):
+    """On-disk dnsp NS record carrying the name-server host name."""
+
+    def __init__(self, dns_server, serial=1, ttl=900, rank=dnsp.DNS_RANK_ZONE):
+        super().__init__()
+        self.wType = dnsp.DNS_TYPE_NS
+        self.rank = rank
+        self.dwSerial = serial
+        self.dwTtlSeconds = ttl
+        self.data = dns_server
+
+
+class SOARecord(dnsp.DnssrvRpcRecord):
+    """On-disk dnsp SOA record; data is a dnsp.soa structure.
+
+    ``serial`` is used both as the record's dwSerial and the SOA serial.
+    """
+
+    def __init__(self, mname, rname, serial=1, refresh=900, retry=600,
+                 expire=86400, minimum=3600, ttl=3600, rank=dnsp.DNS_RANK_ZONE):
+        super().__init__()
+        self.wType = dnsp.DNS_TYPE_SOA
+        self.rank = rank
+        self.dwSerial = serial
+        self.dwTtlSeconds = ttl
+        soa = dnsp.soa()
+        soa.serial = serial
+        soa.refresh = refresh
+        soa.retry = retry
+        soa.expire = expire
+        soa.mname = mname
+        soa.rname = rname
+        soa.minimum = minimum
+        self.data = soa
+
+
+class SRVRecord(dnsp.DnssrvRpcRecord):
+    """On-disk dnsp SRV record; data is a dnsp.srv structure."""
+
+    def __init__(self, target, port, priority=0, weight=100, serial=1, ttl=900,
+                 rank=dnsp.DNS_RANK_ZONE):
+        super().__init__()
+        self.wType = dnsp.DNS_TYPE_SRV
+        self.rank = rank
+        self.dwSerial = serial
+        self.dwTtlSeconds = ttl
+        srv = dnsp.srv()
+        srv.nameTarget = target
+        srv.wPort = port
+        srv.wPriority = priority
+        srv.wWeight = weight
+        self.data = srv
+
+
+class TXTRecord(dnsp.DnssrvRpcRecord):
+    """On-disk dnsp TXT record; data is a dnsp.string_list built from slist."""
+
+    def __init__(self, slist, serial=1, ttl=900, rank=dnsp.DNS_RANK_ZONE):
+        super().__init__()
+        self.wType = dnsp.DNS_TYPE_TXT
+        self.rank = rank
+        self.dwSerial = serial
+        self.dwTtlSeconds = ttl
+        stringlist = dnsp.string_list()
+        stringlist.count = len(slist)
+        stringlist.str = slist
+        self.data = stringlist
+
+
+class TypeProperty(dnsp.DnsProperty):
+    """dNSProperty: zone type (defaults to primary zone)."""
+
+    def __init__(self, zone_type=dnsp.DNS_ZONE_TYPE_PRIMARY):
+        super().__init__()
+        self.wDataLength = 1
+        self.version = 1
+        self.id = dnsp.DSPROPERTY_ZONE_TYPE
+        self.data = zone_type
+
+
+class AllowUpdateProperty(dnsp.DnsProperty):
+    """dNSProperty: dynamic-update policy (defaults to secure updates)."""
+
+    def __init__(self, allow_update=dnsp.DNS_ZONE_UPDATE_SECURE):
+        super().__init__()
+        self.wDataLength = 1
+        self.version = 1
+        self.id = dnsp.DSPROPERTY_ZONE_ALLOW_UPDATE
+        self.data = allow_update
+
+
+class SecureTimeProperty(dnsp.DnsProperty):
+    """dNSProperty: zone secure time (defaults to 0)."""
+
+    def __init__(self, secure_time=0):
+        super().__init__()
+        self.wDataLength = 1
+        self.version = 1
+        self.id = dnsp.DSPROPERTY_ZONE_SECURE_TIME
+        self.data = secure_time
+
+
+class NorefreshIntervalProperty(dnsp.DnsProperty):
+    """dNSProperty: aging no-refresh interval (defaults to 0)."""
+
+    def __init__(self, norefresh_interval=0):
+        super().__init__()
+        self.wDataLength = 1
+        self.version = 1
+        self.id = dnsp.DSPROPERTY_ZONE_NOREFRESH_INTERVAL
+        self.data = norefresh_interval
+
+
+class RefreshIntervalProperty(dnsp.DnsProperty):
+    """dNSProperty: aging refresh interval (defaults to 0)."""
+
+    def __init__(self, refresh_interval=0):
+        super().__init__()
+        self.wDataLength = 1
+        self.version = 1
+        self.id = dnsp.DSPROPERTY_ZONE_REFRESH_INTERVAL
+        self.data = refresh_interval
+
+
+class AgingStateProperty(dnsp.DnsProperty):
+    """dNSProperty: aging state flag (defaults to disabled, 0)."""
+
+    def __init__(self, aging_enabled=0):
+        super().__init__()
+        self.wDataLength = 1
+        self.version = 1
+        self.id = dnsp.DSPROPERTY_ZONE_AGING_STATE
+        self.data = aging_enabled
+
+
+class AgingEnabledTimeProperty(dnsp.DnsProperty):
+    """dNSProperty: hours until the next aging cycle (defaults to 0)."""
+
+    def __init__(self, next_cycle_hours=0):
+        super().__init__()
+        self.wDataLength = 1
+        self.version = 1
+        self.id = dnsp.DSPROPERTY_ZONE_AGING_ENABLED_TIME
+        self.data = next_cycle_hours
+
+
+def setup_dns_partitions(samdb, domainsid, domaindn, forestdn, configdn,
+                         serverdn, fill_level):
+    """Create the DomainDnsZones and ForestDnsZones application partitions.
+
+    The forest partition is skipped when fill_level is FILL_SUBDOMAIN, since
+    a subdomain joins an existing forest that already has one.
+
+    :param samdb: SamDB handle.
+    :param domainsid: Domain SID used to build the security descriptors.
+    :param domaindn: DN of the domain partition.
+    :param forestdn: DN of the forest root partition.
+    :param configdn: Configuration partition DN.
+    :param serverdn: DN of this server's NTDS settings parent.
+    :param fill_level: One of the FILL_* constants from provision.common.
+    """
+    domainzone_dn = "DC=DomainDnsZones,%s" % domaindn
+    forestzone_dn = "DC=ForestDnsZones,%s" % forestdn
+    descriptor = get_dns_partition_descriptor(domainsid)
+    deletedobjects_desc = get_deletedobjects_descriptor(domainsid)
+
+    setup_add_ldif(samdb, setup_path("provision_dnszones_partitions.ldif"), {
+        "ZONE_DN": domainzone_dn,
+        "SECDESC": b64encode(descriptor).decode('utf8')
+    })
+    if fill_level != FILL_SUBDOMAIN:
+        # NOTE(review): the forest partition reuses the same descriptor built
+        # from domainsid above — confirm this matches Windows behaviour.
+        setup_add_ldif(samdb, setup_path("provision_dnszones_partitions.ldif"), {
+            "ZONE_DN": forestzone_dn,
+            "SECDESC": b64encode(descriptor).decode('utf8')
+        })
+
+    domainzone_guid = str(uuid.uuid4())
+    domainzone_dns = ldb.Dn(samdb, domainzone_dn).canonical_ex_str().strip()
+
+    protected1_desc = get_domain_delete_protected1_descriptor(domainsid)
+    protected2_desc = get_domain_delete_protected2_descriptor(domainsid)
+    setup_add_ldif(samdb, setup_path("provision_dnszones_add.ldif"), {
+        "ZONE_DN": domainzone_dn,
+        "ZONE_GUID": domainzone_guid,
+        "ZONE_DNS": domainzone_dns,
+        "CONFIGDN": configdn,
+        "SERVERDN": serverdn,
+        "DELETEDOBJECTS_DESCRIPTOR": b64encode(deletedobjects_desc).decode('utf8'),
+        "LOSTANDFOUND_DESCRIPTOR": b64encode(protected2_desc).decode('utf8'),
+        "INFRASTRUCTURE_DESCRIPTOR": b64encode(protected1_desc).decode('utf8'),
+    })
+    setup_modify_ldif(samdb, setup_path("provision_dnszones_modify.ldif"), {
+        "CONFIGDN": configdn,
+        "SERVERDN": serverdn,
+        "ZONE_DN": domainzone_dn,
+    })
+
+    if fill_level != FILL_SUBDOMAIN:
+        forestzone_guid = str(uuid.uuid4())
+        forestzone_dns = ldb.Dn(samdb, forestzone_dn).canonical_ex_str().strip()
+
+        setup_add_ldif(samdb, setup_path("provision_dnszones_add.ldif"), {
+            "ZONE_DN": forestzone_dn,
+            "ZONE_GUID": forestzone_guid,
+            "ZONE_DNS": forestzone_dns,
+            "CONFIGDN": configdn,
+            "SERVERDN": serverdn,
+            "DELETEDOBJECTS_DESCRIPTOR": b64encode(deletedobjects_desc).decode('utf8'),
+            "LOSTANDFOUND_DESCRIPTOR": b64encode(protected2_desc).decode('utf8'),
+            "INFRASTRUCTURE_DESCRIPTOR": b64encode(protected1_desc).decode('utf8'),
+        })
+        setup_modify_ldif(samdb, setup_path("provision_dnszones_modify.ldif"), {
+            "CONFIGDN": configdn,
+            "SERVERDN": serverdn,
+            "ZONE_DN": forestzone_dn,
+        })
+
+
+def add_dns_accounts(samdb, domaindn):
+    """Add the DNS service accounts/groups from the provision LDIF template.
+
+    :param samdb: SamDB handle.
+    :param domaindn: DN of the domain partition, substituted into the LDIF.
+    """
+    setup_add_ldif(samdb, setup_path("provision_dns_accounts_add.ldif"), {
+        "DOMAINDN": domaindn,
+    })
+
+
+def add_dns_container(samdb, domaindn, prefix, domain_sid, dnsadmins_sid, forest=False):
+    """Add the CN=MicrosoftDNS container under the given partition prefix.
+
+    :param samdb: SamDB handle.
+    :param domaindn: DN the prefix is relative to.
+    :param prefix: Partition prefix (e.g. "DC=DomainDnsZones").
+    :param domain_sid: Domain SID for descriptor construction.
+    :param dnsadmins_sid: SID substituted for DnsAdmins in the descriptor.
+    :param forest: Use the forest (ForestDnsZones) descriptor when True.
+    """
+    name_map = {'DnsAdmins': str(dnsadmins_sid)}
+    if forest is True:
+        sd_val = get_dns_forest_microsoft_dns_descriptor(domain_sid,
+                                                         name_map=name_map)
+    else:
+        sd_val = get_dns_domain_microsoft_dns_descriptor(domain_sid,
+                                                         name_map=name_map)
+    # CN=MicrosoftDNS,<PREFIX>,<DOMAINDN>
+    msg = ldb.Message(ldb.Dn(samdb, "CN=MicrosoftDNS,%s,%s" % (prefix, domaindn)))
+    msg["objectClass"] = ["top", "container"]
+    msg["nTSecurityDescriptor"] = \
+        ldb.MessageElement(sd_val, ldb.FLAG_MOD_ADD,
+                           "nTSecurityDescriptor")
+    samdb.add(msg)
+
+
+def add_rootservers(samdb, domaindn, prefix):
+    """Create the RootDNSServers cache zone with hard-coded root hints.
+
+    :param samdb: SamDB handle.
+    :param domaindn: DN the prefix is relative to.
+    :param prefix: Partition prefix (e.g. "DC=DomainDnsZones").
+    """
+    # https://www.internic.net/zones/named.root
+    # NOTE(review): these addresses are a static snapshot and may lag the
+    # published named.root (e.g. b.root-servers.net has changed since) —
+    # verify against the current IANA list when touching this table.
+    rootservers = {}
+    rootservers["a.root-servers.net"] = "198.41.0.4"
+    rootservers["b.root-servers.net"] = "192.228.79.201"
+    rootservers["c.root-servers.net"] = "192.33.4.12"
+    rootservers["d.root-servers.net"] = "199.7.91.13"
+    rootservers["e.root-servers.net"] = "192.203.230.10"
+    rootservers["f.root-servers.net"] = "192.5.5.241"
+    rootservers["g.root-servers.net"] = "192.112.36.4"
+    rootservers["h.root-servers.net"] = "198.97.190.53"
+    rootservers["i.root-servers.net"] = "192.36.148.17"
+    rootservers["j.root-servers.net"] = "192.58.128.30"
+    rootservers["k.root-servers.net"] = "193.0.14.129"
+    rootservers["l.root-servers.net"] = "199.7.83.42"
+    rootservers["m.root-servers.net"] = "202.12.27.33"
+
+    # IPv6 addresses are collected but currently unused (see the commented
+    # AAAA block below).
+    rootservers_v6 = {}
+    rootservers_v6["a.root-servers.net"] = "2001:503:ba3e::2:30"
+    rootservers_v6["b.root-servers.net"] = "2001:500:84::b"
+    rootservers_v6["c.root-servers.net"] = "2001:500:2::c"
+    rootservers_v6["d.root-servers.net"] = "2001:500:2d::d"
+    rootservers_v6["e.root-servers.net"] = "2001:500:a8::e"
+    rootservers_v6["f.root-servers.net"] = "2001:500:2f::f"
+    rootservers_v6["g.root-servers.net"] = "2001:500:12::d0d"
+    rootservers_v6["h.root-servers.net"] = "2001:500:1::53"
+    rootservers_v6["i.root-servers.net"] = "2001:7fe::53"
+    rootservers_v6["j.root-servers.net"] = "2001:503:c27::2:30"
+    rootservers_v6["k.root-servers.net"] = "2001:7fd::1"
+    rootservers_v6["l.root-servers.net"] = "2001:500:9f::42"
+    rootservers_v6["m.root-servers.net"] = "2001:dc3::35"
+
+    container_dn = "DC=RootDNSServers,CN=MicrosoftDNS,%s,%s" % (prefix, domaindn)
+
+    # Add DC=RootDNSServers,CN=MicrosoftDNS,<PREFIX>,<DOMAINDN>
+    msg = ldb.Message(ldb.Dn(samdb, container_dn))
+    props = []
+    props.append(ndr_pack(TypeProperty(zone_type=dnsp.DNS_ZONE_TYPE_CACHE)))
+    props.append(ndr_pack(AllowUpdateProperty(allow_update=dnsp.DNS_ZONE_UPDATE_OFF)))
+    props.append(ndr_pack(SecureTimeProperty()))
+    props.append(ndr_pack(NorefreshIntervalProperty()))
+    props.append(ndr_pack(RefreshIntervalProperty()))
+    props.append(ndr_pack(AgingStateProperty()))
+    props.append(ndr_pack(AgingEnabledTimeProperty()))
+    msg["objectClass"] = ["top", "dnsZone"]
+    msg["cn"] = ldb.MessageElement("Zone", ldb.FLAG_MOD_ADD, "cn")
+    msg["dNSProperty"] = ldb.MessageElement(props, ldb.FLAG_MOD_ADD, "dNSProperty")
+    samdb.add(msg)
+
+    # Add DC=@,DC=RootDNSServers,CN=MicrosoftDNS,<PREFIX>,<DOMAINDN>
+    record = []
+    for rserver in rootservers:
+        record.append(ndr_pack(NSRecord(rserver, serial=0, ttl=0, rank=dnsp.DNS_RANK_ROOT_HINT)))
+
+    msg = ldb.Message(ldb.Dn(samdb, "DC=@,%s" % container_dn))
+    msg["objectClass"] = ["top", "dnsNode"]
+    msg["dnsRecord"] = ldb.MessageElement(record, ldb.FLAG_MOD_ADD, "dnsRecord")
+    samdb.add(msg)
+
+    # Add DC=<rootserver>,DC=RootDNSServers,CN=MicrosoftDNS,<PREFIX>,<DOMAINDN>
+    for rserver in rootservers:
+        record = [ndr_pack(ARecord(rootservers[rserver], serial=0, ttl=0, rank=dnsp.DNS_RANK_ROOT_HINT))]
+        # Add AAAA record as well (How does W2K* add IPv6 records?)
+        # if rserver in rootservers_v6:
+        #    record.append(ndr_pack(AAAARecord(rootservers_v6[rserver], serial=0, ttl=0)))
+        msg = ldb.Message(ldb.Dn(samdb, "DC=%s,%s" % (rserver, container_dn)))
+        msg["objectClass"] = ["top", "dnsNode"]
+        msg["dnsRecord"] = ldb.MessageElement(record, ldb.FLAG_MOD_ADD, "dnsRecord")
+        samdb.add(msg)
+
+
+def add_at_record(samdb, container_dn, prefix, hostname, dnsdomain, hostip, hostip6):
+    """Add the zone-apex (DC=@) node: SOA, NS and optional A/AAAA records.
+
+    :param samdb: SamDB handle.
+    :param container_dn: Zone container DN.
+    :param prefix: Node RDN, e.g. "DC=@".
+    :param hostname: Short hostname of this DC.
+    :param dnsdomain: DNS domain the host belongs to.
+    :param hostip: IPv4 address or None (A record skipped).
+    :param hostip6: IPv6 address or None (AAAA record skipped).
+    """
+
+    fqdn_hostname = "%s.%s" % (hostname, dnsdomain)
+
+    at_records = []
+
+    # SOA record
+    at_soa_record = SOARecord(fqdn_hostname, "hostmaster.%s" % dnsdomain)
+    at_records.append(ndr_pack(at_soa_record))
+
+    # NS record
+    at_ns_record = NSRecord(fqdn_hostname)
+    at_records.append(ndr_pack(at_ns_record))
+
+    if hostip is not None:
+        # A record
+        at_a_record = ARecord(hostip)
+        at_records.append(ndr_pack(at_a_record))
+
+    if hostip6 is not None:
+        # AAAA record
+        at_aaaa_record = AAAARecord(hostip6)
+        at_records.append(ndr_pack(at_aaaa_record))
+
+    msg = ldb.Message(ldb.Dn(samdb, "%s,%s" % (prefix, container_dn)))
+    msg["objectClass"] = ["top", "dnsNode"]
+    msg["dnsRecord"] = ldb.MessageElement(at_records, ldb.FLAG_MOD_ADD, "dnsRecord")
+    samdb.add(msg)
+
+
+def add_srv_record(samdb, container_dn, prefix, host, port):
+    """Add a dnsNode holding one SRV record pointing at host:port."""
+    srv_record = SRVRecord(host, port)
+    msg = ldb.Message(ldb.Dn(samdb, "%s,%s" % (prefix, container_dn)))
+    msg["objectClass"] = ["top", "dnsNode"]
+    msg["dnsRecord"] = ldb.MessageElement(ndr_pack(srv_record), ldb.FLAG_MOD_ADD, "dnsRecord")
+    samdb.add(msg)
+
+
+def add_ns_record(samdb, container_dn, prefix, host):
+    """Add a dnsNode holding one NS record (zone rank) for host."""
+    ns_record = NSRecord(host)
+    msg = ldb.Message(ldb.Dn(samdb, "%s,%s" % (prefix, container_dn)))
+    msg["objectClass"] = ["top", "dnsNode"]
+    msg["dnsRecord"] = ldb.MessageElement(ndr_pack(ns_record), ldb.FLAG_MOD_ADD, "dnsRecord")
+    samdb.add(msg)
+
+
+def add_ns_glue_record(samdb, container_dn, prefix, host):
+    """Add a dnsNode holding one NS record with NS-glue rank for host."""
+    ns_record = NSRecord(host, rank=dnsp.DNS_RANK_NS_GLUE)
+    msg = ldb.Message(ldb.Dn(samdb, "%s,%s" % (prefix, container_dn)))
+    msg["objectClass"] = ["top", "dnsNode"]
+    msg["dnsRecord"] = ldb.MessageElement(ndr_pack(ns_record), ldb.FLAG_MOD_ADD, "dnsRecord")
+    samdb.add(msg)
+
+
+def add_cname_record(samdb, container_dn, prefix, host):
+    """Add a dnsNode holding one CNAME record pointing at host."""
+    cname_record = CNAMERecord(host)
+    msg = ldb.Message(ldb.Dn(samdb, "%s,%s" % (prefix, container_dn)))
+    msg["objectClass"] = ["top", "dnsNode"]
+    msg["dnsRecord"] = ldb.MessageElement(ndr_pack(cname_record), ldb.FLAG_MOD_ADD, "dnsRecord")
+    samdb.add(msg)
+
+
+def add_host_record(samdb, container_dn, prefix, hostip, hostip6):
+    """Add a dnsNode with A and/or AAAA records; no-op if both IPs are falsy.
+
+    :param hostip: IPv4 address or None/empty to skip the A record.
+    :param hostip6: IPv6 address or None/empty to skip the AAAA record.
+    """
+    host_records = []
+    if hostip:
+        a_record = ARecord(hostip)
+        host_records.append(ndr_pack(a_record))
+    if hostip6:
+        aaaa_record = AAAARecord(hostip6)
+        host_records.append(ndr_pack(aaaa_record))
+    if host_records:
+        msg = ldb.Message(ldb.Dn(samdb, "%s,%s" % (prefix, container_dn)))
+        msg["objectClass"] = ["top", "dnsNode"]
+        msg["dnsRecord"] = ldb.MessageElement(host_records, ldb.FLAG_MOD_ADD, "dnsRecord")
+        samdb.add(msg)
+
+
+def add_domain_record(samdb, domaindn, prefix, dnsdomain, domainsid, dnsadmins_sid):
+    """Create the dnsZone object for <DNSDOMAIN> with its security descriptor
+    and default zone properties (secure updates, aging intervals).
+
+    :param dnsadmins_sid: SID interpolated into the SDDL string for the
+        DnsAdmins-specific ACE.
+    """
+    # DC=<DNSDOMAIN>,CN=MicrosoftDNS,<PREFIX>,<DOMAINDN>
+    sddl = "O:SYG:BAD:AI" \
+           "(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;DA)" \
+           "(A;;CC;;;AU)" \
+           "(A;;RPLCLORC;;;WD)" \
+           "(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)" \
+           "(A;CI;RPWPCRCCDCLCRCWOWDSDDTSW;;;ED)" \
+           "(A;CIID;RPWPCRCCDCLCRCWOWDSDDTSW;;;%s)" \
+           "(A;CIID;RPWPCRCCDCLCRCWOWDSDDTSW;;;ED)" \
+           "(OA;CIID;RPWPCR;91e647de-d96f-4b70-9557-d63ff4f3ccd8;;PS)" \
+           "(A;CIID;RPWPCRCCDCLCLORCWOWDSDDTSW;;;EA)" \
+           "(A;CIID;LC;;;RU)" \
+           "(A;CIID;RPWPCRCCLCLORCWOWDSDSW;;;BA)" \
+           "S:AI" % dnsadmins_sid
+    sec = security.descriptor.from_sddl(sddl, domainsid)
+    props = []
+    props.append(ndr_pack(TypeProperty()))
+    props.append(ndr_pack(AllowUpdateProperty()))
+    props.append(ndr_pack(SecureTimeProperty()))
+    props.append(ndr_pack(NorefreshIntervalProperty(norefresh_interval=168)))
+    props.append(ndr_pack(RefreshIntervalProperty(refresh_interval=168)))
+    props.append(ndr_pack(AgingStateProperty()))
+    props.append(ndr_pack(AgingEnabledTimeProperty()))
+    msg = ldb.Message(ldb.Dn(samdb, "DC=%s,CN=MicrosoftDNS,%s,%s" % (dnsdomain, prefix, domaindn)))
+    msg["objectClass"] = ["top", "dnsZone"]
+    # NOTE(review): the dict key here is "ntSecurityDescriptor" while the
+    # MessageElement name is "nTSecurityDescriptor" — presumably harmless
+    # because ldb attribute names are case-insensitive; confirm.
+    msg["ntSecurityDescriptor"] = \
+        ldb.MessageElement(ndr_pack(sec),
+                           ldb.FLAG_MOD_ADD,
+                           "nTSecurityDescriptor")
+    msg["dNSProperty"] = ldb.MessageElement(props, ldb.FLAG_MOD_ADD, "dNSProperty")
+    samdb.add(msg)
+
+
+def add_msdcs_record(samdb, forestdn, prefix, dnsforest):
+    """Create the bare _msdcs.<DNSFOREST> dnsZone object in the forest NC."""
+    # DC=_msdcs.<DNSFOREST>,CN=MicrosoftDNS,<PREFIX>,<FORESTDN>
+    msg = ldb.Message(ldb.Dn(samdb, "DC=_msdcs.%s,CN=MicrosoftDNS,%s,%s" %
+                             (dnsforest, prefix, forestdn)))
+    msg["objectClass"] = ["top", "dnsZone"]
+    samdb.add(msg)
+
+
+def add_dc_domain_records(samdb, domaindn, prefix, site, dnsdomain, hostname,
+                          hostip, hostip6):
+    """Populate the domain zone with the DC locator records for this DC.
+
+    Adds the @ node, the host A/AAAA node, and the _kerberos/_kpasswd/_ldap/
+    _gc SRV records (plain and per-site), plus DomainDnsZones/ForestDnsZones
+    entries — assuming this DC is also a GC (see FIXMEs below).
+
+    :param site: Site name used for the per-site SRV record names.
+    :param hostip: IPv4 address or None.
+    :param hostip6: IPv6 address or None.
+    """
+
+    fqdn_hostname = "%s.%s" % (hostname, dnsdomain)
+
+    # Set up domain container - DC=<DNSDOMAIN>,CN=MicrosoftDNS,<PREFIX>,<DOMAINDN>
+    domain_container_dn = ldb.Dn(samdb, "DC=%s,CN=MicrosoftDNS,%s,%s" %
+                                 (dnsdomain, prefix, domaindn))
+
+    # DC=@ record
+    add_at_record(samdb, domain_container_dn, "DC=@", hostname, dnsdomain,
+                  hostip, hostip6)
+
+    # DC=<HOSTNAME> record
+    add_host_record(samdb, domain_container_dn, "DC=%s" % hostname, hostip,
+                    hostip6)
+
+    # DC=_kerberos._tcp record
+    add_srv_record(samdb, domain_container_dn, "DC=_kerberos._tcp",
+                   fqdn_hostname, 88)
+
+    # DC=_kerberos._tcp.<SITENAME>._sites record
+    add_srv_record(samdb, domain_container_dn, "DC=_kerberos._tcp.%s._sites" %
+                   site, fqdn_hostname, 88)
+
+    # DC=_kerberos._udp record
+    add_srv_record(samdb, domain_container_dn, "DC=_kerberos._udp",
+                   fqdn_hostname, 88)
+
+    # DC=_kpasswd._tcp record
+    add_srv_record(samdb, domain_container_dn, "DC=_kpasswd._tcp",
+                   fqdn_hostname, 464)
+
+    # DC=_kpasswd._udp record
+    add_srv_record(samdb, domain_container_dn, "DC=_kpasswd._udp",
+                   fqdn_hostname, 464)
+
+    # DC=_ldap._tcp record
+    add_srv_record(samdb, domain_container_dn, "DC=_ldap._tcp", fqdn_hostname,
+                   389)
+
+    # DC=_ldap._tcp.<SITENAME>._sites record
+    add_srv_record(samdb, domain_container_dn, "DC=_ldap._tcp.%s._sites" %
+                   site, fqdn_hostname, 389)
+
+    # FIXME: The number of SRV records depend on the various roles this DC has.
+    # _gc and _msdcs records are added if the we are the forest dc and not subdomain dc
+    #
+    # Assumption: current DC is GC and add all the entries
+
+    # DC=_gc._tcp record
+    add_srv_record(samdb, domain_container_dn, "DC=_gc._tcp", fqdn_hostname,
+                   3268)
+
+    # DC=_gc._tcp.<SITENAME>,_sites record
+    add_srv_record(samdb, domain_container_dn, "DC=_gc._tcp.%s._sites" % site,
+                   fqdn_hostname, 3268)
+
+    # DC=_msdcs record
+    add_ns_glue_record(samdb, domain_container_dn, "DC=_msdcs", fqdn_hostname)
+
+    # FIXME: Following entries are added only if DomainDnsZones and ForestDnsZones partitions
+    # are created
+    #
+    # Assumption: Additional entries won't hurt on os_level = 2000
+
+    # DC=_ldap._tcp.<SITENAME>._sites.DomainDnsZones
+    add_srv_record(samdb, domain_container_dn,
+                   "DC=_ldap._tcp.%s._sites.DomainDnsZones" % site, fqdn_hostname,
+                   389)
+
+    # DC=_ldap._tcp.<SITENAME>._sites.ForestDnsZones
+    add_srv_record(samdb, domain_container_dn,
+                   "DC=_ldap._tcp.%s._sites.ForestDnsZones" % site, fqdn_hostname,
+                   389)
+
+    # DC=_ldap._tcp.DomainDnsZones
+    add_srv_record(samdb, domain_container_dn, "DC=_ldap._tcp.DomainDnsZones",
+                   fqdn_hostname, 389)
+
+    # DC=_ldap._tcp.ForestDnsZones
+    add_srv_record(samdb, domain_container_dn, "DC=_ldap._tcp.ForestDnsZones",
+                   fqdn_hostname, 389)
+
+    # DC=DomainDnsZones
+    add_host_record(samdb, domain_container_dn, "DC=DomainDnsZones", hostip,
+                    hostip6)
+
+    # DC=ForestDnsZones
+    add_host_record(samdb, domain_container_dn, "DC=ForestDnsZones", hostip,
+                    hostip6)
+
+
+def add_dc_msdcs_records(samdb, forestdn, prefix, site, dnsforest, hostname,
+                         hostip, hostip6, domainguid, ntdsguid):
+    """Populate the _msdcs.<DNSFOREST> zone with forest-wide locator records.
+
+    Adds the @ node, dc/gc/pdc SRV records (plain and per-site), the
+    DC=gc host node, the per-domain-GUID SRV record and the NTDS-GUID
+    CNAME alias for this DC.
+
+    :param domainguid: Domain objectGUID string for the .domains SRV record.
+    :param ntdsguid: NTDS settings objectGUID string for the CNAME node.
+    """
+
+    fqdn_hostname = "%s.%s" % (hostname, dnsforest)
+
+    # Set up forest container - DC=<DNSDOMAIN>,CN=MicrosoftDNS,<PREFIX>,<DOMAINDN>
+    forest_container_dn = ldb.Dn(samdb, "DC=_msdcs.%s,CN=MicrosoftDNS,%s,%s" %
+                                 (dnsforest, prefix, forestdn))
+
+    # DC=@ record
+    add_at_record(samdb, forest_container_dn, "DC=@", hostname, dnsforest,
+                  None, None)
+
+    # DC=_kerberos._tcp.dc record
+    add_srv_record(samdb, forest_container_dn, "DC=_kerberos._tcp.dc",
+                   fqdn_hostname, 88)
+
+    # DC=_kerberos._tcp.<SITENAME>._sites.dc record
+    add_srv_record(samdb, forest_container_dn,
+                   "DC=_kerberos._tcp.%s._sites.dc" % site, fqdn_hostname, 88)
+
+    # DC=_ldap._tcp.dc record
+    add_srv_record(samdb, forest_container_dn, "DC=_ldap._tcp.dc",
+                   fqdn_hostname, 389)
+
+    # DC=_ldap._tcp.<SITENAME>._sites.dc record
+    add_srv_record(samdb, forest_container_dn, "DC=_ldap._tcp.%s._sites.dc" %
+                   site, fqdn_hostname, 389)
+
+    # DC=_ldap._tcp.<SITENAME>._sites.gc record
+    add_srv_record(samdb, forest_container_dn, "DC=_ldap._tcp.%s._sites.gc" %
+                   site, fqdn_hostname, 3268)
+
+    # DC=_ldap._tcp.gc record
+    add_srv_record(samdb, forest_container_dn, "DC=_ldap._tcp.gc",
+                   fqdn_hostname, 3268)
+
+    # DC=_ldap._tcp.pdc record
+    add_srv_record(samdb, forest_container_dn, "DC=_ldap._tcp.pdc",
+                   fqdn_hostname, 389)
+
+    # DC=gc record
+    add_host_record(samdb, forest_container_dn, "DC=gc", hostip, hostip6)
+
+    # DC=_ldap._tcp.<DOMAINGUID>.domains record
+    add_srv_record(samdb, forest_container_dn,
+                   "DC=_ldap._tcp.%s.domains" % domainguid, fqdn_hostname, 389)
+
+    # DC=<NTDSGUID>
+    add_cname_record(samdb, forest_container_dn, "DC=%s" % ntdsguid,
+                     fqdn_hostname)
+
+
+def secretsdb_setup_dns(secretsdb, names, private_dir, binddns_dir, realm,
+                        dnsdomain, dns_keytab_path, dnspass, key_version_number):
+    """Add DNS specific bits to a secrets database.
+
+    :param secretsdb: Ldb Handle to the secrets database
+    :param names: Names shortcut
+    :param private_dir: Private directory holding dns.keytab
+    :param binddns_dir: BIND DNS directory holding a dns.keytab copy
+    :param realm: Kerberos realm
+    :param dnsdomain: DNS domain name
+    :param dns_keytab_path: Keytab path relative to the two directories
+    :param dnspass: DNS account password (stored base64-encoded)
+    :param key_version_number: kvno for the keytab, defaults to 1 when None
+    """
+    try:
+        # NOTE(review): if the first unlink raises, the second is skipped —
+        # the binddns copy would then survive; confirm this is acceptable.
+        os.unlink(os.path.join(private_dir, dns_keytab_path))
+        os.unlink(os.path.join(binddns_dir, dns_keytab_path))
+    except OSError:
+        pass
+
+    if key_version_number is None:
+        key_version_number = 1
+
+    # This will create the dns.keytab file in the private_dir when it is
+    # committed!
+    setup_ldb(secretsdb, setup_path("secrets_dns.ldif"), {
+        "REALM": realm,
+        "DNSDOMAIN": dnsdomain,
+        "DNS_KEYTAB": dns_keytab_path,
+        "DNSPASS_B64": b64encode(dnspass.encode('utf-8')).decode('utf8'),
+        "KEY_VERSION_NUMBER": str(key_version_number),
+        "HOSTNAME": names.hostname,
+        "DNSNAME": '%s.%s' % (
+            names.netbiosname.lower(), names.dnsdomain.lower())
+    })
+
+
+def create_dns_dir(logger, paths):
+    """(Re)create the DNS directory and chown it to bind.
+
+    :param logger: Logger object
+    :param paths: paths object
+    """
+    dns_dir = os.path.dirname(paths.dns)
+
+    try:
+        # NOTE(review): ignore_errors=True already suppresses failures, so
+        # this except clause appears redundant; kept for byte-compatibility.
+        shutil.rmtree(dns_dir, True)
+    except OSError:
+        pass
+
+    # Group-writable so the bind group (chowned below) can use the directory.
+    os.mkdir(dns_dir, 0o770)
+
+    if paths.bind_gid is not None:
+        try:
+            os.chown(dns_dir, -1, paths.bind_gid)
+            # chmod needed to cope with umask
+            os.chmod(dns_dir, 0o770)
+        except OSError:
+            # Under selftest we don't run as root, so failure is expected.
+            if 'SAMBA_SELFTEST' not in os.environ:
+                logger.error("Failed to chown %s to bind gid %u" % (
+                    dns_dir, paths.bind_gid))
+
+
def create_dns_dir_keytab_link(logger, paths):
    """Create link for BIND to DNS keytab

    Hard-links the dns.keytab from the private directory into the bind-dns
    directory, and (when a bind gid is known) makes both group-accessible.

    :param logger: Logger object
    :param paths: paths object
    """
    private_dns_keytab_path = os.path.join(paths.private_dir, paths.dns_keytab)
    bind_dns_keytab_path = os.path.join(paths.binddns_dir, paths.dns_keytab)

    if os.path.isfile(private_dns_keytab_path):
        # Remove any stale target first so the os.link below cannot fail
        # with EEXIST.
        if os.path.isfile(bind_dns_keytab_path):
            try:
                os.unlink(bind_dns_keytab_path)
            except OSError as e:
                logger.error("Failed to remove %s: %s" %
                             (bind_dns_keytab_path, e.strerror))

        # link the dns.keytab to the bind-dns directory
        try:
            os.link(private_dns_keytab_path, bind_dns_keytab_path)
        except OSError as e:
            logger.error("Failed to create link %s -> %s: %s" %
                         (private_dns_keytab_path, bind_dns_keytab_path, e.strerror))

        # chown the dns.keytab in the bind-dns directory
        if paths.bind_gid is not None:
            try:
                os.chmod(paths.binddns_dir, 0o770)
                os.chown(paths.binddns_dir, -1, paths.bind_gid)
            except OSError:
                # Expected when not running as root (e.g. selftest).
                if 'SAMBA_SELFTEST' not in os.environ:
                    logger.info("Failed to chown %s to bind gid %u",
                                paths.binddns_dir, paths.bind_gid)
            try:
                os.chmod(bind_dns_keytab_path, 0o640)
                os.chown(bind_dns_keytab_path, -1, paths.bind_gid)
            except OSError:
                if 'SAMBA_SELFTEST' not in os.environ:
                    logger.info("Failed to chown %s to bind gid %u",
                                bind_dns_keytab_path, paths.bind_gid)
+
+
def create_zone_file(logger, paths, dnsdomain,
                     hostip, hostip6, hostname, realm, domainguid,
                     ntdsguid, site):
    """Write out a DNS zone file, from the info in the current database.

    :param logger: Logger object
    :param paths: paths object
    :param dnsdomain: DNS Domain name
    :param hostip: Local IPv4 IP (or None)
    :param hostip6: Local IPv6 IP (or None)
    :param hostname: Local hostname
    :param realm: Realm name
    :param domainguid: GUID of the domain.
    :param ntdsguid: GUID of the hosts nTDSDSA record.
    :param site: Site name the host records are created in
    """
    assert isinstance(domainguid, str)

    # Only emit A/AAAA lines for the address families we actually have;
    # an empty substitution drops the line from the template output.
    if hostip6 is not None:
        hostip6_base_line = "            IN AAAA    " + hostip6
        hostip6_host_line = hostname + "        IN AAAA    " + hostip6
        gc_msdcs_ip6_line = "gc._msdcs               IN AAAA    " + hostip6
    else:
        hostip6_base_line = ""
        hostip6_host_line = ""
        gc_msdcs_ip6_line = ""

    if hostip is not None:
        hostip_base_line = "            IN A    " + hostip
        hostip_host_line = hostname + "        IN A    " + hostip
        gc_msdcs_ip_line = "gc._msdcs               IN A    " + hostip
    else:
        hostip_base_line = ""
        hostip_host_line = ""
        gc_msdcs_ip_line = ""

    setup_file(setup_path("provision.zone"), paths.dns, {
        "HOSTNAME": hostname,
        "DNSDOMAIN": dnsdomain,
        "REALM": realm,
        "HOSTIP_BASE_LINE": hostip_base_line,
        "HOSTIP_HOST_LINE": hostip_host_line,
        "DOMAINGUID": domainguid,
        # Used as the zone serial number, so it changes every hour.
        "DATESTRING": time.strftime("%Y%m%d%H"),
        "DEFAULTSITE": site,
        "NTDSGUID": ntdsguid,
        "HOSTIP6_BASE_LINE": hostip6_base_line,
        "HOSTIP6_HOST_LINE": hostip6_host_line,
        "GC_MSDCS_IP_LINE": gc_msdcs_ip_line,
        "GC_MSDCS_IP6_LINE": gc_msdcs_ip6_line,
    })

    if paths.bind_gid is not None:
        try:
            os.chown(paths.dns, -1, paths.bind_gid)
            # chmod needed to cope with umask
            os.chmod(paths.dns, 0o664)
        except OSError:
            if 'SAMBA_SELFTEST' not in os.environ:
                logger.error("Failed to chown %s to bind gid %u" % (
                    paths.dns, paths.bind_gid))
+
+
def create_samdb_copy(samdb, logger, paths, names, domainsid, domainguid):
    """Create a copy of samdb and give write permissions to named for dns partitions

    :param samdb: LDB object connected to sam.ldb
    :param logger: Logger object
    :param paths: paths object
    :param names: Names shortcut
    :param domainsid: Domain SID
    :param domainguid: Domain GUID (as a string)
    """
    private_dir = paths.private_dir
    samldb_dir = os.path.join(private_dir, "sam.ldb.d")
    dns_dir = os.path.dirname(paths.dns)
    dns_samldb_dir = os.path.join(dns_dir, "sam.ldb.d")

    # Find the partitions and corresponding filenames
    partfile = {}
    res = samdb.search(base="@PARTITION",
                       scope=ldb.SCOPE_BASE,
                       attrs=["partition", "backendStore"])
    for tmp in res[0]["partition"]:
        # Each value looks like "<NC DN>:<filename>".
        (nc, fname) = str(tmp).split(':')
        partfile[nc.upper()] = fname

    backend_store = get_default_backend_store()
    if "backendStore" in res[0]:
        backend_store = str(res[0]["backendStore"][0])

    # Create empty domain partition

    domaindn = names.domaindn.upper()
    domainpart_file = os.path.join(dns_dir, partfile[domaindn])
    # NOTE(review): bare except used below so that even BaseException gets
    # logged before re-raising.
    try:
        os.mkdir(dns_samldb_dir)
        open(domainpart_file, 'w').close()

        # Fill the basedn and @OPTION records in domain partition
        dom_url = "%s://%s" % (backend_store, domainpart_file)
        dom_ldb = samba.Ldb(dom_url)

        # We need the dummy main-domain DB to have the correct @INDEXLIST
        index_res = samdb.search(base="@INDEXLIST", scope=ldb.SCOPE_BASE)
        dom_ldb.add(index_res[0])

        domainguid_line = "objectGUID: %s\n-" % domainguid
        descr = b64encode(get_domain_descriptor(domainsid)).decode('utf8')
        setup_add_ldif(dom_ldb, setup_path("provision_basedn.ldif"), {
            "DOMAINDN": names.domaindn,
            "DOMAINGUID": domainguid_line,
            "DOMAINSID": str(domainsid),
            "DESCRIPTOR": descr})
        setup_add_ldif(dom_ldb,
                       setup_path("provision_basedn_options.ldif"), None)

    except:
        logger.error(
            "Failed to setup database for BIND, AD based DNS cannot be used")
        raise

    # This line is critical to the security of the whole scheme.
    # We assume there is no secret data in the (to be left out of
    # date and essentially read-only) config, schema and metadata partitions.
    #
    # Only the stub of the domain partition is created above.
    #
    # That way, things like the krbtgt key do not leak.
    del partfile[domaindn]

    # Link dns partitions and metadata
    domainzonedn = "DC=DOMAINDNSZONES,%s" % names.domaindn.upper()
    forestzonedn = "DC=FORESTDNSZONES,%s" % names.rootdn.upper()

    domainzone_file = partfile[domainzonedn]
    forestzone_file = partfile.get(forestzonedn)

    metadata_file = "metadata.tdb"
    try:
        os.link(os.path.join(samldb_dir, metadata_file),
                os.path.join(dns_samldb_dir, metadata_file))
        os.link(os.path.join(private_dir, domainzone_file),
                os.path.join(dns_dir, domainzone_file))
        if backend_store == "mdb":
            # If the file is an lmdb data file need to link the
            # lock file as well
            os.link(os.path.join(private_dir, domainzone_file + "-lock"),
                    os.path.join(dns_dir, domainzone_file + "-lock"))
        if forestzone_file:
            os.link(os.path.join(private_dir, forestzone_file),
                    os.path.join(dns_dir, forestzone_file))
            if backend_store == "mdb":
                # If the database file is an lmdb data file need to link the
                # lock file as well
                os.link(os.path.join(private_dir, forestzone_file + "-lock"),
                        os.path.join(dns_dir, forestzone_file + "-lock"))
    except OSError:
        logger.error(
            "Failed to setup database for BIND, AD based DNS cannot be used")
        raise
    # The zone partitions are hard-linked, not copied, so remove them from
    # the set of partitions that still need copying below.
    del partfile[domainzonedn]
    if forestzone_file:
        del partfile[forestzonedn]

    # Copy root, config, schema partitions (and any other if any)
    # Since samdb is open in the current process, copy them in a child process
    try:
        tdb_copy(os.path.join(private_dir, "sam.ldb"),
                 os.path.join(dns_dir, "sam.ldb"))
        for nc in partfile:
            pfile = partfile[nc]
            if backend_store == "mdb":
                mdb_copy(os.path.join(private_dir, pfile),
                         os.path.join(dns_dir, pfile))
            else:
                tdb_copy(os.path.join(private_dir, pfile),
                         os.path.join(dns_dir, pfile))
    except:
        logger.error(
            "Failed to setup database for BIND, AD based DNS cannot be used")
        raise

    # Give bind read/write permissions dns partitions
    if paths.bind_gid is not None:
        try:
            for dirname, dirs, files in os.walk(dns_dir):
                for d in dirs:
                    dpath = os.path.join(dirname, d)
                    os.chown(dpath, -1, paths.bind_gid)
                    os.chmod(dpath, 0o770)
                for f in files:
                    if f.endswith(('.ldb', '.tdb', 'ldb-lock')):
                        fpath = os.path.join(dirname, f)
                        os.chown(fpath, -1, paths.bind_gid)
                        os.chmod(fpath, 0o660)
        except OSError:
            if 'SAMBA_SELFTEST' not in os.environ:
                logger.error(
                    "Failed to set permissions to sam.ldb* files, fix manually")
    else:
        if 'SAMBA_SELFTEST' not in os.environ:
            logger.warning("""Unable to find group id for BIND,
                set permissions to sam.ldb* files manually""")
+
+
def create_dns_update_list(paths):
    """Write out the dns_update_list and spn_update_list files.

    No variable substitution is applied here; samba_dnsupdate and
    samba_spnupdate substitute at runtime.

    :param paths: paths object
    """
    for template, target in (("dns_update_list", paths.dns_update_list),
                             ("spn_update_list", paths.spn_update_list)):
        setup_file(setup_path(template), target, None)
+
+
def create_named_conf(paths, realm, dnsdomain, dns_backend, logger):
    """Write out a file containing zone statements suitable for inclusion in a
    named.conf file (including GSS-TSIG configuration).

    :param paths: all paths
    :param realm: Realm name
    :param dnsdomain: DNS Domain name
    :param dns_backend: DNS backend type ("BIND9_FLATFILE" or "BIND9_DLZ")
    :param logger: Logger object
    """

    # TODO: This really should have been done as a top level import.
    # It is done here to avoid a dependency loop. That is, we move
    # ProvisioningError to another file, and have all the provision
    # scripts import it from there.

    from samba.provision import ProvisioningError

    if dns_backend == "BIND9_FLATFILE":
        setup_file(setup_path("named.conf"), paths.namedconf, {
            "DNSDOMAIN": dnsdomain,
            "REALM": realm,
            "ZONE_FILE": paths.dns,
            # Wildcard for the realm minus its first label, e.g.
            # "*.example.com" for realm "dc1.example.com".
            "REALM_WC": "*." + ".".join(realm.split(".")[1:]),
            "NAMED_CONF": paths.namedconf,
            "NAMED_CONF_UPDATE": paths.namedconf_update
        })

        setup_file(setup_path("named.conf.update"), paths.namedconf_update)

    elif dns_backend == "BIND9_DLZ":
        # Probe the installed BIND version; the command is a fixed string,
        # so shell=True is safe here.
        bind_info = subprocess.Popen(['named -V'], shell=True,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.STDOUT,
                                     cwd='.').communicate()[0]
        bind_info = get_string(bind_info)
        # Each BIND9_* substitution is "#" (comment the line out) unless
        # that exact version was detected, in which case it becomes "".
        bind9_8 = '#'
        bind9_9 = '#'
        bind9_10 = '#'
        bind9_11 = '#'
        bind9_12 = '#'
        bind9_14 = '#'
        bind9_16 = '#'
        bind9_18 = '#'
        if bind_info.upper().find('BIND 9.8') != -1:
            bind9_8 = ''
        elif bind_info.upper().find('BIND 9.9') != -1:
            bind9_9 = ''
        elif bind_info.upper().find('BIND 9.10') != -1:
            bind9_10 = ''
        elif bind_info.upper().find('BIND 9.11') != -1:
            bind9_11 = ''
        elif bind_info.upper().find('BIND 9.12') != -1:
            bind9_12 = ''
        elif bind_info.upper().find('BIND 9.14') != -1:
            bind9_14 = ''
        elif bind_info.upper().find('BIND 9.16') != -1:
            bind9_16 = ''
        elif bind_info.upper().find('BIND 9.18') != -1:
            bind9_18 = ''
        elif bind_info.upper().find('BIND 9.7') != -1:
            raise ProvisioningError("DLZ option incompatible with BIND 9.7.")
        elif bind_info.upper().find('BIND_9.13') != -1:
            raise ProvisioningError("Only stable/esv releases of BIND are supported.")
        elif bind_info.upper().find('BIND_9.15') != -1:
            raise ProvisioningError("Only stable/esv releases of BIND are supported.")
        elif bind_info.upper().find('BIND_9.17') != -1:
            raise ProvisioningError("Only stable/esv releases of BIND are supported.")
        else:
            logger.warning("BIND version unknown, please modify %s manually." % paths.namedconf)
        setup_file(setup_path("named.conf.dlz"), paths.namedconf, {
            "NAMED_CONF": paths.namedconf,
            "MODULESDIR": samba.param.modules_dir(),
            "BIND9_8": bind9_8,
            "BIND9_9": bind9_9,
            "BIND9_10": bind9_10,
            "BIND9_11": bind9_11,
            "BIND9_12": bind9_12,
            "BIND9_14": bind9_14,
            "BIND9_16": bind9_16,
            "BIND9_18": bind9_18
        })
+
+
def create_named_txt(path, realm, dnsdomain, dnsname, binddns_dir,
                     keytab_name):
    """Write out a file containing zone statements suitable for inclusion in a
    named.conf file (including GSS-TSIG configuration).

    :param path: Path of the new named.txt file.
    :param realm: Realm name
    :param dnsdomain: DNS Domain name
    :param dnsname: Fully-qualified DNS name of this host
    :param binddns_dir: Path to bind dns directory
    :param keytab_name: File name of DNS keytab file
    """
    setup_file(setup_path("named.txt"), path, {
        "DNSDOMAIN": dnsdomain,
        "DNSNAME": dnsname,
        "REALM": realm,
        "DNS_KEYTAB": keytab_name,
        "DNS_KEYTAB_ABS": os.path.join(binddns_dir, keytab_name),
        "PRIVATE_DIR": binddns_dir
    })
+
+
def is_valid_dns_backend(dns_backend):
    """Return True if dns_backend names a supported DNS backend."""
    supported = ("BIND9_FLATFILE", "BIND9_DLZ", "SAMBA_INTERNAL", "NONE")
    return any(dns_backend == backend for backend in supported)
+
+
def is_valid_os_level(os_level):
    """Return True if os_level is a supported domain functional level."""
    return (DS_DOMAIN_FUNCTION_2000 <= os_level and
            os_level <= DS_DOMAIN_FUNCTION_2016)
+
+
def create_dns_legacy(samdb, domainsid, forestdn, dnsadmins_sid):
    """Create the pre-2003 style CN=MicrosoftDNS,CN=System container.

    :param samdb: LDB object connected to sam.ldb
    :param domainsid: Domain SID
    :param forestdn: DN of the forest
    :param dnsadmins_sid: SID for DnsAdmins group
    """
    # Set up MicrosoftDNS container
    add_dns_container(samdb, forestdn, "CN=System", domainsid, dnsadmins_sid)
    # Add root servers
    add_rootservers(samdb, forestdn, "CN=System")
+
+
def fill_dns_data_legacy(samdb, domainsid, forestdn, dnsdomain, site, hostname,
                         hostip, hostip6, dnsadmins_sid):
    """Populate the legacy CN=MicrosoftDNS,CN=System container.

    :param samdb: LDB object connected to sam.ldb
    :param domainsid: Domain SID
    :param forestdn: DN of the forest
    :param dnsdomain: DNS name of the domain
    :param site: Site name to create hostnames in
    :param hostname: Host name of this DC
    :param hostip: IPv4 address (or None)
    :param hostip6: IPv6 address (or None)
    :param dnsadmins_sid: SID for DnsAdmins group
    """
    # Add domain record
    add_domain_record(samdb, forestdn, "CN=System", dnsdomain, domainsid,
                      dnsadmins_sid)

    # Add DNS records for a DC in domain
    add_dc_domain_records(samdb, forestdn, "CN=System", site, dnsdomain,
                          hostname, hostip, hostip6)
+
+
def create_dns_partitions(samdb, domainsid, names, domaindn, forestdn,
                          dnsadmins_sid, fill_level):
    """Create the DNS application partitions and their MicrosoftDNS containers.

    :param samdb: LDB object connected to sam.ldb
    :param domainsid: Domain SID
    :param names: Names shortcut
    :param domaindn: DN of the domain
    :param forestdn: DN of the forest
    :param dnsadmins_sid: SID for DnsAdmins group
    :param fill_level: FILL_* level; ForestDnsZones is skipped for
        FILL_SUBDOMAIN (a subdomain shares the forest partition)
    """
    # Set up additional partitions (DomainDnsZones, ForestDnsZones)
    setup_dns_partitions(samdb, domainsid, domaindn, forestdn,
                         names.configdn, names.serverdn, fill_level)

    # Set up MicrosoftDNS containers
    add_dns_container(samdb, domaindn, "DC=DomainDnsZones", domainsid,
                      dnsadmins_sid)
    if fill_level != FILL_SUBDOMAIN:
        add_dns_container(samdb, forestdn, "DC=ForestDnsZones", domainsid,
                          dnsadmins_sid, forest=True)
+
+
def fill_dns_data_partitions(samdb, domainsid, site, domaindn, forestdn,
                             dnsdomain, dnsforest, hostname, hostip, hostip6,
                             domainguid, ntdsguid, dnsadmins_sid, autofill=True,
                             fill_level=FILL_FULL, add_root=True):
    """Fill data in various AD partitions

    :param samdb: LDB object connected to sam.ldb file
    :param domainsid: Domain SID (as dom_sid object)
    :param site: Site name to create hostnames in
    :param domaindn: DN of the domain
    :param forestdn: DN of the forest
    :param dnsdomain: DNS name of the domain
    :param dnsforest: DNS name of the forest
    :param hostname: Host name of this DC
    :param hostip: IPv4 addresses
    :param hostip6: IPv6 addresses
    :param domainguid: Domain GUID
    :param ntdsguid: NTDS GUID
    :param dnsadmins_sid: SID for DnsAdmins group
    :param autofill: Create DNS records (using fixed template)
    :param fill_level: FILL_* level; forest records are skipped for
        FILL_SUBDOMAIN
    :param add_root: Whether to add root server records to DomainDnsZones
    """

    # Set up DC=DomainDnsZones,<DOMAINDN>
    # Add rootserver records
    if add_root:
        add_rootservers(samdb, domaindn, "DC=DomainDnsZones")

    # Add domain record
    add_domain_record(samdb, domaindn, "DC=DomainDnsZones", dnsdomain,
                      domainsid, dnsadmins_sid)

    # Add DNS records for a DC in domain
    if autofill:
        add_dc_domain_records(samdb, domaindn, "DC=DomainDnsZones", site,
                              dnsdomain, hostname, hostip, hostip6)

    if fill_level != FILL_SUBDOMAIN:
        # Set up DC=ForestDnsZones,<FORESTDN>
        # Add _msdcs record
        add_msdcs_record(samdb, forestdn, "DC=ForestDnsZones", dnsforest)

        # Add DNS records for a DC in forest
        if autofill:
            add_dc_msdcs_records(samdb, forestdn, "DC=ForestDnsZones", site,
                                 dnsforest, hostname, hostip, hostip6,
                                 domainguid, ntdsguid)
+
+
def setup_ad_dns(samdb, secretsdb, names, paths, logger,
                 dns_backend, os_level, dnspass=None, hostip=None, hostip6=None,
                 fill_level=FILL_FULL):
    """Provision DNS information (assuming GC role)

    :param samdb: LDB object connected to sam.ldb file
    :param secretsdb: LDB object connected to secrets.ldb file
    :param names: Names shortcut
    :param paths: Paths shortcut
    :param logger: Logger object
    :param dns_backend: Type of DNS backend
    :param os_level: Functional level (treated as os level)
    :param dnspass: Password for bind's DNS account
    :param hostip: IPv4 address
    :param hostip6: IPv6 address
    :param fill_level: FILL_* level, passed to the partition helpers
    :raises Exception: for an invalid dns_backend or os_level
    """

    if not is_valid_dns_backend(dns_backend):
        raise Exception("Invalid dns backend: %r" % dns_backend)

    if not is_valid_os_level(os_level):
        raise Exception("Invalid os level: %r" % os_level)

    if dns_backend == "NONE":
        logger.info("No DNS backend set, not configuring DNS")
        return

    # Add dns accounts (DnsAdmins, DnsUpdateProxy) in domain
    logger.info("Adding DNS accounts")
    add_dns_accounts(samdb, names.domaindn)

    # If dns_backend is BIND9_FLATFILE
    #   Populate only CN=MicrosoftDNS,CN=System,<DOMAINDN>
    #
    # If dns_backend is SAMBA_INTERNAL or BIND9_DLZ
    #   Populate DNS partitions

    # If os_level < 2003 (DS_DOMAIN_FUNCTION_2000)
    #   All dns records are in CN=MicrosoftDNS,CN=System,<DOMAINDN>
    #
    # If os_level >= 2003 (DS_DOMAIN_FUNCTION_2003, DS_DOMAIN_FUNCTION_2008,
    #                        DS_DOMAIN_FUNCTION_2008_R2)
    #   Root server records are in CN=MicrosoftDNS,CN=System,<DOMAINDN>
    #   Domain records are in CN=MicrosoftDNS,CN=System,<DOMAINDN>
    #   Domain records are in CN=MicrosoftDNS,DC=DomainDnsZones,<DOMAINDN>
    #   Forest records are in CN=MicrosoftDNS,DC=ForestDnsZones,<FORESTDN>
    domaindn = names.domaindn
    forestdn = samdb.get_root_basedn().get_linearized()

    dnsdomain = names.dnsdomain.lower()
    dnsforest = dnsdomain

    site = names.sitename

    hostname = names.netbiosname.lower()

    dnsadmins_sid = get_dnsadmins_sid(samdb, domaindn)
    domainguid = get_domainguid(samdb, domaindn)

    # All in-database changes happen in one transaction, rolled back on any
    # error.
    samdb.transaction_start()
    try:
        # Create CN=System
        logger.info("Creating CN=MicrosoftDNS,CN=System,%s" % domaindn)
        create_dns_legacy(samdb, names.domainsid, domaindn, dnsadmins_sid)

        if os_level == DS_DOMAIN_FUNCTION_2000:
            # Populating legacy dns
            logger.info("Populating CN=MicrosoftDNS,CN=System,%s" % domaindn)
            fill_dns_data_legacy(samdb, names.domainsid, domaindn, dnsdomain, site,
                                 hostname, hostip, hostip6, dnsadmins_sid)

        elif dns_backend in ("SAMBA_INTERNAL", "BIND9_DLZ") and \
                os_level >= DS_DOMAIN_FUNCTION_2003:

            # Create DNS partitions
            logger.info("Creating DomainDnsZones and ForestDnsZones partitions")
            create_dns_partitions(samdb, names.domainsid, names, domaindn, forestdn,
                                  dnsadmins_sid, fill_level)

            # Populating dns partitions
            logger.info("Populating DomainDnsZones and ForestDnsZones partitions")
            fill_dns_data_partitions(samdb, names.domainsid, site, domaindn, forestdn,
                                     dnsdomain, dnsforest, hostname, hostip, hostip6,
                                     domainguid, names.ntdsguid, dnsadmins_sid,
                                     fill_level=fill_level)

    except:
        samdb.transaction_cancel()
        raise
    else:
        samdb.transaction_commit()

    # BIND backends additionally need files on disk (keytab, configs, copies).
    if dns_backend.startswith("BIND9_"):
        setup_bind9_dns(samdb, secretsdb, names, paths, logger,
                        dns_backend, os_level, site=site, dnspass=dnspass, hostip=hostip,
                        hostip6=hostip6)
+
+
def setup_bind9_dns(samdb, secretsdb, names, paths, logger,
                    dns_backend, os_level, site=None, dnspass=None, hostip=None,
                    hostip6=None, key_version_number=None):
    """Provision DNS information (assuming BIND9 backend in DC role)

    :param samdb: LDB object connected to sam.ldb file
    :param secretsdb: LDB object connected to secrets.ldb file
    :param names: Names shortcut
    :param paths: Paths shortcut
    :param logger: Logger object
    :param dns_backend: Type of DNS backend
    :param os_level: Functional level (treated as os level)
    :param site: Site to create hostnames in
    :param dnspass: Password for bind's DNS account
    :param hostip: IPv4 address
    :param hostip6: IPv6 address
    :param key_version_number: Keytab key version number (None for default)
    :raises Exception: for an invalid dns_backend or os_level
    """

    if (not is_valid_dns_backend(dns_backend) or
            not dns_backend.startswith("BIND9_")):
        raise Exception("Invalid dns backend: %r" % dns_backend)

    if not is_valid_os_level(os_level):
        raise Exception("Invalid os level: %r" % os_level)

    domaindn = names.domaindn

    domainguid = get_domainguid(samdb, domaindn)

    secretsdb_setup_dns(secretsdb, names,
                        paths.private_dir,
                        paths.binddns_dir,
                        realm=names.realm,
                        dnsdomain=names.dnsdomain,
                        dns_keytab_path=paths.dns_keytab, dnspass=dnspass,
                        key_version_number=key_version_number)

    create_dns_dir(logger, paths)
    create_dns_dir_keytab_link(logger, paths)

    # Flat files are only used by the BIND9_FLATFILE backend.
    if dns_backend == "BIND9_FLATFILE":
        create_zone_file(logger, paths, site=site,
                         dnsdomain=names.dnsdomain, hostip=hostip,
                         hostip6=hostip6, hostname=names.hostname,
                         realm=names.realm, domainguid=domainguid,
                         ntdsguid=names.ntdsguid)

    # The DLZ backend reads a dedicated copy of the database.
    if dns_backend == "BIND9_DLZ" and os_level >= DS_DOMAIN_FUNCTION_2003:
        create_samdb_copy(samdb, logger, paths,
                          names, names.domainsid, domainguid)

    create_named_conf(paths, realm=names.realm,
                      dnsdomain=names.dnsdomain, dns_backend=dns_backend,
                      logger=logger)

    create_named_txt(paths.namedtxt,
                     realm=names.realm, dnsdomain=names.dnsdomain,
                     dnsname="%s.%s" % (names.hostname, names.dnsdomain),
                     binddns_dir=paths.binddns_dir,
                     keytab_name=paths.dns_keytab)
    logger.info("See %s for an example configuration include file for BIND",
                paths.namedconf)
    logger.info("and %s for further documentation required for secure DNS "
                "updates", paths.namedtxt)
diff --git a/python/samba/remove_dc.py b/python/samba/remove_dc.py
new file mode 100644
index 0000000..6532138
--- /dev/null
+++ b/python/samba/remove_dc.py
@@ -0,0 +1,466 @@
+# Unix SMB/CIFS implementation.
+# Copyright Matthieu Patou <mat@matws.net> 2011
+# Copyright Andrew Bartlett <abartlet@samba.org> 2008-2015
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import uuid
+import ldb
+from ldb import LdbError
+from samba import werror
+from samba.ndr import ndr_unpack
+from samba.dcerpc import misc, dnsp
+from samba.dcerpc.dnsp import DNS_TYPE_NS, DNS_TYPE_A, DNS_TYPE_AAAA, \
+ DNS_TYPE_CNAME, DNS_TYPE_SRV, DNS_TYPE_PTR
+
+
class DemoteException(Exception):
    """Base exception for errors raised while demoting a DC."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return f"DemoteException: {self.value}"
+
+
def remove_sysvol_references(samdb, logger, dc_name):
    """Remove SYSVOL/FRS/DFSR references for a DC from the directory.

    :param samdb: LDB object connected to sam.ldb
    :param logger: Logger object
    :param dc_name: CN of the DC being removed
    :raises DemoteException: if a DN cannot be constructed
    """
    # DNs under the Configuration DN:
    realm = samdb.domain_dns_name()
    for s in ("CN=Enterprise,CN=Microsoft System Volumes,CN=System",
              "CN=%s,CN=Microsoft System Volumes,CN=System" % realm):
        dn = ldb.Dn(samdb, s)

        # This is verbose, but it is the safe, escape-proof way
        # to add a base and add an arbitrary RDN.
        try:
            dn.add_base(samdb.get_config_basedn())
        except ldb.LdbError:
            raise DemoteException("Failed constructing DN %s by adding base %s"
                                  % (dn, samdb.get_config_basedn()))
        try:
            # Placeholder child; the RDN value is set to the DC name below.
            dn.add_child("CN=X")
        except ldb.LdbError:
            raise DemoteException("Failed constructing DN %s by adding child CN=X"
                                  % (dn))
        dn.set_component(0, "CN", dc_name)
        try:
            logger.info("Removing Sysvol reference: %s" % dn)
            samdb.delete(dn)
        except ldb.LdbError as e:
            (enum, estr) = e.args
            # Already gone is fine; anything else is a real error.
            if enum == ldb.ERR_NO_SUCH_OBJECT:
                pass
            else:
                raise

    # DNs under the Domain DN:
    for s in ("CN=Domain System Volumes (SYSVOL share),CN=File Replication Service,CN=System",
              "CN=Topology,CN=Domain System Volume,CN=DFSR-GlobalSettings,CN=System"):
        # This is verbose, but it is the safe, escape-proof way
        # to add a base and add an arbitrary RDN.
        dn = ldb.Dn(samdb, s)
        try:
            dn.add_base(samdb.get_default_basedn())
        except ldb.LdbError:
            raise DemoteException("Failed constructing DN %s by adding base %s"
                                  % (dn, samdb.get_default_basedn()))
        try:
            dn.add_child("CN=X")
        except ldb.LdbError:
            raise DemoteException("Failed constructing DN %s by adding child "
                                  "CN=X (soon to be CN=%s)" % (dn, dc_name))
        dn.set_component(0, "CN", dc_name)

        try:
            logger.info("Removing Sysvol reference: %s" % dn)
            samdb.delete(dn)
        except ldb.LdbError as e1:
            (enum, estr) = e1.args
            if enum == ldb.ERR_NO_SUCH_OBJECT:
                pass
            else:
                raise
+
+
def remove_dns_references(samdb, logger, dnsHostName, ignore_no_name=False):
    """Remove in-database DNS records that refer to a demoted DC.

    :param samdb: LDB object connected to sam.ldb
    :param logger: Logger object
    :param dnsHostName: Fully qualified DNS host name of the DC
    :param ignore_no_name: if the DC's own record is absent, just clean up
        hanging references instead of raising
    :raises DemoteException: on DNS lookup failure
    """

    # Check we are using in-database DNS
    zones = samdb.search(base="", scope=ldb.SCOPE_SUBTREE,
                         expression="(&(objectClass=dnsZone)(!(dc=RootDNSServers)))",
                         attrs=[],
                         controls=["search_options:0:2"])
    if len(zones) == 0:
        return

    dnsHostNameUpper = dnsHostName.upper()

    try:
        (dn, primary_recs) = samdb.dns_lookup(dnsHostName)
    except RuntimeError as e4:
        (enum, estr) = e4.args
        if (enum == werror.WERR_DNS_ERROR_NAME_DOES_NOT_EXIST or
            enum == werror.WERR_DNS_ERROR_RCODE_NAME_ERROR):
            if ignore_no_name:
                remove_hanging_dns_references(samdb, logger,
                                              dnsHostNameUpper,
                                              zones)
            return
        raise DemoteException("lookup of %s failed: %s" % (dnsHostName, estr))
    # Drop all records directly on the DC's own name.
    samdb.dns_replace(dnsHostName, [])

    res = samdb.search("",
                       scope=ldb.SCOPE_BASE, attrs=["namingContexts"])
    assert len(res) == 1
    ncs = res[0]["namingContexts"]

    # Work out the set of names we will likely have an A record on by
    # default. This is by default all the partitions of type
    # domainDNS. By finding the canonical name of all the partitions,
    # we find the likely candidates. We only remove the record if it
    # matches the IP that was used by the dnsHostName. This avoids us
    # needing to look at a dns_update_list file from in the demote
    # script.

    def dns_name_from_dn(dn):
        # The canonical string of DC=example,DC=com is
        # example.com/
        #
        # The canonical string of CN=Configuration,DC=example,DC=com
        # is example.com/Configuration
        return ldb.Dn(samdb, dn).canonical_str().split('/', 1)[0]

    # By using a set here, duplicates via (eg) example.com/Configuration
    # do not matter, they become just example.com
    a_names_to_remove_from \
        = set(dns_name_from_dn(str(dn)) for dn in ncs)

    def a_rec_to_remove(dnsRecord):
        # Only remove A/AAAA records whose address matches one of the
        # demoted DC's own records.
        if dnsRecord.wType == DNS_TYPE_A or dnsRecord.wType == DNS_TYPE_AAAA:
            for rec in primary_recs:
                if rec.wType == dnsRecord.wType and rec.data == dnsRecord.data:
                    return True
        return False

    for a_name in a_names_to_remove_from:
        try:
            logger.debug("checking for DNS records to remove on %s" % a_name)
            (a_rec_dn, a_recs) = samdb.dns_lookup(a_name)
        except RuntimeError as e2:
            (enum, estr) = e2.args
            if enum == werror.WERR_DNS_ERROR_NAME_DOES_NOT_EXIST:
                # NOTE(review): this returns from the whole function,
                # skipping the remaining names and the hanging-reference
                # cleanup below — presumably intentional, but a `continue`
                # may have been meant; confirm before changing.
                return
            raise DemoteException("lookup of %s failed: %s" % (a_name, estr))

        orig_num_recs = len(a_recs)
        a_recs = [r for r in a_recs if not a_rec_to_remove(r)]

        if len(a_recs) != orig_num_recs:
            logger.info("updating %s keeping %d values, removing %s values" %
                        (a_name, len(a_recs), orig_num_recs - len(a_recs)))
            samdb.dns_replace(a_name, a_recs)

    remove_hanging_dns_references(samdb, logger, dnsHostNameUpper, zones)
+
+
def remove_hanging_dns_references(samdb, logger, dnsHostNameUpper, zones):
    """Strip NS, CNAME, PTR and SRV records that point at a removed DC.

    :param samdb: LDB object connected to sam.ldb
    :param logger: Logger object
    :param dnsHostNameUpper: Upper-cased DNS host name of the removed DC
    :param zones: search result listing the dnsZone objects to scan
    """

    # Find all the CNAME, NS, PTR and SRV records that point at the
    # name we are removing

    def to_remove(value):
        dnsRecord = ndr_unpack(dnsp.DnssrvRpcRecord, value)
        if dnsRecord.wType == DNS_TYPE_NS \
           or dnsRecord.wType == DNS_TYPE_CNAME \
           or dnsRecord.wType == DNS_TYPE_PTR:
            if dnsRecord.data.upper() == dnsHostNameUpper:
                return True
        elif dnsRecord.wType == DNS_TYPE_SRV:
            if dnsRecord.data.nameTarget.upper() == dnsHostNameUpper:
                return True
        return False

    for zone in zones:
        logger.debug("checking %s" % zone.dn)
        records = samdb.search(base=zone.dn, scope=ldb.SCOPE_SUBTREE,
                               expression="(&(objectClass=dnsNode)"
                               "(!(dNSTombstoned=TRUE)))",
                               attrs=["dnsRecord"])
        for record in records:
            try:
                orig_values = record["dnsRecord"]
            except KeyError:
                # dnsNode without records; nothing to strip.
                continue

            # Remove references to dnsHostName in A, AAAA, NS, CNAME and SRV
            values = [ndr_unpack(dnsp.DnssrvRpcRecord, v)
                      for v in orig_values if not to_remove(v)]

            if len(values) != len(orig_values):
                logger.info("updating %s keeping %d values, removing %s values"
                            % (record.dn, len(values),
                               len(orig_values) - len(values)))

                # This requires the values to be unpacked, so this
                # has been done in the list comprehension above
                samdb.dns_replace_by_dn(record.dn, values)
+
+
def offline_remove_server(samdb, logger,
                          server_dn,
                          remove_computer_obj=False,
                          remove_server_obj=False,
                          remove_sysvol_obj=False,
                          remove_dns_names=False,
                          remove_dns_account=False):
    """Remove a DC's server object and, optionally, related objects.

    :param samdb: LDB object connected to sam.ldb
    :param logger: Logger object
    :param server_dn: DN of the server object of the DC to remove
    :param remove_computer_obj: also tree-delete the computer account
    :param remove_server_obj: tree-delete the server object itself
    :param remove_sysvol_obj: also remove SYSVOL references for this DC
    :param remove_dns_names: also remove DNS records referring to this DC
    :param remove_dns_account: also remove the Samba-specific dns-<DC> account
    """
    # Confirm this is really a server object.
    # NOTE: raises IndexError if server_dn does not name a server object.
    msgs = samdb.search(base=server_dn,
                        attrs=["serverReference", "cn",
                               "dnsHostName"],
                        scope=ldb.SCOPE_BASE,
                        expression="(objectClass=server)")
    msg = msgs[0]
    dc_name = str(msg["cn"][0])

    try:
        computer_dn = ldb.Dn(samdb, msg["serverReference"][0].decode('utf8'))
    except KeyError:
        computer_dn = None

    try:
        dnsHostName = str(msg["dnsHostName"][0])
    except KeyError:
        dnsHostName = None

    if remove_server_obj:
        # Remove the server DN (do a tree-delete as it could still have a
        # 'DNS Settings' child object if it's a Windows DC)
        samdb.delete(server_dn, ["tree_delete:0"])

    if computer_dn is not None:
        computer_msgs = samdb.search(base=computer_dn,
                                     expression="objectclass=computer",
                                     attrs=["msDS-KrbTgtLink",
                                            "rIDSetReferences",
                                            "cn"],
                                     scope=ldb.SCOPE_BASE)
        # Delete the DC's RID set and (for an RODC) its private krbtgt
        # account before removing the computer itself.
        if "rIDSetReferences" in computer_msgs[0]:
            rid_set_dn = str(computer_msgs[0]["rIDSetReferences"][0])
            logger.info("Removing RID Set: %s" % rid_set_dn)
            samdb.delete(rid_set_dn)
        if "msDS-KrbTgtLink" in computer_msgs[0]:
            krbtgt_link_dn = str(computer_msgs[0]["msDS-KrbTgtLink"][0])
            logger.info("Removing RODC KDC account: %s" % krbtgt_link_dn)
            samdb.delete(krbtgt_link_dn)

        if remove_computer_obj:
            # Delete the computer tree
            logger.info("Removing computer account: %s (and any child objects)" % computer_dn)
            samdb.delete(computer_dn, ["tree_delete:0"])

    if remove_dns_account:
        res = samdb.search(expression="(&(objectclass=user)(cn=dns-%s)(servicePrincipalName=DNS/%s))" %
                           (ldb.binary_encode(dc_name), dnsHostName),
                           attrs=[], scope=ldb.SCOPE_SUBTREE,
                           base=samdb.get_default_basedn())
        if len(res) == 1:
            logger.info("Removing Samba-specific DNS service account: %s" % res[0].dn)
            samdb.delete(res[0].dn)

    if dnsHostName is not None and remove_dns_names:
        remove_dns_references(samdb, logger, dnsHostName)

    if remove_sysvol_obj:
        remove_sysvol_references(samdb, logger, dc_name)
+
+
def offline_remove_ntds_dc(samdb,
                           logger,
                           ntds_dn,
                           remove_computer_obj=False,
                           remove_server_obj=False,
                           remove_connection_obj=False,
                           seize_stale_fsmo=False,
                           remove_sysvol_obj=False,
                           remove_dns_names=False,
                           remove_dns_account=False):
    """Remove a DC's nTDSDSA object and related metadata from the database.

    :param samdb: LDB object connected to sam.ldb
    :param logger: Logger object
    :param ntds_dn: DN of the DC's "NTDS Settings" (nTDSDSA) object
    :param remove_computer_obj: also tree-delete the computer account
    :param remove_server_obj: also tree-delete the server object
    :param remove_connection_obj: remove nTDSConnection objects pointing at
        the removed DC
    :param seize_stale_fsmo: transfer any FSMO roles held by the removed DC
        to the local DSA
    :param remove_sysvol_obj: also remove SYSVOL references
    :param remove_dns_names: also remove DNS records referring to the DC
    :param remove_dns_account: also remove the Samba-specific dns-* account
    :raises DemoteException: on any validation or removal failure
    """
    res = samdb.search("",
                       scope=ldb.SCOPE_BASE, attrs=["dsServiceName"])
    assert len(res) == 1
    my_serviceName = ldb.Dn(samdb, res[0]["dsServiceName"][0].decode('utf8'))
    server_dn = ntds_dn.parent()

    # Never demote the DSA we are running as.
    if my_serviceName == ntds_dn:
        raise DemoteException("Refusing to demote our own DSA: %s " % my_serviceName)

    try:
        msgs = samdb.search(base=ntds_dn, expression="objectClass=ntdsDSA",
                            attrs=["objectGUID"], scope=ldb.SCOPE_BASE)
    except LdbError as e5:
        (enum, estr) = e5.args
        if enum == ldb.ERR_NO_SUCH_OBJECT:
            raise DemoteException("Given DN %s doesn't exist" % ntds_dn)
        else:
            raise
    if (len(msgs) == 0):
        raise DemoteException("%s is not an ntdsda in %s"
                              % (ntds_dn, samdb.domain_dns_name()))

    msg = msgs[0]
    if (msg.dn.get_rdn_name() != "CN" or
        msg.dn.get_rdn_value() != "NTDS Settings"):
        raise DemoteException("Given DN (%s) wasn't the NTDS Settings DN" %
                              ntds_dn)

    ntds_guid = ndr_unpack(misc.GUID, msg["objectGUID"][0])

    if remove_connection_obj:
        # Find any nTDSConnection objects with that DC as the fromServer.
        # We use the GUID to avoid issues with any () chars in a server
        # name.
        stale_connections = samdb.search(base=samdb.get_config_basedn(),
                                         expression="(&(objectclass=nTDSConnection)"
                                         "(fromServer=<GUID=%s>))" % ntds_guid)
        for conn in stale_connections:
            logger.info("Removing nTDSConnection: %s" % conn.dn)
            samdb.delete(conn.dn)

    if seize_stale_fsmo:
        # Note: the filter previously had an unbalanced trailing ")" —
        # "(fsmoRoleOwner=<GUID=%s>))" — which is not a well-formed LDAP
        # filter; use the balanced form.
        stale_fsmo_roles = samdb.search(base="", scope=ldb.SCOPE_SUBTREE,
                                        expression="(fsmoRoleOwner=<GUID=%s>)"
                                        % ntds_guid,
                                        controls=["search_options:0:2"])
        # Find any FSMO roles they have, give them to this server

        for role in stale_fsmo_roles:
            val = str(my_serviceName)
            m = ldb.Message()
            m.dn = role.dn
            m['value'] = ldb.MessageElement(val, ldb.FLAG_MOD_REPLACE,
                                            'fsmoRoleOwner')
            logger.warning("Seizing FSMO role on: %s (now owned by %s)"
                           % (role.dn, my_serviceName))
            samdb.modify(m)

    # Remove the NTDS setting tree
    try:
        logger.info("Removing nTDSDSA: %s (and any children)" % ntds_dn)
        samdb.delete(ntds_dn, ["tree_delete:0"])
    except LdbError as e6:
        (enum, estr) = e6.args
        raise DemoteException("Failed to remove the DCs NTDS DSA object: %s"
                              % estr)

    offline_remove_server(samdb, logger, server_dn,
                          remove_computer_obj=remove_computer_obj,
                          remove_server_obj=remove_server_obj,
                          remove_sysvol_obj=remove_sysvol_obj,
                          remove_dns_names=remove_dns_names,
                          remove_dns_account=remove_dns_account)
+
+
def remove_dc(samdb, logger, dc_name):
    """Remove a DC from the directory, by name or by nTDS-DSA GUID.

    Runs inside a transaction; on failure the transaction is cancelled.

    :param samdb: LDB object connected to sam.ldb
    :param logger: Logger object
    :param dc_name: CN of the DC, or the hex string of its nTDS-DSA GUID
    :raises DemoteException: if the DC cannot be found or removed
    """

    # TODO: Check if this is the last server (covered mostly by
    # refusing to remove our own name)

    samdb.transaction_start()

    server_dn = None

    # Allow the name to be an nTDS-DSA GUID
    try:
        ntds_guid = uuid.UUID(hex=dc_name)
        ntds_dn = "<GUID=%s>" % ntds_guid
    except ValueError:
        # Not a GUID — treat dc_name as the server CN and look it up.
        try:
            server_msgs = samdb.search(base=samdb.get_config_basedn(),
                                       attrs=[],
                                       expression="(&(objectClass=server)"
                                       "(cn=%s))"
                                       % ldb.binary_encode(dc_name))
        except LdbError as e3:
            (enum, estr) = e3.args
            raise DemoteException("Failure checking if %s is an server "
                                  "object in %s: %s"
                                  % (dc_name, samdb.domain_dns_name(), estr))

        if (len(server_msgs) == 0):
            samdb.transaction_cancel()
            raise DemoteException("%s is not an AD DC in %s"
                                  % (dc_name, samdb.domain_dns_name()))
        server_dn = server_msgs[0].dn

        ntds_dn = ldb.Dn(samdb, "CN=NTDS Settings")
        ntds_dn.add_base(server_dn)

    # Confirm this is really an ntdsDSA object
    try:
        ntds_msgs = samdb.search(base=ntds_dn, attrs=[], scope=ldb.SCOPE_BASE,
                                 expression="(objectClass=ntdsdsa)")
    except LdbError as e7:
        (enum, estr) = e7.args
        if enum == ldb.ERR_NO_SUCH_OBJECT:
            ntds_msgs = []
        else:
            samdb.transaction_cancel()
            raise DemoteException(
                "Failure checking if %s is an NTDS DSA in %s: %s" %
                (ntds_dn, samdb.domain_dns_name(), estr))

    # If the NTDS Settings child DN wasn't found or wasn't an ntdsDSA
    # object, just remove the server object located above
    if (len(ntds_msgs) == 0):
        if server_dn is None:
            # dc_name was a GUID that matched nothing; nothing to remove.
            samdb.transaction_cancel()
            raise DemoteException("%s is not an AD DC in %s"
                                  % (dc_name, samdb.domain_dns_name()))

        offline_remove_server(samdb, logger,
                              server_dn,
                              remove_computer_obj=True,
                              remove_server_obj=True,
                              remove_sysvol_obj=True,
                              remove_dns_names=True,
                              remove_dns_account=True)
    else:
        offline_remove_ntds_dc(samdb, logger,
                               ntds_msgs[0].dn,
                               remove_computer_obj=True,
                               remove_server_obj=True,
                               remove_connection_obj=True,
                               seize_stale_fsmo=True,
                               remove_sysvol_obj=True,
                               remove_dns_names=True,
                               remove_dns_account=True)

    samdb.transaction_commit()
+
+
def offline_remove_dc_RemoveDsServer(samdb, ntds_dn):
    """Remove a DC's nTDSDSA object, as the RemoveDsServer call would.

    :param samdb: LDB object connected to sam.ldb
    :param ntds_dn: DN of the nTDS-DSA object of the DC to remove
    """
    # Local import to keep module import side effects unchanged.
    import logging
    logger = logging.getLogger(__name__)

    # The Ldb API used everywhere else in this module is
    # transaction_start()/transaction_commit(); the previous
    # start_transaction()/commit_transaction() names do not exist and
    # raised AttributeError at runtime.
    samdb.transaction_start()

    try:
        # The previous call passed ntds_dn in the logger parameter slot
        # (and None as ntds_dn); pass the arguments in the correct order.
        offline_remove_ntds_dc(samdb, logger, ntds_dn)
    except Exception:
        # Do not leave a dangling open transaction on failure.
        samdb.transaction_cancel()
        raise

    samdb.transaction_commit()
diff --git a/python/samba/safe_tarfile.py b/python/samba/safe_tarfile.py
new file mode 100644
index 0000000..1015fcf
--- /dev/null
+++ b/python/samba/safe_tarfile.py
@@ -0,0 +1,94 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Douglas Bagnall <douglas.bagnall@catalyst.net.nz>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+import os
+import tarfile
+from pathlib import Path
+from tarfile import ExtractError, TarFile as UnsafeTarFile
+
+
class TarFile(UnsafeTarFile):
    """This TarFile implementation is trying to ameliorate CVE-2007-4559,
    where tarfile.TarFiles can step outside of the target directory
    using '../../'.
    """

    try:
        # New in version 3.11.4 (also has been backported)
        # https://docs.python.org/3/library/tarfile.html#tarfile.TarFile.extraction_filter
        # https://peps.python.org/pep-0706/
        extraction_filter = staticmethod(tarfile.tar_filter)
    except AttributeError:
        # Older Python without tar_filter: fall back to a manual
        # pre-extraction scan of the archive.
        def extract(self, member, path="", set_attrs=True, *,
                    numeric_owner=False):
            self._safetarfile_check()
            super().extract(member, path, set_attrs=set_attrs,
                            numeric_owner=numeric_owner)

    def extractall(self, path, members=None, *, numeric_owner=False):
        self._safetarfile_check()
        super().extractall(path, members,
                           numeric_owner=numeric_owner)

    def _safetarfile_check(self):
        # Reject the whole archive if any member looks like a path
        # traversal, an unsafe symlink, or an unsafe hard link.
        for tarinfo in self.__iter__():
            if self._is_traversal_attempt(tarinfo=tarinfo):
                raise ExtractError(
                    "Attempted directory traversal for "
                    f"member: {tarinfo.name}")
            if self._is_unsafe_symlink(tarinfo=tarinfo):
                raise ExtractError(
                    "Attempted directory traversal via symlink for "
                    f"member: {tarinfo.linkname}")
            if self._is_unsafe_link(tarinfo=tarinfo):
                raise ExtractError(
                    "Attempted directory traversal via link for "
                    f"member: {tarinfo.linkname}")

    def _resolve_path(self, path):
        return os.path.realpath(os.path.abspath(path))

    def _is_path_in_dir(self, path, basedir):
        """Return True if basedir/path resolves to inside basedir.

        A bare prefix test would also accept *sibling* directories such
        as basedir + "XYZ", so require an exact match or a path
        separator immediately after the base directory.
        """
        resolved = self._resolve_path(os.path.join(basedir, path))
        return (resolved == basedir
                or resolved.startswith(basedir.rstrip(os.sep) + os.sep))

    def _is_traversal_attempt(self, tarinfo):
        # Absolute member names and any '../' component are rejected.
        if (tarinfo.name.startswith(os.sep)
            or ".." + os.sep in tarinfo.name):
            return True
        return False

    def _is_unsafe_symlink(self, tarinfo):
        # NOTE(review): link targets are checked relative to the current
        # working directory, which assumes extraction into (a subdir of)
        # the cwd — confirm against callers.
        if tarinfo.issym():
            symlink_file = Path(
                os.path.normpath(os.path.join(os.getcwd(),
                                              tarinfo.linkname)))
            if not self._is_path_in_dir(symlink_file, os.getcwd()):
                return True
        return False

    def _is_unsafe_link(self, tarinfo):
        if tarinfo.islnk():
            link_file = Path(
                os.path.normpath(os.path.join(os.getcwd(),
                                              tarinfo.linkname)))
            if not self._is_path_in_dir(link_file, os.getcwd()):
                return True
        return False
+
+
+open = TarFile.open
diff --git a/python/samba/samba3/__init__.py b/python/samba/samba3/__init__.py
new file mode 100644
index 0000000..af00f69
--- /dev/null
+++ b/python/samba/samba3/__init__.py
@@ -0,0 +1,409 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
"""Support for reading Samba 3 data files."""

__docformat__ = "restructuredText"

# Prefix marking registry *value* records (as opposed to key records)
# in registry.tdb; see Registry.keys()/values().
REGISTRY_VALUE_PREFIX = b"SAMBA_REGVAL"
# Registry database format version constant.
REGISTRY_DB_VERSION = 1

import os
import struct
import tdb

from samba.samba3 import passdb
from samba.samba3 import param as s3param
from samba.common import get_bytes
+
def fetch_uint32(db, key):
    """Fetch a little-endian unsigned 32-bit integer stored under *key*.

    :param db: TDB-like mapping of bytes keys to bytes values.
    :param key: Key to look up.
    :return: The decoded integer, or None when the key is absent.
    """
    try:
        raw = db[key]
    except KeyError:
        return None
    assert len(raw) == 4
    (value,) = struct.unpack("<L", raw)
    return value
+
+
def fetch_int32(db, key):
    """Fetch a little-endian signed 32-bit integer stored under *key*.

    :param db: TDB-like mapping of bytes keys to bytes values.
    :param key: Key to look up.
    :return: The decoded integer, or None when the key is absent.
    """
    try:
        raw = db[key]
    except KeyError:
        return None
    assert len(raw) == 4
    (value,) = struct.unpack("<l", raw)
    return value
+
+
class DbDatabase(object):
    """Read-only wrapper around a single Samba 3 TDB database file."""

    def __init__(self, file):
        """Open a database file.

        :param file: Path of the file to open; ".tdb" is appended.
        """
        # O_RDONLY: these reader classes never modify the database.
        self.db = tdb.Tdb(file + ".tdb", flags=os.O_RDONLY)
        self._check_version()

    def _check_version(self):
        # Subclasses override this to validate their on-disk format.
        pass

    def close(self):
        """Close resources associated with this object."""
        self.db.close()
+
+
class Registry(DbDatabase):
    """Simple read-only support for reading the Samba3 registry.

    :note: This object uses the same syntax for registry key paths as
        Samba 3: forward slashes for key path separators and
        abbreviations for the predefined key names, e.g.
        HKLM/Software/Bar.
    """

    def __len__(self):
        """Return the number of keys."""
        return len(self.keys())

    def keys(self):
        """Return list with all the keys."""
        # Value records live in the same TDB under a reserved prefix;
        # every other record is a key record.
        result = []
        for k in self.db:
            if not k.startswith(REGISTRY_VALUE_PREFIX):
                result.append(k.rstrip(b"\x00"))
        return result

    def subkeys(self, key):
        """Retrieve the subkeys for the specified key.

        :param key: Key path.
        :return: list with key names
        """
        data = self.db.get(key + b"\x00")
        if data is None:
            return []
        (count,) = struct.unpack("<L", data[:4])
        names = data[4:].split(b"\0")
        # The blob is NUL-terminated, so the last fragment is empty.
        terminator = names.pop()
        assert terminator == b""
        assert len(names) == count
        return names

    def values(self, key):
        """Return a dictionary with the values set for a specific key.

        :param key: Key to retrieve values for.
        :return: Dictionary with value names as key, tuple with type and
            data as value.
        """
        blob = self.db.get(REGISTRY_VALUE_PREFIX + b'/' + key + b'\x00')
        if blob is None:
            return {}
        (count,) = struct.unpack("<L", blob[:4])
        blob = blob[4:]
        result = {}
        for _ in range(count):
            # NUL-terminated value name, then 32-bit type and
            # 32-bit length-prefixed data.
            (name, blob) = blob.split(b"\0", 1)
            (vtype,) = struct.unpack("<L", blob[:4])
            (vlen,) = struct.unpack("<L", blob[4:8])
            result[name] = (vtype, blob[8:8 + vlen])
            blob = blob[8 + vlen:]
        return result
+
+
# High water mark keys: counters for the next free uid/gid
# (NUL-terminated, as stored in the idmap TDB).
IDMAP_HWM_GROUP = b"GROUP HWM\0"
IDMAP_HWM_USER = b"USER HWM\0"

# Key prefixes for the per-id SID mapping records.
IDMAP_GROUP_PREFIX = b"GID "
IDMAP_USER_PREFIX = b"UID "

# idmap version determines auto-conversion
IDMAP_VERSION_V2 = 2
+
+
class IdmapDatabase(DbDatabase):
    """Samba 3 ID map database reader (winbindd_idmap.tdb)."""

    def _check_version(self):
        # Only version 2 idmap databases are supported here.
        assert fetch_int32(self.db, b"IDMAP_VERSION\0") == IDMAP_VERSION_V2

    def ids(self):
        """Retrieve a list of all ids in this database.

        Yields [prefix, id] bytes pairs for both users and groups.
        """
        for k in self.db:
            if k.startswith(IDMAP_USER_PREFIX):
                yield k.rstrip(b"\0").split(b" ")
            if k.startswith(IDMAP_GROUP_PREFIX):
                yield k.rstrip(b"\0").split(b" ")

    def uids(self):
        """Retrieve a list of all uids in this database."""
        for k in self.db:
            if k.startswith(IDMAP_USER_PREFIX):
                yield int(k[len(IDMAP_USER_PREFIX):].rstrip(b"\0"))

    def gids(self):
        """Retrieve a list of all gids in this database."""
        for k in self.db:
            if k.startswith(IDMAP_GROUP_PREFIX):
                yield int(k[len(IDMAP_GROUP_PREFIX):].rstrip(b"\0"))

    def get_sid(self, xid, id_type):
        """Retrieve SID associated with a particular id and type.

        :param xid: UID or GID to retrieve SID for.
        :param id_type: Type of id specified - 'UID' or 'GID'
        :return: The SID as bytes, or None if no mapping exists.
        """
        data = self.db.get(get_bytes("%s %s\0" % (id_type, str(xid))))
        if data is None:
            return data
        # The stored value is bytes, so strip with a bytes argument;
        # the original rstrip("\0") raised TypeError on Python 3.
        return data.rstrip(b"\0")

    def get_user_sid(self, uid):
        """Retrieve the SID associated with a particular uid.

        :param uid: UID to retrieve SID for.
        :return: A SID or None if no mapping was found.
        """
        data = self.db.get(IDMAP_USER_PREFIX + str(uid).encode() + b'\0')
        if data is None:
            return data
        return data.rstrip(b"\0")

    def get_group_sid(self, gid):
        """Retrieve the SID associated with a particular gid."""
        data = self.db.get(IDMAP_GROUP_PREFIX + str(gid).encode() + b'\0')
        if data is None:
            return data
        return data.rstrip(b"\0")

    def get_user_hwm(self):
        """Obtain the user high-water mark."""
        return fetch_uint32(self.db, IDMAP_HWM_USER)

    def get_group_hwm(self):
        """Obtain the group high-water mark."""
        return fetch_uint32(self.db, IDMAP_HWM_GROUP)
+
+
class SecretsDatabase(DbDatabase):
    """Samba 3 Secrets database reader.

    Lookups return the raw stored bytes, or None when absent.  TDB keys
    are bytes under Python 3, so all key handling below uses bytes (the
    original str-based prefix/strip operations raised TypeError).
    """

    def get_auth_password(self):
        return self.db.get(b"SECRETS/AUTH_PASSWORD")

    def get_auth_domain(self):
        return self.db.get(b"SECRETS/AUTH_DOMAIN")

    def get_auth_user(self):
        return self.db.get(b"SECRETS/AUTH_USER")

    def get_domain_guid(self, host):
        # Use get_bytes() like the other getters so str hosts work too;
        # bytes %-formatting of a str value raised TypeError.
        return self.db.get(get_bytes("SECRETS/DOMGUID/%s" % host))

    def ldap_dns(self):
        """Iterate over the DNs that have a stored LDAP bind password."""
        prefix = b"SECRETS/LDAP_BIND_PW/"
        for k in self.db:
            if k.startswith(prefix):
                yield k[len(prefix):].rstrip(b"\0")

    def domains(self):
        """Iterate over domains in this database.

        :return: Iterator over the names of domains in this database.
        """
        prefix = b"SECRETS/SID/"
        for k in self.db:
            if k.startswith(prefix):
                yield k[len(prefix):].rstrip(b"\0")

    def get_ldap_bind_pw(self, host):
        return self.db.get(get_bytes("SECRETS/LDAP_BIND_PW/%s" % host))

    def get_afs_keyfile(self, host):
        return self.db.get(get_bytes("SECRETS/AFS_KEYFILE/%s" % host))

    def get_machine_sec_channel_type(self, host):
        return fetch_uint32(
            self.db, get_bytes("SECRETS/MACHINE_SEC_CHANNEL_TYPE/%s" % host))

    def get_machine_last_change_time(self, host):
        # Wrapped in get_bytes() for consistency with the other getters;
        # a str key would never match the bytes-keyed TDB.
        return fetch_uint32(
            self.db, get_bytes("SECRETS/MACHINE_LAST_CHANGE_TIME/%s" % host))

    def get_machine_password(self, host):
        return self.db.get(get_bytes("SECRETS/MACHINE_PASSWORD/%s" % host))

    def get_machine_acc(self, host):
        return self.db.get(get_bytes("SECRETS/$MACHINE.ACC/%s" % host))

    def get_domtrust_acc(self, host):
        return self.db.get(get_bytes("SECRETS/$DOMTRUST.ACC/%s" % host))

    def trusted_domains(self):
        """Iterate over the names of trusted domains."""
        prefix = b"SECRETS/$DOMTRUST.ACC/"
        for k in self.db:
            if k.startswith(prefix):
                yield k[len(prefix):].rstrip(b"\0")

    def get_random_seed(self):
        return self.db.get(b"INFO/random_seed")

    def get_sid(self, host):
        return self.db.get(get_bytes("SECRETS/SID/%s" % host.upper()))
+
+
# On-disk format versions of share_info.tdb accepted by ShareInfoDatabase.
SHARE_DATABASE_VERSION_V1 = 1
SHARE_DATABASE_VERSION_V2 = 2
+
+
class ShareInfoDatabase(DbDatabase):
    """Samba 3 Share Info database reader."""

    def _check_version(self):
        # TDB keys are bytes under Python 3; use a bytes key like the
        # sibling databases (the original str key could never match).
        assert fetch_int32(self.db, b"INFO/version\0") in \
            (SHARE_DATABASE_VERSION_V1, SHARE_DATABASE_VERSION_V2)

    def get_secdesc(self, name):
        """Obtain the security descriptor on a particular share.

        :param name: Name of the share
        :return: Raw descriptor bytes, or None when the share is unknown.
        """
        secdesc = self.db.get(get_bytes("SECDESC/%s" % name))
        # FIXME: Run ndr_pull_security_descriptor
        return secdesc
+
+
class Shares(object):
    """Container for share objects."""

    def __init__(self, lp, shareinfo):
        # lp: loadparm context listing the shares; shareinfo: the
        # ShareInfoDatabase holding per-share security descriptors.
        self.lp = lp
        self.shareinfo = shareinfo

    def __len__(self):
        """Number of shares (the [global] section is not a share)."""
        return len(self.lp) - 1

    def __iter__(self):
        """Iterate over the share names."""
        return iter(self.lp)
+
+
def shellsplit(text):
    """Very simple shell-like line splitting.

    Double quotes group characters (including whitespace) into one
    token; the quote characters themselves never appear in the output.
    Unquoted spaces, tabs and newlines separate tokens.

    :param text: Text to split.
    :return: List with parts of the line as strings.
    """
    tokens = []
    word = ""
    quoted = False
    for ch in text:
        if ch == '"':
            quoted = not quoted
        elif ch in " \t\n" and not quoted:
            # Unquoted whitespace terminates the current token.
            if word:
                tokens.append(word)
                word = ""
        else:
            word += ch
    if word:
        tokens.append(word)
    return tokens
+
+
class WinsDatabase(object):
    """Samba 3 WINS database reader (wins.dat text format)."""

    def __init__(self, file):
        """Load all entries from *file*.

        :param file: Path of the wins.dat file.
        :raises AssertionError: on a bad version line or duplicate name.
        """
        self.entries = {}
        # Context manager so the handle is closed even when one of the
        # asserts below fires (the original leaked it on error).
        with open(file, 'r') as f:
            assert f.readline().rstrip("\n") == "VERSION 1 0"
            for l in f:
                if l[0] == "#":  # skip comments
                    continue
                entries = shellsplit(l.rstrip("\n"))
                name = entries[0]
                ttl = int(entries[1])
                i = 2
                ips = []
                # Every dotted field after the TTL is an IP address.
                while "." in entries[i]:
                    ips.append(entries[i])
                    i += 1
                # Final field: hex NetBIOS flags plus a trailing type char.
                nb_flags = int(entries[i][:-1], 16)
                assert name not in self.entries, "Name %s exists twice" % name
                self.entries[name] = (ttl, ips, nb_flags)

    def __getitem__(self, name):
        return self.entries[name]

    def __len__(self):
        return len(self.entries)

    def __iter__(self):
        return iter(self.entries)

    def items(self):
        """Return the entries in this WINS database."""
        return self.entries.items()

    def close(self):  # for consistency
        pass
+
+
class Samba3(object):
    """Samba 3 configuration and state data reader."""

    def __init__(self, smbconfpath, s3_lp_ctx=None):
        """Open the configuration and data for a Samba 3 installation.

        :param smbconfpath: Path to the smb.conf file.
        :param s3_lp_ctx: Samba3 Loadparm context
        """
        self.smbconfpath = smbconfpath
        if s3_lp_ctx:
            self.lp = s3_lp_ctx
        else:
            self.lp = s3param.get_context()
            self.lp.load(smbconfpath)

    def _resolve(self, path, option):
        # Absolute and explicitly-relative paths are used verbatim;
        # anything else lives under the directory given by *option*.
        if path[0] in "/.":
            return path
        return os.path.join(self.lp.get(option), path)

    def statedir_path(self, path):
        """Resolve *path* against the configured state directory."""
        return self._resolve(path, "state directory")

    def privatedir_path(self, path):
        """Resolve *path* against the configured private dir."""
        return self._resolve(path, "private dir")

    def get_conf(self):
        """Return the loadparm context."""
        return self.lp

    def get_sam_db(self):
        """Open the configured passdb backend."""
        return passdb.PDB(self.lp.get('passdb backend'))

    def get_registry(self):
        """Open the registry database."""
        return Registry(self.statedir_path("registry"))

    def get_secrets_db(self):
        """Open the secrets database."""
        return SecretsDatabase(self.privatedir_path("secrets"))

    def get_shareinfo_db(self):
        """Open the share information database."""
        return ShareInfoDatabase(self.statedir_path("share_info"))

    def get_idmap_db(self):
        """Open the winbindd idmap database."""
        return IdmapDatabase(self.statedir_path("winbindd_idmap"))

    def get_wins_db(self):
        """Open the WINS database."""
        return WinsDatabase(self.statedir_path("wins.dat"))

    def get_shares(self):
        """Return a Shares container for this installation."""
        return Shares(self.get_conf(), self.get_shareinfo_db())
diff --git a/python/samba/samba3/libsmb_samba_internal.py b/python/samba/samba3/libsmb_samba_internal.py
new file mode 100644
index 0000000..ef0b30d
--- /dev/null
+++ b/python/samba/samba3/libsmb_samba_internal.py
@@ -0,0 +1,130 @@
+# Copyright (C) Volker Lendecke <vl@samba.org> 2020
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from samba.samba3.libsmb_samba_cwrapper import *
+from samba.dcerpc import security
+
class Conn(LibsmbCConn):
    """SMB connection with helpers for recursive deletion and for
    getting/setting security descriptors on files."""

    def deltree(self, path):
        """Delete *path*; directories are emptied recursively first."""
        if self.chkpath(path):
            # A directory: remove its contents, then the directory itself.
            for entry in self.list(path):
                self.deltree(path + "\\" + entry['name'])
            self.rmdir(path)
        else:
            self.unlink(path)

    SECINFO_DEFAULT_FLAGS = \
        security.SECINFO_OWNER | \
        security.SECINFO_GROUP | \
        security.SECINFO_DACL | \
        security.SECINFO_SACL

    def required_access_for_get_secinfo(self, secinfo):
        """Return the access mask needed to query the given SECINFO bits.

        Based on MS-FSA 2.1.5.13 (Server Requests a Query of Security
        Information); MS-SMB2 3.3.5.20.3 (Handling SMB2_0_INFO_SECURITY)
        specifies no extra checks.
        """
        # Everything except the SACL only needs READ_CONTROL.
        read_control_bits = (security.SECINFO_OWNER |
                             security.SECINFO_GROUP |
                             security.SECINFO_DACL |
                             security.SECINFO_LABEL)
        access = 0
        if secinfo & read_control_bits:
            access |= security.SEC_STD_READ_CONTROL
        if secinfo & security.SECINFO_SACL:
            access |= security.SEC_FLAG_SYSTEM_SECURITY
        return access

    def required_access_for_set_secinfo(self, secinfo):
        """Return the access mask needed to set the given SECINFO bits.

        Based on MS-FSA 2.1.5.16 (Server Requests Setting of Security
        Information) and additional constraints from MS-SMB2 3.3.5.21.3
        (Handling SMB2_0_INFO_SECURITY).
        """
        access = 0
        if secinfo & (security.SECINFO_OWNER |
                      security.SECINFO_GROUP |
                      security.SECINFO_LABEL |
                      security.SECINFO_BACKUP):
            access |= security.SEC_STD_WRITE_OWNER
        if secinfo & (security.SECINFO_DACL |
                      security.SECINFO_ATTRIBUTE |
                      security.SECINFO_BACKUP):
            access |= security.SEC_STD_WRITE_DAC
        if secinfo & (security.SECINFO_SACL |
                      security.SECINFO_SCOPE |
                      security.SECINFO_BACKUP):
            access |= security.SEC_FLAG_SYSTEM_SECURITY
        return access

    def get_acl(self,
                filename,
                sinfo=None,
                access_mask=None):
        """Get security descriptor for file."""
        if sinfo is None:
            sinfo = self.SECINFO_DEFAULT_FLAGS
        if access_mask is None:
            access_mask = self.required_access_for_get_secinfo(sinfo)
        fnum = self.create(
            Name=filename,
            DesiredAccess=access_mask,
            ShareAccess=(FILE_SHARE_READ | FILE_SHARE_WRITE))
        # Always release the handle, even if the query fails.
        try:
            sd = self.get_sd(fnum, sinfo)
        finally:
            self.close(fnum)
        return sd

    def set_acl(self,
                filename,
                sd,
                sinfo=None,
                access_mask=None):
        """Set security descriptor for file."""
        if sinfo is None:
            sinfo = self.SECINFO_DEFAULT_FLAGS
        if access_mask is None:
            access_mask = self.required_access_for_set_secinfo(sinfo)
        fnum = self.create(
            Name=filename,
            DesiredAccess=access_mask,
            ShareAccess=(FILE_SHARE_READ | FILE_SHARE_WRITE))
        try:
            self.set_sd(fnum, sd, sinfo)
        finally:
            self.close(fnum)
diff --git a/python/samba/samdb.py b/python/samba/samdb.py
new file mode 100644
index 0000000..9bbec43
--- /dev/null
+++ b/python/samba/samdb.py
@@ -0,0 +1,1623 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007-2010
+# Copyright (C) Matthias Dieter Wallnoefer 2009
+#
+# Based on the original in EJS:
+# Copyright (C) Andrew Tridgell <tridge@samba.org> 2005
+# Copyright (C) Giampaolo Lauria <lauria2@yahoo.com> 2011
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Convenience functions for using the SAM."""
+
+import samba
+import ldb
+import time
+import base64
+import os
+import re
+from samba import dsdb, dsdb_dns
+from samba.ndr import ndr_unpack, ndr_pack
+from samba.dcerpc import drsblobs, misc
+from samba.common import normalise_int32
+from samba.common import get_bytes, cmp
+from samba.dcerpc import security
+from samba import is_ad_dc_built
+import binascii
+
+__docformat__ = "restructuredText"
+
+
def get_default_backend_store():
    """Return the name of the default sam.ldb backend store ("tdb")."""
    return "tdb"
+
class SamDBError(Exception):
    """Base class for errors raised by SamDB helper methods."""
    pass
+
class SamDBNotFoundError(SamDBError):
    """Raised when a requested object cannot be found in the SAM."""
    pass
+
class SamDB(samba.Ldb):
    """The SAM database."""

    # Class-level (shared across instances) caches; presumably mapping
    # schema OIDs and well-known GUIDs to names — populated elsewhere.
    hash_oid_name = {}
    hash_well_known = {}
+
    class _CleanUpOnError:
        """Context manager that deletes a newly created object if the
        body raises, so no half-created account is left behind when the
        connection (e.g. LDAP) has no transactions."""

        def __init__(self, samdb, dn):
            # samdb: connection used for the cleanup delete.
            # dn: DN of the object to remove on error.
            self.samdb = samdb
            self.dn = dn

        def __enter__(self):
            pass

        def __exit__(self, exc_type, exc_val, exc_tb):
            if exc_type is not None:
                # We failed to modify the account. If we connected to the
                # database over LDAP, we don't have transactions, and so when
                # we call transaction_cancel(), the account will still exist in
                # a half-created state. We'll delete the account to ensure that
                # doesn't happen.
                self.samdb.delete(self.dn)

            # Don't suppress any exceptions
            return False
+
    def __init__(self, url=None, lp=None, modules_dir=None, session_info=None,
                 credentials=None, flags=ldb.FLG_DONT_CREATE_DB,
                 options=None, global_schema=True,
                 auto_connect=True, am_rodc=None):
        """Open (or prepare) a connection to the SAM database.

        :param url: Database URL/path; when None and *lp* is given, the
            default sam.ldb URL from the loadparm context is used.
        :param lp: Loadparm context (also used by connect()).
        :param global_schema: Attach the process-global schema to this
            connection.
        :param auto_connect: When False, do not connect now; connect()
            must be called later.
        :param am_rodc: When not None, force whether this DC is treated
            as an RODC.
        """
        self.lp = lp
        if not auto_connect:
            url = None
        elif url is None and lp is not None:
            url = lp.samdb_url()

        # Remember the URL used (None when not connected yet).
        self.url = url

        super().__init__(url=url, lp=lp, modules_dir=modules_dir,
                         session_info=session_info, credentials=credentials, flags=flags,
                         options=options)

        if global_schema:
            dsdb._dsdb_set_global_schema(self)

        if am_rodc is not None:
            dsdb._dsdb_set_am_rodc(self, am_rodc)
+
+ def connect(self, url=None, flags=0, options=None):
+ """connect to the database"""
+ if self.lp is not None and not os.path.exists(url):
+ url = self.lp.private_path(url)
+ self.url = url
+
+ super().connect(url=url, flags=flags, options=options)
+
    def am_rodc(self):
        """Return True if we are an RODC (read-only domain controller)."""
        return dsdb._am_rodc(self)
+
    def am_pdc(self):
        """Return True if we are a PDC emulator."""
        return dsdb._am_pdc(self)
+
    def domain_dn(self):
        """Return the domain DN (default base DN) as a string."""
        return str(self.get_default_basedn())
+
    def schema_dn(self):
        """Return the schema partition DN as a string."""
        return str(self.get_schema_basedn())
+
    def disable_account(self, search_filter):
        """Disables an account by setting UF_ACCOUNTDISABLE.

        :param search_filter: LDAP filter to find the user (eg
            sAMAccountName=name)
        """

        flags = samba.dsdb.UF_ACCOUNTDISABLE
        self.toggle_userAccountFlags(search_filter, flags, on=True)
+
    def enable_account(self, search_filter):
        """Enables an account.

        Clears UF_ACCOUNTDISABLE and also UF_PASSWD_NOTREQD, so a
        password becomes required again.

        :param search_filter: LDAP filter to find the user (eg
            sAMAccountName=name)
        """

        flags = samba.dsdb.UF_ACCOUNTDISABLE | samba.dsdb.UF_PASSWD_NOTREQD
        self.toggle_userAccountFlags(search_filter, flags, on=False)
+
    def toggle_userAccountFlags(self, search_filter, flags, flags_str=None,
                                on=True, strict=False):
        """Set or clear bits of userAccountControl on a single account.

        :param search_filter: LDAP filter to find the user (eg
            sAMAccountName=name)
        :param flags: samba.dsdb.UF_* flags
        :param flags_str: human-readable flag name(s), used in error
            messages when strict=True
        :param on: on=True (default) => set, on=False => unset
        :param strict: strict=False (default) ignore if no action is needed
            strict=True raises an Exception if the flags are already in
            the requested state
        :raises Exception: if no matching account is found
        """
        res = self.search(base=self.domain_dn(), scope=ldb.SCOPE_SUBTREE,
                          expression=search_filter, attrs=["userAccountControl"])
        if len(res) == 0:
            raise Exception("Unable to find account where '%s'" % search_filter)
        assert(len(res) == 1)
        account_dn = res[0].dn

        old_uac = int(res[0]["userAccountControl"][0])
        if on:
            if strict and (old_uac & flags):
                error = "Account flag(s) '%s' already set" % flags_str
                raise Exception(error)

            new_uac = old_uac | flags
        else:
            if strict and not (old_uac & flags):
                error = "Account flag(s) '%s' already unset" % flags_str
                raise Exception(error)

            new_uac = old_uac & ~flags

        # Nothing to do if the flags are already in the requested state.
        if old_uac == new_uac:
            return

        # Delete the old value and add the new one in a single modify;
        # the delete-by-value will fail if the stored value changed in
        # the meantime, so a concurrent update is not silently clobbered.
        mod = """
dn: %s
changetype: modify
delete: userAccountControl
userAccountControl: %u
add: userAccountControl
userAccountControl: %u
""" % (account_dn, old_uac, new_uac)
        self.modify_ldif(mod)
+
    def force_password_change_at_next_login(self, search_filter):
        """Forces a password change at next login

        :param search_filter: LDAP filter to find the user (eg
            sAMAccountName=name)
        :raises Exception: if no matching user is found
        """
        res = self.search(base=self.domain_dn(), scope=ldb.SCOPE_SUBTREE,
                          expression=search_filter, attrs=[])
        if len(res) == 0:
            raise Exception('Unable to find user "%s"' % search_filter)
        assert(len(res) == 1)
        user_dn = res[0].dn

        # pwdLastSet=0 flags the password as expired, forcing a change
        # at the next logon.
        mod = """
dn: %s
changetype: modify
replace: pwdLastSet
pwdLastSet: 0
""" % (user_dn)
        self.modify_ldif(mod)
+
    def unlock_account(self, search_filter):
        """Unlock a user account by resetting lockoutTime to 0.
        This does also reset the badPwdCount to 0.

        :param search_filter: LDAP filter to find the user (e.g.
            sAMAccountName=username)
        :raises SamDBNotFoundError: if no matching user is found
        :raises SamDBError: if the filter matches more than one user
        """
        res = self.search(base=self.domain_dn(),
                          scope=ldb.SCOPE_SUBTREE,
                          expression=search_filter,
                          attrs=[])
        if len(res) == 0:
            raise SamDBNotFoundError('Unable to find user "%s"' % search_filter)
        if len(res) != 1:
            raise SamDBError('User "%s" is not unique' % search_filter)
        user_dn = res[0].dn

        # lockoutTime=0 means "not locked out".
        mod = """
dn: %s
changetype: modify
replace: lockoutTime
lockoutTime: 0
""" % (user_dn)
        self.modify_ldif(mod)
+
    def newgroup(self, groupname, groupou=None, grouptype=None,
                 description=None, mailaddress=None, notes=None, sd=None,
                 gidnumber=None, nisdomain=None):
        """Adds a new group with additional parameters

        :param groupname: Name of the new group
        :param groupou: Object container (without domainDN postfix) for
            the new group; defaults to the well-known Users container
        :param grouptype: Type of the new group
        :param description: Description of the new group
        :param mailaddress: Email address of the new group
        :param notes: Notes of the new group
        :param gidnumber: GID Number of the new group
        :param nisdomain: NIS Domain Name of the new group
        :param sd: security descriptor of the object
        """

        if groupou:
            group_dn = "CN=%s,%s,%s" % (groupname, groupou, self.domain_dn())
        else:
            group_dn = "CN=%s,%s" % (groupname, self.get_wellknown_dn(
                self.get_default_basedn(),
                dsdb.DS_GUID_USERS_CONTAINER))

        # The new group record. Note the reliance on the SAMLDB module
        # which fills in the default information
        ldbmessage = {"dn": group_dn,
                      "sAMAccountName": groupname,
                      "objectClass": "group"}

        if grouptype is not None:
            ldbmessage["groupType"] = normalise_int32(grouptype)

        if description is not None:
            ldbmessage["description"] = description

        if mailaddress is not None:
            ldbmessage["mail"] = mailaddress

        if notes is not None:
            ldbmessage["info"] = notes

        if gidnumber is not None:
            ldbmessage["gidNumber"] = normalise_int32(gidnumber)

        if nisdomain is not None:
            ldbmessage["msSFU30Name"] = groupname
            ldbmessage["msSFU30NisDomain"] = nisdomain

        if sd is not None:
            ldbmessage["nTSecurityDescriptor"] = ndr_pack(sd)

        self.add(ldbmessage)
+
    def deletegroup(self, groupname):
        """Deletes a group

        :param groupname: Name of the target group
        :raises Exception: if the group cannot be found
        """

        # Match on sAMAccountName *and* group objectCategory so only
        # real group objects are deleted.
        groupfilter = "(&(sAMAccountName=%s)(objectCategory=%s,%s))" % (ldb.binary_encode(groupname), "CN=Group,CN=Schema,CN=Configuration", self.domain_dn())
        self.transaction_start()
        try:
            targetgroup = self.search(base=self.domain_dn(), scope=ldb.SCOPE_SUBTREE,
                                      expression=groupfilter, attrs=[])
            if len(targetgroup) == 0:
                raise Exception('Unable to find group "%s"' % groupname)
            assert(len(targetgroup) == 1)
            self.delete(targetgroup[0].dn)
        except:
            self.transaction_cancel()
            raise
        else:
            self.transaction_commit()
+
+ def group_member_filter(self, member, member_types):
+ filter = ""
+
+ all_member_types = [ 'user',
+ 'group',
+ 'computer',
+ 'serviceaccount',
+ 'contact',
+ ]
+
+ if 'all' in member_types:
+ member_types = all_member_types
+
+ for member_type in member_types:
+ if member_type not in all_member_types:
+ raise Exception('Invalid group member type "%s". '
+ 'Valid types are %s and all.' %
+ (member_type, ", ".join(all_member_types)))
+
+ if 'user' in member_types:
+ filter += ('(&(sAMAccountName=%s)(samAccountType=%d))' %
+ (ldb.binary_encode(member), dsdb.ATYPE_NORMAL_ACCOUNT))
+ if 'group' in member_types:
+ filter += ('(&(sAMAccountName=%s)'
+ '(objectClass=group)'
+ '(!(groupType:1.2.840.113556.1.4.803:=1)))' %
+ ldb.binary_encode(member))
+ if 'computer' in member_types:
+ samaccountname = member
+ if member[-1] != '$':
+ samaccountname = "%s$" % member
+ filter += ('(&(samAccountType=%d)'
+ '(!(objectCategory=msDS-ManagedServiceAccount))'
+ '(sAMAccountName=%s))' %
+ (dsdb.ATYPE_WORKSTATION_TRUST,
+ ldb.binary_encode(samaccountname)))
+ if 'serviceaccount' in member_types:
+ samaccountname = member
+ if member[-1] != '$':
+ samaccountname = "%s$" % member
+ filter += ('(&(samAccountType=%d)'
+ '(objectCategory=msDS-ManagedServiceAccount)'
+ '(sAMAccountName=%s))' %
+ (dsdb.ATYPE_WORKSTATION_TRUST,
+ ldb.binary_encode(samaccountname)))
+ if 'contact' in member_types:
+ filter += ('(&(objectCategory=Person)(!(objectSid=*))(name=%s))' %
+ ldb.binary_encode(member))
+
+ filter = "(|%s)" % filter
+
+ return filter
+
    def add_remove_group_members(self, groupname, members,
                                 add_members_operation=True,
                                 member_types=None,
                                 member_base_dn=None):
        """Adds or removes group members

        Each member may be given as a SID string, a DN, or a plain name
        (resolved via group_member_filter() against *member_types*).

        :param groupname: Name of the target group
        :param members: list of group members
        :param add_members_operation: Defines if its an add or remove
            operation
        :param member_types: Member types to match plain names against;
            defaults to ['user', 'group', 'computer']
        :param member_base_dn: Search base for member lookups; defaults
            to the domain DN
        """
        if member_types is None:
            member_types = ['user', 'group', 'computer']

        groupfilter = "(&(sAMAccountName=%s)(objectCategory=%s,%s))" % (
            ldb.binary_encode(groupname), "CN=Group,CN=Schema,CN=Configuration", self.domain_dn())

        self.transaction_start()
        try:
            targetgroup = self.search(base=self.domain_dn(), scope=ldb.SCOPE_SUBTREE,
                                      expression=groupfilter, attrs=['member'])
            if len(targetgroup) == 0:
                raise Exception('Unable to find group "%s"' % groupname)
            assert(len(targetgroup) == 1)

            modified = False

            # All add/delete operations are accumulated into one LDIF
            # modify, applied at the end.
            addtargettogroup = """
dn: %s
changetype: modify
""" % (str(targetgroup[0].dn))

            for member in members:
                targetmember_dn = None
                if member_base_dn is None:
                    member_base_dn = self.domain_dn()

                # First try to interpret the member as a SID...
                try:
                    membersid = security.dom_sid(member)
                    targetmember_dn = "<SID=%s>" % str(membersid)
                except ValueError:
                    pass

                # ...then as a DN...
                if targetmember_dn is None:
                    try:
                        member_dn = ldb.Dn(self, member)
                        if member_dn.get_linearized() == member_dn.extended_str(1):
                            full_member_dn = self.normalize_dn_in_domain(member_dn)
                        else:
                            full_member_dn = member_dn
                        targetmember_dn = full_member_dn.extended_str(1)
                    except ValueError as e:
                        pass

                # ...and finally as a plain name, searched per member type.
                if targetmember_dn is None:
                    filter = self.group_member_filter(member, member_types)
                    targetmember = self.search(base=member_base_dn,
                                               scope=ldb.SCOPE_SUBTREE,
                                               expression=filter,
                                               attrs=[])

                    if len(targetmember) > 1:
                        targetmemberlist_str = ""
                        for msg in targetmember:
                            targetmemberlist_str += "%s\n" % msg.get("dn")
                        raise Exception('Found multiple results for "%s":\n%s' %
                                        (member, targetmemberlist_str))
                    if len(targetmember) != 1:
                        raise Exception('Unable to find "%s". Operation cancelled.' % member)
                    targetmember_dn = targetmember[0].dn.extended_str(1)

                # NOTE(review): get_bytes() yields bytes but the right-hand
                # list holds str values, so this membership test looks like
                # it can never match — verify against callers.
                if add_members_operation is True and (targetgroup[0].get('member') is None or get_bytes(targetmember_dn) not in [str(x) for x in targetgroup[0]['member']]):
                    modified = True
                    addtargettogroup += """add: member
member: %s
""" % (str(targetmember_dn))

                elif add_members_operation is False and (targetgroup[0].get('member') is not None and get_bytes(targetmember_dn) in targetgroup[0]['member']):
                    modified = True
                    addtargettogroup += """delete: member
member: %s
""" % (str(targetmember_dn))

            if modified is True:
                self.modify_ldif(addtargettogroup)

        except:
            self.transaction_cancel()
            raise

        else:
            self.transaction_commit()
+
    def prepare_attr_replace(self, msg, old, attr_name, value):
        """Changes the MessageElement with the given attr_name of the
        given Message. If the value is "" set an empty value and the flag
        FLAG_MOD_DELETE, otherwise set the new value and FLAG_MOD_REPLACE.
        If the value is None or the Message contains the attr_name with this
        value, nothing will changed.

        :param msg: Message the replace/delete element is appended to
        :param old: existing attributes (Message or dict-like)
        :param attr_name: name of the attribute to change
        :param value: new value, "" to delete, or None to leave unchanged
        """
        # skip unchanged attribute
        if value is None:
            return
        if attr_name in old and str(value) == str(old[attr_name]):
            return

        # remove attribute (empty value means delete; value must be sized)
        if len(value) == 0:
            if attr_name in old:
                el = ldb.MessageElement([], ldb.FLAG_MOD_DELETE, attr_name)
                msg.add(el)
            return

        # change attribute
        el = ldb.MessageElement(value, ldb.FLAG_MOD_REPLACE, attr_name)
        msg.add(el)
+
+ def fullname_from_names(self, given_name=None, initials=None, surname=None,
+ old_attrs=None, fallback_default=""):
+ """Prepares new combined fullname, using the name parts.
+ Used for things like displayName or cn.
+ Use the original name values, if no new one is specified."""
+ if old_attrs is None:
+ old_attrs = {}
+
+ attrs = {"givenName": given_name,
+ "initials": initials,
+ "sn": surname}
+
+ # if the attribute is not specified, try to use the old one
+ for attr_name, attr_value in attrs.items():
+ if attr_value is None and attr_name in old_attrs:
+ attrs[attr_name] = str(old_attrs[attr_name])
+
+ # add '.' to initials if initials are not None and not "" and if the initials
+ # don't have already a '.' at the end
+ if attrs["initials"] and not attrs["initials"].endswith('.'):
+ attrs["initials"] += '.'
+
+ # remove empty values (None and '')
+ attrs_values = list(filter(None, attrs.values()))
+
+ # fullname is the combination of not-empty values as string, separated by ' '
+ fullname = ' '.join(attrs_values)
+
+ if fullname == '':
+ return fallback_default
+
+ return fullname
+
+    def newuser(self, username, password,
+                force_password_change_at_next_login_req=False,
+                useusernameascn=False, userou=None, surname=None, givenname=None,
+                initials=None, profilepath=None, scriptpath=None, homedrive=None,
+                homedirectory=None, jobtitle=None, department=None, company=None,
+                description=None, mailaddress=None, internetaddress=None,
+                telephonenumber=None, physicaldeliveryoffice=None, sd=None,
+                setpassword=True, uidnumber=None, gidnumber=None, gecos=None,
+                loginshell=None, uid=None, nisdomain=None, unixhome=None,
+                smartcard_required=False):
+        """Adds a new user with additional parameters
+
+        :param username: Name of the new user
+        :param password: Password for the new user
+        :param force_password_change_at_next_login_req: Force password change
+        :param useusernameascn: Use username as cn rather that firstname +
+            initials + lastname
+        :param userou: Object container (without domainDN postfix) for new user
+        :param surname: Surname of the new user
+        :param givenname: First name of the new user
+        :param initials: Initials of the new user
+        :param profilepath: Profile path of the new user
+        :param scriptpath: Logon script path of the new user
+        :param homedrive: Home drive of the new user
+        :param homedirectory: Home directory of the new user
+        :param jobtitle: Job title of the new user
+        :param department: Department of the new user
+        :param company: Company of the new user
+        :param description: of the new user
+        :param mailaddress: Email address of the new user
+        :param internetaddress: Home page of the new user
+        :param telephonenumber: Phone number of the new user
+        :param physicaldeliveryoffice: Office location of the new user
+        :param sd: security descriptor of the object
+        :param setpassword: optionally disable password reset
+        :param uidnumber: RFC2307 Unix numeric UID of the new user
+        :param gidnumber: RFC2307 Unix primary GID of the new user
+        :param gecos: RFC2307 Unix GECOS field of the new user
+        :param loginshell: RFC2307 Unix login shell of the new user
+        :param uid: RFC2307 Unix username of the new user
+        :param nisdomain: RFC2307 Unix NIS domain of the new user
+        :param unixhome: RFC2307 Unix home directory of the new user
+        :param smartcard_required: set the UF_SMARTCARD_REQUIRED bit of the new user
+        """
+
+        displayname = self.fullname_from_names(given_name=givenname,
+                                               initials=initials,
+                                               surname=surname)
+        cn = username
+        # NOTE(review): useusernameascn defaults to False, so this branch only
+        # fires when a caller explicitly passes useusernameascn=None.  It
+        # looks like it may have been intended as "not useusernameascn" --
+        # confirm before changing, as callers may rely on current behaviour.
+        if useusernameascn is None and displayname != "":
+            cn = displayname
+
+        if userou:
+            user_dn = "CN=%s,%s,%s" % (cn, userou, self.domain_dn())
+        else:
+            # default to the well-known Users container of the default NC
+            user_dn = "CN=%s,%s" % (cn, self.get_wellknown_dn(
+                self.get_default_basedn(),
+                dsdb.DS_GUID_USERS_CONTAINER))
+
+        dnsdomain = ldb.Dn(self, self.domain_dn()).canonical_str().replace("/", "")
+        user_principal_name = "%s@%s" % (username, dnsdomain)
+        # The new user record. Note the reliance on the SAMLDB module which
+        # fills in the default information
+        ldbmessage = {"dn": user_dn,
+                      "sAMAccountName": username,
+                      "userPrincipalName": user_principal_name,
+                      "objectClass": "user"}
+
+        if smartcard_required:
+            ldbmessage["userAccountControl"] = str(dsdb.UF_NORMAL_ACCOUNT |
+                                                   dsdb.UF_SMARTCARD_REQUIRED)
+            # a smartcard-only account gets no usable password
+            setpassword = False
+
+        if surname is not None:
+            ldbmessage["sn"] = surname
+
+        if givenname is not None:
+            ldbmessage["givenName"] = givenname
+
+        if displayname != "":
+            ldbmessage["displayName"] = displayname
+            ldbmessage["name"] = displayname
+
+        if initials is not None:
+            ldbmessage["initials"] = '%s.' % initials
+
+        if profilepath is not None:
+            ldbmessage["profilePath"] = profilepath
+
+        if scriptpath is not None:
+            ldbmessage["scriptPath"] = scriptpath
+
+        if homedrive is not None:
+            ldbmessage["homeDrive"] = homedrive
+
+        if homedirectory is not None:
+            ldbmessage["homeDirectory"] = homedirectory
+
+        if jobtitle is not None:
+            ldbmessage["title"] = jobtitle
+
+        if department is not None:
+            ldbmessage["department"] = department
+
+        if company is not None:
+            ldbmessage["company"] = company
+
+        if description is not None:
+            ldbmessage["description"] = description
+
+        if mailaddress is not None:
+            ldbmessage["mail"] = mailaddress
+
+        if internetaddress is not None:
+            ldbmessage["wWWHomePage"] = internetaddress
+
+        if telephonenumber is not None:
+            ldbmessage["telephoneNumber"] = telephonenumber
+
+        if physicaldeliveryoffice is not None:
+            ldbmessage["physicalDeliveryOfficeName"] = physicaldeliveryoffice
+
+        if sd is not None:
+            ldbmessage["nTSecurityDescriptor"] = ndr_pack(sd)
+
+        # Optional second modify for the RFC2307 (posix) attributes; only
+        # built when at least one unix attribute was supplied
+        ldbmessage2 = None
+        if any(map(lambda b: b is not None, (uid, uidnumber, gidnumber, gecos,
+                                             loginshell, nisdomain, unixhome))):
+            ldbmessage2 = ldb.Message()
+            ldbmessage2.dn = ldb.Dn(self, user_dn)
+            if uid is not None:
+                ldbmessage2["uid"] = ldb.MessageElement(str(uid), ldb.FLAG_MOD_REPLACE, 'uid')
+            if uidnumber is not None:
+                ldbmessage2["uidNumber"] = ldb.MessageElement(str(uidnumber), ldb.FLAG_MOD_REPLACE, 'uidNumber')
+            if gidnumber is not None:
+                ldbmessage2["gidNumber"] = ldb.MessageElement(str(gidnumber), ldb.FLAG_MOD_REPLACE, 'gidNumber')
+            if gecos is not None:
+                ldbmessage2["gecos"] = ldb.MessageElement(str(gecos), ldb.FLAG_MOD_REPLACE, 'gecos')
+            if loginshell is not None:
+                ldbmessage2["loginShell"] = ldb.MessageElement(str(loginshell), ldb.FLAG_MOD_REPLACE, 'loginShell')
+            if unixhome is not None:
+                ldbmessage2["unixHomeDirectory"] = ldb.MessageElement(
+                    str(unixhome), ldb.FLAG_MOD_REPLACE, 'unixHomeDirectory')
+            if nisdomain is not None:
+                ldbmessage2["msSFU30NisDomain"] = ldb.MessageElement(
+                    str(nisdomain), ldb.FLAG_MOD_REPLACE, 'msSFU30NisDomain')
+                ldbmessage2["msSFU30Name"] = ldb.MessageElement(
+                    str(username), ldb.FLAG_MOD_REPLACE, 'msSFU30Name')
+                # NOTE(review): fixed placeholder string, presumably a dummy
+                # value rather than a real secret -- confirm intent
+                ldbmessage2["unixUserPassword"] = ldb.MessageElement(
+                    'ABCD!efgh12345$67890', ldb.FLAG_MOD_REPLACE,
+                    'unixUserPassword')
+
+        self.transaction_start()
+        try:
+            self.add(ldbmessage)
+
+            # remove the half-created user again if any later step fails
+            with self._CleanUpOnError(self, user_dn):
+                if ldbmessage2:
+                    self.modify(ldbmessage2)
+
+                # Sets the password for it
+                if setpassword:
+                    self.setpassword(("(distinguishedName=%s)" %
+                                      ldb.binary_encode(user_dn)),
+                                     password,
+                                     force_password_change_at_next_login_req)
+        except:
+            self.transaction_cancel()
+            raise
+        else:
+            self.transaction_commit()
+
+    def newcontact(self,
+                   fullcontactname=None,
+                   ou=None,
+                   surname=None,
+                   givenname=None,
+                   initials=None,
+                   displayname=None,
+                   jobtitle=None,
+                   department=None,
+                   company=None,
+                   description=None,
+                   mailaddress=None,
+                   internetaddress=None,
+                   telephonenumber=None,
+                   mobilenumber=None,
+                   physicaldeliveryoffice=None):
+        """Adds a new contact with additional parameters
+
+        :param fullcontactname: Optional full name of the new contact
+        :param ou: Object container for new contact
+        :param surname: Surname of the new contact
+        :param givenname: First name of the new contact
+        :param initials: Initials of the new contact
+        :param displayname: displayName of the new contact
+        :param jobtitle: Job title of the new contact
+        :param department: Department of the new contact
+        :param company: Company of the new contact
+        :param description: Description of the new contact
+        :param mailaddress: Email address of the new contact
+        :param internetaddress: Home page of the new contact
+        :param telephonenumber: Phone number of the new contact
+        :param mobilenumber: Primary mobile number of the new contact
+        :param physicaldeliveryoffice: Office location of the new contact
+        :return: the cn used for the new contact
+        :raises Exception: when no name could be derived for the contact
+        """
+
+        # Prepare the contact name like the RSAT, using the name parts.
+        cn = self.fullname_from_names(given_name=givenname,
+                                      initials=initials,
+                                      surname=surname)
+
+        # Use the specified fullcontactname instead of the previously prepared
+        # contact name, if it is specified.
+        # This is similar to the "Full name" value of the RSAT.
+        if fullcontactname is not None:
+            cn = fullcontactname
+
+        if fullcontactname is None and cn == "":
+            raise Exception('No name for contact specified')
+
+        # default container is the domain root unless an OU was given
+        contactcontainer_dn = self.domain_dn()
+        if ou:
+            contactcontainer_dn = self.normalize_dn_in_domain(ou)
+
+        contact_dn = "CN=%s,%s" % (cn, contactcontainer_dn)
+
+        ldbmessage = {"dn": contact_dn,
+                      "objectClass": "contact",
+                      }
+
+        if surname is not None:
+            ldbmessage["sn"] = surname
+
+        if givenname is not None:
+            ldbmessage["givenName"] = givenname
+
+        if displayname is not None:
+            ldbmessage["displayName"] = displayname
+
+        if initials is not None:
+            ldbmessage["initials"] = '%s.' % initials
+
+        if jobtitle is not None:
+            ldbmessage["title"] = jobtitle
+
+        if department is not None:
+            ldbmessage["department"] = department
+
+        if company is not None:
+            ldbmessage["company"] = company
+
+        if description is not None:
+            ldbmessage["description"] = description
+
+        if mailaddress is not None:
+            ldbmessage["mail"] = mailaddress
+
+        if internetaddress is not None:
+            ldbmessage["wWWHomePage"] = internetaddress
+
+        if telephonenumber is not None:
+            ldbmessage["telephoneNumber"] = telephonenumber
+
+        if mobilenumber is not None:
+            ldbmessage["mobile"] = mobilenumber
+
+        if physicaldeliveryoffice is not None:
+            ldbmessage["physicalDeliveryOfficeName"] = physicaldeliveryoffice
+
+        self.add(ldbmessage)
+
+        return cn
+
+    def newcomputer(self, computername, computerou=None, description=None,
+                    prepare_oldjoin=False, ip_address_list=None,
+                    service_principal_name_list=None):
+        """Adds a new computer account with additional parameters
+
+        :param computername: Name of the new computer
+        :param computerou: Object container for new computer
+        :param description: Description of the new computer
+        :param prepare_oldjoin: Preset computer password for oldjoin mechanism
+        :param ip_address_list: ip address list for DNS A or AAAA record
+        :param service_principal_name_list: string list of servicePincipalName
+        :raises Exception: if the computer name contains an embedded '$'
+        """
+
+        # strip a single trailing '$' (the sAMAccountName convention);
+        # any remaining '$' in the name is illegal
+        cn = re.sub(r"\$$", "", computername)
+        if cn.count('$'):
+            raise Exception('Illegal computername "%s"' % computername)
+        samaccountname = "%s$" % cn
+
+        computercontainer_dn = self.get_wellknown_dn(self.get_default_basedn(),
+                                                     dsdb.DS_GUID_COMPUTERS_CONTAINER)
+        if computerou:
+            computercontainer_dn = self.normalize_dn_in_domain(computerou)
+
+        computer_dn = "CN=%s,%s" % (cn, computercontainer_dn)
+
+        ldbmessage = {"dn": computer_dn,
+                      "sAMAccountName": samaccountname,
+                      "objectClass": "computer",
+                      }
+
+        if description is not None:
+            ldbmessage["description"] = description
+
+        if service_principal_name_list:
+            ldbmessage["servicePrincipalName"] = service_principal_name_list
+
+        # new computers start disabled unless prepared for an "old style" join
+        accountcontrol = str(dsdb.UF_WORKSTATION_TRUST_ACCOUNT |
+                             dsdb.UF_ACCOUNTDISABLE)
+        if prepare_oldjoin:
+            accountcontrol = str(dsdb.UF_WORKSTATION_TRUST_ACCOUNT)
+        ldbmessage["userAccountControl"] = accountcontrol
+
+        if ip_address_list:
+            # NOTE(review): only dNSHostName is set here; the DNS A/AAAA
+            # records themselves are presumably created by the caller --
+            # confirm against callers
+            ldbmessage['dNSHostName'] = '{}.{}'.format(
+                cn, self.domain_dns_name())
+
+        self.transaction_start()
+        try:
+            self.add(ldbmessage)
+
+            if prepare_oldjoin:
+                # oldjoin presets the machine password to the lower-cased name
+                password = cn.lower()
+                with self._CleanUpOnError(self, computer_dn):
+                    self.setpassword(("(distinguishedName=%s)" %
+                                      ldb.binary_encode(computer_dn)),
+                                     password, False)
+        except:
+            self.transaction_cancel()
+            raise
+        else:
+            self.transaction_commit()
+
+ def deleteuser(self, username):
+ """Deletes a user
+
+ :param username: Name of the target user
+ """
+
+ filter = "(&(sAMAccountName=%s)(objectCategory=%s,%s))" % (ldb.binary_encode(username), "CN=Person,CN=Schema,CN=Configuration", self.domain_dn())
+ self.transaction_start()
+ try:
+ target = self.search(base=self.domain_dn(), scope=ldb.SCOPE_SUBTREE,
+ expression=filter, attrs=[])
+ if len(target) == 0:
+ raise Exception('Unable to find user "%s"' % username)
+ assert(len(target) == 1)
+ self.delete(target[0].dn)
+ except:
+ self.transaction_cancel()
+ raise
+ else:
+ self.transaction_commit()
+
+    def setpassword(self, search_filter, password,
+                    force_change_at_next_login=False, username=None):
+        """Sets the password for a user
+
+        :param search_filter: LDAP filter to find the user (eg
+                              samccountname=name)
+        :param password: Password for the user
+        :param force_change_at_next_login: Force password change
+        :param username: Optional name used in the "not found" error
+            message; falls back to the search filter
+        :raises Exception: when the filter matches no user or several users
+        """
+        self.transaction_start()
+        try:
+            res = self.search(base=self.domain_dn(), scope=ldb.SCOPE_SUBTREE,
+                              expression=search_filter, attrs=[])
+            if len(res) == 0:
+                raise Exception('Unable to find user "%s"' % (username or search_filter))
+            if len(res) > 1:
+                raise Exception('Matched %u multiple users with filter "%s"' % (len(res), search_filter))
+            user_dn = res[0].dn
+            if not isinstance(password, str):
+                pw = password.decode('utf-8')
+            else:
+                pw = password
+            # AD expects unicodePwd as the password surrounded by double
+            # quotes and encoded in UTF-16-LE (MS-ADTS); the LDIF below
+            # carries it base64-encoded ("::")
+            pw = ('"' + pw + '"').encode('utf-16-le')
+            setpw = """
+dn: %s
+changetype: modify
+replace: unicodePwd
+unicodePwd:: %s
+""" % (user_dn, base64.b64encode(pw).decode('utf-8'))
+
+            self.modify_ldif(setpw)
+
+            if force_change_at_next_login:
+                self.force_password_change_at_next_login(
+                    "(distinguishedName=" + str(user_dn) + ")")
+
+            # modify the userAccountControl to remove the disabled bit
+            self.enable_account(search_filter)
+        except:
+            self.transaction_cancel()
+            raise
+        else:
+            self.transaction_commit()
+
+ def setexpiry(self, search_filter, expiry_seconds, no_expiry_req=False):
+ """Sets the account expiry for a user
+
+ :param search_filter: LDAP filter to find the user (eg
+ samaccountname=name)
+ :param expiry_seconds: expiry time from now in seconds
+ :param no_expiry_req: if set, then don't expire password
+ """
+ self.transaction_start()
+ try:
+ res = self.search(base=self.domain_dn(), scope=ldb.SCOPE_SUBTREE,
+ expression=search_filter,
+ attrs=["userAccountControl", "accountExpires"])
+ if len(res) == 0:
+ raise Exception('Unable to find user "%s"' % search_filter)
+ assert(len(res) == 1)
+ user_dn = res[0].dn
+
+ userAccountControl = int(res[0]["userAccountControl"][0])
+ if no_expiry_req:
+ userAccountControl = userAccountControl | 0x10000
+ accountExpires = 0
+ else:
+ userAccountControl = userAccountControl & ~0x10000
+ accountExpires = samba.unix2nttime(expiry_seconds + int(time.time()))
+
+ setexp = """
+dn: %s
+changetype: modify
+replace: userAccountControl
+userAccountControl: %u
+replace: accountExpires
+accountExpires: %u
+""" % (user_dn, userAccountControl, accountExpires)
+
+ self.modify_ldif(setexp)
+ except:
+ self.transaction_cancel()
+ raise
+ else:
+ self.transaction_commit()
+
+    def set_domain_sid(self, sid):
+        """Change the domain SID used by this LDB.
+
+        :param sid: The new domain sid to use.
+        """
+        dsdb._samdb_set_domain_sid(self, sid)
+
+    def get_domain_sid(self):
+        """Read the domain SID used by this LDB. """
+        return dsdb._samdb_get_domain_sid(self)
+
+    # expose the domain SID as a read/write property as well
+    domain_sid = property(get_domain_sid, set_domain_sid,
+                          doc="SID for the domain")
+
+    def set_invocation_id(self, invocation_id):
+        """Set the invocation id for this SamDB handle.
+
+        :param invocation_id: GUID of the invocation id.
+        """
+        dsdb._dsdb_set_ntds_invocation_id(self, invocation_id)
+
+    def get_invocation_id(self):
+        """Get the invocation_id id"""
+        return dsdb._samdb_ntds_invocation_id(self)
+
+    # expose the invocation id as a read/write property as well
+    invocation_id = property(get_invocation_id, set_invocation_id,
+                             doc="Invocation ID GUID")
+
+    def get_oid_from_attid(self, attid):
+        """return the attribute OID (dotted string) for a DRSUAPI
+        attribute id"""
+        return dsdb._dsdb_get_oid_from_attid(self, attid)
+
+    def get_attid_from_lDAPDisplayName(self, ldap_display_name,
+                                       is_schema_nc=False):
+        """return the attribute ID for a LDAP attribute as an integer as found in DRSUAPI"""
+        return dsdb._dsdb_get_attid_from_lDAPDisplayName(self,
+                                                         ldap_display_name, is_schema_nc)
+
+    def get_syntax_oid_from_lDAPDisplayName(self, ldap_display_name):
+        """return the syntax OID for a LDAP attribute as a string"""
+        return dsdb._dsdb_get_syntax_oid_from_lDAPDisplayName(self, ldap_display_name)
+
+    def get_systemFlags_from_lDAPDisplayName(self, ldap_display_name):
+        """return the systemFlags for a LDAP attribute as a integer"""
+        return dsdb._dsdb_get_systemFlags_from_lDAPDisplayName(self, ldap_display_name)
+
+    def get_linkId_from_lDAPDisplayName(self, ldap_display_name):
+        """return the linkID for a LDAP attribute as a integer"""
+        return dsdb._dsdb_get_linkId_from_lDAPDisplayName(self, ldap_display_name)
+
+    def get_lDAPDisplayName_by_attid(self, attid):
+        """return the lDAPDisplayName from an integer DRS attribute ID"""
+        return dsdb._dsdb_get_lDAPDisplayName_by_attid(self, attid)
+
+    def get_backlink_from_lDAPDisplayName(self, ldap_display_name):
+        """return the attribute name of the corresponding backlink from the name
+        of a forward link attribute. If there is no backlink return None"""
+        return dsdb._dsdb_get_backlink_from_lDAPDisplayName(self, ldap_display_name)
+
+    def set_ntds_settings_dn(self, ntds_settings_dn):
+        """Set the NTDS Settings DN, as would be returned on the dsServiceName
+        rootDSE attribute.
+
+        This allows the DN to be set before the database fully exists
+
+        :param ntds_settings_dn: The new DN to use
+        """
+        dsdb._samdb_set_ntds_settings_dn(self, ntds_settings_dn)
+
+    def get_ntds_GUID(self):
+        """Get the NTDS objectGUID"""
+        return dsdb._samdb_ntds_objectGUID(self)
+
+    def get_timestr(self):
+        """Get the current time as generalized time string, as reported
+        by the rootDSE currentTime attribute"""
+        res = self.search(base="",
+                          scope=ldb.SCOPE_BASE,
+                          attrs=["currentTime"])
+        return str(res[0]["currentTime"][0])
+
+    def get_time(self):
+        """Get the current time as UNIX time"""
+        return ldb.string_to_time(self.get_timestr())
+
+    def get_nttime(self):
+        """Get the current time as NT time"""
+        return samba.unix2nttime(self.get_time())
+
+    def server_site_name(self):
+        """Get the server site name"""
+        return dsdb._samdb_server_site_name(self)
+
+    def host_dns_name(self):
+        """return the DNS name of this host, as stored in the rootDSE
+        dNSHostName attribute"""
+        res = self.search(base='', scope=ldb.SCOPE_BASE, attrs=['dNSHostName'])
+        return str(res[0]['dNSHostName'][0])
+
+    def domain_dns_name(self):
+        """return the DNS name of the domain root"""
+        domain_dn = self.get_default_basedn()
+        # the first component of the canonical form is the DNS name
+        return domain_dn.canonical_str().split('/')[0]
+
+    def domain_netbios_name(self):
+        """return the NetBIOS name of the domain root, or None when no
+        matching crossRef object can be found"""
+        domain_dn = self.get_default_basedn()
+        dns_name = self.domain_dns_name()
+        # look up the crossRef for this NC in the Partitions container
+        filter = "(&(objectClass=crossRef)(nETBIOSName=*)(ncName=%s)(dnsroot=%s))" % (domain_dn, dns_name)
+        partitions_dn = self.get_partitions_dn()
+        res = self.search(partitions_dn,
+                          scope=ldb.SCOPE_ONELEVEL,
+                          expression=filter)
+        try:
+            netbios_domain = res[0]["nETBIOSName"][0].decode()
+        except IndexError:
+            # no crossRef matched
+            return None
+        return netbios_domain
+
+    def forest_dns_name(self):
+        """return the DNS name of the forest root"""
+        forest_dn = self.get_root_basedn()
+        return forest_dn.canonical_str().split('/')[0]
+
+    def load_partition_usn(self, base_dn):
+        """Load the uptodateness USN information for the given partition"""
+        return dsdb._dsdb_load_partition_usn(self, base_dn)
+
+    def set_schema(self, schema, write_indices_and_attributes=True):
+        """Attach a samba.schema.Schema object to this connection"""
+        self.set_schema_from_ldb(schema.ldb, write_indices_and_attributes=write_indices_and_attributes)
+
+    def set_schema_from_ldb(self, ldb_conn, write_indices_and_attributes=True):
+        """Attach the schema held by another ldb connection"""
+        dsdb._dsdb_set_schema_from_ldb(self, ldb_conn, write_indices_and_attributes)
+
+    def set_schema_update_now(self):
+        """Trigger an immediate schema cache reload via the special
+        schemaUpdateNow rootDSE modify operation"""
+        ldif = """
+dn:
+changetype: modify
+add: schemaUpdateNow
+schemaUpdateNow: 1
+"""
+        self.modify_ldif(ldif)
+
+    def dsdb_DsReplicaAttribute(self, ldb, ldap_display_name, ldif_elements):
+        """convert a list of attribute values to a DRSUAPI DsReplicaAttribute"""
+        return dsdb._dsdb_DsReplicaAttribute(ldb, ldap_display_name, ldif_elements)
+
+    def dsdb_normalise_attributes(self, ldb, ldap_display_name, ldif_elements):
+        """normalise a list of attribute values"""
+        return dsdb._dsdb_normalise_attributes(ldb, ldap_display_name, ldif_elements)
+
+ def get_attribute_from_attid(self, attid):
+ """ Get from an attid the associated attribute
+
+ :param attid: The attribute id for searched attribute
+ :return: The name of the attribute associated with this id
+ """
+ if len(self.hash_oid_name.keys()) == 0:
+ self._populate_oid_attid()
+ if self.get_oid_from_attid(attid) in self.hash_oid_name:
+ return self.hash_oid_name[self.get_oid_from_attid(attid)]
+ else:
+ return None
+
+    def _populate_oid_attid(self):
+        """Populate the hash hash_oid_name.
+
+        This hash contains the oid of the attribute as a key and
+        its display name as a value
+        """
+        self.hash_oid_name = {}
+        # search the whole forest (search_options:1:2 = phantom root)
+        res = self.search(expression="objectClass=attributeSchema",
+                          controls=["search_options:1:2"],
+                          attrs=["attributeID",
+                                 "lDAPDisplayName"])
+        if len(res) > 0:
+            for e in res:
+                strDisplay = str(e.get("lDAPDisplayName"))
+                self.hash_oid_name[str(e.get("attributeID"))] = strDisplay
+
+    def get_attribute_replmetadata_version(self, dn, att):
+        """Get the version field from the replPropertyMetaData for
+        the given attribute
+
+        :param dn: The DN on which we want to get the version
+        :param att: The name of the attribute
+        :return: The value of the version field in the replPropertyMetaData
+            for the given attribute. None if the attribute is not replicated
+        """
+
+        res = self.search(expression="distinguishedName=%s" % dn,
+                          scope=ldb.SCOPE_SUBTREE,
+                          controls=["search_options:1:2"],
+                          attrs=["replPropertyMetaData"])
+        if len(res) == 0:
+            return None
+
+        repl = ndr_unpack(drsblobs.replPropertyMetaDataBlob,
+                          res[0]["replPropertyMetaData"][0])
+        ctr = repl.ctr
+        # make sure the attid OID -> display name cache is populated
+        if len(self.hash_oid_name.keys()) == 0:
+            self._populate_oid_attid()
+        for o in ctr.array:
+            # match case-insensitively on the attribute display name
+            att_oid = self.get_oid_from_attid(o.attid)
+            if att_oid in self.hash_oid_name and\
+               att.lower() == self.hash_oid_name[att_oid].lower():
+                return o.version
+        return None
+
+    def set_attribute_replmetadata_version(self, dn, att, value,
+                                           addifnotexist=False):
+        """Set the version field in the replPropertyMetaData entry for the
+        given attribute on the given object, updating the originating
+        change metadata to this DSA at the current time.
+
+        :param dn: DN of the object to modify
+        :param att: lDAPDisplayName of the attribute
+        :param value: new version number
+        :param addifnotexist: if True, append a new metadata entry when the
+            attribute has none yet
+        :return: None when the object has no replPropertyMetaData
+        """
+        res = self.search(expression="distinguishedName=%s" % dn,
+                          scope=ldb.SCOPE_SUBTREE,
+                          controls=["search_options:1:2"],
+                          attrs=["replPropertyMetaData"])
+        if len(res) == 0:
+            return None
+
+        repl = ndr_unpack(drsblobs.replPropertyMetaDataBlob,
+                          res[0]["replPropertyMetaData"][0])
+        ctr = repl.ctr
+        now = samba.unix2nttime(int(time.time()))
+        found = False
+        # make sure the attid OID -> display name cache is populated
+        if len(self.hash_oid_name.keys()) == 0:
+            self._populate_oid_attid()
+        for o in ctr.array:
+            # match case-insensitively on the attribute display name
+            att_oid = self.get_oid_from_attid(o.attid)
+            if att_oid in self.hash_oid_name and\
+               att.lower() == self.hash_oid_name[att_oid].lower():
+                found = True
+                seq = self.sequence_number(ldb.SEQ_NEXT)
+                o.version = value
+                o.originating_change_time = now
+                o.originating_invocation_id = misc.GUID(self.get_invocation_id())
+                o.originating_usn = seq
+                o.local_usn = seq
+
+        if not found and addifnotexist and len(ctr.array) > 0:
+            o2 = drsblobs.replPropertyMetaData1()
+            # NOTE(review): 589914 is presumably the DRSUAPI attid of the
+            # "description" attribute -- confirm against the schema
+            o2.attid = 589914
+            att_oid = self.get_oid_from_attid(o2.attid)
+            seq = self.sequence_number(ldb.SEQ_NEXT)
+            o2.version = value
+            o2.originating_change_time = now
+            o2.originating_invocation_id = misc.GUID(self.get_invocation_id())
+            o2.originating_usn = seq
+            o2.local_usn = seq
+            found = True
+            tab = ctr.array
+            tab.append(o2)
+            ctr.count = ctr.count + 1
+            ctr.array = tab
+
+        if found:
+            replBlob = ndr_pack(repl)
+            msg = ldb.Message()
+            msg.dn = res[0].dn
+            msg["replPropertyMetaData"] = \
+                ldb.MessageElement(replBlob,
+                                   ldb.FLAG_MOD_REPLACE,
+                                   "replPropertyMetaData")
+            # NOTE(review): the local_oid control presumably allows writing
+            # replPropertyMetaData directly (change-replmetadata control) --
+            # confirm against dsdb control definitions
+            self.modify(msg, ["local_oid:1.3.6.1.4.1.7165.4.3.14:0"])
+
+    def write_prefixes_from_schema(self):
+        """Write the prefixMap from the schema attached to this ldb"""
+        dsdb._dsdb_write_prefixes_from_schema_to_ldb(self)
+
+    def get_partitions_dn(self):
+        """Return the DN of the Partitions container"""
+        return dsdb._dsdb_get_partitions_dn(self)
+
+    def get_nc_root(self, dn):
+        """Return the naming context root DN for the given DN"""
+        return dsdb._dsdb_get_nc_root(self, dn)
+
+    def get_wellknown_dn(self, nc_root, wkguid):
+        """Return the DN of a well-known object (identified by its GUID)
+        below the given NC root, caching results per NC in
+        self.hash_well_known.
+
+        :param nc_root: DN of the naming context root
+        :param wkguid: well-known GUID identifying the object
+        :return: the cached or freshly looked-up DN, or None when not found
+        """
+        # one cache dict per naming context, keyed by the NC root string
+        h_nc = self.hash_well_known.get(str(nc_root))
+        dn = None
+        if h_nc is not None:
+            dn = h_nc.get(wkguid)
+        if dn is None:
+            dn = dsdb._dsdb_get_wellknown_dn(self, nc_root, wkguid)
+            if dn is None:
+                # negative lookups are not cached
+                return dn
+            if h_nc is None:
+                self.hash_well_known[str(nc_root)] = {}
+                h_nc = self.hash_well_known[str(nc_root)]
+            h_nc[wkguid] = dn
+        return dn
+
+    def set_minPwdAge(self, value):
+        """Set the minPwdAge attribute on the domain object"""
+        if not isinstance(value, bytes):
+            value = str(value).encode('utf8')
+        m = ldb.Message()
+        m.dn = ldb.Dn(self, self.domain_dn())
+        m["minPwdAge"] = ldb.MessageElement(value, ldb.FLAG_MOD_REPLACE, "minPwdAge")
+        self.modify(m)
+
+    def get_minPwdAge(self):
+        """Return the domain minPwdAge as an int, or None when unset"""
+        res = self.search(self.domain_dn(), scope=ldb.SCOPE_BASE, attrs=["minPwdAge"])
+        if len(res) == 0:
+            return None
+        elif "minPwdAge" not in res[0]:
+            return None
+        else:
+            return int(res[0]["minPwdAge"][0])
+
+    def set_maxPwdAge(self, value):
+        """Set the maxPwdAge attribute on the domain object"""
+        if not isinstance(value, bytes):
+            value = str(value).encode('utf8')
+        m = ldb.Message()
+        m.dn = ldb.Dn(self, self.domain_dn())
+        m["maxPwdAge"] = ldb.MessageElement(value, ldb.FLAG_MOD_REPLACE, "maxPwdAge")
+        self.modify(m)
+
+    def get_maxPwdAge(self):
+        """Return the domain maxPwdAge as an int, or None when unset"""
+        res = self.search(self.domain_dn(), scope=ldb.SCOPE_BASE, attrs=["maxPwdAge"])
+        if len(res) == 0:
+            return None
+        elif "maxPwdAge" not in res[0]:
+            return None
+        else:
+            return int(res[0]["maxPwdAge"][0])
+
+    def set_minPwdLength(self, value):
+        """Set the minPwdLength attribute on the domain object"""
+        if not isinstance(value, bytes):
+            value = str(value).encode('utf8')
+        m = ldb.Message()
+        m.dn = ldb.Dn(self, self.domain_dn())
+        m["minPwdLength"] = ldb.MessageElement(value, ldb.FLAG_MOD_REPLACE, "minPwdLength")
+        self.modify(m)
+
+    def get_minPwdLength(self):
+        """Return the domain minPwdLength as an int, or None when unset"""
+        res = self.search(self.domain_dn(), scope=ldb.SCOPE_BASE, attrs=["minPwdLength"])
+        if len(res) == 0:
+            return None
+        elif "minPwdLength" not in res[0]:
+            return None
+        else:
+            return int(res[0]["minPwdLength"][0])
+
+    def set_pwdProperties(self, value):
+        """Set the pwdProperties attribute on the domain object"""
+        if not isinstance(value, bytes):
+            value = str(value).encode('utf8')
+        m = ldb.Message()
+        m.dn = ldb.Dn(self, self.domain_dn())
+        m["pwdProperties"] = ldb.MessageElement(value, ldb.FLAG_MOD_REPLACE, "pwdProperties")
+        self.modify(m)
+
+    def get_pwdProperties(self):
+        """Return the domain pwdProperties as an int, or None when unset"""
+        res = self.search(self.domain_dn(), scope=ldb.SCOPE_BASE, attrs=["pwdProperties"])
+        if len(res) == 0:
+            return None
+        elif "pwdProperties" not in res[0]:
+            return None
+        else:
+            return int(res[0]["pwdProperties"][0])
+
+    def set_dsheuristics(self, dsheuristics):
+        """Set (or, when None, delete) the dSHeuristics attribute on the
+        Directory Service configuration object"""
+        m = ldb.Message()
+        m.dn = ldb.Dn(self, "CN=Directory Service,CN=Windows NT,CN=Services,%s"
+                      % self.get_config_basedn().get_linearized())
+        if dsheuristics is not None:
+            m["dSHeuristics"] = \
+                ldb.MessageElement(dsheuristics,
+                                   ldb.FLAG_MOD_REPLACE,
+                                   "dSHeuristics")
+        else:
+            # None removes the attribute entirely
+            m["dSHeuristics"] = \
+                ldb.MessageElement([], ldb.FLAG_MOD_DELETE,
+                                   "dSHeuristics")
+        self.modify(m)
+
+    def get_dsheuristics(self):
+        """Return the dSHeuristics attribute value, or None when unset"""
+        res = self.search("CN=Directory Service,CN=Windows NT,CN=Services,%s"
+                          % self.get_config_basedn().get_linearized(),
+                          scope=ldb.SCOPE_BASE, attrs=["dSHeuristics"])
+        if len(res) == 0:
+            dsheuristics = None
+        elif "dSHeuristics" in res[0]:
+            dsheuristics = res[0]["dSHeuristics"][0]
+        else:
+            dsheuristics = None
+
+        return dsheuristics
+
+    def create_ou(self, ou_dn, description=None, name=None, sd=None):
+        """Creates an organizationalUnit object
+        :param ou_dn: dn of the new object
+        :param description: description attribute
+        :param name: name attribute
+        :param sd: security descriptor of the object, can be
+            an SDDL string or security.descriptor type
+        """
+        m = {"dn": ou_dn,
+             "objectClass": "organizationalUnit"}
+
+        if description:
+            m["description"] = description
+        if name:
+            m["name"] = name
+
+        if sd:
+            m["nTSecurityDescriptor"] = ndr_pack(sd)
+        self.add(m)
+
+    def sequence_number(self, seq_type):
+        """Returns the value of the sequence number according to the requested type
+
+        :param seq_type: type of sequence number
+        :return: the sequence number value
+        """
+        # wrap the base-class call in a transaction so SEQ_NEXT allocations
+        # are committed
+        self.transaction_start()
+        try:
+            seq = super().sequence_number(seq_type)
+        except:
+            self.transaction_cancel()
+            raise
+        else:
+            self.transaction_commit()
+        return seq
+
+    def get_dsServiceName(self):
+        """get the NTDS DN from the rootDSE"""
+        res = self.search(base="", scope=ldb.SCOPE_BASE, attrs=["dsServiceName"])
+        return str(res[0]["dsServiceName"][0])
+
+    def get_serverName(self):
+        """get the server DN from the rootDSE"""
+        res = self.search(base="", scope=ldb.SCOPE_BASE, attrs=["serverName"])
+        return str(res[0]["serverName"][0])
+
+    def dns_lookup(self, dns_name, dns_partition=None):
+        """Do a DNS lookup in the database, returns the NDR database structures"""
+        if dns_partition is None:
+            return dsdb_dns.lookup(self, dns_name)
+        else:
+            return dsdb_dns.lookup(self, dns_name,
+                                   dns_partition=dns_partition)
+
+    def dns_extract(self, el):
+        """Return the NDR database structures from a dnsRecord element"""
+        return dsdb_dns.extract(self, el)
+
+    def dns_replace(self, dns_name, new_records):
+        """Do a DNS modification on the database, sets the NDR database
+        structures on a DNS name
+        """
+        return dsdb_dns.replace(self, dns_name, new_records)
+
+    def dns_replace_by_dn(self, dn, new_records):
+        """Do a DNS modification on the database, sets the NDR database
+        structures on a LDB DN
+
+        This routine is important because if the last record on the DN
+        is removed, this routine will put a tombstone in the record.
+        """
+        return dsdb_dns.replace_by_dn(self, dn, new_records)
+
+    def garbage_collect_tombstones(self, dn, current_time,
+                                   tombstone_lifetime=None):
+        """garbage_collect_tombstones(lp, samdb, [dn], current_time, tombstone_lifetime)
+        -> (num_objects_expunged, num_links_expunged)
+
+        :param dn: partition DN(s) to garbage collect
+        :param current_time: time used as "now" for lifetime comparison
+        :param tombstone_lifetime: optional lifetime override (presumably in
+            days -- confirm against the dsdb binding)
+        :raises SamDBError: when the AD DC support is not built
+        """
+
+        if not is_ad_dc_built():
+            raise SamDBError('Cannot garbage collect tombstones: '
+                             'AD DC was not built')
+
+        if tombstone_lifetime is None:
+            return dsdb._dsdb_garbage_collect_tombstones(self, dn,
+                                                         current_time)
+        else:
+            return dsdb._dsdb_garbage_collect_tombstones(self, dn,
+                                                         current_time,
+                                                         tombstone_lifetime)
+
+    def create_own_rid_set(self):
+        """create a RID set for this DSA"""
+        return dsdb._dsdb_create_own_rid_set(self)
+
+    def allocate_rid(self):
+        """return a new RID from the RID Pool on this DSA"""
+        return dsdb._dsdb_allocate_rid(self)
+
+    def next_free_rid(self):
+        """return the next free RID from the RID Pool on this DSA.
+
+        :note: This function is not intended for general use, and care must be
+            taken if it is used to generate objectSIDs. The returned RID is not
+            formally reserved for use, creating the possibility of duplicate
+            objectSIDs.
+        """
+        # only the low bound of the free range is needed here
+        rid, _ = self.free_rid_bounds()
+        return rid
+
+    def free_rid_bounds(self):
+        """return the low and high bounds (inclusive) of RIDs that are
+        available for use in this DSA's current RID pool.
+
+        :note: This function is not intended for general use, and care must be
+            taken if it is used to generate objectSIDs. The returned range of
+            RIDs is not formally reserved for use, creating the possibility of
+            duplicate objectSIDs.
+        :raises ldb.LdbError: when no RID Set can be found, the pool values
+            are invalid, or the pool has run out of RIDs
+        """
+        # Get DN of this server's RID Set: follow serverReference from the
+        # server object, then rIDSetReferences from the referenced account
+        server_name_dn = ldb.Dn(self, self.get_serverName())
+        res = self.search(base=server_name_dn,
+                          scope=ldb.SCOPE_BASE,
+                          attrs=["serverReference"])
+        try:
+            server_ref = res[0]["serverReference"]
+        except KeyError:
+            raise ldb.LdbError(
+                ldb.ERR_NO_SUCH_ATTRIBUTE,
+                "No RID Set DN - "
+                "Cannot find attribute serverReference of %s "
+                "to calculate reference dn" % server_name_dn) from None
+        server_ref_dn = ldb.Dn(self, server_ref[0].decode("utf-8"))
+
+        res = self.search(base=server_ref_dn,
+                          scope=ldb.SCOPE_BASE,
+                          attrs=["rIDSetReferences"])
+        try:
+            rid_set_refs = res[0]["rIDSetReferences"]
+        except KeyError:
+            raise ldb.LdbError(
+                ldb.ERR_NO_SUCH_ATTRIBUTE,
+                "No RID Set DN - "
+                "Cannot find attribute rIDSetReferences of %s "
+                "to calculate reference dn" % server_ref_dn) from None
+        rid_set_dn = ldb.Dn(self, rid_set_refs[0].decode("utf-8"))
+
+        # Get the alloc pools and next RID of this RID Set.  A pool is a
+        # 64-bit value: low 32 bits = first RID, high 32 bits = last RID
+        # (see the & uint32_max / >> 32 arithmetic below)
+        res = self.search(base=rid_set_dn,
+                          scope=ldb.SCOPE_BASE,
+                          attrs=["rIDAllocationPool",
+                                 "rIDPreviousAllocationPool",
+                                 "rIDNextRID"])
+
+        uint32_max = 2**32 - 1
+        uint64_max = 2**64 - 1
+
+        try:
+            alloc_pool = int(res[0]["rIDAllocationPool"][0])
+        except KeyError:
+            # missing attribute is treated like the invalid marker value
+            alloc_pool = uint64_max
+        if alloc_pool == uint64_max:
+            raise ldb.LdbError(ldb.ERR_OPERATIONS_ERROR,
+                               "Bad RID Set %s" % rid_set_dn)
+
+        try:
+            prev_pool = int(res[0]["rIDPreviousAllocationPool"][0])
+        except KeyError:
+            prev_pool = uint64_max
+        try:
+            next_rid = int(res[0]["rIDNextRID"][0])
+        except KeyError:
+            next_rid = uint32_max
+
+        # If we never used a pool, set up our first pool
+        if prev_pool == uint64_max or next_rid == uint32_max:
+            prev_pool = alloc_pool
+            next_rid = prev_pool & uint32_max
+        else:
+            next_rid += 1
+
+        # Now check if our current pool is still usable
+        prev_pool_lo = prev_pool & uint32_max
+        prev_pool_hi = prev_pool >> 32
+        if next_rid > prev_pool_hi:
+            # We need a new pool, check if we already have a new one
+            # Otherwise we return an error code.
+            if alloc_pool == prev_pool:
+                raise ldb.LdbError(ldb.ERR_OPERATIONS_ERROR,
+                                   "RID pools out of RIDs")
+
+            # Now use the new pool
+            prev_pool = alloc_pool
+            prev_pool_lo = prev_pool & uint32_max
+            prev_pool_hi = prev_pool >> 32
+            next_rid = prev_pool_lo
+
+        # final sanity check that the chosen RID lies inside the pool
+        if next_rid < prev_pool_lo or next_rid > prev_pool_hi:
+            raise ldb.LdbError(ldb.ERR_OPERATIONS_ERROR,
+                               "Bad RID chosen %d from range %d-%d" %
+                               (next_rid, prev_pool_lo, prev_pool_hi))
+
+        return next_rid, prev_pool_hi
+
+ def normalize_dn_in_domain(self, dn):
+ """return a new DN expanded by adding the domain DN
+
+ If the dn is already a child of the domain DN, just
+ return it as-is.
+
+ :param dn: relative dn
+ """
+ domain_dn = ldb.Dn(self, self.domain_dn())
+
+ if isinstance(dn, ldb.Dn):
+ dn = str(dn)
+
+ full_dn = ldb.Dn(self, dn)
+ if not full_dn.is_child_of(domain_dn):
+ full_dn.add_base(domain_dn)
+ return full_dn
+
class dsdb_Dn(object):
    """a class for binary DN"""

    def __init__(self, samdb, dnstring, syntax_oid=None):
        """create a dsdb_Dn

        :param samdb: SamDB connection used to parse the DN part
        :param dnstring: string form, optionally with a "B:"/"S:" prefix
        :param syntax_oid: attribute syntax OID; auto-detected when None
        :raises RuntimeError: if a binary DN string is malformed
        """
        if syntax_oid is None:
            # auto-detect based on string
            if dnstring.startswith("B:"):
                syntax_oid = dsdb.DSDB_SYNTAX_BINARY_DN
            elif dnstring.startswith("S:"):
                syntax_oid = dsdb.DSDB_SYNTAX_STRING_DN
            else:
                syntax_oid = dsdb.DSDB_SYNTAX_OR_NAME
        if syntax_oid in [dsdb.DSDB_SYNTAX_BINARY_DN, dsdb.DSDB_SYNTAX_STRING_DN]:
            # it is a binary DN: "B:<hexlen>:<hexdata>:<dn>"
            colons = dnstring.split(':')
            if len(colons) < 4:
                raise RuntimeError("Invalid DN %s" % dnstring)
            prefix_len = 4 + len(colons[1]) + int(colons[1])
            self.prefix = dnstring[0:prefix_len]
            self.binary = self.prefix[3 + len(colons[1]):-1]
            self.dnstring = dnstring[prefix_len:]
        else:
            self.dnstring = dnstring
            self.prefix = ''
            self.binary = ''
        self.dn = ldb.Dn(samdb, self.dnstring)

    def __str__(self):
        return self.prefix + str(self.dn.extended_str(mode=1))

    @staticmethod
    def _cmp_values(x, y):
        """Three-way compare with None ordered before everything else.

        Self-contained replacement for the Python 2 built-in cmp(), which
        does not exist in Python 3; previously this class relied on a
        module-level cmp helper being in scope.
        """
        if x is None and y is None:
            return 0
        if x is None:
            return -1
        if y is None:
            return 1
        return (x > y) - (x < y)

    def __cmp__(self, other):
        """ compare dsdb_Dn values similar to parsed_dn_compare()"""
        # Order by the DN's objectGUID first, then by the binary part.
        guid1 = self.dn.get_extended_component("GUID")
        guid2 = other.dn.get_extended_component("GUID")

        v = self._cmp_values(guid1, guid2)
        if v != 0:
            return v
        return self._cmp_values(self.binary, other.binary)

    # In Python3, __cmp__ is replaced by these 6 methods
    def __eq__(self, other):
        return self.__cmp__(other) == 0

    def __ne__(self, other):
        return self.__cmp__(other) != 0

    def __lt__(self, other):
        return self.__cmp__(other) < 0

    def __le__(self, other):
        return self.__cmp__(other) <= 0

    def __gt__(self, other):
        return self.__cmp__(other) > 0

    def __ge__(self, other):
        return self.__cmp__(other) >= 0

    def get_binary_integer(self):
        """return binary part of a dsdb_Dn as an integer, or None"""
        if self.prefix == '':
            return None
        return int(self.binary, 16)

    def get_bytes(self):
        """return binary as a byte string"""
        return binascii.unhexlify(self.binary)
diff --git a/python/samba/schema.py b/python/samba/schema.py
new file mode 100644
index 0000000..094ce87
--- /dev/null
+++ b/python/samba/schema.py
@@ -0,0 +1,264 @@
+#
+# Unix SMB/CIFS implementation.
+# backend code for provisioning a Samba4 server
+#
+# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007-2008
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2008-2009
+# Copyright (C) Oliver Liebel <oliver@itc.li> 2008-2009
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Functions for setting up a Samba Schema."""
+
+from base64 import b64encode
+from samba import read_and_sub_file, substitute_var, check_all_substituted
+from samba.dcerpc import security
+from samba.ms_schema import read_ms_schema
+from samba.ndr import ndr_pack
+from samba.samdb import SamDB
+from samba.common import get_string
+from samba import dsdb
+from ldb import SCOPE_SUBTREE, SCOPE_ONELEVEL
+
+
def get_schema_descriptor(domain_sid, name_map=None):
    """Return the packed default security descriptor for the schema NC.

    :param domain_sid: domain SID the SDDL is resolved against
    :param name_map: optional name map (unused here, kept for API parity)
    :return: NDR-packed security.descriptor
    """
    if name_map is None:
        name_map = {}

    dacl_aces = (
        "(OA;;CR;e12b56b6-0a95-11d1-adbb-00c04fd8d5cd;;SA)",
        "(OA;;CR;1131f6aa-9c07-11d1-f79f-00c04fc2dcd2;;ED)",
        "(OA;;CR;1131f6ab-9c07-11d1-f79f-00c04fc2dcd2;;ED)",
        "(OA;;CR;1131f6ac-9c07-11d1-f79f-00c04fc2dcd2;;ED)",
        "(OA;;CR;1131f6aa-9c07-11d1-f79f-00c04fc2dcd2;;BA)",
        "(OA;;CR;1131f6ab-9c07-11d1-f79f-00c04fc2dcd2;;BA)",
        "(OA;;CR;1131f6ac-9c07-11d1-f79f-00c04fc2dcd2;;BA)",
        "(A;CI;RPLCLORC;;;AU)",
        "(A;CI;RPWPCRCCLCLORCWOWDSW;;;SA)",
        "(A;CI;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)",
        "(OA;;CR;1131f6ad-9c07-11d1-f79f-00c04fc2dcd2;;ED)",
        "(OA;;CR;89e95b76-444d-4c62-991a-0facbeda640c;;ED)",
        "(OA;;CR;1131f6ad-9c07-11d1-f79f-00c04fc2dcd2;;BA)",
        "(OA;;CR;89e95b76-444d-4c62-991a-0facbeda640c;;BA)",
        "(OA;;CR;1131f6aa-9c07-11d1-f79f-00c04fc2dcd2;;RO)",
        "(OA;;CR;1131f6ad-9c07-11d1-f79f-00c04fc2dcd2;;RO)",
        "(OA;;CR;89e95b76-444d-4c62-991a-0facbeda640c;;RO)",
    )
    sacl_aces = (
        "(AU;SA;WPCCDCWOWDSDDTSW;;;WD)",
        "(AU;CISA;WP;;;WD)",
        "(AU;SA;CR;;;BA)",
        "(AU;SA;CR;;;DU)",
        "(OU;SA;CR;e12b56b6-0a95-11d1-adbb-00c04fd8d5cd;;WD)",
        "(OU;SA;CR;45ec5156-db7e-47bb-b53f-dbeb2d03c40f;;WD)",
    )
    # Owner/Group: Schema Admins; DACL is auto-inherit, then the SACL part.
    sddl = "O:SAG:SAD:AI" + "".join(dacl_aces) + "S:" + "".join(sacl_aces)
    sec = security.descriptor.from_sddl(sddl, domain_sid)
    return ndr_pack(sec)
+
+
class Schema(object):
    """The AD schema, parsed from the MS schema files into a private
    in-memory SamDB (self.ldb)."""

    # the schema files (and corresponding object version) that we know about
    base_schemas = {
        "2008_R2_old": ("MS-AD_Schema_2K8_R2_Attributes.txt",
                        "MS-AD_Schema_2K8_R2_Classes.txt",
                        47),
        "2008_R2": ("Attributes_for_AD_DS__Windows_Server_2008_R2.ldf",
                    "Classes_for_AD_DS__Windows_Server_2008_R2.ldf",
                    47),
        "2012": ("Attributes_for_AD_DS__Windows_Server_2012.ldf",
                 "Classes_for_AD_DS__Windows_Server_2012.ldf",
                 56),
        "2012_R2": ("AD_DS_Attributes__Windows_Server_2012_R2.ldf",
                    "AD_DS_Classes__Windows_Server_2012_R2.ldf",
                    69),
        "2016": ("AD_DS_Attributes__Windows_Server_v1803.ldf",
                 "AD_DS_Classes__Windows_Server_v1803.ldf",
                 87),
        "2019": ("AD_DS_Attributes_Windows_Server_v1903.ldf",
                 "AD_DS_Classes_Windows_Server_v1903.ldf",
                 88),
    }

    def __init__(self, domain_sid, invocationid=None, schemadn=None,
                 files=None, override_prefixmap=None, additional_prefixmap=None,
                 base_schema=None):
        # Function-scope import — presumably to avoid a circular import
        # with samba.provision; TODO(review) confirm.
        from samba.provision import setup_path

        # NOTE(review): because it follows the import statement above, the
        # string literal below is a plain expression, not the real docstring.
        """Load schema for the SamDB from the AD schema files and
        samba4_schema.ldif

        :param samdb: Load a schema into a SamDB.
        :param schemadn: DN of the schema

        Returns the schema data loaded, to avoid double-parsing when then
        needing to add it to the db
        """

        if base_schema is None:
            base_schema = Schema.default_base_schema()

        self.base_schema = base_schema

        self.schemadn = schemadn
        # We need to have the am_rodc=False just to keep some warnings quiet -
        # this isn't a real SAM, so it's meaningless.
        self.ldb = SamDB(global_schema=False, am_rodc=False)
        if invocationid is not None:
            self.ldb.set_invocation_id(invocationid)

        # Combined attribute + class definitions from the chosen base schema.
        self.schema_data = read_ms_schema(
            setup_path('ad-schema/%s' % Schema.base_schemas[base_schema][0]),
            setup_path('ad-schema/%s' % Schema.base_schemas[base_schema][1]))

        def read_file(file):
            # Small helper: slurp a file as bytes.
            with open(file, 'rb') as data_file:
                return data_file.read()

        # Explicit schema files REPLACE (not extend) the MS schema data.
        if files is not None:
            self.schema_data = "".join(get_string(read_file(file))
                                       for file in files)

        self.schema_data = substitute_var(self.schema_data,
                                          {"SCHEMADN": schemadn})
        check_all_substituted(self.schema_data)

        schema_version = str(Schema.get_version(base_schema))
        self.schema_dn_modify = read_and_sub_file(
            setup_path("provision_schema_basedn_modify.ldif"),
            {"SCHEMADN": schemadn, "OBJVERSION": schema_version})

        descr = b64encode(get_schema_descriptor(domain_sid)).decode('utf8')
        self.schema_dn_add = read_and_sub_file(
            setup_path("provision_schema_basedn.ldif"),
            {"SCHEMADN": schemadn, "DESCRIPTOR": descr})

        if override_prefixmap is not None:
            self.prefixmap_data = override_prefixmap
        else:
            self.prefixmap_data = read_file(setup_path("prefixMap.txt"))

        # NOTE(review): read_file() yields bytes while the joined extra
        # entries are str, and b64encode below needs bytes — the expected
        # types of override/additional prefixmap inputs should be confirmed.
        if additional_prefixmap is not None:
            self.prefixmap_data += "".join("%s\n" % map for map in additional_prefixmap)

        self.prefixmap_data = b64encode(self.prefixmap_data).decode('utf8')

        # We don't actually add this ldif, just parse it
        prefixmap_ldif = "dn: %s\nprefixMap:: %s\n\n" % (self.schemadn, self.prefixmap_data)
        self.set_from_ldif(prefixmap_ldif, self.schema_data, self.schemadn)

    @staticmethod
    def default_base_schema():
        """Returns the default base schema to use"""
        return "2012_R2"

    @staticmethod
    def get_version(base_schema):
        """Returns the base schema's object version, e.g. 47 for 2008_R2"""
        return Schema.base_schemas[base_schema][2]

    def set_from_ldif(self, pf, df, dn):
        """Parse prefixMap LDIF (pf) and schema LDIF (df) into self.ldb."""
        dsdb._dsdb_set_schema_from_ldif(self.ldb, pf, df, dn)

    def write_to_tmp_ldb(self, schemadb_path):
        """Write the parsed schema into a temporary ldb at schemadb_path."""
        self.ldb.connect(url=schemadb_path)
        self.ldb.transaction_start()
        try:
            # These are actually ignored, as the schema has been forced
            # when the ldb object was created, and that overrides this
            self.ldb.add_ldif("""dn: @ATTRIBUTES
linkID: INTEGER

dn: @INDEXLIST
@IDXATTR: linkID
@IDXATTR: attributeSyntax
@IDXGUID: objectGUID
""")

            schema_dn_add = self.schema_dn_add \
                + "objectGUID: 24e2ca70-b093-4ae8-84c0-2d7ac652a1b8\n"

            # These bits of LDIF are supplied when the Schema object is created
            self.ldb.add_ldif(schema_dn_add)
            self.ldb.modify_ldif(self.schema_dn_modify)
            self.ldb.add_ldif(self.schema_data)
        except:
            # Bare except is deliberate here: cancel the transaction on any
            # failure, then re-raise the original exception.
            self.ldb.transaction_cancel()
            raise
        else:
            self.ldb.transaction_commit()

    # Return a hash with the forward attribute as a key and the back as the
    # value
    def linked_attributes(self):
        return get_linked_attributes(self.schemadn, self.ldb)

    def dnsyntax_attributes(self):
        # Names of non-linked DN-syntax attributes (see helper below).
        return get_dnsyntax_attributes(self.schemadn, self.ldb)

    def convert_to_openldap(self, target, mapping):
        """Return the loaded schema converted for *target* via the dsdb
        helper, using the given *mapping*."""
        return dsdb._dsdb_convert_schema_to_openldap(self.ldb, target, mapping)
+
+
+# Return a hash with the forward attribute as a key and the back as the value
def get_linked_attributes(schemadn, schemaldb):
    """Return a dict mapping each forward-link attribute's lDAPDisplayName
    to the lDAPDisplayName of its back link."""
    forward_links = schemaldb.search(
        expression="(&(linkID=*)"
        "(!(linkID:1.2.840.113556.1.4.803:=1))"
        "(objectclass=attributeSchema)"
        "(attributeSyntax=2.5.5.1))",
        base=schemadn, scope=SCOPE_ONELEVEL,
        attrs=["linkID", "lDAPDisplayName"])
    attributes = {}
    for msg in forward_links:
        # The matching back link is looked up as linkID + 1.
        back_filter = ("(&(objectclass=attributeSchema)(linkID=%d)"
                       "(attributeSyntax=2.5.5.1))" %
                       (int(msg["linkID"][0]) + 1))
        target = schemaldb.searchone(basedn=schemadn,
                                     expression=back_filter,
                                     attribute="lDAPDisplayName",
                                     scope=SCOPE_SUBTREE)
        if target is not None:
            attributes[str(msg["lDAPDisplayName"])] = target.decode('utf-8')

    return attributes
+
+
def get_dnsyntax_attributes(schemadn, schemaldb):
    """Return the lDAPDisplayNames of all non-linked DN-syntax attributes."""
    res = schemaldb.search(
        expression="(&(!(linkID=*))(objectclass=attributeSchema)(attributeSyntax=2.5.5.1))",
        base=schemadn, scope=SCOPE_ONELEVEL,
        attrs=["linkID", "lDAPDisplayName"])
    return [str(msg["lDAPDisplayName"]) for msg in res]
+
+
def ldb_with_schema(schemadn="cn=schema,cn=configuration,dc=example,dc=com",
                    domainsid=None,
                    override_prefixmap=None):
    """Load schema for the SamDB from the AD schema files and samba4_schema.ldif

    :param schemadn: DN of the schema
    :param domainsid: domain SID string, or None to use a freshly random SID
    :param override_prefixmap: optional prefixMap data to use instead of the
        bundled prefixMap.txt

    Returns the schema data loaded as an object, with .ldb being a
    new ldb with the schema loaded. This allows certain tests to
    operate without a remote or local schema.
    """
    sid = (security.random_sid() if domainsid is None
           else security.dom_sid(domainsid))
    return Schema(sid, schemadn=schemadn,
                  override_prefixmap=override_prefixmap)
diff --git a/python/samba/sd_utils.py b/python/samba/sd_utils.py
new file mode 100644
index 0000000..cabbd47
--- /dev/null
+++ b/python/samba/sd_utils.py
@@ -0,0 +1,231 @@
+# Utility methods for security descriptor manipulation
+#
+# Copyright Nadezhda Ivanova 2010 <nivanova@samba.org>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Utility methods for security descriptor manipulation."""
+
+import samba
+from ldb import Message, MessageElement, Dn
+from ldb import FLAG_MOD_REPLACE, SCOPE_BASE
+from samba.ndr import ndr_pack, ndr_unpack, ndr_deepcopy
+from samba.dcerpc import security
+from samba.ntstatus import (
+ NT_STATUS_OBJECT_NAME_NOT_FOUND,
+)
+
+
def escaped_claim_id(claim_id):
    """Encode claim attribute names according to [MS-DTYP] 2.5.1 ("attr-char2")

    Characters in the escape set are emitted as %hhhh (four lowercase hex
    digits of the code point); all other characters pass through untouched.
    The NUL byte is also encoded, which is useful for tests, but it is
    forbidden in either form.
    """
    must_escape = frozenset('\x00\t\n\x0b\x0c\r !"%&()<=>|')
    encoded = []
    for ch in claim_id:
        if ch in must_escape:
            encoded.append('%%%04x' % ord(ch))
        else:
            encoded.append(ch)
    return ''.join(encoded)
+
+
class SDUtils(object):
    """Some utilities for manipulation of security descriptors on objects."""

    def __init__(self, samdb):
        self.ldb = samdb
        # The domain SID is needed to resolve domain-relative SDDL aliases.
        self.domain_sid = security.dom_sid(self.ldb.get_domain_sid())

    def modify_sd_on_dn(self, object_dn, sd, controls=None):
        """Modify security descriptor using either SDDL string
        or security.descriptor object

        :param object_dn: DN (ldb.Dn or string) of the object to modify
        :param sd: replacement descriptor (SDDL str or security.descriptor)
        :param controls: optional ldb controls for the modify
        """
        m = Message()
        if isinstance(object_dn, Dn):
            m.dn = object_dn
        else:
            m.dn = Dn(self.ldb, object_dn)

        assert(isinstance(sd, str) or isinstance(sd, security.descriptor))
        if isinstance(sd, str):
            tmp_desc = security.descriptor.from_sddl(sd, self.domain_sid)
        elif isinstance(sd, security.descriptor):
            tmp_desc = sd

        # Replace the whole nTSecurityDescriptor with the packed descriptor.
        m["nTSecurityDescriptor"] = MessageElement(ndr_pack(tmp_desc),
                                                   FLAG_MOD_REPLACE,
                                                   "nTSecurityDescriptor")
        self.ldb.modify(m, controls)

    def read_sd_on_dn(self, object_dn, controls=None):
        """Return the object's nTSecurityDescriptor, NDR-unpacked."""
        res = self.ldb.search(object_dn, SCOPE_BASE, None,
                              ["nTSecurityDescriptor"], controls=controls)
        desc = res[0]["nTSecurityDescriptor"][0]
        return ndr_unpack(security.descriptor, desc)

    def get_object_sid(self, object_dn):
        """Return the object's objectSid as a security.dom_sid."""
        res = self.ldb.search(object_dn)
        return ndr_unpack(security.dom_sid, res[0]["objectSid"][0])

    def update_aces_in_dacl(self, dn, del_aces=None, add_aces=None,
                            sddl_attr=None, controls=None):
        """Delete and/or add ACEs in an object's DACL.

        :param dn: DN of the object to update
        :param del_aces: ACEs to remove (SDDL strings or security.ace)
        :param add_aces: ACEs to add; an entry may be a dict
            {"idx": position, "ace": ace} to control its placement
        :param sddl_attr: if given, edit this SDDL-string attribute
            instead of nTSecurityDescriptor
        :param controls: optional ldb controls for the read and write
        :return: (del_ignored, add_ignored, inherited_ignored) — the ACEs
            that were skipped
        """
        if del_aces is None:
            del_aces=[]
        if add_aces is None:
            add_aces=[]

        def ace_from_sddl(ace_sddl):
            # Parse a single ACE by wrapping it in a minimal "D:" descriptor.
            ace_sd = security.descriptor.from_sddl("D:" + ace_sddl, self.domain_sid)
            assert(len(ace_sd.dacl.aces)==1)
            return ace_sd.dacl.aces[0]

        if sddl_attr is None:
            if controls is None:
                controls=["sd_flags:1:%d" % security.SECINFO_DACL]
            sd = self.read_sd_on_dn(dn, controls=controls)
            if not sd.type & security.SEC_DESC_DACL_PROTECTED:
                # if the DACL is not protected remove all
                # inherited aces, as they will be re-inherited
                # on the server, we need a ndr_deepcopy in order
                # to avoid reference problems while deleting
                # the aces while looping over them
                dacl_copy = ndr_deepcopy(sd.dacl)
                for ace in dacl_copy.aces:
                    if ace.flags & security.SEC_ACE_FLAG_INHERITED_ACE:
                        try:
                            sd.dacl_del_ace(ace)
                        except samba.NTSTATUSError as err:
                            if err.args[0] != NT_STATUS_OBJECT_NAME_NOT_FOUND:
                                raise err
                            # dacl_del_ace may remove more than
                            # one ace, so we may not find it anymore
                            pass
        else:
            if controls is None:
                controls=[]
            res = self.ldb.search(dn, SCOPE_BASE, None,
                                  [sddl_attr], controls=controls)
            old_sddl = str(res[0][sddl_attr][0])
            sd = security.descriptor.from_sddl(old_sddl, self.domain_sid)

        num_changes = 0
        del_ignored = []
        add_ignored = []
        inherited_ignored = []

        for ace in del_aces:
            if isinstance(ace, str):
                ace = ace_from_sddl(ace)
            assert(isinstance(ace, security.ace))

            # Inherited ACEs are managed by the server; never delete them here.
            if ace.flags & security.SEC_ACE_FLAG_INHERITED_ACE:
                inherited_ignored.append(ace)
                continue

            if ace not in sd.dacl.aces:
                del_ignored.append(ace)
                continue

            sd.dacl_del_ace(ace)
            num_changes += 1

        for ace in add_aces:
            add_idx = -1
            if isinstance(ace, dict):
                if "idx" in ace:
                    add_idx = ace["idx"]
                ace = ace["ace"]
            if isinstance(ace, str):
                ace = ace_from_sddl(ace)
            assert(isinstance(ace, security.ace))

            if ace.flags & security.SEC_ACE_FLAG_INHERITED_ACE:
                inherited_ignored.append(ace)
                continue

            # Skip duplicates so repeated calls stay idempotent.
            if ace in sd.dacl.aces:
                add_ignored.append(ace)
                continue

            sd.dacl_add(ace, add_idx)
            num_changes += 1

        # Nothing changed: skip the write entirely.
        if num_changes == 0:
            return del_ignored, add_ignored, inherited_ignored

        if sddl_attr is None:
            self.modify_sd_on_dn(dn, sd, controls=controls)
        else:
            new_sddl = sd.as_sddl(self.domain_sid)
            m = Message()
            m.dn = dn
            m[sddl_attr] = MessageElement(new_sddl.encode('ascii'),
                                          FLAG_MOD_REPLACE,
                                          sddl_attr)
            self.ldb.modify(m, controls=controls)

        return del_ignored, add_ignored, inherited_ignored

    def dacl_prepend_aces(self, object_dn, aces, controls=None):
        """Prepend one or more ACEs to an object's security descriptor.

        :return: (add_ignored, inherited_ignored)
        """
        ace_sd = security.descriptor.from_sddl("D:" + aces, self.domain_sid)
        add_aces = []
        add_idx = 0
        for ace in ace_sd.dacl.aces:
            add_aces.append({"idx": add_idx, "ace": ace})
            add_idx += 1
        _,ai,ii = self.update_aces_in_dacl(object_dn, add_aces=add_aces,
                                           controls=controls)
        return ai, ii

    def dacl_add_ace(self, object_dn, ace):
        """Add one or more ACEs to an object's security descriptor
        (searches with show_deleted; return values are discarded).
        """
        _,_ = self.dacl_prepend_aces(object_dn, ace,
                                     controls=["show_deleted:1"])

    def dacl_append_aces(self, object_dn, aces, controls=None):
        """Append one or more ACEs to an object's security descriptor.

        :return: (add_ignored, inherited_ignored)
        """
        ace_sd = security.descriptor.from_sddl("D:" + aces, self.domain_sid)
        add_aces = []
        for ace in ace_sd.dacl.aces:
            add_aces.append(ace)
        _,ai,ii = self.update_aces_in_dacl(object_dn, add_aces=add_aces,
                                           controls=controls)
        return ai, ii

    def dacl_delete_aces(self, object_dn, aces, controls=None):
        """Delete one or more ACEs from an object's security descriptor.

        :return: (del_ignored, inherited_ignored)
        """
        del_sd = security.descriptor.from_sddl("D:" + aces, self.domain_sid)
        del_aces = []
        for ace in del_sd.dacl.aces:
            del_aces.append(ace)
        di,_,ii = self.update_aces_in_dacl(object_dn, del_aces=del_aces,
                                           controls=controls)
        return di, ii

    def get_sd_as_sddl(self, object_dn, controls=None):
        """Return object nTSecurityDescriptor in SDDL format
        """
        if controls is None:
            controls = []
        desc = self.read_sd_on_dn(object_dn, controls + ["show_deleted:1"])
        return desc.as_sddl(self.domain_sid)
diff --git a/python/samba/sites.py b/python/samba/sites.py
new file mode 100644
index 0000000..a59e998
--- /dev/null
+++ b/python/samba/sites.py
@@ -0,0 +1,126 @@
+# python site manipulation code
+# Copyright Matthieu Patou <mat@matws.net> 2011
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Manipulating sites."""
+
+import ldb
+from ldb import FLAG_MOD_ADD, LdbError
+
+
class SiteException(Exception):
    """Base element for Sites errors"""

    def __init__(self, value):
        # Chain into Exception.__init__ so that e.args is populated and
        # the exception pickles/reprs normally (previously args was empty).
        super().__init__(value)
        self.value = value

    def __str__(self):
        return "%s: %s" % (self.__class__.__name__, self.value)
+
+
class SiteNotFoundException(SiteException):
    """Raised when a site cannot be found although it was expected to exist."""


class SiteAlreadyExistsException(SiteException):
    """Raised when a site already exists although it was expected not to."""


class SiteServerNotEmptyException(SiteException):
    """Raised when the site still has servers attached."""
+
+
def create_site(samdb, configDn, siteName):
    """
    Create a site

    :param samdb: A samdb connection
    :param configDn: The DN of the configuration partition
    :param siteName: Name of the site to create
    :return: True upon success
    :raise SiteAlreadyExistsException: if the site to be created already exists.
    """

    # Escape the caller-supplied name so it cannot alter the shape of the
    # LDAP search filter (matches the convention in samba.subnets).
    ret = samdb.search(base=configDn, scope=ldb.SCOPE_SUBTREE,
                       expression='(&(objectclass=Site)(cn=%s))' %
                       ldb.binary_encode(siteName))
    if len(ret) != 0:
        raise SiteAlreadyExistsException('A site with the name %s already exists' % siteName)

    # The site object itself ...
    m = ldb.Message()
    m.dn = ldb.Dn(samdb, "Cn=%s,CN=Sites,%s" % (siteName, str(configDn)))
    m["objectclass"] = ldb.MessageElement("site", FLAG_MOD_ADD, "objectclass")

    samdb.add(m)

    # ... plus its two child containers.
    m2 = ldb.Message()
    m2.dn = ldb.Dn(samdb, "Cn=NTDS Site Settings,%s" % str(m.dn))
    m2["objectclass"] = ldb.MessageElement("nTDSSiteSettings", FLAG_MOD_ADD, "objectclass")

    samdb.add(m2)

    m3 = ldb.Message()
    m3.dn = ldb.Dn(samdb, "Cn=Servers,%s" % str(m.dn))
    m3["objectclass"] = ldb.MessageElement("serversContainer", FLAG_MOD_ADD, "objectclass")

    samdb.add(m3)

    return True
+
+
def delete_site(samdb, configDn, siteName):
    """
    Delete a site

    :param samdb: A samdb connection
    :param configDn: The DN of the configuration partition
    :param siteName: Name of the site to delete
    :return: True upon success
    :raise SiteNotFoundException: if the site to be deleted does not exist.
    :raise SiteServerNotEmptyException: if the site still has servers in it.
    """

    # Build "CN=<siteName>,CN=Sites,<configDn>" via set_component so the
    # name is treated as a literal RDN value (no DN-syntax surprises).
    dnsite = ldb.Dn(samdb, "CN=Sites")
    try:
        dnsite.add_base(configDn)
    except ldb.LdbError:
        raise SiteException("dnsite.add_base() failed")
    try:
        dnsite.add_child("CN=X")
    except ldb.LdbError:
        raise SiteException("dnsite.add_child() failed")
    dnsite.set_component(0, "CN", siteName)

    dnservers = ldb.Dn(samdb, "CN=Servers")
    dnservers.add_base(dnsite)

    try:
        ret = samdb.search(base=dnsite, scope=ldb.SCOPE_BASE,
                           expression="objectClass=site")
        if len(ret) != 1:
            raise SiteNotFoundException('Site %s does not exist' % siteName)
    except LdbError as e:
        (enum, estr) = e.args
        if enum == ldb.ERR_NO_SUCH_OBJECT:
            raise SiteNotFoundException('Site %s does not exist' % siteName)
        else:
            # Previously any other LdbError was silently swallowed and the
            # function carried on regardless; propagate it instead.
            raise

    # Refuse to delete a site that still contains server objects.
    ret = samdb.search(base=dnservers, scope=ldb.SCOPE_ONELEVEL,
                       expression='(objectclass=server)')
    if len(ret) != 0:
        raise SiteServerNotEmptyException('Site %s still has servers in it, move them before removal' % siteName)

    samdb.delete(dnsite, ["tree_delete:0"])

    return True
diff --git a/python/samba/subnets.py b/python/samba/subnets.py
new file mode 100644
index 0000000..ef73950
--- /dev/null
+++ b/python/samba/subnets.py
@@ -0,0 +1,247 @@
+# Add/remove subnets to sites.
+#
+# Copyright (C) Catalyst.Net Ltd 2015
+# Copyright Matthieu Patou <mat@matws.net> 2011
+#
+# Catalyst.Net's contribution was written by Douglas Bagnall
+# <douglas.bagnall@catalyst.net.nz>.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import ldb
+from ldb import FLAG_MOD_ADD, FLAG_MOD_REPLACE, LdbError
+from . sites import SiteNotFoundException
+
+
class SubnetException(Exception):
    """Base class for subnet-manipulation errors."""
    pass


class SubnetNotFound(SubnetException):
    """The subnet requested does not exist."""
    pass


class SubnetAlreadyExists(SubnetException):
    """The subnet being added already exists."""
    pass


class SubnetInvalid(SubnetException):
    """The subnet CIDR is invalid."""
    pass


class SiteNotFound(SubnetException):
    """The site to be used for the subnet does not exist."""
    pass
+
+
def create_subnet(samdb, configDn, subnet_name, site_name):
    """Create a subnet object and associate it with a site.

    :param samdb: A samdb connection
    :param configDn: The DN of the configuration partition
    :param subnet_name: name of the subnet to create (a CIDR range)
    :return: None
    :raise SubnetAlreadyExists: if the subnet to be created already exists.
    :raise SiteNotFound: if the site does not exist.
    """
    site_res = samdb.search(base=configDn, scope=ldb.SCOPE_SUBTREE,
                            expression='(&(objectclass=Site)(cn=%s))' %
                            ldb.binary_encode(site_name))
    if len(site_res) != 1:
        raise SiteNotFound('A site with the name %s does not exist' %
                           site_name)
    dn_site = site_res[0].dn

    if not isinstance(subnet_name, str):
        raise SubnetInvalid("%s is not a valid subnet (not a string)" % subnet_name)

    # Build the subnet DN with the name applied as a literal RDN value.
    dnsubnet = ldb.Dn(samdb, "CN=Subnets,CN=Sites")
    try:
        dnsubnet.add_base(configDn)
    except ldb.LdbError:
        raise SubnetException("dnsubnet.add_base() failed")
    try:
        dnsubnet.add_child("CN=X")
    except ldb.LdbError:
        raise SubnetException("dnsubnet.add_child() failed")
    dnsubnet.set_component(0, "CN", subnet_name)

    try:
        msg = ldb.Message()
        msg.dn = dnsubnet
        msg["objectclass"] = ldb.MessageElement("subnet", FLAG_MOD_ADD,
                                                "objectclass")
        msg["siteObject"] = ldb.MessageElement(str(dn_site), FLAG_MOD_ADD,
                                               "siteObject")
        samdb.add(msg)
    except ldb.LdbError as e:
        (enum, estr) = e.args
        if enum == ldb.ERR_INVALID_DN_SYNTAX:
            raise SubnetInvalid("%s is not a valid subnet: %s" % (subnet_name, estr))
        if enum == ldb.ERR_ENTRY_ALREADY_EXISTS:
            # Collisions are detected by exact match only, not by range
            # overlap: 10.1.1.0/24 can coexist with 10.1.0.0/16, and
            # equivalent IPv6 spellings (5::0/16, 5::/16, 5:0:0::/16) may
            # all refer to the same range.
            raise SubnetAlreadyExists('A subnet with the CIDR %s already exists'
                                      % subnet_name)
        raise
+
+
def delete_subnet(samdb, configDn, subnet_name):
    """Delete a subnet.

    :param samdb: A samdb connection
    :param configDn: The DN of the configuration partition
    :param subnet_name: Name of the subnet to delete
    :return: None
    :raise SubnetNotFound: if the subnet to be deleted does not exist.
    """
    # Build "CN=<subnet_name>,CN=Subnets,CN=Sites,<configDn>" via
    # set_component so the name is treated as a literal RDN value.
    dnsubnet = ldb.Dn(samdb, "CN=Subnets,CN=Sites")
    try:
        dnsubnet.add_base(configDn)
    except ldb.LdbError:
        raise SubnetException("dnsubnet.add_base() failed")
    try:
        dnsubnet.add_child("CN=X")
    except ldb.LdbError:
        raise SubnetException("dnsubnet.add_child() failed")
    dnsubnet.set_component(0, "CN", subnet_name)

    try:
        ret = samdb.search(base=dnsubnet, scope=ldb.SCOPE_BASE,
                           expression="objectClass=subnet")
        if len(ret) != 1:
            raise SubnetNotFound('Subnet %s does not exist' % subnet_name)
    except LdbError as e1:
        (enum, estr) = e1.args
        if enum == ldb.ERR_NO_SUCH_OBJECT:
            raise SubnetNotFound('Subnet %s does not exist' % subnet_name)
        else:
            # Previously other LdbErrors were silently swallowed and the
            # delete below was attempted anyway; propagate them instead.
            raise

    samdb.delete(dnsubnet)
+
+
def rename_subnet(samdb, configDn, subnet_name, new_name):
    """Give an existing subnet a new name.

    :param samdb: A samdb connection
    :param configDn: The DN of the configuration partition
    :param subnet_name: Name of the subnet to rename
    :param new_name: New name for the subnet
    :return: None
    :raise SubnetNotFound: if the subnet to be renamed does not exist.
    :raise SubnetAlreadyExists: if a subnet with the new name already exists.
    """
    # Current subnet DN, with the old name applied as a literal RDN value.
    old_dn = ldb.Dn(samdb, "CN=Subnets,CN=Sites")
    try:
        old_dn.add_base(configDn)
    except ldb.LdbError:
        raise SubnetException("dnsubnet.add_base() failed")
    try:
        old_dn.add_child("CN=X")
    except ldb.LdbError:
        raise SubnetException("dnsubnet.add_child() failed")
    old_dn.set_component(0, "CN", subnet_name)

    # The target DN differs only in its leaf RDN value.
    new_dn = ldb.Dn(samdb, str(old_dn))
    new_dn.set_component(0, "CN", new_name)

    try:
        samdb.rename(old_dn, new_dn)
    except LdbError as e2:
        (enum, estr) = e2.args
        if enum == ldb.ERR_NO_SUCH_OBJECT:
            raise SubnetNotFound('Subnet %s does not exist' % old_dn)
        if enum == ldb.ERR_ENTRY_ALREADY_EXISTS:
            raise SubnetAlreadyExists('A subnet with the CIDR %s already exists'
                                      % new_name)
        if enum == ldb.ERR_INVALID_DN_SYNTAX:
            raise SubnetInvalid("%s is not a valid subnet: %s" % (new_name,
                                                                  estr))
        raise
+
+
def set_subnet_site(samdb, configDn, subnet_name, site_name):
    """Assign a subnet to a site.

    This dissociates the subnet from its previous site.

    :param samdb: A samdb connection
    :param configDn: The DN of the configuration partition
    :param subnet_name: Name of the subnet
    :param site_name: Name of the site
    :return: None
    :raise SubnetNotFound: if the subnet does not exist.
    :raise SiteNotFoundException: if the site does not exist.
    """

    # Subnet DN, with the name applied as a literal RDN value.
    dnsubnet = ldb.Dn(samdb, "CN=Subnets,CN=Sites")
    try:
        dnsubnet.add_base(configDn)
    except ldb.LdbError:
        raise SubnetException("dnsubnet.add_base() failed")
    try:
        dnsubnet.add_child("CN=X")
    except ldb.LdbError:
        raise SubnetException("dnsubnet.add_child() failed")
    dnsubnet.set_component(0, "CN", subnet_name)

    try:
        ret = samdb.search(base=dnsubnet, scope=ldb.SCOPE_BASE,
                           expression="objectClass=subnet")
        if len(ret) != 1:
            raise SubnetNotFound('Subnet %s does not exist' % subnet_name)
    except LdbError as e3:
        (enum, estr) = e3.args
        if enum == ldb.ERR_NO_SUCH_OBJECT:
            raise SubnetNotFound('Subnet %s does not exist' % subnet_name)
        else:
            # Do not silently swallow unrelated errors (they previously
            # fell through and the function kept going); re-raise.
            raise

    # Site DN, built the same way.
    dnsite = ldb.Dn(samdb, "CN=Sites")
    try:
        dnsite.add_base(configDn)
    except ldb.LdbError:
        raise SubnetException("dnsite.add_base() failed")
    try:
        dnsite.add_child("CN=X")
    except ldb.LdbError:
        raise SubnetException("dnsite.add_child() failed")
    dnsite.set_component(0, "CN", site_name)

    try:
        ret = samdb.search(base=dnsite, scope=ldb.SCOPE_BASE,
                           expression="objectClass=site")
        if len(ret) != 1:
            raise SiteNotFoundException('Site %s does not exist' % site_name)
    except LdbError as e4:
        (enum, estr) = e4.args
        if enum == ldb.ERR_NO_SUCH_OBJECT:
            raise SiteNotFoundException('Site %s does not exist' % site_name)
        else:
            raise

    siteDn = str(ret[0].dn)

    # Point the subnet's siteObject link at the (validated) site.
    # (The unused dnservers DN that used to be built here has been removed.)
    m = ldb.Message()
    m.dn = dnsubnet
    m["siteObject"] = ldb.MessageElement(siteDn, FLAG_MOD_REPLACE,
                                         "siteObject")
    samdb.modify(m)
diff --git a/python/samba/subunit/__init__.py b/python/samba/subunit/__init__.py
new file mode 100644
index 0000000..dab522e
--- /dev/null
+++ b/python/samba/subunit/__init__.py
@@ -0,0 +1,85 @@
+# Subunit handling
+# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2014
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Subunit test protocol."""
+
+import unittest
+
+
+PROGRESS_SET = 0
+PROGRESS_CUR = 1
+PROGRESS_PUSH = 2
+PROGRESS_POP = 3
+
+
+def RemoteError(description=""):
+ return (Exception, Exception(description), None)
+
+
+class RemotedTestCase(unittest.TestCase):
+ """A class to represent test cases run in child processes.
+
+ Instances of this class are used to provide the Python test API a TestCase
+ that can be printed to the screen, introspected for metadata and so on.
+ However, as they are a simply a memoisation of a test that was actually
+ run in the past by a separate process, they cannot perform any interactive
+ actions.
+ """
+
+ def __eq__(self, other):
+ try:
+ return self.__description == other.__description
+ except AttributeError:
+ return False
+
+ def __init__(self, description):
+ """Create a pseudo test case with description description."""
+ self.__description = description
+
+ def error(self, label):
+ raise NotImplementedError("%s on RemotedTestCases is not permitted." %
+ label)
+
+ def setUp(self):
+ self.error("setUp")
+
+ def tearDown(self):
+ self.error("tearDown")
+
+ def shortDescription(self):
+ return self.__description
+
+ def id(self):
+ return "%s" % (self.__description,)
+
+ def __str__(self):
+ return "%s (%s)" % (self.__description, self._strclass())
+
+ def __repr__(self):
+ return "<%s description='%s'>" % \
+ (self._strclass(), self.__description)
+
+ def run(self, result=None):
+ if result is None:
+ result = self.defaultTestResult()
+ result.startTest(self)
+ result.addError(self, RemoteError("Cannot run RemotedTestCases.\n"))
+ result.stopTest(self)
+
+ def _strclass(self):
+ cls = self.__class__
+ return "%s.%s" % (cls.__module__, cls.__name__)
diff --git a/python/samba/subunit/run.py b/python/samba/subunit/run.py
new file mode 100755
index 0000000..dc3f931
--- /dev/null
+++ b/python/samba/subunit/run.py
@@ -0,0 +1,682 @@
+#!/usr/bin/env python3
+#
+# Simple subunit testrunner for python
+# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2014
+
+# Cobbled together from testtools and subunit:
+# Copyright (C) 2005-2011 Robert Collins <robertc@robertcollins.net>
+# Copyright (c) 2008-2011 testtools developers.
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+"""Run a unittest testcase reporting results as Subunit.
+
+ $ python -m samba.subunit.run mylib.tests.test_suite
+"""
+
+import datetime
+import os
+import sys
+import unittest
+
+
+class TestProtocolClient(unittest.TestResult):
+ """A TestResult which generates a subunit stream for a test run.
+
+ # Get a TestSuite or TestCase to run
+ suite = make_suite()
+ # Create a stream (any object with a 'write' method). This should accept
+ # bytes not strings: subunit is a byte orientated protocol.
+ stream = open('tests.log', 'wb')
+ # Create a subunit result object which will output to the stream
+ result = subunit.TestProtocolClient(stream)
+ # Optionally, to get timing data for performance analysis, wrap the
+ # serialiser with a timing decorator
+ result = subunit.test_results.AutoTimingTestResultDecorator(result)
+ # Run the test suite reporting to the subunit result object
+ suite.run(result)
+ # Close the stream.
+ stream.close()
+ """
+
+ def __init__(self, stream):
+ unittest.TestResult.__init__(self)
+ self._stream = stream
+ self.successes = []
+
+ def _addOutcome(self, outcome, test, errors=None):
+ """Report an outcome of test test.
+
+ :param outcome: A string describing the outcome - used as the
+ event name in the subunit stream.
+ :param errors: A list of strings describing the errors.
+ """
+ self._stream.write(("%s: " % outcome) + test.id())
+ if errors:
+ self._stream.write(" [\n")
+ for error in errors:
+ self._stream.write(error)
+ if not error.endswith('\n'):
+ self._stream.write('\n')
+ self._stream.write("]")
+ self._stream.write("\n")
+
+ def addSuccess(self, test):
+ """Report a success in a test."""
+ self.successes.append(test)
+
+ def startTest(self, test):
+ """Mark a test as starting its test run."""
+ super().startTest(test)
+ self._stream.write("test: " + test.id() + "\n")
+ self._stream.flush()
+
+ def stopTest(self, test):
+ """Mark a test as having finished its test run."""
+ super().stopTest(test)
+ self.writeOutcome(test)
+
+ def writeOutcome(self, test):
+ """Output the overall outcome for test test."""
+ err, self.errors = self._filterErrors(test, self.errors)
+ fail, self.failures = self._filterErrors(test, self.failures)
+ xfail, self.expectedFailures = self._filterErrors(test, self.expectedFailures)
+ skip, self.skipped = self._filterErrors(test, self.skipped)
+ success, self.successes = self._filterSuccesses(test, self.successes)
+ uxsuccess, self.unexpectedSuccesses = self._filterSuccesses(test, self.unexpectedSuccesses)
+
+ if err:
+ outcome = "error"
+ elif fail:
+ outcome = "failure"
+ elif skip:
+ outcome = "skip"
+ elif uxsuccess:
+ outcome = "uxsuccess"
+ elif xfail:
+ outcome = "xfail"
+ elif success:
+ outcome = "successful"
+ else:
+ outcome = None
+
+ if outcome:
+ self._addOutcome(outcome, test, errors=err+fail+skip+xfail)
+
+ self._stream.flush()
+
+ def _filterErrors(self, test, errors):
+ """Filter a list of errors by test test.
+
+ :param test: The test to filter by.
+ :param errors: A list of <test, error> pairs to filter.
+
+ :return: A pair whose first element is a list of strings containing
+ errors that apply to test test, and whose second element is a list
+ of the remaining elements.
+ """
+ filtered = []
+ unfiltered = []
+
+ for error in errors:
+ if error[0] is test:
+ filtered.append(error[1])
+ else:
+ unfiltered.append(error)
+
+ return (filtered, unfiltered)
+
+ def _filterSuccesses(self, test, successes):
+ """Filter a list of successes by test test.
+
+ :param test: The test to filter by.
+ :param successes: A list of tests to filter.
+
+ :return: A tuple whose first element is a boolean stating whether test
+ test was found in the list of successes, and whose second element is
+ a list of the remaining elements.
+ """
+ filtered = False
+ unfiltered = []
+
+ for success in successes:
+ if success is test:
+ filtered = True
+ else:
+ unfiltered.append(success)
+
+ return (filtered, unfiltered)
+
+ def time(self, a_datetime):
+ """Inform the client of the time.
+
+ ":param a_datetime: A datetime.datetime object.
+ """
+ time = a_datetime.astimezone(datetime.timezone.utc)
+ self._stream.write("time: %04d-%02d-%02d %02d:%02d:%02d.%06dZ\n" % (
+ time.year, time.month, time.day, time.hour, time.minute,
+ time.second, time.microsecond))
+
+
+def _flatten_tests(suite_or_case, unpack_outer=False):
+ try:
+ tests = iter(suite_or_case)
+ except TypeError:
+ # Not iterable, assume it's a test case.
+ return [(suite_or_case.id(), suite_or_case)]
+ if (type(suite_or_case) in (unittest.TestSuite,) or
+ unpack_outer):
+ # Plain old test suite (or any others we may add).
+ result = []
+ for test in tests:
+ # Recurse to flatten.
+ result.extend(_flatten_tests(test))
+ return result
+ else:
+ # Find any old actual test and grab its id.
+ suite_id = None
+ tests = iterate_tests(suite_or_case)
+ for test in tests:
+ suite_id = test.id()
+ break
+ # If it has a sort_tests method, call that.
+ if getattr(suite_or_case, 'sort_tests', None) is not None:
+ suite_or_case.sort_tests()
+ return [(suite_id, suite_or_case)]
+
+
+def sorted_tests(suite_or_case, unpack_outer=False):
+ """Sort suite_or_case while preserving non-vanilla TestSuites."""
+ tests = _flatten_tests(suite_or_case, unpack_outer=unpack_outer)
+ tests.sort()
+ return unittest.TestSuite([test for (sort_key, test) in tests])
+
+
+def iterate_tests(test_suite_or_case):
+ """Iterate through all of the test cases in 'test_suite_or_case'."""
+ try:
+ suite = iter(test_suite_or_case)
+ except TypeError:
+ yield test_suite_or_case
+ else:
+ for test in suite:
+ for subtest in iterate_tests(test):
+ yield subtest
+
+
+defaultTestLoader = unittest.defaultTestLoader
+defaultTestLoaderCls = unittest.TestLoader
+
+if getattr(defaultTestLoader, 'discover', None) is None:
+ try:
+ import discover
+ defaultTestLoader = discover.DiscoveringTestLoader()
+ defaultTestLoaderCls = discover.DiscoveringTestLoader
+ have_discover = True
+ except ImportError:
+ have_discover = False
+else:
+ have_discover = True
+
+
+####################
+# Taken from python 2.7 and slightly modified for compatibility with
+# older versions. Delete when 2.7 is the oldest supported version.
+# Modifications:
+# - Use have_discover to raise an error if the user tries to use
+# discovery on an old version and doesn't have discover installed.
+# - If --catch is given check that installHandler is available, as
+# it won't be on old python versions.
+# - print calls have been made single-source python3 compatible.
+# - exception handling likewise.
+# - The default help has been changed to USAGE_AS_MAIN and USAGE_FROM_MODULE
+# removed.
+# - A tweak has been added to detect 'python -m *.run' and use a
+# better progName in that case.
+# - self.module is more comprehensively set to None when being invoked from
+# the commandline - __name__ is used as a sentinel value.
+# - --list has been added which can list tests (should be upstreamed).
+# - --load-list has been added which can reduce the tests used (should be
+# upstreamed).
+# - The limitation of using getopt is declared to the user.
+# - http://bugs.python.org/issue16709 is worked around, by sorting tests when
+# discover is used.
+
+CATCHBREAK = " -c, --catch Catch control-C and display results\n"
+BUFFEROUTPUT = " -b, --buffer Buffer stdout and stderr during test runs\n"
+
+USAGE_AS_MAIN = """\
+Usage: %(progName)s [options] [tests]
+
+Options:
+ -h, --help Show this message
+ -v, --verbose Verbose output
+ -q, --quiet Minimal output
+ -l, --list List tests rather than executing them.
+ --load-list Specifies a file containing test ids, only tests matching
+ those ids are executed.
+%(catchbreak)s%(buffer)s
+Examples:
+ %(progName)s test_module - run tests from test_module
+ %(progName)s module.TestClass - run tests from module.TestClass
+ %(progName)s module.Class.test_method - run specified test method
+
+All options must come before [tests]. [tests] can be a list of any number of
+test modules, classes and test methods.
+
+Alternative Usage: %(progName)s discover [options]
+
+Options:
+ -v, --verbose Verbose output
+s%(catchbreak)s%(buffer)s -s directory Directory to start discovery ('.' default)
+ -p pattern Pattern to match test files ('test*.py' default)
+ -t directory Top level directory of project (default to
+ start directory)
+ -l, --list List tests rather than executing them.
+ --load-list Specifies a file containing test ids, only tests matching
+ those ids are executed.
+
+For test discovery all test modules must be importable from the top
+level directory of the project.
+"""
+
+
+# NOT a TestResult, because we are implementing the interface, not inheriting
+# it.
+class TestResultDecorator(object):
+ """General pass-through decorator.
+
+ This provides a base that other TestResults can inherit from to
+ gain basic forwarding functionality. It also takes care of
+ handling the case where the target doesn't support newer methods
+ or features by degrading them.
+ """
+
+ def __init__(self, decorated):
+ """Create a TestResultDecorator forwarding to decorated."""
+ # Make every decorator degrade gracefully.
+ self.decorated = decorated
+
+ def startTest(self, test):
+ return self.decorated.startTest(test)
+
+ def startTestRun(self):
+ return self.decorated.startTestRun()
+
+ def stopTest(self, test):
+ return self.decorated.stopTest(test)
+
+ def stopTestRun(self):
+ return self.decorated.stopTestRun()
+
+ def addError(self, test, err=None):
+ return self.decorated.addError(test, err)
+
+ def addFailure(self, test, err=None):
+ return self.decorated.addFailure(test, err)
+
+ def addSuccess(self, test):
+ return self.decorated.addSuccess(test)
+
+ def addSkip(self, test, reason=None):
+ return self.decorated.addSkip(test, reason)
+
+ def addExpectedFailure(self, test, err=None):
+ return self.decorated.addExpectedFailure(test, err)
+
+ def addUnexpectedSuccess(self, test):
+ return self.decorated.addUnexpectedSuccess(test)
+
+ def wasSuccessful(self):
+ return self.decorated.wasSuccessful()
+
+ @property
+ def shouldStop(self):
+ return self.decorated.shouldStop
+
+ def stop(self):
+ return self.decorated.stop()
+
+ @property
+ def testsRun(self):
+ return self.decorated.testsRun
+
+ def time(self, a_datetime):
+ return self.decorated.time(a_datetime)
+
+
+class HookedTestResultDecorator(TestResultDecorator):
+ """A TestResult which calls a hook on every event."""
+
+ def __init__(self, decorated):
+ self.super = super()
+ self.super.__init__(decorated)
+
+ def startTest(self, test):
+ self._before_event()
+ return self.super.startTest(test)
+
+ def startTestRun(self):
+ self._before_event()
+ return self.super.startTestRun()
+
+ def stopTest(self, test):
+ self._before_event()
+ return self.super.stopTest(test)
+
+ def stopTestRun(self):
+ self._before_event()
+ return self.super.stopTestRun()
+
+ def addError(self, test, err=None):
+ self._before_event()
+ return self.super.addError(test, err)
+
+ def addFailure(self, test, err=None):
+ self._before_event()
+ return self.super.addFailure(test, err)
+
+ def addSuccess(self, test):
+ self._before_event()
+ return self.super.addSuccess(test)
+
+ def addSkip(self, test, reason=None):
+ self._before_event()
+ return self.super.addSkip(test, reason)
+
+ def addExpectedFailure(self, test, err=None):
+ self._before_event()
+ return self.super.addExpectedFailure(test, err)
+
+ def addUnexpectedSuccess(self, test):
+ self._before_event()
+ return self.super.addUnexpectedSuccess(test)
+
+ def wasSuccessful(self):
+ self._before_event()
+ return self.super.wasSuccessful()
+
+ @property
+ def shouldStop(self):
+ self._before_event()
+ return self.super.shouldStop
+
+ def stop(self):
+ self._before_event()
+ return self.super.stop()
+
+ def time(self, a_datetime):
+ self._before_event()
+ return self.super.time(a_datetime)
+
+
+class AutoTimingTestResultDecorator(HookedTestResultDecorator):
+ """Decorate a TestResult to add time events to a test run.
+
+ By default this will cause a time event before every test event,
+ but if explicit time data is being provided by the test run, then
+ this decorator will turn itself off to prevent causing confusion.
+ """
+
+ def __init__(self, decorated):
+ self._time = None
+ super().__init__(decorated)
+
+ def _before_event(self):
+ time = self._time
+ if time is not None:
+ return
+ time = datetime.datetime.now(tz=datetime.timezone.utc)
+ self.decorated.time(time)
+
+ @property
+ def shouldStop(self):
+ return self.decorated.shouldStop
+
+ def time(self, a_datetime):
+ """Provide a timestamp for the current test activity.
+
+ :param a_datetime: If None, automatically add timestamps before every
+ event (this is the default behaviour if time() is not called at
+ all). If not None, pass the provided time onto the decorated
+ result object and disable automatic timestamps.
+ """
+ self._time = a_datetime
+ return self.decorated.time(a_datetime)
+
+
+class SubunitTestRunner(object):
+
+ def __init__(self, verbosity=None, buffer=None, stream=None):
+ """Create a SubunitTestRunner.
+
+ :param verbosity: Ignored.
+ :param buffer: Ignored.
+ """
+ self.stream = stream or sys.stdout
+
+ def run(self, test):
+ "Run the given test case or test suite."
+ result = TestProtocolClient(self.stream)
+ result = AutoTimingTestResultDecorator(result)
+ test(result)
+ return result
+
+
+class TestProgram(object):
+ """A command-line program that runs a set of tests; this is primarily
+ for making test modules conveniently executable.
+ """
+ USAGE = USAGE_AS_MAIN
+
+ # defaults for testing
+ catchbreak = buffer = progName = None
+
+ def __init__(self, module=__name__, defaultTest=None, argv=None,
+ testRunner=None, testLoader=defaultTestLoader,
+ exit=True, verbosity=1, catchbreak=None,
+ buffer=None, stdout=None):
+ if module == __name__:
+ self.module = None
+ elif isinstance(module, str):
+ self.module = __import__(module)
+ for part in module.split('.')[1:]:
+ self.module = getattr(self.module, part)
+ else:
+ self.module = module
+ if argv is None:
+ argv = sys.argv
+ if stdout is None:
+ stdout = sys.stdout
+ if testRunner is None:
+ testRunner = SubunitTestRunner()
+
+ self.exit = exit
+ self.catchbreak = catchbreak
+ self.verbosity = verbosity
+ self.buffer = buffer
+ self.defaultTest = defaultTest
+ self.listtests = False
+ self.load_list = None
+ self.testRunner = testRunner
+ self.testLoader = testLoader
+ progName = argv[0]
+ if progName.endswith('%srun.py' % os.path.sep):
+ elements = progName.split(os.path.sep)
+ progName = '%s.run' % elements[-2]
+ else:
+ progName = os.path.basename(argv[0])
+ self.progName = progName
+ self.parseArgs(argv)
+ if self.load_list:
+ # TODO: preserve existing suites (like testresources does in
+ # OptimisingTestSuite.add, but with a standard protocol).
+ # This is needed because the load_tests hook allows arbitrary
+ # suites, even if that is rarely used.
+ source = open(self.load_list, 'rb')
+ try:
+ lines = source.readlines()
+ finally:
+ source.close()
+ test_ids = set(line.strip().decode('utf-8') for line in lines)
+ filtered = unittest.TestSuite()
+ for test in iterate_tests(self.test):
+ if test.id() in test_ids:
+ filtered.addTest(test)
+ self.test = filtered
+ if not self.listtests:
+ self.runTests()
+ else:
+ for test in iterate_tests(self.test):
+ stdout.write('%s\n' % test.id())
+
+ def parseArgs(self, argv):
+ if len(argv) > 1 and argv[1].lower() == 'discover':
+ self._do_discovery(argv[2:])
+ return
+
+ import getopt
+ long_opts = ['help', 'verbose', 'quiet', 'catch', 'buffer',
+ 'list', 'load-list=']
+ try:
+ options, args = getopt.getopt(argv[1:], 'hHvqfcbl', long_opts)
+ for opt, value in options:
+ if opt in ('-h','-H','--help'):
+ self.usageExit()
+ if opt in ('-q','--quiet'):
+ self.verbosity = 0
+ if opt in ('-v','--verbose'):
+ self.verbosity = 2
+ if opt in ('-c','--catch'):
+ if self.catchbreak is None:
+ self.catchbreak = True
+ # Should this raise an exception if -c is not valid?
+ if opt in ('-b','--buffer'):
+ if self.buffer is None:
+ self.buffer = True
+ # Should this raise an exception if -b is not valid?
+ if opt in ('-l', '--list'):
+ self.listtests = True
+ if opt == '--load-list':
+ self.load_list = value
+ if len(args) == 0 and self.defaultTest is None:
+ # createTests will load tests from self.module
+ self.testNames = None
+ elif len(args) > 0:
+ self.testNames = args
+ else:
+ self.testNames = (self.defaultTest,)
+ self.createTests()
+ except getopt.error:
+ self.usageExit(sys.exc_info()[1])
+
+ def createTests(self):
+ if self.testNames is None:
+ self.test = self.testLoader.loadTestsFromModule(self.module)
+ else:
+ self.test = self.testLoader.loadTestsFromNames(self.testNames,
+ self.module)
+
+ def _do_discovery(self, argv, Loader=defaultTestLoaderCls):
+ # handle command line args for test discovery
+ if not have_discover:
+ raise AssertionError("Unable to use discovery, must use python 2.7 "
+ "or greater, or install the discover package.")
+ self.progName = '%s discover' % self.progName
+ import optparse
+ parser = optparse.OptionParser()
+ parser.prog = self.progName
+ parser.add_option('-v', '--verbose', dest='verbose', default=False,
+ help='Verbose output', action='store_true')
+ if self.catchbreak is not False:
+ parser.add_option('-c', '--catch', dest='catchbreak', default=False,
+ help='Catch ctrl-C and display results so far',
+ action='store_true')
+ if self.buffer is not False:
+ parser.add_option('-b', '--buffer', dest='buffer', default=False,
+ help='Buffer stdout and stderr during tests',
+ action='store_true')
+ parser.add_option('-s', '--start-directory', dest='start', default='.',
+ help="Directory to start discovery ('.' default)")
+ parser.add_option('-p', '--pattern', dest='pattern', default='test*.py',
+ help="Pattern to match tests ('test*.py' default)")
+ parser.add_option('-t', '--top-level-directory', dest='top', default=None,
+ help='Top level directory of project (defaults to start directory)')
+ parser.add_option('-l', '--list', dest='listtests', default=False, action="store_true",
+ help='List tests rather than running them.')
+ parser.add_option('--load-list', dest='load_list', default=None,
+ help='Specify a filename containing the test ids to use.')
+
+ options, args = parser.parse_args(argv)
+ if len(args) > 3:
+ self.usageExit()
+
+ for name, value in zip(('start', 'pattern', 'top'), args):
+ setattr(options, name, value)
+
+ # only set options from the parsing here
+ # if they weren't set explicitly in the constructor
+ if self.catchbreak is None:
+ self.catchbreak = options.catchbreak
+ if self.buffer is None:
+ self.buffer = options.buffer
+ self.listtests = options.listtests
+ self.load_list = options.load_list
+
+ if options.verbose:
+ self.verbosity = 2
+
+ start_dir = options.start
+ pattern = options.pattern
+ top_level_dir = options.top
+
+ loader = Loader()
+ # See http://bugs.python.org/issue16709
+ # While sorting here is intrusive, its better than being random.
+ # Rules for the sort:
+ # - standard suites are flattened, and the resulting tests sorted by
+ # id.
+ # - non-standard suites are preserved as-is, and sorted into position
+ # by the first test found by iterating the suite.
+ # We do this by a DSU process: flatten and grab a key, sort, strip the
+ # keys.
+ loaded = loader.discover(start_dir, pattern, top_level_dir)
+ self.test = sorted_tests(loaded)
+
+ def runTests(self):
+ if (self.catchbreak
+ and getattr(unittest, 'installHandler', None) is not None):
+ unittest.installHandler()
+ self.result = self.testRunner.run(self.test)
+ if self.exit:
+ sys.exit(not self.result.wasSuccessful())
+
+ def usageExit(self, msg=None):
+ if msg:
+ print (msg)
+ usage = {'progName': self.progName, 'catchbreak': '',
+ 'buffer': ''}
+ if self.catchbreak is not False:
+ usage['catchbreak'] = CATCHBREAK
+ if self.buffer is not False:
+ usage['buffer'] = BUFFEROUTPUT
+ usage_text = self.USAGE % usage
+ usage_lines = usage_text.split('\n')
+ usage_lines.insert(2, "Run a test suite with a subunit reporter.")
+ usage_lines.insert(3, "")
+ print('\n'.join(usage_lines))
+ sys.exit(2)
+
+
+if __name__ == '__main__':
+ TestProgram(module=None, argv=sys.argv, stdout=sys.stdout)
diff --git a/python/samba/tdb_util.py b/python/samba/tdb_util.py
new file mode 100644
index 0000000..99b6e02
--- /dev/null
+++ b/python/samba/tdb_util.py
@@ -0,0 +1,46 @@
+# Unix SMB/CIFS implementation.
+# tdb util helpers
+#
+# Copyright (C) Kai Blin <kai@samba.org> 2011
+# Copyright (C) Amitay Isaacs <amitay@gmail.com> 2011
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2013
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import samba
+import subprocess
+import os
+
+
+def tdb_copy(file1, file2, readonly=False):
+ """Copy tdb file using tdbbackup utility and rename it
+ """
+ # Find the location of tdbbackup tool
+ dirs = ["bin", samba.param.bin_dir()] + os.getenv('PATH').split(os.pathsep)
+ for d in dirs:
+ toolpath = os.path.join(d, "tdbbackup")
+ if os.path.exists(toolpath):
+ break
+ else:
+ # we did not find a path to tdbbackup
+ raise FileNotFoundError(2, "could not find tdbbackup tool: "
+ "is tdb-tools installed?")
+
+ tdbbackup_cmd = [toolpath, "-s", ".copy.tdb", file1]
+ if readonly:
+ tdbbackup_cmd.append("-r")
+
+ status = subprocess.check_call(tdbbackup_cmd, close_fds=True, shell=False)
+
+ os.rename("%s.copy.tdb" % file1, file2)
diff --git a/python/samba/tests/__init__.py b/python/samba/tests/__init__.py
new file mode 100644
index 0000000..136dd2f
--- /dev/null
+++ b/python/samba/tests/__init__.py
@@ -0,0 +1,824 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007-2010
+# Copyright (C) Stefan Metzmacher 2014,2015
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Samba Python tests."""
+import os
+import tempfile
+import traceback
+import collections
+import ldb
+import samba
+from samba import param
+from samba import credentials
+from samba.credentials import Credentials
+import subprocess
+import sys
+import unittest
+import re
+from enum import IntEnum, unique
+import samba.auth
+import samba.gensec
+import samba.dcerpc.base
+from random import randint
+from random import SystemRandom
+from contextlib import contextmanager
+import shutil
+import string
+try:
+ from samba.samdb import SamDB
+except ImportError:
+ # We are built without samdb support,
+ # imitate it so that connect_samdb() can recover
+ def SamDB(*args, **kwargs):
+ return None
+
+import samba.ndr
+import samba.dcerpc.dcerpc
+import samba.dcerpc.epmapper
+
+from unittest import SkipTest
+
+
+BINDIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
+ "../../../../bin"))
+
+HEXDUMP_FILTER = bytearray([x if ((len(repr(chr(x))) == 3) and (x < 127)) else ord('.') for x in range(256)])
+
+LDB_ERR_LUT = {v: k for k, v in vars(ldb).items() if k.startswith('ERR_')}
+
+RE_CAMELCASE = re.compile(r"([_\-])+")
+
+
+def ldb_err(v):
+ if isinstance(v, ldb.LdbError):
+ v = v.args[0]
+
+ if v in LDB_ERR_LUT:
+ return LDB_ERR_LUT[v]
+
+ try:
+ return f"[{', '.join(LDB_ERR_LUT.get(x, x) for x in v)}]"
+ except TypeError as e:
+ print(e)
+ return v
+
+
+def DynamicTestCase(cls):
+ cls.setUpDynamicTestCases()
+ return cls
+
+
+class TestCase(unittest.TestCase):
+ """A Samba test case."""
+
+ # Re-implement addClassCleanup to support Python versions older than 3.8.
+ # Can be removed once these older Python versions are no longer needed.
+ if sys.version_info.major == 3 and sys.version_info.minor < 8:
+ _class_cleanups = []
+
+ @classmethod
+ def addClassCleanup(cls, function, *args, **kwargs):
+ cls._class_cleanups.append((function, args, kwargs))
+
+ @classmethod
+ def tearDownClass(cls):
+ teardown_exceptions = []
+
+ while cls._class_cleanups:
+ function, args, kwargs = cls._class_cleanups.pop()
+ try:
+ function(*args, **kwargs)
+ except Exception:
+ teardown_exceptions.append(traceback.format_exc())
+
+ # ExceptionGroup would be better but requires Python 3.11
+ if teardown_exceptions:
+ raise ValueError("tearDownClass failed:\n\n" +
+ "\n".join(teardown_exceptions))
+
+ @classmethod
+ def setUpClass(cls):
+ """
+ Call setUpTestData, ensure tearDownClass is called on exceptions.
+
+ This is only required on Python versions older than 3.8.
+ """
+ try:
+ cls.setUpTestData()
+ except Exception:
+ cls.tearDownClass()
+ raise
+ else:
+ @classmethod
+ def setUpClass(cls):
+ """
+ setUpClass only needs to call setUpTestData.
+
+ On Python 3.8 and above unittest will always call tearDownClass,
+ even if an exception was raised in setUpClass.
+ """
+ cls.setUpTestData()
+
+ @classmethod
+ def setUpTestData(cls):
+ """Create class level test fixtures here."""
+ pass
+
+ @classmethod
+ def generate_dynamic_test(cls, fnname, suffix, *args, doc=None):
+ """
+ fnname is something like "test_dynamic_sum"
+ suffix is something like "1plus2"
+ argstr could be (1, 2)
+
+ This would generate a test case called
+ "test_dynamic_sum_1plus2(self)" that
+ calls
+ self._test_dynamic_sum_with_args(1, 2)
+ """
+ def fn(self):
+ getattr(self, "_%s_with_args" % fnname)(*args)
+ fn.__doc__ = doc
+ attr = "%s_%s" % (fnname, suffix)
+ if hasattr(cls, attr):
+ raise RuntimeError(f"Dynamic test {attr} already exists!")
+ setattr(cls, attr, fn)
+
+ @classmethod
+ def setUpDynamicTestCases(cls):
+ """This can be implemented in order to call cls.generate_dynamic_test()
+ In order to implement autogenerated testcase permutations.
+ """
+ msg = "%s needs setUpDynamicTestCases() if @DynamicTestCase is used!" % (cls)
+ raise NotImplementedError(msg)
+
+ def unique_name(self):
+ """Generate a unique name from within a test for creating objects.
+
+ Used to ensure each test generates uniquely named objects that don't
+ interfere with other tests.
+ """
+ # name of calling function
+ name = self.id().rsplit(".", 1)[1]
+
+ # remove test_ prefix
+ if name.startswith("test_"):
+ name = name[5:]
+
+ # finally, convert to camelcase
+ name = RE_CAMELCASE.sub(" ", name).title().replace(" ", "")
+ return "".join([name[0].lower(), name[1:]])
+
+ def setUp(self):
+ super().setUp()
+ test_debug_level = os.getenv("TEST_DEBUG_LEVEL")
+ if test_debug_level is not None:
+ test_debug_level = int(test_debug_level)
+ self._old_debug_level = samba.get_debug_level()
+ samba.set_debug_level(test_debug_level)
+ self.addCleanup(samba.set_debug_level, test_debug_level)
+
+ @classmethod
+ def get_loadparm(cls, s3=False):
+ return env_loadparm(s3=s3)
+
+ def get_credentials(self):
+ return cmdline_credentials
+
+ @classmethod
+ def get_env_credentials(cls, *, lp, env_username, env_password,
+ env_realm=None, env_domain=None):
+ creds = credentials.Credentials()
+
+ # guess Credentials parameters here. Otherwise, workstation
+ # and domain fields are NULL and gencache code segfaults
+ creds.guess(lp)
+ creds.set_username(env_get_var_value(env_username))
+ creds.set_password(env_get_var_value(env_password))
+
+ if env_realm is not None:
+ creds.set_realm(env_get_var_value(env_realm))
+
+ if env_domain is not None:
+ creds.set_domain(env_get_var_value(env_domain))
+
+ return creds
+
+ def get_creds_ccache_name(self):
+ creds = self.get_credentials()
+ ccache = creds.get_named_ccache(self.get_loadparm())
+ ccache_name = ccache.get_name()
+
+ return ccache_name
+
    def hexdump(self, src):
        """Return a classic hex dump of src (str or bytes): offset column,
        two groups of 8 hex bytes, then the printable characters."""
        N = 0  # running byte offset shown in the left-hand column
        result = ''
        is_string = isinstance(src, str)
        while src:
            # consume 16 units per output line, split into two groups of 8
            ll = src[:8]
            lr = src[8:16]
            src = src[16:]
            if is_string:
                hl = ' '.join(["%02X" % ord(x) for x in ll])
                hr = ' '.join(["%02X" % ord(x) for x in lr])
                # HEXDUMP_FILTER (module-level table) maps non-printable
                # characters to a placeholder for the right-hand column
                ll = ll.translate(HEXDUMP_FILTER)
                lr = lr.translate(HEXDUMP_FILTER)
            else:
                # bytes iterate as ints, so no ord(); translate() then
                # yields bytes which must be decoded for display
                hl = ' '.join(["%02X" % x for x in ll])
                hr = ' '.join(["%02X" % x for x in lr])
                ll = ll.translate(HEXDUMP_FILTER).decode('utf8')
                lr = lr.translate(HEXDUMP_FILTER).decode('utf8')
            # 8 * 3 pads each hex group to a fixed width ("XX " per byte)
            result += "[%04X] %-*s %-*s %s %s\n" % (N, 8 * 3, hl, 8 * 3, hr, ll, lr)
            N += 16
        return result
+
    def insta_creds(self, template=None, username=None, userpass=None, kerberos_state=None):
        """Return a fresh Credentials object copied from a template.

        :param template: Credentials to copy domain, realm, workstation,
            bind DN and (by default) username/password from; required
        :param username: optional username override; requires userpass
        :param userpass: password to go with the username override
        :param kerberos_state: optional override, defaults to the
            template's kerberos state
        :raises ValueError: if template is missing, or username is given
            without a password
        """
        if template is None:
            raise ValueError("you need to supply a Credentials template")

        if username is not None and userpass is None:
            raise ValueError(
                "you cannot set creds username without setting a password")

        if username is None:
            assert userpass is None

            username = template.get_username()
            userpass = template.get_password()

        simple_bind_dn = template.get_bind_dn()

        if kerberos_state is None:
            kerberos_state = template.get_kerberos_state()

        # get a copy of the global creds or the passed in creds
        c = Credentials()
        c.set_username(username)
        c.set_password(userpass)
        c.set_domain(template.get_domain())
        c.set_realm(template.get_realm())
        c.set_workstation(template.get_workstation())
        # always request sealing on top of whatever is already enabled
        c.set_gensec_features(c.get_gensec_features()
                              | samba.gensec.FEATURE_SEAL)
        c.set_kerberos_state(kerberos_state)
        if simple_bind_dn:
            c.set_bind_dn(simple_bind_dn)
        return c
+
+ def assertStringsEqual(self, a, b, msg=None, strip=False):
+ """Assert equality between two strings and highlight any differences.
+ If strip is true, leading and trailing whitespace is ignored."""
+ if strip:
+ a = a.strip()
+ b = b.strip()
+
+ if a != b:
+ sys.stderr.write("The strings differ %s(lengths %d vs %d); "
+ "a diff follows\n"
+ % ('when stripped ' if strip else '',
+ len(a), len(b),
+ ))
+
+ from difflib import unified_diff
+ diff = unified_diff(a.splitlines(True),
+ b.splitlines(True),
+ 'a', 'b')
+ for line in diff:
+ sys.stderr.write(line)
+
+ self.fail(msg)
+
+ def assertRaisesLdbError(self, errcode, message, f, *args, **kwargs):
+ """Assert a function raises a particular LdbError."""
+ if message is None:
+ message = f"{f.__name__}(*{args}, **{kwargs})"
+ try:
+ f(*args, **kwargs)
+ except ldb.LdbError as e:
+ (num, msg) = e.args
+ if isinstance(errcode, collections.abc.Container):
+ found = num in errcode
+ else:
+ found = num == errcode
+ if not found:
+ lut = {v: k for k, v in vars(ldb).items()
+ if k.startswith('ERR_') and isinstance(v, int)}
+ if isinstance(errcode, collections.abc.Container):
+ errcode_name = ' '.join(lut.get(x) for x in errcode)
+ else:
+ errcode_name = lut.get(errcode)
+ self.fail(f"{message}, expected "
+ f"LdbError {errcode_name}, {errcode} "
+ f"got {lut.get(num)} ({num}) "
+ f"{msg}")
+ else:
+ lut = {v: k for k, v in vars(ldb).items()
+ if k.startswith('ERR_') and isinstance(v, int)}
+ if isinstance(errcode, collections.abc.Container):
+ errcode_name = ' '.join(lut.get(x) for x in errcode)
+ else:
+ errcode_name = lut.get(errcode)
+ self.fail("%s, expected "
+ "LdbError %s, (%s) "
+ "but we got success" % (message,
+ errcode_name,
+ errcode))
+
+
class LdbTestCase(TestCase):
    """Trivial test case for running tests against a LDB."""

    def setUp(self):
        super().setUp()
        # NOTE(review): the file is created with delete=False and never
        # removed afterwards — this looks like it leaks one temp file per
        # test; confirm whether something else cleans it up.
        self.tempfile = tempfile.NamedTemporaryFile(delete=False)
        self.filename = self.tempfile.name
        self.ldb = samba.Ldb(self.filename)

    def set_modules(self, modules=None):
        """Change the modules for this Ldb."""
        if modules is None:
            modules = []
        m = ldb.Message()
        m.dn = ldb.Dn(self.ldb, "@MODULES")
        m["@LIST"] = ",".join(modules)
        self.ldb.add(m)
        # re-open the database so the new module list takes effect
        self.ldb = samba.Ldb(self.filename)
+
+
class TestCaseInTempDir(TestCase):
    """Test case that creates a temporary directory per test.

    The directory must be empty again by the end of the test; anything
    left behind causes an error during cleanup.
    """

    def setUp(self):
        super().setUp()
        self.tempdir = tempfile.mkdtemp()
        self.addCleanup(self._remove_tempdir)

    def _remove_tempdir(self):
        # Note asserting here is treated as an error rather than a test failure
        self.assertEqual([], os.listdir(self.tempdir))
        os.rmdir(self.tempdir)
        self.tempdir = None

    @contextmanager
    def mktemp(self):
        """Yield a temporary filename in the tempdir."""
        # Fix: create the file *before* entering the try block.  If
        # mkstemp() itself fails there is nothing to clean up, and the
        # old code's finally block would hit a NameError on the unbound
        # fd/fn, masking the original exception.
        fd, fn = tempfile.mkstemp(dir=self.tempdir)
        try:
            yield fn
        finally:
            try:
                os.close(fd)
                os.unlink(fn)
            except (OSError, IOError) as e:
                # best effort: report but don't fail the test here
                print("could not remove temporary file: %s" % e,
                      file=sys.stderr)

    def rm_files(self, *files, allow_missing=False, _rm=os.remove):
        """Remove listed files from the temp directory.

        The files must be true files in the directory itself, not in
        sub-directories.

        By default a non-existent file will cause a test failure (or
        error if used outside a test in e.g. tearDown), but if
        allow_missing is true, the absence will be ignored.
        """
        for f in files:
            path = os.path.join(self.tempdir, f)

            # os.path.join will happily step out of the tempdir,
            # so let's just check.
            if os.path.dirname(path) != self.tempdir:
                raise ValueError(f"{path} might be outside {self.tempdir}")

            try:
                _rm(path)
            except FileNotFoundError as e:
                if not allow_missing:
                    raise AssertionError(f"{f} not in {self.tempdir}: {e}")

                print(f"{f} not in {self.tempdir}")

    def rm_dirs(self, *dirs, allow_missing=False):
        """Remove listed directories from temp directory.

        This works like rm_files, but only removes directories,
        including their contents.
        """
        self.rm_files(*dirs, allow_missing=allow_missing, _rm=shutil.rmtree)
+
+
def env_loadparm(s3=False):
    """Load and return a loadparm context from the smb.conf named by the
    SMB_CONF_PATH environment variable.

    :param s3: if true, build a source3 (samba3) param context instead of
        the default s4 LoadParm
    :raises KeyError: if SMB_CONF_PATH is not set
    """
    if s3:
        # imported lazily so the s3 python bindings are only required
        # when actually requested
        from samba.samba3 import param as s3param
        lp = s3param.get_context()
    else:
        lp = param.LoadParm()

    try:
        lp.load(os.environ["SMB_CONF_PATH"])
    except KeyError:
        # re-raise with a clearer message for the test author
        raise KeyError("SMB_CONF_PATH not set")
    return lp
+
def env_get_var_value(var_name, allow_missing=False):
    """Return the value of variable var_name from os.environ.

    Unit-test based python tests require certain input params
    to be set in environment, otherwise they can't be run.

    :param var_name: name of the environment variable
    :param allow_missing: if true, return None when the variable is
        unset instead of failing
    :raises AssertionError: if the variable is unset and allow_missing
        is false
    """
    try:
        return os.environ[var_name]
    except KeyError:
        if allow_missing:
            return None
        # Raise explicitly rather than via the assert statement so the
        # check survives ``python -O`` while keeping the documented
        # AssertionError contract.  Also avoids the old double lookup
        # through os.environ.keys().
        raise AssertionError(
            "Please supply %s in environment" % var_name) from None
+
+
+cmdline_credentials = None
+
+
class RpcInterfaceTestCase(TestCase):
    """DCE/RPC Test case.

    Currently just a marker base class with no behaviour of its own.
    """
+
+
class BlackboxProcessError(Exception):
    """Raised when a check_output() process returns a non-zero exit status.

    Instances carry the exact exit code (S.returncode), the command line
    (S.cmd), the process output (S.stdout) and error stream (S.stderr),
    plus an optional extra message (S.msg).
    """

    def __init__(self, returncode, cmd, stdout, stderr, msg=None):
        self.returncode = returncode
        # A list means the command was run without a shell; anything
        # else is a shell command line.
        self.shell = not isinstance(cmd, list)
        self.cmd = cmd if self.shell else ' '.join(cmd)
        self.stdout = stdout
        self.stderr = stderr
        self.msg = msg

    def __str__(self):
        text = (f"Command '{self.cmd}'; shell {self.shell}; "
                f"exit status {self.returncode:d}; "
                f"stdout: '{self.stdout}'; stderr: '{self.stderr}'")
        if self.msg is not None:
            text = f"{text}; message: {self.msg}"

        return text
+
+
class BlackboxTestCase(TestCaseInTempDir):
    """Base test case for blackbox tests."""

    @staticmethod
    def _make_cmdline(line):
        """Expand the called script into a fully resolved path in the bin
        directory.

        Accepts either a list argv or a single shell string; the return
        value has the same type as the input.
        """
        if isinstance(line, list):
            parts = line
        else:
            parts = line.split(" ", 1)
        cmd = parts[0]
        exe = os.path.join(BINDIR, cmd)

        # commands that are python scripts and so may need an explicit
        # interpreter prefix from $PYTHON
        python_cmds = ["samba-tool",
                       "samba_dnsupdate",
                       "samba_upgradedns",
                       "script/traffic_replay",
                       "script/traffic_learner"]

        if os.path.exists(exe):
            parts[0] = exe
            if cmd in python_cmds and os.getenv("PYTHON", False):
                parts.insert(0, os.environ["PYTHON"])

        # re-join only if the caller passed a string
        if not isinstance(line, list):
            line = " ".join(parts)

        return line

    @classmethod
    def check_run(cls, line, msg=None):
        """Run a command and require a zero exit status."""
        cls.check_exit_code(line, 0, msg=msg)

    @classmethod
    def check_exit_code(cls, line, expected, msg=None):
        """Run a command and require a specific exit status.

        :raises BlackboxProcessError: if the exit status differs from
            expected
        :return: the raw stdout bytes
        """
        line = cls._make_cmdline(line)
        # a list argv runs without a shell; a string goes via the shell
        use_shell = not isinstance(line, list)
        p = subprocess.Popen(line,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE,
                             shell=use_shell)
        stdoutdata, stderrdata = p.communicate()
        retcode = p.returncode
        if retcode != expected:
            if msg is None:
                msg = "expected return code %s; got %s" % (expected, retcode)
            raise BlackboxProcessError(retcode,
                                       line,
                                       stdoutdata,
                                       stderrdata,
                                       msg)
        return stdoutdata

    @classmethod
    def check_output(cls, line):
        """Run a command, requiring exit status 0, and return its stdout.

        :raises BlackboxProcessError: on any non-zero exit status
        """
        # use_shell is computed before _make_cmdline, but that is safe:
        # _make_cmdline preserves list-vs-string type
        use_shell = not isinstance(line, list)
        line = cls._make_cmdline(line)
        p = subprocess.Popen(line, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                             shell=use_shell, close_fds=True)
        stdoutdata, stderrdata = p.communicate()
        retcode = p.returncode
        if retcode:
            raise BlackboxProcessError(retcode, line, stdoutdata, stderrdata)
        return stdoutdata

    @classmethod
    def run_command(cls, line):
        """Run a command without checking the return code.

        :return: tuple (ret, stdout, stderr) where ret is the return
            code and stdout/stderr are decoded UTF-8 strings
        """
        line = cls._make_cmdline(line)
        use_shell = not isinstance(line, list)
        p = subprocess.Popen(line,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE,
                             shell=use_shell)
        stdoutdata, stderrdata = p.communicate()
        retcode = p.returncode
        return (retcode, stdoutdata.decode('UTF8'), stderrdata.decode('UTF8'))

    def random_password(self, count=32):
        """Generate a random password that can be safely passed on the
        command line, i.e. it does not contain any shell meta characters.

        The first three characters always cover upper case, digit and
        lower case to satisfy typical complexity requirements.
        """
        password = SystemRandom().choice(string.ascii_uppercase)
        password += SystemRandom().choice(string.digits)
        password += SystemRandom().choice(string.ascii_lowercase)
        password += ''.join(SystemRandom().choice(string.ascii_uppercase +
                                                  string.ascii_lowercase +
                                                  string.digits) for x in range(count - 3))
        return password
+
+
def connect_samdb(samdb_url, *, lp=None, session_info=None, credentials=None,
                  flags=0, ldb_options=None, ldap_only=False, global_schema=True):
    """Create SamDB instance and connects to samdb_url database.

    :param samdb_url: Url for database to connect to.
    :param lp: Optional loadparm object
    :param session_info: Optional session information
    :param credentials: Optional credentials, defaults to anonymous.
    :param flags: Optional LDB flags
    :param ldb_options: Optional LDB options list; note that for ldap://
        URLs it is replaced by the paged_searches module (see below)
    :param ldap_only: If set, only remote LDAP connection will be created.
    :param global_schema: Whether to use global schema.

    Added value for tests is that we have a shorthand function
    to make proper URL for ldb.connect() while using default
    parameters for connection based on test environment
    """
    if "://" not in samdb_url:
        # bare path/hostname: prefer a local tdb file when one exists
        if not ldap_only and os.path.isfile(samdb_url):
            samdb_url = "tdb://%s" % samdb_url
        else:
            samdb_url = "ldap://%s" % samdb_url
    # use 'paged_search' module when connecting remotely
    if samdb_url.startswith("ldap://"):
        # NOTE(review): this overwrites any caller-supplied ldb_options
        # for remote connections — presumably intentional; confirm.
        ldb_options = ["modules:paged_searches"]
    elif ldap_only:
        raise AssertionError("Trying to connect to %s while remote "
                             "connection is required" % samdb_url)

    # set defaults for test environment
    if lp is None:
        lp = env_loadparm()
    if session_info is None:
        session_info = samba.auth.system_session(lp)
    if credentials is None:
        credentials = cmdline_credentials

    return SamDB(url=samdb_url,
                 lp=lp,
                 session_info=session_info,
                 credentials=credentials,
                 flags=flags,
                 options=ldb_options,
                 global_schema=global_schema)
+
+
def connect_samdb_ex(samdb_url, *, lp=None, session_info=None, credentials=None,
                     flags=0, ldb_options=None, ldap_only=False):
    """Connects to samdb_url database

    :param samdb_url: Url for database to connect to.
    :param lp: Optional loadparm object
    :param session_info: Optional session information
    :param credentials: Optional credentials, defaults to anonymous.
    :param flags: Optional LDB flags
    :param ldb_options: Optional LDB options list
    :param ldap_only: If set, only remote LDAP connection will be created.
    :return: (sam_db_connection, rootDse_record) tuple
    """
    sam_db = connect_samdb(samdb_url, lp=lp, session_info=session_info,
                           credentials=credentials, flags=flags,
                           ldb_options=ldb_options, ldap_only=ldap_only)
    # fetch RootDse record with all its attributes
    res = sam_db.search(base="", expression="", scope=ldb.SCOPE_BASE,
                        attrs=["*"])
    return (sam_db, res[0])
+
+
def connect_samdb_env(env_url, env_username, env_password, lp=None):
    """Connect to SamDB by getting URL and Credentials from environment

    :param env_url: Environment variable name to get lsb url from
    :param env_username: Username environment variable
    :param env_password: Password environment variable
    :param lp: Optional loadparm object; a default one is created if None
    :return: sam_db_connection
    :raises AssertionError: if any named environment variable is unset
    """
    samdb_url = env_get_var_value(env_url)
    creds = credentials.Credentials()
    if lp is None:
        # guess Credentials parameters here. Otherwise workstation
        # and domain fields are NULL and gencache code segfaults
        lp = param.LoadParm()
    creds.guess(lp)
    creds.set_username(env_get_var_value(env_username))
    creds.set_password(env_get_var_value(env_password))
    return connect_samdb(samdb_url, credentials=creds, lp=lp)
+
+
def delete_force(samdb, dn, **kwargs):
    """Delete dn from samdb, ignoring a missing object.

    Any LdbError other than ERR_NO_SUCH_OBJECT is treated as a test
    error via assert.
    """
    try:
        samdb.delete(dn, **kwargs)
    except ldb.LdbError as error:
        (num, errstr) = error.args
        assert num == ldb.ERR_NO_SUCH_OBJECT, "ldb.delete() failed: %s" % errstr
+
+
def create_test_ou(samdb, name):
    """Creates a unique OU for the test

    :param samdb: SamDB connection to create the OU on
    :param name: base name for the OU; a random suffix is appended
    :return: the DN of the newly created OU
    """

    # Add some randomness to the test OU. Replication between the testenvs is
    # constantly happening in the background. Deletion of the last test's
    # objects can be slow to replicate out. So the OU created by a previous
    # testenv may still exist at the point that tests start on another testenv.
    rand = randint(1, 10000000)
    dn = ldb.Dn(samdb, "OU=%s%d,%s" % (name, rand, samdb.get_default_basedn()))
    samdb.add({"dn": dn, "objectclass": "organizationalUnit"})
    return dn
+
+
@unique
class OptState(IntEnum):
    """States of the per-character scanner in parse_help_consistency()."""
    NOOPT = 0    # not currently inside an option token
    HYPHEN1 = 1  # seen a single leading '-'
    HYPHEN2 = 2  # seen '--'
    NAME = 3     # accumulating the option name characters
+
+
def parse_help_consistency(out,
                           options_start=None,
                           options_end=None,
                           optmap=None,
                           max_leading_spaces=10):
    """Scan --help output and collect option definitions into optmap.

    :param out: the full --help text
    :param options_start: if given, ignore everything before this line
    :param options_end: if given, stop at this line
    :param optmap: dict mapping option name -> list of lines defining it;
        filled in place.  NOTE(review): despite the None default, this is
        always dereferenced — callers must pass a dict.
    :param max_leading_spaces: lines indented deeper than this relative
        to their stripped form are treated as continuations and skipped
    """
    # opt_lines is None while we are still waiting for options_start;
    # once collecting, it holds candidate option lines
    if options_start is None:
        opt_lines = []
    else:
        opt_lines = None

    for raw_line in out.split('\n'):
        line = raw_line.lstrip()
        if line == '':
            continue
        if opt_lines is None:
            if line == options_start:
                # start collecting from the next lines onwards
                opt_lines = []
            else:
                continue
        if len(line) < len(raw_line) - max_leading_spaces:
            # for the case where we have:
            #
            #  --foo        frobnicate or barlify depending on
            #               --bar option.
            #
            # where we want to ignore the --bar.
            continue
        if line[0] == '-':
            opt_lines.append(line)
        if line == options_end:
            break

    if opt_lines is None:
        # No --help options is not an error in *this* test.
        return

    # characters that may continue a long option name
    is_longname_char = re.compile(r'^[\w-]$').match
    for line in opt_lines:
        # walk the line one character at a time with a small state
        # machine (see OptState), recording each option name found
        state = OptState.NOOPT
        name = None
        prev = ' '
        for c in line:
            if state == OptState.NOOPT:
                # an option can only start after whitespace
                if c == '-' and prev.isspace():
                    state = OptState.HYPHEN1
                prev = c
                continue
            if state == OptState.HYPHEN1:
                if c.isalnum():
                    name = '-' + c
                    state = OptState.NAME
                elif c == '-':
                    state = OptState.HYPHEN2
                continue
            if state == OptState.HYPHEN2:
                if c.isalnum():
                    name = '--' + c
                    state = OptState.NAME
                else:  # WTF, perhaps '--' ending option list.
                    state = OptState.NOOPT
                    prev = c
                continue
            if state == OptState.NAME:
                if is_longname_char(c):
                    name += c
                else:
                    # name ended: record the defining line for it
                    optmap.setdefault(name, []).append(line)
                    state = OptState.NOOPT
                prev = c

        if state == OptState.NAME:
            # option name ran to the end of the line
            optmap.setdefault(name, []).append(line)
+
+
def check_help_consistency(out,
                           options_start=None,
                           options_end=None):
    """Ensure that options are not repeated and redefined in --help
    output.

    Returns None if everything is OK, otherwise a string indicating
    the problems.

    If options_start and/or options_end are provided, only the bit in
    the output between these two lines is considered. For example,
    with samba-tool,

    options_start='Options:', options_end='Available subcommands:'

    will prevent the test looking at the preamble which may contain
    examples using options.
    """
    # Silly test, you might think, but this happens
    optmap = {}
    parse_help_consistency(out,
                           options_start,
                           options_end,
                           optmap)

    # every option defined on more than one line is a problem; report
    # each offending line prefixed with the option name
    problems = ["%s: %s" % (opt, defining_line)
                for opt, defining_lines in sorted(optmap.items())
                if len(defining_lines) > 1
                for defining_line in defining_lines]

    if problems:
        return "\n".join(problems)
+
+
def get_env_dir(key):
    """A helper to pull a directory name from the environment, used in
    some tests that optionally write e.g. fuzz seeds into a directory
    named in an environment variable.

    :param key: environment variable name
    :return: the directory name, or None if the variable is unset
    :raises ValueError: if the variable names a non-existent directory
    """
    # renamed from 'dir' to avoid shadowing the builtin
    dirname = os.environ.get(key)
    if dirname is None:
        return None

    if not os.path.isdir(dirname):
        raise ValueError(
            f"{key} should name an existing directory (got '{dirname}')")

    return dirname
diff --git a/python/samba/tests/audit_log_base.py b/python/samba/tests/audit_log_base.py
new file mode 100644
index 0000000..fa5ecc7
--- /dev/null
+++ b/python/samba/tests/audit_log_base.py
@@ -0,0 +1,206 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+"""Tests for DSDB audit logging.
+"""
+
+import samba.tests
+from samba.messaging import Messaging
+from samba.dcerpc.messaging import MSG_AUTH_LOG, AUTH_EVENT_NAME
+from samba.param import LoadParm
+import time
+import json
+import os
+import re
+
+
def getAudit(message):
    """Return the audit payload of a parsed JSON message.

    The payload lives under a key named after the message type, e.g.
    message["dsdbChange"] for a message whose "type" is "dsdbChange".

    :param message: dict parsed from a JSON audit message
    :return: the payload dict, or None if the message has no "type" key
    """
    if "type" not in message:
        return None

    # renamed local to avoid shadowing the builtin type()
    msg_type = message["type"]
    audit = message[msg_type]
    return audit
+
+
class AuditLogTestBase(samba.tests.TestCase):
    """Base class for audit-log tests.

    Subclasses must set self.event_type and self.message_type before
    calling super().setUp(), as this setUp registers with the server's
    messaging bus using those attributes.
    """

    def setUp(self):
        super().setUp()

        # connect to the server's messaging bus (we need to explicitly load a
        # different smb.conf here, because in all other respects this test
        # wants to act as a separate remote client)
        server_conf = os.getenv('SERVERCONFFILE')
        if server_conf:
            lp_ctx = LoadParm(filename_for_non_global_lp=server_conf)
        else:
            lp_ctx = self.get_loadparm()
        self.msg_ctx = Messaging((1,), lp_ctx=lp_ctx)
        self.msg_ctx.irpc_add_name(self.event_type)

        # Now switch back to using the client-side smb.conf. The tests will
        # use the first interface in the client.conf (we need to strip off
        # the subnet mask portion)
        lp_ctx = self.get_loadparm()
        client_ip_and_mask = lp_ctx.get('interfaces')[0]
        client_ip = client_ip_and_mask.split('/')[0]

        # the messaging ctx is the server's view of the world, so our own
        # client IP will be the remoteAddress when connections are logged
        self.remoteAddress = client_ip

        #
        # Check the remote address of a message against the one being used
        # for the tests.
        #
        def isRemote(message):
            audit = getAudit(message)
            if audit is None:
                return False

            remote = audit["remoteAddress"]
            if remote is None:
                return False

            # remoteAddress has the form "proto:address:port"; compare
            # the address component only
            try:
                addr = remote.split(":")
                return addr[1] == self.remoteAddress
            except IndexError:
                return False

        def messageHandler(context, msgType, src, message):
            # This does not look like sub unit output and it
            # makes these tests much easier to debug.
            print(message)
            jsonMsg = json.loads(message)
            # collect change messages from our own client address;
            # transaction messages are stored separately (last one wins)
            if ((jsonMsg["type"] == "passwordChange" or
                jsonMsg["type"] == "dsdbChange" or
                jsonMsg["type"] == "groupChange") and
                isRemote(jsonMsg)):
                context["messages"].append(jsonMsg)
            elif jsonMsg["type"] == "dsdbTransaction":
                context["txnMessage"] = jsonMsg

        self.context = {"messages": [], "txnMessage": None}
        self.msg_handler_and_context = (messageHandler, self.context)
        self.msg_ctx.register(self.msg_handler_and_context,
                              msg_type=self.message_type)

        self.msg_ctx.irpc_add_name(AUTH_EVENT_NAME)

        def authHandler(context, msgType, src, message):
            jsonMsg = json.loads(message)
            if jsonMsg["type"] == "Authorization" and isRemote(jsonMsg):
                # This does not look like sub unit output and it
                # makes these tests much easier to debug.
                print(message)
                # remember the most recent session details for use by
                # get_session()/get_service_description()
                context["sessionId"] = jsonMsg["Authorization"]["sessionId"]
                context["serviceDescription"] =\
                    jsonMsg["Authorization"]["serviceDescription"]

        self.auth_context = {"sessionId": "", "serviceDescription": ""}
        self.auth_handler_and_context = (authHandler, self.auth_context)
        self.msg_ctx.register(self.auth_handler_and_context,
                              msg_type=MSG_AUTH_LOG)

        # drain anything generated before the test proper starts
        self.discardMessages()

        self.server = os.environ["SERVER"]
        self.connection = None

    def tearDown(self):
        # drain and deregister everything we registered in setUp
        self.discardMessages()
        self.msg_ctx.irpc_remove_name(self.event_type)
        self.msg_ctx.irpc_remove_name(AUTH_EVENT_NAME)
        self.msg_ctx.deregister(self.msg_handler_and_context,
                                msg_type=self.message_type)
        self.msg_ctx.deregister(self.auth_handler_and_context,
                                msg_type=MSG_AUTH_LOG)

        super().tearDown()

    def haveExpected(self, expected, dn):
        """Return True once at least `expected` messages have arrived,
        optionally counting only messages for the given dn."""
        if dn is None:
            return len(self.context["messages"]) >= expected
        else:
            received = 0
            for msg in self.context["messages"]:
                audit = getAudit(msg)
                if audit["dn"].lower() == dn.lower():
                    received += 1
                    if received >= expected:
                        return True
            return False

    def waitForMessages(self, number, connection=None, dn=None):
        """Wait for all the expected messages to arrive
        The connection is passed through to keep the connection alive
        until all the logging messages have been received.

        :return: the matching messages, or [] on timeout (~1 second)
        """

        self.connection = connection

        start_time = time.time()
        while not self.haveExpected(number, dn):
            self.msg_ctx.loop_once(0.1)
            if time.time() - start_time > 1:
                self.connection = None
                print("Timed out")
                return []

        self.connection = None
        if dn is None:
            return self.context["messages"]

        # filter to only the messages for the requested dn
        messages = []
        for msg in self.context["messages"]:
            audit = getAudit(msg)
            if audit["dn"].lower() == dn.lower():
                messages.append(msg)
        return messages

    def discardMessages(self):
        """Discard any previously queued messages, looping until the
        bus goes quiet."""
        messages = self.context["messages"]

        while True:
            messages.clear()
            self.context["txnMessage"] = None

            # tevent presumably has other tasks to run, so we might need two or
            # three loops before a message comes through.
            for _ in range(5):
                self.msg_ctx.loop_once(0.001)

            if not messages and self.context["txnMessage"] is None:
                # No new messages. We’ve probably got them all.
                break

    # Pattern for a correctly formatted GUID string
    GUID_RE = re.compile(
        "[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}")

    #
    # Is the supplied GUID string correctly formatted
    #
    def is_guid(self, guid):
        return self.GUID_RE.fullmatch(guid)

    def get_session(self):
        """Return the sessionId captured from the last Authorization message."""
        return self.auth_context["sessionId"]

    def get_service_description(self):
        """Return the serviceDescription from the last Authorization message."""
        return self.auth_context["serviceDescription"]
diff --git a/python/samba/tests/audit_log_dsdb.py b/python/samba/tests/audit_log_dsdb.py
new file mode 100644
index 0000000..af62337
--- /dev/null
+++ b/python/samba/tests/audit_log_dsdb.py
@@ -0,0 +1,634 @@
+# Tests for SamDb password change audit logging.
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2018
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for the SamDb logging of password changes.
+"""
+
+import samba.tests
+from samba.dcerpc.messaging import MSG_DSDB_LOG, DSDB_EVENT_NAME
+from ldb import ERR_NO_SUCH_OBJECT
+from samba.samdb import SamDB
+from samba.auth import system_session
+import os
+import time
+from samba.tests.audit_log_base import AuditLogTestBase
+from samba.tests import delete_force
+from samba.net import Net
+import samba
+from samba.dcerpc import security, lsa
+
# Test account (re)created in setUp(); password is randomly generated
# once at import time
USER_NAME = "auditlogtestuser"
USER_PASS = samba.generate_random_password(32, 32)
+
+
+class AuditLogDsdbTests(AuditLogTestBase):
+
    def setUp(self):
        # must be set before the base-class setUp, which registers on
        # the messaging bus using these attributes
        self.message_type = MSG_DSDB_LOG
        self.event_type = DSDB_EVENT_NAME
        super().setUp()

        self.server_ip = os.environ["SERVER_IP"]

        host = "ldap://%s" % os.environ["SERVER"]
        self.ldb = SamDB(url=host,
                         session_info=system_session(),
                         credentials=self.get_credentials(),
                         lp=self.get_loadparm())
        self.server = os.environ["SERVER"]

        # Gets back the basedn
        self.base_dn = self.ldb.domain_dn()

        # Get the old "dSHeuristics" if it was set
        dsheuristics = self.ldb.get_dsheuristics()

        # Set the "dSHeuristics" to activate the correct "userPassword"
        # behaviour
        self.ldb.set_dsheuristics("000000001")

        # Reset the "dSHeuristics" as they were before
        self.addCleanup(self.ldb.set_dsheuristics, dsheuristics)

        # Get the old "minPwdAge"
        minPwdAge = self.ldb.get_minPwdAge()

        # Set it temporarily to "0" so password changes are not refused
        self.ldb.set_minPwdAge("0")
        # NOTE(review): base_dn is assigned again here, identically to
        # the assignment above — appears redundant
        self.base_dn = self.ldb.domain_dn()

        # Reset the "minPwdAge" as it was before
        self.addCleanup(self.ldb.set_minPwdAge, minPwdAge)

        # (Re)adds the test user USER_NAME with password USER_PASS
        delete_force(self.ldb, "cn=" + USER_NAME + ",cn=users," + self.base_dn)
        self.ldb.add({
            "dn": "cn=" + USER_NAME + ",cn=users," + self.base_dn,
            "objectclass": "user",
            "sAMAccountName": USER_NAME,
            "userPassword": USER_PASS
        })
+
    def discardSetupMessages(self, dn):
        """Discard the messages generated by the setup code.

        Waits for the two messages produced by the user delete/add in
        setUp() and then drains the queue.
        """
        self.waitForMessages(2, dn=dn)
        self.discardMessages()
+
    def tearDown(self):
        # drain any messages left by the test before the base class
        # deregisters the handlers
        self.discardMessages()
        super().tearDown()
+
+ def haveExpectedTxn(self, expected):
+ if self.context["txnMessage"] is not None:
+ txn = self.context["txnMessage"]["dsdbTransaction"]
+ if txn["transactionId"] == expected:
+ return True
+ return False
+
    def waitForTransaction(self, expected, connection=None):
        """Wait for a transaction message to arrive
        The connection is passed through to keep the connection alive
        until all the logging messages have been received.

        :param expected: the transaction id to wait for
        :return: the transaction message dict, or "" on timeout
            (NOTE(review): inconsistent with the dict return type —
            callers must cope with both)
        """

        self.connection = connection

        start_time = time.time()
        while not self.haveExpectedTxn(expected):
            self.msg_ctx.loop_once(0.1)
            # give up after ~1 second of wall-clock time
            if time.time() - start_time > 1:
                self.connection = None
                return ""

        self.connection = None
        return self.context["txnMessage"]
+
+ def test_net_change_password(self):
+
+ dn = "CN=" + USER_NAME + ",CN=Users," + self.base_dn
+ self.discardSetupMessages(dn)
+
+ creds = self.insta_creds(template=self.get_credentials())
+
+ lp = self.get_loadparm()
+ net = Net(creds, lp, server=self.server)
+ password = "newPassword!!42"
+
+ net.change_password(newpassword=password,
+ username=USER_NAME,
+ oldpassword=USER_PASS)
+
+ messages = self.waitForMessages(1, net, dn=dn)
+ print("Received %d messages" % len(messages))
+ self.assertEqual(1,
+ len(messages),
+ "Did not receive the expected number of messages")
+
+ audit = messages[0]["dsdbChange"]
+ self.assertEqual("Modify", audit["operation"])
+ self.assertFalse(audit["performedAsSystem"])
+ self.assertTrue(dn.lower(), audit["dn"].lower())
+ self.assertRegex(audit["remoteAddress"],
+ self.remoteAddress)
+ session_id = self.get_session()
+ self.assertEqual(session_id, audit["sessionId"])
+ # We skip the check for self.get_service_description() as this
+ # is subject to a race between smbd and the s4 rpc_server code
+ # as to which will set the description as it is DCE/RPC over SMB
+
+ self.assertTrue(self.is_guid(audit["transactionId"]))
+
+ attributes = audit["attributes"]
+ self.assertEqual(1, len(attributes))
+ actions = attributes["clearTextPassword"]["actions"]
+ self.assertEqual(1, len(actions))
+ self.assertTrue(actions[0]["redacted"])
+ self.assertEqual("replace", actions[0]["action"])
+
    def test_net_set_password(self):
        """An administrative password set over DCE/RPC (net) produces one
        audited Modify with a redacted clearTextPassword replace action."""

        dn = "CN=" + USER_NAME + ",CN=Users," + self.base_dn
        self.discardSetupMessages(dn)

        creds = self.insta_creds(template=self.get_credentials())

        lp = self.get_loadparm()
        net = Net(creds, lp, server=self.server)
        password = "newPassword!!42"
        domain = lp.get("workgroup")

        net.set_password(newpassword=password,
                         account_name=USER_NAME,
                         domain_name=domain)
        messages = self.waitForMessages(1, net, dn=dn)
        print("Received %d messages" % len(messages))
        self.assertEqual(1,
                         len(messages),
                         "Did not receive the expected number of messages")
        audit = messages[0]["dsdbChange"]
        self.assertEqual("Modify", audit["operation"])
        self.assertFalse(audit["performedAsSystem"])
        self.assertEqual(dn, audit["dn"])
        self.assertRegex(audit["remoteAddress"],
                         self.remoteAddress)
        session_id = self.get_session()
        self.assertEqual(session_id, audit["sessionId"])
        # We skip the check for self.get_service_description() as this
        # is subject to a race between smbd and the s4 rpc_server code
        # as to which will set the description as it is DCE/RPC over SMB

        self.assertTrue(self.is_guid(audit["transactionId"]))

        attributes = audit["attributes"]
        self.assertEqual(1, len(attributes))
        actions = attributes["clearTextPassword"]["actions"]
        self.assertEqual(1, len(actions))
        self.assertTrue(actions[0]["redacted"])
        self.assertEqual("replace", actions[0]["action"])
+
    def test_ldap_change_password(self):
        """A delete+add userPassword modify over LDAP produces one audited
        Modify with redacted delete and add actions."""

        dn = "cn=" + USER_NAME + ",cn=users," + self.base_dn
        self.discardSetupMessages(dn)

        new_password = samba.generate_random_password(32, 32)
        # NOTE(review): dn is re-assigned identically here — redundant
        dn = "cn=" + USER_NAME + ",cn=users," + self.base_dn
        # change the password the "user way": delete the old value and
        # add the new one in a single modify
        self.ldb.modify_ldif(
            "dn: " + dn + "\n" +
            "changetype: modify\n" +
            "delete: userPassword\n" +
            "userPassword: " + USER_PASS + "\n" +
            "add: userPassword\n" +
            "userPassword: " + new_password + "\n")

        messages = self.waitForMessages(1)
        print("Received %d messages" % len(messages))
        self.assertEqual(1,
                         len(messages),
                         "Did not receive the expected number of messages")

        audit = messages[0]["dsdbChange"]
        self.assertEqual("Modify", audit["operation"])
        self.assertFalse(audit["performedAsSystem"])
        self.assertEqual(dn, audit["dn"])
        self.assertRegex(audit["remoteAddress"],
                         self.remoteAddress)
        self.assertTrue(self.is_guid(audit["sessionId"]))
        session_id = self.get_session()
        self.assertEqual(session_id, audit["sessionId"])
        service_description = self.get_service_description()
        self.assertEqual(service_description, "LDAP")

        # both password actions must be redacted in the audit record
        attributes = audit["attributes"]
        self.assertEqual(1, len(attributes))
        actions = attributes["userPassword"]["actions"]
        self.assertEqual(2, len(actions))
        self.assertTrue(actions[0]["redacted"])
        self.assertEqual("delete", actions[0]["action"])
        self.assertTrue(actions[1]["redacted"])
        self.assertEqual("add", actions[1]["action"])
+
+ def test_ldap_replace_password(self):
+
+ dn = "cn=" + USER_NAME + ",cn=users," + self.base_dn
+ self.discardSetupMessages(dn)
+
+ new_password = samba.generate_random_password(32, 32)
+ self.ldb.modify_ldif(
+ "dn: " + dn + "\n" +
+ "changetype: modify\n" +
+ "replace: userPassword\n" +
+ "userPassword: " + new_password + "\n")
+
+ messages = self.waitForMessages(1, dn=dn)
+ print("Received %d messages" % len(messages))
+ self.assertEqual(1,
+ len(messages),
+ "Did not receive the expected number of messages")
+
+ audit = messages[0]["dsdbChange"]
+ self.assertEqual("Modify", audit["operation"])
+ self.assertFalse(audit["performedAsSystem"])
+ self.assertTrue(dn.lower(), audit["dn"].lower())
+ self.assertRegex(audit["remoteAddress"],
+ self.remoteAddress)
+ self.assertTrue(self.is_guid(audit["sessionId"]))
+ session_id = self.get_session()
+ self.assertEqual(session_id, audit["sessionId"])
+ service_description = self.get_service_description()
+ self.assertEqual(service_description, "LDAP")
+ self.assertTrue(self.is_guid(audit["transactionId"]))
+
+ attributes = audit["attributes"]
+ self.assertEqual(1, len(attributes))
+ actions = attributes["userPassword"]["actions"]
+ self.assertEqual(1, len(actions))
+ self.assertTrue(actions[0]["redacted"])
+ self.assertEqual("replace", actions[0]["action"])
+
+    def test_ldap_add_user(self):
+
+        # The setup code adds a user, so we check for the dsdb events
+        # generated by it.
+        dn = "cn=" + USER_NAME + ",cn=users," + self.base_dn
+        messages = self.waitForMessages(2, dn=dn)
+        print("Received %d messages" % len(messages))
+        self.assertEqual(2,
+                         len(messages),
+                         "Did not receive the expected number of messages")
+
+        audit = messages[1]["dsdbChange"]  # the Add event is the second message
+        self.assertEqual("Add", audit["operation"])
+        self.assertFalse(audit["performedAsSystem"])
+        self.assertEqual(dn, audit["dn"])
+        self.assertRegex(audit["remoteAddress"],
+                         self.remoteAddress)
+        session_id = self.get_session()
+        self.assertEqual(session_id, audit["sessionId"])
+        service_description = self.get_service_description()
+        self.assertEqual(service_description, "LDAP")
+        self.assertTrue(self.is_guid(audit["sessionId"]))
+        self.assertTrue(self.is_guid(audit["transactionId"]))
+
+        attributes = audit["attributes"]
+        self.assertEqual(3, len(attributes))  # objectclass, sAMAccountName, userPassword
+
+        actions = attributes["objectclass"]["actions"]
+        self.assertEqual(1, len(actions))
+        self.assertEqual("add", actions[0]["action"])
+        self.assertEqual(1, len(actions[0]["values"]))
+        self.assertEqual("user", actions[0]["values"][0]["value"])
+
+        actions = attributes["sAMAccountName"]["actions"]
+        self.assertEqual(1, len(actions))
+        self.assertEqual("add", actions[0]["action"])
+        self.assertEqual(1, len(actions[0]["values"]))
+        self.assertEqual(USER_NAME, actions[0]["values"][0]["value"])
+
+        actions = attributes["userPassword"]["actions"]
+        self.assertEqual(1, len(actions))
+        self.assertEqual("add", actions[0]["action"])
+        self.assertTrue(actions[0]["redacted"])  # password values must never reach the log
+
+ def test_samdb_delete_user(self):
+
+ dn = "cn=" + USER_NAME + ",cn=users," + self.base_dn
+ self.discardSetupMessages(dn)
+
+ self.ldb.deleteuser(USER_NAME)
+
+ messages = self.waitForMessages(1, dn=dn)
+ print("Received %d messages" % len(messages))
+ self.assertEqual(1,
+ len(messages),
+ "Did not receive the expected number of messages")
+
+ audit = messages[0]["dsdbChange"]
+ self.assertEqual("Delete", audit["operation"])
+ self.assertFalse(audit["performedAsSystem"])
+ self.assertTrue(dn.lower(), audit["dn"].lower())
+ self.assertRegex(audit["remoteAddress"],
+ self.remoteAddress)
+ self.assertTrue(self.is_guid(audit["sessionId"]))
+ self.assertEqual(0, audit["statusCode"])
+ self.assertEqual("Success", audit["status"])
+ session_id = self.get_session()
+ self.assertEqual(session_id, audit["sessionId"])
+ service_description = self.get_service_description()
+ self.assertEqual(service_description, "LDAP")
+
+ transactionId = audit["transactionId"]
+ message = self.waitForTransaction(transactionId)
+ audit = message["dsdbTransaction"]
+ self.assertEqual("commit", audit["action"])
+ self.assertTrue(self.is_guid(audit["transactionId"]))
+ self.assertTrue(audit["duration"] > 0)
+
+ def test_samdb_delete_non_existent_dn(self):
+
+ DOES_NOT_EXIST = "doesNotExist"
+ dn = "cn=" + USER_NAME + ",cn=users," + self.base_dn
+ self.discardSetupMessages(dn)
+
+ dn = "cn=" + DOES_NOT_EXIST + ",cn=users," + self.base_dn
+ try:
+ self.ldb.delete(dn)
+ self.fail("Exception not thrown")
+ except Exception:
+ pass
+
+ messages = self.waitForMessages(1)
+ print("Received %d messages" % len(messages))
+ self.assertEqual(1,
+ len(messages),
+ "Did not receive the expected number of messages")
+
+ audit = messages[0]["dsdbChange"]
+ self.assertEqual("Delete", audit["operation"])
+ self.assertFalse(audit["performedAsSystem"])
+ self.assertTrue(dn.lower(), audit["dn"].lower())
+ self.assertRegex(audit["remoteAddress"],
+ self.remoteAddress)
+ self.assertEqual(ERR_NO_SUCH_OBJECT, audit["statusCode"])
+ self.assertEqual("No such object", audit["status"])
+ self.assertTrue(self.is_guid(audit["sessionId"]))
+ session_id = self.get_session()
+ self.assertEqual(session_id, audit["sessionId"])
+ service_description = self.get_service_description()
+ self.assertEqual(service_description, "LDAP")
+
+ transactionId = audit["transactionId"]
+ message = self.waitForTransaction(transactionId)
+ audit = message["dsdbTransaction"]
+ self.assertEqual("rollback", audit["action"])
+ self.assertTrue(self.is_guid(audit["transactionId"]))
+ self.assertTrue(audit["duration"] > 0)
+
+ def test_create_and_delete_secret_over_lsa(self):
+
+ dn = "cn=Test Secret,CN=System," + self.base_dn
+ self.discardSetupMessages(dn)
+
+ creds = self.insta_creds(template=self.get_credentials())
+ lsa_conn = lsa.lsarpc(
+ "ncacn_np:%s" % self.server,
+ self.get_loadparm(),
+ creds)
+ lsa_handle = lsa_conn.OpenPolicy2(
+ system_name="\\",
+ attr=lsa.ObjectAttribute(),
+ access_mask=security.SEC_FLAG_MAXIMUM_ALLOWED)
+ secret_name = lsa.String()
+ secret_name.string = "G$Test"
+ lsa_conn.CreateSecret(
+ handle=lsa_handle,
+ name=secret_name,
+ access_mask=security.SEC_FLAG_MAXIMUM_ALLOWED)
+
+ messages = self.waitForMessages(1, dn=dn)
+ print("Received %d messages" % len(messages))
+ self.assertEqual(1,
+ len(messages),
+ "Did not receive the expected number of messages")
+
+ audit = messages[0]["dsdbChange"]
+ self.assertEqual("Add", audit["operation"])
+ self.assertTrue(audit["performedAsSystem"])
+ self.assertTrue(dn.lower(), audit["dn"].lower())
+ self.assertRegex(audit["remoteAddress"],
+ self.remoteAddress)
+ self.assertTrue(self.is_guid(audit["sessionId"]))
+ session_id = self.get_session()
+ self.assertEqual(session_id, audit["sessionId"])
+
+ # We skip the check for self.get_service_description() as this
+ # is subject to a race between smbd and the s4 rpc_server code
+ # as to which will set the description as it is DCE/RPC over SMB
+
+ attributes = audit["attributes"]
+ self.assertEqual(2, len(attributes))
+
+ object_class = attributes["objectClass"]
+ self.assertEqual(1, len(object_class["actions"]))
+ action = object_class["actions"][0]
+ self.assertEqual("add", action["action"])
+ values = action["values"]
+ self.assertEqual(1, len(values))
+ self.assertEqual("secret", values[0]["value"])
+
+ cn = attributes["cn"]
+ self.assertEqual(1, len(cn["actions"]))
+ action = cn["actions"][0]
+ self.assertEqual("add", action["action"])
+ values = action["values"]
+ self.assertEqual(1, len(values))
+ self.assertEqual("Test Secret", values[0]["value"])
+
+ #
+ # Now delete the secret.
+ self.discardMessages()
+ h = lsa_conn.OpenSecret(
+ handle=lsa_handle,
+ name=secret_name,
+ access_mask=security.SEC_FLAG_MAXIMUM_ALLOWED)
+
+ lsa_conn.DeleteObject(h)
+ messages = self.waitForMessages(1, dn=dn)
+ print("Received %d messages" % len(messages))
+ self.assertEqual(1,
+ len(messages),
+ "Did not receive the expected number of messages")
+
+ dn = "cn=Test Secret,CN=System," + self.base_dn
+ audit = messages[0]["dsdbChange"]
+ self.assertEqual("Delete", audit["operation"])
+ self.assertTrue(audit["performedAsSystem"])
+ self.assertTrue(dn.lower(), audit["dn"].lower())
+ self.assertRegex(audit["remoteAddress"],
+ self.remoteAddress)
+ self.assertTrue(self.is_guid(audit["sessionId"]))
+ session_id = self.get_session()
+ self.assertEqual(session_id, audit["sessionId"])
+
+ # We skip the check for self.get_service_description() as this
+ # is subject to a race between smbd and the s4 rpc_server code
+ # as to which will set the description as it is DCE/RPC over SMB
+
+    def test_modify(self):
+
+        dn = "cn=" + USER_NAME + ",cn=users," + self.base_dn
+        self.discardSetupMessages(dn)
+
+        #
+        # Add an attribute value
+        #
+        self.ldb.modify_ldif(
+            "dn: " + dn + "\n" +
+            "changetype: modify\n" +
+            "add: carLicense\n" +
+            "carLicense: license-01\n")
+
+        messages = self.waitForMessages(1, dn=dn)
+        print("Received %d messages" % len(messages))
+        self.assertEqual(1,
+                         len(messages),
+                         "Did not receive the expected number of messages")
+
+        audit = messages[0]["dsdbChange"]
+        self.assertEqual("Modify", audit["operation"])
+        self.assertFalse(audit["performedAsSystem"])
+        self.assertEqual(dn, audit["dn"])
+        self.assertRegex(audit["remoteAddress"],
+                         self.remoteAddress)
+        self.assertTrue(self.is_guid(audit["sessionId"]))
+        session_id = self.get_session()
+        self.assertEqual(session_id, audit["sessionId"])
+        service_description = self.get_service_description()
+        self.assertEqual(service_description, "LDAP")
+
+        attributes = audit["attributes"]
+        self.assertEqual(1, len(attributes))
+        actions = attributes["carLicense"]["actions"]
+        self.assertEqual(1, len(actions))
+        self.assertEqual("add", actions[0]["action"])
+        values = actions[0]["values"]
+        self.assertEqual(1, len(values))
+        self.assertEqual("license-01", values[0]["value"])  # carLicense is not secret, so values are logged
+
+        #
+        # Add another value to the attribute
+        #
+        self.discardMessages()
+        self.ldb.modify_ldif(
+            "dn: " + dn + "\n" +
+            "changetype: modify\n" +
+            "add: carLicense\n" +
+            "carLicense: license-02\n")
+
+        messages = self.waitForMessages(1, dn=dn)
+        print("Received %d messages" % len(messages))
+        self.assertEqual(1,
+                         len(messages),
+                         "Did not receive the expected number of messages")
+        attributes = messages[0]["dsdbChange"]["attributes"]
+        self.assertEqual(1, len(attributes))
+        actions = attributes["carLicense"]["actions"]
+        self.assertEqual(1, len(actions))
+        self.assertEqual("add", actions[0]["action"])
+        values = actions[0]["values"]
+        self.assertEqual(1, len(values))
+        self.assertEqual("license-02", values[0]["value"])
+
+        #
+        # Add another two values to the attribute
+        #
+        self.discardMessages()
+        self.ldb.modify_ldif(
+            "dn: " + dn + "\n" +
+            "changetype: modify\n" +
+            "add: carLicense\n" +
+            "carLicense: license-03\n" +
+            "carLicense: license-04\n")
+
+        messages = self.waitForMessages(1, dn=dn)
+        print("Received %d messages" % len(messages))
+        self.assertEqual(1,
+                         len(messages),
+                         "Did not receive the expected number of messages")
+        attributes = messages[0]["dsdbChange"]["attributes"]
+        self.assertEqual(1, len(attributes))
+        actions = attributes["carLicense"]["actions"]
+        self.assertEqual(1, len(actions))  # one action carries both values
+        self.assertEqual("add", actions[0]["action"])
+        values = actions[0]["values"]
+        self.assertEqual(2, len(values))
+        self.assertEqual("license-03", values[0]["value"])
+        self.assertEqual("license-04", values[1]["value"])
+
+        #
+        # Delete two values from the attribute
+        #
+        self.discardMessages()
+        self.ldb.modify_ldif(
+            "dn: " + dn + "\n" +
+            "changetype: modify\n" +
+            "delete: carLicense\n" +
+            "carLicense: license-03\n" +
+            "carLicense: license-04\n")
+
+        messages = self.waitForMessages(1, dn=dn)
+        print("Received %d messages" % len(messages))
+        self.assertEqual(1,
+                         len(messages),
+                         "Did not receive the expected number of messages")
+        attributes = messages[0]["dsdbChange"]["attributes"]
+        self.assertEqual(1, len(attributes))
+        actions = attributes["carLicense"]["actions"]
+        self.assertEqual(1, len(actions))
+        self.assertEqual("delete", actions[0]["action"])
+        values = actions[0]["values"]
+        self.assertEqual(2, len(values))
+        self.assertEqual("license-03", values[0]["value"])
+        self.assertEqual("license-04", values[1]["value"])
+
+        #
+        # Replace the attribute with two new values
+        #
+        self.discardMessages()
+        self.ldb.modify_ldif(
+            "dn: " + dn + "\n" +
+            "changetype: modify\n" +
+            "replace: carLicense\n" +
+            "carLicense: license-05\n" +
+            "carLicense: license-06\n")
+
+        messages = self.waitForMessages(1, dn=dn)
+        print("Received %d messages" % len(messages))
+        self.assertEqual(1,
+                         len(messages),
+                         "Did not receive the expected number of messages")
+        attributes = messages[0]["dsdbChange"]["attributes"]
+        self.assertEqual(1, len(attributes))
+        actions = attributes["carLicense"]["actions"]
+        self.assertEqual(1, len(actions))
+        self.assertEqual("replace", actions[0]["action"])
+        values = actions[0]["values"]
+        self.assertEqual(2, len(values))
+        self.assertEqual("license-05", values[0]["value"])
+        self.assertEqual("license-06", values[1]["value"])
diff --git a/python/samba/tests/audit_log_pass_change.py b/python/samba/tests/audit_log_pass_change.py
new file mode 100644
index 0000000..1039e17
--- /dev/null
+++ b/python/samba/tests/audit_log_pass_change.py
@@ -0,0 +1,331 @@
+# Tests for SamDb password change audit logging.
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2018
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for the SamDb logging of password changes.
+"""
+
+import samba.tests
+from samba.dcerpc.messaging import MSG_DSDB_PWD_LOG, DSDB_PWD_EVENT_NAME
+from samba.samdb import SamDB
+from samba.auth import system_session
+import os
+from samba.tests.audit_log_base import AuditLogTestBase
+from samba.tests import delete_force
+from samba.net import Net
+from ldb import ERR_INSUFFICIENT_ACCESS_RIGHTS
+from samba.dcerpc.windows_event_ids import (
+ EVT_ID_PASSWORD_CHANGE,
+ EVT_ID_PASSWORD_RESET
+)
+
+
+USER_NAME = "auditlogtestuser"
+USER_PASS = samba.generate_random_password(32, 32)  # fresh random password each run
+
+SECOND_USER_NAME = "auditlogtestuser02"
+SECOND_USER_PASS = samba.generate_random_password(32, 32)
+
+
+class AuditLogPassChangeTests(AuditLogTestBase):
+
+    def setUp(self):
+        self.message_type = MSG_DSDB_PWD_LOG  # must be set before the base class subscribes
+        self.event_type = DSDB_PWD_EVENT_NAME
+        super().setUp()
+
+        self.server_ip = os.environ["SERVER_IP"]
+
+        host = "ldap://%s" % os.environ["SERVER"]
+        self.ldb = SamDB(url=host,
+                         session_info=system_session(),
+                         credentials=self.get_credentials(),
+                         lp=self.get_loadparm())
+        self.server = os.environ["SERVER"]
+
+        # Gets back the basedn
+        self.base_dn = self.ldb.domain_dn()
+
+        # Get the old "dSHeuristics" if it was set
+        dsheuristics = self.ldb.get_dsheuristics()
+
+        # Set the "dSHeuristics" to activate the correct "userPassword"
+        # behaviour
+        self.ldb.set_dsheuristics("000000001")
+
+        # Reset the "dSHeuristics" as they were before
+        self.addCleanup(self.ldb.set_dsheuristics, dsheuristics)
+
+        # Get the old "minPwdAge"
+        minPwdAge = self.ldb.get_minPwdAge()
+
+        # Set it temporarily to "0"
+        self.ldb.set_minPwdAge("0")
+        self.base_dn = self.ldb.domain_dn()  # NOTE(review): redundant, base_dn already set above
+
+        # Reset the "minPwdAge" as it was before
+        self.addCleanup(self.ldb.set_minPwdAge, minPwdAge)
+
+        # (Re)adds the test user USER_NAME with password USER_PASS
+        delete_force(self.ldb, "cn=" + USER_NAME + ",cn=users," + self.base_dn)
+        delete_force(
+            self.ldb,
+            "cn=" + SECOND_USER_NAME + ",cn=users," + self.base_dn)
+        self.ldb.add({
+            "dn": "cn=" + USER_NAME + ",cn=users," + self.base_dn,
+            "objectclass": "user",
+            "sAMAccountName": USER_NAME,
+            "userPassword": USER_PASS
+        })
+
+    #
+    # Wait for, then discard, the audit messages generated by the setUp
+    # code, so each test starts with an empty message queue for `dn`.
+    def discardSetupMessages(self, dn):
+        self.waitForMessages(1, dn=dn)
+        self.discardMessages()
+
+    def test_net_change_password(self):
+
+        dn = "CN=" + USER_NAME + ",CN=Users," + self.base_dn
+        self.discardSetupMessages(dn)
+
+        creds = self.insta_creds(template=self.get_credentials())
+
+        lp = self.get_loadparm()
+        net = Net(creds, lp, server=self.server)
+        password = "newPassword!!42"
+
+        net.change_password(newpassword=password,
+                            username=USER_NAME,
+                            oldpassword=USER_PASS)
+
+        messages = self.waitForMessages(1, net, dn)
+        print("Received %d messages" % len(messages))
+        self.assertEqual(1,
+                         len(messages),
+                         "Did not receive the expected number of messages")
+        audit = messages[0]["passwordChange"]
+        self.assertEqual(EVT_ID_PASSWORD_CHANGE, audit["eventId"])  # user-initiated change, not a reset
+        self.assertEqual("Change", audit["action"])
+        self.assertEqual(dn, audit["dn"])
+        self.assertRegex(audit["remoteAddress"],
+                         self.remoteAddress)
+        session_id = self.get_session()
+        self.assertEqual(session_id, audit["sessionId"])
+        service_description = self.get_service_description()
+        self.assertEqual(service_description, "DCE/RPC")  # net.change_password goes over DCE/RPC
+        self.assertTrue(self.is_guid(audit["transactionId"]))
+
+    def test_net_set_password_user_without_permission(self):
+
+        dn = "CN=" + USER_NAME + ",CN=Users," + self.base_dn
+        self.discardSetupMessages(dn)
+
+        self.ldb.newuser(SECOND_USER_NAME, SECOND_USER_PASS)
+
+        #
+        # Get the password reset from the user add
+        #
+        dn = "CN=" + SECOND_USER_NAME + ",CN=Users," + self.base_dn
+        messages = self.waitForMessages(1, dn=dn)
+        print("Received %d messages" % len(messages))
+        self.assertEqual(1,
+                         len(messages),
+                         "Did not receive the expected number of messages")
+
+        audit = messages[0]["passwordChange"]
+        self.assertEqual(EVT_ID_PASSWORD_RESET, audit["eventId"])
+        self.assertEqual("Reset", audit["action"])
+        self.assertEqual(dn, audit["dn"])
+        self.assertRegex(audit["remoteAddress"],
+                         self.remoteAddress)
+        session_id = self.get_session()
+        self.assertEqual(session_id, audit["sessionId"])
+        service_description = self.get_service_description()
+        self.assertEqual(service_description, "LDAP")
+        self.assertTrue(self.is_guid(audit["transactionId"]))
+        self.assertEqual(0, audit["statusCode"])
+        self.assertEqual("Success", audit["status"])
+        self.discardMessages()
+
+        # Re-authenticate as the (unprivileged) second user
+        creds = self.insta_creds(
+            template=self.get_credentials(),
+            username=SECOND_USER_NAME,
+            userpass=SECOND_USER_PASS,
+            kerberos_state=None)
+
+        lp = self.get_loadparm()
+        net = Net(creds, lp, server=self.server)
+        password = "newPassword!!42"
+        domain = lp.get("workgroup")
+
+        try:
+            net.set_password(newpassword=password,
+                             account_name=USER_NAME,
+                             domain_name=domain)
+            self.fail("Expected exception not thrown")  # reset of another account must be refused
+        except Exception:
+            pass
+
+        # Even the refused attempt must be audited, with a failure status.
+        dn = "CN=" + USER_NAME + ",CN=Users," + self.base_dn
+        messages = self.waitForMessages(1, net, dn=dn)
+        print("Received %d messages" % len(messages))
+        self.assertEqual(1,
+                         len(messages),
+                         "Did not receive the expected number of messages")
+
+        audit = messages[0]["passwordChange"]
+        self.assertEqual(EVT_ID_PASSWORD_RESET, audit["eventId"])
+        self.assertEqual("Reset", audit["action"])
+        self.assertEqual(dn, audit["dn"])
+        self.assertRegex(audit["remoteAddress"],
+                         self.remoteAddress)
+        session_id = self.get_session()
+        self.assertEqual(session_id, audit["sessionId"])
+        service_description = self.get_service_description()
+        self.assertEqual(service_description, "DCE/RPC")
+        self.assertTrue(self.is_guid(audit["transactionId"]))
+        self.assertEqual(ERR_INSUFFICIENT_ACCESS_RIGHTS, audit["statusCode"])
+        self.assertEqual("insufficient access rights", audit["status"])
+
+ def test_net_set_password(self):
+
+ dn = "CN=" + USER_NAME + ",CN=Users," + self.base_dn
+ self.discardSetupMessages(dn)
+
+ creds = self.insta_creds(template=self.get_credentials())
+
+ lp = self.get_loadparm()
+ net = Net(creds, lp, server=self.server)
+ password = "newPassword!!42"
+ domain = lp.get("workgroup")
+
+ net.set_password(newpassword=password,
+ account_name=USER_NAME,
+ domain_name=domain)
+
+ dn = "CN=" + USER_NAME + ",CN=Users," + self.base_dn
+ messages = self.waitForMessages(1, net, dn)
+ print("Received %d messages" % len(messages))
+ self.assertEqual(1,
+ len(messages),
+ "Did not receive the expected number of messages")
+
+ audit = messages[0]["passwordChange"]
+ self.assertEqual(EVT_ID_PASSWORD_RESET, audit["eventId"])
+ self.assertEqual("Reset", audit["action"])
+ self.assertEqual(dn, audit["dn"])
+ self.assertRegex(audit["remoteAddress"],
+ self.remoteAddress)
+ session_id = self.get_session()
+ self.assertEqual(session_id, audit["sessionId"])
+ service_description = self.get_service_description()
+ self.assertEqual(service_description, "DCE/RPC")
+ session_id = self.get_session()
+ self.assertEqual(session_id, audit["sessionId"])
+ self.assertTrue(self.is_guid(audit["transactionId"]))
+
+    def test_ldap_change_password(self):
+
+        dn = "cn=" + USER_NAME + ",cn=users," + self.base_dn
+        self.discardSetupMessages(dn)
+
+        new_password = samba.generate_random_password(32, 32)
+        self.ldb.modify_ldif(
+            "dn: " + dn + "\n" +
+            "changetype: modify\n" +
+            "delete: userPassword\n" +
+            "userPassword: " + USER_PASS + "\n" +
+            "add: userPassword\n" +
+            "userPassword: " + new_password + "\n")
+
+        messages = self.waitForMessages(1, dn=dn)
+        print("Received %d messages" % len(messages))
+        self.assertEqual(1,
+                         len(messages),
+                         "Did not receive the expected number of messages")
+
+        audit = messages[0]["passwordChange"]
+        self.assertEqual(EVT_ID_PASSWORD_CHANGE, audit["eventId"])  # delete-old/add-new is audited as a Change
+        self.assertEqual("Change", audit["action"])
+        self.assertEqual(dn, audit["dn"])
+        self.assertRegex(audit["remoteAddress"],
+                         self.remoteAddress)
+        self.assertTrue(self.is_guid(audit["sessionId"]))
+        session_id = self.get_session()
+        self.assertEqual(session_id, audit["sessionId"])
+        service_description = self.get_service_description()
+        self.assertEqual(service_description, "LDAP")
+        self.assertTrue(self.is_guid(audit["transactionId"]))
+
+    def test_ldap_replace_password(self):
+
+        dn = "cn=" + USER_NAME + ",cn=users," + self.base_dn
+        self.discardSetupMessages(dn)
+
+        new_password = samba.generate_random_password(32, 32)
+        self.ldb.modify_ldif(
+            "dn: " + dn + "\n" +
+            "changetype: modify\n" +
+            "replace: userPassword\n" +
+            "userPassword: " + new_password + "\n")
+
+        messages = self.waitForMessages(1, dn=dn)
+        print("Received %d messages" % len(messages))
+        self.assertEqual(1,
+                         len(messages),
+                         "Did not receive the expected number of messages")
+
+        audit = messages[0]["passwordChange"]
+        self.assertEqual(EVT_ID_PASSWORD_RESET, audit["eventId"])  # replace (no old password) is a Reset
+        self.assertEqual("Reset", audit["action"])
+        self.assertEqual(dn, audit["dn"])
+        self.assertRegex(audit["remoteAddress"],
+                         self.remoteAddress)
+        self.assertTrue(self.is_guid(audit["sessionId"]))
+        session_id = self.get_session()
+        self.assertEqual(session_id, audit["sessionId"])
+        service_description = self.get_service_description()
+        self.assertEqual(service_description, "LDAP")
+        self.assertTrue(self.is_guid(audit["transactionId"]))
+
+    def test_ldap_add_user(self):
+
+        # The setup code adds a user, so we check for the password event
+        # generated by it.
+        dn = "cn=" + USER_NAME + ",cn=users," + self.base_dn
+        messages = self.waitForMessages(1, dn=dn)
+        print("Received %d messages" % len(messages))
+        self.assertEqual(1,
+                         len(messages),
+                         "Did not receive the expected number of messages")
+
+        #
+        # The first message should be the reset from the Setup code.
+        #
+        audit = messages[0]["passwordChange"]
+        self.assertEqual(EVT_ID_PASSWORD_RESET, audit["eventId"])
+        self.assertEqual("Reset", audit["action"])
+        self.assertEqual(dn, audit["dn"])
+        self.assertRegex(audit["remoteAddress"],
+                         self.remoteAddress)
+        session_id = self.get_session()
+        self.assertEqual(session_id, audit["sessionId"])
+        service_description = self.get_service_description()
+        self.assertEqual(service_description, "LDAP")
+        self.assertTrue(self.is_guid(audit["sessionId"]))
+        self.assertTrue(self.is_guid(audit["transactionId"]))
diff --git a/python/samba/tests/auth.py b/python/samba/tests/auth.py
new file mode 100644
index 0000000..3fedd5e
--- /dev/null
+++ b/python/samba/tests/auth.py
@@ -0,0 +1,102 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for the Auth Python bindings.
+
+Note that this just tests the bindings work. It does not intend to test
+the functionality, that's already done in other tests.
+"""
+
+from samba import auth
+import samba.tests
+
+
+class AuthSystemSessionTests(samba.tests.TestCase):
+
+    def setUp(self):
+        super().setUp()
+        self.system_session = auth.system_session()
+        self.lp = samba.tests.env_loadparm()
+
+    def test_system_session_attrs(self):  # binding exposes the expected attributes
+        self.assertTrue(hasattr(self.system_session, 'credentials'))
+        self.assertTrue(hasattr(self.system_session, 'info'))
+        self.assertTrue(hasattr(self.system_session, 'security_token'))
+        self.assertTrue(hasattr(self.system_session, 'session_key'))
+        self.assertTrue(hasattr(self.system_session, 'torture'))
+
+    def test_system_session_credentials(self):
+        self.assertIsNone(self.system_session.credentials.get_bind_dn())
+        self.assertIsNotNone(self.system_session.credentials.get_password())
+        self.assertEqual(self.system_session.credentials.get_username(),
+                         self.lp.get('netbios name').upper() + "$")  # machine account name
+
+    def test_system_session_info(self):
+        self.assertEqual(self.system_session.info.full_name, 'System')
+        self.assertEqual(self.system_session.info.domain_name, 'NT AUTHORITY')
+        self.assertEqual(self.system_session.info.account_name, 'SYSTEM')
+
+    def test_system_session_session_key(self):
+        expected = b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'  # 16 zero bytes
+        self.assertEqual(self.system_session.session_key, expected)
+
+    def test_system_session_security_token(self):
+        self.assertTrue(self.system_session.security_token.is_system())
+        self.assertFalse(self.system_session.security_token.is_anonymous())
+
+
+class AuthAdminSessionTests(samba.tests.TestCase):
+
+    def setUp(self):
+        super().setUp()
+        self.lp = samba.tests.env_loadparm()
+        self.admin_session = auth.admin_session(self.lp,
+                                                "S-1-5-21-2212615479-2695158682-2101375467")
+
+    def test_admin_session_attrs(self):  # binding exposes the expected attributes
+        self.assertTrue(hasattr(self.admin_session, 'credentials'))
+        self.assertTrue(hasattr(self.admin_session, 'info'))
+        self.assertTrue(hasattr(self.admin_session, 'security_token'))
+        self.assertTrue(hasattr(self.admin_session, 'session_key'))
+        self.assertTrue(hasattr(self.admin_session, 'torture'))
+
+    def test_admin_session_credentials(self):
+        self.assertIsNone(self.admin_session.credentials)  # unlike system_session
+
+    def test_session_info_details(self):
+        self.assertEqual(self.admin_session.info.full_name,
+                         'Administrator')
+        self.assertEqual(self.admin_session.info.domain_name,
+                         self.lp.get('workgroup'))
+        self.assertEqual(self.admin_session.info.account_name,
+                         'Administrator')
+
+    def test_security_token(self):
+        self.assertFalse(self.admin_session.security_token.is_system())
+        self.assertFalse(self.admin_session.security_token.is_anonymous())
+        self.assertTrue(self.admin_session.security_token.has_builtin_administrators())
+
+    def test_session_info_unix_details(self):
+        samba.auth.session_info_fill_unix(session_info=self.admin_session,
+                                          lp_ctx=self.lp,
+                                          user_name="Administrator")
+        self.assertEqual(self.admin_session.unix_info.sanitized_username,
+                         'Administrator')
+        self.assertEqual(self.admin_session.unix_info.unix_name,
+                         self.lp.get('workgroup').upper() +
+                         self.lp.get('winbind separator') + 'Administrator')
+        self.assertIsNotNone(self.admin_session.unix_token)
diff --git a/python/samba/tests/auth_log.py b/python/samba/tests/auth_log.py
new file mode 100755
index 0000000..0307ed0
--- /dev/null
+++ b/python/samba/tests/auth_log.py
@@ -0,0 +1,1489 @@
+#!/usr/bin/env python3
+# Unix SMB/CIFS implementation.
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for the Auth and AuthZ logging.
+"""
+
+import sys
+
+sys.path.insert(0, 'bin/python')
+
+import samba.tests
+from samba.dcerpc import srvsvc, dnsserver
+import os
+from samba.samba3 import libsmb_samba_internal as libsmb
+from samba.samba3 import param as s3param
+from samba.samdb import SamDB
+import samba.tests.auth_log_base
+from samba.credentials import DONT_USE_KERBEROS, MUST_USE_KERBEROS
+from samba import NTSTATUSError
+from subprocess import call
+from ldb import LdbError
+from samba.dcerpc.windows_event_ids import (
+ EVT_ID_SUCCESSFUL_LOGON,
+ EVT_ID_UNSUCCESSFUL_LOGON,
+ EVT_LOGON_NETWORK,
+ EVT_LOGON_INTERACTIVE,
+ EVT_LOGON_NETWORK_CLEAR_TEXT
+)
+import re
+
+
+class AuthLogTests(samba.tests.auth_log_base.AuthLogTestBase):
+
+ def setUp(self):
+ super().setUp()
+ self.remoteAddress = os.environ["CLIENT_IP"]
+
+ def smb_connection(self, creds, use_spnego="yes", ntlmv2_auth="yes",
+ force_smb1=False):
+ # the SMB bindings rely on having a s3 loadparm
+ lp = self.get_loadparm()
+ s3_lp = s3param.get_context()
+ s3_lp.load(lp.configfile)
+
+ # Allow the testcase to skip SPNEGO or use NTLMv1
+ s3_lp.set("client use spnego", use_spnego)
+ s3_lp.set("client ntlmv2 auth", ntlmv2_auth)
+
+ return libsmb.Conn(self.server, "sysvol", lp=s3_lp, creds=creds,
+ force_smb1=force_smb1)
+
+ def _test_rpc_ncacn_np(self, authTypes, creds, service,
+ binding, protection, checkFunction):
+ def isLastExpectedMessage(msg):
+ return (msg["type"] == "Authorization" and
+ (msg["Authorization"]["serviceDescription"] == "DCE/RPC" or
+ msg["Authorization"]["serviceDescription"] == service) and
+ msg["Authorization"]["authType"] == authTypes[0] and
+ msg["Authorization"]["transportProtection"] == protection)
+
+ if binding:
+ binding = "[%s]" % binding
+
+ if service == "dnsserver":
+ x = dnsserver.dnsserver("ncacn_np:%s%s" % (self.server, binding),
+ self.get_loadparm(),
+ creds)
+ elif service == "srvsvc":
+ x = srvsvc.srvsvc("ncacn_np:%s%s" % (self.server, binding),
+ self.get_loadparm(),
+ creds)
+
+ # The connection is passed to ensure the server
+ # messaging context stays up until all the messages have been received.
+ messages = self.waitForMessages(isLastExpectedMessage, x)
+ checkFunction(messages, authTypes, service, binding, protection)
+
+ def _assert_ncacn_np_serviceDescription(self, binding, serviceDescription):
+ # Turn "[foo,bar]" into a list ("foo", "bar") to test
+ # lambda x: x removes anything that evaluates to False,
+ # including empty strings, so we handle "" as well
+ binding_list = \
+ list(filter(lambda x: x, re.compile(r'[\[,\]]').split(binding)))
+
+ # Handle explicit smb2, smb1 or auto negotiation
+ if "smb2" in binding_list:
+ self.assertEqual(serviceDescription, "SMB2")
+ elif "smb1" in binding_list:
+ self.assertEqual(serviceDescription, "SMB")
+ else:
+ self.assertIn(serviceDescription, ["SMB", "SMB2"])
+
+ def rpc_ncacn_np_ntlm_check(self, messages, authTypes, service,
+ binding, protection):
+
+ expected_messages = len(authTypes)
+ self.assertEqual(expected_messages,
+ len(messages),
+ "Did not receive the expected number of messages")
+
+ # Check the first message it should be an Authentication
+ msg = messages[0]
+ self.assertEqual("Authentication", msg["type"])
+ self.assertEqual("NT_STATUS_OK", msg["Authentication"]["status"])
+ self.assertEqual(
+ EVT_ID_SUCCESSFUL_LOGON, msg["Authentication"]["eventId"])
+ self.assertEqual(
+ EVT_LOGON_NETWORK, msg["Authentication"]["logonType"])
+ self._assert_ncacn_np_serviceDescription(
+ binding, msg["Authentication"]["serviceDescription"])
+ self.assertEqual(authTypes[1],
+ msg["Authentication"]["authDescription"])
+
+ # Check the second message it should be an Authorization
+ msg = messages[1]
+ self.assertEqual("Authorization", msg["type"])
+ self._assert_ncacn_np_serviceDescription(
+ binding, msg["Authorization"]["serviceDescription"])
+ self.assertEqual(authTypes[2], msg["Authorization"]["authType"])
+ self.assertEqual("SMB", msg["Authorization"]["transportProtection"])
+ self.assertTrue(self.is_guid(msg["Authorization"]["sessionId"]))
+
+ # Check the third message it should be an Authentication
+ # if we are expecting 4 messages
+ if expected_messages == 4:
+ def checkServiceDescription(desc):
+ return (desc == "DCE/RPC" or desc == service)
+
+ msg = messages[2]
+ self.assertEqual("Authentication", msg["type"])
+ self.assertEqual("NT_STATUS_OK", msg["Authentication"]["status"])
+ self.assertTrue(
+ checkServiceDescription(
+ msg["Authentication"]["serviceDescription"]))
+
+ self.assertEqual(authTypes[3],
+ msg["Authentication"]["authDescription"])
+ self.assertEqual(
+ EVT_ID_SUCCESSFUL_LOGON, msg["Authentication"]["eventId"])
+ self.assertEqual(
+ EVT_LOGON_NETWORK, msg["Authentication"]["logonType"])
+
+ def rpc_ncacn_np_krb5_check(
+ self,
+ messages,
+ authTypes,
+ service,
+ binding,
+ protection):
+
+ expected_messages = len(authTypes)
+ self.assertEqual(expected_messages,
+ len(messages),
+ "Did not receive the expected number of messages")
+
+ # Check the first message it should be an Authentication
+ # This is almost certainly Authentication over UDP, and is probably
+ # returning a message-too-big error,
+ msg = messages[0]
+ self.assertEqual("Authentication", msg["type"])
+ self.assertEqual("NT_STATUS_PROTOCOL_UNREACHABLE", # RESPONSE_TOO_BIG
+ msg["Authentication"]["status"])
+ self.assertEqual("Kerberos KDC",
+ msg["Authentication"]["serviceDescription"])
+ self.assertEqual(authTypes[1],
+ msg["Authentication"]["authDescription"])
+ self.assertEqual(
+ EVT_ID_UNSUCCESSFUL_LOGON, msg["Authentication"]["eventId"])
+ self.assertEqual(
+ EVT_LOGON_NETWORK, msg["Authentication"]["logonType"])
+
+ # Check the second message it should be an Authentication
+ # This is the TCP Authentication in response to the message too big
+ # response to the UDP Authentication
+ msg = messages[1]
+ self.assertEqual("Authentication", msg["type"])
+ self.assertEqual("NT_STATUS_OK", msg["Authentication"]["status"])
+ self.assertEqual("Kerberos KDC",
+ msg["Authentication"]["serviceDescription"])
+ self.assertEqual(authTypes[2],
+ msg["Authentication"]["authDescription"])
+ self.assertEqual(
+ EVT_ID_SUCCESSFUL_LOGON, msg["Authentication"]["eventId"])
+ self.assertEqual(
+ EVT_LOGON_NETWORK, msg["Authentication"]["logonType"])
+
+ # Check the third message it should be an Authorization
+ msg = messages[2]
+ self.assertEqual("Authorization", msg["type"])
+ self._assert_ncacn_np_serviceDescription(
+ binding, msg["Authorization"]["serviceDescription"])
+ self.assertEqual(authTypes[3], msg["Authorization"]["authType"])
+ self.assertEqual("SMB", msg["Authorization"]["transportProtection"])
+ self.assertTrue(self.is_guid(msg["Authorization"]["sessionId"]))
+
+ def test_rpc_ncacn_np_ntlm_dns_sign(self):
+ creds = self.insta_creds(template=self.get_credentials(),
+ kerberos_state=DONT_USE_KERBEROS)
+ self._test_rpc_ncacn_np(["NTLMSSP",
+ "NTLMSSP",
+ "NTLMSSP",
+ "NTLMSSP"],
+ creds, "dnsserver", "sign", "SIGN",
+ self.rpc_ncacn_np_ntlm_check)
+
+ def test_rpc_ncacn_np_ntlm_srv_sign(self):
+ creds = self.insta_creds(template=self.get_credentials(),
+ kerberos_state=DONT_USE_KERBEROS)
+ self._test_rpc_ncacn_np(["NTLMSSP",
+ "NTLMSSP",
+ "NTLMSSP",
+ "NTLMSSP"],
+ creds, "srvsvc", "sign", "SIGN",
+ self.rpc_ncacn_np_ntlm_check)
+
+ def test_rpc_ncacn_np_ntlm_dns(self):
+ creds = self.insta_creds(template=self.get_credentials(),
+ kerberos_state=DONT_USE_KERBEROS)
+ self._test_rpc_ncacn_np(["ncacn_np",
+ "NTLMSSP",
+ "NTLMSSP"],
+ creds, "dnsserver", "", "SMB",
+ self.rpc_ncacn_np_ntlm_check)
+
+ def test_rpc_ncacn_np_ntlm_srv(self):
+ creds = self.insta_creds(template=self.get_credentials(),
+ kerberos_state=DONT_USE_KERBEROS)
+ self._test_rpc_ncacn_np(["ncacn_np",
+ "NTLMSSP",
+ "NTLMSSP"],
+ creds, "srvsvc", "", "SMB",
+ self.rpc_ncacn_np_ntlm_check)
+
+ def test_rpc_ncacn_np_krb_dns_sign(self):
+ creds = self.insta_creds(template=self.get_credentials(),
+ kerberos_state=MUST_USE_KERBEROS)
+ self._test_rpc_ncacn_np(["krb5",
+ "ENC-TS Pre-authentication",
+ "ENC-TS Pre-authentication",
+ "krb5"],
+ creds, "dnsserver", "sign", "SIGN",
+ self.rpc_ncacn_np_krb5_check)
+
+ def test_rpc_ncacn_np_krb_srv_sign(self):
+ creds = self.insta_creds(template=self.get_credentials(),
+ kerberos_state=MUST_USE_KERBEROS)
+ self._test_rpc_ncacn_np(["krb5",
+ "ENC-TS Pre-authentication",
+ "ENC-TS Pre-authentication",
+ "krb5"],
+ creds, "srvsvc", "sign", "SIGN",
+ self.rpc_ncacn_np_krb5_check)
+
+ def test_rpc_ncacn_np_krb_dns(self):
+ creds = self.insta_creds(template=self.get_credentials(),
+ kerberos_state=MUST_USE_KERBEROS)
+ self._test_rpc_ncacn_np(["ncacn_np",
+ "ENC-TS Pre-authentication",
+ "ENC-TS Pre-authentication",
+ "krb5"],
+ creds, "dnsserver", "", "SMB",
+ self.rpc_ncacn_np_krb5_check)
+
+ def test_rpc_ncacn_np_krb_dns_smb2(self):
+ creds = self.insta_creds(template=self.get_credentials(),
+ kerberos_state=MUST_USE_KERBEROS)
+ self._test_rpc_ncacn_np(["ncacn_np",
+ "ENC-TS Pre-authentication",
+ "ENC-TS Pre-authentication",
+ "krb5"],
+ creds, "dnsserver", "smb2", "SMB",
+ self.rpc_ncacn_np_krb5_check)
+
+ def test_rpc_ncacn_np_krb_srv(self):
+ creds = self.insta_creds(template=self.get_credentials(),
+ kerberos_state=MUST_USE_KERBEROS)
+ self._test_rpc_ncacn_np(["ncacn_np",
+ "ENC-TS Pre-authentication",
+ "ENC-TS Pre-authentication",
+ "krb5"],
+ creds, "srvsvc", "", "SMB",
+ self.rpc_ncacn_np_krb5_check)
+
+ def _test_rpc_ncacn_ip_tcp(self, authTypes, creds, service,
+ binding, protection, checkFunction):
+ def isLastExpectedMessage(msg):
+ return (msg["type"] == "Authorization" and
+ msg["Authorization"]["serviceDescription"] == "DCE/RPC" and
+ msg["Authorization"]["authType"] == authTypes[0] and
+ msg["Authorization"]["transportProtection"] == protection)
+
+ if binding:
+ binding = "[%s]" % binding
+
+ if service == "dnsserver":
+ conn = dnsserver.dnsserver(
+ "ncacn_ip_tcp:%s%s" % (self.server, binding),
+ self.get_loadparm(),
+ creds)
+ elif service == "srvsvc":
+ conn = srvsvc.srvsvc("ncacn_ip_tcp:%s%s" % (self.server, binding),
+ self.get_loadparm(),
+ creds)
+
+ messages = self.waitForMessages(isLastExpectedMessage, conn)
+ checkFunction(messages, authTypes, service, binding, protection)
+
+ def rpc_ncacn_ip_tcp_ntlm_check(self, messages, authTypes, service,
+ binding, protection):
+
+ expected_messages = len(authTypes)
+ self.assertEqual(expected_messages,
+ len(messages),
+ "Did not receive the expected number of messages")
+
+ # Check the first message it should be an Authorization
+ msg = messages[0]
+ self.assertEqual("Authorization", msg["type"])
+ self.assertEqual("DCE/RPC",
+ msg["Authorization"]["serviceDescription"])
+ self.assertEqual(authTypes[1], msg["Authorization"]["authType"])
+ self.assertEqual("NONE", msg["Authorization"]["transportProtection"])
+ self.assertTrue(self.is_guid(msg["Authorization"]["sessionId"]))
+
+ # Check the second message it should be an Authentication
+ msg = messages[1]
+ self.assertEqual("Authentication", msg["type"])
+ self.assertEqual("NT_STATUS_OK", msg["Authentication"]["status"])
+ self.assertEqual("DCE/RPC",
+ msg["Authentication"]["serviceDescription"])
+ self.assertEqual(authTypes[2],
+ msg["Authentication"]["authDescription"])
+ self.assertEqual(
+ EVT_ID_SUCCESSFUL_LOGON, msg["Authentication"]["eventId"])
+ self.assertEqual(
+ EVT_LOGON_NETWORK, msg["Authentication"]["logonType"])
+
+ def rpc_ncacn_ip_tcp_krb5_check(self, messages, authTypes, service,
+ binding, protection):
+
+ expected_messages = len(authTypes)
+ self.assertEqual(expected_messages,
+ len(messages),
+ "Did not receive the expected number of messages")
+
+ # Check the first message it should be an Authorization
+ msg = messages[0]
+ self.assertEqual("Authorization", msg["type"])
+ self.assertEqual("DCE/RPC",
+ msg["Authorization"]["serviceDescription"])
+ self.assertEqual(authTypes[1], msg["Authorization"]["authType"])
+ self.assertEqual("NONE", msg["Authorization"]["transportProtection"])
+ self.assertTrue(self.is_guid(msg["Authorization"]["sessionId"]))
+
+ # Check the second message it should be an Authentication
+ msg = messages[1]
+ self.assertEqual("Authentication", msg["type"])
+ self.assertEqual("NT_STATUS_PROTOCOL_UNREACHABLE", # RESPONSE_TOO_BIG
+ msg["Authentication"]["status"])
+ self.assertEqual("Kerberos KDC",
+ msg["Authentication"]["serviceDescription"])
+ self.assertEqual(authTypes[2],
+ msg["Authentication"]["authDescription"])
+ self.assertEqual(
+ EVT_ID_UNSUCCESSFUL_LOGON, msg["Authentication"]["eventId"])
+ self.assertEqual(
+ EVT_LOGON_NETWORK, msg["Authentication"]["logonType"])
+
+ # Check the third message it should be an Authentication
+ msg = messages[2]
+ self.assertEqual("Authentication", msg["type"])
+ self.assertEqual("NT_STATUS_OK", msg["Authentication"]["status"])
+ self.assertEqual("Kerberos KDC",
+ msg["Authentication"]["serviceDescription"])
+ self.assertEqual(authTypes[2],
+ msg["Authentication"]["authDescription"])
+ self.assertEqual(
+ EVT_ID_SUCCESSFUL_LOGON, msg["Authentication"]["eventId"])
+ self.assertEqual(
+ EVT_LOGON_NETWORK, msg["Authentication"]["logonType"])
+
+ def test_rpc_ncacn_ip_tcp_ntlm_dns_sign(self):
+ creds = self.insta_creds(template=self.get_credentials(),
+ kerberos_state=DONT_USE_KERBEROS)
+ self._test_rpc_ncacn_ip_tcp(["NTLMSSP",
+ "ncacn_ip_tcp",
+ "NTLMSSP"],
+ creds, "dnsserver", "sign", "SIGN",
+ self.rpc_ncacn_ip_tcp_ntlm_check)
+
+ def test_rpc_ncacn_ip_tcp_krb5_dns_sign(self):
+ creds = self.insta_creds(template=self.get_credentials(),
+ kerberos_state=MUST_USE_KERBEROS)
+ self._test_rpc_ncacn_ip_tcp(["krb5",
+ "ncacn_ip_tcp",
+ "ENC-TS Pre-authentication",
+ "ENC-TS Pre-authentication"],
+ creds, "dnsserver", "sign", "SIGN",
+ self.rpc_ncacn_ip_tcp_krb5_check)
+
+ def test_rpc_ncacn_ip_tcp_ntlm_dns(self):
+ creds = self.insta_creds(template=self.get_credentials(),
+ kerberos_state=DONT_USE_KERBEROS)
+ self._test_rpc_ncacn_ip_tcp(["NTLMSSP",
+ "ncacn_ip_tcp",
+ "NTLMSSP"],
+ creds, "dnsserver", "", "SIGN",
+ self.rpc_ncacn_ip_tcp_ntlm_check)
+
+ def test_rpc_ncacn_ip_tcp_krb5_dns(self):
+ creds = self.insta_creds(template=self.get_credentials(),
+ kerberos_state=MUST_USE_KERBEROS)
+ self._test_rpc_ncacn_ip_tcp(["krb5",
+ "ncacn_ip_tcp",
+ "ENC-TS Pre-authentication",
+ "ENC-TS Pre-authentication"],
+ creds, "dnsserver", "", "SIGN",
+ self.rpc_ncacn_ip_tcp_krb5_check)
+
+ def test_rpc_ncacn_ip_tcp_ntlm_dns_connect(self):
+ creds = self.insta_creds(template=self.get_credentials(),
+ kerberos_state=DONT_USE_KERBEROS)
+ self._test_rpc_ncacn_ip_tcp(["NTLMSSP",
+ "ncacn_ip_tcp",
+ "NTLMSSP"],
+ creds, "dnsserver", "connect", "NONE",
+ self.rpc_ncacn_ip_tcp_ntlm_check)
+
+ def test_rpc_ncacn_ip_tcp_krb5_dns_connect(self):
+ creds = self.insta_creds(template=self.get_credentials(),
+ kerberos_state=MUST_USE_KERBEROS)
+ self._test_rpc_ncacn_ip_tcp(["krb5",
+ "ncacn_ip_tcp",
+ "ENC-TS Pre-authentication",
+ "ENC-TS Pre-authentication"],
+ creds, "dnsserver", "connect", "NONE",
+ self.rpc_ncacn_ip_tcp_krb5_check)
+
+ def test_rpc_ncacn_ip_tcp_ntlm_dns_seal(self):
+ creds = self.insta_creds(template=self.get_credentials(),
+ kerberos_state=DONT_USE_KERBEROS)
+ self._test_rpc_ncacn_ip_tcp(["NTLMSSP",
+ "ncacn_ip_tcp",
+ "NTLMSSP"],
+ creds, "dnsserver", "seal", "SEAL",
+ self.rpc_ncacn_ip_tcp_ntlm_check)
+
+ def test_rpc_ncacn_ip_tcp_krb5_dns_seal(self):
+ creds = self.insta_creds(template=self.get_credentials(),
+ kerberos_state=MUST_USE_KERBEROS)
+ self._test_rpc_ncacn_ip_tcp(["krb5",
+ "ncacn_ip_tcp",
+ "ENC-TS Pre-authentication",
+ "ENC-TS Pre-authentication"],
+ creds, "dnsserver", "seal", "SEAL",
+ self.rpc_ncacn_ip_tcp_krb5_check)
+
+ def test_ldap(self):
+
+ def isLastExpectedMessage(msg):
+ return (msg["type"] == "Authorization" and
+ msg["Authorization"]["serviceDescription"] == "LDAP" and
+ msg["Authorization"]["transportProtection"] == "SEAL" and
+ msg["Authorization"]["authType"] == "krb5")
+
+ self.samdb = SamDB(url="ldap://%s" % os.environ["SERVER"],
+ lp=self.get_loadparm(),
+ credentials=self.get_credentials())
+
+ messages = self.waitForMessages(isLastExpectedMessage)
+ self.assertEqual(3,
+ len(messages),
+ "Did not receive the expected number of messages")
+
+ # Check the first message it should be an Authentication
+ msg = messages[0]
+ self.assertEqual("Authentication", msg["type"])
+ self.assertEqual("NT_STATUS_PROTOCOL_UNREACHABLE", # RESPONSE_TOO_BIG
+ msg["Authentication"]["status"])
+ self.assertEqual("Kerberos KDC",
+ msg["Authentication"]["serviceDescription"])
+ self.assertEqual("ENC-TS Pre-authentication",
+ msg["Authentication"]["authDescription"])
+ self.assertTrue(msg["Authentication"]["duration"] > 0)
+ self.assertEqual(
+ EVT_ID_UNSUCCESSFUL_LOGON, msg["Authentication"]["eventId"])
+ self.assertEqual(
+ EVT_LOGON_NETWORK, msg["Authentication"]["logonType"])
+
+ # Check the second message it should be an Authentication
+ msg = messages[1]
+ self.assertEqual("Authentication", msg["type"])
+ self.assertEqual("NT_STATUS_OK", msg["Authentication"]["status"])
+ self.assertEqual("Kerberos KDC",
+ msg["Authentication"]["serviceDescription"])
+ self.assertEqual("ENC-TS Pre-authentication",
+ msg["Authentication"]["authDescription"])
+ self.assertTrue(msg["Authentication"]["duration"] > 0)
+ self.assertEqual(
+ EVT_ID_SUCCESSFUL_LOGON, msg["Authentication"]["eventId"])
+ self.assertEqual(
+ EVT_LOGON_NETWORK, msg["Authentication"]["logonType"])
+
+ def test_ldap_ntlm(self):
+
+ def isLastExpectedMessage(msg):
+ return (msg["type"] == "Authorization" and
+ msg["Authorization"]["serviceDescription"] == "LDAP" and
+ msg["Authorization"]["transportProtection"] == "SEAL" and
+ msg["Authorization"]["authType"] == "NTLMSSP")
+
+ self.samdb = SamDB(url="ldap://%s" % os.environ["SERVER_IP"],
+ lp=self.get_loadparm(),
+ credentials=self.get_credentials())
+
+ messages = self.waitForMessages(isLastExpectedMessage)
+ self.assertEqual(2,
+ len(messages),
+ "Did not receive the expected number of messages")
+ # Check the first message it should be an Authentication
+ msg = messages[0]
+ self.assertEqual("Authentication", msg["type"])
+ self.assertEqual("NT_STATUS_OK", msg["Authentication"]["status"])
+ self.assertEqual("LDAP",
+ msg["Authentication"]["serviceDescription"])
+ self.assertEqual("NTLMSSP", msg["Authentication"]["authDescription"])
+ self.assertTrue(msg["Authentication"]["duration"] > 0)
+ self.assertEqual(
+ EVT_ID_SUCCESSFUL_LOGON, msg["Authentication"]["eventId"])
+ self.assertEqual(
+ EVT_LOGON_NETWORK, msg["Authentication"]["logonType"])
+
+ def test_ldap_simple_bind(self):
+ def isLastExpectedMessage(msg):
+ return (msg["type"] == "Authorization" and
+ msg["Authorization"]["serviceDescription"] == "LDAP" and
+ msg["Authorization"]["transportProtection"] == "TLS" and
+ msg["Authorization"]["authType"] == "simple bind")
+
+ creds = self.insta_creds(template=self.get_credentials())
+ creds.set_bind_dn("%s\\%s" % (creds.get_domain(),
+ creds.get_username()))
+
+ self.samdb = SamDB(url="ldaps://%s" % os.environ["SERVER"],
+ lp=self.get_loadparm(),
+ credentials=creds)
+
+ messages = self.waitForMessages(isLastExpectedMessage)
+ self.assertEqual(2,
+ len(messages),
+ "Did not receive the expected number of messages")
+
+ # Check the first message it should be an Authentication
+ msg = messages[0]
+ self.assertEqual("Authentication", msg["type"])
+ self.assertEqual("NT_STATUS_OK", msg["Authentication"]["status"])
+ self.assertEqual("LDAP",
+ msg["Authentication"]["serviceDescription"])
+ self.assertEqual("simple bind/TLS",
+ msg["Authentication"]["authDescription"])
+ self.assertEqual(
+ EVT_ID_SUCCESSFUL_LOGON, msg["Authentication"]["eventId"])
+ self.assertEqual(
+ EVT_LOGON_NETWORK_CLEAR_TEXT, msg["Authentication"]["logonType"])
+
+ def test_ldap_simple_bind_bad_password(self):
+ def isLastExpectedMessage(msg):
+ return (msg["type"] == "Authentication" and
+ msg["Authentication"]["serviceDescription"] == "LDAP" and
+ (msg["Authentication"]["status"] ==
+ "NT_STATUS_WRONG_PASSWORD") and
+ (msg["Authentication"]["authDescription"] ==
+ "simple bind/TLS") and
+ (msg["Authentication"]["eventId"] ==
+ EVT_ID_UNSUCCESSFUL_LOGON) and
+ (msg["Authentication"]["logonType"] ==
+ EVT_LOGON_NETWORK_CLEAR_TEXT))
+
+ creds = self.insta_creds(template=self.get_credentials())
+ creds.set_password("badPassword")
+ creds.set_bind_dn("%s\\%s" % (creds.get_domain(),
+ creds.get_username()))
+
+ thrown = False
+ try:
+ self.samdb = SamDB(url="ldaps://%s" % os.environ["SERVER"],
+ lp=self.get_loadparm(),
+ credentials=creds)
+ except LdbError:
+ thrown = True
+ self.assertEqual(thrown, True)
+
+ messages = self.waitForMessages(isLastExpectedMessage)
+ self.assertEqual(1,
+ len(messages),
+ "Did not receive the expected number of messages")
+
+ def test_ldap_simple_bind_bad_user(self):
+ def isLastExpectedMessage(msg):
+ return (msg["type"] == "Authentication" and
+ msg["Authentication"]["serviceDescription"] == "LDAP" and
+ (msg["Authentication"]["status"] ==
+ "NT_STATUS_NO_SUCH_USER") and
+ (msg["Authentication"]["authDescription"] ==
+ "simple bind/TLS") and
+ (msg["Authentication"]["eventId"] ==
+ EVT_ID_UNSUCCESSFUL_LOGON) and
+ (msg["Authentication"]["logonType"] ==
+ EVT_LOGON_NETWORK_CLEAR_TEXT))
+
+ creds = self.insta_creds(template=self.get_credentials())
+ creds.set_bind_dn("%s\\%s" % (creds.get_domain(), "badUser"))
+
+ thrown = False
+ try:
+ self.samdb = SamDB(url="ldaps://%s" % os.environ["SERVER"],
+ lp=self.get_loadparm(),
+ credentials=creds)
+ except LdbError:
+ thrown = True
+ self.assertEqual(thrown, True)
+
+ messages = self.waitForMessages(isLastExpectedMessage)
+ self.assertEqual(1,
+ len(messages),
+ "Did not receive the expected number of messages")
+
+ def test_ldap_simple_bind_unparseable_user(self):
+ def isLastExpectedMessage(msg):
+ return (msg["type"] == "Authentication" and
+ msg["Authentication"]["serviceDescription"] == "LDAP" and
+ (msg["Authentication"]["status"] ==
+ "NT_STATUS_NO_SUCH_USER") and
+ (msg["Authentication"]["authDescription"] ==
+ "simple bind/TLS") and
+ (msg["Authentication"]["eventId"] ==
+ EVT_ID_UNSUCCESSFUL_LOGON) and
+ (msg["Authentication"]["logonType"] ==
+ EVT_LOGON_NETWORK_CLEAR_TEXT))
+
+ creds = self.insta_creds(template=self.get_credentials())
+ creds.set_bind_dn("%s\\%s" % (creds.get_domain(), "abdcef"))
+
+ thrown = False
+ try:
+ self.samdb = SamDB(url="ldaps://%s" % os.environ["SERVER"],
+ lp=self.get_loadparm(),
+ credentials=creds)
+ except LdbError:
+ thrown = True
+ self.assertEqual(thrown, True)
+
+ messages = self.waitForMessages(isLastExpectedMessage)
+ self.assertEqual(1,
+ len(messages),
+ "Did not receive the expected number of messages")
+
+ #
+ # Note: as this test does not expect any messages it will
+ # time out in the call to self.waitForMessages.
+ # This is expected, but it will slow this test.
+ def test_ldap_anonymous_access_bind_only(self):
+ # Should be no logging for anonymous bind
+ # so receiving any message indicates a failure.
+ def isLastExpectedMessage(msg):
+ return True
+
+ creds = self.insta_creds(template=self.get_credentials())
+ creds.set_anonymous()
+
+ self.samdb = SamDB(url="ldaps://%s" % os.environ["SERVER"],
+ lp=self.get_loadparm(),
+ credentials=creds)
+
+ messages = self.waitForMessages(isLastExpectedMessage)
+ self.assertEqual(0,
+ len(messages),
+ "Did not receive the expected number of messages")
+
+ def test_ldap_anonymous_access(self):
+ def isLastExpectedMessage(msg):
+ return (msg["type"] == "Authorization" and
+ msg["Authorization"]["serviceDescription"] == "LDAP" and
+ msg["Authorization"]["transportProtection"] == "TLS" and
+ msg["Authorization"]["account"] == "ANONYMOUS LOGON" and
+ msg["Authorization"]["authType"] == "no bind")
+
+ creds = self.insta_creds(template=self.get_credentials())
+ creds.set_anonymous()
+
+ self.samdb = SamDB(url="ldaps://%s" % os.environ["SERVER"],
+ lp=self.get_loadparm(),
+ credentials=creds)
+
+ try:
+ self.samdb.search(base=self.samdb.domain_dn())
+ self.fail("Expected an LdbError exception")
+ except LdbError:
+ pass
+
+ messages = self.waitForMessages(isLastExpectedMessage)
+ self.assertEqual(1,
+ len(messages),
+ "Did not receive the expected number of messages")
+
+ def test_smb(self):
+ def isLastExpectedMessage(msg):
+ return (msg["type"] == "Authorization" and
+ "SMB" in msg["Authorization"]["serviceDescription"] and
+ msg["Authorization"]["authType"] == "krb5" and
+ msg["Authorization"]["transportProtection"] == "SMB")
+
+ creds = self.insta_creds(template=self.get_credentials())
+ self.smb_connection(creds)
+
+ messages = self.waitForMessages(isLastExpectedMessage)
+ self.assertEqual(3,
+ len(messages),
+ "Did not receive the expected number of messages")
+ # Check the first message it should be an Authentication
+ msg = messages[0]
+ self.assertEqual("Authentication", msg["type"])
+ self.assertEqual("NT_STATUS_PROTOCOL_UNREACHABLE", # RESPONSE_TOO_BIG
+ msg["Authentication"]["status"])
+ self.assertEqual("Kerberos KDC",
+ msg["Authentication"]["serviceDescription"])
+ self.assertEqual("ENC-TS Pre-authentication",
+ msg["Authentication"]["authDescription"])
+ self.assertEqual(EVT_ID_UNSUCCESSFUL_LOGON,
+ msg["Authentication"]["eventId"])
+ self.assertEqual(EVT_LOGON_NETWORK,
+ msg["Authentication"]["logonType"])
+
+ # Check the second message it should be an Authentication
+ msg = messages[1]
+ self.assertEqual("Authentication", msg["type"])
+ self.assertEqual("NT_STATUS_OK", msg["Authentication"]["status"])
+ self.assertEqual("Kerberos KDC",
+ msg["Authentication"]["serviceDescription"])
+ self.assertEqual("ENC-TS Pre-authentication",
+ msg["Authentication"]["authDescription"])
+ self.assertEqual(EVT_ID_SUCCESSFUL_LOGON,
+ msg["Authentication"]["eventId"])
+ self.assertEqual(EVT_LOGON_NETWORK,
+ msg["Authentication"]["logonType"])
+
+ def test_smb_bad_password(self):
+ def isLastExpectedMessage(msg):
+ return (msg["type"] == "Authentication" and
+ (msg["Authentication"]["serviceDescription"] ==
+ "Kerberos KDC") and
+ (msg["Authentication"]["status"] ==
+ "NT_STATUS_WRONG_PASSWORD") and
+ (msg["Authentication"]["authDescription"] ==
+ "ENC-TS Pre-authentication"))
+
+ creds = self.insta_creds(template=self.get_credentials())
+ creds.set_kerberos_state(MUST_USE_KERBEROS)
+ creds.set_password("badPassword")
+
+ thrown = False
+ try:
+ self.smb_connection(creds)
+ except NTSTATUSError:
+ thrown = True
+ self.assertEqual(thrown, True)
+
+ messages = self.waitForMessages(isLastExpectedMessage)
+ self.assertEqual(1,
+ len(messages),
+ "Did not receive the expected number of messages")
+
+ def test_smb_bad_user(self):
+ def isLastExpectedMessage(msg):
+ return (msg["type"] == "Authentication" and
+ (msg["Authentication"]["serviceDescription"] ==
+ "Kerberos KDC") and
+ (msg["Authentication"]["status"] ==
+ "NT_STATUS_NO_SUCH_USER") and
+ (msg["Authentication"]["authDescription"] ==
+ "AS-REQ") and
+ (msg["Authentication"]["eventId"] ==
+ EVT_ID_UNSUCCESSFUL_LOGON) and
+ (msg["Authentication"]["logonType"] ==
+ EVT_LOGON_NETWORK))
+
+ creds = self.insta_creds(template=self.get_credentials())
+ creds.set_kerberos_state(MUST_USE_KERBEROS)
+ creds.set_username("badUser")
+
+ thrown = False
+ try:
+ self.smb_connection(creds)
+ except NTSTATUSError:
+ thrown = True
+ self.assertEqual(thrown, True)
+
+ messages = self.waitForMessages(isLastExpectedMessage)
+ self.assertEqual(1,
+ len(messages),
+ "Did not receive the expected number of messages")
+
+ def test_smb1_anonymous(self):
+ def isLastExpectedMessage(msg):
+ return (msg["type"] == "Authorization" and
+ msg["Authorization"]["serviceDescription"] == "SMB" and
+ msg["Authorization"]["authType"] == "NTLMSSP" and
+ msg["Authorization"]["account"] == "ANONYMOUS LOGON" and
+ msg["Authorization"]["transportProtection"] == "SMB")
+
+ server = os.environ["SERVER"]
+
+ path = "//%s/IPC$" % server
+ auth = "-N"
+ call(["bin/smbclient", path, auth, "-mNT1", "-c quit"])
+
+ messages = self.waitForMessages(isLastExpectedMessage)
+ self.assertEqual(3,
+ len(messages),
+ "Did not receive the expected number of messages")
+
+ # Check the first message it should be an Authentication
+ msg = messages[0]
+ self.assertEqual("Authentication", msg["type"])
+ self.assertEqual("NT_STATUS_NO_SUCH_USER",
+ msg["Authentication"]["status"])
+ self.assertEqual("SMB",
+ msg["Authentication"]["serviceDescription"])
+ self.assertEqual("NTLMSSP",
+ msg["Authentication"]["authDescription"])
+ self.assertEqual("No-Password",
+ msg["Authentication"]["passwordType"])
+ self.assertEqual(EVT_ID_UNSUCCESSFUL_LOGON,
+ msg["Authentication"]["eventId"])
+ self.assertEqual(EVT_LOGON_NETWORK,
+ msg["Authentication"]["logonType"])
+
+ # Check the second message it should be an Authentication
+ msg = messages[1]
+ self.assertEqual("Authentication", msg["type"])
+ self.assertEqual("NT_STATUS_OK",
+ msg["Authentication"]["status"])
+ self.assertEqual("SMB",
+ msg["Authentication"]["serviceDescription"])
+ self.assertEqual("NTLMSSP",
+ msg["Authentication"]["authDescription"])
+ self.assertEqual("No-Password",
+ msg["Authentication"]["passwordType"])
+ self.assertEqual("ANONYMOUS LOGON",
+ msg["Authentication"]["becameAccount"])
+ self.assertEqual(EVT_ID_SUCCESSFUL_LOGON,
+ msg["Authentication"]["eventId"])
+ self.assertEqual(EVT_LOGON_NETWORK,
+ msg["Authentication"]["logonType"])
+
+ def test_smb2_anonymous(self):
+ def isLastExpectedMessage(msg):
+ return (msg["type"] == "Authorization" and
+ msg["Authorization"]["serviceDescription"] == "SMB2" and
+ msg["Authorization"]["authType"] == "NTLMSSP" and
+ msg["Authorization"]["account"] == "ANONYMOUS LOGON" and
+ msg["Authorization"]["transportProtection"] == "SMB")
+
+ server = os.environ["SERVER"]
+
+ path = "//%s/IPC$" % server
+ auth = "-N"
+ call(["bin/smbclient", path, auth, "-mSMB3", "-c quit"])
+
+ messages = self.waitForMessages(isLastExpectedMessage)
+ self.assertEqual(3,
+ len(messages),
+ "Did not receive the expected number of messages")
+
+ # Check the first message it should be an Authentication
+ msg = messages[0]
+ self.assertEqual("Authentication", msg["type"])
+ self.assertEqual("NT_STATUS_NO_SUCH_USER",
+ msg["Authentication"]["status"])
+ self.assertEqual("SMB2",
+ msg["Authentication"]["serviceDescription"])
+ self.assertEqual("NTLMSSP",
+ msg["Authentication"]["authDescription"])
+ self.assertEqual("No-Password",
+ msg["Authentication"]["passwordType"])
+ self.assertEqual(EVT_ID_UNSUCCESSFUL_LOGON,
+ msg["Authentication"]["eventId"])
+ self.assertEqual(EVT_LOGON_NETWORK,
+ msg["Authentication"]["logonType"])
+
+ # Check the second message it should be an Authentication
+ msg = messages[1]
+ self.assertEqual("Authentication", msg["type"])
+ self.assertEqual("NT_STATUS_OK",
+ msg["Authentication"]["status"])
+ self.assertEqual("SMB2",
+ msg["Authentication"]["serviceDescription"])
+ self.assertEqual("NTLMSSP",
+ msg["Authentication"]["authDescription"])
+ self.assertEqual("No-Password",
+ msg["Authentication"]["passwordType"])
+ self.assertEqual("ANONYMOUS LOGON",
+ msg["Authentication"]["becameAccount"])
+ self.assertEqual(EVT_ID_SUCCESSFUL_LOGON,
+ msg["Authentication"]["eventId"])
+ self.assertEqual(EVT_LOGON_NETWORK,
+ msg["Authentication"]["logonType"])
+
+ def test_smb_no_krb_spnego(self):
+ def isLastExpectedMessage(msg):
+ return (msg["type"] == "Authorization" and
+ "SMB" in msg["Authorization"]["serviceDescription"] and
+ msg["Authorization"]["authType"] == "NTLMSSP" and
+ msg["Authorization"]["transportProtection"] == "SMB")
+
+ creds = self.insta_creds(template=self.get_credentials(),
+ kerberos_state=DONT_USE_KERBEROS)
+ self.smb_connection(creds)
+
+ messages = self.waitForMessages(isLastExpectedMessage)
+ self.assertEqual(2,
+ len(messages),
+ "Did not receive the expected number of messages")
+ # Check the first message it should be an Authentication
+ msg = messages[0]
+ self.assertEqual("Authentication", msg["type"])
+ self.assertEqual("NT_STATUS_OK", msg["Authentication"]["status"])
+ self.assertIn(msg["Authentication"]["serviceDescription"],
+ ["SMB", "SMB2"])
+ self.assertEqual("NTLMSSP",
+ msg["Authentication"]["authDescription"])
+ self.assertEqual("NTLMv2",
+ msg["Authentication"]["passwordType"])
+ self.assertEqual(EVT_ID_SUCCESSFUL_LOGON,
+ msg["Authentication"]["eventId"])
+ self.assertEqual(EVT_LOGON_NETWORK,
+ msg["Authentication"]["logonType"])
+
+ def test_smb_no_krb_spnego_bad_password(self):
+ def isLastExpectedMessage(msg):
+ return (msg["type"] == "Authentication" and
+ "SMB" in msg["Authentication"]["serviceDescription"] and
+ msg["Authentication"]["authDescription"] == "NTLMSSP" and
+ msg["Authentication"]["passwordType"] == "NTLMv2" and
+ (msg["Authentication"]["status"] ==
+ "NT_STATUS_WRONG_PASSWORD") and
+ (msg["Authentication"]["eventId"] ==
+ EVT_ID_UNSUCCESSFUL_LOGON) and
+ (msg["Authentication"]["logonType"] ==
+ EVT_LOGON_NETWORK))
+
+ creds = self.insta_creds(template=self.get_credentials(),
+ kerberos_state=DONT_USE_KERBEROS)
+ creds.set_password("badPassword")
+
+ thrown = False
+ try:
+ self.smb_connection(creds)
+ except NTSTATUSError:
+ thrown = True
+ self.assertEqual(thrown, True)
+
+ messages = self.waitForMessages(isLastExpectedMessage)
+ self.assertEqual(1,
+ len(messages),
+ "Did not receive the expected number of messages")
+
+ def test_smb_no_krb_spnego_bad_user(self):
+ def isLastExpectedMessage(msg):
+ return (msg["type"] == "Authentication" and
+ "SMB" in msg["Authentication"]["serviceDescription"] and
+ msg["Authentication"]["authDescription"] == "NTLMSSP" and
+ msg["Authentication"]["passwordType"] == "NTLMv2" and
+ (msg["Authentication"]["status"] ==
+ "NT_STATUS_NO_SUCH_USER") and
+ (msg["Authentication"]["eventId"] ==
+ EVT_ID_UNSUCCESSFUL_LOGON) and
+ (msg["Authentication"]["logonType"] ==
+ EVT_LOGON_NETWORK))
+
+ creds = self.insta_creds(template=self.get_credentials(),
+ kerberos_state=DONT_USE_KERBEROS)
+ creds.set_username("badUser")
+
+ thrown = False
+ try:
+ self.smb_connection(creds)
+ except NTSTATUSError:
+ thrown = True
+ self.assertEqual(thrown, True)
+
+ messages = self.waitForMessages(isLastExpectedMessage)
+ self.assertEqual(1,
+ len(messages),
+ "Did not receive the expected number of messages")
+
+ def test_smb_no_krb_no_spnego_no_ntlmv2(self):
+ def isLastExpectedMessage(msg):
+ return (msg["type"] == "Authorization" and
+ msg["Authorization"]["serviceDescription"] == "SMB" and
+ msg["Authorization"]["authType"] == "bare-NTLM" and
+ msg["Authorization"]["transportProtection"] == "SMB")
+
+ creds = self.insta_creds(template=self.get_credentials(),
+ kerberos_state=DONT_USE_KERBEROS)
+ self.smb_connection(creds,
+ force_smb1=True,
+ ntlmv2_auth="no",
+ use_spnego="no")
+
+ messages = self.waitForMessages(isLastExpectedMessage)
+ self.assertEqual(2,
+ len(messages),
+ "Did not receive the expected number of messages")
+ # Check the first message it should be an Authentication
+ msg = messages[0]
+ self.assertEqual("Authentication", msg["type"])
+ self.assertEqual("NT_STATUS_OK", msg["Authentication"]["status"])
+ self.assertEqual("SMB",
+ msg["Authentication"]["serviceDescription"])
+ self.assertEqual("bare-NTLM",
+ msg["Authentication"]["authDescription"])
+ self.assertEqual("NTLMv1",
+ msg["Authentication"]["passwordType"])
+ self.assertEqual(EVT_ID_SUCCESSFUL_LOGON,
+ msg["Authentication"]["eventId"])
+ self.assertEqual(EVT_LOGON_NETWORK,
+ msg["Authentication"]["logonType"])
+
+ def test_smb_no_krb_no_spnego_no_ntlmv2_bad_password(self):
+ def isLastExpectedMessage(msg):
+ return (msg["type"] == "Authentication" and
+ msg["Authentication"]["serviceDescription"] == "SMB" and
+ msg["Authentication"]["authDescription"] == "bare-NTLM" and
+ msg["Authentication"]["passwordType"] == "NTLMv1" and
+ (msg["Authentication"]["status"] ==
+ "NT_STATUS_WRONG_PASSWORD") and
+ (msg["Authentication"]["eventId"] ==
+ EVT_ID_UNSUCCESSFUL_LOGON) and
+ (msg["Authentication"]["logonType"] ==
+ EVT_LOGON_NETWORK))
+
+ creds = self.insta_creds(template=self.get_credentials(),
+ kerberos_state=DONT_USE_KERBEROS)
+ creds.set_password("badPassword")
+
+ thrown = False
+ try:
+ self.smb_connection(creds,
+ force_smb1=True,
+ ntlmv2_auth="no",
+ use_spnego="no")
+ except NTSTATUSError:
+ thrown = True
+ self.assertEqual(thrown, True)
+
+ messages = self.waitForMessages(isLastExpectedMessage)
+ self.assertEqual(1,
+ len(messages),
+ "Did not receive the expected number of messages")
+
+ def test_smb_no_krb_no_spnego_no_ntlmv2_bad_user(self):
+ def isLastExpectedMessage(msg):
+ return (msg["type"] == "Authentication" and
+ msg["Authentication"]["serviceDescription"] == "SMB" and
+ msg["Authentication"]["authDescription"] == "bare-NTLM" and
+ msg["Authentication"]["passwordType"] == "NTLMv1" and
+ (msg["Authentication"]["status"] ==
+ "NT_STATUS_NO_SUCH_USER") and
+ (msg["Authentication"]["eventId"] ==
+ EVT_ID_UNSUCCESSFUL_LOGON) and
+ (msg["Authentication"]["logonType"] ==
+ EVT_LOGON_NETWORK))
+
+ creds = self.insta_creds(template=self.get_credentials(),
+ kerberos_state=DONT_USE_KERBEROS)
+ creds.set_username("badUser")
+
+ thrown = False
+ try:
+ self.smb_connection(creds,
+ force_smb1=True,
+ ntlmv2_auth="no",
+ use_spnego="no")
+ except NTSTATUSError:
+ thrown = True
+ self.assertEqual(thrown, True)
+
+ messages = self.waitForMessages(isLastExpectedMessage)
+ self.assertEqual(1,
+ len(messages),
+ "Did not receive the expected number of messages")
+
+ def test_samlogon_interactive(self):
+
+ workstation = "AuthLogTests"
+
+ def isLastExpectedMessage(msg):
+ return (msg["type"] == "Authentication" and
+ (msg["Authentication"]["serviceDescription"] ==
+ "SamLogon") and
+ (msg["Authentication"]["authDescription"] ==
+ "interactive") and
+ msg["Authentication"]["status"] == "NT_STATUS_OK" and
+ (msg["Authentication"]["workstation"] ==
+ r"\\%s" % workstation) and
+ (msg["Authentication"]["eventId"] ==
+ EVT_ID_SUCCESSFUL_LOGON) and
+ (msg["Authentication"]["logonType"] ==
+ EVT_LOGON_INTERACTIVE))
+
+ server = os.environ["SERVER"]
+ user = os.environ["USERNAME"]
+ password = os.environ["PASSWORD"]
+ samlogon = "samlogon %s %s %s %d" % (user, password, workstation, 1)
+
+ call(["bin/rpcclient", "-c", samlogon, "-U%", server])
+
+ messages = self.waitForMessages(isLastExpectedMessage)
+ messages = self.remove_netlogon_messages(messages)
+ received = len(messages)
+ self.assertIs(True,
+ (received == 4 or received == 5),
+ "Did not receive the expected number of messages")
+
+ def test_samlogon_interactive_bad_password(self):
+
+ workstation = "AuthLogTests"
+
+ def isLastExpectedMessage(msg):
+ return (msg["type"] == "Authentication" and
+ (msg["Authentication"]["serviceDescription"] ==
+ "SamLogon") and
+ (msg["Authentication"]["authDescription"] ==
+ "interactive") and
+ (msg["Authentication"]["status"] ==
+ "NT_STATUS_WRONG_PASSWORD") and
+ (msg["Authentication"]["workstation"] ==
+ r"\\%s" % workstation) and
+ (msg["Authentication"]["eventId"] ==
+ EVT_ID_UNSUCCESSFUL_LOGON) and
+ (msg["Authentication"]["logonType"] ==
+ EVT_LOGON_INTERACTIVE))
+
+ server = os.environ["SERVER"]
+ user = os.environ["USERNAME"]
+ password = "badPassword"
+ samlogon = "samlogon %s %s %s %d" % (user, password, workstation, 1)
+
+ call(["bin/rpcclient", "-c", samlogon, "-U%", server])
+
+ messages = self.waitForMessages(isLastExpectedMessage)
+ messages = self.remove_netlogon_messages(messages)
+ received = len(messages)
+ self.assertIs(True,
+ (received == 4 or received == 5),
+ "Did not receive the expected number of messages")
+
+ def test_samlogon_interactive_bad_user(self):
+
+ workstation = "AuthLogTests"
+
+ def isLastExpectedMessage(msg):
+ return (msg["type"] == "Authentication" and
+ (msg["Authentication"]["serviceDescription"] ==
+ "SamLogon") and
+ (msg["Authentication"]["authDescription"] ==
+ "interactive") and
+ (msg["Authentication"]["status"] ==
+ "NT_STATUS_NO_SUCH_USER") and
+ (msg["Authentication"]["workstation"] ==
+ r"\\%s" % workstation) and
+ (msg["Authentication"]["eventId"] ==
+ EVT_ID_UNSUCCESSFUL_LOGON) and
+ (msg["Authentication"]["logonType"] ==
+ EVT_LOGON_INTERACTIVE))
+
+ server = os.environ["SERVER"]
+ user = "badUser"
+ password = os.environ["PASSWORD"]
+ samlogon = "samlogon %s %s %s %d" % (user, password, workstation, 1)
+
+ call(["bin/rpcclient", "-c", samlogon, "-U%", server])
+
+ messages = self.waitForMessages(isLastExpectedMessage)
+ messages = self.remove_netlogon_messages(messages)
+ received = len(messages)
+ self.assertIs(True,
+ (received == 4 or received == 5),
+ "Did not receive the expected number of messages")
+
+ def test_samlogon_network(self):
+
+ workstation = "AuthLogTests"
+
+ def isLastExpectedMessage(msg):
+ return (msg["type"] == "Authentication" and
+ (msg["Authentication"]["serviceDescription"] ==
+ "SamLogon") and
+ msg["Authentication"]["authDescription"] == "network" and
+ msg["Authentication"]["status"] == "NT_STATUS_OK" and
+ (msg["Authentication"]["workstation"] ==
+ r"\\%s" % workstation) and
+ (msg["Authentication"]["eventId"] ==
+ EVT_ID_SUCCESSFUL_LOGON) and
+ (msg["Authentication"]["logonType"] ==
+ EVT_LOGON_NETWORK))
+
+ server = os.environ["SERVER"]
+ user = os.environ["USERNAME"]
+ password = os.environ["PASSWORD"]
+ samlogon = "samlogon %s %s %s %d" % (user, password, workstation, 2)
+
+ call(["bin/rpcclient", "-c", samlogon, "-U%", server])
+
+ messages = self.waitForMessages(isLastExpectedMessage)
+ messages = self.remove_netlogon_messages(messages)
+ received = len(messages)
+ self.assertIs(True,
+ (received == 4 or received == 5),
+ "Did not receive the expected number of messages")
+
+ def test_samlogon_network_bad_password(self):
+
+ workstation = "AuthLogTests"
+
+ def isLastExpectedMessage(msg):
+ return (msg["type"] == "Authentication" and
+ (msg["Authentication"]["serviceDescription"] ==
+ "SamLogon") and
+ msg["Authentication"]["authDescription"] == "network" and
+ (msg["Authentication"]["status"] ==
+ "NT_STATUS_WRONG_PASSWORD") and
+ (msg["Authentication"]["workstation"] ==
+ r"\\%s" % workstation) and
+ (msg["Authentication"]["eventId"] ==
+ EVT_ID_UNSUCCESSFUL_LOGON) and
+ (msg["Authentication"]["logonType"] ==
+ EVT_LOGON_NETWORK))
+
+ server = os.environ["SERVER"]
+ user = os.environ["USERNAME"]
+ password = "badPassword"
+ samlogon = "samlogon %s %s %s %d" % (user, password, workstation, 2)
+
+ call(["bin/rpcclient", "-c", samlogon, "-U%", server])
+
+ messages = self.waitForMessages(isLastExpectedMessage)
+ messages = self.remove_netlogon_messages(messages)
+ received = len(messages)
+ self.assertIs(True,
+ (received == 4 or received == 5),
+ "Did not receive the expected number of messages")
+
+ def test_samlogon_network_bad_user(self):
+
+ workstation = "AuthLogTests"
+
+ def isLastExpectedMessage(msg):
+ return ((msg["type"] == "Authentication") and
+ (msg["Authentication"]["serviceDescription"] ==
+ "SamLogon") and
+ (msg["Authentication"]["authDescription"] == "network") and
+ (msg["Authentication"]["status"] ==
+ "NT_STATUS_NO_SUCH_USER") and
+ (msg["Authentication"]["workstation"] ==
+ r"\\%s" % workstation) and
+ (msg["Authentication"]["eventId"] ==
+ EVT_ID_UNSUCCESSFUL_LOGON) and
+ (msg["Authentication"]["logonType"] ==
+ EVT_LOGON_NETWORK))
+
+ server = os.environ["SERVER"]
+ user = "badUser"
+ password = os.environ["PASSWORD"]
+ samlogon = "samlogon %s %s %s %d" % (user, password, workstation, 2)
+
+ call(["bin/rpcclient", "-c", samlogon, "-U%", server])
+
+ messages = self.waitForMessages(isLastExpectedMessage)
+ messages = self.remove_netlogon_messages(messages)
+ received = len(messages)
+ self.assertIs(True,
+ (received == 4 or received == 5),
+ "Did not receive the expected number of messages")
+
+ def test_samlogon_network_mschap(self):
+
+ workstation = "AuthLogTests"
+
+ def isLastExpectedMessage(msg):
+ return ((msg["type"] == "Authentication") and
+ (msg["Authentication"]["serviceDescription"] ==
+ "SamLogon") and
+ (msg["Authentication"]["authDescription"] == "network") and
+ (msg["Authentication"]["status"] == "NT_STATUS_OK") and
+ (msg["Authentication"]["passwordType"] == "MSCHAPv2") and
+ (msg["Authentication"]["workstation"] ==
+ r"\\%s" % workstation) and
+ (msg["Authentication"]["eventId"] ==
+ EVT_ID_SUCCESSFUL_LOGON) and
+ (msg["Authentication"]["logonType"] ==
+ EVT_LOGON_NETWORK))
+
+ server = os.environ["SERVER"]
+ user = os.environ["USERNAME"]
+ password = os.environ["PASSWORD"]
+ samlogon = "samlogon %s %s %s %d 0x00010000" % (
+ user, password, workstation, 2)
+
+ call(["bin/rpcclient", "-c", samlogon, "-U%", server])
+
+ messages = self.waitForMessages(isLastExpectedMessage)
+ messages = self.remove_netlogon_messages(messages)
+ received = len(messages)
+ self.assertIs(True,
+ (received == 4 or received == 5),
+ "Did not receive the expected number of messages")
+
+ def test_samlogon_network_mschap_bad_password(self):
+
+ workstation = "AuthLogTests"
+
+ def isLastExpectedMessage(msg):
+ return ((msg["type"] == "Authentication") and
+ (msg["Authentication"]["serviceDescription"] ==
+ "SamLogon") and
+ (msg["Authentication"]["authDescription"] == "network") and
+ (msg["Authentication"]["status"] ==
+ "NT_STATUS_WRONG_PASSWORD") and
+ (msg["Authentication"]["passwordType"] == "MSCHAPv2") and
+ (msg["Authentication"]["workstation"] ==
+ r"\\%s" % workstation) and
+ (msg["Authentication"]["eventId"] ==
+ EVT_ID_UNSUCCESSFUL_LOGON) and
+ (msg["Authentication"]["logonType"] ==
+ EVT_LOGON_NETWORK))
+
+ server = os.environ["SERVER"]
+ user = os.environ["USERNAME"]
+ password = "badPassword"
+ samlogon = "samlogon %s %s %s %d 0x00010000" % (
+ user, password, workstation, 2)
+
+ call(["bin/rpcclient", "-c", samlogon, "-U%", server])
+
+ messages = self.waitForMessages(isLastExpectedMessage)
+ messages = self.remove_netlogon_messages(messages)
+ received = len(messages)
+ self.assertIs(True,
+ (received == 4 or received == 5),
+ "Did not receive the expected number of messages")
+
+ def test_samlogon_network_mschap_bad_user(self):
+
+ workstation = "AuthLogTests"
+
+ def isLastExpectedMessage(msg):
+ return ((msg["type"] == "Authentication") and
+ (msg["Authentication"]["serviceDescription"] ==
+ "SamLogon") and
+ (msg["Authentication"]["authDescription"] == "network") and
+ (msg["Authentication"]["status"] ==
+ "NT_STATUS_NO_SUCH_USER") and
+ (msg["Authentication"]["passwordType"] == "MSCHAPv2") and
+ (msg["Authentication"]["workstation"] ==
+ r"\\%s" % workstation) and
+ (msg["Authentication"]["eventId"] ==
+ EVT_ID_UNSUCCESSFUL_LOGON) and
+ (msg["Authentication"]["logonType"] ==
+ EVT_LOGON_NETWORK))
+
+ server = os.environ["SERVER"]
+ user = "badUser"
+ password = os.environ["PASSWORD"]
+ samlogon = "samlogon %s %s %s %d 0x00010000" % (
+ user, password, workstation, 2)
+
+ call(["bin/rpcclient", "-c", samlogon, "-U%", server])
+
+ messages = self.waitForMessages(isLastExpectedMessage)
+ messages = self.remove_netlogon_messages(messages)
+ received = len(messages)
+ self.assertIs(True,
+ (received == 4 or received == 5),
+ "Did not receive the expected number of messages")
+
+ def test_samlogon_schannel_seal(self):
+
+ workstation = "AuthLogTests"
+
+ def isLastExpectedMessage(msg):
+ return ((msg["type"] == "Authentication") and
+ (msg["Authentication"]["serviceDescription"] ==
+ "SamLogon") and
+ (msg["Authentication"]["authDescription"] == "network") and
+ (msg["Authentication"]["status"] == "NT_STATUS_OK") and
+ (msg["Authentication"]["workstation"] ==
+ r"\\%s" % workstation) and
+ (msg["Authentication"]["eventId"] ==
+ EVT_ID_SUCCESSFUL_LOGON) and
+ (msg["Authentication"]["logonType"] ==
+ EVT_LOGON_NETWORK))
+
+ server = os.environ["SERVER"]
+ user = os.environ["USERNAME"]
+ password = os.environ["PASSWORD"]
+ samlogon = "schannel;samlogon %s %s %s" % (user, password, workstation)
+
+ call(["bin/rpcclient", "-c", samlogon, "-U%", server])
+
+ messages = self.waitForMessages(isLastExpectedMessage)
+ messages = self.remove_netlogon_messages(messages)
+ received = len(messages)
+ self.assertIs(True,
+ (received == 4 or received == 5),
+ "Did not receive the expected number of messages")
+
+ # Check the second to last message it should be an Authorization
+ msg = messages[-2]
+ self.assertEqual("Authorization", msg["type"])
+ self.assertEqual("DCE/RPC",
+ msg["Authorization"]["serviceDescription"])
+ self.assertEqual("schannel", msg["Authorization"]["authType"])
+ self.assertEqual("SEAL", msg["Authorization"]["transportProtection"])
+ self.assertTrue(self.is_guid(msg["Authorization"]["sessionId"]))
+
+ # Signed logons get promoted to sealed, this test ensures that
+ # this behaviour is not removed accidentally
+ def test_samlogon_schannel_sign(self):
+
+ workstation = "AuthLogTests"
+
+ def isLastExpectedMessage(msg):
+ return ((msg["type"] == "Authentication") and
+ (msg["Authentication"]["serviceDescription"] ==
+ "SamLogon") and
+ (msg["Authentication"]["authDescription"] == "network") and
+ (msg["Authentication"]["status"] == "NT_STATUS_OK") and
+ (msg["Authentication"]["workstation"] ==
+ r"\\%s" % workstation) and
+ (msg["Authentication"]["eventId"] ==
+ EVT_ID_SUCCESSFUL_LOGON) and
+ (msg["Authentication"]["logonType"] ==
+ EVT_LOGON_NETWORK))
+
+ server = os.environ["SERVER"]
+ user = os.environ["USERNAME"]
+ password = os.environ["PASSWORD"]
+ samlogon = "schannelsign;samlogon %s %s %s" % (
+ user, password, workstation)
+
+ call(["bin/rpcclient", "-c", samlogon, "-U%", server])
+
+ messages = self.waitForMessages(isLastExpectedMessage)
+ messages = self.remove_netlogon_messages(messages)
+ received = len(messages)
+ self.assertIs(True,
+ (received == 4 or received == 5),
+ "Did not receive the expected number of messages")
+
+ # Check the second to last message it should be an Authorization
+ msg = messages[-2]
+ self.assertEqual("Authorization", msg["type"])
+ self.assertEqual("DCE/RPC",
+ msg["Authorization"]["serviceDescription"])
+ self.assertEqual("schannel", msg["Authorization"]["authType"])
+ self.assertEqual("SEAL", msg["Authorization"]["transportProtection"])
+ self.assertTrue(self.is_guid(msg["Authorization"]["sessionId"]))
+
+
# Allow this test module to be run directly as a script.
if __name__ == '__main__':
    import unittest
    unittest.main()
diff --git a/python/samba/tests/auth_log_base.py b/python/samba/tests/auth_log_base.py
new file mode 100644
index 0000000..131f019
--- /dev/null
+++ b/python/samba/tests/auth_log_base.py
@@ -0,0 +1,221 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for the Auth and AuthZ logging.
+"""
+
+import samba.tests
+from samba.messaging import Messaging
+from samba.dcerpc.messaging import MSG_AUTH_LOG, AUTH_EVENT_NAME
+from samba.param import LoadParm
+import time
+import json
+import os
+import re
+
+
def default_msg_filter(msg):
    """Accept only the message types the original auth-log tests expect.

    The tests were written when "Authentication" and "Authorization" were
    the only message types emitted, and they count messages on that
    assumption.  Filtering here keeps newer message types from leaking
    into the expected counts and breaking those tests.
    """
    return msg['type'] in ("Authentication", "Authorization")
+
+
class NoMessageException(Exception):
    """No auth-log message arrived on the bus before the polling timeout."""
    pass
+
+
class AuthLogTestBase(samba.tests.TestCase):
    """Base class for authentication/authorization log tests.

    Subscribes to the server's irpc auth-event messages (MSG_AUTH_LOG),
    collects them as parsed JSON in cls.context["messages"], and provides
    helpers for waiting on, filtering, and discarding those messages.
    """

    @classmethod
    def setUpClass(cls):
        super().setUpClass()

        # connect to the server's messaging bus (we need to explicitly load a
        # different smb.conf here, because in all other respects this test
        # wants to act as a separate remote client)
        server_conf = os.getenv('SERVERCONFFILE')
        if server_conf:
            lp_ctx = LoadParm(filename_for_non_global_lp=server_conf)
        else:
            lp_ctx = samba.tests.env_loadparm()
        cls.msg_ctx = Messaging((1,), lp_ctx=lp_ctx)
        cls.msg_ctx.irpc_add_name(AUTH_EVENT_NAME)

        # Now switch back to using the client-side smb.conf. The tests will
        # use the first interface in the client.conf (we need to strip off
        # the subnet mask portion)
        lp_ctx = samba.tests.env_loadparm()
        client_ip_and_mask = lp_ctx.get('interfaces')[0]
        client_ip = client_ip_and_mask.split('/')[0]

        # the messaging ctx is the server's view of the world, so our own
        # client IP will be the remoteAddress when connections are logged
        cls.remoteAddress = client_ip

        def messageHandler(context, msgType, src, message):
            # Printing the raw message does not resemble subunit output,
            # and it makes these tests much easier to debug.
            print(message)
            jsonMsg = json.loads(message)
            context["messages"].append(jsonMsg)

        cls.context = {"messages": []}
        cls.msg_handler_and_context = (messageHandler, cls.context)
        cls.msg_ctx.register(cls.msg_handler_and_context,
                             msg_type=MSG_AUTH_LOG)

        cls.server = os.environ["SERVER"]
        cls.connection = None

    @classmethod
    def tearDownClass(cls):
        # Undo setUpClass: stop receiving auth-log messages and drop the
        # irpc name registration before the messaging context goes away.
        cls.msg_ctx.deregister(cls.msg_handler_and_context,
                               msg_type=MSG_AUTH_LOG)
        cls.msg_ctx.irpc_remove_name(AUTH_EVENT_NAME)

        super().tearDownClass()

    def setUp(self):
        # Start each test with an empty message queue so earlier tests'
        # messages can't bleed into this one's expectations.
        super().setUp()
        type(self).discardMessages()

    def isRemote(self, message):
        """Return True if *message* originates from our client address.

        With remoteAddress set to None every message is accepted.  Only
        the three listed message types carry a remoteAddress field; any
        other type is rejected outright.
        """
        if self.remoteAddress is None:
            return True

        supported_types = {
            "Authentication",
            "Authorization",
            "KDC Authorization",
        }
        message_type = message["type"]
        if message_type in supported_types:
            remote = message[message_type]["remoteAddress"]
        else:
            return False

        try:
            # NOTE(review): assumes remoteAddress is colon-separated with
            # the host as the second element (e.g. "ipv4:addr:port") —
            # confirm against the server's log format.
            addr = remote.split(":")
            return addr[1] == self.remoteAddress
        except IndexError:
            return False

    def waitForMessages(self, isLastExpectedMessage, connection=None, *,
                        msgFilter=default_msg_filter):
        """Wait for all the expected messages to arrive
        The connection is passed through to keep the connection alive
        until all the logging messages have been received.

        By default, only Authentication and Authorization messages will be
        returned, so that old tests continue to pass. To receive all messages,
        pass msgFilter=None.

        Returns the collected messages ending with the first one for which
        isLastExpectedMessage() is true, or [] if the bus times out first.
        """

        messages = []
        while True:
            try:
                msg = self.nextMessage(msgFilter=msgFilter)
            except NoMessageException:
                # Timed out: discard any partial collection.
                return []

            messages.append(msg)
            if isLastExpectedMessage(msg):
                return messages

    def nextMessage(self, msgFilter=None):
        """Return the next relevant message, or throw a NoMessageException."""
        def is_relevant(msg):
            # A message counts only if it came from our client address
            # and passes the caller-supplied filter (None = accept all).
            if not self.isRemote(msg):
                return False

            if msgFilter is None:
                return True

            return msgFilter(msg)

        messages = self.context['messages']

        while True:
            # Poll the messaging bus for up to 2 seconds per iteration.
            timeout = 2
            until = time.time() + timeout

            while not messages:
                # Fetch a new message from the messaging bus.

                current = time.time()
                if until < current:
                    break

                self.msg_ctx.loop_once(until - current)

            if not messages:
                raise NoMessageException('timed out looking for a message')

            # Grab the next message from the queue.
            msg = messages.pop(0)
            if is_relevant(msg):
                return msg

    # Discard any previously queued messages.
    @classmethod
    def discardMessages(cls):
        messages = cls.context["messages"]

        while True:
            messages.clear()

            # tevent presumably has other tasks to run, so we might need two or
            # three loops before a message comes through.
            for _ in range(5):
                cls.msg_ctx.loop_once(0.001)

            if not messages:
                # No new messages. We’ve probably got them all.
                break

    # Remove any NETLOGON authentication messages
    # NETLOGON is only performed once per session, so to avoid ordering
    # dependencies within the tests it's best to strip out NETLOGON messages.
    #
    def remove_netlogon_messages(self, messages):
        def is_not_netlogon(msg):
            # Keep messages that have no Authentication payload at all.
            if "Authentication" not in msg:
                return True
            sd = msg["Authentication"]["serviceDescription"]
            return sd != "NETLOGON"

        return list(filter(is_not_netlogon, messages))

    # Canonical 8-4-4-4-12 lower-case hex GUID pattern.
    GUID_RE = re.compile(
        "[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}")

    #
    # Is the supplied GUID string correctly formatted
    # (returns the re.Match object — truthy — or None)
    #
    def is_guid(self, guid):
        return self.GUID_RE.fullmatch(guid)
diff --git a/python/samba/tests/auth_log_ncalrpc.py b/python/samba/tests/auth_log_ncalrpc.py
new file mode 100644
index 0000000..c671556
--- /dev/null
+++ b/python/samba/tests/auth_log_ncalrpc.py
@@ -0,0 +1,102 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for the Auth and AuthZ logging.
+"""
+
+import samba.tests
+from samba.credentials import DONT_USE_KERBEROS
+from samba.dcerpc.dcerpc import AS_SYSTEM_MAGIC_PATH_TOKEN
+from samba.dcerpc import samr
+import samba.tests.auth_log_base
+from samba.dcerpc.windows_event_ids import (
+ EVT_ID_SUCCESSFUL_LOGON,
+ EVT_LOGON_NETWORK
+)
+
+
class AuthLogTestsNcalrpc(samba.tests.auth_log_base.AuthLogTestBase):
    """Auth-log tests for DCE/RPC connections made over ncalrpc."""

    def setUp(self):
        super().setUp()
        # ncalrpc connections are local, so the server logs the magic
        # "system" path token instead of a client IP address.
        self.remoteAddress = AS_SYSTEM_MAGIC_PATH_TOKEN

    def _test_rpc_ncaclrpc(self, authTypes, binding, creds,
                           protection, checkFunction):
        """Connect to SAMR over ncalrpc and verify the logged messages."""

        def isLastExpectedMessage(msg):
            if msg["type"] != "Authorization":
                return False
            authz = msg["Authorization"]
            return (authz["serviceDescription"] == "DCE/RPC"
                    and authz["authType"] == authTypes[0]
                    and authz["transportProtection"] == protection)

        if binding:
            binding = "[%s]" % binding

        samr.samr("ncalrpc:%s" % binding, self.get_loadparm(), creds)
        received = self.waitForMessages(isLastExpectedMessage)
        checkFunction(received, authTypes, protection)

    def rpc_ncacn_np_ntlm_check(self, messages, authTypes, protection):
        """Check the Authorization/Authentication message sequence."""

        self.assertEqual(len(authTypes),
                         len(messages),
                         "Did not receive the expected number of messages")

        # First message: the unprotected transport-level Authorization.
        msg = messages[0]
        self.assertEqual("Authorization", msg["type"])
        authz = msg["Authorization"]
        self.assertEqual("DCE/RPC", authz["serviceDescription"])
        self.assertEqual(authTypes[1], authz["authType"])
        self.assertEqual("NONE", authz["transportProtection"])
        self.assertTrue(self.is_guid(authz["sessionId"]))

        # Second message: the successful NTLMSSP Authentication.
        msg = messages[1]
        self.assertEqual("Authentication", msg["type"])
        auth = msg["Authentication"]
        self.assertEqual("NT_STATUS_OK", auth["status"])
        self.assertEqual("DCE/RPC", auth["serviceDescription"])
        self.assertEqual(authTypes[2], auth["authDescription"])
        self.assertEqual(EVT_ID_SUCCESSFUL_LOGON, auth["eventId"])
        self.assertEqual(EVT_LOGON_NETWORK, auth["logonType"])

    def test_ncalrpc_ntlm_dns_sign(self):
        """NTLMSSP over ncalrpc with signing requested."""
        creds = self.insta_creds(template=self.get_credentials(),
                                 kerberos_state=DONT_USE_KERBEROS)
        self._test_rpc_ncaclrpc(["NTLMSSP", "ncalrpc", "NTLMSSP"],
                                "", creds, "SIGN",
                                self.rpc_ncacn_np_ntlm_check)

    def test_ncalrpc_ntlm_dns_seal(self):
        """NTLMSSP over ncalrpc with sealing requested."""
        creds = self.insta_creds(template=self.get_credentials(),
                                 kerberos_state=DONT_USE_KERBEROS)
        self._test_rpc_ncaclrpc(["NTLMSSP", "ncalrpc", "NTLMSSP"],
                                "seal", creds, "SEAL",
                                self.rpc_ncacn_np_ntlm_check)
diff --git a/python/samba/tests/auth_log_netlogon.py b/python/samba/tests/auth_log_netlogon.py
new file mode 100644
index 0000000..ac7e284
--- /dev/null
+++ b/python/samba/tests/auth_log_netlogon.py
@@ -0,0 +1,134 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2017
+# Copyright (C) Catalyst IT Ltd. 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""
+ Tests that exercise the auth logging for a successful netlogon attempt
+
+ NOTE: As the netlogon authentication is performed once per session,
+ there is only one test in this routine. If another test is added
+ only the test executed first will generate the netlogon auth message
+"""
+
+import samba.tests
+import os
+from samba.samdb import SamDB
+import samba.tests.auth_log_base
+from samba.credentials import Credentials
+from samba.dcerpc import netlogon
+from samba.dcerpc.dcerpc import AS_SYSTEM_MAGIC_PATH_TOKEN
+from samba.auth import system_session
+from samba.tests import delete_force
+from samba.dsdb import UF_WORKSTATION_TRUST_ACCOUNT, UF_PASSWD_NOTREQD
+from samba.dcerpc.misc import SEC_CHAN_WKSTA
+from samba.dcerpc.windows_event_ids import (
+ EVT_ID_SUCCESSFUL_LOGON,
+ EVT_LOGON_NETWORK
+)
+
+
class AuthLogTestsNetLogon(samba.tests.auth_log_base.AuthLogTestBase):
    """Auth-log test for a successful NETLOGON ServerAuthenticate.

    setUp creates a temporary workstation trust account; tearDown removes
    it.  Only one netlogon test exists here because the authentication is
    performed once per session (see the module docstring).
    """

    def setUp(self):
        super().setUp()
        self.lp = samba.tests.env_loadparm()
        self.session = system_session()
        self.ldb = SamDB(
            session_info=self.session,
            lp=self.lp)

        self.domain = os.environ["DOMAIN"]
        # Name and password of the machine account created below.
        self.netbios_name = "NetLogonGood"
        self.machinepass = "abcdefghij"
        # ncalrpc is local, so the server logs the magic "system" token.
        self.remoteAddress = AS_SYSTEM_MAGIC_PATH_TOKEN
        self.base_dn = self.ldb.domain_dn()
        self.dn = ("cn=%s,cn=users,%s" % (self.netbios_name, self.base_dn))

        # unicodePwd must be the UTF-16-LE encoding of the quoted password.
        utf16pw = ('"' + self.machinepass + '"').encode('utf-16-le')
        self.ldb.add({
            "dn": self.dn,
            "objectclass": "computer",
            "sAMAccountName": "%s$" % self.netbios_name,
            "userAccountControl":
                str(UF_WORKSTATION_TRUST_ACCOUNT | UF_PASSWD_NOTREQD),
            "unicodePwd": utf16pw})

    def tearDown(self):
        # Remove the machine account created in setUp.
        super().tearDown()
        delete_force(self.ldb, self.dn)

    def _test_netlogon(self, binding, checkFunction):
        """Authenticate as the machine account over schannel and check logs.

        binding: extra ncalrpc binding options appended after "schannel".
        checkFunction: callable given the collected messages to verify.
        """

        def isLastExpectedMessage(msg):
            return (
                msg["type"] == "Authorization" and
                msg["Authorization"]["serviceDescription"] == "DCE/RPC" and
                msg["Authorization"]["authType"] == "schannel" and
                msg["Authorization"]["transportProtection"] == "SEAL")

        if binding:
            binding = "[schannel,%s]" % binding
        else:
            binding = "[schannel]"

        machine_creds = Credentials()
        machine_creds.guess(self.get_loadparm())
        machine_creds.set_secure_channel_type(SEC_CHAN_WKSTA)
        machine_creds.set_password(self.machinepass)
        machine_creds.set_username(self.netbios_name + "$")

        netlogon_conn = netlogon.netlogon("ncalrpc:%s" % binding,
                                          self.get_loadparm(),
                                          machine_creds)

        # Pass the connection through so it stays alive until all the
        # log messages have been received.
        messages = self.waitForMessages(isLastExpectedMessage, netlogon_conn)
        checkFunction(messages)

    def netlogon_check(self, messages):
        """Verify the expected message sequence for a good netlogon."""

        expected_messages = 5
        self.assertEqual(expected_messages,
                         len(messages),
                         "Did not receive the expected number of messages")

        # Check the first message it should be an Authorization
        msg = messages[0]
        self.assertEqual("Authorization", msg["type"])
        self.assertEqual("DCE/RPC",
                         msg["Authorization"]["serviceDescription"])
        self.assertEqual("ncalrpc", msg["Authorization"]["authType"])
        self.assertEqual("NONE", msg["Authorization"]["transportProtection"])
        self.assertTrue(self.is_guid(msg["Authorization"]["sessionId"]))

        # Check the fourth message it should be a NETLOGON Authentication
        msg = messages[3]
        self.assertEqual("Authentication", msg["type"])
        self.assertEqual("NETLOGON",
                         msg["Authentication"]["serviceDescription"])
        self.assertEqual("ServerAuthenticate",
                         msg["Authentication"]["authDescription"])
        self.assertEqual("NT_STATUS_OK",
                         msg["Authentication"]["status"])
        self.assertEqual("HMAC-SHA256",
                         msg["Authentication"]["passwordType"])
        self.assertEqual(EVT_ID_SUCCESSFUL_LOGON,
                         msg["Authentication"]["eventId"])
        self.assertEqual(EVT_LOGON_NETWORK,
                         msg["Authentication"]["logonType"])

    def test_netlogon(self):
        self._test_netlogon("SEAL", self.netlogon_check)
diff --git a/python/samba/tests/auth_log_netlogon_bad_creds.py b/python/samba/tests/auth_log_netlogon_bad_creds.py
new file mode 100644
index 0000000..e97ca85
--- /dev/null
+++ b/python/samba/tests/auth_log_netlogon_bad_creds.py
@@ -0,0 +1,190 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2017
+# Copyright (C) Catalyst IT Ltd. 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""
+ Tests that exercise auth logging for unsuccessful netlogon attempts.
+
+ NOTE: netlogon is only done once per session, so this file should only
+ test failed logons. Adding a successful case will potentially break
+ the other tests, depending on the order of execution.
+"""
+
+import samba.tests
+import os
+from samba import NTSTATUSError
+from samba.samdb import SamDB
+import samba.tests.auth_log_base
+from samba.credentials import Credentials
+from samba.dcerpc import netlogon
+from samba.dcerpc.dcerpc import AS_SYSTEM_MAGIC_PATH_TOKEN
+from samba.auth import system_session
+from samba.tests import delete_force
+from samba.dsdb import UF_WORKSTATION_TRUST_ACCOUNT, UF_PASSWD_NOTREQD
+from samba.dcerpc.misc import SEC_CHAN_WKSTA
+from samba.dcerpc.netlogon import NETLOGON_NEG_STRONG_KEYS
+from samba.common import get_string
+from samba.dcerpc.windows_event_ids import (
+ EVT_ID_UNSUCCESSFUL_LOGON,
+ EVT_LOGON_NETWORK
+)
+
+
+class AuthLogTestsNetLogonBadCreds(samba.tests.auth_log_base.AuthLogTestBase):
+
+ def setUp(self):
+ super().setUp()
+ self.lp = samba.tests.env_loadparm()
+ self.session = system_session()
+ self.ldb = SamDB(
+ session_info=self.session,
+ lp=self.lp)
+
+ self.domain = os.environ["DOMAIN"]
+ self.netbios_name = "NetLogonBad"
+ self.machinepass = "abcdefghij"
+ self.remoteAddress = AS_SYSTEM_MAGIC_PATH_TOKEN
+ self.base_dn = self.ldb.domain_dn()
+ self.dn = ("cn=%s,cn=users,%s" % (self.netbios_name, self.base_dn))
+
+ utf16pw = get_string('"' + self.machinepass + '"').encode('utf-16-le')
+ self.ldb.add({
+ "dn": self.dn,
+ "objectclass": "computer",
+ "sAMAccountName": "%s$" % self.netbios_name,
+ "userAccountControl":
+ str(UF_WORKSTATION_TRUST_ACCOUNT | UF_PASSWD_NOTREQD),
+ "unicodePwd": utf16pw})
+
+ def tearDown(self):
+ super().tearDown()
+ delete_force(self.ldb, self.dn)
+
+ def _test_netlogon(self, name, pwd, status, checkFunction, event_id):
+
+ def isLastExpectedMessage(msg):
+ return (
+ msg["type"] == "Authentication" and
+ msg["Authentication"]["serviceDescription"] == "NETLOGON" and
+ msg["Authentication"]["authDescription"] ==
+ "ServerAuthenticate" and
+ msg["Authentication"]["status"] == status and
+ msg["Authentication"]["eventId"] == event_id and
+ msg["Authentication"]["logonType"] == EVT_LOGON_NETWORK)
+
+ machine_creds = Credentials()
+ machine_creds.guess(self.get_loadparm())
+ machine_creds.set_secure_channel_type(SEC_CHAN_WKSTA)
+ machine_creds.set_password(pwd)
+ machine_creds.set_username(name + "$")
+
+ try:
+ netlogon.netlogon("ncalrpc:[schannel]",
+ self.get_loadparm(),
+ machine_creds)
+ self.fail("NTSTATUSError not raised")
+ except NTSTATUSError:
+ pass
+
+ messages = self.waitForMessages(isLastExpectedMessage)
+ checkFunction(messages)
+
+ def netlogon_check(self, messages):
+
+ expected_messages = 4
+ self.assertEqual(expected_messages,
+ len(messages),
+ "Did not receive the expected number of messages")
+
+        # Check the first message; it should be an Authorization
+ msg = messages[0]
+ self.assertEqual("Authorization", msg["type"])
+ self.assertEqual("DCE/RPC",
+ msg["Authorization"]["serviceDescription"])
+ self.assertEqual("ncalrpc", msg["Authorization"]["authType"])
+ self.assertEqual("NONE", msg["Authorization"]["transportProtection"])
+ self.assertTrue(self.is_guid(msg["Authorization"]["sessionId"]))
+
+ def test_netlogon_bad_machine_name(self):
+ self._test_netlogon("bad_name",
+ self.machinepass,
+ "NT_STATUS_NO_TRUST_SAM_ACCOUNT",
+ self.netlogon_check,
+ EVT_ID_UNSUCCESSFUL_LOGON)
+
+ def test_netlogon_bad_password(self):
+ self._test_netlogon(self.netbios_name,
+ "badpass",
+ "NT_STATUS_ACCESS_DENIED",
+ self.netlogon_check,
+ EVT_ID_UNSUCCESSFUL_LOGON)
+
+ def test_netlogon_password_DES(self):
+ """Logon failure that exercises the "DES" passwordType path.
+ """
+ def isLastExpectedMessage(msg):
+ return (
+ msg["type"] == "Authentication" and
+ msg["Authentication"]["serviceDescription"] == "NETLOGON" and
+ msg["Authentication"]["authDescription"] ==
+ "ServerAuthenticate" and
+ msg["Authentication"]["passwordType"] == "DES" and
+ (msg["Authentication"]["eventId"] ==
+ EVT_ID_UNSUCCESSFUL_LOGON) and
+ msg["Authentication"]["logonType"] == EVT_LOGON_NETWORK)
+
+ c = netlogon.netlogon("ncalrpc:[schannel]", self.get_loadparm())
+ creds = netlogon.netr_Credential()
+ c.netr_ServerReqChallenge(self.server, self.netbios_name, creds)
+ try:
+ c.netr_ServerAuthenticate3(self.server,
+ self.netbios_name,
+ SEC_CHAN_WKSTA,
+ self.netbios_name,
+ creds,
+ 0)
+ except NTSTATUSError:
+ pass
+ self.waitForMessages(isLastExpectedMessage)
+
+ def test_netlogon_password_HMAC_MD5(self):
+ """Logon failure that exercises the "HMAC-MD5" passwordType path.
+ """
+ def isLastExpectedMessage(msg):
+ return (
+ msg["type"] == "Authentication" and
+ msg["Authentication"]["serviceDescription"] == "NETLOGON" and
+ msg["Authentication"]["authDescription"] ==
+ "ServerAuthenticate" and
+ msg["Authentication"]["passwordType"] == "HMAC-MD5" and
+ (msg["Authentication"]["eventId"] ==
+ EVT_ID_UNSUCCESSFUL_LOGON) and
+ msg["Authentication"]["logonType"] == EVT_LOGON_NETWORK)
+
+ c = netlogon.netlogon("ncalrpc:[schannel]", self.get_loadparm())
+ creds = netlogon.netr_Credential()
+ c.netr_ServerReqChallenge(self.server, self.netbios_name, creds)
+ try:
+ c.netr_ServerAuthenticate3(self.server,
+ self.netbios_name,
+ SEC_CHAN_WKSTA,
+ self.netbios_name,
+ creds,
+ NETLOGON_NEG_STRONG_KEYS)
+ except NTSTATUSError:
+ pass
+ self.waitForMessages(isLastExpectedMessage)
diff --git a/python/samba/tests/auth_log_pass_change.py b/python/samba/tests/auth_log_pass_change.py
new file mode 100644
index 0000000..29a9e38
--- /dev/null
+++ b/python/samba/tests/auth_log_pass_change.py
@@ -0,0 +1,282 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for the Auth and AuthZ logging of password changes.
+"""
+
+import samba.tests
+from samba.samdb import SamDB
+from samba.auth import system_session
+import os
+import samba.tests.auth_log_base
+from samba.tests import delete_force
+from samba.net import Net
+import samba
+from ldb import LdbError
+from samba.tests.password_test import PasswordCommon
+from samba.dcerpc.windows_event_ids import (
+ EVT_ID_SUCCESSFUL_LOGON,
+ EVT_ID_UNSUCCESSFUL_LOGON,
+ EVT_LOGON_NETWORK
+)
+
+USER_NAME = "authlogtestuser"
+USER_PASS = samba.generate_random_password(32, 32)
+
+
+class AuthLogPassChangeTests(samba.tests.auth_log_base.AuthLogTestBase):
+
+ def setUp(self):
+ super().setUp()
+
+ self.server_ip = os.environ["SERVER_IP"]
+
+ host = "ldap://%s" % os.environ["SERVER"]
+ self.ldb = SamDB(url=host,
+ session_info=system_session(),
+ credentials=self.get_credentials(),
+ lp=self.get_loadparm())
+
+ # permit password changes during this test
+ PasswordCommon.allow_password_changes(self, self.ldb)
+
+ self.base_dn = self.ldb.domain_dn()
+
+ # (Re)adds the test user USER_NAME with password USER_PASS
+ delete_force(self.ldb, "cn=" + USER_NAME + ",cn=users," + self.base_dn)
+ self.ldb.add({
+ "dn": "cn=" + USER_NAME + ",cn=users," + self.base_dn,
+ "objectclass": "user",
+ "sAMAccountName": USER_NAME,
+ "userPassword": USER_PASS
+ })
+
+ # discard any auth log messages for the password setup
+ type(self).discardMessages()
+
+ def _authDescription(self):
+ return "samr_ChangePasswordUser4"
+
+ def test_admin_change_password(self):
+ def isLastExpectedMessage(msg):
+ return ((msg["type"] == "Authentication") and
+ (msg["Authentication"]["status"] == "NT_STATUS_OK") and
+ (msg["Authentication"]["serviceDescription"] ==
+ "SAMR Password Change") and
+ (msg["Authentication"]["authDescription"] ==
+ self._authDescription()) and
+ (msg["Authentication"]["eventId"] ==
+ EVT_ID_SUCCESSFUL_LOGON) and
+ (msg["Authentication"]["logonType"] ==
+ EVT_LOGON_NETWORK))
+
+ creds = self.insta_creds(template=self.get_credentials())
+
+ lp = self.get_loadparm()
+ net = Net(creds, lp, server=self.server_ip)
+ password = "newPassword!!42"
+
+ net.change_password(newpassword=password,
+ username=USER_NAME,
+ oldpassword=USER_PASS)
+ self.assertTrue(self.waitForMessages(isLastExpectedMessage),
+ "Did not receive the expected message")
+
+ def test_admin_change_password_new_password_fails_restriction(self):
+ def isLastExpectedMessage(msg):
+ return ((msg["type"] == "Authentication") and
+ (msg["Authentication"]["status"] ==
+ "NT_STATUS_PASSWORD_RESTRICTION") and
+ (msg["Authentication"]["serviceDescription"] ==
+ "SAMR Password Change") and
+ (msg["Authentication"]["authDescription"] ==
+ self._authDescription()) and
+ (msg["Authentication"]["eventId"] ==
+ EVT_ID_UNSUCCESSFUL_LOGON) and
+ (msg["Authentication"]["logonType"] ==
+ EVT_LOGON_NETWORK))
+
+ creds = self.insta_creds(template=self.get_credentials())
+
+ lp = self.get_loadparm()
+ net = Net(creds, lp, server=self.server_ip)
+ password = "newPassword"
+
+ exception_thrown = False
+ try:
+ net.change_password(newpassword=password,
+ oldpassword=USER_PASS,
+ username=USER_NAME)
+ except Exception:
+ exception_thrown = True
+ self.assertEqual(True, exception_thrown,
+ "Expected exception not thrown")
+ self.assertTrue(self.waitForMessages(isLastExpectedMessage),
+ "Did not receive the expected message")
+
+ def test_admin_change_password_unknown_user(self):
+ def isLastExpectedMessage(msg):
+ return ((msg["type"] == "Authentication") and
+ (msg["Authentication"]["status"] ==
+ "NT_STATUS_NO_SUCH_USER") and
+ (msg["Authentication"]["serviceDescription"] ==
+ "SAMR Password Change") and
+ (msg["Authentication"]["authDescription"] ==
+ self._authDescription()) and
+ (msg["Authentication"]["eventId"] ==
+ EVT_ID_UNSUCCESSFUL_LOGON) and
+ (msg["Authentication"]["logonType"] ==
+ EVT_LOGON_NETWORK))
+
+ creds = self.insta_creds(template=self.get_credentials())
+
+ lp = self.get_loadparm()
+ net = Net(creds, lp, server=self.server_ip)
+ password = "newPassword!!42"
+
+ exception_thrown = False
+ try:
+ net.change_password(newpassword=password,
+ oldpassword=USER_PASS,
+ username="badUser")
+ except Exception:
+ exception_thrown = True
+ self.assertEqual(True, exception_thrown,
+ "Expected exception not thrown")
+
+ self.assertTrue(self.waitForMessages(isLastExpectedMessage),
+ "Did not receive the expected message")
+
+ def test_admin_change_password_bad_original_password(self):
+ def isLastExpectedMessage(msg):
+ return ((msg["type"] == "Authentication") and
+ (msg["Authentication"]["status"] ==
+ "NT_STATUS_WRONG_PASSWORD") and
+ (msg["Authentication"]["serviceDescription"] ==
+ "SAMR Password Change") and
+ (msg["Authentication"]["authDescription"] ==
+ self._authDescription()) and
+ (msg["Authentication"]["eventId"] ==
+ EVT_ID_UNSUCCESSFUL_LOGON) and
+ (msg["Authentication"]["logonType"] ==
+ EVT_LOGON_NETWORK))
+
+ creds = self.insta_creds(template=self.get_credentials())
+
+ lp = self.get_loadparm()
+ net = Net(creds, lp, server=self.server_ip)
+ password = "newPassword!!42"
+
+ exception_thrown = False
+ try:
+ net.change_password(newpassword=password,
+ oldpassword="badPassword",
+ username=USER_NAME)
+ except Exception:
+ exception_thrown = True
+ self.assertEqual(True, exception_thrown,
+ "Expected exception not thrown")
+
+ self.assertTrue(self.waitForMessages(isLastExpectedMessage),
+ "Did not receive the expected message")
+
+ def test_ldap_change_password(self):
+ def isLastExpectedMessage(msg):
+ return ((msg["type"] == "Authentication") and
+ (msg["Authentication"]["status"] == "NT_STATUS_OK") and
+ (msg["Authentication"]["serviceDescription"] ==
+ "LDAP Password Change") and
+ (msg["Authentication"]["authDescription"] ==
+ "LDAP Modify") and
+ (msg["Authentication"]["eventId"] ==
+ EVT_ID_SUCCESSFUL_LOGON) and
+ (msg["Authentication"]["logonType"] ==
+ EVT_LOGON_NETWORK))
+
+ new_password = samba.generate_random_password(32, 32)
+ self.ldb.modify_ldif(
+ "dn: cn=" + USER_NAME + ",cn=users," + self.base_dn + "\n" +
+ "changetype: modify\n" +
+ "delete: userPassword\n" +
+ "userPassword: " + USER_PASS + "\n" +
+ "add: userPassword\n" +
+ "userPassword: " + new_password + "\n")
+
+ self.assertTrue(self.waitForMessages(isLastExpectedMessage),
+ "Did not receive the expected message")
+
+ #
+ # Currently this does not get logged, so we expect to see no messages.
+ #
+ def test_ldap_change_password_bad_user(self):
+ def isLastExpectedMessage(msg):
+ msg_type = msg["type"]
+
+ # Accept any message we receive, except for those produced while
+ # the Administrator authenticates in setUp().
+ return (msg_type != "Authentication" or (
+ "Administrator" not in msg[msg_type]["clientAccount"])) and (
+ msg_type != "Authorization" or (
+ "Administrator" not in msg[msg_type]["account"]))
+
+ new_password = samba.generate_random_password(32, 32)
+ try:
+ self.ldb.modify_ldif(
+ "dn: cn=" + "badUser" + ",cn=users," + self.base_dn + "\n" +
+ "changetype: modify\n" +
+ "delete: userPassword\n" +
+ "userPassword: " + USER_PASS + "\n" +
+ "add: userPassword\n" +
+ "userPassword: " + new_password + "\n")
+ self.fail()
+ except LdbError as e:
+ (num, msg) = e.args
+ pass
+
+ self.assertFalse(self.waitForMessages(isLastExpectedMessage),
+ "Received unexpected messages")
+
+ def test_ldap_change_password_bad_original_password(self):
+ def isLastExpectedMessage(msg):
+ return ((msg["type"] == "Authentication") and
+ (msg["Authentication"]["status"] ==
+ "NT_STATUS_WRONG_PASSWORD") and
+ (msg["Authentication"]["serviceDescription"] ==
+ "LDAP Password Change") and
+ (msg["Authentication"]["authDescription"] ==
+ "LDAP Modify") and
+ (msg["Authentication"]["eventId"] ==
+ EVT_ID_UNSUCCESSFUL_LOGON) and
+ (msg["Authentication"]["logonType"] ==
+ EVT_LOGON_NETWORK))
+
+ new_password = samba.generate_random_password(32, 32)
+ try:
+ self.ldb.modify_ldif(
+ "dn: cn=" + USER_NAME + ",cn=users," + self.base_dn + "\n" +
+ "changetype: modify\n" +
+ "delete: userPassword\n" +
+ "userPassword: " + "badPassword" + "\n" +
+ "add: userPassword\n" +
+ "userPassword: " + new_password + "\n")
+ self.fail()
+ except LdbError as e1:
+ (num, msg) = e1.args
+ pass
+
+ self.assertTrue(self.waitForMessages(isLastExpectedMessage),
+ "Did not receive the expected message")
diff --git a/python/samba/tests/auth_log_samlogon.py b/python/samba/tests/auth_log_samlogon.py
new file mode 100644
index 0000000..f3dfeba
--- /dev/null
+++ b/python/samba/tests/auth_log_samlogon.py
@@ -0,0 +1,181 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""
+ Tests that exercise auth logging for SamLogon
+"""
+
+import samba.tests
+import os
+from samba.samdb import SamDB
+import samba.tests.auth_log_base
+from samba.credentials import (
+ Credentials,
+ DONT_USE_KERBEROS,
+ CLI_CRED_NTLMv2_AUTH
+)
+from samba.dcerpc import ntlmssp, netlogon
+from samba.dcerpc.dcerpc import AS_SYSTEM_MAGIC_PATH_TOKEN
+from samba.ndr import ndr_pack
+from samba.auth import system_session
+from samba.tests import delete_force
+from samba.dsdb import UF_WORKSTATION_TRUST_ACCOUNT, UF_PASSWD_NOTREQD
+from samba.dcerpc.misc import SEC_CHAN_WKSTA
+from samba.dcerpc.windows_event_ids import (
+ EVT_ID_SUCCESSFUL_LOGON,
+ EVT_LOGON_NETWORK
+)
+
+
+class AuthLogTestsSamLogon(samba.tests.auth_log_base.AuthLogTestBase):
+
+ def setUp(self):
+ super().setUp()
+ self.lp = samba.tests.env_loadparm()
+ self.session = system_session()
+ self.ldb = SamDB(
+ session_info=self.session,
+ lp=self.lp)
+
+ self.domain = os.environ["DOMAIN"]
+ self.netbios_name = "SamLogonTest"
+ self.machinepass = "abcdefghij"
+ self.remoteAddress = AS_SYSTEM_MAGIC_PATH_TOKEN
+ self.base_dn = self.ldb.domain_dn()
+ self.samlogon_dn = ("cn=%s,cn=users,%s" %
+ (self.netbios_name, self.base_dn))
+
+ def tearDown(self):
+ super().tearDown()
+ delete_force(self.ldb, self.samlogon_dn)
+
+ def _test_samlogon(self, binding, creds, checkFunction):
+
+ def isLastExpectedMessage(msg):
+ return (
+ msg["type"] == "Authentication" and
+ msg["Authentication"]["serviceDescription"] == "SamLogon" and
+ msg["Authentication"]["authDescription"] == "network" and
+ msg["Authentication"]["passwordType"] == "NTLMv2" and
+ (msg["Authentication"]["eventId"] ==
+ EVT_ID_SUCCESSFUL_LOGON) and
+ (msg["Authentication"]["logonType"] == EVT_LOGON_NETWORK))
+
+ if binding:
+ binding = "[schannel,%s]" % binding
+ else:
+ binding = "[schannel]"
+
+ utf16pw = ('"' + self.machinepass + '"').encode('utf-16-le')
+ self.ldb.add({
+ "dn": self.samlogon_dn,
+ "objectclass": "computer",
+ "sAMAccountName": "%s$" % self.netbios_name,
+ "userAccountControl":
+ str(UF_WORKSTATION_TRUST_ACCOUNT | UF_PASSWD_NOTREQD),
+ "unicodePwd": utf16pw})
+
+ machine_creds = Credentials()
+ machine_creds.guess(self.get_loadparm())
+ machine_creds.set_secure_channel_type(SEC_CHAN_WKSTA)
+ machine_creds.set_password(self.machinepass)
+ machine_creds.set_username(self.netbios_name + "$")
+
+ netlogon_conn = netlogon.netlogon("ncalrpc:%s" % binding,
+ self.get_loadparm(),
+ machine_creds)
+ challenge = b"abcdefgh"
+
+ target_info = ntlmssp.AV_PAIR_LIST()
+ target_info.count = 3
+
+ domainname = ntlmssp.AV_PAIR()
+ domainname.AvId = ntlmssp.MsvAvNbDomainName
+ domainname.Value = self.domain
+
+ computername = ntlmssp.AV_PAIR()
+ computername.AvId = ntlmssp.MsvAvNbComputerName
+ computername.Value = self.netbios_name
+
+ eol = ntlmssp.AV_PAIR()
+ eol.AvId = ntlmssp.MsvAvEOL
+ target_info.pair = [domainname, computername, eol]
+
+ target_info_blob = ndr_pack(target_info)
+
+ response = creds.get_ntlm_response(flags=CLI_CRED_NTLMv2_AUTH,
+ challenge=challenge,
+ target_info=target_info_blob)
+
+ netr_flags = 0
+
+ logon_level = netlogon.NetlogonNetworkTransitiveInformation
+ logon = samba.dcerpc.netlogon.netr_NetworkInfo()
+
+ logon.challenge = [
+ x if isinstance(x, int) else ord(x) for x in challenge]
+ logon.nt = netlogon.netr_ChallengeResponse()
+ logon.nt.length = len(response["nt_response"])
+ logon.nt.data = [
+ x if isinstance(x, int) else ord(x) for
+ x in response["nt_response"]
+ ]
+ logon.identity_info = samba.dcerpc.netlogon.netr_IdentityInfo()
+ (username, domain) = creds.get_ntlm_username_domain()
+
+ logon.identity_info.domain_name.string = domain
+ logon.identity_info.account_name.string = username
+ logon.identity_info.workstation.string = creds.get_workstation()
+
+ validation_level = samba.dcerpc.netlogon.NetlogonValidationSamInfo4
+
+ result = netlogon_conn.netr_LogonSamLogonEx(
+ os.environ["SERVER"],
+ machine_creds.get_workstation(),
+ logon_level, logon,
+ validation_level, netr_flags)
+
+ (validation, authoritative, netr_flags_out) = result
+
+ messages = self.waitForMessages(isLastExpectedMessage, netlogon_conn)
+ checkFunction(messages)
+
+ def samlogon_check(self, messages):
+
+ messages = self.remove_netlogon_messages(messages)
+ expected_messages = 5
+ self.assertEqual(expected_messages,
+ len(messages),
+ "Did not receive the expected number of messages")
+
+        # Check the first message; it should be an Authorization
+ msg = messages[0]
+ self.assertEqual("Authorization", msg["type"])
+ self.assertEqual("DCE/RPC",
+ msg["Authorization"]["serviceDescription"])
+ self.assertEqual("ncalrpc", msg["Authorization"]["authType"])
+ self.assertEqual("NONE", msg["Authorization"]["transportProtection"])
+ self.assertTrue(self.is_guid(msg["Authorization"]["sessionId"]))
+
+ def test_ncalrpc_samlogon(self):
+
+ creds = self.insta_creds(template=self.get_credentials(),
+ kerberos_state=DONT_USE_KERBEROS)
+ try:
+ self._test_samlogon("SEAL", creds, self.samlogon_check)
+ except Exception as e:
+ self.fail("Unexpected exception: " + str(e))
diff --git a/python/samba/tests/auth_log_winbind.py b/python/samba/tests/auth_log_winbind.py
new file mode 100644
index 0000000..1445eff
--- /dev/null
+++ b/python/samba/tests/auth_log_winbind.py
@@ -0,0 +1,460 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2019
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""
+ auth logging tests that exercise winbind
+"""
+
+import json
+import os
+import time
+
+from samba.auth import system_session
+from samba.credentials import Credentials
+from samba.common import get_string, get_bytes
+from samba.dcerpc.messaging import AUTH_EVENT_NAME, MSG_AUTH_LOG
+from samba.dsdb import UF_NORMAL_ACCOUNT
+from samba.messaging import Messaging
+from samba.param import LoadParm
+from samba.samdb import SamDB
+from samba.tests import delete_force, BlackboxProcessError, BlackboxTestCase
+from samba.tests.auth_log_base import AuthLogTestBase
+
+USER_NAME = "WBALU"
+
+
+class AuthLogTestsWinbind(AuthLogTestBase, BlackboxTestCase):
+
+ #
+ # Helper function to watch for authentication messages on the
+ # Domain Controller.
+ #
+ def dc_watcher(self):
+
+ (r1, w1) = os.pipe()
+ pid = os.fork()
+ if pid != 0:
+ # Parent process return the result socket to the caller.
+ return r1
+
+ # Load the lp context for the Domain Controller, rather than the
+ # member server.
+ config_file = os.environ["DC_SERVERCONFFILE"]
+ lp_ctx = LoadParm()
+ lp_ctx.load(config_file)
+
+ #
+ # Is the message a SamLogon authentication?
+ def is_sam_logon(m):
+ if m is None:
+ return False
+ msg = json.loads(m)
+ return (
+ msg["type"] == "Authentication" and
+ msg["Authentication"]["serviceDescription"] == "SamLogon")
+
+ #
+ # Handler function for received authentication messages.
+ def message_handler(context, msgType, src, message):
+ # Print the message to help debugging the tests.
+ # as it's a JSON message it does not look like a sub-unit message.
+ print(message)
+ self.dc_msgs.append(message)
+
+ # Set up a messaging context to listen for authentication events on
+ # the domain controller.
+ msg_ctx = Messaging((1,), lp_ctx=lp_ctx)
+ msg_ctx.irpc_add_name(AUTH_EVENT_NAME)
+ msg_handler_and_context = (message_handler, None)
+ msg_ctx.register(msg_handler_and_context, msg_type=MSG_AUTH_LOG)
+
+ # Wait for the SamLogon message.
+ # As there could be other SamLogon's in progress we need to collect
+ # all the SamLogons and let the caller match them to the session.
+ self.dc_msgs = []
+ start_time = time.time()
+ while (time.time() - start_time < 1):
+ msg_ctx.loop_once(0.1)
+
+ # Only interested in SamLogon messages, filter out the rest
+ msgs = list(filter(is_sam_logon, self.dc_msgs))
+ if msgs:
+ for m in msgs:
+ os.write(w1, get_bytes(m+"\n"))
+ else:
+ os.write(w1, get_bytes("None\n"))
+ os.close(w1)
+
+ msg_ctx.deregister(msg_handler_and_context, msg_type=MSG_AUTH_LOG)
+ msg_ctx.irpc_remove_name(AUTH_EVENT_NAME)
+
+ os._exit(0)
+
+ # Remove any DCE/RPC ncacn_np messages
+ # these only get triggered once per session, and stripping them out
+ # avoids ordering dependencies in the tests
+ #
+ def filter_messages(self, messages):
+ def keep(msg):
+ if (msg["type"] == "Authorization" and
+ msg["Authorization"]["serviceDescription"] == "DCE/RPC" and
+ msg["Authorization"]["authType"] == "ncacn_np"):
+ return False
+ else:
+ return True
+
+ return list(filter(keep, messages))
+
+ def setUp(self):
+ super().setUp()
+ self.domain = os.environ["DOMAIN"]
+ self.host = os.environ["SERVER"]
+ self.dc = os.environ["DC_SERVER"]
+ self.lp = self.get_loadparm()
+ self.credentials = self.get_credentials()
+ self.session = system_session()
+
+ self.ldb = SamDB(
+ url="ldap://{0}".format(self.dc),
+ session_info=self.session,
+ credentials=self.credentials,
+ lp=self.lp)
+ self.create_user_account()
+
+ self.remoteAddress = ''
+
+ def tearDown(self):
+ super().tearDown()
+ delete_force(self.ldb, self.user_dn)
+
+ #
+ # Create a test user account
+ def create_user_account(self):
+ self.user_pass = self.random_password()
+ self.user_name = USER_NAME
+ self.user_dn = "cn=%s,%s" % (self.user_name, self.ldb.domain_dn())
+
+        # remove the account if it exists; this will happen if a previous test
+ # run failed
+ delete_force(self.ldb, self.user_dn)
+
+ utf16pw = ('"%s"' % get_string(self.user_pass)).encode('utf-16-le')
+ self.ldb.add({
+ "dn": self.user_dn,
+ "objectclass": "user",
+ "sAMAccountName": "%s" % self.user_name,
+ "userAccountControl": str(UF_NORMAL_ACCOUNT),
+ "unicodePwd": utf16pw})
+
+ self.user_creds = Credentials()
+ self.user_creds.guess(self.get_loadparm())
+ self.user_creds.set_password(self.user_pass)
+ self.user_creds.set_username(self.user_name)
+ self.user_creds.set_workstation(self.server)
+
+ #
+ # Check that the domain server received a SamLogon request for the
+ # current logon.
+ #
+ def check_domain_server_authentication(self, pipe, logon_id, description):
+
+ messages = os.read(pipe, 8192)
+ messages = get_string(messages)
+ if len(messages) == 0 or messages == "None":
+ self.fail("No Domain server authentication message")
+
+ #
+ # Look for the SamLogon request matching logon_id
+ msg = None
+ for message in messages.split("\n"):
+ msg = json.loads(get_string(message))
+ if logon_id == msg["Authentication"]["logonId"]:
+ break
+ msg = None
+
+ if msg is None:
+ self.fail("No Domain server authentication message")
+
+ #
+ # Validate that message contains the expected data
+ #
+ self.assertEqual("Authentication", msg["type"])
+ self.assertEqual(logon_id, msg["Authentication"]["logonId"])
+ self.assertEqual("SamLogon",
+ msg["Authentication"]["serviceDescription"])
+ self.assertEqual(description,
+ msg["Authentication"]["authDescription"])
+
+ def test_ntlm_auth(self):
+
+ def isLastExpectedMessage(msg):
+ DESC = "PAM_AUTH, ntlm_auth"
+ return (
+ msg["type"] == "Authentication" and
+ msg["Authentication"]["serviceDescription"] == "winbind" and
+ msg["Authentication"]["authDescription"] is not None and
+ msg["Authentication"]["authDescription"].startswith(DESC))
+
+ pipe = self.dc_watcher()
+ COMMAND = "bin/ntlm_auth"
+ self.check_run("{0} --username={1} --password={2}".format(
+ COMMAND,
+ self.credentials.get_username(),
+ self.credentials.get_password()),
+ msg="ntlm_auth failed")
+
+ messages = self.waitForMessages(isLastExpectedMessage)
+ messages = self.filter_messages(messages)
+ expected_messages = 1
+ self.assertEqual(expected_messages,
+ len(messages),
+ "Did not receive the expected number of messages")
+
+ # Check the first message it should be an Authentication
+ msg = messages[0]
+ self.assertEqual("Authentication", msg["type"])
+ self.assertTrue(
+ msg["Authentication"]["authDescription"].startswith(
+ "PAM_AUTH, ntlm_auth,"))
+ self.assertEqual("winbind",
+ msg["Authentication"]["serviceDescription"])
+ self.assertEqual("Plaintext", msg["Authentication"]["passwordType"])
+ # Logon type should be NetworkCleartext
+ self.assertEqual(8, msg["Authentication"]["logonType"])
+ # Event code should be Successful logon
+ self.assertEqual(4624, msg["Authentication"]["eventId"])
+ self.assertEqual("unix:", msg["Authentication"]["remoteAddress"])
+ self.assertEqual("unix:", msg["Authentication"]["localAddress"])
+ self.assertEqual(self.domain, msg["Authentication"]["clientDomain"])
+ self.assertEqual("NT_STATUS_OK", msg["Authentication"]["status"])
+ self.assertEqual(self.credentials.get_username(),
+ msg["Authentication"]["clientAccount"])
+ self.assertEqual(self.credentials.get_domain(),
+ msg["Authentication"]["clientDomain"])
+ self.assertTrue(msg["Authentication"]["workstation"] is None)
+
+ logon_id = msg["Authentication"]["logonId"]
+
+ #
+ # Now check the Domain server authentication message
+ #
+ self.check_domain_server_authentication(pipe, logon_id, "interactive")
+
+    def test_wbinfo(self):
+        """Check the Authentication messages generated by `wbinfo -a`.
+
+        Expects three local winbind Authentication messages, in order:
+        PASSDB (fails, NT_STATUS_NO_SUCH_USER), PAM_AUTH (fails, same
+        logon id) and NTLM_AUTH (succeeds), then verifies the matching
+        network authentication seen by the domain controller.
+        """
+        def isLastExpectedMessage(msg):
+            # The NTLM_AUTH message is the last one winbind emits locally.
+            DESC = "NTLM_AUTH, wbinfo"
+            return (
+                msg["type"] == "Authentication" and
+                msg["Authentication"]["serviceDescription"] == "winbind" and
+                msg["Authentication"]["authDescription"] is not None and
+                msg["Authentication"]["authDescription"].startswith(DESC))
+
+        pipe = self.dc_watcher()
+        COMMAND = "bin/wbinfo"
+        try:
+            self.check_run("{0} -a {1}%{2}".format(
+                COMMAND,
+                self.credentials.get_username(),
+                self.credentials.get_password()),
+                msg="ntlm_auth failed")
+        except BlackboxProcessError:
+            # The command's exit status is not under test here; only the
+            # audit messages it generates are checked below.
+            pass
+
+        messages = self.waitForMessages(isLastExpectedMessage)
+        messages = self.filter_messages(messages)
+        expected_messages = 3
+        self.assertEqual(expected_messages,
+                         len(messages),
+                         "Did not receive the expected number of messages")
+
+        # The 1st message should be an Authentication against the local
+        # password database
+        msg = messages[0]
+        self.assertEqual("Authentication", msg["type"])
+        self.assertTrue(msg["Authentication"]["authDescription"].startswith(
+            "PASSDB, wbinfo,"))
+        self.assertEqual("winbind",
+                         msg["Authentication"]["serviceDescription"])
+        # Logon type should be Interactive
+        self.assertEqual(2, msg["Authentication"]["logonType"])
+        # Event code should be Unsuccessful logon
+        self.assertEqual(4625, msg["Authentication"]["eventId"])
+        self.assertEqual("unix:", msg["Authentication"]["remoteAddress"])
+        self.assertEqual("unix:", msg["Authentication"]["localAddress"])
+        self.assertEqual('', msg["Authentication"]["clientDomain"])
+        # This is what the existing winbind implementation returns.
+        self.assertEqual("NT_STATUS_NO_SUCH_USER",
+                         msg["Authentication"]["status"])
+        self.assertEqual("NTLMv2", msg["Authentication"]["passwordType"])
+        self.assertEqual(self.credentials.get_username(),
+                         msg["Authentication"]["clientAccount"])
+        # NOTE(review): duplicate of the clientDomain check above.
+        self.assertEqual("", msg["Authentication"]["clientDomain"])
+
+        logon_id = msg["Authentication"]["logonId"]
+
+        # The 2nd message should be a PAM_AUTH with the same logon id as the
+        # 1st message
+        msg = messages[1]
+        self.assertEqual("Authentication", msg["type"])
+        self.assertTrue(msg["Authentication"]["authDescription"].startswith(
+            "PAM_AUTH"))
+        self.assertEqual("winbind",
+                         msg["Authentication"]["serviceDescription"])
+        self.assertEqual(logon_id, msg["Authentication"]["logonId"])
+        # Logon type should be NetworkCleartext
+        self.assertEqual(8, msg["Authentication"]["logonType"])
+        # Event code should be Unsuccessful logon
+        self.assertEqual(4625, msg["Authentication"]["eventId"])
+        self.assertEqual("unix:", msg["Authentication"]["remoteAddress"])
+        self.assertEqual("unix:", msg["Authentication"]["localAddress"])
+        self.assertEqual('', msg["Authentication"]["clientDomain"])
+        # This is what the existing winbind implementation returns.
+        self.assertEqual("NT_STATUS_NO_SUCH_USER",
+                         msg["Authentication"]["status"])
+        self.assertEqual(self.credentials.get_username(),
+                         msg["Authentication"]["clientAccount"])
+        # NOTE(review): duplicate of the clientDomain check above.
+        self.assertEqual("", msg["Authentication"]["clientDomain"])
+
+        # The 3rd message should be an NTLM_AUTH
+        msg = messages[2]
+        self.assertEqual("Authentication", msg["type"])
+        self.assertTrue(msg["Authentication"]["authDescription"].startswith(
+            "NTLM_AUTH, wbinfo,"))
+        self.assertEqual("winbind",
+                         msg["Authentication"]["serviceDescription"])
+        # Logon type should be Network
+        self.assertEqual(3, msg["Authentication"]["logonType"])
+        self.assertEqual("NT_STATUS_OK", msg["Authentication"]["status"])
+        # Event code should be successful logon
+        self.assertEqual(4624, msg["Authentication"]["eventId"])
+        self.assertEqual("NTLMv2", msg["Authentication"]["passwordType"])
+        self.assertEqual("unix:", msg["Authentication"]["remoteAddress"])
+        self.assertEqual("unix:", msg["Authentication"]["localAddress"])
+        self.assertEqual(self.credentials.get_username(),
+                         msg["Authentication"]["clientAccount"])
+        self.assertEqual(self.credentials.get_domain(),
+                         msg["Authentication"]["clientDomain"])
+
+        logon_id = msg["Authentication"]["logonId"]
+
+        #
+        # Now check the Domain server authentication message
+        #
+        self.check_domain_server_authentication(pipe, logon_id, "network")
+
+    def test_wbinfo_ntlmv1(self):
+        """Check the Authentication messages for `wbinfo --ntlmv1 -a`.
+
+        Same three-message sequence as test_wbinfo (PASSDB, PAM_AUTH,
+        NTLM_AUTH), but additionally checks that the PAM_AUTH step
+        reports a Plaintext password and the final NTLM_AUTH step
+        reports NTLMv1 rather than NTLMv2.
+        """
+        def isLastExpectedMessage(msg):
+            # The NTLM_AUTH message is the last one winbind emits locally.
+            DESC = "NTLM_AUTH, wbinfo"
+            return (
+                msg["type"] == "Authentication" and
+                msg["Authentication"]["serviceDescription"] == "winbind" and
+                msg["Authentication"]["authDescription"] is not None and
+                msg["Authentication"]["authDescription"].startswith(DESC))
+
+        pipe = self.dc_watcher()
+        COMMAND = "bin/wbinfo"
+        try:
+            self.check_run("{0} --ntlmv1 -a {1}%{2}".format(
+                COMMAND,
+                self.credentials.get_username(),
+                self.credentials.get_password()),
+                msg="ntlm_auth failed")
+        except BlackboxProcessError:
+            # The command's exit status is not under test here; only the
+            # audit messages it generates are checked below.
+            pass
+
+        messages = self.waitForMessages(isLastExpectedMessage)
+        messages = self.filter_messages(messages)
+        expected_messages = 3
+        self.assertEqual(expected_messages,
+                         len(messages),
+                         "Did not receive the expected number of messages")
+
+        # The 1st message should be an Authentication against the local
+        # password database
+        msg = messages[0]
+        self.assertEqual("Authentication", msg["type"])
+        self.assertTrue(msg["Authentication"]["authDescription"].startswith(
+            "PASSDB, wbinfo,"))
+        self.assertEqual("winbind",
+                         msg["Authentication"]["serviceDescription"])
+        # Logon type should be Interactive
+        self.assertEqual(2, msg["Authentication"]["logonType"])
+        # Event code should be Unsuccessful logon
+        self.assertEqual(4625, msg["Authentication"]["eventId"])
+        self.assertEqual("unix:", msg["Authentication"]["remoteAddress"])
+        self.assertEqual("unix:", msg["Authentication"]["localAddress"])
+        self.assertEqual('', msg["Authentication"]["clientDomain"])
+        # This is what the existing winbind implementation returns.
+        self.assertEqual("NT_STATUS_NO_SUCH_USER",
+                         msg["Authentication"]["status"])
+        self.assertEqual("NTLMv2", msg["Authentication"]["passwordType"])
+        self.assertEqual(self.credentials.get_username(),
+                         msg["Authentication"]["clientAccount"])
+        # NOTE(review): duplicate of the clientDomain check above.
+        self.assertEqual("", msg["Authentication"]["clientDomain"])
+
+        logon_id = msg["Authentication"]["logonId"]
+
+        # The 2nd message should be a PAM_AUTH with the same logon id as the
+        # 1st message
+        msg = messages[1]
+        self.assertEqual("Authentication", msg["type"])
+        self.assertTrue(msg["Authentication"]["authDescription"].startswith(
+            "PAM_AUTH"))
+        self.assertEqual("winbind",
+                         msg["Authentication"]["serviceDescription"])
+        self.assertEqual(logon_id, msg["Authentication"]["logonId"])
+        self.assertEqual("Plaintext", msg["Authentication"]["passwordType"])
+        # Logon type should be NetworkCleartext
+        self.assertEqual(8, msg["Authentication"]["logonType"])
+        # Event code should be Unsuccessful logon
+        self.assertEqual(4625, msg["Authentication"]["eventId"])
+        self.assertEqual("unix:", msg["Authentication"]["remoteAddress"])
+        self.assertEqual("unix:", msg["Authentication"]["localAddress"])
+        self.assertEqual('', msg["Authentication"]["clientDomain"])
+        # This is what the existing winbind implementation returns.
+        self.assertEqual("NT_STATUS_NO_SUCH_USER",
+                         msg["Authentication"]["status"])
+        self.assertEqual(self.credentials.get_username(),
+                         msg["Authentication"]["clientAccount"])
+        # NOTE(review): duplicate of the clientDomain check above.
+        self.assertEqual("", msg["Authentication"]["clientDomain"])
+
+        # The 3rd message should be an NTLM_AUTH
+        msg = messages[2]
+        self.assertEqual("Authentication", msg["type"])
+        self.assertTrue(msg["Authentication"]["authDescription"].startswith(
+            "NTLM_AUTH, wbinfo,"))
+        self.assertEqual("winbind",
+                         msg["Authentication"]["serviceDescription"])
+        self.assertEqual("NTLMv1",
+                         msg["Authentication"]["passwordType"])
+        # Logon type should be Network
+        self.assertEqual(3, msg["Authentication"]["logonType"])
+        self.assertEqual("NT_STATUS_OK", msg["Authentication"]["status"])
+        # Event code should be successful logon
+        self.assertEqual(4624, msg["Authentication"]["eventId"])
+        self.assertEqual("unix:", msg["Authentication"]["remoteAddress"])
+        self.assertEqual("unix:", msg["Authentication"]["localAddress"])
+        self.assertEqual(self.credentials.get_username(),
+                         msg["Authentication"]["clientAccount"])
+        self.assertEqual(self.credentials.get_domain(),
+                         msg["Authentication"]["clientDomain"])
+
+        logon_id = msg["Authentication"]["logonId"]
+        #
+        # Now check the Domain server authentication message
+        #
+        self.check_domain_server_authentication(pipe, logon_id, "network")
diff --git a/python/samba/tests/bin/cepces-submit b/python/samba/tests/bin/cepces-submit
new file mode 100755
index 0000000..de63164
--- /dev/null
+++ b/python/samba/tests/bin/cepces-submit
@@ -0,0 +1,18 @@
+#!/usr/bin/python3
+# Test stub standing in for the cepces-submit certmonger helper.
+# It validates the arguments the caller passes (--server must be set,
+# --auth must be 'Kerberos') and, when certmonger asks for the supported
+# templates via CERTMONGER_OPERATION=GET-SUPPORTED-TEMPLATES, prints the
+# templates listed in CEPCES_SUBMIT_SUPPORTED_TEMPLATES (default
+# 'Machine'), one per line.
+import optparse
+import os, sys, re
+
+sys.path.insert(0, "bin/python")
+
+if __name__ == "__main__":
+    parser = optparse.OptionParser('cepces-submit [options]')
+    parser.add_option('--server')
+    parser.add_option('--auth')
+
+    (opts, args) = parser.parse_args()
+    assert opts.server is not None
+    assert opts.auth == 'Kerberos'
+    if 'CERTMONGER_OPERATION' in os.environ and \
+       os.environ['CERTMONGER_OPERATION'] == 'GET-SUPPORTED-TEMPLATES':
+        templates = os.environ.get('CEPCES_SUBMIT_SUPPORTED_TEMPLATES', 'Machine').split(',')
+        print('\n'.join(templates)) # Report the requested templates
diff --git a/python/samba/tests/bin/crontab b/python/samba/tests/bin/crontab
new file mode 100755
index 0000000..764d584
--- /dev/null
+++ b/python/samba/tests/bin/crontab
@@ -0,0 +1,29 @@
+#!/usr/bin/python3
+# Test stub standing in for crontab(1).
+# Installing a crontab file (`crontab <file> [-u user]`) copies it to
+# crontab.dump in the test directory; `-l` prints that dump back.
+# The -u argument, when given, must match the DC_USERNAME environment
+# variable set by the test environment.
+import optparse
+import os, sys
+from shutil import copy
+
+sys.path.insert(0, "bin/python")
+
+if __name__ == "__main__":
+    parser = optparse.OptionParser('crontab <file> [options]')
+    parser.add_option('-l', action="store_true")
+    parser.add_option('-u')
+
+    (opts, args) = parser.parse_args()
+
+    # Use a dir we can write to in the testenv
+    if 'LOCAL_PATH' in os.environ:
+        data_dir = os.path.realpath(os.environ.get('LOCAL_PATH'))
+    else:
+        data_dir = os.path.dirname(os.path.realpath(__file__))
+    dump_file = os.path.join(data_dir, 'crontab.dump')
+    if opts.u:
+        assert opts.u == os.environ.get('DC_USERNAME')
+    if len(args) == 1:
+        assert os.path.exists(args[0])
+        copy(args[0], dump_file)
+    elif opts.l:
+        if os.path.exists(dump_file):
+            with open(dump_file, 'r') as r:
+                print(r.read())
diff --git a/python/samba/tests/bin/firewall-cmd b/python/samba/tests/bin/firewall-cmd
new file mode 100755
index 0000000..3bc69da
--- /dev/null
+++ b/python/samba/tests/bin/firewall-cmd
@@ -0,0 +1,114 @@
+#!/usr/bin/python3
+# Test stub standing in for firewall-cmd(1).
+# Zones, zone interfaces and rich rules are kept in a dict that is
+# pickled to firewall-cmd.dump in the test directory when --permanent is
+# given (and the dump is deleted once all state is removed).  If the
+# real firewalld python module is importable, rich rules are round-
+# tripped through Rich_Rule so malformed rules raise.
+import optparse
+import os, sys, re
+import pickle
+try:
+    from firewall.core.rich import Rich_Rule
+except ImportError:
+    Rich_Rule = None
+
+sys.path.insert(0, "bin/python")
+
+if __name__ == "__main__":
+    parser = optparse.OptionParser('firewall-cmd [options]')
+    parser.add_option('--list-interfaces', default=False, action="store_true")
+    parser.add_option('--permanent', default=False, action="store_true")
+    parser.add_option('--new-zone')
+    parser.add_option('--get-zones', default=False, action="store_true")
+    parser.add_option('--delete-zone')
+    parser.add_option('--zone')
+    parser.add_option('--add-interface')
+    parser.add_option('--add-rich-rule')
+    parser.add_option('--remove-rich-rule')
+    parser.add_option('--list-rich-rules', default=False, action="store_true")
+
+    (opts, args) = parser.parse_args()
+
+    # Use a dir we can write to in the testenv
+    if 'LOCAL_PATH' in os.environ:
+        data_dir = os.path.realpath(os.environ.get('LOCAL_PATH'))
+    else:
+        data_dir = os.path.dirname(os.path.realpath(__file__))
+    dump_file = os.path.join(data_dir, 'firewall-cmd.dump')
+    if os.path.exists(dump_file):
+        with open(dump_file, 'rb') as r:
+            data = pickle.load(r)
+    else:
+        data = {}
+
+    if opts.list_interfaces:
+        if not opts.zone: # default zone dummy interface
+            print('eth0')
+        else:
+            assert 'zone_interfaces' in data
+            assert opts.zone in data['zone_interfaces'].keys()
+            for interface in data['zone_interfaces'][opts.zone]:
+                sys.stdout.write('%s ' % interface)
+            print()
+    elif opts.new_zone:
+        if 'zones' not in data:
+            data['zones'] = []
+        if opts.new_zone not in data['zones']:
+            data['zones'].append(opts.new_zone)
+    elif opts.get_zones:
+        if 'zones' in data:
+            for zone in data['zones']:
+                sys.stdout.write('%s ' % zone)
+            print()
+    elif opts.delete_zone:
+        assert 'zones' in data
+        assert opts.delete_zone in data['zones']
+        data['zones'].remove(opts.delete_zone)
+        if len(data['zones']) == 0:
+            del data['zones']
+        # Dropping a zone also drops any interfaces recorded for it.
+        if 'zone_interfaces' in data and opts.zone in data['zone_interfaces'].keys():
+            del data['zone_interfaces'][opts.zone]
+    elif opts.add_interface:
+        assert opts.zone
+        assert 'zones' in data
+        assert opts.zone in data['zones']
+        if 'zone_interfaces' not in data:
+            data['zone_interfaces'] = {}
+        if opts.zone not in data['zone_interfaces'].keys():
+            data['zone_interfaces'][opts.zone] = []
+        if opts.add_interface not in data['zone_interfaces'][opts.zone]:
+            data['zone_interfaces'][opts.zone].append(opts.add_interface)
+    elif opts.add_rich_rule:
+        assert opts.zone
+        if 'rules' not in data:
+            data['rules'] = {}
+        if opts.zone not in data['rules']:
+            data['rules'][opts.zone] = []
+        # Test rule parsing if firewalld is installed
+        if Rich_Rule:
+            # Parsing failure will throw an exception
+            rule = str(Rich_Rule(rule_str=opts.add_rich_rule))
+        else:
+            rule = opts.add_rich_rule
+        if rule not in data['rules'][opts.zone]:
+            data['rules'][opts.zone].append(rule)
+    elif opts.remove_rich_rule:
+        assert opts.zone
+        assert 'rules' in data
+        assert opts.zone in data['rules'].keys()
+        if Rich_Rule:
+            rich_rule = str(Rich_Rule(rule_str=opts.remove_rich_rule))
+            assert rich_rule in data['rules'][opts.zone]
+            data['rules'][opts.zone].remove(rich_rule)
+        else:
+            assert opts.remove_rich_rule in data['rules'][opts.zone]
+            data['rules'][opts.zone].remove(opts.remove_rich_rule)
+    elif opts.list_rich_rules:
+        assert opts.zone
+        assert 'rules' in data
+        assert opts.zone in data['rules'].keys()
+        for rule in data['rules'][opts.zone]:
+            print(rule)
+
+    # Persist (or clean up) the recorded state only for --permanent calls.
+    if opts.permanent:
+        if data == {}:
+            if os.path.exists(dump_file):
+                os.unlink(dump_file)
+        else:
+            with open(dump_file, 'wb') as w:
+                pickle.dump(data, w)
diff --git a/python/samba/tests/bin/getcert b/python/samba/tests/bin/getcert
new file mode 100755
index 0000000..93895eb
--- /dev/null
+++ b/python/samba/tests/bin/getcert
@@ -0,0 +1,84 @@
+#!/usr/bin/python3
+# Test stub standing in for certmonger's getcert(1).
+# Fake CAs and certificate requests are kept in two dicts pickled to
+# getcert.dump in the test directory; 'request' also creates empty dummy
+# key and cert files at the paths given by -k and -f.  The dump is
+# removed once no CAs or requests remain.
+import optparse
+import os, sys, re
+import pickle
+
+sys.path.insert(0, "bin/python")
+
+if __name__ == "__main__":
+    parser = optparse.OptionParser('getcert <cmd> [options]')
+    parser.add_option('-i')
+    parser.add_option('-c')
+    parser.add_option('-T')
+    parser.add_option('-I')
+    parser.add_option('-k')
+    parser.add_option('-f')
+    parser.add_option('-e')
+    parser.add_option('-g')
+
+    (opts, args) = parser.parse_args()
+    assert len(args) == 1
+    assert args[0] in ['add-ca', 'request', 'remove-ca', 'stop-tracking',
+                       'list', 'list-cas']
+
+    # Use a dir we can write to in the testenv
+    if 'LOCAL_PATH' in os.environ:
+        data_dir = os.path.realpath(os.environ.get('LOCAL_PATH'))
+    else:
+        data_dir = os.path.dirname(os.path.realpath(__file__))
+    dump_file = os.path.join(data_dir, 'getcert.dump')
+    if os.path.exists(dump_file):
+        with open(dump_file, 'rb') as r:
+            cas, certs = pickle.load(r)
+    else:
+        cas = {}
+        certs = {}
+    if args[0] == 'add-ca':
+        # Add a fake CA entry
+        assert opts.c not in cas.keys()
+        cas[opts.c] = opts.e
+    elif args[0] == 'remove-ca':
+        # Remove a fake CA entry
+        assert opts.c in cas.keys()
+        del cas[opts.c]
+    elif args[0] == 'list-cas':
+        # List the fake CAs
+        for ca, helper_location in cas.items():
+            print('CA \'%s\':\n\tis-default: no\n\tca-type: EXTERNAL\n' % ca +
+                  '\thelper-location: %s' % helper_location)
+    elif args[0] == 'request':
+        # Add a fake cert request
+        assert opts.c in cas.keys()
+        assert opts.I not in certs.keys()
+        certs[opts.I] = { 'ca': opts.c, 'template': opts.T,
+                          'keyfile': os.path.abspath(opts.k),
+                          'certfile': os.path.abspath(opts.f),
+                          'keysize': opts.g }
+        # Create dummy key and cert (empty files)
+        with open(opts.k, 'w') as w:
+            pass
+        with open(opts.f, 'w') as w:
+            pass
+    elif args[0] == 'stop-tracking':
+        # Remove the fake cert request
+        assert opts.i in certs.keys()
+        del certs[opts.i]
+    elif args[0] == 'list':
+        # List the fake cert requests
+        print('Number of certificates and requests being tracked: %d.' % \
+              len(certs))
+        for rid, data in certs.items():
+            print('Request ID \'%s\':\n\tstatus: MONITORING\n' % rid +
+                  '\tstuck: no\n\tkey pair storage: type=FILE,' +
+                  'location=\'%s\'' % data['keyfile'] + '\n\t' +
+                  'certificate: type=FILE,location=\'%s\'' % data['certfile'] +
+                  '\n\tCA: %s\n\t' % data['ca'] +
+                  'certificate template/profile: %s\n\t' % data['template'] +
+                  'track: yes\n\tauto-renew: yes')
+
+    # Persist the fake state, or remove the dump once everything is gone.
+    if len(cas.items()) == 0 and len(certs.items()) == 0:
+        if os.path.exists(dump_file):
+            os.unlink(dump_file)
+    else:
+        with open(dump_file, 'wb') as w:
+            pickle.dump((cas, certs), w)
diff --git a/python/samba/tests/bin/gio b/python/samba/tests/bin/gio
new file mode 100755
index 0000000..30e31ac
--- /dev/null
+++ b/python/samba/tests/bin/gio
@@ -0,0 +1,11 @@
+#!/usr/bin/python3
+# Test stub standing in for gio(1).  It only validates that it was
+# invoked as `gio mount <url>`; no mount is actually performed.
+import optparse
+
+if __name__ == "__main__":
+    parser = optparse.OptionParser('gio <cmd> <url> [options]')
+    parser.add_option('--unmount')
+
+    (opts, args) = parser.parse_args()
+
+    assert args[0] == 'mount', 'Unrecognized command `gio %s`' % args[0]
+    assert len(args) == 2, 'Missing url parameter'
diff --git a/python/samba/tests/blackbox/__init__.py b/python/samba/tests/blackbox/__init__.py
new file mode 100644
index 0000000..361e5cf
--- /dev/null
+++ b/python/samba/tests/blackbox/__init__.py
@@ -0,0 +1,17 @@
+# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2008
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Blackbox tests. """
diff --git a/python/samba/tests/blackbox/bug13653.py b/python/samba/tests/blackbox/bug13653.py
new file mode 100644
index 0000000..215b9fc
--- /dev/null
+++ b/python/samba/tests/blackbox/bug13653.py
@@ -0,0 +1,216 @@
+# Black box tests verify bug 13653
+#
+# Copyright (C) Catalyst.Net Ltd'. 2018
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""Blackbox test verifying bug 13653
+
+https://bugzilla.samba.org/show_bug.cgi?id=13653
+
+
+When creating a new user and specifying the local filepath of the sam.ldb DB,
+it's possible to create an account that you can't actually login with.
+
+This only happens if the DB is using encrypted secrets and you specify "ldb://"
+in the sam.ldb path, e.g. "-H ldb://st/ad_dc/private/sam.ldb".
+The user account will be created, but its secrets will not be encrypted.
+Attempts to login as the user will then be rejected due to invalid credentials.
+
+We think this may also cause replication/joins to break.
+
+You do get a warning about "No encrypted secrets key file" when this happens,
+although the reason behind this message is not obvious. Specifying a "tdb://"
+prefix, or not specifying a prefix, works fine.
+
+Example of the problem below using the ad_dc testenv.
+
+addc$ bin/samba-tool user create tdb-user pass12#
+ -H tdb://st/ad_dc/private/sam.ldb
+User 'tdb-user' created successfully
+
+# HERE: using the "ldb://" prefix generates a warning, but the user is still
+# created successfully.
+
+addc$ bin/samba-tool user create ldb-user pass12#
+ -H ldb://st/ad_dc/private/sam.ldb
+No encrypted secrets key file. Secret attributes will not be encrypted or
+decrypted
+
+User 'ldb-user' created successfully
+
+addc$ bin/samba-tool user create noprefix-user pass12#
+ -H st/ad_dc/private/sam.ldb
+User 'noprefix-user' created successfully
+
+addc$ bin/ldbsearch -H ldap://$SERVER -Utdb-user%pass12# '(cn=tdb-user)' dn
+# record 1
+dn: CN=tdb-user,CN=Users,DC=addom,DC=samba,DC=example,DC=com
+
+# Referral
+ref: ldap://addom.samba.example.com/CN=Configuration,DC=addom,DC=samba,
+ DC=example,DC=com
+
+# Referral
+ref: ldap://addom.samba.example.com/DC=DomainDnsZones,DC=addom,DC=samba,
+ DC=example,DC=com
+
+# Referral
+ref: ldap://addom.samba.example.com/DC=ForestDnsZones,DC=addom,DC=samba,
+ DC=example,DC=com
+
+# returned 4 records
+# 1 entries
+# 3 referrals
+
+# HERE: can't login as the user created with "ldb://" prefix
+
+addc$ bin/ldbsearch -H ldap://$SERVER -Uldb-user%pass12# '(cn=ldb-user)' dn
+Wrong username or password: kinit for ldb-user@ADDOM.SAMBA.EXAMPLE.COM failed
+(Client not found in Kerberos database)
+
+Failed to bind - LDAP error 49 LDAP_INVALID_CREDENTIALS
+ - <8009030C: LdapErr: DSID-0C0904DC,
+ comment: AcceptSecurityContext error, data 54e, v1db1> <>
+Failed to connect to 'ldap://addc' with backend
+ 'ldap': LDAP error 49 LDAP_INVALID_CREDENTIALS
+ - <8009030C: LdapErr: DSID-0C0904DC,
+ comment: AcceptSecurityContext error, data 54e, v1db1> <>
+Failed to connect to ldap://addc - LDAP error 49 LDAP_INVALID_CREDENTIALS
+ - <8009030C: LdapErr: DSID-0C0904DC,
+ comment: AcceptSecurityContext error, data 54e, v1db1> <>
+addc$ bin/ldbsearch -H ldap://$SERVER -Unoprefix-user%pass12#
+ '(cn=noprefix-user)' dn
+# record 1
+dn: CN=noprefix-user,CN=Users,DC=addom,DC=samba,DC=example,DC=com
+
+# Referral
+ref: ldap://addom.samba.example.com/CN=Configuration,DC=addom,DC=samba,
+ DC=example,DC=com
+
+# Referral
+ref: ldap://addom.samba.example.com/DC=DomainDnsZones,DC=addom,DC=samba,
+ DC=example,DC=com
+
+# Referral
+ref: ldap://addom.samba.example.com/DC=ForestDnsZones,DC=addom,DC=samba,
+ DC=example,DC=com
+
+# returned 4 records
+# 1 entries
+# 3 referrals
+"""
+
+from samba.tests import (
+ BlackboxTestCase,
+ BlackboxProcessError,
+ delete_force,
+ env_loadparm)
+from samba.credentials import Credentials
+from samba.samdb import SamDB
+from samba.auth import system_session
+import os
+
+
+class Bug13653Tests(BlackboxTestCase):
+    """Verify bug 13653: users created via samba-tool with an explicit
+    sam.ldb URL must be able to log on, regardless of whether the URL
+    uses a "tdb://", "mdb://", "ldb://" or no prefix.
+    """
+
+    # Open a local connection to the SamDB
+    # and load configuration from the OS environment.
+    def setUp(self):
+        super().setUp()
+        self.env = os.environ["TEST_ENV"]
+        self.server = os.environ["SERVER"]
+        self.prefix = os.environ["PREFIX_ABS"]
+        lp = env_loadparm()
+        creds = Credentials()
+        session = system_session()
+        creds.guess(lp)
+        self.ldb = SamDB(session_info=session,
+                         credentials=creds,
+                         lp=lp)
+
+    # Delete the user account created by the test case.
+    # The user name is in self.user
+    def tearDown(self):
+        super().tearDown()
+        try:
+            dn = "CN=%s,CN=Users,%s" % (self.user, self.ldb.domain_dn())
+            delete_force(self.ldb, dn)
+        except Exception as e:
+            # We ignore any exceptions deleting the user in tearDown
+            # this allows the known fail mechanism to work for this test
+            # so the test can be committed before the fix.
+            # otherwise this delete fails with
+            #     Error(11)  unpacking encrypted secret, data possibly corrupted
+            #     or altered
+            pass
+
+    # Delete the user account created by the test case.
+    # The user name is in self.user
+    # Unlike tearDown, a failure to delete here fails the test.
+    def delete_user(self):
+        dn = "CN=%s,CN=Users,%s" % (self.user, self.ldb.domain_dn())
+        try:
+            delete_force(self.ldb, dn)
+        except Exception as e:
+            self.fail(str(e))
+
+    def _test_scheme(self, scheme):
+        """Ensure a user can be created by samba-tool with the supplied scheme
+        and that that user can logon."""
+
+        self.delete_user()
+
+        password = self.random_password()
+        db_path = "%s/%s/%s/private/sam.ldb" % (scheme, self.prefix, self.env)
+        try:
+            # Create the user against the locally-opened sam.ldb ...
+            command =\
+                "samba-tool user create %s %s -H %s" % (
+                    self.user, password, db_path)
+            self.check_run(command)
+
+            # ... then prove the new credentials work over LDAP.
+            ldbsearch = "ldbsearch"
+            if os.path.exists("bin/ldbsearch"):
+                ldbsearch = "bin/ldbsearch"
+            command =\
+                "%s -H ldap://%s/ -U%s%%%s '(cn=%s)' dn" % (
+                    ldbsearch, self.server, self.user, password, self.user)
+            self.check_run(command)
+        except BlackboxProcessError as e:
+            self.fail(str(e))
+
+    def test_tdb_scheme(self):
+        """Ensure a user can be created by samba-tool with the "tdb://" scheme
+        and that that user can logon."""
+
+        self.user = "TDB_USER"
+        self._test_scheme("tdb://")
+
+    def test_mdb_scheme(self):
+        """Ensure a user can be created by samba-tool with the "mdb://" scheme
+        and that that user can logon.
+
+        NOTE: this test is currently in knownfail.d/encrypted_secrets as
+              sam.ldb is currently a tdb even if the lmdb backend is
+              selected
+        """
+
+        self.user = "MDB_USER"
+        self._test_scheme("mdb://")
+
+    def test_ldb_scheme(self):
+        """Ensure a user can be created by samba-tool with the "ldb://" scheme
+        and that that user can logon."""
+
+        self.user = "LDB_USER"
+        self._test_scheme("ldb://")
diff --git a/python/samba/tests/blackbox/check_output.py b/python/samba/tests/blackbox/check_output.py
new file mode 100644
index 0000000..7f1e4d1
--- /dev/null
+++ b/python/samba/tests/blackbox/check_output.py
@@ -0,0 +1,108 @@
+# Copyright (C) Catalyst IT Ltd. 2017
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""
+Blackbox tests for blackboxtest check output methods.
+"""
+
+import signal
+from samba.tests import BlackboxTestCase
+
+
+class TimeoutHelper():
+    """
+    Timeout context manager using the alarm signal.
+
+    Raises a TimeoutHelper.Timeout exception if the body of the `with`
+    block does not finish within the given number of seconds.
+    Usage:
+
+       try:
+           with TimeoutHelper(3):
+               foobar("Request 1")
+       except TimeoutHelper.Timeout:
+           print("Timeout")
+
+    NOTE(review): SIGALRM-based, so this only works in the main thread.
+    """
+
+    class Timeout(Exception):
+        # Raised when the alarm fires before the block completes.
+        pass
+
+    def __init__(self, sec):
+        self.sec = sec
+
+    def __enter__(self):
+        signal.signal(signal.SIGALRM, self.raise_timeout)
+        signal.alarm(self.sec)
+
+    def __exit__(self, *args):
+        signal.alarm(0)    # disable alarm
+
+    def raise_timeout(self, *args):
+        raise TimeoutHelper.Timeout()
+
+
+def _make_cmdline(data='$', repeat=(5 * 1024 * 1024), retcode=0):
+    """Build a command to call gen_output.py to generate large output.
+
+    The command prints `data` repeated `repeat` times (5 MiB by default)
+    and exits with status `retcode`.
+    """
+    return 'gen_output.py --data {0} --repeat {1} --retcode {2}'.format(data,
+                                                                        repeat,
+                                                                        retcode)
+
+
+class CheckOutputTests(BlackboxTestCase):
+    """
+    Blackbox tests for check_xxx methods.
+
+    The check_xxx methods in BlackboxTestCase would deadlock on large
+    output from the command, caused by Popen.wait() filling the pipe
+    buffer.
+
+    This test case demonstrates the deadlock issue; the fix lands in
+    another commit.  Each test aborts via TimeoutHelper rather than
+    hanging forever.
+    """
+
+    def test_check_run_timeout(self):
+        """Call check_run with large output."""
+        try:
+            with TimeoutHelper(10):
+                self.check_run(_make_cmdline())
+        except TimeoutHelper.Timeout:
+            self.fail(msg='Timeout!')
+
+    def test_check_exit_code_with_large_output_success(self):
+        """check_exit_code must not hang when the command succeeds."""
+        try:
+            with TimeoutHelper(10):
+                self.check_exit_code(_make_cmdline(retcode=0), 0)
+        except TimeoutHelper.Timeout:
+            self.fail(msg='Timeout!')
+
+    def test_check_exit_code_with_large_output_failure(self):
+        """check_exit_code must not hang when the command fails."""
+        try:
+            with TimeoutHelper(10):
+                self.check_exit_code(_make_cmdline(retcode=1), 1)
+        except TimeoutHelper.Timeout:
+            self.fail(msg='Timeout!')
+
+    def test_check_output_with_large_output(self):
+        """check_output must return the full (large) command output."""
+        data = '@'
+        repeat = 5 * 1024 * 1024  # 5M
+        expected = data * repeat
+        cmdline = _make_cmdline(data=data, repeat=repeat)
+
+        try:
+            with TimeoutHelper(10):
+                actual = self.check_output(cmdline)
+                # check_output will return bytes
+                # convert expected to bytes for python 3
+                self.assertEqual(actual, expected.encode('utf-8'))
+        except TimeoutHelper.Timeout:
+            self.fail(msg='Timeout!')
diff --git a/python/samba/tests/blackbox/claims.py b/python/samba/tests/blackbox/claims.py
new file mode 100755
index 0000000..cad8095
--- /dev/null
+++ b/python/samba/tests/blackbox/claims.py
@@ -0,0 +1,526 @@
+#!/usr/bin/env python3
+# Unix SMB/CIFS implementation.
+#
+# Blackbox tests for claims support
+#
+# Copyright (C) Catalyst.Net Ltd. 2023
+#
+# Written by Rob van der Linde <rob@catalyst.net.nz>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import os
+
+from samba import NTSTATUSError
+from samba.auth import AuthContext
+from samba.credentials import Credentials
+from samba.gensec import FEATURE_SEAL, Security
+from samba.ntstatus import NT_STATUS_LOGON_FAILURE, NT_STATUS_UNSUCCESSFUL
+from samba.tests import BlackboxTestCase
+
+SERVER = os.environ["SERVER"]
+SERVER_USERNAME = os.environ["USERNAME"]
+SERVER_PASSWORD = os.environ["PASSWORD"]
+
+HOST = f"ldap://{SERVER}"
+CREDS = f"-U{SERVER_USERNAME}%{SERVER_PASSWORD}"
+
+
class ClaimsSupportTests(BlackboxTestCase):
    """Blackbox tests for Claims support

    NOTE: all these commands are subcommands of samba-tool.

    NOTE: the addCleanup functions get called automatically in reverse
    order after the tests finishes, they don't execute straight away.

    The _create_* helpers below factor out the account/policy/silo setup
    that every test repeats; each helper schedules its own cleanup.
    """

    # Shared test-account passwords; the accounts themselves are created
    # (and scheduled for deletion) per test via the _create_* helpers.
    CLIENT_PASSWORD = "T3stPassword0nly"
    TARGET_PASSWORD = "T3stC0mputerPassword"
    DEVICE_PASSWORD = "T3stD3vicePassword"

    def _create_computer(self, name, password):
        """Create a computer account and set its password.

        Deletion is registered with addCleanup.
        """
        self.check_run(f"computer create {name}")
        self.addCleanup(self.run_command, f"computer delete {name}")
        self.check_run(rf"user setpassword {name}\$ --newpassword={password}")

    def _create_user(self, username, password):
        """Create a user account; deletion is registered with addCleanup."""
        self.check_run(f"user create {username} {password}")
        self.addCleanup(self.run_command, f"user delete {username}")

    def _create_auth_policy(self, name):
        """Create an enforced authentication policy (cleaned up later)."""
        self.check_run(f"domain auth policy create --enforce --name={name}")
        self.addCleanup(self.run_command,
                        f"domain auth policy delete --name={name}")

    def _create_auth_silo(self, name, policy):
        """Create an enforced authentication silo whose user, computer and
        service policies are all *policy* (cleaned up later)."""
        self.check_run(f"domain auth silo create --enforce --name={name} "
                       f"--user-authentication-policy={policy} "
                       f"--computer-authentication-policy={policy} "
                       f"--service-authentication-policy={policy}")
        self.addCleanup(self.run_command,
                        f"domain auth silo delete --name={name}")

    def _verify_claims_access(self, with_device=False):
        """Authenticate claimstestuser against claims-server, optionally
        armoring the AS-REQ with the claims-device FAST credentials."""
        kwargs = {}
        if with_device:
            kwargs = dict(device_username="claims-device",
                          device_password=self.DEVICE_PASSWORD)
        self.verify_access(
            client_username="claimstestuser",
            client_password=self.CLIENT_PASSWORD,
            target_hostname="claims-server",
            target_username="claims-server",
            target_password=self.TARGET_PASSWORD,
            **kwargs,
        )

    def _assert_logon_failure(self, error):
        """Assert that *error* carries NT_STATUS_LOGON_FAILURE."""
        self.assertEqual(error.exception.args[0], NT_STATUS_LOGON_FAILURE)
        self.assertEqual(
            error.exception.args[1],
            "The attempted logon is invalid. This is either due to a "
            "bad username or authentication information.")

    def _assert_unsuccessful(self, error):
        """Assert that *error* carries NT_STATUS_UNSUCCESSFUL."""
        self.assertEqual(error.exception.args[0], NT_STATUS_UNSUCCESSFUL)
        self.assertIn(
            "The requested operation was unsuccessful.",
            error.exception.args[1])

    def test_device_group_restrictions(self):
        self._create_computer("claims-server", self.TARGET_PASSWORD)
        self._create_computer("claims-device", self.DEVICE_PASSWORD)
        self._create_user("claimstestuser", self.CLIENT_PASSWORD)

        self._create_auth_policy("device-restricted-users-pol")

        self.check_run("group add allowed-devices")
        self.addCleanup(self.run_command, "group delete allowed-devices")

        # Set allowed to authenticate from.
        self.check_run("domain auth policy modify --name=device-restricted-users-pol "
                       "--user-allowed-to-authenticate-from-device-group=allowed-devices")

        self.check_run("user auth policy assign claimstestuser --policy=device-restricted-users-pol")

        # The device is not yet a member of allowed-devices, so the logon
        # must be rejected.
        with self.assertRaises(NTSTATUSError) as error:
            self._verify_claims_access(with_device=True)
        self._assert_logon_failure(error)

        self.check_run("group addmembers allowed-devices claims-device")

        self._verify_claims_access(with_device=True)

    def test_device_silo_restrictions(self):
        self._create_computer("claims-server", self.TARGET_PASSWORD)
        self._create_computer("claims-device", self.DEVICE_PASSWORD)
        self._create_user("claimstestuser", self.CLIENT_PASSWORD)

        self._create_auth_policy("allowed-devices-only-pol")
        self._create_auth_silo("allowed-devices-only-silo",
                               "allowed-devices-only-pol")

        # Set allowed to authenticate from (where the login can happen) and to
        # (server requires silo that in term has this rule, so knows the user
        # was required to authenticate from).
        self.check_run("domain auth policy modify --name=allowed-devices-only-pol "
                       "--user-allowed-to-authenticate-from-device-silo=allowed-devices-only-silo")

        # Grant access to silo.
        self.check_run(r"domain auth silo member grant --name=allowed-devices-only-silo --member=claims-device\$")
        self.check_run("domain auth silo member grant --name=allowed-devices-only-silo --member=claimstestuser")

        # However with nothing assigned, allow-by-default still applies
        self._verify_claims_access()

        # Show that adding a FAST armor from the device doesn't change
        # things either way
        self._verify_claims_access(with_device=True)

        # Assign silo to the user.
        self.check_run("user auth silo assign claimstestuser --silo=allowed-devices-only-silo")

        # We fail, as the KDC now requires the silo but the client is not
        # using an approved device
        with self.assertRaises(NTSTATUSError) as error:
            self._verify_claims_access(with_device=True)
        self._assert_unsuccessful(error)

        # Assign silo to the device.
        self.check_run(r"user auth silo assign claims-device\$ --silo=allowed-devices-only-silo")

        self._verify_claims_access(with_device=True)

    def test_device_and_server_silo_restrictions(self):
        self._create_computer("claims-server", self.TARGET_PASSWORD)
        self._create_computer("claims-device", self.DEVICE_PASSWORD)
        self._create_user("claimstestuser", self.CLIENT_PASSWORD)

        self._create_auth_policy("allowed-devices-only-pol")
        self._create_auth_silo("allowed-devices-only-silo",
                               "allowed-devices-only-pol")

        # Set allowed to authenticate from (where the login can happen) and to
        # (server requires silo that in term has this rule, so knows the user
        # was required to authenticate from).
        # If we assigned services to the silo we would need to add
        # --service-allowed-to-authenticate-to/from options as well.
        # Likewise, if there are services running in user accounts, we need
        # --user-allowed-to-authenticate-to
        self.check_run("domain auth policy modify --name=allowed-devices-only-pol "
                       "--user-allowed-to-authenticate-from-device-silo=allowed-devices-only-silo "
                       "--computer-allowed-to-authenticate-to-by-silo=allowed-devices-only-silo")

        # Grant access to silo.
        self.check_run(r"domain auth silo member grant --name=allowed-devices-only-silo --member=claims-device\$")
        self.check_run(r"domain auth silo member grant --name=allowed-devices-only-silo --member=claims-server\$")
        self.check_run("domain auth silo member grant --name=allowed-devices-only-silo --member=claimstestuser")

        # However with nothing assigned, allow-by-default still applies
        self._verify_claims_access()

        # Show that adding a FAST armor from the device doesn't change
        # things either way
        self._verify_claims_access(with_device=True)

        self.check_run(r"user auth silo assign claims-server\$ --silo=allowed-devices-only-silo")

        # We fail, as the server now requires the silo but the client is not in it
        with self.assertRaises(NTSTATUSError) as error:
            self._verify_claims_access(with_device=True)
        self._assert_logon_failure(error)

        # Assign silo to the user.
        self.check_run("user auth silo assign claimstestuser --silo=allowed-devices-only-silo")

        # We fail, as the KDC now requires the silo but the client is not
        # using an approved device
        with self.assertRaises(NTSTATUSError) as error:
            self._verify_claims_access(with_device=True)
        self._assert_unsuccessful(error)

        # Assign silo to the device.
        self.check_run(r"user auth silo assign claims-device\$ --silo=allowed-devices-only-silo")

        self._verify_claims_access(with_device=True)

    def test_user_group_access(self):
        """An example use with groups."""
        self._create_computer("claims-server", self.TARGET_PASSWORD)
        self._create_user("claimstestuser", self.CLIENT_PASSWORD)

        self._create_auth_policy("restricted-servers-pol")

        self.check_run("group add server-access-group")
        self.addCleanup(self.run_command, "group delete server-access-group")

        # Set allowed to authenticate to.
        self.check_run("domain auth policy modify --name=restricted-servers-pol "
                       "--computer-allowed-to-authenticate-to-by-group=server-access-group")

        self.check_run(r"user auth policy assign claims-server\$ --policy=restricted-servers-pol")

        # The user is not yet in the access group, so the logon must fail.
        with self.assertRaises(NTSTATUSError) as error:
            self._verify_claims_access()
        self._assert_logon_failure(error)

        # Add group members.
        self.check_run("group addmembers server-access-group claimstestuser")

        self._verify_claims_access()

    def test_user_silo_access(self):
        """An example use with authentication silos."""
        self._create_computer("claims-server", self.TARGET_PASSWORD)
        self._create_user("claimstestuser", self.CLIENT_PASSWORD)

        self._create_auth_policy("restricted-servers-pol")
        self._create_auth_silo("restricted-servers-silo",
                               "restricted-servers-pol")

        # Set allowed to authenticate to.
        self.check_run("domain auth policy modify --name=restricted-servers-pol "
                       "--computer-allowed-to-authenticate-to-by-silo=restricted-servers-silo")

        # Grant access to silo.
        self.check_run(r"domain auth silo member grant --name=restricted-servers-silo --member=claims-server\$")
        self.check_run("domain auth silo member grant --name=restricted-servers-silo --member=claimstestuser")

        self._verify_claims_access()

        self.check_run(r"user auth silo assign claims-server\$ --silo=restricted-servers-silo")

        # The server is assigned to the silo but the user is not, so the
        # logon must fail.
        with self.assertRaises(NTSTATUSError) as error:
            self._verify_claims_access()
        self._assert_logon_failure(error)

        # Set assigned silo on user and computer.
        self.check_run("user auth silo assign claimstestuser --silo=restricted-servers-silo")

        self._verify_claims_access()

    @classmethod
    def _make_cmdline(cls, line):
        """Override to pass line as samba-tool subcommand instead.

        Automatically fills in HOST and CREDS as well.
        """
        if isinstance(line, list):
            cmd = ["samba-tool"] + line + ["-H", HOST, CREDS]
        else:
            cmd = f"samba-tool {line} -H {HOST} {CREDS}"

        return super()._make_cmdline(cmd)

    def verify_access(self, client_username, client_password,
                      target_hostname, target_username, target_password, *,
                      device_username=None, device_password=None):
        """Verify an in-process GSSAPI authentication exchange.

        Acts as both the client and the server side of the handshake.
        If device credentials are given they are used as Kerberos FAST
        armor for the client.  Raises NTSTATUSError if either side
        rejects the logon.
        """
        lp = self.get_loadparm()

        client_creds = Credentials()
        client_creds.set_username(client_username)
        client_creds.set_password(client_password)
        client_creds.guess(lp)

        if device_username:
            device_creds = Credentials()
            device_creds.set_username(device_username)
            device_creds.set_password(device_password)
            device_creds.guess(lp)
            client_creds.set_krb5_fast_armor_credentials(device_creds, True)

        target_creds = Credentials()
        target_creds.set_username(target_username)
        target_creds.set_password(target_password)
        target_creds.guess(lp)

        settings = {
            "lp_ctx": lp,
            "target_hostname": target_hostname
        }

        gensec_client = Security.start_client(settings)
        gensec_client.set_credentials(client_creds)
        gensec_client.want_feature(FEATURE_SEAL)
        gensec_client.start_mech_by_sasl_name("GSSAPI")

        gensec_target = Security.start_server(settings=settings,
                                              auth_context=AuthContext(lp_ctx=lp))
        gensec_target.set_credentials(target_creds)
        gensec_target.start_mech_by_sasl_name("GSSAPI")

        client_finished = False
        server_finished = False
        client_to_server = b""
        server_to_client = b""

        # Operate as both the client and the server to verify the user's
        # credentials.
        while not client_finished or not server_finished:
            if not client_finished:
                print("running client gensec_update")
                client_finished, client_to_server = gensec_client.update(
                    server_to_client)
            if not server_finished:
                print("running server gensec_update")
                server_finished, server_to_client = gensec_target.update(
                    client_to_server)
+
+
if __name__ == "__main__":
    # Allow running this test file directly rather than via the selftest
    # framework.
    import unittest
    unittest.main()
diff --git a/python/samba/tests/blackbox/downgradedatabase.py b/python/samba/tests/blackbox/downgradedatabase.py
new file mode 100644
index 0000000..d850d1b
--- /dev/null
+++ b/python/samba/tests/blackbox/downgradedatabase.py
@@ -0,0 +1,167 @@
+# Blackbox tests for sambadowngradedatabase
+#
+# Copyright (C) Catalyst IT Ltd. 2019
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+from samba.tests import BlackboxTestCase
+import os
+import ldb
+from subprocess import check_output
+from samba.samdb import SamDB
+
+COMMAND = os.path.join(os.environ.get("SRCDIR_ABS"),
+ "source4/scripting/bin/samba_downgrade_db")
+
+
class DowngradeTestBase(BlackboxTestCase):
    """Test that sambadowngradedatabase downgrades the samba database"""

    def setUp(self):
        """Provision a throwaway domain using the subclass's backend store
        and collect the paths of sam.ldb and all partition databases."""
        super().setUp()
        # Subclasses must define 'backend' ('tdb' or 'mdb').
        if not hasattr(self, "backend"):
            self.fail("Subclass this class and set 'backend'")

        prov_cmd = "samba-tool domain provision " +\
                   "--domain FOO --realm foo.example.com " +\
                   "--targetdir {self.tempdir} " +\
                   "--backend-store {self.backend} " +\
                   "--host-name downgradetest " +\
                   "--option=\"vfs objects=dfs_samba4 acl_xattr fake_acls xattr_tdb\""
        prov_cmd = prov_cmd.format(self=self)
        self.check_run(prov_cmd, "Provisioning for downgrade")

        private_dir = os.path.join(self.tempdir, "private")
        self.sam_path = os.path.join(private_dir, "sam.ldb")
        # "modules:" disables the ldb module stack so we see raw records.
        self.ldb = ldb.Ldb(self.sam_path, options=["modules:"])

        # @PARTITION stores "partition" values of the form "<dn>:<file>";
        # extract the file names and resolve them under private_dir.
        partitions = self.ldb.search(base="@PARTITION",
                                     scope=ldb.SCOPE_BASE,
                                     attrs=["partition"])
        partitions = partitions[0]['partition']
        partitions = [str(p).split(":")[1] for p in partitions]
        self.dbs = [os.path.join(private_dir, p)
                    for p in partitions]
        self.dbs.append(self.sam_path)

    def tearDown(self):
        # Remove everything the provision created under tempdir.
        self.rm_dirs("private",
                     "etc",
                     "state",
                     "bind-dns",
                     "msg.lock")
        self.rm_files("names.tdb", "gencache.tdb")
        super().tearDown()

    # Parse out the comments above each record that ldbdump produces
    # containing pack format version and KV level key for each record.
    # Return all GUID keys and DN keys (without @attrs), and the set
    # of all unique pack formats.
    def ldbdump_keys_pack_formats(self):
        # Get all comments from all partition dbs
        comments = []
        # Prefer the in-tree ldbdump binary when running from a build dir.
        ldbdump = "ldbdump"
        if os.path.exists("bin/ldbdump"):
            ldbdump = "bin/ldbdump"

        for db in self.dbs:
            dump = check_output([ldbdump, "-i", db])
            dump = dump.decode("utf-8")
            dump = dump.split("\n")
            comments += [s for s in dump if s.startswith("#")]

        guid_key_tag = "# key: GUID="
        guid_keys = {c[len(guid_key_tag):] for c in comments
                     if c.startswith(guid_key_tag)}

        dn_key_tag = "# key: DN="
        dn_keys = {c[len(dn_key_tag):] for c in comments
                   if c.startswith(dn_key_tag)}

        # Ignore @ attributes, they are always DN keyed
        dn_keys_no_at_attrs = {d for d in dn_keys if not d.startswith("@")}

        pack_format_tag = "# pack format: "
        pack_formats = {c[len(pack_format_tag):] for c in comments
                        if c.startswith(pack_format_tag)}
        # ldbdump prints the pack format as a hex string.
        pack_formats = [int(s, 16) for s in pack_formats]

        return dn_keys_no_at_attrs, guid_keys, pack_formats

    # Get a set of all distinct types in @ATTRIBUTES
    def attribute_types(self):
        at_attributes = self.ldb.search(base="@ATTRIBUTES",
                                        scope=ldb.SCOPE_BASE,
                                        attrs=["*"])
        self.assertEqual(len(at_attributes), 1)
        keys = at_attributes[0].keys()
        attribute_types = {str(at_attributes[0].get(k)) for k in keys}

        return attribute_types
+
class DowngradeTestTDB(DowngradeTestBase):
    backend = 'tdb'

    # Check that running sambadowngradedatabase with a TDB backend:
    # * Replaces all GUID keys with DN keys
    # * Removes ORDERED_INTEGER from @ATTRIBUTES
    # * Repacks database with pack format version 1 (ldb.PACKING_FORMAT)
    def test_downgrade_database(self):
        # ldb exposes syntax OIDs prefixed with "LDB_SYNTAX_"; strip the
        # prefix to match the string form stored in @ATTRIBUTES.
        type_prefix = "LDB_SYNTAX_"
        ordered_int_type = ldb.SYNTAX_ORDERED_INTEGER[len(type_prefix):]

        # Sanity-check the freshly provisioned DB: GUID-keyed, V2 packed.
        dn_keys, guid_keys, pack_formats = self.ldbdump_keys_pack_formats()
        self.assertGreater(len(guid_keys), 20)
        self.assertEqual(len(dn_keys), 0)
        self.assertTrue(ordered_int_type in self.attribute_types())
        self.assertEqual(pack_formats, [ldb.PACKING_FORMAT_V2])

        num_guid_keys_before_downgrade = len(guid_keys)

        self.check_run("%s -H %s" % (COMMAND, self.sam_path),
                       msg="Running sambadowngradedatabase")

        # After the downgrade every GUID key must have become a DN key.
        dn_keys, guid_keys, pack_formats = self.ldbdump_keys_pack_formats()
        self.assertEqual(len(guid_keys), 0)
        self.assertEqual(len(dn_keys), num_guid_keys_before_downgrade)
        self.assertTrue(ordered_int_type not in self.attribute_types())
        self.assertEqual(pack_formats, [ldb.PACKING_FORMAT])
+
class DowngradeTestMDB(DowngradeTestBase):
    backend = 'mdb'

    # Check that running sambadowngradedatabase with an MDB backend:
    # * Does NOT replace GUID keys with DN keys
    # * Removes ORDERED_INTEGER from @ATTRIBUTES
    # * Repacks database with pack format version 1 (ldb.PACKING_FORMAT)
    def test_undo_guid(self):
        # ldb exposes syntax OIDs prefixed with "LDB_SYNTAX_"; strip the
        # prefix to match the string form stored in @ATTRIBUTES.
        type_prefix = "LDB_SYNTAX_"
        ordered_int_type = ldb.SYNTAX_ORDERED_INTEGER[len(type_prefix):]

        # Sanity-check the freshly provisioned DB: GUID-keyed, V2 packed.
        dn_keys, guid_keys, pack_formats = self.ldbdump_keys_pack_formats()
        self.assertGreater(len(guid_keys), 20)
        self.assertEqual(len(dn_keys), 0)
        self.assertTrue(ordered_int_type in self.attribute_types())
        self.assertEqual(pack_formats, [ldb.PACKING_FORMAT_V2])

        num_guid_keys_before_downgrade = len(guid_keys)

        self.check_run("%s -H %s" % (COMMAND, self.sam_path),
                       msg="Running sambadowngradedatabase")

        # Unlike TDB, MDB keeps its GUID keys after the downgrade.
        dn_keys, guid_keys, pack_formats = self.ldbdump_keys_pack_formats()
        self.assertEqual(len(guid_keys), num_guid_keys_before_downgrade)
        self.assertEqual(len(dn_keys), 0)
        self.assertTrue(ordered_int_type not in self.attribute_types())
        self.assertEqual(pack_formats, [ldb.PACKING_FORMAT])
diff --git a/python/samba/tests/blackbox/mdsearch.py b/python/samba/tests/blackbox/mdsearch.py
new file mode 100644
index 0000000..8d67090
--- /dev/null
+++ b/python/samba/tests/blackbox/mdsearch.py
@@ -0,0 +1,126 @@
+#
+# Blackbox tests for mdsearch
+#
+# Copyright (C) Ralph Boehme 2019
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Blackbox test for mdsearch"""
+
+import os
+import time
+import threading
+import logging
+import json
+from http.server import HTTPServer, BaseHTTPRequestHandler
+from samba.dcerpc import mdssvc
+from samba.tests import BlackboxTestCase
+from samba.samba3 import mdscli
+from samba.logger import get_samba_logger
+
+logger = get_samba_logger(name=__name__)
+
+testfiles = [
+ "foo",
+ "bar",
+]
+
class MdssvcHTTPRequestHandler(BaseHTTPRequestHandler):
    """Stand-in search backend for mdssvc.

    Compares each POSTed JSON body against the server's expected request
    (server.json_in) and, on a match, replies with the canned response
    (server.json_out); otherwise answers 400.
    """

    def do_POST(self):
        length = int(self.headers['content-length'])
        request_body = self.rfile.read(length)

        # Compare parsed JSON, not raw bytes, so formatting differences
        # between client and fixture don't matter.
        actual_json = json.loads(request_body)
        expected_json = json.loads(self.server.json_in)

        if actual_json != expected_json:
            logger.error("Bad request, expected:\n%s\nGot:\n%s\n" % (expected_json, actual_json))
            self.send_error(400,
                            "Bad request",
                            "Expected: %s\n"
                            "Got: %s\n" %
                            (expected_json, actual_json))
            return

        resp = self.server.json_out.encode("utf-8")

        self.send_response(200)
        self.send_header('content-type', 'application/json; charset=UTF-8')
        self.send_header('content-length', len(resp))
        self.end_headers()
        self.wfile.write(resp)
+
class MdfindBlackboxTests(BlackboxTestCase):
    """Blackbox test driving mdsearch against a fake search backend."""

    def setUp(self):
        """Start the fake HTTP search backend and create the test files."""
        super().setUp()

        # bind_and_activate=False: binding happens on the worker thread in
        # http_server() below.
        self.server = HTTPServer(('10.53.57.35', 8080),
                                 MdssvcHTTPRequestHandler,
                                 bind_and_activate=False)

        self.t = threading.Thread(target=MdfindBlackboxTests.http_server, args=(self,))
        # Daemon thread so a lingering serve_forever() cannot block
        # interpreter exit.  (Thread.setDaemon() is deprecated.)
        self.t.daemon = True
        self.t.start()
        # Give the server thread a moment to bind before the test runs.
        time.sleep(1)

        self.sharepath = os.environ["LOCAL_PATH"]

        # Create empty files for the search results to refer to.
        for file in testfiles:
            with open("%s/%s" % (self.sharepath, file), "w"):
                pass

    def tearDown(self):
        super().tearDown()
        for file in testfiles:
            os.remove("%s/%s" % (self.sharepath, file))

    def http_server(self):
        """Thread target: bind, activate and serve the fake backend."""
        self.server.server_bind()
        self.server.server_activate()
        self.server.serve_forever()

    def test_mdsearch(self):
        """Simple blackbox test for mdsearch"""

        username = os.environ["USERNAME"]
        password = os.environ["PASSWORD"]
        config = os.environ["SMB_CONF_PATH"]

        # Expected Elasticsearch query generated by mdssvc for the
        # Spotlight expression below.
        json_in = r'''{
          "from": 0, "size": 50, "_source": ["path.real"],
          "query": {
            "query_string": {
              "query": "(samba*) AND path.real.fulltext:\"%BASEPATH%\""
            }
          }
        }'''
        # Canned backend response listing the two test files.
        json_out = '''{
          "hits" : {
            "total" : { "value" : 2},
            "hits" : [
              {"_source" : {"path" : {"real" : "%BASEPATH%/foo"}}},
              {"_source" : {"path" : {"real" : "%BASEPATH%/bar"}}}
            ]
          }
        }'''

        self.server.json_in = json_in.replace("%BASEPATH%", self.sharepath)
        self.server.json_out = json_out.replace("%BASEPATH%", self.sharepath)

        output = self.check_output("mdsearch --configfile=%s -U %s%%%s fileserver spotlight '*==\"samba*\"'" % (config, username, password))

        actual = output.decode('utf-8').splitlines()
        self.assertEqual(testfiles, actual)
diff --git a/python/samba/tests/blackbox/ndrdump.py b/python/samba/tests/blackbox/ndrdump.py
new file mode 100644
index 0000000..99aa7c1
--- /dev/null
+++ b/python/samba/tests/blackbox/ndrdump.py
@@ -0,0 +1,563 @@
+# Blackbox tests for ndrdump
+# Copyright (C) 2008 Andrew Tridgell <tridge@samba.org>
+# Copyright (C) 2008 Andrew Bartlett <abartlet@samba.org>
+# Copyright (C) 2010 Jelmer Vernooij <jelmer@samba.org>
+# based on test_smbclient.sh
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Blackbox tests for ndrdump."""
+
+import os
+import re
+from samba.tests import BlackboxTestCase, BlackboxProcessError
+
+data_path_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../../../../source4/librpc/tests"))
+
+class NdrDumpTests(BlackboxTestCase):
+ """Blackbox tests for ndrdump."""
+
+    def data_path(self, name):
+        # Resolve a test-data filename relative to source4/librpc/tests.
+        return os.path.join(data_path_dir, name)
+
+ def test_ndrdump_with_in(self):
+ self.check_run(("ndrdump --debug-stdout samr samr_CreateUser in %s" %
+ (self.data_path("samr-CreateUser-in.dat"))))
+
+ def test_ndrdump_with_out(self):
+ self.check_run(("ndrdump --debug-stdout samr samr_CreateUser out %s" %
+ (self.data_path("samr-CreateUser-out.dat"))))
+
+ def test_ndrdump_context_file(self):
+ self.check_run(
+ ("ndrdump --debug-stdout --context-file %s samr samr_CreateUser out %s" %
+ (self.data_path("samr-CreateUser-in.dat"),
+ self.data_path("samr-CreateUser-out.dat"))))
+
+ def test_ndrdump_with_validate(self):
+ self.check_run(("ndrdump --debug-stdout --validate samr samr_CreateUser in %s" %
+ (self.data_path("samr-CreateUser-in.dat"))))
+
+ def test_ndrdump_with_hex_decode_function(self):
+ self.check_run(
+ ("ndrdump --debug-stdout dns decode_dns_name_packet in --hex-input %s" %
+ self.data_path("dns-decode_dns_name_packet-hex.dat")))
+
+ def test_ndrdump_with_hex_struct_name(self):
+ expected = open(self.data_path("dns-decode_dns_name_packet-hex.txt")).read()
+ try:
+ actual = self.check_output(
+ "ndrdump --debug-stdout dns dns_name_packet struct --hex-input %s" %
+ self.data_path("dns-decode_dns_name_packet-hex.dat"))
+ except BlackboxProcessError as e:
+ self.fail(e)
+
+ # check_output will return bytes
+ # convert expected to bytes for python 3
+ self.assertEqual(actual, expected.encode('utf-8'))
+
+ def test_ndrdump_with_binary_struct_name(self):
+ # Prefix of the expected unparsed PAC data (without times, as
+ # these vary by host)
+ expected = '''pull returned Success
+ PAC_DATA: struct PAC_DATA
+ num_buffers : 0x00000005 (5)
+ version : 0x00000000 (0)
+ buffers: ARRAY(5)'''
+ try:
+ actual = self.check_output(
+ "ndrdump --debug-stdout krb5pac PAC_DATA struct %s" %
+ self.data_path("krb5pac-PAC_DATA.dat"))
+ except BlackboxProcessError as e:
+ self.fail(e)
+
+ # check_output will return bytes
+ # convert expected to bytes for python 3
+ self.assertEqual(actual[:len(expected)],
+ expected.encode('utf-8'))
+ self.assertTrue(actual.endswith(b"dump OK\n"))
+
+ def test_ndrdump_upn_dns_info_ex(self):
+ with open(self.data_path(
+ 'krb5pac_upn_dns_info_ex.txt')) as f:
+ expected = f.read()
+ data_path = self.data_path(
+ 'krb5pac_upn_dns_info_ex.b64.txt')
+
+ try:
+ actual = self.check_output(
+ 'ndrdump --debug-stdout -d0 krb5pac PAC_DATA struct '
+ '--validate --base64-input ' + data_path)
+ except BlackboxProcessError as e:
+ self.fail(e)
+
+ self.assertEqual(actual, expected.encode('utf-8'))
+
+ def test_ndrdump_upn_dns_info_ex_not_supported(self):
+ with open(self.data_path(
+ 'krb5pac_upn_dns_info_ex_not_supported.txt')) as f:
+ expected = f.read()
+ data_path = self.data_path(
+ 'krb5pac_upn_dns_info_ex_not_supported.b64.txt')
+
+ try:
+ # This PAC has been edited to remove the
+ # PAC_UPN_DNS_FLAG_HAS_SAM_NAME_AND_SID bit, so that we can
+ # simulate older versions of Samba parsing this structure.
+ actual = self.check_output(
+ 'ndrdump --debug-stdout -d0 krb5pac PAC_DATA struct '
+ '--validate --base64-input ' + data_path)
+ except BlackboxProcessError as e:
+ self.fail(e)
+
+ self.assertEqual(actual, expected.encode('utf-8'))
+
+ def test_ndrdump_with_binary_struct_number(self):
+ expected = '''pull returned Success
+ GUID : 33323130-3534-3736-3839-616263646566
+dump OK
+'''
+ try:
+ actual = self.check_output(
+ "ndrdump --debug-stdout misc 0 struct %s" %
+ self.data_path("misc-GUID.dat"))
+ except BlackboxProcessError as e:
+ self.fail(e)
+
+ # check_output will return bytes
+ # convert expected to bytes for python 3
+ self.assertEqual(actual, expected.encode('utf-8'))
+
+ def test_ndrdump_with_enum_not_struct(self):
+ expected = '''Public structure 'netr_SchannelType' not found
+'''
+ try:
+ actual = self.check_exit_code(
+ "ndrdump --debug-stdout misc netr_SchannelType --input=x struct",
+ 1)
+ except BlackboxProcessError as e:
+ self.fail(e)
+
+ # check_output will return bytes
+ # convert expected to bytes for python 3
+ self.assertEqual(actual, expected.encode('utf-8'))
+
+ def test_ndrdump_input_cmdline_short_struct_name(self):
+ expected = '''pull returned Buffer Size Error
+'''
+ try:
+ actual = self.check_exit_code(
+ "ndrdump --debug-stdout -d0 misc GUID struct --input=abcdefg", 2)
+ except BlackboxProcessError as e:
+ self.fail(e)
+
+ # check_output will return bytes
+ # convert expected to bytes for python 3
+ self.assertEqual(actual, expected.encode('utf-8'))
+
+ def test_ndrdump_input_cmdline_short_struct_name_dump(self):
+ expected = '''pull returned Buffer Size Error
+6 bytes consumed
+[0000] 61 62 63 64 65 66 67 abcdefg''' \
+ '''
+'''
+ try:
+ actual = self.check_exit_code(
+ "ndrdump --debug-stdout -d0 misc GUID struct --input=abcdefg --dump-data", 2)
+ except BlackboxProcessError as e:
+ self.fail(e)
+
+ # check_output will return bytes
+ # convert expected to bytes for python 3
+ self.assertEqual(actual, expected.encode('utf-8'))
+
+ def test_ndrdump_input_cmdline_short_struct_name_print_fail(self):
+ expected = '''pull returned Buffer Size Error
+6 bytes consumed
+[0000] 61 62 63 64 65 66 67 abcdefg''' \
+ '''
+WARNING! 1 unread bytes
+[0000] 67 g''' \
+ '''
+WARNING: pull of GUID was incomplete, therefore the parse below may SEGFAULT
+ GUID : 64636261-6665-0000-0000-000000000000
+dump of failed-to-parse GUID complete
+'''
+ try:
+ actual = self.check_exit_code(
+ "ndrdump --debug-stdout -d0 misc GUID struct --input=abcdefg --dump-data --print-after-parse-failure", 2)
+ except BlackboxProcessError as e:
+ self.fail(e)
+
+ # check_output will return bytes
+ # convert expected to bytes for python 3
+ self.assertEqual(actual, expected.encode('utf-8'))
+
+ def test_ndrdump_fuzzed_clusapi_QueryAllValues(self):
+ expected = b'''pull returned Success
+WARNING! 53 unread bytes
+[0000] 00 FF 00 00 FF 00 00 00 00 09 00 00 00 08 00 33 ........ .......3
+[0010] 33 32 37 36 32 36 39 33 32 37 36 38 34 01 00 00 32762693 27684...
+[0020] 80 32 0D FF 00 00 FF 00 00 00 00 08 00 00 00 1C .2...... ........
+[0030] F1 29 08 00 00 .)...''' \
+ b'''
+ clusapi_QueryAllValues: struct clusapi_QueryAllValues
+ out: struct clusapi_QueryAllValues
+ pcbData : *
+ pcbData : 0x01000000 (16777216)
+ ppData : *
+ ppData: ARRAY(1)
+ ppData : NULL
+ rpc_status : *
+ rpc_status : WERR_OK
+ result : WERR_NOT_ENOUGH_MEMORY
+dump OK
+'''
+ try:
+ actual = self.check_output(
+ 'ndrdump --debug-stdout clusapi clusapi_QueryAllValues out ' +\
+ '--base64-input --input=' +\
+ 'AAAAAQEAAAAAAAAAAAAAAAgAAAAA/wAA/wAAAAAJAAAACAAzMzI3NjI2OTMyNzY4NAEAAIAyDf8AAP8AAAAACAAAABzxKQgAAA==')
+ except BlackboxProcessError as e:
+ self.fail(e)
+ self.assertEqual(actual, expected)
+
+ def test_ndrdump_fuzzed_ntlmsssp_AUTHENTICATE_MESSAGE(self):
+ expected = open(self.data_path("fuzzed_ntlmssp-AUTHENTICATE_MESSAGE.txt")).read()
+ try:
+ actual = self.check_output(
+ "ndrdump --debug-stdout ntlmssp AUTHENTICATE_MESSAGE struct --base64-input %s --validate" %
+ self.data_path("fuzzed_ntlmssp-AUTHENTICATE_MESSAGE.b64.txt"))
+ except BlackboxProcessError as e:
+ self.fail(e)
+ # check_output will return bytes
+ # convert expected to bytes for python 3
+ self.assertEqual(actual, expected.encode('utf-8'))
+
+ def test_ndrdump_fuzzed_PackagesBlob(self):
+ expected = 'ndr_pull_string: ndr_pull_error\\(Buffer Size Error\\):'
+ command = (
+ "ndrdump --debug-stdout drsblobs package_PackagesBlob struct --input='aw=='"
+ " --base64-input")
+ try:
+ actual = self.check_exit_code(command, 2)
+ except BlackboxProcessError as e:
+ self.fail(e)
+ # check_output will return bytes
+ # convert expected to bytes for python 3
+ self.assertRegex(actual.decode('utf8'), expected)
+
+ def test_ndrdump_fuzzed_drsuapi_DsAddEntry_1(self):
+ expected = open(self.data_path("fuzzed_drsuapi_DsAddEntry_1.txt")).read()
+ try:
+ actual = self.check_output(
+ "ndrdump --debug-stdout drsuapi drsuapi_DsAddEntry in --base64-input --validate %s" %
+ self.data_path("fuzzed_drsuapi_DsAddEntry_1.b64.txt"))
+ except BlackboxProcessError as e:
+ self.fail(e)
+ # check_output will return bytes
+ # convert expected to bytes for python 3
+ self.assertEqual(actual, expected.encode('utf-8'))
+
+ def test_ndrdump_fuzzed_drsuapi_DsaAddressListItem_V1(self):
+ expected = "Maximum Recursion Exceeded"
+ try:
+ self.check_output(
+ "ndrdump --debug-stdout drsuapi 17 out --base64-input %s" %
+ self.data_path(
+ "fuzzed_drsuapi_DsaAddressListItem_V1-in.b64.txt"))
+ self.fail("Input should have been rejected with %s" % expected)
+ except BlackboxProcessError as e:
+ if expected not in str(e):
+ self.fail(e)
+
+ def test_ndrdump_fuzzed_drsuapi_DsReplicaAttribute(self):
+ expected = open(self.data_path("fuzzed_drsuapi_DsReplicaAttribute.txt")).read()
+ try:
+ actual = self.check_output(
+ "ndrdump --debug-stdout drsuapi drsuapi_DsReplicaAttribute struct --base64-input --validate %s" %
+ self.data_path("fuzzed_drsuapi_DsReplicaAttribute.b64.txt"))
+ except BlackboxProcessError as e:
+ self.fail(e)
+ # check_output will return bytes
+ # convert expected to bytes for python 3
+ self.assertEqual(actual, expected.encode('utf-8'))
+
+ def test_ndrdump_Krb5ccache(self):
+ expected = open(self.data_path("../../../source3/selftest/"
+ "ktest-krb5_ccache-2.txt")).read()
+ try:
+ # Specify -d1 to match the generated output file, because ndrdump
+ # only outputs some additional info if this parameter is specified,
+ # and the --configfile parameter gives us an empty smb.conf to avoid
+ # extraneous output.
+ actual = self.check_output(
+ "ndrdump krb5ccache CCACHE struct "
+ "--configfile /dev/null --debug-stdout -d1 --validate " +
+ self.data_path("../../../source3/selftest/"
+ "ktest-krb5_ccache-2"))
+ except BlackboxProcessError as e:
+ self.fail(e)
+ # check_output will return bytes
+ # convert expected to bytes for python 3
+ self.assertEqual(actual, expected.encode('utf-8'))
+
+ expected = open(self.data_path("../../../source3/selftest/"
+ "ktest-krb5_ccache-3.txt")).read()
+ try:
+ # Specify -d1 to match the generated output file, because ndrdump
+ # only outputs some additional info if this parameter is specified,
+ # and the --configfile parameter gives us an empty smb.conf to avoid
+ # extraneous output.
+ actual = self.check_output(
+ "ndrdump krb5ccache CCACHE struct "
+ "--configfile /dev/null --debug-stdout -d1 --validate " +
+ self.data_path("../../../source3/selftest/"
+ "ktest-krb5_ccache-3"))
+ except BlackboxProcessError as e:
+ self.fail(e)
+ # check_output will return bytes
+ # convert expected to bytes for python 3
+ self.assertEqual(actual, expected.encode('utf-8'))
+
+ # This is a good example of a union with an empty default
+ # and no buffers to parse.
+ def test_ndrdump_fuzzed_spoolss_EnumForms(self):
+ expected_head = b'''pull returned Success
+WARNING! 2 unread bytes
+[0000] 00 00 ..''' b'''
+ spoolss_EnumForms: struct spoolss_EnumForms
+ out: struct spoolss_EnumForms
+ count : *
+ count : 0x00000100 (256)
+ info : *
+ info : *
+ info: ARRAY(256)
+ info : union spoolss_FormInfo(case 0)
+ info : union spoolss_FormInfo(case 0)
+'''
+ expected_tail = b'''info : union spoolss_FormInfo(case 0)
+ info : union spoolss_FormInfo(case 0)
+ info : union spoolss_FormInfo(case 0)
+ info : union spoolss_FormInfo(case 0)
+ info : union spoolss_FormInfo(case 0)
+ info : union spoolss_FormInfo(case 0)
+ needed : *
+ needed : 0x00000000 (0)
+ result : HRES code 0xa9a9a900
+dump OK
+'''
+ try:
+ actual = self.check_output(
+ "ndrdump --debug-stdout spoolss spoolss_EnumForms out --base64-input " +\
+ "--input AAAAAQAAAAAAAAAAAAEAAACpqakAAA="
+ )
+ except BlackboxProcessError as e:
+ self.fail(e)
+ self.assertEqual(actual[:len(expected_head)],
+ expected_head)
+ self.assertTrue(actual.endswith(expected_tail))
+
+ # This is a good example of a union with scalars and buffers
+ def test_ndrdump_xattr_NTACL(self):
+
+ expected_head = open(self.data_path("xattr_NTACL.txt")).read().encode('utf8')
+ expected_tail = b'''dump OK
+'''
+ try:
+ actual = self.check_output(
+ "ndrdump --debug-stdout xattr xattr_NTACL struct --hex-input %s --validate" %
+ self.data_path("xattr_NTACL.dat"))
+ except BlackboxProcessError as e:
+ self.fail(e)
+
+ self.assertEqual(actual[:len(expected_head)],
+ expected_head)
+ self.assertTrue(actual.endswith(expected_tail))
+
+ # Confirm parsing of dnsProperty records
+ def test_ndrdump_dnsp_DnssrvRpcRecord(self):
+
+ expected = open(self.data_path("dnsp-DnssrvRpcRecord.txt")).read().encode('utf8')
+ try:
+ actual = self.check_output(
+ "ndrdump --debug-stdout dnsp dnsp_DnssrvRpcRecord struct " +\
+ "--input BQAPAAXwAAC3AAAAAAADhAAAAAAAAAAAAAoBAAA= "+\
+ "--base64-input --validate")
+ except BlackboxProcessError as e:
+ self.fail(e)
+
+ self.assertEqual(actual, expected)
+
+ # Test a --validate push of a NULL union pointer
+ def test_ndrdump_fuzzed_NULL_union_PAC_BUFFER(self):
+ expected = b'''pull returned Success
+WARNING! 13 unread bytes
+[0000] F5 FF 00 3C 3C 25 FF 70 16 1F A0 12 84 ...<<%.p .....
+ PAC_BUFFER: struct PAC_BUFFER
+ type : UNKNOWN_ENUM_VALUE (1094251328)
+ _ndr_size : 0x048792c6 (75993798)
+ info : NULL
+ _pad : 0x06000000 (100663296)
+push returned Success
+pull returned Success
+ PAC_BUFFER: struct PAC_BUFFER
+ type : UNKNOWN_ENUM_VALUE (1094251328)
+ _ndr_size : 0x00000000 (0)
+ info : NULL
+ _pad : 0x00000000 (0)
+WARNING! orig bytes:29 validated pushed bytes:16
+WARNING! orig and validated differ at byte 0x04 (4)
+WARNING! orig byte[0x04] = 0xC6 validated byte[0x04] = 0x00
+-[0000] 40 F3 38 41 C6 92 87 04 00 00 00 00 00 00 00 06 @.8A.... ........
++[0000] 40 F3 38 41 00 00 00 00 00 00 00 00 00 00 00 00 @.8A.... ........
+-[0010] F5 FF 00 3C 3C 25 FF 70 16 1F A0 12 84 ...<<%.p .....
++[0010] EMPTY BLOCK
+dump OK
+'''
+ try:
+ actual = self.check_output(
+ "ndrdump --debug-stdout krb5pac PAC_BUFFER struct --validate --input " +\
+ "QPM4QcaShwQAAAAAAAAABvX/ADw8Jf9wFh+gEoQ= --base64-input")
+ except BlackboxProcessError as e:
+ self.fail(e)
+
+ self.assertEqual(actual, expected)
+
+ # Test a --validate push of a NULL struct pointer
+ def test_ndrdump_fuzzed_NULL_struct_ntlmssp_CHALLENGE_MESSAGE(self):
+ expected = open(self.data_path("fuzzed_ntlmssp-CHALLENGE_MESSAGE.txt")).read().encode('utf8')
+ try:
+ actual = self.check_output(
+ "ndrdump --debug-stdout ntlmssp CHALLENGE_MESSAGE struct --validate --input " +\
+ "'AAAACwIAAAAAJwIAAAAAAAcAAAAAAAAAAIAbhG8uyk9dAL0mQE73MAAAAAAAAAAA' --base64-input")
+ except BlackboxProcessError as e:
+ self.fail(e)
+
+ # Filter out the C source file and line number
+ regex = rb"\.\./\.\./librpc/ndr/ndr\.c:[0-9]+"
+ actual = re.sub(regex, b"", actual)
+ expected = re.sub(regex, b"", expected)
+
+ self.assertEqual(actual, expected)
+
+ # Test a print of NULL pointer in manually-written ndr_drsuapi.c
+ def test_fuzzed_drsuapi_DsGetNCChanges(self):
+ expected = open(self.data_path("fuzzed_drsuapi_DsGetNCChanges.txt"), 'rb').read()
+ try:
+ actual = self.check_output(
+ "ndrdump --debug-stdout drsuapi 3 out --base64-input --input " +\
+ "AQAAAAEAAAAGAKoAAAAGAKoGAAMAAQAAAAYAEwAAAAAAAAAA/wAAAAAAAAA/AAAAAAAAAAAAAAAAAAAAAABbAAAAAAAAAAAAAAkRAAABAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAPkAAAAAAAABAAD4BgATAAAAAAAAAAD/AAAAAAAAAD8AAAAAAAAAAAAAAAAAAAAAAFsAAAAAAAAAAAAABgAQAAEAAAABAAAAAQAAAAEAAAABAAAAAQAAAAMAAAABAAAACREAAAEAAAABAAAAAAAAAAYAEAABAAgAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAA=")
+ except BlackboxProcessError as e:
+ self.fail(e)
+
+ self.assertEqual(actual, expected)
+
+ def test_ndrdump_fuzzed_ndr_compression(self):
+ expected = r'ndr_pull_compression_start: ndr_pull_error\(Compression Error\): Bad compression algorithm 204 \(PULL\)'
+ command = (
+ "ndrdump --debug-stdout drsuapi 3 out --base64-input "
+ "--input BwAAAAcAAAAGAAAAAwAgICAgICAJAAAAICAgIAkAAAAgIAAA//////8=")
+ try:
+ actual = self.check_exit_code(command, 2)
+ except BlackboxProcessError as e:
+ self.fail(e)
+ # check_output will return bytes
+ # convert expected to bytes for python 3
+ self.assertRegex(actual.decode('utf8'), expected)
+
+ def test_ndrdump_short_dnsProperty(self):
+ expected = b'''pull returned Success
+ dnsp_DnsProperty_short: struct dnsp_DnsProperty_short
+ wDataLength : 0x00000000 (0)
+ namelength : 0x00000000 (0)
+ flag : 0x00000000 (0)
+ version : 0x00000001 (1)
+ id : DSPROPERTY_ZONE_NS_SERVERS_DA (146)
+ data : union dnsPropertyData(case 0)
+ name : 0x00000000 (0)
+dump OK
+'''
+ command = (
+ "ndrdump --debug-stdout dnsp dnsp_DnsProperty_short struct --base64-input "
+ "--input AAAAAAAAAAAAAAAAAQAAAJIAAAAAAAAA")
+ try:
+ actual = self.check_output(command)
+ except BlackboxProcessError as e:
+ self.fail(e)
+ self.assertEqual(actual, expected)
+
+ # This is compressed with Microsoft's compression, so we can't do a validate
+ def test_ndrdump_compressed_claims(self):
+ expected = open(self.data_path("compressed_claims.txt"), 'rb').read()
+
+ try:
+ actual = self.check_output(
+ "ndrdump --debug-stdout claims CLAIMS_SET_METADATA_NDR struct --hex-input --input " + \
+ "01100800cccccccc500200000000000000000200290200000400020004000000282000000000000000000000000000002902000073778788878808880700080007800800060007000700070887770780080088008870070008000808000080000000008070787787770076770867868788000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000007700080080000000870000000000000085070000000000007476800000000000750587000800000066078000000080706677880080008060878708000000008000800000000000800000000000000000000000000000000000000000000000006080080000000070000000000000000000000000000000000000000000000000fd74eaf001add6213aecf4346587eec48c323e3e1a5a32042eecf243669a581e383d2940e80e383c294463b8c0b49024f1def20df819586b086cd2ab98700923386674845663ef57e91718110c1ad4c0ac88912126d2180545e98670ea2aa002052aa54189cc318d26c46b667f18b6876262a9a4985ecdf76e5161033fd457ba020075360c837aaa3aa82749ee8152420999b553c60195be5e5c35c4330557538772972a7d527aeca1fc6b2951ca254ac83960272a930f3194892d4729eff48e48ccfb929329ff501c356c0e8ed18471ec70986c31da86a8090b4022c1db257514fdba4347532146648d4f99f9065e0d9a0d90d80f38389c39cb9ebe6d4e5e681e5a8a5418f591f1dbb7594a3f2aa3220ced1cd18cb49cffcc2ff18eef6caf443663640c5664000012000000")
+ except BlackboxProcessError as e:
+ self.fail(e)
+
+ self.assertEqual(actual, expected)
+
+ def test_ndrdump_uncompressed_claims(self):
+ expected = open(self.data_path("uncompressed_claims.txt"), 'rb').read()
+
+ try:
+ actual = self.check_output(
+ "ndrdump --debug-stdout claims CLAIMS_SET_METADATA_NDR struct --hex-input --input " + \
+ "01100800cccccccc800100000000000000000200580100000400020000000000580100000000000000000000000000005801000001100800cccccccc480100000000000000000200010000000400020000000000000000000000000001000000010000000300000008000200030000000c000200060006000100000010000200140002000300030003000000180002002800020002000200040000002c0002000b000000000000000b000000370032003000660064003300630033005f00390000000000010000000000000001000000000000000b000000000000000b000000370032003000660064003300630033005f00370000000000030000001c000200200002002400020004000000000000000400000066006f006f0000000400000000000000040000006200610072000000040000000000000004000000620061007a0000000b000000000000000b000000370032003000660064003300630033005f003800000000000400000009000a000000000007000100000000000600010000000000000001000000000000000000")
+ except BlackboxProcessError as e:
+ self.fail(e)
+
+ self.assertEqual(actual, expected)
+
+ # We can't run --validate here as currently we can't round-trip
+ # this data due to uninitialised padding in the sample
+ def test_ndrdump_claims_CLAIMS_SET_NDR(self):
+ expected = open(self.data_path("claims_CLAIMS_SET_NDR.txt"), 'rb').read()
+
+ try:
+ actual = self.check_output(
+ "ndrdump --debug-stdout claims CLAIMS_SET_NDR struct --hex-input " + \
+ self.data_path("claims_CLAIMS_SET_NDR.dat"))
+ except BlackboxProcessError as e:
+ self.fail(e)
+
+ self.assertEqual(actual, expected)
+
+ def test_ndrdump_gmsa_MANAGEDPASSWORD_BLOB(self):
+ with open(self.data_path('gmsa_MANAGEDPASSWORD_BLOB.txt'), 'rb') as f:
+ expected = f.read()
+
+ try:
+ actual = self.check_output(
+ 'ndrdump gmsa MANAGEDPASSWORD_BLOB struct '
+ '--debug-stdout --validate --hex-input --input '
+ '01000000220100001000000012011a01678657a1136e547f46ee7988c808'
+ 'd904ed0e4b0592f89eb82bd292685867c3119dd6eaaef5810a1aa4e08e49'
+ '7cc31163b2e799e6ea66e3022c100bf59585a3464274ebad2488fc28acbd'
+ '10a9b44dde436a6d35fff0e95ae7903609e825220ad30db6a86bb544fa34'
+ '0f864d2d3895193d4007df72478d71ce3f789bb139c4c1cffd6d39948c0a'
+ 'fa6a65e3f5f8f90d8c70f7272ce65a3f632793eb0e4697e576c21f36ac55'
+ 'f4167a22b4ebb2593c2d22dc4ac8d4ca455f299a182b8d4d8dd1232dde1e'
+ 'fe3acaf14b137453195f45455f5d48a0c441913b80f94d4696b171379b5a'
+ 'c3b02c501cf8e16b43beaca52263411d5cf772e763e8d29a70a1293e7218'
+ 'a1e300007495016980170000743731b67f170000'
+ )
+ except BlackboxProcessError as e:
+ self.fail(e)
+
+ self.assertEqual(actual, expected)
diff --git a/python/samba/tests/blackbox/netads_dns.py b/python/samba/tests/blackbox/netads_dns.py
new file mode 100644
index 0000000..3466344
--- /dev/null
+++ b/python/samba/tests/blackbox/netads_dns.py
@@ -0,0 +1,83 @@
+# Blackbox tests for the "net ads dns async" commands
+#
+# Copyright (C) Samuel Cabrero <scabrero@samba.org> 2022
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import re
+
+import dns.resolver
+
+from samba.tests import BlackboxTestCase, BlackboxProcessError
+
+SERVER = os.environ["DC_SERVER"]
+REALM = os.environ["REALM"]
+COMMAND = "bin/net ads"
+
+class NetAdsDnsTests(BlackboxTestCase):
+
+ def setUp(self):
+ super().setUp()
+ nameserver = os.environ["DC_SERVER_IP"]
+ # filename=None will disable reading /etc/resolv.conf. The file might
+ # not exist e.g. on build or CI systems.
+ self.resolver = dns.resolver.Resolver(filename=None)
+ self.resolver.nameservers = [nameserver]
+
+ def parse_output(self, output):
+ v4 = []
+ v6 = []
+ for line in output.split("\n"):
+ m = re.search(r'^.*IPv4addr = (.*)$', line)
+ if m:
+ v4.append(m.group(1))
+ m = re.search(r'^.*IPv6addr = (.*)$', line)
+ if m:
+ v6.append(m.group(1))
+ return (v4, v6)
+
+ def test_async_dns(self):
+ host = "%s.%s" % (SERVER, REALM)
+
+ sync_v4 = []
+ answers = self.resolver.query(host, 'A')
+ for rdata in answers:
+ sync_v4.append(rdata.address)
+ self.assertGreaterEqual(len(sync_v4), 1)
+
+ sync_v6 = []
+ answers = self.resolver.query(host, 'AAAA')
+ for rdata in answers:
+ sync_v6.append(rdata.address)
+ self.assertGreaterEqual(len(sync_v6), 1)
+
+ async_v4 = []
+ async_v6 = []
+ argv = "%s dns async %s.%s " % (COMMAND, SERVER, REALM)
+ try:
+ out = self.check_output(argv)
+ (async_v4, async_v6) = self.parse_output(out.decode('utf-8'))
+ except samba.tests.BlackboxProcessError as e:
+ self.fail("Error calling [%s]: %s" % (argv, e))
+
+ self.assertGreaterEqual(len(async_v4), 1)
+ self.assertGreaterEqual(len(async_v6), 1)
+
+ sync_v4.sort()
+ async_v4.sort()
+ self.assertStringsEqual(' '.join(sync_v4), ' '.join(async_v4))
+
+ sync_v6.sort()
+ async_v6.sort()
+ self.assertStringsEqual(' '.join(sync_v6), ' '.join(async_v6))
diff --git a/python/samba/tests/blackbox/netads_json.py b/python/samba/tests/blackbox/netads_json.py
new file mode 100644
index 0000000..706ec3f
--- /dev/null
+++ b/python/samba/tests/blackbox/netads_json.py
@@ -0,0 +1,81 @@
+# Blackbox tests for the "net ads ... --json" commands
+# Copyright (C) 2018 Intra2net AG
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import json
+import re
+
+import samba.tests
+from samba.common import get_string
+
+COMMAND = "bin/net ads"
+# extract keys from non-json version
+PLAIN_KEY_REGEX = re.compile ("^([^ \t:][^:]*):")
+
+class BaseWrapper (object):
+ """
+ Guard the base so it doesn't inherit from TestCase. This prevents it from
+ being run by unittest directly.
+ """
+
+ class NetAdsJSONTests_Base(samba.tests.BlackboxTestCase):
+ """Blackbox tests for JSON output of the net ads suite of commands."""
+ subcmd = None
+
+ def test_json_wellformed (self):
+ """The output of ``--json`` commands must parse as JSON."""
+ argv = "%s %s --json" % (COMMAND, self.subcmd)
+ try:
+ out = self.check_output(argv)
+ json.loads (get_string(out))
+ except samba.tests.BlackboxProcessError as e:
+ self.fail("Error calling [%s]: %s" % (argv, e))
+
+ def test_json_matching_entries (self):
+ """
+ The ``--json`` variants must contain the same keys as their
+ respective plain counterpart.
+
+ Does not check nested dictionaries (e. g. the ``Flags`` value of
+ ``net ads lookup``..
+ """
+ argv = "%s %s" % (COMMAND, self.subcmd)
+ try:
+ out_plain = get_string(self.check_output(argv))
+ except samba.tests.BlackboxProcessError as e:
+ self.fail("Error calling [%s]: %s" % (argv, e))
+
+ argv = "%s %s --json" % (COMMAND, self.subcmd)
+ try:
+ out_jsobj = self.check_output(argv)
+ except samba.tests.BlackboxProcessError as e:
+ self.fail("Error calling [%s]: %s" % (argv, e))
+
+ parsed = json.loads (get_string(out_jsobj))
+
+ for key in [ re.match (PLAIN_KEY_REGEX, line).group(1)
+ for line in out_plain.split ("\n")
+ if line != "" and line [0] not in " \t:" ]:
+ self.assertTrue (parsed.get (key) is not None)
+ del parsed [key]
+
+ self.assertTrue (len (parsed) == 0) # tolerate no leftovers
+
+class NetAdsJSONInfoTests(BaseWrapper.NetAdsJSONTests_Base):
+    # Run the shared JSON checks against "net ads info".
+    subcmd = "info"
+
+class NetAdsJSONlookupTests(BaseWrapper.NetAdsJSONTests_Base):
+    # Run the shared JSON checks against "net ads lookup".
+    subcmd = "lookup"
diff --git a/python/samba/tests/blackbox/rpcd_witness_samba_only.py b/python/samba/tests/blackbox/rpcd_witness_samba_only.py
new file mode 100755
index 0000000..aa81c34
--- /dev/null
+++ b/python/samba/tests/blackbox/rpcd_witness_samba_only.py
@@ -0,0 +1,1338 @@
+#!/usr/bin/env python3
+# Unix SMB/CIFS implementation.
+#
+# Copyright © 2024 Stefan Metzmacher <metze@samba.org>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import sys
+import os
+
+sys.path.insert(0, "bin/python")
+os.environ["PYTHONUNBUFFERED"] = "1"
+
+import json
+
+import samba.tests
+from samba.credentials import Credentials
+from samba.ndr import ndr_print
+from samba.dcerpc import witness
+from samba.tests import DynamicTestCase, BlackboxTestCase
+from samba.common import get_string
+from samba import werror, WERRORError
+
+@DynamicTestCase
+class RpcdWitnessSambaTests(BlackboxTestCase):
    @classmethod
    def setUpDynamicTestCases(cls):
        """Generate the dynamic per-node test matrix.

        For every node a GetInterfaceList test is created (NDR32 and
        NDR64), both with all nodes enabled (idx2 == -1) and with each
        node disabled in turn.  The ResourceChangeCTDB tests are only
        generated for connections to node 0 and a concrete disabled node.
        """
        cls.num_nodes = int(samba.tests.env_get_var_value('NUM_NODES'))

        def _define_tests(idx1, idx2, ndr64=False):
            cls._define_GetInterfaceList_test(idx1, idx2, ndr64)
            if idx1 == 0 and idx2 != -1:
                cls._define_ResourceChangeCTDB_tests(idx1, idx2, ndr64)

        for idx1 in range(0, cls.num_nodes):
            _define_tests(idx1, -1, ndr64=False)
            _define_tests(idx1, -1, ndr64=True)
            for idx2 in range(0, cls.num_nodes):
                _define_tests(idx1, idx2, ndr64=False)
                _define_tests(idx1, idx2, ndr64=True)
+
    def setUp(self):
        """Collect cluster topology and credentials from the environment
        and build a per-node list of RPC binding strings (NDR32/NDR64)."""
        super().setUp()

        # ctdb/tests/local_daemons.sh doesn't like CTDB_SOCKET to be set already
        # and it doesn't need CTDB_BASE, so we stash them away
        self.saved_CTDB_SOCKET = samba.tests.env_get_var_value('CTDB_SOCKET',
                                                               allow_missing=True)
        if self.saved_CTDB_SOCKET is not None:
            del os.environ["CTDB_SOCKET"]
        self.saved_CTDB_BASE = samba.tests.env_get_var_value('CTDB_BASE',
                                                             allow_missing=True)
        if self.saved_CTDB_BASE is not None:
            del os.environ["CTDB_BASE"]

        # index of the node currently disabled via disable_node(); -1: none
        self.disabled_idx = -1

        # set this to True in order to get verbose output
        self.verbose = False

        self.ctdb_prefix = samba.tests.env_get_var_value('CTDB_PREFIX')

        self.cluster_share = samba.tests.env_get_var_value('CLUSTER_SHARE')

        self.lp = self.get_loadparm(s3=True)
        self.remote_domain = samba.tests.env_get_var_value('DOMAIN')
        self.remote_user = samba.tests.env_get_var_value('USERNAME')
        self.remote_password = samba.tests.env_get_var_value('PASSWORD')
        self.remote_creds = Credentials()
        self.remote_creds.guess(self.lp)
        self.remote_creds.set_username(self.remote_user)
        self.remote_creds.set_domain(self.remote_domain)
        self.remote_creds.set_password(self.remote_password)

        self.server_hostname = samba.tests.env_get_var_value('SERVER_HOSTNAME')
        self.interface_group_name = samba.tests.env_get_var_value('INTERFACE_GROUP_NAME')

        # authenticated, signed connections; ",print" dumps NDR traffic
        common_binding_args = "spnego,sign,target_hostname=%s" % (
            self.server_hostname)
        if self.verbose:
            common_binding_args += ",print"

        common_binding_args32 = common_binding_args
        common_binding_args64 = common_binding_args + ",ndr64"

        self.nodes = []
        for node_idx in range(0, self.num_nodes):
            node = {}

            name_var = 'CTDB_SERVER_NAME_NODE%u' % node_idx
            node["name"] = samba.tests.env_get_var_value(name_var)

            ip_var = 'CTDB_IFACE_IP_NODE%u' % node_idx
            node["ip"] = samba.tests.env_get_var_value(ip_var)

            node["binding_string32"] = "ncacn_ip_tcp:%s[%s]" % (
                node["ip"], common_binding_args32)
            node["binding_string64"] = "ncacn_ip_tcp:%s[%s]" % (
                node["ip"], common_binding_args64)
            self.nodes.append(node)

        # lazily filled by prepare_all_registrations()
        self.all_registrations = None
+
    def tearDown(self):
        """Undo everything the test did: drop registrations, re-enable a
        possibly still-disabled node and restore the stashed environment
        variables."""
        self.destroy_all_registrations()

        if self.disabled_idx != -1:
            self.enable_node(self.disabled_idx)

        if self.saved_CTDB_SOCKET is not None:
            os.environ["CTDB_SOCKET"] = self.saved_CTDB_SOCKET
            self.saved_CTDB_SOCKET = None
        if self.saved_CTDB_BASE is not None:
            os.environ["CTDB_BASE"] = self.saved_CTDB_BASE
            self.saved_CTDB_BASE = None

        super().tearDown()
+
+ def call_onnode(self, nodes, cmd):
+ COMMAND = "ctdb/tests/local_daemons.sh"
+
+ argv = "%s '%s' onnode %s '%s'" % (COMMAND, self.ctdb_prefix, nodes, cmd)
+
+ try:
+ if self.verbose:
+ print("Calling: %s" % argv)
+ out = self.check_output(argv)
+ except samba.tests.BlackboxProcessError as e:
+ self.fail("Error calling [%s]: %s" % (argv, e))
+
+ out_str = get_string(out)
+ return out_str
+
+ def dump_ctdb_status_all(self):
+ for node_idx in range(0, self.num_nodes):
+ print("%s" % self.call_onnode(str(node_idx), "ctdb status"))
+
+ def disable_node(self, node_idx, dump_status=False):
+ if dump_status:
+ self.dump_ctdb_status_all()
+
+ self.assertEqual(self.disabled_idx, -1)
+ self.call_onnode(str(node_idx), "ctdb disable")
+ self.disabled_idx = node_idx
+
+ if dump_status:
+ self.dump_ctdb_status_all()
+
+ def enable_node(self, node_idx, dump_status=False):
+ if dump_status:
+ self.dump_ctdb_status_all()
+
+ self.assertEqual(self.disabled_idx, node_idx)
+ self.call_onnode(str(node_idx), "ctdb enable")
+ self.disabled_idx = -1
+
+ if dump_status:
+ self.dump_ctdb_status_all()
+
+ def call_net_witness_subcmd(self, subcmd,
+ as_json=False,
+ apply_to_all=False,
+ registration=None,
+ net_name=None,
+ share_name=None,
+ ip_address=None,
+ client_computer=None,
+ new_ip=None,
+ new_node=None,
+ forced_response=None):
+ COMMAND = "UID_WRAPPER_ROOT=1 bin/net witness"
+
+ argv = "%s %s" % (COMMAND, subcmd)
+ if as_json:
+ argv += " --json"
+
+ if apply_to_all:
+ argv += " --witness-apply-to-all"
+
+ if registration is not None:
+ argv += " --witness-registration='%s'" % (
+ registration.uuid)
+
+ if net_name is not None:
+ argv += " --witness-net-name='%s'" % (net_name)
+
+ if share_name is not None:
+ argv += " --witness-share-name='%s'" % (share_name)
+
+ if ip_address is not None:
+ argv += " --witness-ip-address='%s'" % (ip_address)
+
+ if client_computer is not None:
+ argv += " --witness-client-computer-name='%s'" % (client_computer)
+
+ if new_ip is not None:
+ argv += " --witness-new-ip='%s'" % (new_ip)
+
+ if new_node is not None:
+ argv += " --witness-new-node='%s'" % (new_node)
+
+ if forced_response:
+ argv += " --witness-forced-response='%s'" % (forced_response)
+
+ try:
+ if self.verbose:
+ print("Calling: %s" % argv)
+ out = self.check_output(argv)
+ except samba.tests.BlackboxProcessError as e:
+ self.fail("Error calling [%s]: %s" % (argv, e))
+
+ out_str = get_string(out)
+ if not as_json:
+ return out_str
+
+ json_out = json.loads(out_str)
+ return json_out
+
+ @classmethod
+ def _define_GetInterfaceList_test(cls, conn_idx, disable_idx, ndr64=False):
+ if disable_idx != -1:
+ disable_name = "%u_disabled" % disable_idx
+ else:
+ disable_name = "all_enabled"
+
+ if ndr64:
+ ndr_name = "NDR64"
+ else:
+ ndr_name = "NDR32"
+
+ name = "Node%u_%s_%s" % (conn_idx, disable_name, ndr_name)
+ args = {
+ 'conn_idx': conn_idx,
+ 'disable_idx': disable_idx,
+ 'ndr64': ndr64,
+ }
+ cls.generate_dynamic_test('test_GetInterfaceList', name, args)
+
    def _test_GetInterfaceList_with_args(self, args):
        """Call GetInterfaceList against one node (optionally while another
        node is disabled) and verify the returned list covers every cluster
        node with the expected state and flags."""
        conn_idx = args.pop('conn_idx')
        disable_idx = args.pop('disable_idx')
        ndr64 = args.pop('ndr64')
        self.assertEqual(len(args.keys()), 0)  # no unconsumed arguments

        conn_node = self.nodes[conn_idx]
        if ndr64:
            binding_string = conn_node["binding_string64"]
        else:
            binding_string = conn_node["binding_string32"]

        if disable_idx != -1:
            self.disable_node(disable_idx)

        conn = witness.witness(binding_string, self.lp, self.remote_creds)
        interface_list = conn.GetInterfaceList()

        # re-enable before the assertions so a failure doesn't leave the
        # node disabled (tearDown would handle it, but keep it symmetric)
        if disable_idx != -1:
            self.enable_node(disable_idx)

        self.assertIsNotNone(interface_list)
        self.assertEqual(interface_list.num_interfaces, len(self.nodes))
        for idx in range(0, interface_list.num_interfaces):
            iface = interface_list.interfaces[idx]
            node = self.nodes[idx]

            expected_flags = 0
            expected_flags |= witness.WITNESS_INFO_IPv4_VALID
            # every node except the one we are connected to is advertised
            # as a usable witness interface
            if conn_idx != idx:
                expected_flags |= witness.WITNESS_INFO_WITNESS_IF

            if disable_idx == idx:
                expected_state = witness.WITNESS_STATE_UNAVAILABLE
            else:
                expected_state = witness.WITNESS_STATE_AVAILABLE

            self.assertIsNotNone(iface.group_name)
            self.assertEqual(iface.group_name, self.interface_group_name)

            self.assertEqual(iface.version, witness.WITNESS_V2)
            self.assertEqual(iface.state, expected_state)

            self.assertIsNotNone(iface.ipv4)
            self.assertEqual(iface.ipv4, node["ip"])

            # IPv6 is unused in this setup: expect the all-zero address
            self.assertIsNotNone(iface.ipv6)
            self.assertEqual(iface.ipv6,
                             "0000:0000:0000:0000:0000:0000:0000:0000")

            self.assertEqual(iface.flags, expected_flags)
+
+ def assertResourceChanges(self, response, expected_resource_changes):
+ self.assertIsNotNone(response)
+ self.assertEqual(response.type,
+ witness.WITNESS_NOTIFY_RESOURCE_CHANGE)
+ self.assertEqual(response.num, len(expected_resource_changes))
+ self.assertEqual(len(response.messages), len(expected_resource_changes))
+ for ri in range(0, len(expected_resource_changes)):
+ expected_resource_change = expected_resource_changes[ri]
+ resource_change = response.messages[ri]
+ self.assertIsNotNone(resource_change)
+
+ expected_type = witness.WITNESS_RESOURCE_STATE_UNAVAILABLE
+ expected_type = expected_resource_change.get('type', expected_type)
+
+ expected_name = expected_resource_change.get('name')
+
+ self.assertEqual(resource_change.type, expected_type)
+ self.assertIsNotNone(resource_change.name)
+ self.assertEqual(resource_change.name, expected_name)
+
+ def assertResourceChange(self, response, expected_type, expected_name):
+ expected_resource_change = {
+ 'type': expected_type,
+ 'name': expected_name,
+ }
+ expected_resource_changes = [expected_resource_change]
+ self.assertResourceChanges(response, expected_resource_changes)
+
    def assertGenericIpLists(self, response, expected_type, expected_ip_lists):
        """Assert *response* is a notification of *expected_type* whose
        messages carry the given IP address lists.

        *expected_ip_lists* contains one list per message; each entry is a
        dict that may override 'flags' (default: IPADDR_V4|IPADDR_ONLINE),
        'ipv4' (default: '0.0.0.0') and 'ipv6' (default: the all-zero
        address).
        """
        self.assertIsNotNone(response)
        self.assertEqual(response.type, expected_type)
        self.assertEqual(response.num, len(expected_ip_lists))
        self.assertEqual(len(response.messages), len(expected_ip_lists))
        for li in range(0, len(expected_ip_lists)):

            expected_ip_list = expected_ip_lists[li]
            ip_list = response.messages[li]
            self.assertIsNotNone(ip_list)
            self.assertEqual(ip_list.num, len(expected_ip_list))

            for i in range(0, len(expected_ip_list)):
                ip_info = ip_list.addr[i]

                # defaults, individually overridable per expected entry
                expected_flags = 0
                expected_flags |= witness.WITNESS_IPADDR_V4
                expected_flags |= witness.WITNESS_IPADDR_ONLINE
                expected_flags = expected_ip_list[i].get('flags', expected_flags)

                expected_ipv4 = '0.0.0.0'
                expected_ipv4 = expected_ip_list[i].get('ipv4', expected_ipv4)

                expected_ipv6 = '0000:0000:0000:0000:0000:0000:0000:0000'
                expected_ipv6 = expected_ip_list[i].get('ipv6', expected_ipv6)

                self.assertEqual(ip_info.flags, expected_flags)

                self.assertIsNotNone(ip_info.ipv4)
                self.assertEqual(ip_info.ipv4, expected_ipv4)

                self.assertIsNotNone(ip_info.ipv6)
                self.assertEqual(ip_info.ipv6, expected_ipv6)
+
    @classmethod
    def _define_ResourceChangeCTDB_tests(cls, conn_idx, monitor_idx, ndr64=False):
        """Register the dynamic ResourceChangeCTDB test matrix for one
        (witness node, monitored node) pair.

        The scenarios combine: V1 vs V2 registrations, node-scoped vs
        share-scoped (RegisterEx with a share name), disabling the
        monitored node before or after registering, optionally re-enabling
        it, and finishing either with an explicit UnRegister or by waiting
        for WERR_TIMEOUT / WERR_NOT_FOUND.
        """
        if ndr64:
            ndr_name = "NDR64"
        else:
            ndr_name = "NDR32"

        name_suffix = "WNode%u_RNode%u_%s" % (conn_idx, monitor_idx, ndr_name)
        base_args = {
            'conn_idx': conn_idx,
            'monitor_idx': monitor_idx,
            'ndr64': ndr64,
        }

        name = "v1_disabled_after_%s" % name_suffix
        args = base_args.copy()
        args['reg_v1'] = True
        args['disable_after_reg'] = True
        args['explicit_unregister'] = False
        cls.generate_dynamic_test('test_ResourceChangeCTDB', name, args)

        name = "v1_disabled_after_enabled_after_%s" % name_suffix
        args = base_args.copy()
        args['reg_v1'] = True
        args['disable_after_reg'] = True
        args['enable_after_reg'] = True
        args['explicit_unregister'] = False
        cls.generate_dynamic_test('test_ResourceChangeCTDB', name, args)

        # short timeout so waiting for WERR_TIMEOUT doesn't stall the suite
        name = "v2_disabled_before_enable_after_%s" % name_suffix
        args = base_args.copy()
        args['disable_before_reg'] = True
        args['enable_after_reg'] = True
        args['wait_for_timeout'] = True
        args['timeout'] = 6
        cls.generate_dynamic_test('test_ResourceChangeCTDB', name, args)

        name = "v2_disabled_after_%s" % name_suffix
        args = base_args.copy()
        args['disable_after_reg'] = True
        args['wait_for_not_found'] = True
        args['explicit_unregister'] = False
        cls.generate_dynamic_test('test_ResourceChangeCTDB', name, args)

        name = "v2_disabled_after_enabled_after_%s" % name_suffix
        args = base_args.copy()
        args['disable_after_reg'] = True
        args['enable_after_reg'] = True
        args['wait_for_not_found'] = True
        args['explicit_unregister'] = False
        cls.generate_dynamic_test('test_ResourceChangeCTDB', name, args)

        name = "share_v2_disabled_before_enable_after_%s" % name_suffix
        args = base_args.copy()
        args['share_reg'] = True
        args['disable_before_reg'] = True
        args['enable_after_reg'] = True
        cls.generate_dynamic_test('test_ResourceChangeCTDB', name, args)

        name = "share_v2_disabled_after_%s" % name_suffix
        args = base_args.copy()
        args['share_reg'] = True
        args['disable_after_reg'] = True
        args['explicit_unregister'] = False
        cls.generate_dynamic_test('test_ResourceChangeCTDB', name, args)

        name = "share_v2_disabled_after_enabled_after_%s" % name_suffix
        args = base_args.copy()
        args['share_reg'] = True
        args['disable_after_reg'] = True
        args['enable_after_reg'] = True
        args['explicit_unregister'] = False
        cls.generate_dynamic_test('test_ResourceChangeCTDB', name, args)
+
    def _test_ResourceChangeCTDB_with_args(self, args):
        """Drive one ResourceChangeCTDB scenario.

        Registers against node *conn_idx* to monitor node *monitor_idx*,
        optionally disables/enables the monitored node before/after the
        registration, then checks the AsyncNotify responses (resource
        UNAVAILABLE/AVAILABLE changes, WERR_TIMEOUT or WERR_NOT_FOUND)
        and finally verifies UnRegister semantics.
        """
        conn_idx = args.pop('conn_idx')
        monitor_idx = args.pop('monitor_idx')
        ndr64 = args.pop('ndr64')
        timeout = int(args.pop('timeout', 15))
        reg_v1 = args.pop('reg_v1', False)
        share_reg = args.pop('share_reg', False)
        disable_before_reg = args.pop('disable_before_reg', False)
        disable_after_reg = args.pop('disable_after_reg', False)
        enable_after_reg = args.pop('enable_after_reg', False)
        explicit_unregister = args.pop('explicit_unregister', True)
        wait_for_not_found = args.pop('wait_for_not_found', False)
        wait_for_timeout = args.pop('wait_for_timeout', False)
        self.assertEqual(len(args.keys()), 0)  # no unconsumed arguments

        conn_node = self.nodes[conn_idx]
        if ndr64:
            binding_string = conn_node["binding_string64"]
        else:
            binding_string = conn_node["binding_string32"]
        monitor_node = self.nodes[monitor_idx]

        computer_name = "test-rpcd-witness-samba-only-client-computer"

        conn = witness.witness(binding_string, self.lp, self.remote_creds)

        if disable_before_reg:
            # mutually exclusive with disabling after registering
            self.assertFalse(disable_after_reg)
            self.disable_node(monitor_idx)

        if reg_v1:
            # V1 has no timeout and no share-scoped registration
            self.assertFalse(wait_for_timeout)
            self.assertFalse(share_reg)

            reg_context = conn.Register(witness.WITNESS_V1,
                                        self.server_hostname,
                                        monitor_node["ip"],
                                        computer_name)
        else:
            if share_reg:
                share_name = self.cluster_share
            else:
                share_name = None

            reg_context = conn.RegisterEx(witness.WITNESS_V2,
                                          self.server_hostname,
                                          share_name,
                                          monitor_node["ip"],
                                          computer_name,
                                          witness.WITNESS_REGISTER_NONE,
                                          timeout)

        if disable_after_reg:
            self.assertFalse(disable_before_reg)
            self.disable_node(monitor_idx)

        if enable_after_reg:
            self.enable_node(monitor_idx)

        if disable_after_reg:
            # first notification: the monitored resource went away
            response_unavailable = conn.AsyncNotify(reg_context)
            self.assertResourceChange(response_unavailable,
                                      witness.WITNESS_RESOURCE_STATE_UNAVAILABLE,
                                      monitor_node["ip"])

        if enable_after_reg:
            # the resource came back
            response_available = conn.AsyncNotify(reg_context)
            self.assertResourceChange(response_available,
                                      witness.WITNESS_RESOURCE_STATE_AVAILABLE,
                                      monitor_node["ip"])

        if wait_for_timeout:
            self.assertFalse(wait_for_not_found)
            self.assertFalse(disable_after_reg)
            # with no further changes AsyncNotify must hit the registration
            # timeout
            try:
                _ = conn.AsyncNotify(reg_context)
                self.fail()
            except WERRORError as e:
                (num, string) = e.args
                if num != werror.WERR_TIMEOUT:
                    raise

        if wait_for_not_found:
            self.assertFalse(wait_for_timeout)
            self.assertTrue(disable_after_reg)
            self.assertFalse(explicit_unregister)
            # the server already dropped the registration
            try:
                _ = conn.AsyncNotify(reg_context)
                self.fail()
            except WERRORError as e:
                (num, string) = e.args
                if num != werror.WERR_NOT_FOUND:
                    raise

        if not explicit_unregister:
            return

        conn.UnRegister(reg_context)

        # after UnRegister both AsyncNotify and a second UnRegister must
        # fail with WERR_NOT_FOUND
        try:
            _ = conn.AsyncNotify(reg_context)
            self.fail()
        except WERRORError as e:
            (num, string) = e.args
            if num != werror.WERR_NOT_FOUND:
                raise

        try:
            conn.UnRegister(reg_context)
            self.fail()
        except WERRORError as e:
            (num, string) = e.args
            if num != werror.WERR_NOT_FOUND:
                raise
+
    def prepare_all_registrations(self):
        """Build (but do not yet register) the full registration matrix.

        For every node and NDR transfer syntax three records are created:
        a V1 registration, a V2 registration without a share name and a V2
        registration scoped to the cluster share.  Each record keeps its
        open connection; 'context' stays None until
        open_all_registrations() is called.  Returns the list and stores
        it in self.all_registrations.
        """
        self.assertIsNone(self.all_registrations)

        regs = []
        for node_idx in range(0, self.num_nodes):
            node = self.nodes[node_idx]
            for ndr64 in [False, True]:
                if ndr64:
                    binding_string = node["binding_string64"]
                    ndr_name = "NDR64"
                else:
                    binding_string = node["binding_string32"]
                    ndr_name = "NDR32"

                conn = witness.witness(binding_string, self.lp, self.remote_creds)
                conn_ip = node["ip"]

                net_name = self.server_hostname
                ip_address = node["ip"]
                share_name = self.cluster_share
                computer_name = "test-net-witness-list-%s-%s" % (
                    node_idx, ndr_name)
                flags = witness.WITNESS_REGISTER_NONE
                timeout = 15

                # V1: no share scoping; flags/timeout are not part of the
                # protocol, recorded as 0
                reg_version = witness.WITNESS_V1
                reg = {
                    'node_idx': node_idx,
                    'ndr64': ndr64,
                    'binding_string': binding_string,
                    'conn_ip': conn_ip,
                    'version': reg_version,
                    'net_name': net_name,
                    'share_name': None,
                    'ip_address': ip_address,
                    'computer_name': computer_name,
                    'flags': 0,
                    'timeout': 0,
                    'conn': conn,
                    'context': None,
                }
                regs.append(reg)

                # V2 without a share name
                reg_version = witness.WITNESS_V2
                reg = {
                    'node_idx': node_idx,
                    'ndr64': ndr64,
                    'binding_string': binding_string,
                    'conn_ip': conn_ip,
                    'version': reg_version,
                    'net_name': net_name,
                    'share_name': None,
                    'ip_address': ip_address,
                    'computer_name': computer_name,
                    'flags': flags,
                    'timeout': timeout,
                    'conn': conn,
                    'context': None,
                }
                regs.append(reg)

                # V2 scoped to the cluster share
                reg = {
                    'node_idx': node_idx,
                    'ndr64': ndr64,
                    'binding_string': binding_string,
                    'conn_ip': conn_ip,
                    'version': reg_version,
                    'net_name': net_name,
                    'share_name': share_name,
                    'ip_address': ip_address,
                    'computer_name': computer_name,
                    'flags': flags,
                    'timeout': timeout,
                    'conn': conn,
                    'context': None,
                }
                regs.append(reg)

        self.all_registrations = regs
        return regs
+
+ def close_all_registrations(self):
+ self.assertIsNotNone(self.all_registrations)
+
+ for reg in self.all_registrations:
+ conn = reg['conn']
+ reg_context = reg['context']
+ if reg_context is not None:
+ conn.UnRegister(reg_context)
+ reg_context = None
+ reg['context'] = reg_context
+
+ def open_all_registrations(self):
+ self.assertIsNotNone(self.all_registrations)
+
+ for reg in self.all_registrations:
+ conn = reg['conn']
+ reg_context = reg['context']
+ self.assertIsNone(reg_context)
+
+ reg_version = reg['version']
+ if reg_version == witness.WITNESS_V1:
+ reg_context = conn.Register(reg_version,
+ reg['net_name'],
+ reg['ip_address'],
+ reg['computer_name'])
+ elif reg_version == witness.WITNESS_V2:
+ reg_context = conn.RegisterEx(reg_version,
+ reg['net_name'],
+ reg['share_name'],
+ reg['ip_address'],
+ reg['computer_name'],
+ reg['flags'],
+ reg['timeout'])
+ self.assertIsNotNone(reg_context)
+ reg['context'] = reg_context
+
+ def destroy_all_registrations(self):
+ if self.all_registrations is None:
+ return
+
+ for reg in self.all_registrations:
+ conn = reg['conn']
+ reg_context = reg['context']
+ if reg_context is not None:
+ conn.UnRegister(reg_context)
+ reg_context = None
+ reg['context'] = reg_context
+ conn = None
+ reg['conn'] = conn
+
+ self.all_registrations = None
+
    def assertJsonReg(self, json_reg, reg):
        """Validate one registration entry of the 'net witness --json'
        output against the locally recorded registration *reg*.

        The explicit key-count checks make sure no unexpected fields are
        added silently.
        """
        self.assertEqual(json_reg['version'], "0x%08x" % reg['version'])
        self.assertEqual(json_reg['net_name'], reg['net_name'])
        if reg['share_name']:
            self.assertEqual(json_reg['share_name'], reg['share_name'])
        else:
            self.assertIsNone(json_reg['share_name'])
        self.assertEqual(json_reg['client_computer_name'], reg['computer_name'])

        self.assertIn('flags', json_reg)
        json_flags = json_reg['flags']
        if reg['flags'] & witness.WITNESS_REGISTER_IP_NOTIFICATION:
            expected_ip_notifaction = True
        else:
            expected_ip_notifaction = False
        self.assertEqual(json_flags['WITNESS_REGISTER_IP_NOTIFICATION'],
                         expected_ip_notifaction)
        self.assertEqual(json_flags['int'], reg['flags'])
        self.assertEqual(json_flags['hex'], "0x%08x" % reg['flags'])
        self.assertEqual(len(json_flags.keys()), 3)

        self.assertEqual(json_reg['timeout'], reg['timeout'])

        self.assertIn('context_handle', json_reg)
        json_context = json_reg['context_handle']
        self.assertEqual(json_context['uuid'], str(reg['context'].uuid))
        self.assertEqual(json_context['handle_type'], reg['context'].handle_type)
        self.assertEqual(len(json_context.keys()), 2)

        self.assertIn('server_id', json_reg)
        json_server_id = json_reg['server_id']
        # pid/task_id/unique_id are runtime values; only presence is checked
        self.assertIn('pid', json_server_id)
        self.assertIn('task_id', json_server_id)
        self.assertEqual(json_server_id['vnn'], reg['node_idx'])
        self.assertIn('unique_id', json_server_id)
        self.assertEqual(len(json_server_id.keys()), 4)

        self.assertIn('auth', json_reg)
        json_auth = json_reg['auth']
        self.assertEqual(json_auth['account_name'], self.remote_user)
        self.assertEqual(json_auth['domain_name'], self.remote_domain)
        self.assertIn('account_sid', json_auth)
        self.assertEqual(len(json_auth.keys()), 3)

        self.assertIn('connection', json_reg)
        json_conn = json_reg['connection']
        self.assertIn('local_address', json_conn)
        self.assertIn(reg['conn_ip'], json_conn['local_address'])
        self.assertIn('remote_address', json_conn)
        self.assertEqual(len(json_conn.keys()), 2)

        self.assertIn('registration_time', json_reg)

        self.assertEqual(len(json_reg.keys()), 12)
+
+ def max_common_prefix(self, strings):
+ if len(strings) == 0:
+ return ""
+
+ def string_match_len(s1, s2):
+ idx = 0
+ for i in range(0, min(len(s1), len(s2))):
+ c1 = s1[i:i+1]
+ c2 = s2[i:i+1]
+ if c1 != c2:
+ break
+ idx = i
+ return idx
+
+ prefix = None
+ for s in strings:
+ if prefix is None:
+ prefix = s
+ continue
+ l = string_match_len(prefix, s)
+ prefix = prefix[0:l+1]
+
+ return prefix
+
    def check_net_witness_output(self,
                                 cmd,
                                 regs,
                                 apply_to_all=False,
                                 registration_idx=None,
                                 net_name=None,
                                 share_name=None,
                                 ip_address=None,
                                 client_computer=None,
                                 new_ip=None,
                                 new_node=None,
                                 forced_response=None,
                                 expected_msg_type=None,
                                 callback=None):
        """Run 'net witness <cmd>' twice — plain and with --json — and
        verify both outputs list exactly the registrations in *regs*.

        Registrations are (re)opened before and closed after each of the
        two invocations; *callback* (if given) is invoked per matched
        registration, e.g. to consume the async notification the command
        triggered.  *registration_idx* selects one entry of *regs* whose
        context handle is passed as --witness-registration filter.
        """
        # --- plain-text invocation ---
        self.open_all_registrations()
        if registration_idx is not None:
            registration = regs[registration_idx]['context']
            self.assertIsNotNone(registration)
        else:
            registration = None

        plain_res = self.call_net_witness_subcmd(cmd,
                                                 apply_to_all=apply_to_all,
                                                 registration=registration,
                                                 net_name=net_name,
                                                 share_name=share_name,
                                                 ip_address=ip_address,
                                                 client_computer=client_computer,
                                                 new_ip=new_ip,
                                                 new_node=new_node,
                                                 forced_response=forced_response)
        if self.verbose:
            print("%s" % plain_res)
        plain_lines = plain_res.splitlines()

        # plain output: optional message line, 2 header lines, then one
        # line per registration
        num_headlines = 2
        if expected_msg_type:
            num_headlines += 1
        self.assertEqual(len(plain_lines), num_headlines+len(regs))
        if expected_msg_type:
            self.assertIn(expected_msg_type, plain_lines[0])
        plain_lines = plain_lines[num_headlines:]
        self.assertEqual(len(plain_lines), len(regs))

        for reg in regs:
            reg_uuid = reg['context'].uuid

            # column layout must match the 'net witness' table output
            expected_line = "%-36s " % reg_uuid
            expected_line += "%-20s " % reg['net_name']
            if reg['share_name']:
                expected_share = reg['share_name']
            else:
                expected_share = "''"
            expected_line += "%-15s " % expected_share
            expected_line += "%-20s " % reg['ip_address']
            expected_line += "%s" % reg['computer_name']

            # exactly one line must start with this registration's UUID
            line = None
            for l in plain_lines:
                if not l.startswith(str(reg_uuid)):
                    continue
                self.assertIsNone(line)
                line = l
                self.assertEqual(line, expected_line)
            self.assertIsNotNone(line)

            if callback is not None:
                callback(reg)

        self.close_all_registrations()

        # --- JSON invocation (fresh registrations, fresh uuids) ---
        self.open_all_registrations()
        if registration_idx is not None:
            registration = regs[registration_idx]['context']
            self.assertIsNotNone(registration)
        else:
            registration = None

        json_res = self.call_net_witness_subcmd(cmd,
                                                as_json=True,
                                                apply_to_all=apply_to_all,
                                                registration=registration,
                                                net_name=net_name,
                                                share_name=share_name,
                                                ip_address=ip_address,
                                                client_computer=client_computer,
                                                new_ip=new_ip,
                                                new_node=new_node,
                                                forced_response=forced_response)

        # the 'filters' object must echo exactly the options we passed
        num_filters = 0
        if apply_to_all:
            num_filters += 1
        if registration:
            num_filters += 1
        if net_name:
            num_filters += 1
        if share_name:
            num_filters += 1
        if ip_address:
            num_filters += 1
        if client_computer:
            num_filters += 1

        num_toplevel = 2
        if expected_msg_type:
            num_toplevel += 1

        self.assertIn('filters', json_res);
        if expected_msg_type:
            self.assertIn('message', json_res);
        self.assertIn('registrations', json_res);
        self.assertEqual(len(json_res.keys()), num_toplevel)

        json_filters = json_res['filters']
        self.assertEqual(len(json_filters.keys()), num_filters)

        if apply_to_all:
            self.assertTrue(json_filters['--witness-apply-to-all'])

        if registration:
            self.assertEqual(json_filters['--witness-registration'],
                             str(registration.uuid))
        if net_name:
            self.assertEqual(json_filters['--witness-net-name'],
                             net_name)
        if share_name:
            self.assertEqual(json_filters['--witness-share-name'],
                             share_name)
        if ip_address:
            self.assertEqual(json_filters['--witness-ip-address'],
                             ip_address)
        if client_computer:
            self.assertEqual(json_filters['--witness-client-computer-name'],
                             client_computer)
        if expected_msg_type:
            # the 'message' object describes the pushed notification
            json_message = json_res['message']
            num_sub = 1
            self.assertEqual(json_message['type'], expected_msg_type);

            if new_ip is not None:
                num_sub += 1
                self.assertEqual(json_message['new_ip'], new_ip);
            elif new_node == -1:
                num_sub += 1
                self.assertTrue(json_message['all_nodes'])
            elif new_node is not None:
                num_sub += 1
                self.assertEqual(json_message['new_node'], new_node)
            if forced_response is not None:
                num_sub += 1
                forced_response_json = json.loads(str(forced_response))
                self.assertDictEqual(json_message['json'], forced_response_json)

            self.assertEqual(len(json_message.keys()), num_sub)

        json_regs = json_res['registrations']
        self.assertEqual(len(json_regs.keys()), len(regs))

        for reg in regs:
            reg_uuid = reg['context'].uuid

            self.assertIn(str(reg_uuid), json_regs)
            json_reg = json_regs[str(reg_uuid)]
            self.assertJsonReg(json_reg, reg)

            if callback is not None:
                callback(reg)

        self.close_all_registrations()
+
    def check_combinations(self, check_func, only_shares=False):
        """Exercise *check_func* with many filter combinations.

        Prepares the full registration matrix, groups it by share name,
        ip address and computer name, and calls *check_func* with exact
        values, regex patterns built from common prefixes, wildcard
        patterns and single-registration filters.  With *only_shares*
        only share-scoped registrations are considered.
        """
        all_regs = self.prepare_all_registrations()

        # group registrations by share name (None => no_share_name_regs)
        share_name_regs = {}
        all_share_name_regs = []
        no_share_name_regs = []
        for reg in all_regs:
            if reg['share_name'] is not None:
                if reg['share_name'] not in share_name_regs:
                    share_name_regs[reg['share_name']] = []
                share_name_regs[reg['share_name']].append(reg)
                all_share_name_regs.append(reg)
            else:
                no_share_name_regs.append(reg)

        if only_shares:
            all_regs = all_share_name_regs
            no_share_name_regs = []

        # group by ip address and by client computer name
        ip_address_regs = {}
        computer_name_regs = {}
        for reg in all_regs:
            if reg['ip_address'] not in ip_address_regs:
                ip_address_regs[reg['ip_address']] = []
            ip_address_regs[reg['ip_address']].append(reg)

            if reg['computer_name'] not in computer_name_regs:
                computer_name_regs[reg['computer_name']] = []
            computer_name_regs[reg['computer_name']].append(reg)

        # alternation patterns and common prefixes for regex filters
        all_share_names = '|'.join(share_name_regs.keys())
        common_share_name = self.max_common_prefix(share_name_regs.keys())
        all_ip_addresses = '|'.join(ip_address_regs.keys())
        common_ip_address = self.max_common_prefix(ip_address_regs.keys())
        all_computer_names = '|'.join(computer_name_regs.keys())
        common_computer_name = self.max_common_prefix(computer_name_regs.keys())

        check_func(all_regs,
                   apply_to_all=True)
        check_func(all_regs,
                   net_name=self.server_hostname)
        check_func(all_regs,
                   ip_address=all_ip_addresses)
        check_func(all_regs,
                   client_computer=all_computer_names)
        check_func(all_regs,
                   net_name=self.server_hostname,
                   ip_address=all_ip_addresses,
                   client_computer=all_computer_names)
        check_func(all_regs,
                   net_name='.*',
                   share_name='.*',
                   ip_address='.*',
                   client_computer='.*')
        check_func(all_regs,
                   share_name='^$|%s.*' % common_share_name,
                   ip_address='%s.*' % common_ip_address,
                   client_computer='%s.*' % common_computer_name)
        check_func(all_share_name_regs,
                   share_name=all_share_names)
        check_func(all_share_name_regs,
                   share_name='%s.*' % common_share_name)
        check_func(no_share_name_regs,
                   share_name='^$')

        # per-group exact filters
        for share_name in share_name_regs.keys():
            regs = share_name_regs[share_name]
            check_func(regs, share_name=share_name)

        for ip_address in ip_address_regs.keys():
            regs = ip_address_regs[ip_address]
            check_func(regs, ip_address=ip_address)

        for computer_name in computer_name_regs.keys():
            regs = computer_name_regs[computer_name]
            check_func(regs, client_computer=computer_name)

        # single-registration filters, by handle and by full match
        for reg in all_regs:
            regs = [reg]
            check_func(regs,
                       registration_idx=0)
            check_func(regs,
                       registration_idx=0,
                       net_name=reg['net_name'],
                       share_name=reg['share_name'],
                       ip_address=reg['ip_address'],
                       client_computer=reg['computer_name'])
+
    def test_net_witness_list(self):
        """'net witness list' must report the expected registrations for
        every filter combination."""
        def check_list(regs,
                       apply_to_all=False,
                       registration_idx=None,
                       net_name=None,
                       share_name=None,
                       ip_address=None,
                       client_computer=None):
            # --witness-apply-to-all is not needed for 'list'
            apply_to_all = None
            return self.check_net_witness_output('list',
                                                 regs,
                                                 apply_to_all=apply_to_all,
                                                 registration_idx=registration_idx,
                                                 net_name=net_name,
                                                 share_name=share_name,
                                                 ip_address=ip_address,
                                                 client_computer=client_computer)

        self.check_combinations(check_list)
+
    def _test_net_witness_generic_move(self,
                                       move_cmd,
                                       msg_type_prefix,
                                       msg_type):
        """Shared driver for 'net witness client-move'/'share-move'.

        For every filter combination and every destination (--witness-new-ip
        with each node's address, --witness-new-node with each index and
        with -1 meaning all nodes) the command is run and each affected
        registration must receive a matching move notification via
        AsyncNotify.
        """
        def _check_generic_move(regs,
                                apply_to_all=False,
                                registration_idx=None,
                                net_name=None,
                                share_name=None,
                                ip_address=None,
                                client_computer=None,
                                new_ip=None,
                                new_node=None):

            # the reported message type depends on the destination kind
            if new_ip:
                expected_msg_type = "%s_IPV4" % msg_type_prefix
            else:
                expected_msg_type = "%s_NODE" % msg_type_prefix

            # expected destination list in the notification: either the
            # explicit ip, all node ips (new_node == -1) or one node's ip
            expected_ip_list = []
            if new_ip:
                ip = { 'ipv4': str(new_ip), }
                expected_ip_list.append(ip)
            if new_node == -1:
                for node_idx in range(0, len(self.nodes)):
                    node = self.nodes[node_idx]
                    ip = { 'ipv4': str(node['ip']), }
                    expected_ip_list.append(ip)
            elif new_node is not None:
                node = self.nodes[new_node]
                ip = { 'ipv4': str(node['ip']), }
                expected_ip_list.append(ip)

            expected_ip_lists = [expected_ip_list]

            def check_generic_move_response(reg):
                conn = reg['conn']
                reg_context = reg['context']
                response = conn.AsyncNotify(reg_context)
                self.assertGenericIpLists(response, msg_type, expected_ip_lists)

            return self.check_net_witness_output(move_cmd,
                                                 regs,
                                                 apply_to_all=apply_to_all,
                                                 registration_idx=registration_idx,
                                                 net_name=net_name,
                                                 share_name=share_name,
                                                 ip_address=ip_address,
                                                 client_computer=client_computer,
                                                 new_ip=new_ip,
                                                 new_node=new_node,
                                                 expected_msg_type=expected_msg_type,
                                                 callback=check_generic_move_response)

        def check_generic_move(regs,
                               apply_to_all=False,
                               registration_idx=None,
                               net_name=None,
                               share_name=None,
                               ip_address=None,
                               client_computer=None):
            _check_generic_move(regs,
                                apply_to_all=apply_to_all,
                                registration_idx=registration_idx,
                                net_name=net_name,
                                share_name=share_name,
                                ip_address=ip_address,
                                client_computer=client_computer,
                                new_node=-1)

            for node_idx in range(0, len(self.nodes)):
                node = self.nodes[node_idx]

                _check_generic_move(regs,
                                    apply_to_all=apply_to_all,
                                    registration_idx=registration_idx,
                                    net_name=net_name,
                                    share_name=share_name,
                                    ip_address=ip_address,
                                    client_computer=client_computer,
                                    new_node=node_idx)
                _check_generic_move(regs,
                                    apply_to_all=apply_to_all,
                                    registration_idx=registration_idx,
                                    net_name=net_name,
                                    share_name=share_name,
                                    ip_address=ip_address,
                                    client_computer=client_computer,
                                    new_ip=node['ip'])

        # NOTE(review): only_shares is unbound for any other msg_type;
        # callers only ever pass CLIENT_MOVE or SHARE_MOVE.
        if msg_type == witness.WITNESS_NOTIFY_CLIENT_MOVE:
            only_shares = False
        elif msg_type == witness.WITNESS_NOTIFY_SHARE_MOVE:
            only_shares = True

        self.check_combinations(check_generic_move, only_shares=only_shares)
+
+ def test_net_witness_client_move(self):
+ self._test_net_witness_generic_move('client-move',
+ 'CLIENT_MOVE_TO',
+ witness.WITNESS_NOTIFY_CLIENT_MOVE)
+ def test_net_witness_share_move(self):
+ self._test_net_witness_generic_move('share-move',
+ 'SHARE_MOVE_TO',
+ witness.WITNESS_NOTIFY_SHARE_MOVE)
+
+    def test_net_witness_force_unregister(self):
+        """Check 'net witness force-unregister' invalidates registrations."""
+        def check_force_unregister(regs,
+                                   apply_to_all=False,
+                                   registration_idx=None,
+                                   net_name=None,
+                                   share_name=None,
+                                   ip_address=None,
+                                   client_computer=None):
+            def check_force_unregister_happened(reg):
+                # After the forced unregistration the server must have
+                # forgotten the context: UnRegister has to fail with
+                # WERR_NOT_FOUND; any other error is re-raised.
+                conn = reg['conn']
+                reg_context = reg['context']
+                self.assertIsNotNone(reg_context)
+                try:
+                    conn.UnRegister(reg_context)
+                    self.fail()
+                except WERRORError as e:
+                    (num, string) = e.args
+                    if num != werror.WERR_NOT_FOUND:
+                        raise
+                # Mark the registration as gone.
+                reg['context'] = None
+
+            return self.check_net_witness_output("force-unregister",
+                                                 regs,
+                                                 apply_to_all=apply_to_all,
+                                                 registration_idx=registration_idx,
+                                                 net_name=net_name,
+                                                 share_name=share_name,
+                                                 ip_address=ip_address,
+                                                 client_computer=client_computer,
+                                                 expected_msg_type="FORCE_UNREGISTER",
+                                                 callback=check_force_unregister_happened)
+
+        self.check_combinations(check_force_unregister)
+
+    def _test_net_witness_force_response(self,
+                                         msg_type=None,
+                                         expected_resource_changes=None,
+                                         expected_ip_lists=None):
+        """Common driver for the 'net witness force-response' tests.
+
+        Builds a JSON document describing a forced response of *msg_type*
+        (carrying either resource changes or IP lists), injects it via
+        'net witness force-response', and verifies that AsyncNotify then
+        delivers exactly that payload.
+        """
+        def check_force_response(regs,
+                                 apply_to_all=False,
+                                 registration_idx=None,
+                                 net_name=None,
+                                 share_name=None,
+                                 ip_address=None,
+                                 client_computer=None):
+            # Message types whose payload is a list of IP-address lists.
+            move_types = [
+                witness.WITNESS_NOTIFY_CLIENT_MOVE,
+                witness.WITNESS_NOTIFY_SHARE_MOVE,
+                witness.WITNESS_NOTIFY_IP_CHANGE,
+            ]
+
+            # Hand-build the JSON string passed to the tool.  The
+            # prefix_d1/_d2/_d3 variables carry the element separator for
+            # each nesting depth: empty before the first element of a
+            # sequence, ", " before every following one.
+            forced_response = '{ '
+            forced_response += '"result": 0, '
+            forced_response += '"response": { '
+            forced_response += '"type": %u, ' % msg_type
+            forced_response += '"messages": [ '
+            if msg_type == witness.WITNESS_NOTIFY_RESOURCE_CHANGE:
+                prefix_d1 = ""
+                for rc in expected_resource_changes:
+                    forced_response += prefix_d1
+                    forced_response += '{ '
+                    prefix_d2 = ""
+                    if 'type' in rc:
+                        forced_response += prefix_d2
+                        forced_response += '"type": %u ' % rc['type']
+                        prefix_d2 = ", "
+                    if 'name' in rc:
+                        forced_response += prefix_d2
+                        forced_response += '"name": "%s" ' % rc['name']
+                        prefix_d2 = ", "
+                    forced_response += '} '
+                    prefix_d1 = ", "
+            if msg_type in move_types:
+                prefix_d1 = ""
+                for ip_list in expected_ip_lists:
+                    forced_response += prefix_d1
+                    forced_response += '['
+                    prefix_d2 = ""
+                    for ip in ip_list:
+                        forced_response += prefix_d2
+                        forced_response += '{ '
+                        prefix_d3 = ""
+                        if 'flags' in ip:
+                            forced_response += prefix_d3
+                            forced_response += '"flags": %u' % ip['flags']
+                            prefix_d3 = ", "
+                        if 'ipv4' in ip:
+                            forced_response += prefix_d3
+                            forced_response += '"ipv4": "%s" ' % ip['ipv4']
+                            prefix_d3 = ", "
+                        if 'ipv6' in ip:
+                            forced_response += prefix_d3
+                            forced_response += '"ipv6": "%s" ' % ip['ipv6']
+                            prefix_d3 = ", "
+                        forced_response += '}'
+                        prefix_d2 = ", "
+                    forced_response += ']'
+                    prefix_d1 = ", "
+            forced_response += ']'
+            forced_response += '}'
+            forced_response += '}'
+
+            def check_forced_response_result(reg):
+                # The notification delivered via AsyncNotify must match the
+                # payload forced above.
+                conn = reg['conn']
+                reg_context = reg['context']
+                response = conn.AsyncNotify(reg_context)
+                if msg_type == witness.WITNESS_NOTIFY_RESOURCE_CHANGE:
+                    self.assertResourceChanges(response, expected_resource_changes)
+                if msg_type in move_types:
+                    self.assertGenericIpLists(response, msg_type, expected_ip_lists)
+
+            return self.check_net_witness_output("force-response",
+                                                 regs,
+                                                 apply_to_all=apply_to_all,
+                                                 registration_idx=registration_idx,
+                                                 net_name=net_name,
+                                                 share_name=share_name,
+                                                 ip_address=ip_address,
+                                                 client_computer=client_computer,
+                                                 forced_response=forced_response,
+                                                 expected_msg_type="FORCE_RESPONSE",
+                                                 callback=check_forced_response_result)
+
+        self.check_combinations(check_force_response)
+
+ def test_net_witness_force_response_resource_changes(self):
+ msg_type = witness.WITNESS_NOTIFY_RESOURCE_CHANGE
+ expected_resource_changes = [
+ {
+ 'type': witness.WITNESS_RESOURCE_STATE_UNAVAILABLE,
+ 'name': "some-resource-name"
+ },
+ {
+ 'type': witness.WITNESS_RESOURCE_STATE_AVAILABLE,
+ 'name': "other-resource-name"
+ },
+ ]
+ self._test_net_witness_force_response(msg_type=msg_type,
+ expected_resource_changes=expected_resource_changes)
+
+    def _test_net_witness_force_response_generic_moves(self, msg_type):
+        """Force a move/IP-change response carrying two fixed IP lists."""
+        # Flags for the first address of each list: IPv4 present and online.
+        expected_flags = 0
+        expected_flags |= witness.WITNESS_IPADDR_V4
+        expected_flags |= witness.WITNESS_IPADDR_ONLINE
+
+        expected_ip_list10 = [
+            {
+                'flags': expected_flags,
+                'ipv4': '10.0.10.1',
+            },
+            {
+                'flags': 0,
+                'ipv4': '10.0.10.2',
+                'ipv6': 'fd00:0000:0000:0000:0010:0000:0010:0002',
+            },
+        ]
+        expected_ip_list20 = [
+            {
+                'flags': expected_flags,
+                'ipv4': '10.0.20.1',
+            },
+            {
+                'flags': 0,
+                'ipv4': '10.0.20.2',
+                'ipv6': 'fd00:0000:0000:0000:0010:0000:0020:0002',
+            },
+        ]
+
+        expected_ip_lists = [expected_ip_list10, expected_ip_list20]
+        self._test_net_witness_force_response(msg_type=msg_type,
+                                              expected_ip_lists=expected_ip_lists)
+
+ def test_net_witness_force_response_client_moves(self):
+ msg_type = witness.WITNESS_NOTIFY_CLIENT_MOVE
+ self._test_net_witness_force_response_generic_moves(msg_type)
+
+ def test_net_witness_force_response_share_moves(self):
+ msg_type = witness.WITNESS_NOTIFY_SHARE_MOVE
+ self._test_net_witness_force_response_generic_moves(msg_type)
+
+ def test_net_witness_force_response_ip_changes(self):
+ msg_type = witness.WITNESS_NOTIFY_IP_CHANGE
+ self._test_net_witness_force_response_generic_moves(msg_type)
+
+if __name__ == "__main__":
+    # Allow running this test module standalone.
+    import unittest
+    unittest.main()
diff --git a/python/samba/tests/blackbox/samba_dnsupdate.py b/python/samba/tests/blackbox/samba_dnsupdate.py
new file mode 100644
index 0000000..e326fbb
--- /dev/null
+++ b/python/samba/tests/blackbox/samba_dnsupdate.py
@@ -0,0 +1,125 @@
+# Blackbox tests for "samba_dnsupdate" command
+# Copyright (C) Kamen Mazdrashki <kamenim@samba.org> 2011
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2015
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import samba.tests
+from io import StringIO
+from samba.common import get_string
+from samba.netcmd.main import samba_tool
+from samba.credentials import Credentials
+from samba.auth import system_session
+from samba.samdb import SamDB
+import ldb
+import shutil
+
+
+class SambaDnsUpdateTests(samba.tests.BlackboxTestCase):
+    """Blackbox test case for samba_dnsupdate."""
+
+    def setUp(self):
+        # Prime the DNS entries; a failing first run is tolerated since the
+        # individual tests verify the final state themselves.
+        self.server_ip = samba.tests.env_get_var_value("DNS_SERVER_IP")
+        super().setUp()
+        try:
+            out = self.check_output("samba_dnsupdate --verbose")
+            self.assertTrue(b"Looking for DNS entry" in out, out)
+        except samba.tests.BlackboxProcessError:
+            pass
+
+    def test_samba_dnsupate_no_change(self):
+        # NOTE(review): 'dnsupate' is a typo but renaming would change the
+        # test id, so it is kept.
+        try:
+            out = self.check_output("samba_dnsupdate --verbose")
+        except samba.tests.BlackboxProcessError as e:
+            self.fail("Error calling samba_dnsupdate: %s" % e)
+        self.assertTrue(b"No DNS updates needed" in out, out)
+
+    def test_samba_dnsupate_set_ip(self):
+        # A forced (bogus) current IP should make updates necessary; this
+        # first pass is best effort, the checks below are authoritative.
+        try:
+            out = self.check_output("samba_dnsupdate --verbose --current-ip=10.0.0.1")
+            self.assertTrue(b" DNS updates and" in out, out)
+            self.assertTrue(b" DNS deletes needed" in out, out)
+        except samba.tests.BlackboxProcessError:
+            pass
+
+        # Apply the updates via nsupdate; afterwards nothing may be pending.
+        try:
+            out = self.check_output("samba_dnsupdate --verbose --use-nsupdate --current-ip=10.0.0.1")
+        except samba.tests.BlackboxProcessError as e:
+            self.fail("Error calling samba_dnsupdate: %s" % e)
+
+        self.assertTrue(b"No DNS updates needed" in out, out)
+        # Same exercise through the samba-tool/RPC path.
+        try:
+            rpc_out = self.check_output("samba_dnsupdate --verbose --use-samba-tool --rpc-server-ip=%s" % self.server_ip)
+        except samba.tests.BlackboxProcessError as e:
+            self.fail("Error calling samba_dnsupdate: %s" % e)
+
+        self.assertTrue(b" DNS updates and" in rpc_out, rpc_out)
+        self.assertTrue(b" DNS deletes needed" in rpc_out, rpc_out)
+        out = self.check_output("samba_dnsupdate --verbose")
+        self.assertTrue(b"No DNS updates needed" in out, out + rpc_out)
+
+    def test_add_new_uncovered_site(self):
+        """A newly covered site must show up in the generated DNS updates."""
+        site_name = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+
+        # Clear out any existing site (best effort, result ignored)
+        result = samba_tool('sites', 'remove', site_name,
+                            outf=StringIO(),
+                            errf=StringIO())
+
+        result = samba_tool('sites', 'create', site_name,
+                            outf=StringIO(),
+                            errf=StringIO())
+
+        # a None result is treated as success here
+        if result is not None:
+            self.fail("Error creating new site")
+
+        self.lp = samba.tests.env_loadparm()
+        self.creds = Credentials()
+        self.creds.guess(self.lp)
+        self.session = system_session()
+        # Work on a copy of the dns_update_cache so the real cache file
+        # stays untouched.
+        uc_fn = self.lp.private_path('dns_update_cache')
+        tmp_uc = uc_fn + '_tmp'
+        shutil.copyfile(uc_fn, tmp_uc)
+
+        self.samdb = SamDB(session_info=self.session,
+                           credentials=self.creds,
+                           lp=self.lp)
+
+        # Prepare the modification linking the new site into
+        # DEFAULTIPSITELINK (applied further below).
+        m = ldb.Message()
+        m.dn = ldb.Dn(self.samdb, 'CN=DEFAULTIPSITELINK,CN=IP,'
+                      'CN=Inter-Site Transports,CN=Sites,{0}'.format(
+                          self.samdb.get_config_basedn()))
+        m['siteList'] = ldb.MessageElement("CN={0},CN=Sites,{1}".format(
+            site_name,
+            self.samdb.get_config_basedn()),
+            ldb.FLAG_MOD_ADD, "siteList")
+
+        dns_c = "samba_dnsupdate --verbose --use-file={0}".format(tmp_uc)
+        out = get_string(self.check_output(dns_c))
+        # Before the modification the site must not appear ...
+        self.assertNotIn(site_name.lower(), out)
+
+        self.samdb.modify(m)
+
+        shutil.copyfile(uc_fn, tmp_uc)
+        out = get_string(self.check_output(dns_c))
+
+        # ... afterwards updates are needed and the site is mentioned.
+        self.assertNotIn("No DNS updates needed", out)
+        self.assertIn(site_name.lower(), out)
+
+        result = samba_tool('sites', 'remove', site_name,
+                            outf=StringIO(),
+                            errf=StringIO())
+        if result is not None:
+            self.fail("Error deleting site")
diff --git a/python/samba/tests/blackbox/smbcacls.py b/python/samba/tests/blackbox/smbcacls.py
new file mode 100644
index 0000000..dd84f52
--- /dev/null
+++ b/python/samba/tests/blackbox/smbcacls.py
@@ -0,0 +1,148 @@
+# Blackbox tests for smbcacls
+#
+# Copyright (C) Noel Power noel.power@suse.com
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+from samba.tests import BlackboxTestCase, BlackboxProcessError
+from samba.samba3 import param as s3param
+
+from samba.credentials import Credentials
+
+import os
+
+class SmbCaclsBlockboxTestBase(BlackboxTestCase):
+
+ def setUp(self):
+ super().setUp()
+ self.lp = s3param.get_context()
+ self.server = os.environ["SERVER"]
+ self.user = os.environ["USER"]
+ self.passwd = os.environ["PASSWORD"]
+ self.creds = Credentials()
+ self.creds.guess(self.lp)
+ self.creds.set_username(self.user)
+ self.creds.set_password(self.passwd)
+ self.testdir = os.getenv("TESTDIR", "smbcacls")
+ self.share = os.getenv("SHARE", "tmp")
+ self.dirpath = os.path.join(os.environ["LOCAL_PATH"],self.testdir)
+
+ def tearDown(self):
+ try:
+ # remote removal doesn't seem to work with dfs share(s)
+ # #TODO find out if this is intentional (it very well might be)
+ # so if we fail with remote remove perform local remove
+ # (of remote files) instead
+ smbclient_args = self.build_test_cmd("smbclient", ["//%s/%s" % (self.server, self.share), "-c", "deltree %s/*" % self.testdir])
+ out = self.check_output(smbclient_args)
+ if "NT_STATUS_OBJECT_PATH_NOT_FOUND" in out.decode():
+ raise Exception("deltree: failed without setting errcode")
+ except Exception as e:
+ print("remote remove failed: %s" % str(e))
+ print("falling back to removing contents of local dir: %s" % self.dirpath)
+ if os.path.exists(self.dirpath):
+ for entry in os.listdir(self.dirpath):
+ fullpath = os.path.join(self.dirpath, entry)
+ if os.path.isdir(fullpath):
+ import shutil
+ shutil.rmtree(fullpath)
+ else:
+ os.unlink(fullpath)
+
+ def ace_dump(self, ace):
+ for key, value in ace.items():
+ print ("%s=%s," % (key, value), end="")
+ print ("")
+
+ def ace_cmp(self, left, right):
+ for key, value in left.items():
+ if key == "user_dom":
+ continue
+ if not key in right:
+ print ("no entry for: %s" % key)
+ return False
+ if value != right[key]:
+ print ("mismatch: %s:%s != %s:%s" % (key, value, key, right[key]))
+ return False
+ return True
+
+ def ace_parse_str(self, ace):
+ parts = ace.split(':')
+ result = {}
+ if parts[0] != "ACL":
+ raise Exception("invalid ace string:%" % ace)
+ if "\\" in parts[1]:
+ result["user_dom"], result["user"] = parts[1].split("\\")
+ elif "/" in parts[1]:
+ result["user_dom"], result["user"] = parts[1].split("/")
+ else:
+ result["user"] = parts[1]
+ result["type"], result["inherit"], result["permissions"] = parts[2].split('/')
+ return result
+
+ def build_test_cmd(self, cmd, args):
+ cmd = [cmd, "-U%s%%%s" % (self.user, self.passwd)]
+ cmd.extend(args)
+ return cmd
+
+ def smb_cacls(self, args):
+ cacls_args = ["//%s/%s" % (self.server, self.share)]
+ cacls_args.extend(args)
+ out = self.check_output(self.build_test_cmd("smbcacls", cacls_args))
+ return out
+
+ def create_remote_test_file(self, remotepath):
+ with self.mktemp() as tmpfile:
+ filepath = os.path.join(self.testdir, remotepath)
+ (dirpath, filename) = os.path.split(remotepath)
+ remote_path = ""
+ if len(dirpath):
+ remote_path = self.testdir.replace("/", "\\", 10)
+ for seg in dirpath.split(os.sep):
+ remote_path = remote_path + "\\" + seg
+ smbclient_args = self.build_test_cmd("smbclient", ["//%s/%s" % (self.server, self.share), "-c", "mkdir %s" % remote_path])
+ self.check_output(smbclient_args)
+ smbclient_args = self.build_test_cmd("smbclient", ["//%s/%s" % (self.server, self.share), "-c", "put %s %s" % (tmpfile, filepath)])
+ out = self.check_output(smbclient_args)
+ return filepath
+
+
+ def file_ace_check(self, remotepath, ace):
+ smbcacls_args = self.build_test_cmd("smbcacls",
+ ["//%s/%s" % (self.server, self.share),
+ remotepath])
+ try:
+ output = self.check_output(smbcacls_args)
+ except BlackboxProcessError as e:
+ print(str(e))
+ return False
+ out_str = output.decode()
+ aces = []
+ for line in out_str.split("\n"):
+ if line.startswith("ACL"):
+ aces.append(line)
+ for acl in aces:
+ acl_ace = self.ace_parse_str(acl)
+ if ace["user"] == acl_ace["user"] and ace["type"] == acl_ace["type"]:
+ print ("found ACE for %s" % acl_ace["user"])
+ if not self.ace_cmp(acl_ace, ace):
+ print ("differences between file ACE: ")
+ self.ace_dump(acl_ace)
+ print ("and expected ACE: ")
+ self.ace_dump(ace)
+ else:
+ print ("matched ACE for %s" % acl_ace["user"])
+ self.ace_dump(ace)
+ return True
+ return False
diff --git a/python/samba/tests/blackbox/smbcacls_basic.py b/python/samba/tests/blackbox/smbcacls_basic.py
new file mode 100644
index 0000000..7660692
--- /dev/null
+++ b/python/samba/tests/blackbox/smbcacls_basic.py
@@ -0,0 +1,129 @@
+# Blackbox tests for smbcacls
+#
+# Copyright (C) Noel Power noel.power@suse.com
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+from samba.tests.blackbox.smbcacls import SmbCaclsBlockboxTestBase
+from samba.tests import BlackboxProcessError
+
+class BasicSmbCaclsTests(SmbCaclsBlockboxTestBase):
+    """Basic --set/--modify/--delete/--add smbcacls operations on one file."""
+
+    def test_simple_single_set(self):
+        """test smbcacls '--set' attempts to overwrite the ACL for the file
+
+        before:
+
+        +-test_dir/
+          +-file.1            (I)(F)
+
+        after/expected:
+
+        +-test_dir/
+          +-file.1            (F)"""
+
+        file1 = "file-1"
+        try:
+            filepath = self.create_remote_test_file(file1)
+        except BlackboxProcessError as e:
+            self.fail(str(e))
+
+        acl = ("ACL:%s:ALLOWED/0x0/FULL" % self.user)
+        # invoked as a raw command line here (not via self.smb_cacls) to
+        # exercise the binary directly
+        command = "bin/smbcacls -U%s%%%s --set %s //%s/%s %s" % (self.user, self.passwd, acl, self.server, self.share, filepath)
+
+        try:
+            result = self.check_output(command)
+        except BlackboxProcessError as e:
+            self.fail(str(e))
+
+        ace = self.ace_parse_str(acl)
+        self.assertTrue(self.file_ace_check(filepath, ace))
+
+    def test_simple_single_mod(self):
+
+        """test smbcacls '--modify' attempts to modify the ACL for the file
+        (note: first part of the test 'set' ACL to (F) then attempts to modify
+        before:
+
+        +-test_dir/
+          +-file.1            (F)
+
+        after/expected:
+
+        +-test_dir/
+          +-file.1            (READ)"""
+
+        acl_str = "ACL:%s:ALLOWED/0x0/FULL" % self.user
+        try:
+            remotepath = self.create_remote_test_file("file-1")
+
+            self.smb_cacls(["--set", acl_str, remotepath])
+
+            ace = self.ace_parse_str(acl_str)
+            self.assertTrue(self.file_ace_check(remotepath, ace))
+
+            # overwrite existing entry
+            acl_str = "ACL:%s:ALLOWED/0x0/READ" % self.user
+            self.smb_cacls(["--modify", acl_str, remotepath])
+
+            ace = self.ace_parse_str(acl_str)
+            self.assertTrue(self.file_ace_check(remotepath, ace))
+        except BlackboxProcessError as e:
+            self.fail(str(e))
+
+    def test_simple_single_del(self):
+        """test smbcacls '--delete' attempts to delete the ACL for the file
+        (note: first part of the test 'set' ACL to (F) then attempts to delete
+        before:
+
+        +-tar_test_dir/
+          +-file.1            (F)
+
+        after/expected:
+
+        +-tar_test_dir/
+          +-file.1            (none) - meaning no (F) ACL for this user"""
+
+        acl_str = "ACL:%s:ALLOWED/0x0/FULL" % self.user
+
+        try:
+            remotepath = self.create_remote_test_file("file-1")
+
+            # only a single ACE string in the ACL
+            ace = self.ace_parse_str(acl_str)
+            self.assertTrue(self.file_ace_check(remotepath, ace))
+
+            self.smb_cacls(["--delete", acl_str, remotepath])
+            self.assertFalse(self.file_ace_check(remotepath, ace))
+        except BlackboxProcessError as e:
+            self.fail(str(e))
+
+
+    def test_simple_single_add(self):
+        # NOTE(review): despite the name this uses '--set' for both the
+        # ALLOWED and the DENIED ACE -- confirm '--add' was not intended
+        acl_str = "ACL:%s:ALLOWED/0x0/FULL" % self.user
+        dny_str = "ACL:%s:DENIED/0x0/READ" % self.user
+
+        try:
+            remotepath = self.create_remote_test_file("file-1")
+
+            self.smb_cacls(["--set", acl_str, remotepath])
+
+            ace = self.ace_parse_str(acl_str)
+            self.assertTrue(self.file_ace_check(remotepath, ace))
+
+            self.smb_cacls(["--set", dny_str, remotepath])
+            ace = self.ace_parse_str(dny_str)
+            self.assertTrue(self.file_ace_check(remotepath, ace))
+        except BlackboxProcessError as e:
+            self.fail(str(e))
diff --git a/python/samba/tests/blackbox/smbcacls_dfs_propagate_inherit.py b/python/samba/tests/blackbox/smbcacls_dfs_propagate_inherit.py
new file mode 100644
index 0000000..eed96d3
--- /dev/null
+++ b/python/samba/tests/blackbox/smbcacls_dfs_propagate_inherit.py
@@ -0,0 +1,84 @@
+# Blackbox tests for smbcacls
+#
+# Copyright (C) Noel Power noel.power@suse.com
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+from samba.tests import BlackboxProcessError
+import os
+from samba.tests.blackbox.smbcacls_propagate_inhertance import InheritanceSmbCaclsTests
+from samba.tests.blackbox.smbcacls import SmbCaclsBlockboxTestBase
+
+class DfsInheritanceSmbCaclsTests(InheritanceSmbCaclsTests):
+    """Re-run the inheritance tests over a hierarchy containing a DFS link."""
+
+    def setUp(self):
+        # This is some intentional trickery to ensure we skip
+        # InheritanceSmbCaclsTests.setUp so we can create a new
+        # test directory & file hierarchy (including a nested dfs link)
+        SmbCaclsBlockboxTestBase.setUp(self)
+        smbclient_args = self.build_test_cmd("smbclient", ["//%s/%s" % (self.server, self.share), "-c", "mkdir %s" % os.getenv("TESTDIR", "smbcacls")])
+        self.check_output(smbclient_args)
+
+        # create toplevel testdir structure with desired ACL(s)
+        #
+        # +-tar_test_dir/    (OI)(CI)(I)(F)
+        #   +-oi_dir/        (OI)(CI)(I)(F)
+        #   | +-file.1       (I)(F)
+        #   | +-nested/      (OI)(CI)(I)(F)
+        #   |   +-file.2     (I)(F)
+        # DFS=>|   +-nested_again/     (OI)(CI)(I)(F)
+        #   |     +-file.3   (I)(F)
+
+        self.toplevel = self.create_remote_test_file("tar_test_dir/file-0")
+        self.dfs_target_share = os.getenv("DFS_TARGET_SHARE", "smbcacls_sharedir_dfs")
+        self.f1 = self.create_remote_test_file("tar_test_dir/oi_dir/file-1")
+        self.f2 = self.create_remote_test_file("tar_test_dir/oi_dir/nested/file-2")
+        # file-3 sits behind the dfs link, so it is created further below
+        # once the link exists
+#        self.f3 = self.create_remote_test_file("tar_test_dir/oi_dir/nested/nested_again/file-3")
+
+
+        self.tar_dir = os.path.split(self.toplevel)[0]
+        self.oi_dir = os.path.split(self.f1)[0]
+        self.nested_dir = os.path.split(self.f2)[0]
+
+        self.nested_again_dir = os.path.join(self.nested_dir, "nested_again")
+
+        # dfs link
+        link_val = "msdfs:%s\\%s" % (self.server, self.dfs_target_share)
+        dfs_share_path = "smbcacls_share"
+        local_link_path = os.path.join(os.environ["LOCAL_PATH"], dfs_share_path)
+        link_source = link_val
+        link_dest = os.path.join(local_link_path, self.nested_again_dir)
+
+
+        # unfortunately os.link won't work with a source file that doesn't
+        # exist, we need to run 'ln' directly
+        #os.link(link_source, link_dest)
+        link_args = ["ln", "-s", link_source, link_dest]
+        out = self.check_output(link_args)
+
+        self.f3 = self.create_remote_test_file("tar_test_dir/oi_dir/nested/nested_again/file-3")
+
+
+        # seed the ACLs the same way InheritanceSmbCaclsTests.setUp does
+        dir_acl_str = "ACL:%s:ALLOWED/OI|CI/FULL" % self.user
+        inherited_dir_acl_str = "ACL:%s:ALLOWED/OI|CI|I/FULL" % self.user
+        file_acl_str = "ACL:%s:ALLOWED/I/FULL" % self.user
+
+        self.smb_cacls(["--modify", dir_acl_str, self.tar_dir])
+        self.smb_cacls(["--modify", inherited_dir_acl_str, self.oi_dir])
+        self.smb_cacls(["--modify", inherited_dir_acl_str, self.nested_dir])
+        self.smb_cacls(["--modify", inherited_dir_acl_str, self.nested_again_dir])
+        self.smb_cacls(["--modify", file_acl_str, self.f1])
+        self.smb_cacls(["--modify", file_acl_str, self.f2])
+        self.smb_cacls(["--modify", file_acl_str, self.f3])
diff --git a/python/samba/tests/blackbox/smbcacls_propagate_inhertance.py b/python/samba/tests/blackbox/smbcacls_propagate_inhertance.py
new file mode 100644
index 0000000..cc13727
--- /dev/null
+++ b/python/samba/tests/blackbox/smbcacls_propagate_inhertance.py
@@ -0,0 +1,1290 @@
+# Blackbox tests for smbcacls
+#
+# Copyright (C) Noel Power noel.power@suse.com
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+from samba.tests.blackbox.smbcacls import SmbCaclsBlockboxTestBase
+from samba.tests import BlackboxProcessError
+import os
+
+class InheritanceSmbCaclsTests(SmbCaclsBlockboxTestBase):
+
+    def setUp(self):
+        """Create the test tree and seed each node with inheritable ACLs."""
+        super().setUp()
+
+        # create toplevel testdir structure with desired ACL(s)
+        #
+        # +-tar_test_dir/    (OI)(CI)(I)(F)
+        #   +-oi_dir/        (OI)(CI)(I)(F)
+        #   | +-file.1       (I)(F)
+        #   | +-nested/      (OI)(CI)(I)(F)
+        #   |   +-file.2     (I)(F)
+        #   |   +-nested_again/     (OI)(CI)(I)(F)
+        #   |     +-file.3   (I)(F)
+
+        self.toplevel = self.create_remote_test_file("tar_test_dir/file-0")
+        self.f1 = self.create_remote_test_file("tar_test_dir/oi_dir/file-1")
+        self.f2 = self.create_remote_test_file("tar_test_dir/oi_dir/nested/file-2")
+        self.f3 = self.create_remote_test_file("tar_test_dir/oi_dir/nested/nested_again/file-3")
+        # directory paths are derived from the files just created
+        self.tar_dir = os.path.split(self.toplevel)[0]
+        self.oi_dir = os.path.split(self.f1)[0]
+        self.nested_dir = os.path.split(self.f2)[0]
+        self.nested_again_dir = os.path.split(self.f3)[0]
+
+        dir_acl_str = "ACL:%s:ALLOWED/OI|CI/FULL" % self.user
+        inherited_dir_acl_str = "ACL:%s:ALLOWED/OI|CI|I/FULL" % self.user
+        file_acl_str = "ACL:%s:ALLOWED/I/FULL" % self.user
+
+        self.smb_cacls(["--modify", dir_acl_str, self.tar_dir])
+        self.smb_cacls(["--modify", inherited_dir_acl_str, self.oi_dir])
+        self.smb_cacls(["--modify", inherited_dir_acl_str, self.nested_dir])
+        self.smb_cacls(["--modify", inherited_dir_acl_str, self.nested_again_dir])
+        self.smb_cacls(["--modify", file_acl_str, self.f1])
+        self.smb_cacls(["--modify", file_acl_str, self.f2])
+        self.smb_cacls(["--modify", file_acl_str, self.f3])
+
+    def tearDown(self):
+        """Point dirpath at the custom share's testdir before base cleanup."""
+        # tmp is the default share which has an existing testdir smbcacls
+        # we need to be prepared to deal with a 'custom' share (which also
+        # would have an existing testdir)
+        if self.share != "tmp":
+            self.dirpath = os.path.join(os.environ["LOCAL_PATH"],self.share)
+            self.dirpath = os.path.join(self.dirpath,self.testdir)
+        super().tearDown()
+
+    def test_simple_oi_add(self):
+        """test smbcacls '--propagate-inheritance --add' which attempts to add the ACL
+        for the file and additionally use inheritance rules to propagate appropriate
+        changes to children
+
+        This test adds an ACL with (OI)(READ)
+
+        before:
+
+        +-tar_test_dir/  (OI)(CI)(I)(F)
+          +-oi_dir/    (OI)(CI)(I)(F)
+          | +-file.1            (I)(F)
+          | +-nested/  (OI)(CI)(I)(F)
+          |   +-file.2          (I)(F)
+          |   +-nested_again/  (OI)(CI)(I)(F)
+          |     +-file.3          (I)(F)
+
+        after/expected:
+
+        +-tar_test_dir/  (OI)(CI)(I)(F)
+          +-oi_dir/    (OI)(CI)(I)(F), (OI)(READ)
+          | +-file.1            (I)(F), (I)(READ)
+          | +-nested/  (OI)(CI)(I)(F), (OI)(IO)(I)(READ)
+          |   +-file.2          (I)(F), (I)(READ)
+          |   +-nested_again/  (OI)(CI)(I)(F), (OI)(IO)(I)(READ)
+          |     +-file.3          (I)(F), (I)(READ)"""
+
+        # the ACE added at the top, and the forms it should take once
+        # inherited by files resp. subdirectories
+        dir_add_acl_str = "ACL:%s:ALLOWED/OI/READ" % self.user
+        obj_inherited_ace_str = "ACL:%s:ALLOWED/I/READ" % self.user
+        dir_inherited_ace_str = "ACL:%s:ALLOWED/OI|IO|I/READ" % self.user
+
+        try:
+
+            self.smb_cacls(["--propagate-inheritance", "--add",
+                            dir_add_acl_str, self.oi_dir])
+
+            # check top level container 'oi_dir' has OI/READ
+            dir_ace = self.ace_parse_str(dir_add_acl_str)
+            self.assertTrue(self.file_ace_check(self.oi_dir, dir_ace))
+
+            # file 'oi_dir/file-1' should have inherited I/READ
+            child_file_ace = self.ace_parse_str(obj_inherited_ace_str)
+            self.assertTrue(self.file_ace_check(self.f1, child_file_ace))
+
+            # nested dir 'oi_dir/nested/' should have OI|IO/READ
+            child_dir_ace = self.ace_parse_str(dir_inherited_ace_str)
+            self.assertTrue(self.file_ace_check(self.nested_dir, child_dir_ace))
+
+            # nested file 'oi_dir/nested/file-2' should have inherited I/READ
+            self.assertTrue(self.file_ace_check(self.f2, child_file_ace))
+
+            # nested_again dir 'oi_dir/nested/nested_again' should have OI|IO/READ
+            child_dir_ace = self.ace_parse_str(dir_inherited_ace_str)
+            self.assertTrue(self.file_ace_check(self.nested_again_dir, child_dir_ace))
+            # nested_again file 'oi_dir/nested/nested_again/file-3' should have inherited I/READ
+            self.assertTrue(self.file_ace_check(self.f3, child_file_ace))
+        except BlackboxProcessError as e:
+            self.fail(str(e))
+
+ def test_simple_oi_delete(self):
+ """test smbcacls '--propagate-inheritance --add' which attempts to add the ACL
+ for the file and additionally use inheritance rules to propagate appropriate
+ changes to children
+
+ This test adds an ACL with (OI)(READ)
+
+ before:
+
+ +-tar_test_dir/ (OI)(CI)(I)(F)
+ +-oi_dir/ (OI)(CI)(I)(F), (OI)(IO)(READ)
+ | +-file.1 (I)(F), (I)(READ)
+ | +-nested/ (OI)(CI)(I)(F), (OI)(IO)(I)(READ)
+ | +-file.2 (I)(F), (I)(READ)
+ | +-nested_again/ (OI)(CI)(I)(F)
+ | +-file.3 (I)(F)
+
+ after/expected:
+
+ +-tar_test_dir/ (OI)(CI)(I)(F)
+ +-oi_dir/ (OI)(CI)(I)(F)
+ | +-file.1 (I)(F)
+ | +-nested/ (OI)(CI)(I)(F)
+ | +-file.2 (I)(F)
+ | +-nested_again/ (OI)(CI)(I)(F)
+ | +-file.3 (I)(F)"""
+
+ dir_acl_str = "ACL:%s:ALLOWED/OI/READ" % self.user
+ obj_inherited_ace_str = "ACL:%s:ALLOWED/I/READ" % self.user
+ dir_inherited_ace_str = "ACL:%s:ALLOWED/OI|IO|I/READ" % self.user
+ try:
+
+ # add flags on oi_dir
+ self.smb_cacls([ "--add", dir_acl_str, self.oi_dir])
+
+ # add flags on oi_dir/nested
+ self.smb_cacls([ "--add", dir_inherited_ace_str, self.nested_dir])
+
+ # add flags on oi_dir/nested/nested_again
+ self.smb_cacls([ "--add", dir_inherited_ace_str, self.nested_again_dir])
+
+ # add flags on oi_dir/file-1
+ self.smb_cacls(["--add", obj_inherited_ace_str, self.f1])
+
+ # add flags on oi_dir/nested/file-2
+ self.smb_cacls([ "--add", obj_inherited_ace_str, self.f2])
+
+ # add flags on oi_dir/nested/nested_again/file-3
+ self.smb_cacls([ "--add", obj_inherited_ace_str, self.f3])
+
+ self.smb_cacls(["--propagate-inheritance",
+ "--delete", dir_acl_str, self.oi_dir])
+
+ # check top level container 'oi_dir' no longer has OI/READ
+ dir_ace = self.ace_parse_str(dir_acl_str)
+ self.assertTrue(self.file_ace_check(self.oi_dir, dir_ace) == False)
+
+ # file 'oi_dir/file-1' should no longer have inherited I/READ
+ child_file_ace = self.ace_parse_str(obj_inherited_ace_str)
+ self.assertTrue(self.file_ace_check(self.f1, child_file_ace) == False)
+
+ # nested dir 'oi_dir/nested/' should no longer have OI|IO/READ
+ child_dir_ace = self.ace_parse_str(dir_inherited_ace_str)
+ self.assertTrue(self.file_ace_check(self.nested_dir, child_dir_ace) == False)
+
+ # nested file 'oi_dir/nested/file-2' should no longer have inherited I/READ
+ self.assertTrue(self.file_ace_check(self.f2, child_file_ace) == False)
+
+ # nested dir 'oi_dir/nested/nested_agin' should no longer have OI|IO/READ
+ child_dir_ace = self.ace_parse_str(dir_inherited_ace_str)
+ self.assertTrue(self.file_ace_check(self.nested_again_dir, child_dir_ace) == False)
+
+ # nested file 'oi_dir/nested/nested_again/file-3' should no longer have inherited I/READ
+ self.assertTrue(self.file_ace_check(self.f3, child_file_ace) == False)
+
+ except BlackboxProcessError as e:
+ self.fail(str(e))
+
+    def test_simple_oi_modify(self):
+        """test smbcacls '--propagate-inheritance --modify' which attempts to modify ACL
+        for the file and additionally use inheritance rules to propagate appropriate
+        changes to children
+
+        This test first adds an ACL with (OI)(R), then it modifies that acl to be
+        (OI)(D) - where D == 0x00110000
+
+        before:
+
+        +-tar_test_dir/ (OI)(CI)(I)(F)
+        +-oi_dir/ (OI)(IO)(R)
+        | +-file.1 (I)(R)
+        | +-nested/ (OI)(IO)(I)(R)
+        | +-file.2 (I)(R)
+        | +-nested_again/ (OI)(IO)(I)(R)
+        | +-file.3 (I)(R)
+
+        after/expected:
+
+        +-tar_test_dir/ (OI)(CI)(I)(F)
+        +-oi_dir/ (OI)(IO)(CHANGE)
+        | +-file.1 (I)(CHANGED)
+        | +-nested/ (OI)(IO)(I)(CHANGED)
+        | +-file.2 (I)(CHANGED)
+        | +-nested_again/ (OI)(IO)(I)(CHANGE)
+        | +-file.3 (I)(CHANGE)"""
+
+        explict_access_ace_str = "ACL:%s:ALLOWED/0x0/RWD" % self.user
+        dir_mod_acl_str = "ACL:%s:ALLOWED/OI/CHANGE" % self.user
+        file_mod_inherited_ace_str = "ACL:%s:ALLOWED/I/CHANGE" % self.user
+        dir_mod_inherited_ace_str = "ACL:%s:ALLOWED/OI|IO|I/CHANGE" % self.user
+
+        try:
+            # add flags on oi_dir
+
+            # This is somewhat artificial, we need to add a new acl to the directory
+            # so that the following modify operation doesn't fail. Previously
+            # '--modify' was used in place of '--add' but that resulted in failure
+            # to access the directory ( or even modify the acl ).
+            # Note: when running this test against a windows server it seems that
+            # running as Administrator ensures best results
+
+            # add explicit access ACE on oi_dir itself
+            self.smb_cacls(["--add", explict_access_ace_str, self.oi_dir])
+
+            # add flags on oi_dir/nested
+            self.smb_cacls(["--add", explict_access_ace_str, self.nested_dir])
+
+            # add flags on oi_dir/nested/nested_again
+            self.smb_cacls(["--add", explict_access_ace_str, self.nested_again_dir])
+
+            # add flags on oi_dir/file-1
+            self.smb_cacls([ "--add", explict_access_ace_str, self.f1])
+
+            # add flags on oi_dir/nested/file-2
+            self.smb_cacls(["--add", explict_access_ace_str, self.f2])
+
+            # add flags on oi_dir/nested/nested_again/file-3
+            self.smb_cacls(["--add", explict_access_ace_str, self.f3])
+
+            self.smb_cacls(["--propagate-inheritance", "--modify",
+                            dir_mod_acl_str, self.oi_dir])
+
+
+            # check top level container 'oi_dir' has OI/CHANGE
+            dir_ace = self.ace_parse_str(dir_mod_acl_str)
+            self.assertTrue(self.file_ace_check(self.oi_dir, dir_ace))
+
+            # file 'oi_dir/file-1' should have inherited I/CHANGE
+            child_file_ace = self.ace_parse_str(file_mod_inherited_ace_str)
+            self.assertTrue(self.file_ace_check(self.f1, child_file_ace))
+
+            # nested dir 'oi_dir/nested/' should have OI|IO/CHANGE
+            child_dir_ace = self.ace_parse_str(dir_mod_inherited_ace_str)
+            self.assertTrue(self.file_ace_check(self.nested_dir, child_dir_ace))
+
+            # nested file 'oi_dir/nested/file-2' should have inherited I/CHANGE
+            self.assertTrue(self.file_ace_check(self.f2, child_file_ace))
+
+            # nested dir 'oi_dir/nested/nested_again' should have OI|IO/CHANGE
+            child_dir_ace = self.ace_parse_str(dir_mod_inherited_ace_str)
+            self.assertTrue(self.file_ace_check(self.nested_again_dir, child_dir_ace))
+
+            # nested file 'oi_dir/nested/nested_again/file-3' should have inherited I/CHANGE
+            self.assertTrue(self.file_ace_check(self.f3, child_file_ace))
+
+        except BlackboxProcessError as e:
+            self.fail(str(e))
+
+    def test_simple_ci_add(self):
+        """test smbcacls '--propagate-inheritance --add' which attempts to add the ACL
+        for the file and additionally use inheritance rules to propagate appropriate
+        changes to children
+
+        This test adds an ACL with (CI)(READ)
+
+        before:
+
+        +-tar_test_dir/ (OI)(CI)(I)(F)
+        +-oi_dir/ (OI)(CI)(I)(F)
+        | +-file.1 (I)(F)
+        | +-nested/ (OI)(CI)(I)(F)
+        | +-file.2 (I)(F)
+        | +-nested_again/ (OI)(CI)(I)(F)
+        | +-file.3 (I)(F)
+
+        after/expected:
+
+        +-tar_test_dir/ (OI)(CI)(I)(F)
+        +-oi_dir/ (OI)(CI)(I)(F), (CI)(READ)
+        | +-file.1 (I)(F)
+        | +-nested/ (OI)(CI)(I)(F), (CI)(I)(READ)
+        | +-file.2 (I)(F)
+        | +-nested_again/ (OI)(CI)(I)(F), (CI)(I)(READ)
+        | +-file.3 (I)(F)"""
+        try:
+            dir_add_acl_str = "ACL:%s:ALLOWED/CI/READ" % self.user
+            file_inherited_ace_str = "ACL:%s:ALLOWED/I/READ" % self.user
+            dir_inherited_ace_str = "ACL:%s:ALLOWED/CI|I/READ" % self.user
+
+            self.smb_cacls(["--propagate-inheritance", "--add",
+                            dir_add_acl_str, self.oi_dir])
+
+            # check top level container 'oi_dir' has CI/READ
+            dir_ace = self.ace_parse_str(dir_add_acl_str)
+            self.assertTrue(self.file_ace_check(self.oi_dir, dir_ace))
+
+            # nested file 'oi_dir/file-1' should NOT have inherited I/READ
+            child_file_ace = self.ace_parse_str(file_inherited_ace_str)
+            self.assertTrue(self.file_ace_check(self.f1, child_file_ace) == False)
+
+            # nested dir 'oi_dir/nested/' should have CI|I|READ
+            child_dir_ace = self.ace_parse_str(dir_inherited_ace_str)
+            self.assertTrue(self.file_ace_check(self.nested_dir, child_dir_ace))
+
+            # nested file 'oi_dir/nested/file-2' should NOT have inherited I/READ
+            child_file_ace = self.ace_parse_str(file_inherited_ace_str)
+            self.assertTrue(self.file_ace_check(self.f2, child_file_ace) == False)
+
+            # nested dir 'oi_dir/nested/nested_again' should have CI|I|READ
+            child_dir_ace = self.ace_parse_str(dir_inherited_ace_str)
+            self.assertTrue(self.file_ace_check(self.nested_again_dir, child_dir_ace))
+
+            # nested file 'oi_dir/nested/nested_again/file-3' should NOT have inherited I/READ
+            child_file_ace = self.ace_parse_str(file_inherited_ace_str)
+            self.assertTrue(self.file_ace_check(self.f3, child_file_ace) == False)
+
+        except BlackboxProcessError as e:
+            self.fail(str(e))
+
+    def test_simple_ci_delete(self):
+        """test smbcacls '--propagate-inheritance --delete' which attempts to delete
+        the ACL for the file and additionally use inheritance rules to propagate
+        appropriate changes to children
+
+        This test deletes an ACL with (CI)(READ)
+
+        before:
+
+        +-tar_test_dir/ (OI)(CI)(I)(F)
+        +-oi_dir/ (OI)(CI)(I)(F), (CI)(READ)
+        | +-file.1 (I)(F)
+        | +-nested/ (OI)(CI)(I)(F), (CI)(I)(READ)
+        | +-file.2 (I)(F)
+        | +-nested_again/ (OI)(CI)(I)(F), (CI)(I)(READ)
+        | +-file.3 (I)(F)
+
+        after/expected:
+
+        +-tar_test_dir/ (OI)(CI)(I)(F)
+        +-oi_dir/ (OI)(CI)(I)(F)
+        | +-file.1 (I)(F)
+        | +-nested/ (OI)(CI)(I)(F)
+        | +-file.2 (I)(F)
+        | +-nested_again/ (OI)(CI)(I)(F)
+        | +-file.3 (I)(F)"""
+
+        dir_acl_str = "ACL:%s:ALLOWED/CI/READ" % self.user
+        file_inherited_ace_str = "ACL:%s:ALLOWED/I/READ" % self.user
+        dir_inherited_ace_str = "ACL:%s:ALLOWED/CI|I/READ" % self.user
+        try:
+
+            # add flags on oi_dir
+            self.smb_cacls(["--add", dir_acl_str, self.oi_dir])
+
+            # add flags on oi_dir/nested
+            self.smb_cacls(["--add", dir_inherited_ace_str, self.nested_dir])
+
+            # add flags on oi_dir/nested/nested_again (bug fix: was re-adding to nested_dir)
+            self.smb_cacls(["--add", dir_inherited_ace_str, self.nested_again_dir])
+
+            # make sure no (I|READ) flags on oi_dir/file-1
+            self.smb_cacls(["--delete", file_inherited_ace_str, self.f1])
+
+            # make sure no (I|READ) flags on oi_dir/nested/file-2
+            self.smb_cacls(["--delete", file_inherited_ace_str, self.f2])
+
+            # make sure no (I|READ) flags on oi_dir/nested/nested_again/file-3 (bug fix: was f2)
+            self.smb_cacls(["--delete", file_inherited_ace_str, self.f3])
+
+            self.smb_cacls(["--propagate-inheritance",
+                            "--delete",
+                            dir_acl_str, self.oi_dir])
+
+            # check top level container 'oi_dir' no longer has CI/READ
+            dir_ace = self.ace_parse_str(dir_acl_str)
+            self.assertFalse(self.file_ace_check(self.oi_dir, dir_ace))
+
+            child_file_ace = self.ace_parse_str(file_inherited_ace_str)
+            # nested file 'oi_dir/file-1' should NOT have inherited I/READ
+            self.assertFalse(self.file_ace_check(self.f1, child_file_ace))
+
+            child_dir_ace = self.ace_parse_str(dir_inherited_ace_str)
+            # nested dir 'oi_dir/nested/' should no longer have CI|I|READ
+            self.assertFalse(self.file_ace_check(self.nested_dir, child_dir_ace))
+
+            # nested dir 'oi_dir/nested/nested_again' should no longer have CI|I|READ
+            self.assertFalse(self.file_ace_check(self.nested_again_dir, child_dir_ace))
+
+        except BlackboxProcessError as e:
+            self.fail(str(e))
+
+    def test_simple_ci_modify(self):
+        """test smbcacls '--propagate-inheritance --modify' which attempts to modify ACL
+        for the file and additionally use inheritance rules to propagate appropriate
+        changes to children
+
+        This test first adds an ACL with (CI)(R), then it modifies that acl to be
+        (CI)(D) - where D == 0x00110000
+
+        before:
+
+        +-tar_test_dir/ (OI)(CI)(I)(F)
+        +-oi_dir/ (CI)(R)
+        | +-file.1 (I)(F)
+        | +-nested/ (CI)(I)(R)
+        | +-file.2 (I)(F)
+        | +-nested_again/ (CI)(I)(R)
+        | +-file.3 (I)(F)
+
+
+        after/expected:
+
+        +-tar_test_dir/ (OI)(CI)(I)(F)
+        +-oi_dir/ (CI)(CHANGE)
+        | +-file.1 (I)(F)
+        | +-nested/ (CI)(I)(CHANGE)
+        | +-file.2 (I)(F)
+        | +-nested_again/ (CI)(I)(CHANGE)
+        | +-file.3 (I)(F)"""
+
+        dir_acl_str = "ACL:%s:ALLOWED/CI/READ" % self.user
+        file_inherited_ace_str = "ACL:%s:ALLOWED/I/READ" % self.user
+        dir_inherited_ace_str = "ACL:%s:ALLOWED/CI|I/READ" % self.user
+        dir_mod_acl_str = "ACL:%s:ALLOWED/CI/CHANGE" % self.user
+        file_mod_inherited_ace_str = "ACL:%s:ALLOWED/I/CHANGE" % self.user
+        dir_mod_inherited_ace_str = "ACL:%s:ALLOWED/CI|I/CHANGE" % self.user
+        delete_ace_str = "ACL:%s:ALLOWED/0x0/RWD" % self.user
+
+        try:
+            # This is somewhat artificial, we need to add a new acl to the
+            # directory so that the following modify operation doesn't fail.
+            # Previously '--modify' was used in place of '--add' but that
+            # resulted in failure to access the directory ( or even modify
+            # the acl ).
+            # Note: when running this test against a windows server it seems
+            # that running as Administrator ensures best results
+            self.smb_cacls(["--add", dir_acl_str, self.oi_dir])
+
+            # add flags on oi_dir/nested
+            self.smb_cacls(["--add", dir_inherited_ace_str, self.nested_dir])
+
+            # add flags on oi_dir/nested/nested_again
+            self.smb_cacls(["--add", dir_inherited_ace_str, self.nested_again_dir])
+
+            self.smb_cacls(["--propagate-inheritance", "--modify",
+                            dir_mod_acl_str, self.oi_dir])
+
+            # check top level container 'oi_dir' has CI/CHANGE
+            dir_ace = self.ace_parse_str(dir_mod_acl_str)
+            self.assertTrue(self.file_ace_check(self.oi_dir, dir_ace))
+
+            # nested file 'oi_dir/file-1' should NOT have inherited I/CHANGE
+            child_file_ace = self.ace_parse_str(file_mod_inherited_ace_str)
+            self.assertTrue(self.file_ace_check(self.f1, child_file_ace) == False)
+
+            # nested dir 'oi_dir/nested/' should have CI|I/CHANGE
+            child_dir_ace = self.ace_parse_str(dir_mod_inherited_ace_str)
+            self.assertTrue(self.file_ace_check(self.nested_dir, child_dir_ace))
+
+            # nested file 'oi_dir/nested/file-2' should NOT have inherited I/CHANGE
+            child_file_ace = self.ace_parse_str(file_mod_inherited_ace_str)
+            self.assertTrue(self.file_ace_check(self.f2, child_file_ace) == False)
+
+            # nested dir 'oi_dir/nested/nested_again' should have CI|I/CHANGE
+            child_dir_ace = self.ace_parse_str(dir_mod_inherited_ace_str)
+            self.assertTrue(self.file_ace_check(self.nested_again_dir, child_dir_ace))
+
+            # nested file 'oi_dir/nested/nested_again/file-3' should NOT have inherited I/CHANGE
+            child_file_ace = self.ace_parse_str(file_mod_inherited_ace_str)
+            self.assertTrue(self.file_ace_check(self.f3, child_file_ace) == False)
+
+            # set some flags to allow us to delete the files
+            self.smb_cacls(["--set", delete_ace_str, self.f1])
+            self.smb_cacls(["--set", delete_ace_str, self.f2])
+            self.smb_cacls(["--set", delete_ace_str, self.f3])
+
+        except BlackboxProcessError as e:
+            self.fail(str(e))
+
+    def test_simple_cioi_add(self):
+        """test smbcacls '--propagate-inheritance --add' which attempts to add the ACL
+        for the file and additionally use inheritance rules to propagate appropriate
+        changes to children
+
+        This test adds an ACL with (CI)(OI)(READ)
+
+        before:
+
+        +-tar_test_dir/ (OI)(CI)(I)(F)
+        +-oi_dir/ (OI)(CI)(I)(F)
+        | +-file.1 (I)(F)
+        | +-nested/ (OI)(CI)(I)(F)
+        | +-file.2 (I)(F)
+        | +-nested_again/ (OI)(CI)(I)(F)
+        | +-file.3 (I)(F)
+
+        after/expected:
+
+        +-tar_test_dir/ (OI)(CI)(I)(F)
+        +-oi_dir/ (OI)(CI)(I)(F), (CI)(OI)(READ)
+        | +-file.1 (I)(F), (I)(READ)
+        | +-nested/ (OI)(CI)(I)(F), (CI)(OI)(I)(READ)
+        | +-file.2 (I)(F), (I)(READ)
+        | +-nested_again/ (OI)(CI)(I)(F), (CI)(OI)(I)(READ)
+        | +-file.3 (I)(F), (I)(READ)"""
+
+        dir_add_acl_str = "ACL:%s:ALLOWED/OI|CI/READ" % self.user
+        file_inherited_ace_str = "ACL:%s:ALLOWED/I/READ" % self.user
+        dir_inherited_ace_str = "ACL:%s:ALLOWED/OI|CI|I/READ" % self.user
+
+        try:
+
+            self.smb_cacls(["--propagate-inheritance", "--add",
+                            dir_add_acl_str, self.oi_dir])
+
+            # check top level container 'oi_dir' has OI|CI/READ
+            dir_ace = self.ace_parse_str(dir_add_acl_str)
+            self.assertTrue(self.file_ace_check(self.oi_dir, dir_ace))
+
+            # nested file 'oi_dir/file-1' should have inherited I/READ
+            child_file_ace = self.ace_parse_str(file_inherited_ace_str)
+            self.assertTrue(self.file_ace_check(self.f1, child_file_ace))
+
+            # nested dir 'oi_dir/nested/' should have OI|CI|I|READ
+            child_dir_ace = self.ace_parse_str(dir_inherited_ace_str)
+            self.assertTrue(self.file_ace_check(self.nested_dir, child_dir_ace))
+
+            # nested file 'oi_dir/nested/file-2' should have inherited I/READ
+            child_file_ace = self.ace_parse_str(file_inherited_ace_str)
+            self.assertTrue(self.file_ace_check(self.f2, child_file_ace))
+
+            # nested dir 'oi_dir/nested/nested_again' should have OI|CI|I|READ
+            child_dir_ace = self.ace_parse_str(dir_inherited_ace_str)
+            self.assertTrue(self.file_ace_check(self.nested_again_dir, child_dir_ace))
+
+            # nested file 'oi_dir/nested/nested_again/file-3' should have inherited I/READ
+            child_file_ace = self.ace_parse_str(file_inherited_ace_str)
+            self.assertTrue(self.file_ace_check(self.f3, child_file_ace))
+
+        except BlackboxProcessError as e:
+            self.fail(str(e))
+
+    def test_simple_cioi_delete(self):
+        """test smbcacls '--propagate-inheritance --delete' which attempts to delete the
+        ACL for the file and additionally use inheritance rules to propagate
+        appropriate changes to children
+
+        This test deletes an ACL with (CI)(OI)(READ)
+
+        before:
+
+        +-tar_test_dir/ (OI)(CI)(I)(F)
+        +-oi_dir/ (OI)(CI)(I)(F), (CI)(OI)(READ)
+        | +-file.1 (I)(F), (I)(READ)
+        | +-nested/ (OI)(CI)(I)(F), (CI)(OI)(I)(READ)
+        | +-file.2 (I)(F), (I)(READ)
+        | +-nested_again/ (OI)(CI)(I)(F), (CI)(OI)(I)(READ)
+        | +-file.3 (I)(F), (I)(READ)
+
+        after/expected:
+
+        +-tar_test_dir/ (OI)(CI)(I)(F)
+        +-oi_dir/ (OI)(CI)(I)(F)
+        | +-file.1 (I)(F)
+        | +-nested/ (OI)(CI)(I)(F)
+        | +-file.2 (I)(F)
+        | +-nested_again/ (OI)(CI)(I)(F)
+        | +-file.3 (I)(F)"""
+
+
+        dir_acl_str = "ACL:%s:ALLOWED/OI|CI/READ" % self.user
+        file_inherited_ace_str = "ACL:%s:ALLOWED/I/READ" % self.user
+        dir_inherited_ace_str = "ACL:%s:ALLOWED/OI|CI|I/READ" % self.user
+
+        try:
+
+            # add flags on oi_dir
+            self.smb_cacls(["--add", dir_acl_str, self.oi_dir])
+
+            # add flags on oi_dir/nested
+            self.smb_cacls(["--add", dir_inherited_ace_str, self.nested_dir])
+
+            # add flags on oi_dir/file-1
+            self.smb_cacls(["--add", file_inherited_ace_str, self.f1])
+
+            # add flags on oi_dir/nested/file-2
+            self.smb_cacls(["--add", file_inherited_ace_str, self.f2])
+
+            # add flags on oi_dir/nested/nested_again/file-3 (bug fix: was f2)
+            self.smb_cacls(["--add", file_inherited_ace_str, self.f3])
+
+            self.smb_cacls(["--propagate-inheritance", "--delete",
+                            dir_acl_str, self.oi_dir])
+
+            # check top level container 'oi_dir' no longer has OI|CI/READ
+            dir_ace = self.ace_parse_str(dir_acl_str)
+            self.assertFalse(self.file_ace_check(self.oi_dir, dir_ace))
+
+            # nested file 'oi_dir/file-1' should NOT have inherited I/READ
+            child_file_ace = self.ace_parse_str(file_inherited_ace_str)
+            self.assertFalse(self.file_ace_check(self.f1, child_file_ace))
+
+            # nested dir 'oi_dir/nested/' should no longer have OI|CI|I|READ
+            child_dir_ace = self.ace_parse_str(dir_inherited_ace_str)
+            self.assertFalse(self.file_ace_check(self.nested_dir, child_dir_ace))
+            # nested file 'oi_dir/nested/file-2' should NOT have inherited I/READ
+            child_file_ace = self.ace_parse_str(file_inherited_ace_str)
+            self.assertFalse(self.file_ace_check(self.f2, child_file_ace))
+            # nested dir 'oi_dir/nested/nested_again' should no longer have OI|CI|I|READ
+            child_dir_ace = self.ace_parse_str(dir_inherited_ace_str)
+            self.assertFalse(self.file_ace_check(self.nested_again_dir, child_dir_ace))
+            # nested file 'oi_dir/nested/nested_again/file-3' should NOT have inherited I/READ
+            child_file_ace = self.ace_parse_str(file_inherited_ace_str)
+            self.assertFalse(self.file_ace_check(self.f3, child_file_ace))
+        except BlackboxProcessError as e:
+            self.fail(str(e))
+
+    def test_simple_cioi_modify(self):
+        """test smbcacls '--propagate-inheritance --modify' which attempts to modify the
+        ACL for the file and additionally use inheritance rules to propagate
+        appropriate changes to children
+
+        This test first adds an ACL with (CI)(OI)(R), then it modifies that acl to be
+        (CI)(OI)(D) - where D == 0x00110000
+
+        before:
+
+        +-tar_test_dir/ (OI)(CI)(I)(F)
+        +-oi_dir/ (CI)(OI)(R)
+        | +-file.1 (I)(R)
+        | +-nested/ (CI)(OI)(I)(R)
+        | +-file.2 (I)(R)
+        | +-nested_again/ (CI)(OI)(I)(R)
+        | +-file.3 (I)(R)
+
+        after/expected:
+
+        +-tar_test_dir/ (OI)(CI)(I)(F)
+        +-oi_dir/ (CI)(OI)(CHANGE)
+        | +-file.1 (I)(CHANGE)
+        | +-nested/ (CI)(OI)(I)(CHANGE)
+        | +-file.2 (I)(CHANGE)
+        | +-nested_again/ (CI)(OI)(I)(CHANGE)
+        | +-file.3 (I)(CHANGE)"""
+
+        dir_acl_str = "ACL:%s:ALLOWED/OI|CI/R" % self.user
+        file_inherited_ace_str = "ACL:%s:ALLOWED/I/R" % self.user
+        dir_inherited_ace_str = "ACL:%s:ALLOWED/OI|CI|I/R" % self.user
+
+        dir_mod_acl_str = "ACL:%s:ALLOWED/OI|CI/CHANGE" % self.user
+        file_mod_inherited_ace_str = "ACL:%s:ALLOWED/I/CHANGE" % self.user
+        dir_mod_inherited_ace_str = "ACL:%s:ALLOWED/OI|CI|I/CHANGE" % self.user
+        try:
+            # add flags on oi_dir
+
+            # This is somewhat artificial, we need to add a new acl to the
+            # directory so that the following modify operation doesn't fail.
+            # Previously '--modify' was used in place of '--add' but that
+            # resulted in failure to access the directory ( or even modify
+            # the acl ). Note: when running this test against a windows server
+            # it seems that running as Administrator ensures best results
+
+            self.smb_cacls(["--add", dir_acl_str, self.oi_dir])
+
+            # add flags on oi_dir/nested
+            self.smb_cacls(["--add", dir_inherited_ace_str, self.nested_dir])
+
+            # add flags on oi_dir/nested/nested_again
+            self.smb_cacls(["--add", dir_inherited_ace_str, self.nested_again_dir])
+
+            # add flags on oi_dir/file-1
+            self.smb_cacls(["--add", file_inherited_ace_str, self.f1])
+
+            # add flags on oi_dir/nested/file-2
+            self.smb_cacls(["--add", file_inherited_ace_str, self.f2])
+
+            # add flags on oi_dir/nested/nested_again/file-3
+            self.smb_cacls(["--add", file_inherited_ace_str, self.f3])
+
+            self.smb_cacls(["--propagate-inheritance", "--modify",
+                            dir_mod_acl_str, self.oi_dir])
+
+            # check top level container 'oi_dir' has OI|CI/CHANGE
+            dir_ace = self.ace_parse_str(dir_mod_acl_str)
+            self.assertTrue(self.file_ace_check(self.oi_dir, dir_ace))
+
+            # nested file 'oi_dir/file-1' should have inherited I|CHANGE
+            child_file_ace = self.ace_parse_str(file_mod_inherited_ace_str)
+            self.assertTrue(self.file_ace_check(self.f1, child_file_ace))
+
+            # nested dir 'oi_dir/nested/' should have OI|CI|I|CHANGE
+            child_dir_ace = self.ace_parse_str(dir_mod_inherited_ace_str)
+            self.assertTrue(self.file_ace_check(self.nested_dir, child_dir_ace))
+
+            # nested file 'oi_dir/nested/file-2' should have inherited I|CHANGE
+            child_file_ace = self.ace_parse_str(file_mod_inherited_ace_str)
+            self.assertTrue(self.file_ace_check(self.f2, child_file_ace))
+
+            # nested dir 'oi_dir/nested/nested_again' should have OI|CI|I|CHANGE
+            child_dir_ace = self.ace_parse_str(dir_mod_inherited_ace_str)
+            self.assertTrue(self.file_ace_check(self.nested_again_dir, child_dir_ace))
+
+            # nested file 'oi_dir/nested/nested_again/file-3' should have inherited I|CHANGE
+            child_file_ace = self.ace_parse_str(file_mod_inherited_ace_str)
+            self.assertTrue(self.file_ace_check(self.f3, child_file_ace))
+
+        except BlackboxProcessError as e:
+            self.fail(str(e))
+
+    def test_simple_set_fail(self):
+        """test smbcacls '--propagate-inheritance --set' which attempts to set the ACL
+        for the file and additionally use inheritance rules to propagate appropriate
+        changes to children
+
+        This test adds an ACL with (CI)(OI)(READ)
+
+        before:
+
+        +-tar_test_dir/ (OI)(CI)(I)(F)
+        +-oi_dir/ (OI)(CI)(I)(F)
+        | +-file.1 (I)(F)
+        | +-nested/ (OI)(CI)(I)(F)
+        | +-file.2 (I)(F)
+        | +-nested_again/ (OI)(CI)(I)(F)
+        | +-file.3 (I)(F)
+
+        after/expected:
+        fail, oi_dir has inheritance enabled, set should fail and exit with '1'"""
+        dir_acl_str = "ACL:%s:ALLOWED/OI|CI/R" % self.user
+        file_inherited_ace_str = "ACL:%s:ALLOWED/I/R" % self.user
+        dir_inherited_ace_str = "ACL:%s:ALLOWED/OI|CI|I/R" % self.user
+
+        try:
+            f1 = self.create_remote_test_file("oi_dir/file-1")
+            f2 = self.create_remote_test_file("oi_dir/nested/file-2")
+            oi_dir = os.path.split(f1)[0]
+            nested_dir = os.path.split(f2)[0]
+
+            try:
+                self.smb_cacls(["--propagate-inheritance", "--set",
+                                dir_acl_str, oi_dir])
+                self.fail("'--set' succeeded unexpectedly while processing container with inheritance enabled")
+            except BlackboxProcessError:
+                pass
+
+        except BlackboxProcessError as e:
+            self.fail(str(e))
+
+    def test_simple_oici_set(self):
+        """test smbcacls '--propagate-inheritance --set' which attempts to set the ACL
+        for the file and additionally use inheritance rules to propagate appropriate
+        changes to children
+
+        This test adds an ACL with (CI)(OI)(RWD) additionally it removes
+        inheritance from oi_dir
+
+        before:
+
+        +-tar_test_dir/ (OI)(CI)(I)(F)
+        +-oi_dir/ (OI)(CI)(I)(F)
+        | +-file.1 (I)(F)
+        | +-nested/ (OI)(CI)(I)(F)
+        | +-file.2 (I)(F)
+        | +-nested_again/ (OI)(CI)(I)(F)
+        | +-file.3 (I)(F)
+
+        after/expected:
+
+        +-tar_test_dir/ (OI)(CI)(I)(F)
+        +-oi_dir/ (OI)(CI)(RWD)
+        | +-file.1 (I)(RWD)
+        | +-nested/ (OI)(CI)(I)(RWD)
+        | +-file.2 (I)(RWD)
+        | +-nested_again/ (OI)(CI)(I)(RWD)
+        | +-file.3 (I)(RWD)"""
+
+        dir_acl_str = "ACL:%s:ALLOWED/OI|CI/RWD" % self.user
+        file_inherited_ace_str = "ACL:%s:ALLOWED/I/RWD" % self.user
+        dir_inherited_ace_str = "ACL:%s:ALLOWED/OI|CI|I/RWD" % self.user
+
+        try:
+            # break inheritance on oi_dir ( copying the existing inherited aces )
+            self.smb_cacls(["--inherit=copy", self.oi_dir])
+
+            self.smb_cacls(["--propagate-inheritance", "--set",
+                            dir_acl_str, self.oi_dir])
+
+            # check top level container 'oi_dir' has OI|CI/RWD
+            dir_ace = self.ace_parse_str(dir_acl_str)
+            self.assertTrue(self.file_ace_check(self.oi_dir, dir_ace))
+
+            # check nested file oi_dir/file-1 has I/RWD
+            child_file_ace = self.ace_parse_str(file_inherited_ace_str)
+            self.assertTrue(self.file_ace_check(self.f1, child_file_ace))
+
+            # check nested dir oi_dir/nested has OI|CI|I/RWD
+            child_dir_ace = self.ace_parse_str(dir_inherited_ace_str)
+            self.assertTrue(self.file_ace_check(self.nested_dir, child_dir_ace))
+
+            # check nested file oi_dir/nested/file-2 has I/RWD
+            child_file_ace = self.ace_parse_str(file_inherited_ace_str)
+            self.assertTrue(self.file_ace_check(self.f2, child_file_ace))
+
+            # check nested dir oi_dir/nested/nested_again has OI|CI|I/RWD
+            child_dir_ace = self.ace_parse_str(dir_inherited_ace_str)
+            self.assertTrue(self.file_ace_check(self.nested_again_dir, child_dir_ace))
+
+            # check nested file oi_dir/nested/nested_again/file-3 has I/RWD
+            child_file_ace = self.ace_parse_str(file_inherited_ace_str)
+            self.assertTrue(self.file_ace_check(self.f3, child_file_ace))
+
+        except BlackboxProcessError as e:
+            self.fail(str(e))
+
+    def test_simple_ci_set(self):
+        """test smbcacls '--propagate-inheritance --set' which attempts to set the ACL
+        for the file and additionally use inheritance rules to propagate appropriate
+        changes to children
+
+        This test adds an ACL with (CI)(RWD) additionally it removes
+        inheritance from oi_dir
+
+        before:
+
+        +-tar_test_dir/ (OI)(CI)(I)(F)
+        +-oi_dir/ (OI)(CI)(I)(F)
+        | +-file.1 (I)(F)
+        | +-nested/ (OI)(CI)(I)(F)
+        | +-file.2 (I)(F)
+        | +-nested_again/ (OI)(CI)(I)(F)
+        | +-file.3 (I)(F)
+
+        after/expected:
+
+        +-tar_test_dir/ (OI)(CI)(I)(RWD)
+        +-oi_dir/ (CI)(RWD)
+        | +-file.1
+        | +-nested/ (CI)(I)(RWD)
+        | +-file.2
+        | +-nested_again/ (CI)(I)(RWD)
+        | +-file.3 """
+        dir_acl_str = "ACL:%s:ALLOWED/CI/RWD" % self.user
+        file_inherited_ace_str = "ACL:%s:ALLOWED/I/RWD" % self.user
+        dir_inherited_ace_str = "ACL:%s:ALLOWED/CI|I/RWD" % self.user
+        delete_ace_str = "ACL:%s:ALLOWED/0x0/RWD" % self.user
+
+        try:
+            # break inheritance on oi_dir ( copying the existing inherited aces )
+            self.smb_cacls(["--inherit=copy", self.oi_dir])
+
+            self.smb_cacls(["--propagate-inheritance", "--set",
+                            dir_acl_str, self.oi_dir])
+
+            out = self.smb_cacls([self.oi_dir])
+            #count the ACL(s)
+            nacls = len([i for i in out.decode().split("\n") if i.startswith("ACL")])
+
+            # Although there maybe a couple of users with associated acl(s)
+            # before set, after set there should only be 1 acl
+
+            self.assertEqual(nacls, 1)
+
+            # check top level container 'oi_dir' has CI/RWD
+            dir_ace = self.ace_parse_str(dir_acl_str)
+            self.assertTrue(self.file_ace_check(self.oi_dir, dir_ace))
+
+            # note can't check file because it has no ACL ( due to CI )
+            # check nested dir 'oi_dir/nested' has CI|I/RWD
+            child_dir_ace = self.ace_parse_str(dir_inherited_ace_str)
+            self.assertTrue(self.file_ace_check(self.nested_dir, child_dir_ace))
+
+            # check nested dir 'oi_dir/nested/nested_again' has CI|I/RWD (bug fix: was checking nested_dir)
+            child_dir_ace = self.ace_parse_str(dir_inherited_ace_str)
+            self.assertTrue(self.file_ace_check(self.nested_again_dir, child_dir_ace))
+            self.smb_cacls(["--set", delete_ace_str, self.f1])
+            self.smb_cacls(["--set", delete_ace_str, self.f2])
+            self.smb_cacls(["--set", delete_ace_str, self.f3])
+        except BlackboxProcessError as e:
+            self.fail(str(e))
+
+    def test_simple_cioinp_add(self):
+        """test smbcacls '--propagate-inheritance --add' which attempts to add the ACL
+        for the file and additionally use inheritance rules to propagate appropriate
+        changes to children
+
+        This test adds an ACL with (CI)(OI)(NP)(CHANGE)
+        (NP) - no propagation should not propagate the changes to any further containers
+
+        before:
+
+        +-tar_test_dir/ (OI)(CI)(I)(F)
+        +-oi_dir/ (OI)(CI)(I)(F)
+        | +-file.1 (I)(F)
+        | +-nested/ (OI)(CI)(I)(F)
+        | +-file.2 (I)(F)
+        | +-nested_again/ (OI)(CI)(I)(F)
+        | +-file.3 (I)(F)
+
+        after/expected:
+
+        +-tar_test_dir/ (OI)(CI)(I)(F)
+        +-oi_dir/ (OI)(CI)(I)(F), (CI)(OI)(NP)(CHANGE)
+        | +-file.1 (I)(F), (I)(CHANGE)
+        | +-nested/ (OI)(CI)(I)(F), (I)(CHANGE)
+        | +-file.2 (I)(F)
+        | +-nested_again/ (OI)(CI)(I)(F)
+        | +-file.3 (I)(F)"""
+
+        dir_add_acl_str = "ACL:%s:ALLOWED/OI|CI|NP/CHANGE" % self.user
+        inherited_ace_str = "ACL:%s:ALLOWED/I/CHANGE" % self.user
+        try:
+            self.smb_cacls(["--propagate-inheritance", "--add",
+                            dir_add_acl_str, self.oi_dir])
+
+            # check top level container 'oi_dir' has OI|CI|NP/CHANGE
+            dir_ace = self.ace_parse_str(dir_add_acl_str)
+            self.assertTrue(self.file_ace_check(self.oi_dir, dir_ace))
+
+            child_file_ace = self.ace_parse_str(inherited_ace_str)
+            # nested file 'oi_dir/file-1' should have inherited I/CHANGE
+            self.assertTrue(self.file_ace_check(self.f1, child_file_ace))
+
+            # nested dir 'oi_dir/nested' should have inherited I/CHANGE
+            child_dir_ace = self.ace_parse_str(inherited_ace_str)
+            self.assertTrue(self.file_ace_check(self.nested_dir, child_dir_ace))
+            # nested file 'oi_dir/nested/file-2' should NOT have I/CHANGE
+            child_dir_ace = self.ace_parse_str(inherited_ace_str)
+            self.assertTrue(self.file_ace_check(self.f2, child_dir_ace) == False)
+            # nested dir 'oi_dir/nested/nested_again/' should NOT have I/CHANGE
+            child_dir_ace = self.ace_parse_str(inherited_ace_str)
+            self.assertTrue(self.file_ace_check(self.nested_again_dir, child_dir_ace) == False)
+            # nested file 'oi_dir/nested/nested_again/file-3' should NOT have I/CHANGE
+            child_dir_ace = self.ace_parse_str(inherited_ace_str)
+            self.assertTrue(self.file_ace_check(self.f3, child_dir_ace) == False)
+
+        except BlackboxProcessError as e:
+            self.fail(str(e))
+
+    def test_simple_oinp_add(self):
+        """test smbcacls '--propagate-inheritance --add' which attempts to add the ACL
+        for the file and additionally use inheritance rules to propagate appropriate
+        changes to children
+
+        This test adds an ACL with (OI)(NP)(CHANGE)
+        (NP) - no propagation should not propagate the changes to any further containers
+
+        before:
+
+        +-tar_test_dir/ (OI)(CI)(I)(F)
+        +-oi_dir/ (OI)(CI)(I)(F)
+        | +-file.1 (I)(F)
+        | +-nested/ (OI)(CI)(I)(F)
+        | +-file.2 (I)(F)
+        | +-nested_again/ (OI)(CI)(I)(F)
+        | +-file.3 (I)(F)
+
+        after/expected:
+
+        +-tar_test_dir/ (OI)(CI)(I)(F)
+        +-oi_dir/ (OI)(CI)(I)(F), (OI)(NP)(CHANGE)
+        | +-file.1 (I)(F), (I)(CHANGE)
+        | +-nested/ (OI)(CI)(I)(F)
+        | +-file.2 (I)(F)
+        | +-nested_again/ (OI)(CI)(I)(F)
+        | +-file.3 (I)(F)"""
+
+        dir_add_acl_str = "ACL:%s:ALLOWED/OI|NP/CHANGE" % self.user
+        inherited_ace_str = "ACL:%s:ALLOWED/I/CHANGE" % self.user
+        try:
+            self.smb_cacls(["--propagate-inheritance",
+                            "--add",
+                            dir_add_acl_str, self.oi_dir])
+
+            # check top level container 'oi_dir' has OI|NP/CHANGE
+            dir_ace = self.ace_parse_str(dir_add_acl_str)
+            self.assertTrue(self.file_ace_check(self.oi_dir, dir_ace))
+
+            child_file_ace = self.ace_parse_str(inherited_ace_str)
+            # nested file 'oi_dir/file-1' should have inherited I/CHANGE
+            self.assertTrue(self.file_ace_check(self.f1, child_file_ace))
+
+            # nested dir 'oi_dir/nested' should NOT have I/CHANGE
+            child_dir_ace = self.ace_parse_str(inherited_ace_str)
+            self.assertTrue(self.file_ace_check(self.nested_dir, child_dir_ace) == False)
+
+            child_file_ace = self.ace_parse_str(inherited_ace_str)
+            # nested file 'oi_dir/nested/file-2' should NOT have inherited I/CHANGE
+            self.assertTrue(self.file_ace_check(self.f2, child_file_ace) == False)
+
+        except BlackboxProcessError as e:
+            self.fail(str(e))
+
+    def test_simple_cinp_add(self):
+        """test smbcacls '--propagate-inheritance --add' which attempts to add the ACL
+        for the file and additionally use inheritance rules to propagate appropriate
+        changes to children
+
+        This test adds an ACL with (CI)(NP)(CHANGE)
+        (NP) - no propagation should not propagate the changes to any further containers
+
+        before:
+
+        +-tar_test_dir/ (OI)(CI)(I)(F)
+        +-oi_dir/ (OI)(CI)(I)(F)
+        | +-file.1 (I)(F)
+        | +-nested/ (OI)(CI)(I)(F)
+        | +-file.2 (I)(F)
+        | +-nested_again/ (OI)(CI)(I)(F)
+        | +-file.3 (I)(F)
+
+        after/expected:
+
+        +-tar_test_dir/ (OI)(CI)(I)(F)
+        +-oi_dir/ (OI)(CI)(I)(F), (CI)(NP)(CHANGE)
+        | +-file.1 (I)(F)
+        | +-nested/ (OI)(CI)(I)(F), (I)(CHANGE)
+        | +-file.2 (I)(F)
+        | +-nested_again/ (OI)(CI)(I)(F)
+        | +-file.3 (I)(F)"""
+
+        dir_add_acl_str = "ACL:%s:ALLOWED/CI|NP/CHANGE" % self.user
+        inherited_ace_str = "ACL:%s:ALLOWED/I/CHANGE" % self.user
+        try:
+            self.smb_cacls(["--propagate-inheritance", "--add",
+                            dir_add_acl_str, self.oi_dir])
+
+            # check top level container 'oi_dir' has CI|NP/CHANGE
+            dir_ace = self.ace_parse_str(dir_add_acl_str)
+            self.assertTrue(self.file_ace_check(self.oi_dir, dir_ace))
+
+            # nested file 'oi_dir/file-1' should NOT have inherited I/CHANGE
+            child_file_ace = self.ace_parse_str(inherited_ace_str)
+            self.assertTrue(self.file_ace_check(self.f1, child_file_ace) == False)
+
+            # nested dir 'oi_dir/nested' should have I/CHANGE
+            child_dir_ace = self.ace_parse_str(inherited_ace_str)
+            self.assertTrue(self.file_ace_check(self.nested_dir, child_dir_ace))
+
+            # nested file 'oi_dir/nested/file-2' should NOT have inherited I/CHANGE
+            child_file_ace = self.ace_parse_str(inherited_ace_str)
+            self.assertTrue(self.file_ace_check(self.f2, child_file_ace) == False)
+
+            # nested dir 'oi_dir/nested/nested_again' should NOT have I/CHANGE
+            child_dir_ace = self.ace_parse_str(inherited_ace_str)
+            self.assertTrue(self.file_ace_check(self.nested_again_dir, child_dir_ace) == False)
+            # nested file 'oi_dir/nested/nested_again/file-3' should NOT have inherited I/CHANGE
+            child_file_ace = self.ace_parse_str(inherited_ace_str)
+            self.assertTrue(self.file_ace_check(self.f3, child_file_ace) == False)
+
+        except BlackboxProcessError as e:
+            self.fail(str(e))
+
+ def test_simple_cioinp_delete(self):
+ """test smbcacls '--propagate-inheritance --delete' which attempts to delete
+ the ACL for the file and additionally use inheritance rules to propagate
+ appropriate changes to children
+
+ This test adds an ACL with (CI)(OI)(NP)(CHANGE)
+ (NP) - no propagation should not propagate the changes any further containers
+
+ before:
+
+ +-tar_test_dir/ (OI)(CI)(I)(F)
+ +-oi_dir/ (OI)(CI)(I)(F), (CI)(OI)(NP)(CHANGE)
+ | +-file.1 (I)(F), (I)(CHANGE)
+ | +-nested/ (OI)(CI)(I)(F), (I)(CHANGE)
+ | +-file.2 (I)(F)
+
+ after/expected:
+
+ +-tar_test_dir/ (OI)(CI)(I)(F)
+ +-oi_dir/ (OI)(CI)(I)(F)
+ | +-file.1 (I)(F)
+ | +-nested/ (OI)(CI)(I)(F)
+ | +-file.2 (I)(F)"""
+
+ dir_add_acl_str = "ACL:%s:ALLOWED/OI|CI|NP/CHANGE" % self.user
+ inherited_ace_str = "ACL:%s:ALLOWED/I/CHANGE" % self.user
+
+ try:
+ self.smb_cacls(["--add", dir_add_acl_str, self.oi_dir])
+
+ self.smb_cacls(["--add", inherited_ace_str, self.f1])
+
+ self.smb_cacls(["--add", inherited_ace_str, self.nested_dir])
+
+ self.smb_cacls(["--propagate-inheritance", "--delete",
+ dir_add_acl_str, self.oi_dir])
+
+            # check top level container 'oi_dir' does NOT have OI|CI|NP/CHANGE
+ dir_ace = self.ace_parse_str(dir_add_acl_str)
+ self.assertTrue(self.file_ace_check(self.oi_dir, dir_ace) == False)
+
+ # nested file 'oi_dir/file-1' should NOT have inherited I/CHANGE
+ child_file_ace = self.ace_parse_str(inherited_ace_str)
+ self.assertTrue(self.file_ace_check(self.f1, child_file_ace) == False)
+
+ # nested dir 'oi_dir/nested' should NOT have inherited I/CHANGE
+ child_dir_ace = self.ace_parse_str(inherited_ace_str)
+ self.assertTrue(self.file_ace_check(self.nested_dir, child_dir_ace) == False)
+ except BlackboxProcessError as e:
+ self.fail(str(e))
+
+ def test_simple_oinp_delete(self):
+ """test smbcacls '--propagate-inheritance --delete' which attempts to delete the
+ ACL for the file and additionally use inheritance rules to propagate
+ appropriate changes to children
+
+ This test adds an ACL with (OI)(NP)(CHANGE)
+        (NP) - no propagation should not propagate the changes to any further containers
+
+ before:
+
+ +-tar_test_dir/ (OI)(CI)(I)(F)
+ +-oi_dir/ (OI)(CI)(I)(F), (OI)(NP)(CHANGE)
+ | +-file.1 (I)(F), (I)(CHANGE)
+ | +-nested/ (OI)(CI)(I)(F)
+ | +-file.2 (I)(F)
+
+ after/expected:
+
+ +-tar_test_dir/ (OI)(CI)(I)(F)
+ +-oi_dir/ (OI)(CI)(I)(F)
+ | +-file.1 (I)(F)
+ | +-nested/ (OI)(CI)(I)(F)
+ | +-file.2 (I)(F)"""
+
+ dir_add_acl_str = "ACL:%s:ALLOWED/OI|NP/CHANGE" % self.user
+ inherited_ace_str = "ACL:%s:ALLOWED/I/CHANGE" % self.user
+ try:
+
+ # set up 'before' permissions
+ self.smb_cacls(["--add", dir_add_acl_str, self.oi_dir])
+
+ self.smb_cacls(["--add", inherited_ace_str, self.f1])
+
+ self.smb_cacls(["--propagate-inheritance", "--delete",
+ dir_add_acl_str, self.oi_dir])
+
+            # check top level container 'oi_dir' does NOT have OI|NP/CHANGE
+ dir_ace = self.ace_parse_str(dir_add_acl_str)
+ self.assertTrue(self.file_ace_check(self.oi_dir, dir_ace) == False)
+
+ child_file_ace = self.ace_parse_str(inherited_ace_str)
+ # nested file 'oi_dir/file-1' should NOT have inherited I/CHANGE
+ self.assertTrue(self.file_ace_check(self.f1, child_file_ace) == False)
+
+ except BlackboxProcessError as e:
+ self.fail(str(e))
+
+ def test_simple_cinp_delete(self):
+ """test smbcacls '--propagate-inheritance --delete' which attempts to delete the
+ ACL for the file and additionally use inheritance rules to propagate
+ appropriate changes to children
+
+ This test adds an ACL with (CI)(NP)(CHANGE)
+        (NP) - no propagation should not propagate the changes to any further containers
+
+ before:
+
+ +-tar_test_dir/ (OI)(CI)(I)(F)
+ +-oi_dir/ (OI)(CI)(I)(F), (CI)(NP)(CHANGE)
+ | +-file.1 (I)(F)
+ | +-nested/ (OI)(CI)(I)(F), (I)(CHANGE)
+ | +-file.2 (I)(F)
+
+ after/expected:
+
+ +-tar_test_dir/ (OI)(CI)(I)(F)
+ +-oi_dir/ (OI)(CI)(I)(F)
+ | +-file.1 (I)(F)
+ | +-nested/ (OI)(CI)(I)(F)
+ | +-file.2 (I)(F)"""
+
+ dir_add_acl_str = "ACL:%s:ALLOWED/CI|NP/CHANGE" % self.user
+ inherited_ace_str = "ACL:%s:ALLOWED/I/CHANGE" % self.user
+
+ try:
+ self.smb_cacls(["--add", dir_add_acl_str, self.oi_dir])
+
+ self.smb_cacls(["--add", inherited_ace_str, self.nested_dir])
+
+ self.smb_cacls(["--propagate-inheritance", "--delete",
+ dir_add_acl_str, self.oi_dir])
+
+            # check top level container 'oi_dir' doesn't have CI|NP/CHANGE
+ dir_ace = self.ace_parse_str(dir_add_acl_str)
+ self.assertTrue(self.file_ace_check(self.oi_dir, dir_ace) == False)
+
+ child_file_ace = self.ace_parse_str(inherited_ace_str)
+ # nested file 'oi_dir/file-1' should NOT have inherited I/CHANGE
+ self.assertTrue(self.file_ace_check(self.f1, child_file_ace) == False)
+
+ # nested dir 'oi_dir/nested' should NOT have I/CHANGE
+ child_dir_ace = self.ace_parse_str(inherited_ace_str)
+ self.assertTrue(self.file_ace_check(self.nested_dir, child_dir_ace) == False)
+
+ except BlackboxProcessError as e:
+ self.fail(str(e))
+
+ def test_simple_cioi_inhibit(self):
+ """test smbcacls '--propagate-inheritance --add' which attempts to add the ACL
+ for the file and additionally use inheritance rules to propagate appropriate
+ changes to children. In particular it tests that inheritance removed does
+ indeed prevent inheritance propagation
+
+ This test adds an ACL with (CI)(OI)(CHANGE) at oi_dir
+
+ Note: Inheritance has been removed ( and ace(s) copied ) at
+ tar_test_dir/oi_dir/nested
+
+ before:
+
+ +-tar_test_dir/ (OI)(CI)(I)(F)
+ +-oi_dir/ (OI)(CI)(I)(F)
+ | +-file.1 (I)(F)
+ | +-nested/ (OI)(CI)(F)
+ | +-file.2 (I)(F)
+
+ after/expected:
+
+ +-tar_test_dir/ (OI)(CI)(I)(F)
+ +-oi_dir/ (OI)(CI)(I)(F), (CI)(OI)(CHANGE)
+        | +-file.1 (I)(F), (I)(CHANGE)
+ | +-nested/ (OI)(CI)(F)
+ | +-file.2 (I)(F)"""
+ dir_add_acl_str = "ACL:%s:ALLOWED/OI|CI/CHANGE" % self.user
+ file_inherited_ace_str = "ACL:%s:ALLOWED/I/CHANGE" % self.user
+ dir_inherited_ace_str = "ACL:%s:ALLOWED/OI|CI|I/CHANGE" % self.user
+
+ try:
+ # smb_cacls --inherit=copy
+ self.smb_cacls(["--inherit=copy", self.nested_dir])
+
+ self.smb_cacls(["--propagate-inheritance", "--add",
+ dir_add_acl_str, self.oi_dir])
+
+ # check top level container 'oi_dir' has OI|CI/CHANGE
+ dir_ace = self.ace_parse_str(dir_add_acl_str)
+ self.assertTrue(self.file_ace_check(self.oi_dir, dir_ace))
+
+ # nested file 'oi_dir/file-1' should have inherited I/CHANGE
+ child_file_ace = self.ace_parse_str(file_inherited_ace_str)
+ self.assertTrue(self.file_ace_check(self.f1, child_file_ace))
+
+ # nested dir 'oi_dir/nested/' should NOT have OI|CI|I/CHANGE
+ child_dir_ace = self.ace_parse_str(dir_inherited_ace_str)
+ self.assertTrue(self.file_ace_check(self.nested_dir, child_dir_ace) == False)
+
+ # nested file 'oi_dir/nested/file-2' should NOT have I/CHANGE
+ child_dir_ace = self.ace_parse_str(dir_inherited_ace_str)
+ self.assertTrue(self.file_ace_check(self.f2, child_dir_ace) == False)
+
+ except BlackboxProcessError as e:
+ self.fail(str(e))
diff --git a/python/samba/tests/blackbox/smbcacls_save_restore.py b/python/samba/tests/blackbox/smbcacls_save_restore.py
new file mode 100644
index 0000000..b399531
--- /dev/null
+++ b/python/samba/tests/blackbox/smbcacls_save_restore.py
@@ -0,0 +1,205 @@
+# Blackbox tests for smbcacls
+#
+# Copyright (C) Noel Power noel.power@suse.com
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+from samba.tests.blackbox.smbcacls import SmbCaclsBlockboxTestBase
+from samba.tests import BlackboxProcessError
+import os
+
+class SaveRestoreSmbCaclsTests(SmbCaclsBlockboxTestBase):
+
+ def setUp(self):
+ super().setUp()
+
+ # create toplevel testdir structure with desired ACL(s)
+ #
+ # +-tar_test_dir/ (OI)(CI)(I)(F)
+ # +-oi_dir/ (OI)(CI)(I)(F)
+ # | +-file.1 (I)(F)
+ # | +-nested/ (OI)(CI)(I)(F)
+ # | +-file.2 (I)(F)
+ # | +-nested_again/ (OI)(CI)(I)(F)
+ # | +-file.3 (I)(F)
+
+ self.toplevel = self.create_remote_test_file("tar_test_dir/file-0")
+ self.f1 = self.create_remote_test_file("tar_test_dir/oi_dir/file-1")
+ self.f2 = self.create_remote_test_file("tar_test_dir/oi_dir/nested/file-2")
+ self.f3 = self.create_remote_test_file("tar_test_dir/oi_dir/nested/nested_again/file-3")
+ self.tar_dir = os.path.split(self.toplevel)[0]
+ self.oi_dir = os.path.split(self.f1)[0]
+ self.nested_dir = os.path.split(self.f2)[0]
+ self.nested_again_dir = os.path.split(self.f3)[0]
+
+ dir_acl_str = "ACL:%s:ALLOWED/OI|CI/FULL" % self.user
+ inherited_dir_acl_str = "ACL:%s:ALLOWED/OI|CI|I/FULL" % self.user
+ file_acl_str = "ACL:%s:ALLOWED/I/FULL" % self.user
+
+ self.smb_cacls(["--modify", dir_acl_str, self.tar_dir])
+ self.smb_cacls(["--modify", inherited_dir_acl_str, self.oi_dir])
+ self.smb_cacls(["--modify", inherited_dir_acl_str, self.nested_dir])
+ self.smb_cacls(["--modify", inherited_dir_acl_str, self.nested_again_dir])
+ self.smb_cacls(["--modify", file_acl_str, self.f1])
+ self.smb_cacls(["--modify", file_acl_str, self.f2])
+ self.smb_cacls(["--modify", file_acl_str, self.f3])
+
+ def tearDown(self):
+ # tmp is the default share which has an existing testdir smbcacls
+ # we need to be prepared to deal with a 'custom' share (which also
+ # would have an existing testdir)
+ if self.share != "tmp":
+ self.dirpath = os.path.join(os.environ["LOCAL_PATH"],self.share)
+ self.dirpath = os.path.join(self.dirpath,self.testdir)
+ super().tearDown()
+
+ def test_simple_save_dir(self):
+ try:
+ # simple test to just store dacl of directory
+ with self.mktemp() as tmpfile:
+ out = self.smb_cacls(["--save", tmpfile,
+ self.oi_dir])
+ with open(tmpfile, 'rb') as infile:
+ contents = infile.read().decode('utf16')
+ lines = contents.splitlines()
+ # should be 2 lines
+ self.assertEqual(len(lines), 2)
+ # first line should be the path
+ self.assertEqual(self.oi_dir.replace('/','\\'), lines[0])
+
+ except BlackboxProcessError as e:
+ self.fail(str(e))
+
+ def test_simple_save_dir_r(self):
+ try:
+ # simple test to just store dacl of directory (recursively)
+ with self.mktemp() as tmpfile:
+ out = self.smb_cacls(["--recurse", "--save", tmpfile,
+ self.oi_dir])
+ with open(tmpfile, 'rb') as infile:
+ contents = infile.read().decode('utf16')
+ print("contents = %s" % contents)
+ lines = contents.splitlines()
+ # should be 12 lines
+ self.assertEqual(len(lines), 12)
+ paths = [
+ self.oi_dir.replace('/','\\'),
+ self.f1.replace('/','\\'),
+ self.nested_dir.replace('/','\\'),
+ self.f2.replace('/','\\'),
+ self.nested_again_dir.replace('/','\\'),
+ self.f3.replace('/','\\')
+ ]
+ i = 0
+ for line in lines:
+ if not i % 2:
+ paths.remove(line)
+ i = i + 1
+ self.assertEqual(0, len(paths))
+
+ except BlackboxProcessError as e:
+ self.fail(str(e))
+
+ def test_simple_restore_dir(self):
+ try:
+ # simple test to just store dacl of directory
+ orig_saved = None
+ modified = None
+ restored = None
+ with self.mktemp() as tmpfile:
+ self.smb_cacls(["--save", tmpfile,
+ self.oi_dir])
+ with open(tmpfile, 'rb') as infile:
+ orig_saved = infile.read()
+
+ # modify directory structure
+ dir_add_acl_str = "ACL:%s:ALLOWED/OI|CI/READ" % self.user
+ self.smb_cacls(["--propagate-inheritance", "--add",
+ dir_add_acl_str, self.oi_dir])
+
+ # save modified directory dacls to file
+ with self.mktemp() as tmpfile:
+ self.smb_cacls(["--save", tmpfile,
+ self.oi_dir])
+ with open(tmpfile, 'rb') as infile:
+ modified = infile.read()
+
+            # compare orig and modified dacls
+ # they shouldn't match
+ self.assertNotEqual(orig_saved.decode('utf16'), modified.decode('utf16'))
+ # restore original dacls from file
+ with self.mktemp() as tmpfile:
+ with open(tmpfile, 'wb') as outfile:
+ outfile.write(orig_saved)
+ outfile.close()
+ out = self.smb_cacls([".", "--restore", tmpfile])
+
+ # save newly restored dacls to file
+ with self.mktemp() as tmpfile:
+ self.smb_cacls(["--save", tmpfile,
+ self.oi_dir])
+ with open(tmpfile, 'rb') as infile:
+ restored = infile.read()
+
+            # after restoring the dacls, orig unmodified dacls should match
+ # restored dacls
+ self.assertEqual(orig_saved.decode('utf16'), restored.decode('utf16'))
+
+ except BlackboxProcessError as e:
+ self.fail(str(e))
+
+ def test_simple_restore_dir_r(self):
+ try:
+ # simple test to just store dacl(s) of directory recursively
+ orig_saved = None
+ modified = None
+ restored = None
+ with self.mktemp() as tmpfile:
+ self.smb_cacls(["--recurse", "--save", tmpfile,
+ self.oi_dir])
+ with open(tmpfile, 'rb') as infile:
+ orig_saved = infile.read()
+
+ # modify directory's dacls recursively
+ dir_add_acl_str = "ACL:%s:ALLOWED/OI|CI/READ" % self.user
+ self.smb_cacls(["--propagate-inheritance", "--add",
+ dir_add_acl_str, self.oi_dir])
+
+ # save modified directories dacls recursively
+ with self.mktemp() as tmpfile:
+ self.smb_cacls(["--recurse", "--save", tmpfile,
+ self.oi_dir])
+ with open(tmpfile, 'rb') as infile:
+ modified = infile.read()
+
+ # the unmodified stringified dacls shouldn't match
+ # modified
+ self.assertNotEqual(orig_saved.decode('utf16'), modified.decode('utf16'))
+ # restore original dacls from file
+ with self.mktemp() as tmpfile:
+ with open(tmpfile, 'wb') as outfile:
+ outfile.write(orig_saved)
+ outfile.close()
+ out = self.smb_cacls([".", "--restore", tmpfile])
+
+ with self.mktemp() as tmpfile:
+ out = self.smb_cacls(["--recurse", "--save", tmpfile,
+ self.oi_dir])
+ with open(tmpfile, 'rb') as infile:
+ restored = infile.read()
+            # after restoring the dacls orig unmodified dacls should match
+ # current dacls
+ self.assertEqual(orig_saved.decode('utf16'), restored.decode('utf16'))
+ except BlackboxProcessError as e:
+ self.fail(str(e))
diff --git a/python/samba/tests/blackbox/smbcontrol.py b/python/samba/tests/blackbox/smbcontrol.py
new file mode 100644
index 0000000..95e2123
--- /dev/null
+++ b/python/samba/tests/blackbox/smbcontrol.py
@@ -0,0 +1,82 @@
+# Blackbox tests for smbcontrol
+#
+# Copyright (C) Catalyst IT Ltd. 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+from samba.tests import BlackboxTestCase, BlackboxProcessError
+from samba.messaging import Messaging
+
+COMMAND = "bin/smbcontrol"
+PING = "ping"
+USAGE = "pool-usage"
+
+
+class SmbcontrolBlackboxTests(BlackboxTestCase):
+
+ def setUp(self):
+ super().setUp()
+ lp_ctx = self.get_loadparm()
+ self.msg_ctx = Messaging(lp_ctx=lp_ctx)
+
+ def test_expected_processes(self):
+ """
+ Test that the expected samba processes are running, currently we only
+ check that at least one process is running
+ """
+ processes = self.msg_ctx.irpc_all_servers()
+ if not processes:
+ self.fail("No samba processes returned")
+
+ def test_ping(self):
+ """Test that all the samba processes can be pinged"""
+
+ processes = self.msg_ctx.irpc_all_servers()
+
+ for p in processes:
+ for id in p.ids:
+ if p.name != "samba":
+ try:
+ self.check_run("%s %d %s" % (COMMAND, id.pid, PING),
+ msg="trying to ping %s" % p.name)
+ except BlackboxProcessError as e:
+ # This process could not be pinged, which is
+ # expected (occasionally) if the ldap_server
+ # is using the "standard process model" and
+ # forking a short-lived child for each
+ # connection. We don't care about this, so we
+ # list the processes again and assume that
+ # only those that remain are relevant to the
+ # ping test. Additionally we ensure that at
+ # least one process of each name remains -- in
+ # the ldap_server case, we expect at least the
+ # parent to be there.
+ name_exists = False
+ surviving_processes = self.msg_ctx.irpc_all_servers()
+ for q in surviving_processes:
+ if q.name == p.name:
+ name_exists = True
+ if id.pid in [x.pid for x in q.ids]:
+ # the unpingable server is still
+ # listed, meaning it is broken
+ raise
+
+ if not name_exists:
+ # it looks like the service genuinely died
+ # just at this moment
+ raise
+
+ print("Ignoring error %s:" % e)
+ print("the process probably died before our ping")
+ continue
diff --git a/python/samba/tests/blackbox/smbcontrol_process.py b/python/samba/tests/blackbox/smbcontrol_process.py
new file mode 100644
index 0000000..1ff7720
--- /dev/null
+++ b/python/samba/tests/blackbox/smbcontrol_process.py
@@ -0,0 +1,131 @@
+# Blackbox tests for the smbcontrol fault injection commands
+#
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2018
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# As the test terminates and sleeps samba processes these tests need to run
+# in the preforkrestartdc test environment to prevent them impacting other
+# tests.
+#
+import time
+from samba.tests import BlackboxTestCase, BlackboxProcessError
+from samba.messaging import Messaging
+
+COMMAND = "bin/smbcontrol"
+PING = "ping"
+
+
+class SmbcontrolProcessBlockboxTests(BlackboxTestCase):
+
+ def setUp(self):
+ super().setUp()
+ lp_ctx = self.get_loadparm()
+ self.msg_ctx = Messaging(lp_ctx=lp_ctx)
+
+ def get_process_data(self):
+ services = self.msg_ctx.irpc_all_servers()
+
+ processes = []
+ for service in services:
+ for id in service.ids:
+ processes.append((service.name, id.pid))
+ return processes
+
+ def get_process(self, name):
+ processes = self.get_process_data()
+ for pname, pid in processes:
+ if name == pname:
+ return pid
+ return None
+
+ def test_inject_fault(self):
+ INJECT = "inject"
+ FAULT = "segv"
+ #
+ # Note that this process name needs to be different to the one used
+ # in the sleep test to avoid a race condition
+ #
+ pid = self.get_process("rpc_server")
+
+ #
+ # Ensure we can ping the process before injecting a fault.
+ #
+ try:
+ self.check_run("%s %s %s" % (COMMAND, pid, PING),
+ msg="trying to ping rpc_server")
+ except BlackboxProcessError as e:
+ self.fail("Unable to ping rpc_server process")
+
+ #
+ # Now inject a fault.
+ #
+ try:
+ self.check_run("%s %s %s %s" % (COMMAND, pid, INJECT, FAULT),
+ msg="injecting fault into rpc_server")
+ except BlackboxProcessError as e:
+ print(e)
+ self.fail("Unable to inject a fault into the rpc_server process")
+
+ #
+ # The process should have died, so we should not be able to ping it
+ #
+ try:
+ self.check_run("%s %s %s" % (COMMAND, pid, PING),
+ msg="trying to ping rpc_server")
+ self.fail("Could ping rpc_server process")
+ except BlackboxProcessError as e:
+ pass
+
+ def test_sleep(self):
+ SLEEP = "sleep" # smbcontrol sleep command
+ DURATION = 5 # duration to sleep server for
+ DELTA = 1 # permitted error for the sleep duration
+
+ #
+ # Note that this process name needs to be different to the one used
+ # in the inject fault test to avoid a race condition
+ #
+ pid = self.get_process("ldap_server")
+ #
+ # Ensure we can ping the process before getting it to sleep
+ #
+ try:
+ self.check_run("%s %s %s" % (COMMAND, pid, PING),
+ msg="trying to ping rpc_server")
+ except BlackboxProcessError as e:
+ self.fail("Unable to ping rpc_server process")
+
+ #
+ # Now ask it to sleep
+ #
+ start = time.time()
+ try:
+ self.check_run("%s %s %s %s" % (COMMAND, pid, SLEEP, DURATION),
+ msg="putting rpc_server to sleep for %d" % DURATION)
+ except BlackboxProcessError as e:
+ print(e)
+ self.fail("Failed to get rpc_server to sleep for %d" % DURATION)
+
+ #
+ # The process should be sleeping and not respond until it wakes
+ #
+ try:
+ self.check_run("%s %s %s" % (COMMAND, pid, PING),
+ msg="trying to ping rpc_server")
+ end = time.time()
+ duration = end - start
+ self.assertGreater(duration + DELTA, DURATION)
+ except BlackboxProcessError as e:
+ self.fail("Unable to ping rpc_server process")
diff --git a/python/samba/tests/blackbox/testdata/traffic-sample-very-short.model b/python/samba/tests/blackbox/testdata/traffic-sample-very-short.model
new file mode 100644
index 0000000..0de93ed
--- /dev/null
+++ b/python/samba/tests/blackbox/testdata/traffic-sample-very-short.model
@@ -0,0 +1,61 @@
+{
+ "ngrams": {
+ "-\t-": {
+ "cldap:3": 1,
+ "ldap:3": 1
+ },
+ "-\tldap:3": {
+ "wait:0": 1
+ },
+ "wait:0\trpc_netlogon:29": {
+ "kerberos:": 1
+ },
+ "rpc_netlogon:29\tkerberos:": {
+ "ldap:3": 1
+ },
+ "cldap:3\twait:0": {
+ "rpc_netlogon:29": 1
+ },
+ "-\tcldap:3": {
+ "cldap:3": 1
+ },
+ "ldap:3\twait:0": {
+ "ldap:2": 1
+ },
+ "cldap:3\tcldap:3": {
+ "cldap:3": 1,
+ "wait:0": 1
+ },
+ "kerberos:\tldap:3": {
+ "-": 1
+ }
+ },
+ "dns": {
+ "1": 9,
+ "0": 9
+ },
+ "packet_rate": [
+ 50,
+ 0.32482
+ ],
+ "query_details": {
+ "rpc_netlogon:29": {
+ "-": 1
+ },
+ "cldap:3": {
+ "\t\t\tNetlogon\t\t\t": 3
+ },
+ "ldap:3": {
+ "\t\t\tsubschemaSubentry,dsServiceName,namingContexts,defaultNamingContext,schemaNamingContext,configurationNamingContext,rootDomainNamingContext,supportedControl,supportedLDAPVersion,supportedLDAPPolicies,supportedSASLMechanisms,dnsHostName,ldapServiceName,serverName,supportedCapabilities\t\t\t": 1,
+ "2\tDC,DC\t\tcn\t\t\t": 1
+ },
+ "ldap:2": {
+ "\t\t\t\t\t\t": 1
+ },
+ "kerberos:": {
+ "": 1
+ }
+ },
+ "cumulative_duration": 0.39243292808532715,
+ "version": 2
+} \ No newline at end of file
diff --git a/python/samba/tests/blackbox/testdata/traffic-sample-very-short.txt b/python/samba/tests/blackbox/testdata/traffic-sample-very-short.txt
new file mode 100644
index 0000000..ae766f1
--- /dev/null
+++ b/python/samba/tests/blackbox/testdata/traffic-sample-very-short.txt
@@ -0,0 +1,50 @@
+1487921562.592126000 11 3 1 dns 0 query
+1487921562.592285000 11 1 4 dns 0 query
+1487921562.592636000 11 4 1 dns 1 response
+1487921562.592911000 11 1 3 dns 1 response
+1487921562.593315000 06 3 5 1 ldap 3 searchRequest 2 DC,DC cn
+1487921562.596247000 11 3 1 dns 0 query
+1487921562.596362000 11 1 4 dns 0 query
+1487921562.596697000 11 4 1 dns 1 response
+1487921562.596921000 11 1 3 dns 1 response
+1487921562.598308000 11 3 1 dns 0 query
+1487921562.598414000 11 1 4 dns 0 query
+1487921562.598729000 11 4 1 dns 1 response
+1487921562.598963000 11 1 3 dns 1 response
+1487921562.607624000 11 6 1 dns 0 query
+1487921562.607956000 11 6 1 dns 0 query
+1487921562.608009000 11 1 6 dns 1 response
+1487921562.608232000 11 1 6 dns 1 response
+1487921562.612424000 11 6 1 dns 0 query
+1487921562.612648000 11 1 6 dns 1 response
+1487921562.720442000 11 6 1 cldap 3 searchRequest Netlogon
+1487921562.720706000 11 6 1 cldap 3 searchRequest Netlogon
+1487921562.721004000 11 6 1 cldap 3 searchRequest Netlogon
+1487921562.724801000 11 1 6 cldap 5 searchResDone
+1487921562.728632000 11 1 6 cldap 5 searchResDone
+1487921562.732508000 11 1 6 cldap 5 searchResDone
+1487921562.748004000 06 3 1 5 ldap 5 searchResDone
+1487921562.820387000 06 3 5 1 ldap 2 unbindRequest
+1487921562.831445000 06 14 6 1 dcerpc 11 Bind
+1487921562.831565000 06 14 1 6 dcerpc 12 Bind_ack
+1487921562.831776000 06 14 6 1 epm 3 Map
+1487921562.832483000 06 14 1 6 epm 3 Map
+1487921562.833521000 06 15 6 1 dcerpc 11 Bind
+1487921562.833775000 06 15 1 6 dcerpc 12 Bind_ack
+1487921562.833955000 06 15 6 1 rpc_netlogon 4 NetrServerReqChallenge
+1487921562.834039000 06 15 1 6 rpc_netlogon 4 NetrServerReqChallenge
+1487921562.834325000 06 15 6 1 rpc_netlogon 26 NetrServerAuthenticate3
+1487921562.834895000 06 15 1 6 rpc_netlogon 26 NetrServerAuthenticate3
+1487921562.835515000 06 16 6 1 dcerpc 11 Bind
+1487921562.836417000 06 16 1 6 dcerpc 12 Bind_ack
+1487921562.836694000 06 16 6 1 rpc_netlogon 21 NetrLogonDummyRoutine1
+1487921562.836917000 06 16 1 6 rpc_netlogon 21 NetrLogonDummyRoutine1
+1487921562.852041000 06 14 6 1 epm 3 Map
+1487921562.852687000 06 14 1 6 epm 3 Map
+1487921562.876310000 06 16 6 1 rpc_netlogon 29 NetrLogonGetDomainInfo
+1487921562.880868000 06 18 6 1 kerberos
+1487921562.881074000 06 16 1 6 rpc_netlogon 29 NetrLogonGetDomainInfo
+1487921562.884476000 06 19 6 1 ldap 3 searchRequest subschemaSubentry,dsServiceName,namingContexts,defaultNamingContext,schemaNamingContext,configurationNamingContext,rootDomainNamingContext,supportedControl,supportedLDAPVersion,supportedLDAPPolicies,supportedSASLMechanisms,dnsHostName,ldapServiceName,serverName,supportedCapabilities
+1487921562.885803000 06 18 1 6 kerberos
+1487921562.892086000 06 19 1 6 ldap 5 searchResDone
+1487921562.916946000 06 20 6 1 smb 0x72 Negotiate Protocol (0x72)
diff --git a/python/samba/tests/blackbox/testdata/traffic_learner.expected b/python/samba/tests/blackbox/testdata/traffic_learner.expected
new file mode 100644
index 0000000..3ae8089
--- /dev/null
+++ b/python/samba/tests/blackbox/testdata/traffic_learner.expected
@@ -0,0 +1,61 @@
+{
+ "packet_rate": [
+ 10,
+ 0.22707200050354004
+ ],
+ "query_details": {
+ "rpc_netlogon:29": {
+ "-": 1
+ },
+ "cldap:3": {
+ "\t\t\tNetlogon\t\t\t": 3
+ },
+ "ldap:3": {
+ "\t\t\tsubschemaSubentry,dsServiceName,namingContexts,defaultNamingContext,schemaNamingContext,configurationNamingContext,rootDomainNamingContext,supportedControl,supportedLDAPVersion,supportedLDAPPolicies,supportedSASLMechanisms,dnsHostName,ldapServiceName,serverName,supportedCapabilities\t\t\t": 1,
+ "2\tDC,DC\t\tcn\t\t\t": 1
+ },
+ "ldap:2": {
+ "\t\t\t\t\t\t": 1
+ },
+ "kerberos:": {
+ "": 1
+ }
+ },
+ "ngrams": {
+ "-\t-": {
+ "cldap:3": 1,
+ "ldap:3": 1
+ },
+ "-\tldap:3": {
+ "wait:0": 1
+ },
+ "wait:0\trpc_netlogon:29": {
+ "kerberos:": 1
+ },
+ "rpc_netlogon:29\tkerberos:": {
+ "ldap:3": 1
+ },
+ "cldap:3\twait:0": {
+ "rpc_netlogon:29": 1
+ },
+ "-\tcldap:3": {
+ "cldap:3": 1
+ },
+ "ldap:3\twait:0": {
+ "ldap:2": 1
+ },
+ "cldap:3\tcldap:3": {
+ "cldap:3": 1,
+ "wait:0": 1
+ },
+ "kerberos:\tldap:3": {
+ "-": 1
+ }
+ },
+ "version": 2,
+ "dns": {
+ "1": 9,
+ "0": 9
+ },
+ "cumulative_duration": 0.39243292808532715
+} \ No newline at end of file
diff --git a/python/samba/tests/blackbox/testdata/traffic_replay-0.expected b/python/samba/tests/blackbox/testdata/traffic_replay-0.expected
new file mode 100644
index 0000000..8f44438
--- /dev/null
+++ b/python/samba/tests/blackbox/testdata/traffic_replay-0.expected
@@ -0,0 +1,18 @@
+0.011388 06 2 1 ldap 3 searchRequest 2 DC,DC cn
+0.221447 06 2 1 ldap 2 unbindRequest
+0.460878 06 3 1 ldap 3 searchRequest 2 DC,DC cn
+0.581933 11 4 1 cldap 3 searchRequest Netlogon
+0.596977 11 4 1 cldap 3 searchRequest Netlogon
+0.611184 11 4 1 cldap 3 searchRequest Netlogon
+0.666808 06 3 1 ldap 2 unbindRequest
+0.744297 06 4 1 rpc_netlogon 29 NetrLogonGetDomainInfo
+0.768994 06 4 1 kerberos
+0.772476 06 4 1 ldap 3 searchRequest 2 DC,DC cn
+0.805442 11 5 1 cldap 3 searchRequest Netlogon
+0.805536 11 5 1 cldap 3 searchRequest Netlogon
+0.807659 11 5 1 cldap 3 searchRequest Netlogon
+0.808614 11 5 1 cldap 3 searchRequest Netlogon
+0.808819 11 5 1 cldap 3 searchRequest Netlogon
+0.865384 06 6 1 ldap 3 searchRequest subschemaSubentry,dsServiceName,namingContexts,defaultNamingContext,schemaNamingContext,configurationNamingContext,rootDomainNamingContext,supportedControl,supportedLDAPVersion,supportedLDAPPolicies,supportedSASLMechanisms,dnsHostName,ldapServiceName,serverName,supportedCapabilities
+0.973595 06 5 1 rpc_netlogon 29 NetrLogonGetDomainInfo
+0.974012 06 5 1 kerberos
diff --git a/python/samba/tests/blackbox/testdata/traffic_replay-1.expected b/python/samba/tests/blackbox/testdata/traffic_replay-1.expected
new file mode 100644
index 0000000..1ac6968
--- /dev/null
+++ b/python/samba/tests/blackbox/testdata/traffic_replay-1.expected
@@ -0,0 +1,19 @@
+0.011519 11 2 1 cldap 3 searchRequest Netlogon
+0.012916 11 2 1 cldap 3 searchRequest Netlogon
+0.158388 06 3 1 ldap 3 searchRequest 2 DC,DC cn
+0.164506 06 2 1 rpc_netlogon 29 NetrLogonGetDomainInfo
+0.166151 06 2 1 kerberos
+0.166301 06 2 1 ldap 3 searchRequest subschemaSubentry,dsServiceName,namingContexts,defaultNamingContext,schemaNamingContext,configurationNamingContext,rootDomainNamingContext,supportedControl,supportedLDAPVersion,supportedLDAPPolicies,supportedSASLMechanisms,dnsHostName,ldapServiceName,serverName,supportedCapabilities
+0.258932 06 4 1 rpc_netlogon 29 NetrLogonGetDomainInfo
+0.259908 06 4 1 kerberos
+0.260073 06 4 1 ldap 3 searchRequest 2 DC,DC cn
+0.286044 06 5 1 ldap 3 searchRequest 2 DC,DC cn
+0.295757 06 3 1 ldap 2 unbindRequest
+0.459791 06 5 1 ldap 2 unbindRequest
+0.553887 06 6 1 ldap 3 searchRequest subschemaSubentry,dsServiceName,namingContexts,defaultNamingContext,schemaNamingContext,configurationNamingContext,rootDomainNamingContext,supportedControl,supportedLDAPVersion,supportedLDAPPolicies,supportedSASLMechanisms,dnsHostName,ldapServiceName,serverName,supportedCapabilities
+0.641127 11 7 1 cldap 3 searchRequest Netlogon
+0.641297 11 7 1 cldap 3 searchRequest Netlogon
+0.783989 06 6 1 ldap 2 unbindRequest
+0.901096 06 7 1 rpc_netlogon 29 NetrLogonGetDomainInfo
+0.915260 06 7 1 kerberos
+0.915711 06 7 1 ldap 3 searchRequest 2 DC,DC cn
diff --git a/python/samba/tests/blackbox/testdata/traffic_replay-2.expected b/python/samba/tests/blackbox/testdata/traffic_replay-2.expected
new file mode 100644
index 0000000..7850a25
--- /dev/null
+++ b/python/samba/tests/blackbox/testdata/traffic_replay-2.expected
@@ -0,0 +1,17 @@
+0.011388 06 2 1 ldap 3 searchRequest 2 DC,DC cn
+0.221447 06 2 1 ldap 2 unbindRequest
+0.460878 06 3 1 ldap 3 searchRequest 2 DC,DC cn
+0.581933 11 4 1 cldap 3 searchRequest Netlogon
+0.596977 11 4 1 cldap 3 searchRequest Netlogon
+0.611184 11 4 1 cldap 3 searchRequest Netlogon
+0.666808 06 3 1 ldap 2 unbindRequest
+0.692730 11 5 1 cldap 3 searchRequest Netlogon
+0.692879 11 5 1 cldap 3 searchRequest Netlogon
+0.692946 11 5 1 cldap 3 searchRequest Netlogon
+0.744297 06 4 1 rpc_netlogon 29 NetrLogonGetDomainInfo
+0.768994 06 4 1 kerberos
+0.772476 06 4 1 ldap 3 searchRequest 2 DC,DC cn
+0.827760 06 5 1 rpc_netlogon 29 NetrLogonGetDomainInfo
+0.828419 06 5 1 kerberos
+0.862850 06 5 1 ldap 3 searchRequest subschemaSubentry,dsServiceName,namingContexts,defaultNamingContext,schemaNamingContext,configurationNamingContext,rootDomainNamingContext,supportedControl,supportedLDAPVersion,supportedLDAPPolicies,supportedSASLMechanisms,dnsHostName,ldapServiceName,serverName,supportedCapabilities
+0.865384 06 6 1 ldap 3 searchRequest subschemaSubentry,dsServiceName,namingContexts,defaultNamingContext,schemaNamingContext,configurationNamingContext,rootDomainNamingContext,supportedControl,supportedLDAPVersion,supportedLDAPPolicies,supportedSASLMechanisms,dnsHostName,ldapServiceName,serverName,supportedCapabilities
diff --git a/python/samba/tests/blackbox/testdata/traffic_replay-3.expected b/python/samba/tests/blackbox/testdata/traffic_replay-3.expected
new file mode 100644
index 0000000..3c9b4e7
--- /dev/null
+++ b/python/samba/tests/blackbox/testdata/traffic_replay-3.expected
@@ -0,0 +1,11 @@
+0.011388 06 2 1 ldap 3 searchRequest 2 DC,DC cn
+0.221447 06 2 1 ldap 2 unbindRequest
+0.460878 06 3 1 ldap 3 searchRequest 2 DC,DC cn
+0.581933 11 4 1 cldap 3 searchRequest Netlogon
+0.596977 11 4 1 cldap 3 searchRequest Netlogon
+0.611184 11 4 1 cldap 3 searchRequest Netlogon
+0.666808 06 3 1 ldap 2 unbindRequest
+0.744297 06 4 1 rpc_netlogon 29 NetrLogonGetDomainInfo
+0.768994 06 4 1 kerberos
+0.772476 06 4 1 ldap 3 searchRequest 2 DC,DC cn
+0.865384 06 5 1 ldap 3 searchRequest subschemaSubentry,dsServiceName,namingContexts,defaultNamingContext,schemaNamingContext,configurationNamingContext,rootDomainNamingContext,supportedControl,supportedLDAPVersion,supportedLDAPPolicies,supportedSASLMechanisms,dnsHostName,ldapServiceName,serverName,supportedCapabilities
diff --git a/python/samba/tests/blackbox/testdata/traffic_replay.expected b/python/samba/tests/blackbox/testdata/traffic_replay.expected
new file mode 100644
index 0000000..0c6b2a2
--- /dev/null
+++ b/python/samba/tests/blackbox/testdata/traffic_replay.expected
@@ -0,0 +1,18 @@
+0.040433 06 2 1 rpc_netlogon 29 NetrLogonGetDomainInfo
+0.059203 06 2 1 kerberos
+0.061641 06 2 1 ldap 3 searchRequest 2 DC,DC cn
+0.535074 11 3 1 cldap 3 searchRequest Netlogon
+0.535369 11 3 1 cldap 3 searchRequest Netlogon
+0.536671 11 3 1 cldap 3 searchRequest Netlogon
+0.537238 11 3 1 cldap 3 searchRequest Netlogon
+0.537362 11 3 1 cldap 3 searchRequest Netlogon
+0.602824 11 4 1 cldap 3 searchRequest Netlogon
+0.640115 11 4 1 cldap 3 searchRequest Netlogon
+0.714546 06 3 1 rpc_netlogon 29 NetrLogonGetDomainInfo
+0.715865 06 3 1 kerberos
+0.716613 06 3 1 ldap 3 searchRequest 2 DC,DC cn
+0.767674 06 4 1 rpc_netlogon 29 NetrLogonGetDomainInfo
+0.778022 06 5 1 ldap 3 searchRequest 2 DC,DC cn
+0.792356 06 4 1 kerberos
+0.792763 06 4 1 ldap 3 searchRequest 2 DC,DC cn
+0.960412 06 5 1 ldap 2 unbindRequest
diff --git a/python/samba/tests/blackbox/testdata/traffic_summary.expected b/python/samba/tests/blackbox/testdata/traffic_summary.expected
new file mode 100644
index 0000000..b1db327
--- /dev/null
+++ b/python/samba/tests/blackbox/testdata/traffic_summary.expected
@@ -0,0 +1,29 @@
+1486690576.530451000 11 0 1 2 nbns 0 query
+1486690578.137335000 06 0 3 3 kerberos 10 krb-as-req machine
+1486690578.141276000 06 0 3 3 kerberos 11 krb-as-rep
+1486690584.104038000 06 49 4 3 kerberos 10 krb-as-req user
+1486690584.108221000 06 49 3 4 kerberos 11 krb-as-rep
+1486690584.139378000 06 50 4 3 kerberos 14 krb-ap-req
+1486690584.143220000 06 50 3 4 kerberos 13 krb-tgs-rep
+1486690584.770344000 06 60 4 3 ldap 0 bindRequest 3 sasl 1.3.6.1.5.5.2
+1486690584.774978000 06 60 3 4 ldap 1 bindResponse
+1486690584.775218000 06 60 4 3 ldap 3 searchRequest (objectClass=*) rootDomainNamingContext,configurationNamingContext,schemaNamingContext,defaultNamingContext
+1486690584.775574000 06 60 4 3 ldap 3 searchRequest DC,DC,DC (objectSid) objectSid
+1486690586.238734000 06 92 4 3 ldap 3 searchRequest 2 WKGUID,DC,DC,DC (objectClass=*)
+1486934236.150107000 6 5 6 smb 255 No further commands (0xff)
+1486934236.150278000 6 6 5 dcerpc 11 Bind
+1486934236.201029000 6 6 5 srvsvc 15 NetShareEnumAll
+1486934237.552194000 11 30 7 3 browser 0x00000008 Browser Election Request (0x08)
+1486690678.178692000 06 1177 8 9 lsarpc 27 lsa_SetInformationTrustedDomain
+1486690679.853951000 06 1183 9 8 epm 3 Map
+1486690679.854842000 06 1184 9 8 rpc_netlogon 4 NetrServerReqChallenge
+1487197586.858394000 11 66 10 8 cldap 3 searchRequest (&(&(NtVer)(DnsDomain))(AAC)) NetLogon
+1487197586.864862000 06 12 10 8 smb2 0 Negotiate Protocol
+1487197588.515337000 11 76 10 11 dns 0 query
+1487197588.911149000 11 76 11 10 dns 1 response
+1487197589.619792000 06 29 10 10 dnsserver 9 DnssrvUpdateRecord2
+1487200690.757022000 06 10 4 3 samr 0 Connect
+1487200691.039416000 06 14 4 3 drsuapi 0 DsBind
+1486934584.809271000 11 322 12 7 smb_netlogon 0x00000012 SAM LOGON request from client (0x12)
+1486690719.940434000 06 1400 4 3 ldap 6 modifyRequest servicePrincipalName 2 replace
+1486690682.579057000 06 1207 4 3 ldap 0 bindRequest 0 simple
diff --git a/python/samba/tests/blackbox/testdata/traffic_summary.pdml b/python/samba/tests/blackbox/testdata/traffic_summary.pdml
new file mode 100644
index 0000000..ac56a24
--- /dev/null
+++ b/python/samba/tests/blackbox/testdata/traffic_summary.pdml
@@ -0,0 +1,4989 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="pdml2html.xsl"?>
+<!-- You can find pdml2html.xsl in /usr/share/wireshark or at https://code.wireshark.org/review/gitweb?p=wireshark.git;a=blob_plain;f=pdml2html.xsl. -->
+<!-- Examples in this file are taken from a packet capture of make test -->
+<!-- where values where too large and of no interest they where replaced with "...elided..." -->
+<pdml version="0" creator="wireshark/2.0.2" time="Wed Feb 15 14:51:04 2017" capture_file="sample.pcap">
+
+<packet>
+ <proto name="geninfo" pos="0" showname="General information" size="78">
+ <field name="num" pos="0" show="1" showname="Number" value="1" size="78"/>
+ <field name="len" pos="0" show="78" showname="Frame Length" value="4e" size="78"/>
+ <field name="caplen" pos="0" show="78" showname="Captured Length" value="4e" size="78"/>
+ <field name="timestamp" pos="0" show="Feb 10, 2017 14:36:16.530451000 NZDT" showname="Captured Time" value="1486690576.530451000" size="78"/>
+ </proto>
+ <proto name="frame" showname="Frame 1: 78 bytes on wire (624 bits), 78 bytes captured (624 bits)" size="78" pos="0">
+ <field name="frame.encap_type" showname="Encapsulation type: Raw IP (7)" size="0" pos="0" show="7"/>
+ <field name="frame.time" showname="Arrival Time: Feb 10, 2017 14:36:16.530451000 NZDT" size="0" pos="0" show="Feb 10, 2017 14:36:16.530451000 NZDT"/>
+ <field name="frame.offset_shift" showname="Time shift for this packet: 0.000000000 seconds" size="0" pos="0" show="0.000000000"/>
+ <field name="frame.time_epoch" showname="Epoch Time: 1486690576.530451000 seconds" size="0" pos="0" show="1486690576.530451000"/>
+ <field name="frame.time_delta" showname="Time delta from previous captured frame: 0.000000000 seconds" size="0" pos="0" show="0.000000000"/>
+ <field name="frame.time_delta_displayed" showname="Time delta from previous displayed frame: 0.000000000 seconds" size="0" pos="0" show="0.000000000"/>
+ <field name="frame.time_relative" showname="Time since reference or first frame: 0.000000000 seconds" size="0" pos="0" show="0.000000000"/>
+ <field name="frame.number" showname="Frame Number: 1" size="0" pos="0" show="1"/>
+ <field name="frame.len" showname="Frame Length: 78 bytes (624 bits)" size="0" pos="0" show="78"/>
+ <field name="frame.cap_len" showname="Capture Length: 78 bytes (624 bits)" size="0" pos="0" show="78"/>
+ <field name="frame.marked" showname="Frame is marked: False" size="0" pos="0" show="0"/>
+ <field name="frame.ignored" showname="Frame is ignored: False" size="0" pos="0" show="0"/>
+ <field name="frame.protocols" showname="Protocols in frame: raw:ip:udp:nbns" size="0" pos="0" show="raw:ip:udp:nbns"/>
+ </proto>
+ <proto name="raw" showname="Raw packet data" size="78" pos="0"/>
+ <proto name="ip" showname="Internet Protocol Version 4, Src: 127.0.0.1, Dst: 127.255.255.255" size="20" pos="0">
+ <field name="ip.version" showname="0100 .... = Version: 4" size="1" pos="0" show="4" value="4" unmaskedvalue="45"/>
+ <field name="ip.hdr_len" showname=".... 0101 = Header Length: 20 bytes" size="1" pos="0" show="5" value="5" unmaskedvalue="45"/>
+ <field name="ip.dsfield" showname="Differentiated Services Field: 0x00 (DSCP: CS0, ECN: Not-ECT)" size="1" pos="1" show="0x00000000" value="00">
+ <field name="ip.dsfield.dscp" showname="0000 00.. = Differentiated Services Codepoint: Default (0)" size="1" pos="1" show="0" value="0" unmaskedvalue="00"/>
+ <field name="ip.dsfield.ecn" showname=".... ..00 = Explicit Congestion Notification: Not ECN-Capable Transport (0)" size="1" pos="1" show="0" value="0" unmaskedvalue="00"/>
+ </field>
+ <field name="ip.len" showname="Total Length: 78" size="2" pos="2" show="78" value="004e"/>
+ <field name="ip.id" showname="Identification: 0xffff (65535)" size="2" pos="4" show="0x0000ffff" value="ffff"/>
+ <field name="ip.flags" showname="Flags: 0x02 (Don&#x27;t Fragment)" size="1" pos="6" show="0x00000002" value="40">
+ <field name="ip.flags.rb" showname="0... .... = Reserved bit: Not set" size="1" pos="6" show="0" value="40"/>
+ <field name="ip.flags.df" showname=".1.. .... = Don&#x27;t fragment: Set" size="1" pos="6" show="1" value="40"/>
+ <field name="ip.flags.mf" showname="..0. .... = More fragments: Not set" size="1" pos="6" show="0" value="40"/>
+ </field>
+ <field name="ip.frag_offset" showname="Fragment offset: 0" size="2" pos="6" show="0" value="4000"/>
+ <field name="ip.ttl" showname="Time to live: 255" size="1" pos="8" show="255" value="ff"/>
+ <field name="ip.proto" showname="Protocol: UDP (17)" size="1" pos="9" show="17" value="11"/>
+ <field name="ip.checksum" showname="Header checksum: 0x0000 [validation disabled]" size="2" pos="10" show="0x00000000" value="0000">
+ <field name="ip.checksum_good" showname="Good: False" size="2" pos="10" show="0" value="0000"/>
+ <field name="ip.checksum_bad" showname="Bad: False" size="2" pos="10" show="0" value="0000"/>
+ </field>
+ <field name="ip.src" showname="Source: 127.0.0.1" size="4" pos="12" show="127.0.0.1" value="7f000001"/>
+ <field name="ip.addr" showname="Source or Destination Address: 127.0.0.1" hide="yes" size="4" pos="12" show="127.0.0.1" value="7f000001"/>
+ <field name="ip.src_host" showname="Source Host: 127.0.0.1" hide="yes" size="4" pos="12" show="127.0.0.1" value="7f000001"/>
+ <field name="ip.host" showname="Source or Destination Host: 127.0.0.1" hide="yes" size="4" pos="12" show="127.0.0.1" value="7f000001"/>
+ <field name="ip.dst" showname="Destination: 127.255.255.255" size="4" pos="16" show="127.255.255.255" value="7fffffff"/>
+ <field name="ip.addr" showname="Source or Destination Address: 127.255.255.255" hide="yes" size="4" pos="16" show="127.255.255.255" value="7fffffff"/>
+ <field name="ip.dst_host" showname="Destination Host: 127.255.255.255" hide="yes" size="4" pos="16" show="127.255.255.255" value="7fffffff"/>
+ <field name="ip.host" showname="Source or Destination Host: 127.255.255.255" hide="yes" size="4" pos="16" show="127.255.255.255" value="7fffffff"/>
+ <field name="" show="Source GeoIP: Unknown" size="4" pos="12" value="7f000001"/>
+ <field name="" show="Destination GeoIP: Unknown" size="4" pos="16" value="7fffffff"/>
+ </proto>
+ <proto name="udp" showname="User Datagram Protocol, Src Port: 14705 (14705), Dst Port: 137 (137)" size="8" pos="20">
+ <field name="udp.srcport" showname="Source Port: 14705" size="2" pos="20" show="14705" value="3971"/>
+ <field name="udp.dstport" showname="Destination Port: 137" size="2" pos="22" show="137" value="0089"/>
+ <field name="udp.port" showname="Source or Destination Port: 14705" hide="yes" size="2" pos="20" show="14705" value="3971"/>
+ <field name="udp.port" showname="Source or Destination Port: 137" hide="yes" size="2" pos="22" show="137" value="0089"/>
+ <field name="udp.length" showname="Length: 58" size="2" pos="24" show="58" value="003a"/>
+ <field name="udp.checksum" showname="Checksum: 0x0000 (none)" size="2" pos="26" show="0x00000000" value="0000">
+ <field name="udp.checksum_good" showname="Good Checksum: False" size="2" pos="26" show="0" value="0000"/>
+ <field name="udp.checksum_bad" showname="Bad Checksum: False" size="2" pos="26" show="0" value="0000"/>
+ </field>
+ <field name="udp.stream" showname="Stream index: 0" size="0" pos="28" show="0"/>
+ </proto>
+ <proto name="nbns" showname="NetBIOS Name Service" size="50" pos="28">
+ <field name="nbns.id" showname="Transaction ID: 0x29d6" size="2" pos="28" show="0x000029d6" value="29d6"/>
+ <field name="nbns.flags" showname="Flags: 0x0010, Opcode: Name query, Broadcast" size="2" pos="30" show="0x00000010" value="0010">
+ <field name="nbns.flags.response" showname="0... .... .... .... = Response: Message is a query" size="2" pos="30" show="0" value="0" unmaskedvalue="0010"/>
+ <field name="nbns.flags.opcode" showname=".000 0... .... .... = Opcode: Name query (0)" size="2" pos="30" show="0" value="0" unmaskedvalue="0010"/>
+ <field name="nbns.flags.truncated" showname=".... ..0. .... .... = Truncated: Message is not truncated" size="2" pos="30" show="0" value="0" unmaskedvalue="0010"/>
+ <field name="nbns.flags.recdesired" showname=".... ...0 .... .... = Recursion desired: Don&#x27;t do query recursively" size="2" pos="30" show="0" value="0" unmaskedvalue="0010"/>
+ <field name="nbns.flags.broadcast" showname=".... .... ...1 .... = Broadcast: Broadcast packet" size="2" pos="30" show="1" value="FFFFFFFF" unmaskedvalue="0010"/>
+ </field>
+ <field name="nbns.count.queries" showname="Questions: 1" size="2" pos="32" show="1" value="0001"/>
+ <field name="nbns.count.answers" showname="Answer RRs: 0" size="2" pos="34" show="0" value="0000"/>
+ <field name="nbns.count.auth_rr" showname="Authority RRs: 0" size="2" pos="36" show="0" value="0000"/>
+ <field name="nbns.count.add_rr" showname="Additional RRs: 0" size="2" pos="38" show="0" value="0000"/>
+ <field name="" show="Queries" size="38" pos="40" value="20454d455045444542454d454545444341434143414341434143414341434141410000200001">
+ <field name="" show="LOCALDC&lt;00&gt;: type NB, class IN" size="38" pos="40" value="20454d455045444542454d454545444341434143414341434143414341434141410000200001">
+ <field name="nbns.name" showname="Name: LOCALDC&lt;00&gt; (Workstation/Redirector)" size="34" pos="40" show="LOCALDC&lt;00&gt;" value="20454d455045444542454d4545454443414341434143414341434143414341414100"/>
+ <field name="nbns.type" showname="Type: NB (32)" size="2" pos="74" show="32" value="0020"/>
+ <field name="nbns.class" showname="Class: IN (1)" size="2" pos="76" show="1" value="0001"/>
+ </field>
+ </field>
+ </proto>
+</packet>
+
+<packet>
+ <proto name="geninfo" pos="0" showname="General information" size="296">
+ <field name="num" pos="0" show="47" showname="Number" value="2f" size="296"/>
+ <field name="len" pos="0" show="296" showname="Frame Length" value="128" size="296"/>
+ <field name="caplen" pos="0" show="296" showname="Captured Length" value="128" size="296"/>
+ <field name="timestamp" pos="0" show="Feb 10, 2017 14:36:18.137335000 NZDT" showname="Captured Time" value="1486690578.137335000" size="296"/>
+ </proto>
+ <proto name="frame" showname="Frame 47: 296 bytes on wire (2368 bits), 296 bytes captured (2368 bits)" size="296" pos="0">
+ <field name="frame.encap_type" showname="Encapsulation type: Raw IP (7)" size="0" pos="0" show="7"/>
+ <field name="frame.time" showname="Arrival Time: Feb 10, 2017 14:36:18.137335000 NZDT" size="0" pos="0" show="Feb 10, 2017 14:36:18.137335000 NZDT"/>
+ <field name="frame.offset_shift" showname="Time shift for this packet: 0.000000000 seconds" size="0" pos="0" show="0.000000000"/>
+ <field name="frame.time_epoch" showname="Epoch Time: 1486690578.137335000 seconds" size="0" pos="0" show="1486690578.137335000"/>
+ <field name="frame.time_delta" showname="Time delta from previous captured frame: 0.000016000 seconds" size="0" pos="0" show="0.000016000"/>
+ <field name="frame.time_delta_displayed" showname="Time delta from previous displayed frame: 0.000016000 seconds" size="0" pos="0" show="0.000016000"/>
+ <field name="frame.time_relative" showname="Time since reference or first frame: 1.606884000 seconds" size="0" pos="0" show="1.606884000"/>
+ <field name="frame.number" showname="Frame Number: 47" size="0" pos="0" show="47"/>
+ <field name="frame.len" showname="Frame Length: 296 bytes (2368 bits)" size="0" pos="0" show="296"/>
+ <field name="frame.cap_len" showname="Capture Length: 296 bytes (2368 bits)" size="0" pos="0" show="296"/>
+ <field name="frame.marked" showname="Frame is marked: False" size="0" pos="0" show="0"/>
+ <field name="frame.ignored" showname="Frame is ignored: False" size="0" pos="0" show="0"/>
+ <field name="frame.protocols" showname="Protocols in frame: raw:ip:tcp:kerberos" size="0" pos="0" show="raw:ip:tcp:kerberos"/>
+ </proto>
+ <proto name="raw" showname="Raw packet data" size="296" pos="0"/>
+ <proto name="ip" showname="Internet Protocol Version 4, Src: 127.0.0.21, Dst: 127.0.0.21" size="20" pos="0">
+ <field name="ip.version" showname="0100 .... = Version: 4" size="1" pos="0" show="4" value="4" unmaskedvalue="45"/>
+ <field name="ip.hdr_len" showname=".... 0101 = Header Length: 20 bytes" size="1" pos="0" show="5" value="5" unmaskedvalue="45"/>
+ <field name="ip.dsfield" showname="Differentiated Services Field: 0x00 (DSCP: CS0, ECN: Not-ECT)" size="1" pos="1" show="0x00000000" value="00">
+ <field name="ip.dsfield.dscp" showname="0000 00.. = Differentiated Services Codepoint: Default (0)" size="1" pos="1" show="0" value="0" unmaskedvalue="00"/>
+ <field name="ip.dsfield.ecn" showname=".... ..00 = Explicit Congestion Notification: Not ECN-Capable Transport (0)" size="1" pos="1" show="0" value="0" unmaskedvalue="00"/>
+ </field>
+ <field name="ip.len" showname="Total Length: 296" size="2" pos="2" show="296" value="0128"/>
+ <field name="ip.id" showname="Identification: 0xffff (65535)" size="2" pos="4" show="0x0000ffff" value="ffff"/>
+ <field name="ip.flags" showname="Flags: 0x02 (Don&#x27;t Fragment)" size="1" pos="6" show="0x00000002" value="40">
+ <field name="ip.flags.rb" showname="0... .... = Reserved bit: Not set" size="1" pos="6" show="0" value="40"/>
+ <field name="ip.flags.df" showname=".1.. .... = Don&#x27;t fragment: Set" size="1" pos="6" show="1" value="40"/>
+ <field name="ip.flags.mf" showname="..0. .... = More fragments: Not set" size="1" pos="6" show="0" value="40"/>
+ </field>
+ <field name="ip.frag_offset" showname="Fragment offset: 0" size="2" pos="6" show="0" value="4000"/>
+ <field name="ip.ttl" showname="Time to live: 255" size="1" pos="8" show="255" value="ff"/>
+ <field name="ip.proto" showname="Protocol: TCP (6)" size="1" pos="9" show="6" value="06"/>
+ <field name="ip.checksum" showname="Header checksum: 0x0000 [validation disabled]" size="2" pos="10" show="0x00000000" value="0000">
+ <field name="ip.checksum_good" showname="Good: False" size="2" pos="10" show="0" value="0000"/>
+ <field name="ip.checksum_bad" showname="Bad: False" size="2" pos="10" show="0" value="0000"/>
+ </field>
+ <field name="ip.src" showname="Source: 127.0.0.21" size="4" pos="12" show="127.0.0.21" value="7f000015"/>
+ <field name="ip.addr" showname="Source or Destination Address: 127.0.0.21" hide="yes" size="4" pos="12" show="127.0.0.21" value="7f000015"/>
+ <field name="ip.src_host" showname="Source Host: 127.0.0.21" hide="yes" size="4" pos="12" show="127.0.0.21" value="7f000015"/>
+ <field name="ip.host" showname="Source or Destination Host: 127.0.0.21" hide="yes" size="4" pos="12" show="127.0.0.21" value="7f000015"/>
+ <field name="ip.dst" showname="Destination: 127.0.0.21" size="4" pos="16" show="127.0.0.21" value="7f000015"/>
+ <field name="ip.addr" showname="Source or Destination Address: 127.0.0.21" hide="yes" size="4" pos="16" show="127.0.0.21" value="7f000015"/>
+ <field name="ip.dst_host" showname="Destination Host: 127.0.0.21" hide="yes" size="4" pos="16" show="127.0.0.21" value="7f000015"/>
+ <field name="ip.host" showname="Source or Destination Host: 127.0.0.21" hide="yes" size="4" pos="16" show="127.0.0.21" value="7f000015"/>
+ <field name="" show="Source GeoIP: Unknown" size="4" pos="12" value="7f000015"/>
+ <field name="" show="Destination GeoIP: Unknown" size="4" pos="16" value="7f000015"/>
+ </proto>
+ <proto name="tcp" showname="Transmission Control Protocol, Src Port: 14723 (14723), Dst Port: 88 (88), Seq: 1, Ack: 1, Len: 256" size="20" pos="20">
+ <field name="tcp.srcport" showname="Source Port: 14723" size="2" pos="20" show="14723" value="3983"/>
+ <field name="tcp.dstport" showname="Destination Port: 88" size="2" pos="22" show="88" value="0058"/>
+ <field name="tcp.port" showname="Source or Destination Port: 14723" hide="yes" size="2" pos="20" show="14723" value="3983"/>
+ <field name="tcp.port" showname="Source or Destination Port: 88" hide="yes" size="2" pos="22" show="88" value="0058"/>
+ <field name="tcp.stream" showname="Stream index: 0" size="0" pos="20" show="0"/>
+ <field name="tcp.len" showname="TCP Segment Len: 256" size="1" pos="32" show="256" value="50"/>
+ <field name="tcp.seq" showname="Sequence number: 1 (relative sequence number)" size="4" pos="24" show="1" value="00000001"/>
+ <field name="tcp.nxtseq" showname="Next sequence number: 257 (relative sequence number)" size="0" pos="20" show="257"/>
+ <field name="tcp.ack" showname="Acknowledgment number: 1 (relative ack number)" size="4" pos="28" show="1" value="00000001"/>
+ <field name="tcp.hdr_len" showname="Header Length: 20 bytes" size="1" pos="32" show="20" value="50"/>
+ <field name="tcp.flags" showname="Flags: 0x018 (PSH, ACK)" size="2" pos="32" show="0x00000018" value="18" unmaskedvalue="5018">
+ <field name="tcp.flags.res" showname="000. .... .... = Reserved: Not set" size="1" pos="32" show="0" value="0" unmaskedvalue="50"/>
+ <field name="tcp.flags.ns" showname="...0 .... .... = Nonce: Not set" size="1" pos="32" show="0" value="0" unmaskedvalue="50"/>
+ <field name="tcp.flags.cwr" showname=".... 0... .... = Congestion Window Reduced (CWR): Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.ecn" showname=".... .0.. .... = ECN-Echo: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.urg" showname=".... ..0. .... = Urgent: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.ack" showname=".... ...1 .... = Acknowledgment: Set" size="1" pos="33" show="1" value="FFFFFFFF" unmaskedvalue="18"/>
+ <field name="tcp.flags.push" showname=".... .... 1... = Push: Set" size="1" pos="33" show="1" value="FFFFFFFF" unmaskedvalue="18"/>
+ <field name="tcp.flags.reset" showname=".... .... .0.. = Reset: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.syn" showname=".... .... ..0. = Syn: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.fin" showname=".... .... ...0 = Fin: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.str" showname="TCP Flags: *******AP***" size="2" pos="32" show="*******AP***" value="5018"/>
+ </field>
+ <field name="tcp.window_size_value" showname="Window size value: 32767" size="2" pos="34" show="32767" value="7fff"/>
+ <field name="tcp.window_size" showname="Calculated window size: 32767" size="2" pos="34" show="32767" value="7fff"/>
+ <field name="tcp.window_size_scalefactor" showname="Window size scaling factor: -2 (no window scaling used)" size="2" pos="34" show="-2" value="7fff"/>
+ <field name="tcp.checksum" showname="Checksum: 0x0000 [validation disabled]" size="2" pos="36" show="0x00000000" value="0000">
+ <field name="tcp.checksum_good" showname="Good Checksum: False" size="2" pos="36" show="0" value="0000"/>
+ <field name="tcp.checksum_bad" showname="Bad Checksum: False" size="2" pos="36" show="0" value="0000"/>
+ </field>
+ <field name="tcp.urgent_pointer" showname="Urgent pointer: 0" size="2" pos="38" show="0" value="0000"/>
+ <field name="tcp.analysis" showname="SEQ/ACK analysis" size="0" pos="20" show="" value="">
+ <field name="tcp.analysis.initial_rtt" showname="iRTT: 0.000012000 seconds" size="0" pos="20" show="0.000012000"/>
+ <field name="tcp.analysis.bytes_in_flight" showname="Bytes in flight: 256" size="0" pos="20" show="256"/>
+ </field>
+ <field name="tcp.pdu.size" showname="PDU Size: 256" size="256" pos="40" show="256" value="000000fc6a81f93081f6a103020105a20302010aa350304e304ca103020102a24504433041a003020112a23a0438cecfe4905d9670c770a992a4645a9c477b639cfafad21ba2e12cc397eb617687733caf785f07d6f23cf87adc9a1fc5cb1b3ca7e6d17cc86fa48197308194a00703050000000000a1153013a003020101a10c300a1b084c4f43414c444324a2131b1153414d42412e4558414d504c452e434f4da3263024a003020102a11d301b1b066b72627467741b1153414d42412e4558414d504c452e434f4da511180f32303137303231313031333631375aa70602043e9a5c0ea81a3018020112020111020110020105020117020103020102020101"/>
+ </proto>
+ <proto name="kerberos" showname="Kerberos" size="256" pos="40">
+ <field name="" show="Record Mark: 252 bytes" size="4" pos="40" value="000000fc">
+ <field name="kerberos.rm.reserved" showname="0... .... .... .... .... .... .... .... = Reserved: Not set" size="4" pos="40" show="0" value="0" unmaskedvalue="000000fc"/>
+ <field name="kerberos.rm.length" showname=".000 0000 0000 0000 0000 0000 1111 1100 = Record Length: 252" size="4" pos="40" show="252" value="FC" unmaskedvalue="000000fc"/>
+ </field>
+ <field name="kerberos.as_req_element" showname="as-req" size="249" pos="47" show="" value="">
+ <field name="kerberos.pvno" showname="pvno: 5" size="1" pos="54" show="5" value="05"/>
+ <field name="kerberos.msg_type" showname="msg-type: krb-as-req (10)" size="1" pos="59" show="10" value="0a"/>
+ <field name="kerberos.padata" showname="padata: 1 item" size="78" pos="64" show="1" value="304ca103020102a24504433041a003020112a23a0438cecfe4905d9670c770a992a4645a9c477b639cfafad21ba2e12cc397eb617687733caf785f07d6f23cf87adc9a1fc5cb1b3ca7e6d17cc86f">
+ <field name="kerberos.PA_DATA_element" showname="PA-DATA PA-ENC-TIMESTAMP" size="78" pos="64" show="" value="">
+ <field name="kerberos.padata_type" showname="padata-type: kRB5-PADATA-ENC-TIMESTAMP (2)" size="1" pos="70" show="2" value="02">
+ <field name="kerberos.padata_value" showname="padata-value: 3041a003020112a23a0438cecfe4905d9670c770a992a464..." size="67" pos="75" show="30:41:a0:03:02:01:12:a2:3a:04:38:ce:cf:e4:90:5d:96:70:c7:70:a9:92:a4:64:5a:9c:47:7b:63:9c:fa:fa:d2:1b:a2:e1:2c:c3:97:eb:61:76:87:73:3c:af:78:5f:07:d6:f2:3c:f8:7a:dc:9a:1f:c5:cb:1b:3c:a7:e6:d1:7c:c8:6f" value="3041a003020112a23a0438cecfe4905d9670c770a992a4645a9c477b639cfafad21ba2e12cc397eb617687733caf785f07d6f23cf87adc9a1fc5cb1b3ca7e6d17cc86f">
+ <field name="kerberos.etype" showname="etype: eTYPE-AES256-CTS-HMAC-SHA1-96 (18)" size="1" pos="81" show="18" value="12"/>
+ <field name="kerberos.cipher" showname="cipher: cecfe4905d9670c770a992a4645a9c477b639cfafad21ba2..." size="56" pos="86" show="ce:cf:e4:90:5d:96:70:c7:70:a9:92:a4:64:5a:9c:47:7b:63:9c:fa:fa:d2:1b:a2:e1:2c:c3:97:eb:61:76:87:73:3c:af:78:5f:07:d6:f2:3c:f8:7a:dc:9a:1f:c5:cb:1b:3c:a7:e6:d1:7c:c8:6f" value="cecfe4905d9670c770a992a4645a9c477b639cfafad21ba2e12cc397eb617687733caf785f07d6f23cf87adc9a1fc5cb1b3ca7e6d17cc86f"/>
+ </field>
+ </field>
+ </field>
+ </field>
+ <field name="kerberos.req_body_element" showname="req-body" size="151" pos="145" show="" value="">
+ <field name="ber.bitstring.padding" showname="Padding: 0" size="1" pos="152" show="0" value="00"/>
+ <field name="kerberos.kdc_options" showname="kdc-options: 00000000" size="4" pos="153" show="00:00:00:00" value="00000000">
+ <field name="kerberos.reserved" showname="0... .... = reserved: False" size="1" pos="153" show="0" value="0" unmaskedvalue="00"/>
+ <field name="kerberos.forwardable" showname=".0.. .... = forwardable: False" size="1" pos="153" show="0" value="0" unmaskedvalue="00"/>
+ <field name="kerberos.forwarded" showname="..0. .... = forwarded: False" size="1" pos="153" show="0" value="0" unmaskedvalue="00"/>
+ <field name="kerberos.proxiable" showname="...0 .... = proxiable: False" size="1" pos="153" show="0" value="0" unmaskedvalue="00"/>
+ <field name="kerberos.proxy" showname=".... 0... = proxy: False" size="1" pos="153" show="0" value="0" unmaskedvalue="00"/>
+ <field name="kerberos.allow-postdate" showname=".... .0.. = allow-postdate: False" size="1" pos="153" show="0" value="0" unmaskedvalue="00"/>
+ <field name="kerberos.postdated" showname=".... ..0. = postdated: False" size="1" pos="153" show="0" value="0" unmaskedvalue="00"/>
+ <field name="kerberos.unused7" showname=".... ...0 = unused7: False" size="1" pos="153" show="0" value="0" unmaskedvalue="00"/>
+ <field name="kerberos.renewable" showname="0... .... = renewable: False" size="1" pos="154" show="0" value="0" unmaskedvalue="00"/>
+ <field name="kerberos.unused9" showname=".0.. .... = unused9: False" size="1" pos="154" show="0" value="0" unmaskedvalue="00"/>
+ <field name="kerberos.unused10" showname="..0. .... = unused10: False" size="1" pos="154" show="0" value="0" unmaskedvalue="00"/>
+ <field name="kerberos.opt-hardware-auth" showname="...0 .... = opt-hardware-auth: False" size="1" pos="154" show="0" value="0" unmaskedvalue="00"/>
+ <field name="kerberos.request-anonymous" showname=".... ..0. = request-anonymous: False" size="1" pos="154" show="0" value="0" unmaskedvalue="00"/>
+ <field name="kerberos.canonicalize" showname=".... ...0 = canonicalize: False" size="1" pos="154" show="0" value="0" unmaskedvalue="00"/>
+ <field name="kerberos.constrained-delegation" showname="0... .... = constrained-delegation: False" size="1" pos="155" show="0" value="0" unmaskedvalue="00"/>
+ <field name="kerberos.disable-transited-check" showname="..0. .... = disable-transited-check: False" size="1" pos="156" show="0" value="0" unmaskedvalue="00"/>
+ <field name="kerberos.renewable-ok" showname="...0 .... = renewable-ok: False" size="1" pos="156" show="0" value="0" unmaskedvalue="00"/>
+ <field name="kerberos.enc-tkt-in-skey" showname=".... 0... = enc-tkt-in-skey: False" size="1" pos="156" show="0" value="0" unmaskedvalue="00"/>
+ <field name="kerberos.renew" showname=".... ..0. = renew: False" size="1" pos="156" show="0" value="0" unmaskedvalue="00"/>
+ <field name="kerberos.validate" showname=".... ...0 = validate: False" size="1" pos="156" show="0" value="0" unmaskedvalue="00"/>
+ </field>
+ <field name="kerberos.cname_element" showname="cname" size="21" pos="159" show="" value="">
+ <field name="kerberos.name_type" showname="name-type: kRB5-NT-PRINCIPAL (1)" size="1" pos="165" show="1" value="01"/>
+ <field name="kerberos.name_string" showname="name-string: 1 item" size="10" pos="170" show="1" value="1b084c4f43414c444324">
+ <field name="kerberos.KerberosString" showname="KerberosString: LOCALDC$" size="8" pos="172" show="LOCALDC$" value="4c4f43414c444324"/>
+ </field>
+ </field>
+ <field name="kerberos.realm" showname="realm: SAMBA.EXAMPLE.COM" size="17" pos="184" show="SAMBA.EXAMPLE.COM" value="53414d42412e4558414d504c452e434f4d"/>
+ <field name="kerberos.sname_element" showname="sname" size="38" pos="203" show="" value="">
+ <field name="kerberos.name_type" showname="name-type: kRB5-NT-SRV-INST (2)" size="1" pos="209" show="2" value="02"/>
+ <field name="kerberos.name_string" showname="name-string: 2 items" size="27" pos="214" show="2" value="1b066b72627467741b1153414d42412e4558414d504c452e434f4d">
+ <field name="kerberos.KerberosString" showname="KerberosString: krbtgt" size="6" pos="216" show="krbtgt" value="6b7262746774"/>
+ <field name="kerberos.KerberosString" showname="KerberosString: SAMBA.EXAMPLE.COM" size="17" pos="224" show="SAMBA.EXAMPLE.COM" value="53414d42412e4558414d504c452e434f4d"/>
+ </field>
+ </field>
+ <field name="kerberos.till" showname="till: 2017-02-11 01:36:17 (UTC)" size="15" pos="245" show="2017-02-11 01:36:17 (UTC)" value="32303137303231313031333631375a"/>
+ <field name="kerberos.nonce" showname="nonce: 1050303502" size="4" pos="264" show="1050303502" value="3e9a5c0e"/>
+ <field name="kerberos.etype" showname="etype: 8 items" size="24" pos="272" show="8" value="020112020111020110020105020117020103020102020101">
+ <field name="kerberos.ENCTYPE" showname="ENCTYPE: eTYPE-AES256-CTS-HMAC-SHA1-96 (18)" size="1" pos="274" show="18" value="12"/>
+ <field name="kerberos.ENCTYPE" showname="ENCTYPE: eTYPE-AES128-CTS-HMAC-SHA1-96 (17)" size="1" pos="277" show="17" value="11"/>
+ <field name="kerberos.ENCTYPE" showname="ENCTYPE: eTYPE-DES3-CBC-SHA1 (16)" size="1" pos="280" show="16" value="10"/>
+ <field name="kerberos.ENCTYPE" showname="ENCTYPE: eTYPE-DES3-CBC-MD5 (5)" size="1" pos="283" show="5" value="05"/>
+ <field name="kerberos.ENCTYPE" showname="ENCTYPE: eTYPE-ARCFOUR-HMAC-MD5 (23)" size="1" pos="286" show="23" value="17"/>
+ <field name="kerberos.ENCTYPE" showname="ENCTYPE: eTYPE-DES-CBC-MD5 (3)" size="1" pos="289" show="3" value="03"/>
+ <field name="kerberos.ENCTYPE" showname="ENCTYPE: eTYPE-DES-CBC-MD4 (2)" size="1" pos="292" show="2" value="02"/>
+ <field name="kerberos.ENCTYPE" showname="ENCTYPE: eTYPE-DES-CBC-CRC (1)" size="1" pos="295" show="1" value="01"/>
+ </field>
+ </field>
+ </field>
+ </proto>
+</packet>
+
+<packet>
+ <proto name="geninfo" pos="0" showname="General information" size="1527">
+ <field name="num" pos="0" show="53" showname="Number" value="35" size="1527"/>
+ <field name="len" pos="0" show="1527" showname="Frame Length" value="5f7" size="1527"/>
+ <field name="caplen" pos="0" show="1527" showname="Captured Length" value="5f7" size="1527"/>
+ <field name="timestamp" pos="0" show="Feb 10, 2017 14:36:18.141276000 NZDT" showname="Captured Time" value="1486690578.141276000" size="1527"/>
+ </proto>
+ <proto name="frame" showname="Frame 53: 1527 bytes on wire (12216 bits), 1527 bytes captured (12216 bits)" size="1527" pos="0">
+ <field name="frame.encap_type" showname="Encapsulation type: Raw IP (7)" size="0" pos="0" show="7"/>
+ <field name="frame.time" showname="Arrival Time: Feb 10, 2017 14:36:18.141276000 NZDT" size="0" pos="0" show="Feb 10, 2017 14:36:18.141276000 NZDT"/>
+ <field name="frame.offset_shift" showname="Time shift for this packet: 0.000000000 seconds" size="0" pos="0" show="0.000000000"/>
+ <field name="frame.time_epoch" showname="Epoch Time: 1486690578.141276000 seconds" size="0" pos="0" show="1486690578.141276000"/>
+ <field name="frame.time_delta" showname="Time delta from previous captured frame: 0.003784000 seconds" size="0" pos="0" show="0.003784000"/>
+ <field name="frame.time_delta_displayed" showname="Time delta from previous displayed frame: 0.003784000 seconds" size="0" pos="0" show="0.003784000"/>
+ <field name="frame.time_relative" showname="Time since reference or first frame: 1.610825000 seconds" size="0" pos="0" show="1.610825000"/>
+ <field name="frame.number" showname="Frame Number: 53" size="0" pos="0" show="53"/>
+ <field name="frame.len" showname="Frame Length: 1527 bytes (12216 bits)" size="0" pos="0" show="1527"/>
+ <field name="frame.cap_len" showname="Capture Length: 1527 bytes (12216 bits)" size="0" pos="0" show="1527"/>
+ <field name="frame.marked" showname="Frame is marked: False" size="0" pos="0" show="0"/>
+ <field name="frame.ignored" showname="Frame is ignored: False" size="0" pos="0" show="0"/>
+ <field name="frame.protocols" showname="Protocols in frame: raw:ip:tcp:kerberos" size="0" pos="0" show="raw:ip:tcp:kerberos"/>
+ </proto>
+ <proto name="raw" showname="Raw packet data" size="1527" pos="0"/>
+ <proto name="ip" showname="Internet Protocol Version 4, Src: 127.0.0.21, Dst: 127.0.0.21" size="20" pos="0">
+ <field name="ip.version" showname="0100 .... = Version: 4" size="1" pos="0" show="4" value="4" unmaskedvalue="45"/>
+ <field name="ip.hdr_len" showname=".... 0101 = Header Length: 20 bytes" size="1" pos="0" show="5" value="5" unmaskedvalue="45"/>
+ <field name="ip.dsfield" showname="Differentiated Services Field: 0x00 (DSCP: CS0, ECN: Not-ECT)" size="1" pos="1" show="0x00000000" value="00">
+ <field name="ip.dsfield.dscp" showname="0000 00.. = Differentiated Services Codepoint: Default (0)" size="1" pos="1" show="0" value="0" unmaskedvalue="00"/>
+ <field name="ip.dsfield.ecn" showname=".... ..00 = Explicit Congestion Notification: Not ECN-Capable Transport (0)" size="1" pos="1" show="0" value="0" unmaskedvalue="00"/>
+ </field>
+ <field name="ip.len" showname="Total Length: 1527" size="2" pos="2" show="1527" value="05f7"/>
+ <field name="ip.id" showname="Identification: 0xffff (65535)" size="2" pos="4" show="0x0000ffff" value="ffff"/>
+ <field name="ip.flags" showname="Flags: 0x02 (Don&#x27;t Fragment)" size="1" pos="6" show="0x00000002" value="40">
+ <field name="ip.flags.rb" showname="0... .... = Reserved bit: Not set" size="1" pos="6" show="0" value="40"/>
+ <field name="ip.flags.df" showname=".1.. .... = Don&#x27;t fragment: Set" size="1" pos="6" show="1" value="40"/>
+ <field name="ip.flags.mf" showname="..0. .... = More fragments: Not set" size="1" pos="6" show="0" value="40"/>
+ </field>
+ <field name="ip.frag_offset" showname="Fragment offset: 0" size="2" pos="6" show="0" value="4000"/>
+ <field name="ip.ttl" showname="Time to live: 255" size="1" pos="8" show="255" value="ff"/>
+ <field name="ip.proto" showname="Protocol: TCP (6)" size="1" pos="9" show="6" value="06"/>
+ <field name="ip.checksum" showname="Header checksum: 0x0000 [validation disabled]" size="2" pos="10" show="0x00000000" value="0000">
+ <field name="ip.checksum_good" showname="Good: False" size="2" pos="10" show="0" value="0000"/>
+ <field name="ip.checksum_bad" showname="Bad: False" size="2" pos="10" show="0" value="0000"/>
+ </field>
+ <field name="ip.src" showname="Source: 127.0.0.21" size="4" pos="12" show="127.0.0.21" value="7f000015"/>
+ <field name="ip.addr" showname="Source or Destination Address: 127.0.0.21" hide="yes" size="4" pos="12" show="127.0.0.21" value="7f000015"/>
+ <field name="ip.src_host" showname="Source Host: 127.0.0.21" hide="yes" size="4" pos="12" show="127.0.0.21" value="7f000015"/>
+ <field name="ip.host" showname="Source or Destination Host: 127.0.0.21" hide="yes" size="4" pos="12" show="127.0.0.21" value="7f000015"/>
+ <field name="ip.dst" showname="Destination: 127.0.0.21" size="4" pos="16" show="127.0.0.21" value="7f000015"/>
+ <field name="ip.addr" showname="Source or Destination Address: 127.0.0.21" hide="yes" size="4" pos="16" show="127.0.0.21" value="7f000015"/>
+ <field name="ip.dst_host" showname="Destination Host: 127.0.0.21" hide="yes" size="4" pos="16" show="127.0.0.21" value="7f000015"/>
+ <field name="ip.host" showname="Source or Destination Host: 127.0.0.21" hide="yes" size="4" pos="16" show="127.0.0.21" value="7f000015"/>
+ <field name="" show="Source GeoIP: Unknown" size="4" pos="12" value="7f000015"/>
+ <field name="" show="Destination GeoIP: Unknown" size="4" pos="16" value="7f000015"/>
+ </proto>
+ <proto name="tcp" showname="Transmission Control Protocol, Src Port: 88 (88), Dst Port: 14723 (14723), Seq: 1, Ack: 257, Len: 1487" size="20" pos="20">
+ <field name="tcp.srcport" showname="Source Port: 88" size="2" pos="20" show="88" value="0058"/>
+ <field name="tcp.dstport" showname="Destination Port: 14723" size="2" pos="22" show="14723" value="3983"/>
+ <field name="tcp.port" showname="Source or Destination Port: 88" hide="yes" size="2" pos="20" show="88" value="0058"/>
+ <field name="tcp.port" showname="Source or Destination Port: 14723" hide="yes" size="2" pos="22" show="14723" value="3983"/>
+ <field name="tcp.stream" showname="Stream index: 0" size="0" pos="20" show="0"/>
+ <field name="tcp.len" showname="TCP Segment Len: 1487" size="1" pos="32" show="1487" value="50"/>
+ <field name="tcp.seq" showname="Sequence number: 1 (relative sequence number)" size="4" pos="24" show="1" value="00000001"/>
+ <field name="tcp.nxtseq" showname="Next sequence number: 1488 (relative sequence number)" size="0" pos="20" show="1488"/>
+ <field name="tcp.ack" showname="Acknowledgment number: 257 (relative ack number)" size="4" pos="28" show="257" value="00000101"/>
+ <field name="tcp.hdr_len" showname="Header Length: 20 bytes" size="1" pos="32" show="20" value="50"/>
+ <field name="tcp.flags" showname="Flags: 0x018 (PSH, ACK)" size="2" pos="32" show="0x00000018" value="18" unmaskedvalue="5018">
+ <field name="tcp.flags.res" showname="000. .... .... = Reserved: Not set" size="1" pos="32" show="0" value="0" unmaskedvalue="50"/>
+ <field name="tcp.flags.ns" showname="...0 .... .... = Nonce: Not set" size="1" pos="32" show="0" value="0" unmaskedvalue="50"/>
+ <field name="tcp.flags.cwr" showname=".... 0... .... = Congestion Window Reduced (CWR): Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.ecn" showname=".... .0.. .... = ECN-Echo: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.urg" showname=".... ..0. .... = Urgent: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.ack" showname=".... ...1 .... = Acknowledgment: Set" size="1" pos="33" show="1" value="FFFFFFFF" unmaskedvalue="18"/>
+ <field name="tcp.flags.push" showname=".... .... 1... = Push: Set" size="1" pos="33" show="1" value="FFFFFFFF" unmaskedvalue="18"/>
+ <field name="tcp.flags.reset" showname=".... .... .0.. = Reset: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.syn" showname=".... .... ..0. = Syn: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.fin" showname=".... .... ...0 = Fin: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.str" showname="TCP Flags: *******AP***" size="2" pos="32" show="*******AP***" value="5018"/>
+ </field>
+ <field name="tcp.window_size_value" showname="Window size value: 32767" size="2" pos="34" show="32767" value="7fff"/>
+ <field name="tcp.window_size" showname="Calculated window size: 32767" size="2" pos="34" show="32767" value="7fff"/>
+ <field name="tcp.window_size_scalefactor" showname="Window size scaling factor: -2 (no window scaling used)" size="2" pos="34" show="-2" value="7fff"/>
+ <field name="tcp.checksum" showname="Checksum: 0x0000 [validation disabled]" size="2" pos="36" show="0x00000000" value="0000">
+ <field name="tcp.checksum_good" showname="Good Checksum: False" size="2" pos="36" show="0" value="0000"/>
+ <field name="tcp.checksum_bad" showname="Bad Checksum: False" size="2" pos="36" show="0" value="0000"/>
+ </field>
+ <field name="tcp.urgent_pointer" showname="Urgent pointer: 0" size="2" pos="38" show="0" value="0000"/>
+ <field name="tcp.analysis" showname="SEQ/ACK analysis" size="0" pos="20" show="" value="">
+ <field name="tcp.analysis.acks_frame" showname="This is an ACK to the segment in frame: 47" size="0" pos="20" show="47"/>
+ <field name="tcp.analysis.ack_rtt" showname="The RTT to ACK the segment was: 0.003941000 seconds" size="0" pos="20" show="0.003941000"/>
+ <field name="tcp.analysis.initial_rtt" showname="iRTT: 0.000012000 seconds" size="0" pos="20" show="0.000012000"/>
+ <field name="tcp.analysis.bytes_in_flight" showname="Bytes in flight: 1487" size="0" pos="20" show="1487"/>
+ </field>
+ <field name="tcp.pdu.size" showname="PDU Size: 1487" size="1487" pos="40" show="1487" value="...elided..."/>
+ </proto>
+ <proto name="kerberos" showname="Kerberos" size="1487" pos="40">
+ <field name="" show="Record Mark: 1483 bytes" size="4" pos="40" value="000005cb">
+ <field name="kerberos.rm.reserved" showname="0... .... .... .... .... .... .... .... = Reserved: Not set" size="4" pos="40" show="0" value="0" unmaskedvalue="000005cb"/>
+ <field name="kerberos.rm.length" showname=".000 0000 0000 0000 0000 0101 1100 1011 = Record Length: 1483" size="4" pos="40" show="1483" value="5CB" unmaskedvalue="000005cb"/>
+ </field>
+ <field name="kerberos.as_rep_element" showname="as-rep" size="1479" pos="48" show="" value="">
+ <field name="kerberos.pvno" showname="pvno: 5" size="1" pos="56" show="5" value="05"/>
+ <field name="kerberos.msg_type" showname="msg-type: krb-as-rep (11)" size="1" pos="61" show="11" value="0b"/>
+ <field name="kerberos.padata" showname="padata: 1 item" size="57" pos="66" show="1" value="3037a103020103a230042e53414d42412e4558414d504c452e434f4d686f73746c6f63616c64632e73616d62612e6578616d706c652e636f6d">
+ <field name="kerberos.PA_DATA_element" showname="PA-DATA PA-PW-SALT" size="57" pos="66" show="" value="">
+ <field name="kerberos.padata_type" showname="padata-type: kRB5-PADATA-PW-SALT (3)" size="1" pos="72" show="3" value="03">
+ <field name="kerberos.padata_value" showname="padata-value: 53414d42412e4558414d504c452e434f4d686f73746c6f63..." size="46" pos="77" show="53:41:4d:42:41:2e:45:58:41:4d:50:4c:45:2e:43:4f:4d:68:6f:73:74:6c:6f:63:61:6c:64:63:2e:73:61:6d:62:61:2e:65:78:61:6d:70:6c:65:2e:63:6f:6d" value="53414d42412e4558414d504c452e434f4d686f73746c6f63616c64632e73616d62612e6578616d706c652e636f6d">
+ <field name="kerberos.smb.nt_status" showname="NT Status: Unknown (0x424d4153)" size="4" pos="77" show="0x424d4153" value="53414d42"/>
+ <field name="kerberos.smb.unknown" showname="Unknown: 0x58452e41" size="4" pos="81" show="0x58452e41" value="412e4558"/>
+ <field name="kerberos.smb.unknown" showname="Unknown: 0x4c504d41" size="4" pos="85" show="0x4c504d41" value="414d504c"/>
+ </field>
+ </field>
+ </field>
+ </field>
+ <field name="kerberos.crealm" showname="crealm: SAMBA.EXAMPLE.COM" size="17" pos="127" show="SAMBA.EXAMPLE.COM" value="53414d42412e4558414d504c452e434f4d"/>
+ <field name="kerberos.cname_element" showname="cname" size="21" pos="146" show="" value="">
+ <field name="kerberos.name_type" showname="name-type: kRB5-NT-PRINCIPAL (1)" size="1" pos="152" show="1" value="01"/>
+ <field name="kerberos.name_string" showname="name-string: 1 item" size="10" pos="157" show="1" value="1b084c4f43414c444324">
+ <field name="kerberos.KerberosString" showname="KerberosString: LOCALDC$" size="8" pos="159" show="LOCALDC$" value="4c4f43414c444324"/>
+ </field>
+ </field>
+ <field name="kerberos.ticket_element" showname="ticket" size="1105" pos="175" show="" value="">
+ <field name="kerberos.tkt_vno" showname="tkt-vno: 5" size="1" pos="183" show="5" value="05"/>
+ <field name="kerberos.realm" showname="realm: SAMBA.EXAMPLE.COM" size="17" pos="188" show="SAMBA.EXAMPLE.COM" value="53414d42412e4558414d504c452e434f4d"/>
+ <field name="kerberos.sname_element" showname="sname" size="38" pos="207" show="" value="">
+ <field name="kerberos.name_type" showname="name-type: kRB5-NT-SRV-INST (2)" size="1" pos="213" show="2" value="02"/>
+ <field name="kerberos.name_string" showname="name-string: 2 items" size="27" pos="218" show="2" value="1b066b72627467741b1153414d42412e4558414d504c452e434f4d">
+ <field name="kerberos.KerberosString" showname="KerberosString: krbtgt" size="6" pos="220" show="krbtgt" value="6b7262746774"/>
+ <field name="kerberos.KerberosString" showname="KerberosString: SAMBA.EXAMPLE.COM" size="17" pos="228" show="SAMBA.EXAMPLE.COM" value="53414d42412e4558414d504c452e434f4d"/>
+ </field>
+ </field>
+ <field name="kerberos.enc_part_element" showname="enc-part" size="1031" pos="249" show="" value="">
+ <field name="kerberos.etype" showname="etype: eTYPE-AES256-CTS-HMAC-SHA1-96 (18)" size="1" pos="257" show="18" value="12"/>
+ <field name="kerberos.kvno" showname="kvno: 1" size="1" pos="262" show="1" value="01"/>
+ <field name="kerberos.cipher" showname="cipher: 22e144d817a8c9e491c0eaa7aaf8e719ed4e92231d14006c..." size="1009" pos="271" show="...elided..." value="...elided..."/>
+ </field>
+ </field>
+ <field name="kerberos.enc_part_element" showname="enc-part" size="244" pos="1283" show="" value="">
+ <field name="kerberos.etype" showname="etype: eTYPE-AES256-CTS-HMAC-SHA1-96 (18)" size="1" pos="1290" show="18" value="12"/>
+ <field name="kerberos.kvno" showname="kvno: 1" size="1" pos="1295" show="1" value="01"/>
+ <field name="kerberos.cipher" showname="cipher: 0131d06ef55ec3e3dd9a2de408afb6236c32fc6776e0cde6..." size="225" pos="1302" show="...elided..." value="...elided..."/>
+ </field>
+ </field>
+ </proto>
+</packet>
+
+<packet>
+ <proto name="geninfo" pos="0" showname="General information" size="301">
+ <field name="num" pos="0" show="2400" showname="Number" value="960" size="301"/>
+ <field name="len" pos="0" show="301" showname="Frame Length" value="12d" size="301"/>
+ <field name="caplen" pos="0" show="301" showname="Captured Length" value="12d" size="301"/>
+ <field name="timestamp" pos="0" show="Feb 10, 2017 14:36:24.104038000 NZDT" showname="Captured Time" value="1486690584.104038000" size="301"/>
+ </proto>
+ <proto name="frame" showname="Frame 2400: 301 bytes on wire (2408 bits), 301 bytes captured (2408 bits)" size="301" pos="0">
+ <field name="frame.encap_type" showname="Encapsulation type: Raw IP (7)" size="0" pos="0" show="7"/>
+ <field name="frame.time" showname="Arrival Time: Feb 10, 2017 14:36:24.104038000 NZDT" size="0" pos="0" show="Feb 10, 2017 14:36:24.104038000 NZDT"/>
+ <field name="frame.offset_shift" showname="Time shift for this packet: 0.000000000 seconds" size="0" pos="0" show="0.000000000"/>
+ <field name="frame.time_epoch" showname="Epoch Time: 1486690584.104038000 seconds" size="0" pos="0" show="1486690584.104038000"/>
+ <field name="frame.time_delta" showname="Time delta from previous captured frame: 0.000010000 seconds" size="0" pos="0" show="0.000010000"/>
+ <field name="frame.time_delta_displayed" showname="Time delta from previous displayed frame: 0.000010000 seconds" size="0" pos="0" show="0.000010000"/>
+ <field name="frame.time_relative" showname="Time since reference or first frame: 7.573587000 seconds" size="0" pos="0" show="7.573587000"/>
+ <field name="frame.number" showname="Frame Number: 2400" size="0" pos="0" show="2400"/>
+ <field name="frame.len" showname="Frame Length: 301 bytes (2408 bits)" size="0" pos="0" show="301"/>
+ <field name="frame.cap_len" showname="Capture Length: 301 bytes (2408 bits)" size="0" pos="0" show="301"/>
+ <field name="frame.marked" showname="Frame is marked: False" size="0" pos="0" show="0"/>
+ <field name="frame.ignored" showname="Frame is ignored: False" size="0" pos="0" show="0"/>
+ <field name="frame.protocols" showname="Protocols in frame: raw:ip:tcp:kerberos" size="0" pos="0" show="raw:ip:tcp:kerberos"/>
+ </proto>
+ <proto name="raw" showname="Raw packet data" size="301" pos="0"/>
+ <proto name="ip" showname="Internet Protocol Version 4, Src: 127.0.0.11, Dst: 127.0.0.21" size="20" pos="0">
+ <field name="ip.version" showname="0100 .... = Version: 4" size="1" pos="0" show="4" value="4" unmaskedvalue="45"/>
+ <field name="ip.hdr_len" showname=".... 0101 = Header Length: 20 bytes" size="1" pos="0" show="5" value="5" unmaskedvalue="45"/>
+ <field name="ip.dsfield" showname="Differentiated Services Field: 0x00 (DSCP: CS0, ECN: Not-ECT)" size="1" pos="1" show="0x00000000" value="00">
+ <field name="ip.dsfield.dscp" showname="0000 00.. = Differentiated Services Codepoint: Default (0)" size="1" pos="1" show="0" value="0" unmaskedvalue="00"/>
+ <field name="ip.dsfield.ecn" showname=".... ..00 = Explicit Congestion Notification: Not ECN-Capable Transport (0)" size="1" pos="1" show="0" value="0" unmaskedvalue="00"/>
+ </field>
+ <field name="ip.len" showname="Total Length: 301" size="2" pos="2" show="301" value="012d"/>
+ <field name="ip.id" showname="Identification: 0xffff (65535)" size="2" pos="4" show="0x0000ffff" value="ffff"/>
+ <field name="ip.flags" showname="Flags: 0x02 (Don&#x27;t Fragment)" size="1" pos="6" show="0x00000002" value="40">
+ <field name="ip.flags.rb" showname="0... .... = Reserved bit: Not set" size="1" pos="6" show="0" value="40"/>
+ <field name="ip.flags.df" showname=".1.. .... = Don&#x27;t fragment: Set" size="1" pos="6" show="1" value="40"/>
+ <field name="ip.flags.mf" showname="..0. .... = More fragments: Not set" size="1" pos="6" show="0" value="40"/>
+ </field>
+ <field name="ip.frag_offset" showname="Fragment offset: 0" size="2" pos="6" show="0" value="4000"/>
+ <field name="ip.ttl" showname="Time to live: 255" size="1" pos="8" show="255" value="ff"/>
+ <field name="ip.proto" showname="Protocol: TCP (6)" size="1" pos="9" show="6" value="06"/>
+ <field name="ip.checksum" showname="Header checksum: 0x0000 [validation disabled]" size="2" pos="10" show="0x00000000" value="0000">
+ <field name="ip.checksum_good" showname="Good: False" size="2" pos="10" show="0" value="0000"/>
+ <field name="ip.checksum_bad" showname="Bad: False" size="2" pos="10" show="0" value="0000"/>
+ </field>
+ <field name="ip.src" showname="Source: 127.0.0.11" size="4" pos="12" show="127.0.0.11" value="7f00000b"/>
+ <field name="ip.addr" showname="Source or Destination Address: 127.0.0.11" hide="yes" size="4" pos="12" show="127.0.0.11" value="7f00000b"/>
+ <field name="ip.src_host" showname="Source Host: 127.0.0.11" hide="yes" size="4" pos="12" show="127.0.0.11" value="7f00000b"/>
+ <field name="ip.host" showname="Source or Destination Host: 127.0.0.11" hide="yes" size="4" pos="12" show="127.0.0.11" value="7f00000b"/>
+ <field name="ip.dst" showname="Destination: 127.0.0.21" size="4" pos="16" show="127.0.0.21" value="7f000015"/>
+ <field name="ip.addr" showname="Source or Destination Address: 127.0.0.21" hide="yes" size="4" pos="16" show="127.0.0.21" value="7f000015"/>
+ <field name="ip.dst_host" showname="Destination Host: 127.0.0.21" hide="yes" size="4" pos="16" show="127.0.0.21" value="7f000015"/>
+ <field name="ip.host" showname="Source or Destination Host: 127.0.0.21" hide="yes" size="4" pos="16" show="127.0.0.21" value="7f000015"/>
+ <field name="" show="Source GeoIP: Unknown" size="4" pos="12" value="7f00000b"/>
+ <field name="" show="Destination GeoIP: Unknown" size="4" pos="16" value="7f000015"/>
+ </proto>
+ <proto name="tcp" showname="Transmission Control Protocol, Src Port: 14787 (14787), Dst Port: 88 (88), Seq: 1, Ack: 1, Len: 261" size="20" pos="20">
+ <field name="tcp.srcport" showname="Source Port: 14787" size="2" pos="20" show="14787" value="39c3"/>
+ <field name="tcp.dstport" showname="Destination Port: 88" size="2" pos="22" show="88" value="0058"/>
+ <field name="tcp.port" showname="Source or Destination Port: 14787" hide="yes" size="2" pos="20" show="14787" value="39c3"/>
+ <field name="tcp.port" showname="Source or Destination Port: 88" hide="yes" size="2" pos="22" show="88" value="0058"/>
+ <field name="tcp.stream" showname="Stream index: 49" size="0" pos="20" show="49"/>
+ <field name="tcp.len" showname="TCP Segment Len: 261" size="1" pos="32" show="261" value="50"/>
+ <field name="tcp.seq" showname="Sequence number: 1 (relative sequence number)" size="4" pos="24" show="1" value="00000001"/>
+ <field name="tcp.nxtseq" showname="Next sequence number: 262 (relative sequence number)" size="0" pos="20" show="262"/>
+ <field name="tcp.ack" showname="Acknowledgment number: 1 (relative ack number)" size="4" pos="28" show="1" value="00000001"/>
+ <field name="tcp.hdr_len" showname="Header Length: 20 bytes" size="1" pos="32" show="20" value="50"/>
+ <field name="tcp.flags" showname="Flags: 0x018 (PSH, ACK)" size="2" pos="32" show="0x00000018" value="18" unmaskedvalue="5018">
+ <field name="tcp.flags.res" showname="000. .... .... = Reserved: Not set" size="1" pos="32" show="0" value="0" unmaskedvalue="50"/>
+ <field name="tcp.flags.ns" showname="...0 .... .... = Nonce: Not set" size="1" pos="32" show="0" value="0" unmaskedvalue="50"/>
+ <field name="tcp.flags.cwr" showname=".... 0... .... = Congestion Window Reduced (CWR): Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.ecn" showname=".... .0.. .... = ECN-Echo: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.urg" showname=".... ..0. .... = Urgent: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.ack" showname=".... ...1 .... = Acknowledgment: Set" size="1" pos="33" show="1" value="FFFFFFFF" unmaskedvalue="18"/>
+ <field name="tcp.flags.push" showname=".... .... 1... = Push: Set" size="1" pos="33" show="1" value="FFFFFFFF" unmaskedvalue="18"/>
+ <field name="tcp.flags.reset" showname=".... .... .0.. = Reset: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.syn" showname=".... .... ..0. = Syn: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.fin" showname=".... .... ...0 = Fin: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.str" showname="TCP Flags: *******AP***" size="2" pos="32" show="*******AP***" value="5018"/>
+ </field>
+ <field name="tcp.window_size_value" showname="Window size value: 32767" size="2" pos="34" show="32767" value="7fff"/>
+ <field name="tcp.window_size" showname="Calculated window size: 32767" size="2" pos="34" show="32767" value="7fff"/>
+ <field name="tcp.window_size_scalefactor" showname="Window size scaling factor: -2 (no window scaling used)" size="2" pos="34" show="-2" value="7fff"/>
+ <field name="tcp.checksum" showname="Checksum: 0x0000 [validation disabled]" size="2" pos="36" show="0x00000000" value="0000">
+ <field name="tcp.checksum_good" showname="Good Checksum: False" size="2" pos="36" show="0" value="0000"/>
+ <field name="tcp.checksum_bad" showname="Bad Checksum: False" size="2" pos="36" show="0" value="0000"/>
+ </field>
+ <field name="tcp.urgent_pointer" showname="Urgent pointer: 0" size="2" pos="38" show="0" value="0000"/>
+ <field name="tcp.analysis" showname="SEQ/ACK analysis" size="0" pos="20" show="" value="">
+ <field name="tcp.analysis.initial_rtt" showname="iRTT: 0.000009000 seconds" size="0" pos="20" show="0.000009000"/>
+ <field name="tcp.analysis.bytes_in_flight" showname="Bytes in flight: 261" size="0" pos="20" show="261"/>
+ </field>
+ <field name="tcp.pdu.size" showname="PDU Size: 261" size="261" pos="40" show="261" value="...elided..."/>
+ </proto>
+ <proto name="kerberos" showname="Kerberos" size="261" pos="40">
+ <field name="" show="Record Mark: 257 bytes" size="4" pos="40" value="00000101">
+ <field name="kerberos.rm.reserved" showname="0... .... .... .... .... .... .... .... = Reserved: Not set" size="4" pos="40" show="0" value="0" unmaskedvalue="00000101"/>
+ <field name="kerberos.rm.length" showname=".000 0000 0000 0000 0000 0001 0000 0001 = Record Length: 257" size="4" pos="40" show="257" value="101" unmaskedvalue="00000101"/>
+ </field>
+ <field name="kerberos.as_req_element" showname="as-req" size="254" pos="47" show="" value="">
+ <field name="kerberos.pvno" showname="pvno: 5" size="1" pos="54" show="5" value="05"/>
+ <field name="kerberos.msg_type" showname="msg-type: krb-as-req (10)" size="1" pos="59" show="10" value="0a"/>
+ <field name="kerberos.padata" showname="padata: 1 item" size="78" pos="64" show="1" value="...elided...">
+ <field name="kerberos.PA_DATA_element" showname="PA-DATA PA-ENC-TIMESTAMP" size="78" pos="64" show="" value="">
+ <field name="kerberos.padata_type" showname="padata-type: kRB5-PADATA-ENC-TIMESTAMP (2)" size="1" pos="70" show="2" value="02">
+ <field name="kerberos.padata_value" showname="padata-value: 3041a003020112a23a0438cf413abdcde5fe3a6b82a38e52..." size="67" pos="75" show="...elided..." value="...elided...">
+ <field name="kerberos.etype" showname="etype: eTYPE-AES256-CTS-HMAC-SHA1-96 (18)" size="1" pos="81" show="18" value="12"/>
+ <field name="kerberos.cipher" showname="cipher: cf413abdcde5fe3a6b82a38e520fb2dc3063cae319cecdc1..." size="56" pos="86" show="...elided..." value="...elided..."/>
+ </field>
+ </field>
+ </field>
+ </field>
+ <field name="kerberos.req_body_element" showname="req-body" size="156" pos="145" show="" value="">
+ <field name="ber.bitstring.padding" showname="Padding: 0" size="1" pos="152" show="0" value="00"/>
+ <field name="kerberos.kdc_options" showname="kdc-options: 40000000 (forwardable)" size="4" pos="153" show="40:00:00:00" value="40000000">
+ <field name="kerberos.reserved" showname="0... .... = reserved: False" size="1" pos="153" show="0" value="0" unmaskedvalue="40"/>
+ <field name="kerberos.forwardable" showname=".1.. .... = forwardable: True" size="1" pos="153" show="1" value="FFFFFFFF" unmaskedvalue="40"/>
+ <field name="kerberos.forwarded" showname="..0. .... = forwarded: False" size="1" pos="153" show="0" value="0" unmaskedvalue="40"/>
+ <field name="kerberos.proxiable" showname="...0 .... = proxiable: False" size="1" pos="153" show="0" value="0" unmaskedvalue="40"/>
+ <field name="kerberos.proxy" showname=".... 0... = proxy: False" size="1" pos="153" show="0" value="0" unmaskedvalue="40"/>
+ <field name="kerberos.allow-postdate" showname=".... .0.. = allow-postdate: False" size="1" pos="153" show="0" value="0" unmaskedvalue="40"/>
+ <field name="kerberos.postdated" showname=".... ..0. = postdated: False" size="1" pos="153" show="0" value="0" unmaskedvalue="40"/>
+ <field name="kerberos.unused7" showname=".... ...0 = unused7: False" size="1" pos="153" show="0" value="0" unmaskedvalue="40"/>
+ <field name="kerberos.renewable" showname="0... .... = renewable: False" size="1" pos="154" show="0" value="0" unmaskedvalue="00"/>
+ <field name="kerberos.unused9" showname=".0.. .... = unused9: False" size="1" pos="154" show="0" value="0" unmaskedvalue="00"/>
+ <field name="kerberos.unused10" showname="..0. .... = unused10: False" size="1" pos="154" show="0" value="0" unmaskedvalue="00"/>
+ <field name="kerberos.opt-hardware-auth" showname="...0 .... = opt-hardware-auth: False" size="1" pos="154" show="0" value="0" unmaskedvalue="00"/>
+ <field name="kerberos.request-anonymous" showname=".... ..0. = request-anonymous: False" size="1" pos="154" show="0" value="0" unmaskedvalue="00"/>
+ <field name="kerberos.canonicalize" showname=".... ...0 = canonicalize: False" size="1" pos="154" show="0" value="0" unmaskedvalue="00"/>
+ <field name="kerberos.constrained-delegation" showname="0... .... = constrained-delegation: False" size="1" pos="155" show="0" value="0" unmaskedvalue="00"/>
+ <field name="kerberos.disable-transited-check" showname="..0. .... = disable-transited-check: False" size="1" pos="156" show="0" value="0" unmaskedvalue="00"/>
+ <field name="kerberos.renewable-ok" showname="...0 .... = renewable-ok: False" size="1" pos="156" show="0" value="0" unmaskedvalue="00"/>
+ <field name="kerberos.enc-tkt-in-skey" showname=".... 0... = enc-tkt-in-skey: False" size="1" pos="156" show="0" value="0" unmaskedvalue="00"/>
+ <field name="kerberos.renew" showname=".... ..0. = renew: False" size="1" pos="156" show="0" value="0" unmaskedvalue="00"/>
+ <field name="kerberos.validate" showname=".... ...0 = validate: False" size="1" pos="156" show="0" value="0" unmaskedvalue="00"/>
+ </field>
+ <field name="kerberos.cname_element" showname="cname" size="26" pos="159" show="" value="">
+ <field name="kerberos.name_type" showname="name-type: kRB5-NT-PRINCIPAL (1)" size="1" pos="165" show="1" value="01"/>
+ <field name="kerberos.name_string" showname="name-string: 1 item" size="15" pos="170" show="1" value="1b0d41646d696e6973747261746f72">
+ <field name="kerberos.KerberosString" showname="KerberosString: Administrator" size="13" pos="172" show="Administrator" value="41646d696e6973747261746f72"/>
+ </field>
+ </field>
+ <field name="kerberos.realm" showname="realm: SAMBA.EXAMPLE.COM" size="17" pos="189" show="SAMBA.EXAMPLE.COM" value="53414d42412e4558414d504c452e434f4d"/>
+ <field name="kerberos.sname_element" showname="sname" size="38" pos="208" show="" value="">
+ <field name="kerberos.name_type" showname="name-type: kRB5-NT-SRV-INST (2)" size="1" pos="214" show="2" value="02"/>
+ <field name="kerberos.name_string" showname="name-string: 2 items" size="27" pos="219" show="2" value="1b066b72627467741b1153414d42412e4558414d504c452e434f4d">
+ <field name="kerberos.KerberosString" showname="KerberosString: krbtgt" size="6" pos="221" show="krbtgt" value="6b7262746774"/>
+ <field name="kerberos.KerberosString" showname="KerberosString: SAMBA.EXAMPLE.COM" size="17" pos="229" show="SAMBA.EXAMPLE.COM" value="53414d42412e4558414d504c452e434f4d"/>
+ </field>
+ </field>
+ <field name="kerberos.till" showname="till: 2017-02-11 01:36:24 (UTC)" size="15" pos="250" show="2017-02-11 01:36:24 (UTC)" value="32303137303231313031333632345a"/>
+ <field name="kerberos.nonce" showname="nonce: 1225047325" size="4" pos="269" show="1225047325" value="4904bd1d"/>
+ <field name="kerberos.etype" showname="etype: 8 items" size="24" pos="277" show="8" value="020112020111020110020105020117020103020102020101">
+ <field name="kerberos.ENCTYPE" showname="ENCTYPE: eTYPE-AES256-CTS-HMAC-SHA1-96 (18)" size="1" pos="279" show="18" value="12"/>
+ <field name="kerberos.ENCTYPE" showname="ENCTYPE: eTYPE-AES128-CTS-HMAC-SHA1-96 (17)" size="1" pos="282" show="17" value="11"/>
+ <field name="kerberos.ENCTYPE" showname="ENCTYPE: eTYPE-DES3-CBC-SHA1 (16)" size="1" pos="285" show="16" value="10"/>
+ <field name="kerberos.ENCTYPE" showname="ENCTYPE: eTYPE-DES3-CBC-MD5 (5)" size="1" pos="288" show="5" value="05"/>
+ <field name="kerberos.ENCTYPE" showname="ENCTYPE: eTYPE-ARCFOUR-HMAC-MD5 (23)" size="1" pos="291" show="23" value="17"/>
+ <field name="kerberos.ENCTYPE" showname="ENCTYPE: eTYPE-DES-CBC-MD5 (3)" size="1" pos="294" show="3" value="03"/>
+ <field name="kerberos.ENCTYPE" showname="ENCTYPE: eTYPE-DES-CBC-MD4 (2)" size="1" pos="297" show="2" value="02"/>
+ <field name="kerberos.ENCTYPE" showname="ENCTYPE: eTYPE-DES-CBC-CRC (1)" size="1" pos="300" show="1" value="01"/>
+ </field>
+ </field>
+ </field>
+ </proto>
+</packet>
+
+
+<packet>
+ <proto name="geninfo" pos="0" showname="General information" size="70">
+ <field name="num" pos="0" show="2408" showname="Number" value="968" size="70"/>
+ <field name="len" pos="0" show="70" showname="Frame Length" value="46" size="70"/>
+ <field name="caplen" pos="0" show="70" showname="Captured Length" value="46" size="70"/>
+ <field name="timestamp" pos="0" show="Feb 10, 2017 14:36:24.108221000 NZDT" showname="Captured Time" value="1486690584.108221000" size="70"/>
+ </proto>
+ <proto name="frame" showname="Frame 2408: 70 bytes on wire (560 bits), 70 bytes captured (560 bits)" size="70" pos="0">
+ <field name="frame.encap_type" showname="Encapsulation type: Raw IP (7)" size="0" pos="0" show="7"/>
+ <field name="frame.time" showname="Arrival Time: Feb 10, 2017 14:36:24.108221000 NZDT" size="0" pos="0" show="Feb 10, 2017 14:36:24.108221000 NZDT"/>
+ <field name="frame.offset_shift" showname="Time shift for this packet: 0.000000000 seconds" size="0" pos="0" show="0.000000000"/>
+ <field name="frame.time_epoch" showname="Epoch Time: 1486690584.108221000 seconds" size="0" pos="0" show="1486690584.108221000"/>
+ <field name="frame.time_delta" showname="Time delta from previous captured frame: 0.000003000 seconds" size="0" pos="0" show="0.000003000"/>
+ <field name="frame.time_delta_displayed" showname="Time delta from previous displayed frame: 0.000003000 seconds" size="0" pos="0" show="0.000003000"/>
+ <field name="frame.time_relative" showname="Time since reference or first frame: 7.577770000 seconds" size="0" pos="0" show="7.577770000"/>
+ <field name="frame.number" showname="Frame Number: 2408" size="0" pos="0" show="2408"/>
+ <field name="frame.len" showname="Frame Length: 70 bytes (560 bits)" size="0" pos="0" show="70"/>
+ <field name="frame.cap_len" showname="Capture Length: 70 bytes (560 bits)" size="0" pos="0" show="70"/>
+ <field name="frame.marked" showname="Frame is marked: False" size="0" pos="0" show="0"/>
+ <field name="frame.ignored" showname="Frame is ignored: False" size="0" pos="0" show="0"/>
+ <field name="frame.protocols" showname="Protocols in frame: raw:ip:tcp:kerberos" size="0" pos="0" show="raw:ip:tcp:kerberos"/>
+ </proto>
+ <proto name="raw" showname="Raw packet data" size="70" pos="0"/>
+ <proto name="ip" showname="Internet Protocol Version 4, Src: 127.0.0.21, Dst: 127.0.0.11" size="20" pos="0">
+ <field name="ip.version" showname="0100 .... = Version: 4" size="1" pos="0" show="4" value="4" unmaskedvalue="45"/>
+ <field name="ip.hdr_len" showname=".... 0101 = Header Length: 20 bytes" size="1" pos="0" show="5" value="5" unmaskedvalue="45"/>
+ <field name="ip.dsfield" showname="Differentiated Services Field: 0x00 (DSCP: CS0, ECN: Not-ECT)" size="1" pos="1" show="0x00000000" value="00">
+ <field name="ip.dsfield.dscp" showname="0000 00.. = Differentiated Services Codepoint: Default (0)" size="1" pos="1" show="0" value="0" unmaskedvalue="00"/>
+ <field name="ip.dsfield.ecn" showname=".... ..00 = Explicit Congestion Notification: Not ECN-Capable Transport (0)" size="1" pos="1" show="0" value="0" unmaskedvalue="00"/>
+ </field>
+ <field name="ip.len" showname="Total Length: 70" size="2" pos="2" show="70" value="0046"/>
+ <field name="ip.id" showname="Identification: 0xffff (65535)" size="2" pos="4" show="0x0000ffff" value="ffff"/>
+ <field name="ip.flags" showname="Flags: 0x02 (Don&#x27;t Fragment)" size="1" pos="6" show="0x00000002" value="40">
+ <field name="ip.flags.rb" showname="0... .... = Reserved bit: Not set" size="1" pos="6" show="0" value="40"/>
+ <field name="ip.flags.df" showname=".1.. .... = Don&#x27;t fragment: Set" size="1" pos="6" show="1" value="40"/>
+ <field name="ip.flags.mf" showname="..0. .... = More fragments: Not set" size="1" pos="6" show="0" value="40"/>
+ </field>
+ <field name="ip.frag_offset" showname="Fragment offset: 0" size="2" pos="6" show="0" value="4000"/>
+ <field name="ip.ttl" showname="Time to live: 255" size="1" pos="8" show="255" value="ff"/>
+ <field name="ip.proto" showname="Protocol: TCP (6)" size="1" pos="9" show="6" value="06"/>
+ <field name="ip.checksum" showname="Header checksum: 0x0000 [validation disabled]" size="2" pos="10" show="0x00000000" value="0000">
+ <field name="ip.checksum_good" showname="Good: False" size="2" pos="10" show="0" value="0000"/>
+ <field name="ip.checksum_bad" showname="Bad: False" size="2" pos="10" show="0" value="0000"/>
+ </field>
+ <field name="ip.src" showname="Source: 127.0.0.21" size="4" pos="12" show="127.0.0.21" value="7f000015"/>
+ <field name="ip.addr" showname="Source or Destination Address: 127.0.0.21" hide="yes" size="4" pos="12" show="127.0.0.21" value="7f000015"/>
+ <field name="ip.src_host" showname="Source Host: 127.0.0.21" hide="yes" size="4" pos="12" show="127.0.0.21" value="7f000015"/>
+ <field name="ip.host" showname="Source or Destination Host: 127.0.0.21" hide="yes" size="4" pos="12" show="127.0.0.21" value="7f000015"/>
+ <field name="ip.dst" showname="Destination: 127.0.0.11" size="4" pos="16" show="127.0.0.11" value="7f00000b"/>
+ <field name="ip.addr" showname="Source or Destination Address: 127.0.0.11" hide="yes" size="4" pos="16" show="127.0.0.11" value="7f00000b"/>
+ <field name="ip.dst_host" showname="Destination Host: 127.0.0.11" hide="yes" size="4" pos="16" show="127.0.0.11" value="7f00000b"/>
+ <field name="ip.host" showname="Source or Destination Host: 127.0.0.11" hide="yes" size="4" pos="16" show="127.0.0.11" value="7f00000b"/>
+ <field name="" show="Source GeoIP: Unknown" size="4" pos="12" value="7f000015"/>
+ <field name="" show="Destination GeoIP: Unknown" size="4" pos="16" value="7f00000b"/>
+ </proto>
+ <proto name="tcp" showname="Transmission Control Protocol, Src Port: 88 (88), Dst Port: 14787 (14787), Seq: 1505, Ack: 262, Len: 30" size="20" pos="20">
+ <field name="tcp.srcport" showname="Source Port: 88" size="2" pos="20" show="88" value="0058"/>
+ <field name="tcp.dstport" showname="Destination Port: 14787" size="2" pos="22" show="14787" value="39c3"/>
+ <field name="tcp.port" showname="Source or Destination Port: 88" hide="yes" size="2" pos="20" show="88" value="0058"/>
+ <field name="tcp.port" showname="Source or Destination Port: 14787" hide="yes" size="2" pos="22" show="14787" value="39c3"/>
+ <field name="tcp.stream" showname="Stream index: 49" size="0" pos="20" show="49"/>
+ <field name="tcp.len" showname="TCP Segment Len: 30" size="1" pos="32" show="30" value="50"/>
+ <field name="tcp.seq" showname="Sequence number: 1505 (relative sequence number)" size="4" pos="24" show="1505" value="000005e1"/>
+ <field name="tcp.nxtseq" showname="Next sequence number: 1535 (relative sequence number)" size="0" pos="20" show="1535"/>
+ <field name="tcp.ack" showname="Acknowledgment number: 262 (relative ack number)" size="4" pos="28" show="262" value="00000106"/>
+ <field name="tcp.hdr_len" showname="Header Length: 20 bytes" size="1" pos="32" show="20" value="50"/>
+ <field name="tcp.flags" showname="Flags: 0x018 (PSH, ACK)" size="2" pos="32" show="0x00000018" value="18" unmaskedvalue="5018">
+ <field name="tcp.flags.res" showname="000. .... .... = Reserved: Not set" size="1" pos="32" show="0" value="0" unmaskedvalue="50"/>
+ <field name="tcp.flags.ns" showname="...0 .... .... = Nonce: Not set" size="1" pos="32" show="0" value="0" unmaskedvalue="50"/>
+ <field name="tcp.flags.cwr" showname=".... 0... .... = Congestion Window Reduced (CWR): Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.ecn" showname=".... .0.. .... = ECN-Echo: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.urg" showname=".... ..0. .... = Urgent: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.ack" showname=".... ...1 .... = Acknowledgment: Set" size="1" pos="33" show="1" value="FFFFFFFF" unmaskedvalue="18"/>
+ <field name="tcp.flags.push" showname=".... .... 1... = Push: Set" size="1" pos="33" show="1" value="FFFFFFFF" unmaskedvalue="18"/>
+ <field name="tcp.flags.reset" showname=".... .... .0.. = Reset: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.syn" showname=".... .... ..0. = Syn: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.fin" showname=".... .... ...0 = Fin: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.str" showname="TCP Flags: *******AP***" size="2" pos="32" show="*******AP***" value="5018"/>
+ </field>
+ <field name="tcp.window_size_value" showname="Window size value: 32767" size="2" pos="34" show="32767" value="7fff"/>
+ <field name="tcp.window_size" showname="Calculated window size: 32767" size="2" pos="34" show="32767" value="7fff"/>
+ <field name="tcp.window_size_scalefactor" showname="Window size scaling factor: -2 (no window scaling used)" size="2" pos="34" show="-2" value="7fff"/>
+ <field name="tcp.checksum" showname="Checksum: 0x0000 [validation disabled]" size="2" pos="36" show="0x00000000" value="0000">
+ <field name="tcp.checksum_good" showname="Good Checksum: False" size="2" pos="36" show="0" value="0000"/>
+ <field name="tcp.checksum_bad" showname="Bad Checksum: False" size="2" pos="36" show="0" value="0000"/>
+ </field>
+ <field name="tcp.urgent_pointer" showname="Urgent pointer: 0" size="2" pos="38" show="0" value="0000"/>
+ <field name="tcp.analysis" showname="SEQ/ACK analysis" size="0" pos="20" show="" value="">
+ <field name="tcp.analysis.initial_rtt" showname="iRTT: 0.000009000 seconds" size="0" pos="20" show="0.000009000"/>
+ <field name="tcp.analysis.bytes_in_flight" showname="Bytes in flight: 1534" size="0" pos="20" show="1534"/>
+ </field>
+ <field name="tcp.pdu.size" showname="PDU Size: 1534" size="1534" pos="20" show="1534" value="...elided..."/>
+ <field name="tcp.segment_data" showname="TCP segment data (30 bytes)" size="30" pos="40" show="54:c1:fb:c3:43:df:f3:ce:39:c5:50:6d:bb:0a:e1:fb:63:1d:43:4e:45:94:4b:8a:05:ae:cf:89:93:62" value="54c1fbc343dff3ce39c5506dbb0ae1fb631d434e45944b8a05aecf899362"/>
+ </proto>
+ <proto name="fake-field-wrapper">
+ <field name="tcp.segments" showname="3 Reassembled TCP Segments (1534 bytes): #2406(4), #2407(1500), #2408(30)" size="1534" pos="0" show="" value="">
+ <field name="tcp.segment" showname="Frame: 2406, payload: 0-3 (4 bytes)" size="4" pos="0" show="2406" value="000005fa"/>
+ <field name="tcp.segment" showname="Frame: 2407, payload: 4-1503 (1500 bytes)" size="1500" pos="4" show="2407" value="...elided..."/>
+ <field name="tcp.segment" showname="Frame: 2408, payload: 1504-1533 (30 bytes)" size="30" pos="1504" show="2408" value="54c1fbc343dff3ce39c5506dbb0ae1fb631d434e45944b8a05aecf899362"/>
+ <field name="tcp.segment.count" showname="Segment count: 3" size="0" pos="0" show="3"/>
+ <field name="tcp.reassembled.length" showname="Reassembled TCP length: 1534" size="0" pos="0" show="1534"/>
+ <field name="tcp.reassembled.data" showname="Reassembled TCP Data: 000005fa6b8205f6308205f2a003020105a10302010ba22b..." size="1534" pos="0" show="...elided..." value="...elided..."/>
+ </field>
+</proto>
+ <proto name="kerberos" showname="Kerberos" size="1534" pos="0">
+ <field name="" show="Record Mark: 1530 bytes" size="4" pos="0" value="000005fa">
+ <field name="kerberos.rm.reserved" showname="0... .... .... .... .... .... .... .... = Reserved: Not set" size="4" pos="0" show="0" value="0" unmaskedvalue="000005fa"/>
+ <field name="kerberos.rm.length" showname=".000 0000 0000 0000 0000 0101 1111 1010 = Record Length: 1530" size="4" pos="0" show="1530" value="5FA" unmaskedvalue="000005fa"/>
+ </field>
+ <field name="kerberos.as_rep_element" showname="as-rep" size="1526" pos="8" show="" value="">
+ <field name="kerberos.pvno" showname="pvno: 5" size="1" pos="16" show="5" value="05"/>
+ <field name="kerberos.msg_type" showname="msg-type: krb-as-rep (11)" size="1" pos="21" show="11" value="0b"/>
+ <field name="kerberos.padata" showname="padata: 1 item" size="41" pos="26" show="1" value="3027a103020103a220041e53414d42412e4558414d504c452e434f4d41646d696e6973747261746f72">
+ <field name="kerberos.PA_DATA_element" showname="PA-DATA PA-PW-SALT" size="41" pos="26" show="" value="">
+ <field name="kerberos.padata_type" showname="padata-type: kRB5-PADATA-PW-SALT (3)" size="1" pos="32" show="3" value="03">
+ <field name="kerberos.padata_value" showname="padata-value: 53414d42412e4558414d504c452e434f4d41646d696e6973..." size="30" pos="37" show="53:41:4d:42:41:2e:45:58:41:4d:50:4c:45:2e:43:4f:4d:41:64:6d:69:6e:69:73:74:72:61:74:6f:72" value="53414d42412e4558414d504c452e434f4d41646d696e6973747261746f72">
+ <field name="kerberos.smb.nt_status" showname="NT Status: Unknown (0x424d4153)" size="4" pos="37" show="0x424d4153" value="53414d42"/>
+ <field name="kerberos.smb.unknown" showname="Unknown: 0x58452e41" size="4" pos="41" show="0x58452e41" value="412e4558"/>
+ <field name="kerberos.smb.unknown" showname="Unknown: 0x4c504d41" size="4" pos="45" show="0x4c504d41" value="414d504c"/>
+ </field>
+ </field>
+ </field>
+ </field>
+ <field name="kerberos.crealm" showname="crealm: SAMBA.EXAMPLE.COM" size="17" pos="71" show="SAMBA.EXAMPLE.COM" value="53414d42412e4558414d504c452e434f4d"/>
+ <field name="kerberos.cname_element" showname="cname" size="26" pos="90" show="" value="">
+ <field name="kerberos.name_type" showname="name-type: kRB5-NT-PRINCIPAL (1)" size="1" pos="96" show="1" value="01"/>
+ <field name="kerberos.name_string" showname="name-string: 1 item" size="15" pos="101" show="1" value="1b0d41646d696e6973747261746f72">
+ <field name="kerberos.KerberosString" showname="KerberosString: Administrator" size="13" pos="103" show="Administrator" value="41646d696e6973747261746f72"/>
+ </field>
+ </field>
+ <field name="kerberos.ticket_element" showname="ticket" size="1142" pos="124" show="" value="">
+ <field name="kerberos.tkt_vno" showname="tkt-vno: 5" size="1" pos="132" show="5" value="05"/>
+ <field name="kerberos.realm" showname="realm: SAMBA.EXAMPLE.COM" size="17" pos="137" show="SAMBA.EXAMPLE.COM" value="53414d42412e4558414d504c452e434f4d"/>
+ <field name="kerberos.sname_element" showname="sname" size="38" pos="156" show="" value="">
+ <field name="kerberos.name_type" showname="name-type: kRB5-NT-SRV-INST (2)" size="1" pos="162" show="2" value="02"/>
+ <field name="kerberos.name_string" showname="name-string: 2 items" size="27" pos="167" show="2" value="1b066b72627467741b1153414d42412e4558414d504c452e434f4d">
+ <field name="kerberos.KerberosString" showname="KerberosString: krbtgt" size="6" pos="169" show="krbtgt" value="6b7262746774"/>
+ <field name="kerberos.KerberosString" showname="KerberosString: SAMBA.EXAMPLE.COM" size="17" pos="177" show="SAMBA.EXAMPLE.COM" value="53414d42412e4558414d504c452e434f4d"/>
+ </field>
+ </field>
+ <field name="kerberos.enc_part_element" showname="enc-part" size="1068" pos="198" show="" value="">
+ <field name="kerberos.etype" showname="etype: eTYPE-AES256-CTS-HMAC-SHA1-96 (18)" size="1" pos="206" show="18" value="12"/>
+ <field name="kerberos.kvno" showname="kvno: 1" size="1" pos="211" show="1" value="01"/>
+ <field name="kerberos.cipher" showname="cipher: 5a2a14fec09c49807c1be2a0b335af26ed64f89184336870..." size="1046" pos="220" show="...elided..." value="...elided..."/>
+ </field>
+ </field>
+ <field name="kerberos.enc_part_element" showname="enc-part" size="264" pos="1270" show="" value="">
+ <field name="kerberos.etype" showname="etype: eTYPE-AES256-CTS-HMAC-SHA1-96 (18)" size="1" pos="1278" show="18" value="12"/>
+ <field name="kerberos.kvno" showname="kvno: 1" size="1" pos="1283" show="1" value="01"/>
+ <field name="kerberos.cipher" showname="cipher: 3ecb1568e8ee09adcebe1ec65c1e767415512cd84449fabc..." size="244" pos="1290" show="...elided..." value="...elided..."/>
+ </field>
+ </field>
+ </proto>
+</packet>
+
+
+<packet>
+ <proto name="geninfo" pos="0" showname="General information" size="82">
+ <field name="num" pos="0" show="2422" showname="Number" value="976" size="82"/>
+ <field name="len" pos="0" show="82" showname="Frame Length" value="52" size="82"/>
+ <field name="caplen" pos="0" show="82" showname="Captured Length" value="52" size="82"/>
+ <field name="timestamp" pos="0" show="Feb 10, 2017 14:36:24.139378000 NZDT" showname="Captured Time" value="1486690584.139378000" size="82"/>
+ </proto>
+ <proto name="frame" showname="Frame 2422: 82 bytes on wire (656 bits), 82 bytes captured (656 bits)" size="82" pos="0">
+ <field name="frame.encap_type" showname="Encapsulation type: Raw IP (7)" size="0" pos="0" show="7"/>
+ <field name="frame.time" showname="Arrival Time: Feb 10, 2017 14:36:24.139378000 NZDT" size="0" pos="0" show="Feb 10, 2017 14:36:24.139378000 NZDT"/>
+ <field name="frame.offset_shift" showname="Time shift for this packet: 0.000000000 seconds" size="0" pos="0" show="0.000000000"/>
+ <field name="frame.time_epoch" showname="Epoch Time: 1486690584.139378000 seconds" size="0" pos="0" show="1486690584.139378000"/>
+ <field name="frame.time_delta" showname="Time delta from previous captured frame: 0.000003000 seconds" size="0" pos="0" show="0.000003000"/>
+ <field name="frame.time_delta_displayed" showname="Time delta from previous displayed frame: 0.000003000 seconds" size="0" pos="0" show="0.000003000"/>
+ <field name="frame.time_relative" showname="Time since reference or first frame: 7.608927000 seconds" size="0" pos="0" show="7.608927000"/>
+ <field name="frame.number" showname="Frame Number: 2422" size="0" pos="0" show="2422"/>
+ <field name="frame.len" showname="Frame Length: 82 bytes (656 bits)" size="0" pos="0" show="82"/>
+ <field name="frame.cap_len" showname="Capture Length: 82 bytes (656 bits)" size="0" pos="0" show="82"/>
+ <field name="frame.marked" showname="Frame is marked: False" size="0" pos="0" show="0"/>
+ <field name="frame.ignored" showname="Frame is ignored: False" size="0" pos="0" show="0"/>
+ <field name="frame.protocols" showname="Protocols in frame: raw:ip:tcp:kerberos" size="0" pos="0" show="raw:ip:tcp:kerberos"/>
+ </proto>
+ <proto name="raw" showname="Raw packet data" size="82" pos="0"/>
+ <proto name="ip" showname="Internet Protocol Version 4, Src: 127.0.0.11, Dst: 127.0.0.21" size="20" pos="0">
+ <field name="ip.version" showname="0100 .... = Version: 4" size="1" pos="0" show="4" value="4" unmaskedvalue="45"/>
+ <field name="ip.hdr_len" showname=".... 0101 = Header Length: 20 bytes" size="1" pos="0" show="5" value="5" unmaskedvalue="45"/>
+ <field name="ip.dsfield" showname="Differentiated Services Field: 0x00 (DSCP: CS0, ECN: Not-ECT)" size="1" pos="1" show="0x00000000" value="00">
+ <field name="ip.dsfield.dscp" showname="0000 00.. = Differentiated Services Codepoint: Default (0)" size="1" pos="1" show="0" value="0" unmaskedvalue="00"/>
+ <field name="ip.dsfield.ecn" showname=".... ..00 = Explicit Congestion Notification: Not ECN-Capable Transport (0)" size="1" pos="1" show="0" value="0" unmaskedvalue="00"/>
+ </field>
+ <field name="ip.len" showname="Total Length: 82" size="2" pos="2" show="82" value="0052"/>
+ <field name="ip.id" showname="Identification: 0xffff (65535)" size="2" pos="4" show="0x0000ffff" value="ffff"/>
+ <field name="ip.flags" showname="Flags: 0x02 (Don&#x27;t Fragment)" size="1" pos="6" show="0x00000002" value="40">
+ <field name="ip.flags.rb" showname="0... .... = Reserved bit: Not set" size="1" pos="6" show="0" value="40"/>
+ <field name="ip.flags.df" showname=".1.. .... = Don&#x27;t fragment: Set" size="1" pos="6" show="1" value="40"/>
+ <field name="ip.flags.mf" showname="..0. .... = More fragments: Not set" size="1" pos="6" show="0" value="40"/>
+ </field>
+ <field name="ip.frag_offset" showname="Fragment offset: 0" size="2" pos="6" show="0" value="4000"/>
+ <field name="ip.ttl" showname="Time to live: 255" size="1" pos="8" show="255" value="ff"/>
+ <field name="ip.proto" showname="Protocol: TCP (6)" size="1" pos="9" show="6" value="06"/>
+ <field name="ip.checksum" showname="Header checksum: 0x0000 [validation disabled]" size="2" pos="10" show="0x00000000" value="0000">
+ <field name="ip.checksum_good" showname="Good: False" size="2" pos="10" show="0" value="0000"/>
+ <field name="ip.checksum_bad" showname="Bad: False" size="2" pos="10" show="0" value="0000"/>
+ </field>
+ <field name="ip.src" showname="Source: 127.0.0.11" size="4" pos="12" show="127.0.0.11" value="7f00000b"/>
+ <field name="ip.addr" showname="Source or Destination Address: 127.0.0.11" hide="yes" size="4" pos="12" show="127.0.0.11" value="7f00000b"/>
+ <field name="ip.src_host" showname="Source Host: 127.0.0.11" hide="yes" size="4" pos="12" show="127.0.0.11" value="7f00000b"/>
+ <field name="ip.host" showname="Source or Destination Host: 127.0.0.11" hide="yes" size="4" pos="12" show="127.0.0.11" value="7f00000b"/>
+ <field name="ip.dst" showname="Destination: 127.0.0.21" size="4" pos="16" show="127.0.0.21" value="7f000015"/>
+ <field name="ip.addr" showname="Source or Destination Address: 127.0.0.21" hide="yes" size="4" pos="16" show="127.0.0.21" value="7f000015"/>
+ <field name="ip.dst_host" showname="Destination Host: 127.0.0.21" hide="yes" size="4" pos="16" show="127.0.0.21" value="7f000015"/>
+ <field name="ip.host" showname="Source or Destination Host: 127.0.0.21" hide="yes" size="4" pos="16" show="127.0.0.21" value="7f000015"/>
+ <field name="" show="Source GeoIP: Unknown" size="4" pos="12" value="7f00000b"/>
+ <field name="" show="Destination GeoIP: Unknown" size="4" pos="16" value="7f000015"/>
+ </proto>
+ <proto name="tcp" showname="Transmission Control Protocol, Src Port: 14788 (14788), Dst Port: 88 (88), Seq: 1501, Ack: 1, Len: 42" size="20" pos="20">
+ <field name="tcp.srcport" showname="Source Port: 14788" size="2" pos="20" show="14788" value="39c4"/>
+ <field name="tcp.dstport" showname="Destination Port: 88" size="2" pos="22" show="88" value="0058"/>
+ <field name="tcp.port" showname="Source or Destination Port: 14788" hide="yes" size="2" pos="20" show="14788" value="39c4"/>
+ <field name="tcp.port" showname="Source or Destination Port: 88" hide="yes" size="2" pos="22" show="88" value="0058"/>
+ <field name="tcp.stream" showname="Stream index: 50" size="0" pos="20" show="50"/>
+ <field name="tcp.len" showname="TCP Segment Len: 42" size="1" pos="32" show="42" value="50"/>
+ <field name="tcp.seq" showname="Sequence number: 1501 (relative sequence number)" size="4" pos="24" show="1501" value="000005dd"/>
+ <field name="tcp.nxtseq" showname="Next sequence number: 1543 (relative sequence number)" size="0" pos="20" show="1543"/>
+ <field name="tcp.ack" showname="Acknowledgment number: 1 (relative ack number)" size="4" pos="28" show="1" value="00000001"/>
+ <field name="tcp.hdr_len" showname="Header Length: 20 bytes" size="1" pos="32" show="20" value="50"/>
+ <field name="tcp.flags" showname="Flags: 0x018 (PSH, ACK)" size="2" pos="32" show="0x00000018" value="18" unmaskedvalue="5018">
+ <field name="tcp.flags.res" showname="000. .... .... = Reserved: Not set" size="1" pos="32" show="0" value="0" unmaskedvalue="50"/>
+ <field name="tcp.flags.ns" showname="...0 .... .... = Nonce: Not set" size="1" pos="32" show="0" value="0" unmaskedvalue="50"/>
+ <field name="tcp.flags.cwr" showname=".... 0... .... = Congestion Window Reduced (CWR): Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.ecn" showname=".... .0.. .... = ECN-Echo: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.urg" showname=".... ..0. .... = Urgent: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.ack" showname=".... ...1 .... = Acknowledgment: Set" size="1" pos="33" show="1" value="FFFFFFFF" unmaskedvalue="18"/>
+ <field name="tcp.flags.push" showname=".... .... 1... = Push: Set" size="1" pos="33" show="1" value="FFFFFFFF" unmaskedvalue="18"/>
+ <field name="tcp.flags.reset" showname=".... .... .0.. = Reset: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.syn" showname=".... .... ..0. = Syn: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.fin" showname=".... .... ...0 = Fin: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.str" showname="TCP Flags: *******AP***" size="2" pos="32" show="*******AP***" value="5018"/>
+ </field>
+ <field name="tcp.window_size_value" showname="Window size value: 32767" size="2" pos="34" show="32767" value="7fff"/>
+ <field name="tcp.window_size" showname="Calculated window size: 32767" size="2" pos="34" show="32767" value="7fff"/>
+ <field name="tcp.window_size_scalefactor" showname="Window size scaling factor: -2 (no window scaling used)" size="2" pos="34" show="-2" value="7fff"/>
+ <field name="tcp.checksum" showname="Checksum: 0x0000 [validation disabled]" size="2" pos="36" show="0x00000000" value="0000">
+ <field name="tcp.checksum_good" showname="Good Checksum: False" size="2" pos="36" show="0" value="0000"/>
+ <field name="tcp.checksum_bad" showname="Bad Checksum: False" size="2" pos="36" show="0" value="0000"/>
+ </field>
+ <field name="tcp.urgent_pointer" showname="Urgent pointer: 0" size="2" pos="38" show="0" value="0000"/>
+ <field name="tcp.analysis" showname="SEQ/ACK analysis" size="0" pos="20" show="" value="">
+ <field name="tcp.analysis.initial_rtt" showname="iRTT: 0.000009000 seconds" size="0" pos="20" show="0.000009000"/>
+ <field name="tcp.analysis.bytes_in_flight" showname="Bytes in flight: 1542" size="0" pos="20" show="1542"/>
+ </field>
+ <field name="tcp.pdu.size" showname="PDU Size: 1542" size="1542" pos="20" show="1542" value="...elided..."/>
+ <field name="tcp.segment_data" showname="TCP segment data (42 bytes)" size="42" pos="40" show="30:30:30:30:30:5a:a7:06:02:04:99:16:39:d0:a8:1a:30:18:02:01:12:02:01:11:02:01:10:02:01:05:02:01:17:02:01:03:02:01:02:02:01:01" value="30303030305aa7060204991639d0a81a3018020112020111020110020105020117020103020102020101"/>
+ </proto>
+ <proto name="fake-field-wrapper">
+ <field name="tcp.segments" showname="2 Reassembled TCP Segments (1542 bytes): #2421(1500), #2422(42)" size="1542" pos="0" show="" value="">
+ <field name="tcp.segment" showname="Frame: 2421, payload: 0-1499 (1500 bytes)" size="1500" pos="0" show="2421" value="...elided..."/>
+ <field name="tcp.segment" showname="Frame: 2422, payload: 1500-1541 (42 bytes)" size="42" pos="1500" show="2422" value="30303030305aa7060204991639d0a81a3018020112020111020110020105020117020103020102020101"/>
+ <field name="tcp.segment.count" showname="Segment count: 2" size="0" pos="0" show="2"/>
+ <field name="tcp.reassembled.length" showname="Reassembled TCP length: 1542" size="0" pos="0" show="1542"/>
+ <field name="tcp.reassembled.data" showname="Reassembled TCP Data: 000006026c8205fe308205faa103020105a20302010ca382..." size="1542" pos="0" show="...elided..." value="...elided..."/>
+ </field>
+</proto>
+ <proto name="kerberos" showname="Kerberos" size="1542" pos="0">
+ <field name="" show="Record Mark: 1538 bytes" size="4" pos="0" value="00000602">
+ <field name="kerberos.rm.reserved" showname="0... .... .... .... .... .... .... .... = Reserved: Not set" size="4" pos="0" show="0" value="0" unmaskedvalue="00000602"/>
+ <field name="kerberos.rm.length" showname=".000 0000 0000 0000 0000 0110 0000 0010 = Record Length: 1538" size="4" pos="0" show="1538" value="602" unmaskedvalue="00000602"/>
+ </field>
+ <field name="kerberos.tgs_req_element" showname="tgs-req" size="1534" pos="8" show="" value="">
+ <field name="kerberos.pvno" showname="pvno: 5" size="1" pos="16" show="5" value="05"/>
+ <field name="kerberos.msg_type" showname="msg-type: krb-tgs-req (12)" size="1" pos="21" show="12" value="0c"/>
+ <field name="kerberos.padata" showname="padata: 1 item" size="1395" pos="30" show="1" value="...elided...">
+ <field name="kerberos.PA_DATA_element" showname="PA-DATA PA-TGS-REQ" size="1395" pos="30" show="" value="">
+ <field name="kerberos.padata_type" showname="padata-type: kRB5-PADATA-TGS-REQ (1)" size="1" pos="38" show="1" value="01">
+ <field name="kerberos.padata_value" showname="padata-value: 6e82055e3082055aa003020105a10302010ea20703050000..." size="1378" pos="47" show="...elided..." value="...elided...">
+ <field name="kerberos.ap_req_element" showname="ap-req" size="1374" pos="51" show="" value="">
+ <field name="kerberos.pvno" showname="pvno: 5" size="1" pos="59" show="5" value="05"/>
+ <field name="kerberos.msg_type" showname="msg-type: krb-ap-req (14)" size="1" pos="64" show="14" value="0e"/>
+ <field name="ber.bitstring.padding" showname="Padding: 0" size="1" pos="69" show="0" value="00"/>
+ <field name="kerberos.ap_options" showname="ap-options: 00000000" size="4" pos="70" show="00:00:00:00" value="00000000">
+ <field name="kerberos.reserved" showname="0... .... = reserved: False" size="1" pos="70" show="0" value="0" unmaskedvalue="00"/>
+ <field name="kerberos.use-session-key" showname=".0.. .... = use-session-key: False" size="1" pos="70" show="0" value="0" unmaskedvalue="00"/>
+ <field name="kerberos.mutual-required" showname="..0. .... = mutual-required: False" size="1" pos="70" show="0" value="0" unmaskedvalue="00"/>
+ </field>
+ <field name="kerberos.ticket_element" showname="ticket" size="1142" pos="82" show="" value="">
+ <field name="kerberos.tkt_vno" showname="tkt-vno: 5" size="1" pos="90" show="5" value="05"/>
+ <field name="kerberos.realm" showname="realm: SAMBA.EXAMPLE.COM" size="17" pos="95" show="SAMBA.EXAMPLE.COM" value="53414d42412e4558414d504c452e434f4d"/>
+ <field name="kerberos.sname_element" showname="sname" size="38" pos="114" show="" value="">
+ <field name="kerberos.name_type" showname="name-type: kRB5-NT-SRV-INST (2)" size="1" pos="120" show="2" value="02"/>
+ <field name="kerberos.name_string" showname="name-string: 2 items" size="27" pos="125" show="2" value="1b066b72627467741b1153414d42412e4558414d504c452e434f4d">
+ <field name="kerberos.KerberosString" showname="KerberosString: krbtgt" size="6" pos="127" show="krbtgt" value="6b7262746774"/>
+ <field name="kerberos.KerberosString" showname="KerberosString: SAMBA.EXAMPLE.COM" size="17" pos="135" show="SAMBA.EXAMPLE.COM" value="53414d42412e4558414d504c452e434f4d"/>
+ </field>
+ </field>
+ <field name="kerberos.enc_part_element" showname="enc-part" size="1068" pos="156" show="" value="">
+ <field name="kerberos.etype" showname="etype: eTYPE-AES256-CTS-HMAC-SHA1-96 (18)" size="1" pos="164" show="18" value="12"/>
+ <field name="kerberos.kvno" showname="kvno: 1" size="1" pos="169" show="1" value="01"/>
+ <field name="kerberos.cipher" showname="cipher: 5a2a14fec09c49807c1be2a0b335af26ed64f89184336870..." size="1046" pos="178" show="...elided..." value="...elided..."/>
+ </field>
+ </field>
+ <field name="kerberos.authenticator_element" showname="authenticator" size="198" pos="1227" show="" value="">
+ <field name="kerberos.etype" showname="etype: eTYPE-AES256-CTS-HMAC-SHA1-96 (18)" size="1" pos="1234" show="18" value="12"/>
+ <field name="kerberos.cipher" showname="cipher: 263f6091496efbdb7c0b3c7e40fa7bfbf2e284a38b105cb9..." size="184" pos="1241" show="...elided..." value="...elided..."/>
+ </field>
+ </field>
+ </field>
+ </field>
+ </field>
+ </field>
+ <field name="kerberos.req_body_element" showname="req-body" size="115" pos="1427" show="" value="">
+ <field name="ber.bitstring.padding" showname="Padding: 0" size="1" pos="1433" show="0" value="00"/>
+ <field name="kerberos.kdc_options" showname="kdc-options: 00010000 (canonicalize)" size="4" pos="1434" show="00:01:00:00" value="00010000">
+ <field name="kerberos.reserved" showname="0... .... = reserved: False" size="1" pos="1434" show="0" value="0" unmaskedvalue="00"/>
+ <field name="kerberos.forwardable" showname=".0.. .... = forwardable: False" size="1" pos="1434" show="0" value="0" unmaskedvalue="00"/>
+ <field name="kerberos.forwarded" showname="..0. .... = forwarded: False" size="1" pos="1434" show="0" value="0" unmaskedvalue="00"/>
+ <field name="kerberos.proxiable" showname="...0 .... = proxiable: False" size="1" pos="1434" show="0" value="0" unmaskedvalue="00"/>
+ <field name="kerberos.proxy" showname=".... 0... = proxy: False" size="1" pos="1434" show="0" value="0" unmaskedvalue="00"/>
+ <field name="kerberos.allow-postdate" showname=".... .0.. = allow-postdate: False" size="1" pos="1434" show="0" value="0" unmaskedvalue="00"/>
+ <field name="kerberos.postdated" showname=".... ..0. = postdated: False" size="1" pos="1434" show="0" value="0" unmaskedvalue="00"/>
+ <field name="kerberos.unused7" showname=".... ...0 = unused7: False" size="1" pos="1434" show="0" value="0" unmaskedvalue="00"/>
+ <field name="kerberos.renewable" showname="0... .... = renewable: False" size="1" pos="1435" show="0" value="0" unmaskedvalue="01"/>
+ <field name="kerberos.unused9" showname=".0.. .... = unused9: False" size="1" pos="1435" show="0" value="0" unmaskedvalue="01"/>
+ <field name="kerberos.unused10" showname="..0. .... = unused10: False" size="1" pos="1435" show="0" value="0" unmaskedvalue="01"/>
+ <field name="kerberos.opt-hardware-auth" showname="...0 .... = opt-hardware-auth: False" size="1" pos="1435" show="0" value="0" unmaskedvalue="01"/>
+ <field name="kerberos.request-anonymous" showname=".... ..0. = request-anonymous: False" size="1" pos="1435" show="0" value="0" unmaskedvalue="01"/>
+ <field name="kerberos.canonicalize" showname=".... ...1 = canonicalize: True" size="1" pos="1435" show="1" value="FFFFFFFF" unmaskedvalue="01"/>
+ <field name="kerberos.constrained-delegation" showname="0... .... = constrained-delegation: False" size="1" pos="1436" show="0" value="0" unmaskedvalue="00"/>
+ <field name="kerberos.disable-transited-check" showname="..0. .... = disable-transited-check: False" size="1" pos="1437" show="0" value="0" unmaskedvalue="00"/>
+ <field name="kerberos.renewable-ok" showname="...0 .... = renewable-ok: False" size="1" pos="1437" show="0" value="0" unmaskedvalue="00"/>
+ <field name="kerberos.enc-tkt-in-skey" showname=".... 0... = enc-tkt-in-skey: False" size="1" pos="1437" show="0" value="0" unmaskedvalue="00"/>
+ <field name="kerberos.renew" showname=".... ..0. = renew: False" size="1" pos="1437" show="0" value="0" unmaskedvalue="00"/>
+ <field name="kerberos.validate" showname=".... ...0 = validate: False" size="1" pos="1437" show="0" value="0" unmaskedvalue="00"/>
+ </field>
+ <field name="kerberos.realm" showname="realm: SAMBA.EXAMPLE.COM" size="17" pos="1442" show="SAMBA.EXAMPLE.COM" value="53414d42412e4558414d504c452e434f4d"/>
+ <field name="kerberos.sname_element" showname="sname" size="26" pos="1461" show="" value="">
+ <field name="kerberos.name_type" showname="name-type: kRB5-NT-PRINCIPAL (1)" size="1" pos="1467" show="1" value="01"/>
+ <field name="kerberos.name_string" showname="name-string: 2 items" size="15" pos="1472" show="2" value="1b046c6461701b076c6f63616c6463">
+ <field name="kerberos.KerberosString" showname="KerberosString: ldap" size="4" pos="1474" show="ldap" value="6c646170"/>
+ <field name="kerberos.KerberosString" showname="KerberosString: localdc" size="7" pos="1480" show="localdc" value="6c6f63616c6463"/>
+ </field>
+ </field>
+ <field name="kerberos.till" showname="till: 1970-01-01 00:00:00 (UTC)" size="15" pos="1491" show="1970-01-01 00:00:00 (UTC)" value="31393730303130313030303030305a"/>
+ <field name="kerberos.nonce" showname="nonce: 2568370640" size="4" pos="1510" show="2568370640" value="991639d0"/>
+ <field name="kerberos.etype" showname="etype: 8 items" size="24" pos="1518" show="8" value="020112020111020110020105020117020103020102020101">
+ <field name="kerberos.ENCTYPE" showname="ENCTYPE: eTYPE-AES256-CTS-HMAC-SHA1-96 (18)" size="1" pos="1520" show="18" value="12"/>
+ <field name="kerberos.ENCTYPE" showname="ENCTYPE: eTYPE-AES128-CTS-HMAC-SHA1-96 (17)" size="1" pos="1523" show="17" value="11"/>
+ <field name="kerberos.ENCTYPE" showname="ENCTYPE: eTYPE-DES3-CBC-SHA1 (16)" size="1" pos="1526" show="16" value="10"/>
+ <field name="kerberos.ENCTYPE" showname="ENCTYPE: eTYPE-DES3-CBC-MD5 (5)" size="1" pos="1529" show="5" value="05"/>
+ <field name="kerberos.ENCTYPE" showname="ENCTYPE: eTYPE-ARCFOUR-HMAC-MD5 (23)" size="1" pos="1532" show="23" value="17"/>
+ <field name="kerberos.ENCTYPE" showname="ENCTYPE: eTYPE-DES-CBC-MD5 (3)" size="1" pos="1535" show="3" value="03"/>
+ <field name="kerberos.ENCTYPE" showname="ENCTYPE: eTYPE-DES-CBC-MD4 (2)" size="1" pos="1538" show="2" value="02"/>
+ <field name="kerberos.ENCTYPE" showname="ENCTYPE: eTYPE-DES-CBC-CRC (1)" size="1" pos="1541" show="1" value="01"/>
+ </field>
+ </field>
+ </field>
+ </proto>
+</packet>
+
+
+<packet>
+ <proto name="geninfo" pos="0" showname="General information" size="1517">
+ <field name="num" pos="0" show="2429" showname="Number" value="97d" size="1517"/>
+ <field name="len" pos="0" show="1517" showname="Frame Length" value="5ed" size="1517"/>
+ <field name="caplen" pos="0" show="1517" showname="Captured Length" value="5ed" size="1517"/>
+ <field name="timestamp" pos="0" show="Feb 10, 2017 14:36:24.143220000 NZDT" showname="Captured Time" value="1486690584.143220000" size="1517"/>
+ </proto>
+ <proto name="frame" showname="Frame 2429: 1517 bytes on wire (12136 bits), 1517 bytes captured (12136 bits)" size="1517" pos="0">
+ <field name="frame.encap_type" showname="Encapsulation type: Raw IP (7)" size="0" pos="0" show="7"/>
+ <field name="frame.time" showname="Arrival Time: Feb 10, 2017 14:36:24.143220000 NZDT" size="0" pos="0" show="Feb 10, 2017 14:36:24.143220000 NZDT"/>
+ <field name="frame.offset_shift" showname="Time shift for this packet: 0.000000000 seconds" size="0" pos="0" show="0.000000000"/>
+ <field name="frame.time_epoch" showname="Epoch Time: 1486690584.143220000 seconds" size="0" pos="0" show="1486690584.143220000"/>
+ <field name="frame.time_delta" showname="Time delta from previous captured frame: 0.003735000 seconds" size="0" pos="0" show="0.003735000"/>
+ <field name="frame.time_delta_displayed" showname="Time delta from previous displayed frame: 0.003735000 seconds" size="0" pos="0" show="0.003735000"/>
+ <field name="frame.time_relative" showname="Time since reference or first frame: 7.612769000 seconds" size="0" pos="0" show="7.612769000"/>
+ <field name="frame.number" showname="Frame Number: 2429" size="0" pos="0" show="2429"/>
+ <field name="frame.len" showname="Frame Length: 1517 bytes (12136 bits)" size="0" pos="0" show="1517"/>
+ <field name="frame.cap_len" showname="Capture Length: 1517 bytes (12136 bits)" size="0" pos="0" show="1517"/>
+ <field name="frame.marked" showname="Frame is marked: False" size="0" pos="0" show="0"/>
+ <field name="frame.ignored" showname="Frame is ignored: False" size="0" pos="0" show="0"/>
+ <field name="frame.protocols" showname="Protocols in frame: raw:ip:tcp:kerberos" size="0" pos="0" show="raw:ip:tcp:kerberos"/>
+ </proto>
+ <proto name="raw" showname="Raw packet data" size="1517" pos="0"/>
+ <proto name="ip" showname="Internet Protocol Version 4, Src: 127.0.0.21, Dst: 127.0.0.11" size="20" pos="0">
+ <field name="ip.version" showname="0100 .... = Version: 4" size="1" pos="0" show="4" value="4" unmaskedvalue="45"/>
+ <field name="ip.hdr_len" showname=".... 0101 = Header Length: 20 bytes" size="1" pos="0" show="5" value="5" unmaskedvalue="45"/>
+ <field name="ip.dsfield" showname="Differentiated Services Field: 0x00 (DSCP: CS0, ECN: Not-ECT)" size="1" pos="1" show="0x00000000" value="00">
+ <field name="ip.dsfield.dscp" showname="0000 00.. = Differentiated Services Codepoint: Default (0)" size="1" pos="1" show="0" value="0" unmaskedvalue="00"/>
+ <field name="ip.dsfield.ecn" showname=".... ..00 = Explicit Congestion Notification: Not ECN-Capable Transport (0)" size="1" pos="1" show="0" value="0" unmaskedvalue="00"/>
+ </field>
+ <field name="ip.len" showname="Total Length: 1517" size="2" pos="2" show="1517" value="05ed"/>
+ <field name="ip.id" showname="Identification: 0xffff (65535)" size="2" pos="4" show="0x0000ffff" value="ffff"/>
+ <field name="ip.flags" showname="Flags: 0x02 (Don&#x27;t Fragment)" size="1" pos="6" show="0x00000002" value="40">
+ <field name="ip.flags.rb" showname="0... .... = Reserved bit: Not set" size="1" pos="6" show="0" value="40"/>
+ <field name="ip.flags.df" showname=".1.. .... = Don&#x27;t fragment: Set" size="1" pos="6" show="1" value="40"/>
+ <field name="ip.flags.mf" showname="..0. .... = More fragments: Not set" size="1" pos="6" show="0" value="40"/>
+ </field>
+ <field name="ip.frag_offset" showname="Fragment offset: 0" size="2" pos="6" show="0" value="4000"/>
+ <field name="ip.ttl" showname="Time to live: 255" size="1" pos="8" show="255" value="ff"/>
+ <field name="ip.proto" showname="Protocol: TCP (6)" size="1" pos="9" show="6" value="06"/>
+ <field name="ip.checksum" showname="Header checksum: 0x0000 [validation disabled]" size="2" pos="10" show="0x00000000" value="0000">
+ <field name="ip.checksum_good" showname="Good: False" size="2" pos="10" show="0" value="0000"/>
+ <field name="ip.checksum_bad" showname="Bad: False" size="2" pos="10" show="0" value="0000"/>
+ </field>
+ <field name="ip.src" showname="Source: 127.0.0.21" size="4" pos="12" show="127.0.0.21" value="7f000015"/>
+ <field name="ip.addr" showname="Source or Destination Address: 127.0.0.21" hide="yes" size="4" pos="12" show="127.0.0.21" value="7f000015"/>
+ <field name="ip.src_host" showname="Source Host: 127.0.0.21" hide="yes" size="4" pos="12" show="127.0.0.21" value="7f000015"/>
+ <field name="ip.host" showname="Source or Destination Host: 127.0.0.21" hide="yes" size="4" pos="12" show="127.0.0.21" value="7f000015"/>
+ <field name="ip.dst" showname="Destination: 127.0.0.11" size="4" pos="16" show="127.0.0.11" value="7f00000b"/>
+ <field name="ip.addr" showname="Source or Destination Address: 127.0.0.11" hide="yes" size="4" pos="16" show="127.0.0.11" value="7f00000b"/>
+ <field name="ip.dst_host" showname="Destination Host: 127.0.0.11" hide="yes" size="4" pos="16" show="127.0.0.11" value="7f00000b"/>
+ <field name="ip.host" showname="Source or Destination Host: 127.0.0.11" hide="yes" size="4" pos="16" show="127.0.0.11" value="7f00000b"/>
+ <field name="" show="Source GeoIP: Unknown" size="4" pos="12" value="7f000015"/>
+ <field name="" show="Destination GeoIP: Unknown" size="4" pos="16" value="7f00000b"/>
+ </proto>
+ <proto name="tcp" showname="Transmission Control Protocol, Src Port: 88 (88), Dst Port: 14788 (14788), Seq: 1, Ack: 1543, Len: 1477" size="20" pos="20">
+ <field name="tcp.srcport" showname="Source Port: 88" size="2" pos="20" show="88" value="0058"/>
+ <field name="tcp.dstport" showname="Destination Port: 14788" size="2" pos="22" show="14788" value="39c4"/>
+ <field name="tcp.port" showname="Source or Destination Port: 88" hide="yes" size="2" pos="20" show="88" value="0058"/>
+ <field name="tcp.port" showname="Source or Destination Port: 14788" hide="yes" size="2" pos="22" show="14788" value="39c4"/>
+ <field name="tcp.stream" showname="Stream index: 50" size="0" pos="20" show="50"/>
+ <field name="tcp.len" showname="TCP Segment Len: 1477" size="1" pos="32" show="1477" value="50"/>
+ <field name="tcp.seq" showname="Sequence number: 1 (relative sequence number)" size="4" pos="24" show="1" value="00000001"/>
+ <field name="tcp.nxtseq" showname="Next sequence number: 1478 (relative sequence number)" size="0" pos="20" show="1478"/>
+ <field name="tcp.ack" showname="Acknowledgment number: 1543 (relative ack number)" size="4" pos="28" show="1543" value="00000607"/>
+ <field name="tcp.hdr_len" showname="Header Length: 20 bytes" size="1" pos="32" show="20" value="50"/>
+ <field name="tcp.flags" showname="Flags: 0x018 (PSH, ACK)" size="2" pos="32" show="0x00000018" value="18" unmaskedvalue="5018">
+ <field name="tcp.flags.res" showname="000. .... .... = Reserved: Not set" size="1" pos="32" show="0" value="0" unmaskedvalue="50"/>
+ <field name="tcp.flags.ns" showname="...0 .... .... = Nonce: Not set" size="1" pos="32" show="0" value="0" unmaskedvalue="50"/>
+ <field name="tcp.flags.cwr" showname=".... 0... .... = Congestion Window Reduced (CWR): Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.ecn" showname=".... .0.. .... = ECN-Echo: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.urg" showname=".... ..0. .... = Urgent: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.ack" showname=".... ...1 .... = Acknowledgment: Set" size="1" pos="33" show="1" value="FFFFFFFF" unmaskedvalue="18"/>
+ <field name="tcp.flags.push" showname=".... .... 1... = Push: Set" size="1" pos="33" show="1" value="FFFFFFFF" unmaskedvalue="18"/>
+ <field name="tcp.flags.reset" showname=".... .... .0.. = Reset: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.syn" showname=".... .... ..0. = Syn: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.fin" showname=".... .... ...0 = Fin: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.str" showname="TCP Flags: *******AP***" size="2" pos="32" show="*******AP***" value="5018"/>
+ </field>
+ <field name="tcp.window_size_value" showname="Window size value: 32767" size="2" pos="34" show="32767" value="7fff"/>
+ <field name="tcp.window_size" showname="Calculated window size: 32767" size="2" pos="34" show="32767" value="7fff"/>
+ <field name="tcp.window_size_scalefactor" showname="Window size scaling factor: -2 (no window scaling used)" size="2" pos="34" show="-2" value="7fff"/>
+ <field name="tcp.checksum" showname="Checksum: 0x0000 [validation disabled]" size="2" pos="36" show="0x00000000" value="0000">
+ <field name="tcp.checksum_good" showname="Good Checksum: False" size="2" pos="36" show="0" value="0000"/>
+ <field name="tcp.checksum_bad" showname="Bad Checksum: False" size="2" pos="36" show="0" value="0000"/>
+ </field>
+ <field name="tcp.urgent_pointer" showname="Urgent pointer: 0" size="2" pos="38" show="0" value="0000"/>
+ <field name="tcp.analysis" showname="SEQ/ACK analysis" size="0" pos="20" show="" value="">
+ <field name="tcp.analysis.acks_frame" showname="This is an ACK to the segment in frame: 2422" size="0" pos="20" show="2422"/>
+ <field name="tcp.analysis.ack_rtt" showname="The RTT to ACK the segment was: 0.003842000 seconds" size="0" pos="20" show="0.003842000"/>
+ <field name="tcp.analysis.initial_rtt" showname="iRTT: 0.000009000 seconds" size="0" pos="20" show="0.000009000"/>
+ <field name="tcp.analysis.bytes_in_flight" showname="Bytes in flight: 1477" size="0" pos="20" show="1477"/>
+ </field>
+ <field name="tcp.pdu.size" showname="PDU Size: 1477" size="1477" pos="40" show="1477" value="...elided..."/>
+ </proto>
+ <proto name="kerberos" showname="Kerberos" size="1477" pos="40">
+ <field name="" show="Record Mark: 1473 bytes" size="4" pos="40" value="000005c1">
+ <field name="kerberos.rm.reserved" showname="0... .... .... .... .... .... .... .... = Reserved: Not set" size="4" pos="40" show="0" value="0" unmaskedvalue="000005c1"/>
+ <field name="kerberos.rm.length" showname=".000 0000 0000 0000 0000 0101 1100 0001 = Record Length: 1473" size="4" pos="40" show="1473" value="5C1" unmaskedvalue="000005c1"/>
+ </field>
+ <field name="kerberos.tgs_rep_element" showname="tgs-rep" size="1469" pos="48" show="" value="">
+ <field name="kerberos.pvno" showname="pvno: 5" size="1" pos="56" show="5" value="05"/>
+ <field name="kerberos.msg_type" showname="msg-type: krb-tgs-rep (13)" size="1" pos="61" show="13" value="0d"/>
+ <field name="kerberos.crealm" showname="crealm: SAMBA.EXAMPLE.COM" size="17" pos="66" show="SAMBA.EXAMPLE.COM" value="53414d42412e4558414d504c452e434f4d"/>
+ <field name="kerberos.cname_element" showname="cname" size="26" pos="85" show="" value="">
+ <field name="kerberos.name_type" showname="name-type: kRB5-NT-PRINCIPAL (1)" size="1" pos="91" show="1" value="01"/>
+ <field name="kerberos.name_string" showname="name-string: 1 item" size="15" pos="96" show="1" value="1b0d41646d696e6973747261746f72">
+ <field name="kerberos.KerberosString" showname="KerberosString: Administrator" size="13" pos="98" show="Administrator" value="41646d696e6973747261746f72"/>
+ </field>
+ </field>
+ <field name="kerberos.ticket_element" showname="ticket" size="1149" pos="119" show="" value="">
+ <field name="kerberos.tkt_vno" showname="tkt-vno: 5" size="1" pos="127" show="5" value="05"/>
+ <field name="kerberos.realm" showname="realm: SAMBA.EXAMPLE.COM" size="17" pos="132" show="SAMBA.EXAMPLE.COM" value="53414d42412e4558414d504c452e434f4d"/>
+ <field name="kerberos.sname_element" showname="sname" size="26" pos="151" show="" value="">
+ <field name="kerberos.name_type" showname="name-type: kRB5-NT-PRINCIPAL (1)" size="1" pos="157" show="1" value="01"/>
+ <field name="kerberos.name_string" showname="name-string: 2 items" size="15" pos="162" show="2" value="1b046c6461701b076c6f63616c6463">
+ <field name="kerberos.KerberosString" showname="KerberosString: ldap" size="4" pos="164" show="ldap" value="6c646170"/>
+ <field name="kerberos.KerberosString" showname="KerberosString: localdc" size="7" pos="170" show="localdc" value="6c6f63616c6463"/>
+ </field>
+ </field>
+ <field name="kerberos.enc_part_element" showname="enc-part" size="1087" pos="181" show="" value="">
+ <field name="kerberos.etype" showname="etype: eTYPE-AES256-CTS-HMAC-SHA1-96 (18)" size="1" pos="189" show="18" value="12"/>
+ <field name="kerberos.kvno" showname="kvno: 1" size="1" pos="194" show="1" value="01"/>
+ <field name="kerberos.cipher" showname="cipher: 9cbdd51b88f631bfc183eee24f54171f1e6222ebd70ef513..." size="1065" pos="203" show="...elided..." value="...elided..."/>
+ </field>
+ </field>
+ <field name="kerberos.enc_part_element" showname="enc-part" size="246" pos="1271" show="" value="">
+ <field name="kerberos.etype" showname="etype: eTYPE-AES256-CTS-HMAC-SHA1-96 (18)" size="1" pos="1278" show="18" value="12"/>
+ <field name="kerberos.cipher" showname="cipher: 144b5a45ac6ad8fd830d6f7ec0b00a5cf26d277598a63a5e..." size="232" pos="1285" show="...elided..." value="...elided..."/>
+ </field>
+ </field>
+ </proto>
+</packet>
+
+<packet>
+ <proto name="geninfo" pos="0" showname="General information" size="138">
+ <field name="num" pos="0" show="3105" showname="Number" value="c21" size="138"/>
+ <field name="len" pos="0" show="138" showname="Frame Length" value="8a" size="138"/>
+ <field name="caplen" pos="0" show="138" showname="Captured Length" value="8a" size="138"/>
+ <field name="timestamp" pos="0" show="Feb 10, 2017 14:36:24.770344000 NZDT" showname="Captured Time" value="1486690584.770344000" size="138"/>
+ </proto>
+ <proto name="frame" showname="Frame 3105: 138 bytes on wire (1104 bits), 138 bytes captured (1104 bits)" size="138" pos="0">
+ <field name="frame.encap_type" showname="Encapsulation type: Raw IP (7)" size="0" pos="0" show="7"/>
+ <field name="frame.time" showname="Arrival Time: Feb 10, 2017 14:36:24.770344000 NZDT" size="0" pos="0" show="Feb 10, 2017 14:36:24.770344000 NZDT"/>
+ <field name="frame.offset_shift" showname="Time shift for this packet: 0.000000000 seconds" size="0" pos="0" show="0.000000000"/>
+ <field name="frame.time_epoch" showname="Epoch Time: 1486690584.770344000 seconds" size="0" pos="0" show="1486690584.770344000"/>
+ <field name="frame.time_delta" showname="Time delta from previous captured frame: 0.000005000 seconds" size="0" pos="0" show="0.000005000"/>
+ <field name="frame.time_delta_displayed" showname="Time delta from previous displayed frame: 0.000005000 seconds" size="0" pos="0" show="0.000005000"/>
+ <field name="frame.time_relative" showname="Time since reference or first frame: 8.239893000 seconds" size="0" pos="0" show="8.239893000"/>
+ <field name="frame.number" showname="Frame Number: 3105" size="0" pos="0" show="3105"/>
+ <field name="frame.len" showname="Frame Length: 138 bytes (1104 bits)" size="0" pos="0" show="138"/>
+ <field name="frame.cap_len" showname="Capture Length: 138 bytes (1104 bits)" size="0" pos="0" show="138"/>
+ <field name="frame.marked" showname="Frame is marked: False" size="0" pos="0" show="0"/>
+ <field name="frame.ignored" showname="Frame is ignored: False" size="0" pos="0" show="0"/>
+ <field name="frame.protocols" showname="Protocols in frame: raw:ip:tcp:ldap:gss-api:spnego:spnego-krb5" size="0" pos="0" show="raw:ip:tcp:ldap:gss-api:spnego:spnego-krb5"/>
+ </proto>
+ <proto name="raw" showname="Raw packet data" size="138" pos="0"/>
+ <proto name="ip" showname="Internet Protocol Version 4, Src: 127.0.0.11, Dst: 127.0.0.21" size="20" pos="0">
+ <field name="ip.version" showname="0100 .... = Version: 4" size="1" pos="0" show="4" value="4" unmaskedvalue="45"/>
+ <field name="ip.hdr_len" showname=".... 0101 = Header Length: 20 bytes" size="1" pos="0" show="5" value="5" unmaskedvalue="45"/>
+ <field name="ip.dsfield" showname="Differentiated Services Field: 0x00 (DSCP: CS0, ECN: Not-ECT)" size="1" pos="1" show="0x00000000" value="00">
+ <field name="ip.dsfield.dscp" showname="0000 00.. = Differentiated Services Codepoint: Default (0)" size="1" pos="1" show="0" value="0" unmaskedvalue="00"/>
+ <field name="ip.dsfield.ecn" showname=".... ..00 = Explicit Congestion Notification: Not ECN-Capable Transport (0)" size="1" pos="1" show="0" value="0" unmaskedvalue="00"/>
+ </field>
+ <field name="ip.len" showname="Total Length: 138" size="2" pos="2" show="138" value="008a"/>
+ <field name="ip.id" showname="Identification: 0xffff (65535)" size="2" pos="4" show="0x0000ffff" value="ffff"/>
+ <field name="ip.flags" showname="Flags: 0x02 (Don&#x27;t Fragment)" size="1" pos="6" show="0x00000002" value="40">
+ <field name="ip.flags.rb" showname="0... .... = Reserved bit: Not set" size="1" pos="6" show="0" value="40"/>
+ <field name="ip.flags.df" showname=".1.. .... = Don&#x27;t fragment: Set" size="1" pos="6" show="1" value="40"/>
+ <field name="ip.flags.mf" showname="..0. .... = More fragments: Not set" size="1" pos="6" show="0" value="40"/>
+ </field>
+ <field name="ip.frag_offset" showname="Fragment offset: 0" size="2" pos="6" show="0" value="4000"/>
+ <field name="ip.ttl" showname="Time to live: 255" size="1" pos="8" show="255" value="ff"/>
+ <field name="ip.proto" showname="Protocol: TCP (6)" size="1" pos="9" show="6" value="06"/>
+ <field name="ip.checksum" showname="Header checksum: 0x0000 [validation disabled]" size="2" pos="10" show="0x00000000" value="0000">
+ <field name="ip.checksum_good" showname="Good: False" size="2" pos="10" show="0" value="0000"/>
+ <field name="ip.checksum_bad" showname="Bad: False" size="2" pos="10" show="0" value="0000"/>
+ </field>
+ <field name="ip.src" showname="Source: 127.0.0.11" size="4" pos="12" show="127.0.0.11" value="7f00000b"/>
+ <field name="ip.addr" showname="Source or Destination Address: 127.0.0.11" hide="yes" size="4" pos="12" show="127.0.0.11" value="7f00000b"/>
+ <field name="ip.src_host" showname="Source Host: 127.0.0.11" hide="yes" size="4" pos="12" show="127.0.0.11" value="7f00000b"/>
+ <field name="ip.host" showname="Source or Destination Host: 127.0.0.11" hide="yes" size="4" pos="12" show="127.0.0.11" value="7f00000b"/>
+ <field name="ip.dst" showname="Destination: 127.0.0.21" size="4" pos="16" show="127.0.0.21" value="7f000015"/>
+ <field name="ip.addr" showname="Source or Destination Address: 127.0.0.21" hide="yes" size="4" pos="16" show="127.0.0.21" value="7f000015"/>
+ <field name="ip.dst_host" showname="Destination Host: 127.0.0.21" hide="yes" size="4" pos="16" show="127.0.0.21" value="7f000015"/>
+ <field name="ip.host" showname="Source or Destination Host: 127.0.0.21" hide="yes" size="4" pos="16" show="127.0.0.21" value="7f000015"/>
+ <field name="" show="Source GeoIP: Unknown" size="4" pos="12" value="7f00000b"/>
+ <field name="" show="Destination GeoIP: Unknown" size="4" pos="16" value="7f000015"/>
+ </proto>
+ <proto name="tcp" showname="Transmission Control Protocol, Src Port: 14794 (14794), Dst Port: 389 (389), Seq: 6184, Ack: 332, Len: 98" size="20" pos="20">
+ <field name="tcp.srcport" showname="Source Port: 14794" size="2" pos="20" show="14794" value="39ca"/>
+ <field name="tcp.dstport" showname="Destination Port: 389" size="2" pos="22" show="389" value="0185"/>
+ <field name="tcp.port" showname="Source or Destination Port: 14794" hide="yes" size="2" pos="20" show="14794" value="39ca"/>
+ <field name="tcp.port" showname="Source or Destination Port: 389" hide="yes" size="2" pos="22" show="389" value="0185"/>
+ <field name="tcp.stream" showname="Stream index: 60" size="0" pos="20" show="60"/>
+ <field name="tcp.len" showname="TCP Segment Len: 98" size="1" pos="32" show="98" value="50"/>
+ <field name="tcp.seq" showname="Sequence number: 6184 (relative sequence number)" size="4" pos="24" show="6184" value="00001828"/>
+ <field name="tcp.nxtseq" showname="Next sequence number: 6282 (relative sequence number)" size="0" pos="20" show="6282"/>
+ <field name="tcp.ack" showname="Acknowledgment number: 332 (relative ack number)" size="4" pos="28" show="332" value="0000014c"/>
+ <field name="tcp.hdr_len" showname="Header Length: 20 bytes" size="1" pos="32" show="20" value="50"/>
+ <field name="tcp.flags" showname="Flags: 0x018 (PSH, ACK)" size="2" pos="32" show="0x00000018" value="18" unmaskedvalue="5018">
+ <field name="tcp.flags.res" showname="000. .... .... = Reserved: Not set" size="1" pos="32" show="0" value="0" unmaskedvalue="50"/>
+ <field name="tcp.flags.ns" showname="...0 .... .... = Nonce: Not set" size="1" pos="32" show="0" value="0" unmaskedvalue="50"/>
+ <field name="tcp.flags.cwr" showname=".... 0... .... = Congestion Window Reduced (CWR): Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.ecn" showname=".... .0.. .... = ECN-Echo: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.urg" showname=".... ..0. .... = Urgent: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.ack" showname=".... ...1 .... = Acknowledgment: Set" size="1" pos="33" show="1" value="FFFFFFFF" unmaskedvalue="18"/>
+ <field name="tcp.flags.push" showname=".... .... 1... = Push: Set" size="1" pos="33" show="1" value="FFFFFFFF" unmaskedvalue="18"/>
+ <field name="tcp.flags.reset" showname=".... .... .0.. = Reset: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.syn" showname=".... .... ..0. = Syn: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.fin" showname=".... .... ...0 = Fin: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.str" showname="TCP Flags: *******AP***" size="2" pos="32" show="*******AP***" value="5018"/>
+ </field>
+ <field name="tcp.window_size_value" showname="Window size value: 32767" size="2" pos="34" show="32767" value="7fff"/>
+ <field name="tcp.window_size" showname="Calculated window size: 32767" size="2" pos="34" show="32767" value="7fff"/>
+ <field name="tcp.window_size_scalefactor" showname="Window size scaling factor: -2 (no window scaling used)" size="2" pos="34" show="-2" value="7fff"/>
+ <field name="tcp.checksum" showname="Checksum: 0x0000 [validation disabled]" size="2" pos="36" show="0x00000000" value="0000">
+ <field name="tcp.checksum_good" showname="Good Checksum: False" size="2" pos="36" show="0" value="0000"/>
+ <field name="tcp.checksum_bad" showname="Bad Checksum: False" size="2" pos="36" show="0" value="0000"/>
+ </field>
+ <field name="tcp.urgent_pointer" showname="Urgent pointer: 0" size="2" pos="38" show="0" value="0000"/>
+ <field name="tcp.analysis" showname="SEQ/ACK analysis" size="0" pos="20" show="" value="">
+ <field name="tcp.analysis.initial_rtt" showname="iRTT: 0.000020000 seconds" size="0" pos="20" show="0.000020000"/>
+ <field name="tcp.analysis.bytes_in_flight" showname="Bytes in flight: 3098" size="0" pos="20" show="3098"/>
+ </field>
+ <field name="tcp.pdu.size" showname="PDU Size: 3098" size="3098" pos="20" show="3098" value="...elided..."/>
+ <field name="tcp.segment_data" showname="TCP segment data (98 bytes)" size="98" pos="40" show="...elided..." value="...elided..."/>
+ </proto>
+ <proto name="fake-field-wrapper">
+ <field name="tcp.segments" showname="3 Reassembled TCP Segments (3098 bytes): #3103(1500), #3104(1500), #3105(98)" size="3098" pos="0" show="" value="">
+ <field name="tcp.segment" showname="Frame: 3103, payload: 0-1499 (1500 bytes)" size="1500" pos="0" show="3103" value="...elided..."/>
+ <field name="tcp.segment" showname="Frame: 3104, payload: 1500-2999 (1500 bytes)" size="1500" pos="1500" show="3104" value="...elided..."/>
+ <field name="tcp.segment" showname="Frame: 3105, payload: 3000-3097 (98 bytes)" size="98" pos="3000" show="3105" value="...elided..."/>
+ <field name="tcp.segment.count" showname="Segment count: 3" size="0" pos="0" show="3"/>
+ <field name="tcp.reassembled.length" showname="Reassembled TCP length: 3098" size="0" pos="0" show="3098"/>
+<field name="tcp.reassembled.data" showname="Reassembled TCP Data: 30820c1602010360820c0f0201030400a3820c06040a4753..." size="3098" pos="0" show="...elided ..."/>
+ </field>
+</proto>
+ <proto name="ldap" showname="Lightweight Directory Access Protocol" size="3098" pos="0">
+ <field name="ldap.LDAPMessage_element" showname="LDAPMessage bindRequest(3) &quot;&lt;ROOT&gt;&quot; sasl" size="3098" pos="0" show="" value="">
+ <field name="ldap.messageID" showname="messageID: 3" size="1" pos="6" show="3" value="03"/>
+ <field name="ldap.protocolOp" showname="protocolOp: bindRequest (0)" size="3091" pos="7" show="0" value="...elided...">
+ <field name="ldap.bindRequest_element" showname="bindRequest" size="3087" pos="11" show="" value="">
+ <field name="ldap.version" showname="version: 3" size="1" pos="13" show="3" value="03"/>
+ <field name="ldap.name" showname="name: " size="0" pos="16" show=""/>
+ <field name="ldap.authentication" showname="authentication: sasl (3)" size="3078" pos="20" show="3" value="...elided...">
+ <field name="ldap.sasl_element" showname="sasl" size="3078" pos="20" show="" value="">
+ <field name="ldap.mechanism" showname="mechanism: GSS-SPNEGO" size="10" pos="22" show="GSS-SPNEGO" value="4753532d53504e45474f"/>
+<field name="ldap.credentials" showname="credentials: 60820bf206062b0601050502a0820be630820be2a0243022..." size="3062" pos="36" show="...elided..."/>
+ <proto name="gss-api" showname="GSS-API Generic Security Service Application Program Interface" size="3062" pos="36">
+ <field name="gss-api.OID" showname="OID: 1.3.6.1.5.5.2 (SPNEGO - Simple Protected Negotiation)" size="6" pos="42" show="1.3.6.1.5.5.2" value="2b0601050502"/>
+ <proto name="spnego" showname="Simple Protected Negotiation" size="3050" pos="48">
+ <field name="spnego.negTokenInit_element" showname="negTokenInit" size="3046" pos="52" show="" value="">
+ <field name="spnego.mechTypes" showname="mechTypes: 3 items" size="34" pos="60" show="3" value="06092a864882f71201020206092a864886f712010202060a2b06010401823702020a">
+ <field name="spnego.MechType" showname="MechType: 1.2.840.48018.1.2.2 (MS KRB5 - Microsoft Kerberos 5)" size="9" pos="62" show="1.2.840.48018.1.2.2" value="2a864882f712010202"/>
+ <field name="spnego.MechType" showname="MechType: 1.2.840.113554.1.2.2 (KRB5 - Kerberos 5)" size="9" pos="73" show="1.2.840.113554.1.2.2" value="2a864886f712010202"/>
+ <field name="spnego.MechType" showname="MechType: 1.3.6.1.4.1.311.2.2.10 (NTLMSSP - Microsoft NTLM Security Support Provider)" size="10" pos="84" show="1.3.6.1.4.1.311.2.2.10" value="2b06010401823702020a"/>
+ </field>
+ <field name="spnego.mechToken" showname="mechToken: 60820bb006092a864886f71201020201006e820b9f30820b..." size="2996" pos="102" show="... elided ..."/>
+<field name="spnego.krb5.blob" showname="krb5_blob: 60820bb006092a864886f71201020201006e820b9f30820b..." size="2979" pos="102" show="...elided...">
+ <field name="spnego.krb5_oid" showname="KRB5 OID: 1.2.840.113554.1.2.2 (KRB5 - Kerberos 5)" size="9" pos="108" show="1.2.840.113554.1.2.2" value="2a864886f712010202"/>
+ <field name="spnego.krb5.tok_id" showname="krb5_tok_id: KRB5_AP_REQ (0x0001)" size="2" pos="117" show="0x00000001" value="0100"/>
+ <proto name="kerberos" showname="Kerberos" size="2979" pos="119">
+ <field name="kerberos.ap_req_element" showname="ap-req" size="2975" pos="123" show="" value="">
+ <field name="kerberos.pvno" showname="pvno: 5" size="1" pos="131" show="5" value="05"/>
+ <field name="kerberos.msg_type" showname="msg-type: krb-ap-req (14)" size="1" pos="136" show="14" value="0e"/>
+ <field name="ber.bitstring.padding" showname="Padding: 0" size="1" pos="141" show="0" value="00"/>
+ <field name="kerberos.ap_options" showname="ap-options: 20000000 (mutual-required)" size="4" pos="142" show="20:00:00:00" value="20000000">
+ <field name="kerberos.reserved" showname="0... .... = reserved: False" size="1" pos="142" show="0" value="0" unmaskedvalue="20"/>
+ <field name="kerberos.use-session-key" showname=".0.. .... = use-session-key: False" size="1" pos="142" show="0" value="0" unmaskedvalue="20"/>
+ <field name="kerberos.mutual-required" showname="..1. .... = mutual-required: True" size="1" pos="142" show="1" value="FFFFFFFF" unmaskedvalue="20"/>
+ </field>
+ <field name="kerberos.ticket_element" showname="ticket" size="1149" pos="154" show="" value="">
+ <field name="kerberos.tkt_vno" showname="tkt-vno: 5" size="1" pos="162" show="5" value="05"/>
+ <field name="kerberos.realm" showname="realm: SAMBA.EXAMPLE.COM" size="17" pos="167" show="SAMBA.EXAMPLE.COM" value="53414d42412e4558414d504c452e434f4d"/>
+ <field name="kerberos.sname_element" showname="sname" size="26" pos="186" show="" value="">
+ <field name="kerberos.name_type" showname="name-type: kRB5-NT-PRINCIPAL (1)" size="1" pos="192" show="1" value="01"/>
+ <field name="kerberos.name_string" showname="name-string: 2 items" size="15" pos="197" show="2" value="1b046c6461701b076c6f63616c6463">
+ <field name="kerberos.KerberosString" showname="KerberosString: ldap" size="4" pos="199" show="ldap" value="6c646170"/>
+ <field name="kerberos.KerberosString" showname="KerberosString: localdc" size="7" pos="205" show="localdc" value="6c6f63616c6463"/>
+ </field>
+ </field>
+ <field name="kerberos.enc_part_element" showname="enc-part" size="1087" pos="216" show="" value="">
+ <field name="kerberos.etype" showname="etype: eTYPE-AES256-CTS-HMAC-SHA1-96 (18)" size="1" pos="224" show="18" value="12"/>
+ <field name="kerberos.kvno" showname="kvno: 1" size="1" pos="229" show="1" value="01"/>
+ <field name="kerberos.cipher" showname="cipher: 024239fcb8e525339bcf284915f78b5e83507ed9ab592579..." size="1065" pos="238" show="...elided..."/>
+ </field>
+ </field>
+ <field name="kerberos.authenticator_element" showname="authenticator" size="1791" pos="1307" show="" value="">
+ <field name="kerberos.etype" showname="etype: eTYPE-AES256-CTS-HMAC-SHA1-96 (18)" size="1" pos="1315" show="18" value="12"/>
+ <field name="kerberos.cipher" showname="cipher: fce1dd0bc30bb4341ecc246b1a495b189ed13aec7c2c304c..." size="1774" pos="1324" show="...elided..."/>
+ </field>
+ </field>
+ </proto>
+ </field>
+ </field>
+ </proto>
+ </proto>
+ </field>
+ </field>
+ </field>
+ </field>
+ </field>
+ </proto>
+</packet>
+
+
+<packet>
+ <proto name="geninfo" pos="0" showname="General information" size="245">
+ <field name="num" pos="0" show="3110" showname="Number" value="c26" size="245"/>
+ <field name="len" pos="0" show="245" showname="Frame Length" value="f5" size="245"/>
+ <field name="caplen" pos="0" show="245" showname="Captured Length" value="f5" size="245"/>
+ <field name="timestamp" pos="0" show="Feb 10, 2017 14:36:24.774978000 NZDT" showname="Captured Time" value="1486690584.774978000" size="245"/>
+ </proto>
+ <proto name="frame" showname="Frame 3110: 245 bytes on wire (1960 bits), 245 bytes captured (1960 bits)" size="245" pos="0">
+ <field name="frame.encap_type" showname="Encapsulation type: Raw IP (7)" size="0" pos="0" show="7"/>
+ <field name="frame.time" showname="Arrival Time: Feb 10, 2017 14:36:24.774978000 NZDT" size="0" pos="0" show="Feb 10, 2017 14:36:24.774978000 NZDT"/>
+ <field name="frame.offset_shift" showname="Time shift for this packet: 0.000000000 seconds" size="0" pos="0" show="0.000000000"/>
+ <field name="frame.time_epoch" showname="Epoch Time: 1486690584.774978000 seconds" size="0" pos="0" show="1486690584.774978000"/>
+ <field name="frame.time_delta" showname="Time delta from previous captured frame: 0.004542000 seconds" size="0" pos="0" show="0.004542000"/>
+ <field name="frame.time_delta_displayed" showname="Time delta from previous displayed frame: 0.004542000 seconds" size="0" pos="0" show="0.004542000"/>
+ <field name="frame.time_relative" showname="Time since reference or first frame: 8.244527000 seconds" size="0" pos="0" show="8.244527000"/>
+ <field name="frame.number" showname="Frame Number: 3110" size="0" pos="0" show="3110"/>
+ <field name="frame.len" showname="Frame Length: 245 bytes (1960 bits)" size="0" pos="0" show="245"/>
+ <field name="frame.cap_len" showname="Capture Length: 245 bytes (1960 bits)" size="0" pos="0" show="245"/>
+ <field name="frame.marked" showname="Frame is marked: False" size="0" pos="0" show="0"/>
+ <field name="frame.ignored" showname="Frame is ignored: False" size="0" pos="0" show="0"/>
+ <field name="frame.protocols" showname="Protocols in frame: raw:ip:tcp:ldap:spnego:spnego-krb5" size="0" pos="0" show="raw:ip:tcp:ldap:spnego:spnego-krb5"/>
+ </proto>
+ <proto name="raw" showname="Raw packet data" size="245" pos="0"/>
+ <proto name="ip" showname="Internet Protocol Version 4, Src: 127.0.0.21, Dst: 127.0.0.11" size="20" pos="0">
+ <field name="ip.version" showname="0100 .... = Version: 4" size="1" pos="0" show="4" value="4" unmaskedvalue="45"/>
+ <field name="ip.hdr_len" showname=".... 0101 = Header Length: 20 bytes" size="1" pos="0" show="5" value="5" unmaskedvalue="45"/>
+ <field name="ip.dsfield" showname="Differentiated Services Field: 0x00 (DSCP: CS0, ECN: Not-ECT)" size="1" pos="1" show="0x00000000" value="00">
+ <field name="ip.dsfield.dscp" showname="0000 00.. = Differentiated Services Codepoint: Default (0)" size="1" pos="1" show="0" value="0" unmaskedvalue="00"/>
+ <field name="ip.dsfield.ecn" showname=".... ..00 = Explicit Congestion Notification: Not ECN-Capable Transport (0)" size="1" pos="1" show="0" value="0" unmaskedvalue="00"/>
+ </field>
+ <field name="ip.len" showname="Total Length: 245" size="2" pos="2" show="245" value="00f5"/>
+ <field name="ip.id" showname="Identification: 0xffff (65535)" size="2" pos="4" show="0x0000ffff" value="ffff"/>
+ <field name="ip.flags" showname="Flags: 0x02 (Don&#x27;t Fragment)" size="1" pos="6" show="0x00000002" value="40">
+ <field name="ip.flags.rb" showname="0... .... = Reserved bit: Not set" size="1" pos="6" show="0" value="40"/>
+ <field name="ip.flags.df" showname=".1.. .... = Don&#x27;t fragment: Set" size="1" pos="6" show="1" value="40"/>
+ <field name="ip.flags.mf" showname="..0. .... = More fragments: Not set" size="1" pos="6" show="0" value="40"/>
+ </field>
+ <field name="ip.frag_offset" showname="Fragment offset: 0" size="2" pos="6" show="0" value="4000"/>
+ <field name="ip.ttl" showname="Time to live: 255" size="1" pos="8" show="255" value="ff"/>
+ <field name="ip.proto" showname="Protocol: TCP (6)" size="1" pos="9" show="6" value="06"/>
+ <field name="ip.checksum" showname="Header checksum: 0x0000 [validation disabled]" size="2" pos="10" show="0x00000000" value="0000">
+ <field name="ip.checksum_good" showname="Good: False" size="2" pos="10" show="0" value="0000"/>
+ <field name="ip.checksum_bad" showname="Bad: False" size="2" pos="10" show="0" value="0000"/>
+ </field>
+ <field name="ip.src" showname="Source: 127.0.0.21" size="4" pos="12" show="127.0.0.21" value="7f000015"/>
+ <field name="ip.addr" showname="Source or Destination Address: 127.0.0.21" hide="yes" size="4" pos="12" show="127.0.0.21" value="7f000015"/>
+ <field name="ip.src_host" showname="Source Host: 127.0.0.21" hide="yes" size="4" pos="12" show="127.0.0.21" value="7f000015"/>
+ <field name="ip.host" showname="Source or Destination Host: 127.0.0.21" hide="yes" size="4" pos="12" show="127.0.0.21" value="7f000015"/>
+ <field name="ip.dst" showname="Destination: 127.0.0.11" size="4" pos="16" show="127.0.0.11" value="7f00000b"/>
+ <field name="ip.addr" showname="Source or Destination Address: 127.0.0.11" hide="yes" size="4" pos="16" show="127.0.0.11" value="7f00000b"/>
+ <field name="ip.dst_host" showname="Destination Host: 127.0.0.11" hide="yes" size="4" pos="16" show="127.0.0.11" value="7f00000b"/>
+ <field name="ip.host" showname="Source or Destination Host: 127.0.0.11" hide="yes" size="4" pos="16" show="127.0.0.11" value="7f00000b"/>
+ <field name="" show="Source GeoIP: Unknown" size="4" pos="12" value="7f000015"/>
+ <field name="" show="Destination GeoIP: Unknown" size="4" pos="16" value="7f00000b"/>
+ </proto>
+ <proto name="tcp" showname="Transmission Control Protocol, Src Port: 389 (389), Dst Port: 14794 (14794), Seq: 332, Ack: 6282, Len: 205" size="20" pos="20">
+ <field name="tcp.srcport" showname="Source Port: 389" size="2" pos="20" show="389" value="0185"/>
+ <field name="tcp.dstport" showname="Destination Port: 14794" size="2" pos="22" show="14794" value="39ca"/>
+ <field name="tcp.port" showname="Source or Destination Port: 389" hide="yes" size="2" pos="20" show="389" value="0185"/>
+ <field name="tcp.port" showname="Source or Destination Port: 14794" hide="yes" size="2" pos="22" show="14794" value="39ca"/>
+ <field name="tcp.stream" showname="Stream index: 60" size="0" pos="20" show="60"/>
+ <field name="tcp.len" showname="TCP Segment Len: 205" size="1" pos="32" show="205" value="50"/>
+ <field name="tcp.seq" showname="Sequence number: 332 (relative sequence number)" size="4" pos="24" show="332" value="0000014c"/>
+ <field name="tcp.nxtseq" showname="Next sequence number: 537 (relative sequence number)" size="0" pos="20" show="537"/>
+ <field name="tcp.ack" showname="Acknowledgment number: 6282 (relative ack number)" size="4" pos="28" show="6282" value="0000188a"/>
+ <field name="tcp.hdr_len" showname="Header Length: 20 bytes" size="1" pos="32" show="20" value="50"/>
+ <field name="tcp.flags" showname="Flags: 0x018 (PSH, ACK)" size="2" pos="32" show="0x00000018" value="18" unmaskedvalue="5018">
+ <field name="tcp.flags.res" showname="000. .... .... = Reserved: Not set" size="1" pos="32" show="0" value="0" unmaskedvalue="50"/>
+ <field name="tcp.flags.ns" showname="...0 .... .... = Nonce: Not set" size="1" pos="32" show="0" value="0" unmaskedvalue="50"/>
+ <field name="tcp.flags.cwr" showname=".... 0... .... = Congestion Window Reduced (CWR): Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.ecn" showname=".... .0.. .... = ECN-Echo: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.urg" showname=".... ..0. .... = Urgent: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.ack" showname=".... ...1 .... = Acknowledgment: Set" size="1" pos="33" show="1" value="FFFFFFFF" unmaskedvalue="18"/>
+ <field name="tcp.flags.push" showname=".... .... 1... = Push: Set" size="1" pos="33" show="1" value="FFFFFFFF" unmaskedvalue="18"/>
+ <field name="tcp.flags.reset" showname=".... .... .0.. = Reset: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.syn" showname=".... .... ..0. = Syn: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.fin" showname=".... .... ...0 = Fin: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.str" showname="TCP Flags: *******AP***" size="2" pos="32" show="*******AP***" value="5018"/>
+ </field>
+ <field name="tcp.window_size_value" showname="Window size value: 32767" size="2" pos="34" show="32767" value="7fff"/>
+ <field name="tcp.window_size" showname="Calculated window size: 32767" size="2" pos="34" show="32767" value="7fff"/>
+ <field name="tcp.window_size_scalefactor" showname="Window size scaling factor: -2 (no window scaling used)" size="2" pos="34" show="-2" value="7fff"/>
+ <field name="tcp.checksum" showname="Checksum: 0x0000 [validation disabled]" size="2" pos="36" show="0x00000000" value="0000">
+ <field name="tcp.checksum_good" showname="Good Checksum: False" size="2" pos="36" show="0" value="0000"/>
+ <field name="tcp.checksum_bad" showname="Bad Checksum: False" size="2" pos="36" show="0" value="0000"/>
+ </field>
+ <field name="tcp.urgent_pointer" showname="Urgent pointer: 0" size="2" pos="38" show="0" value="0000"/>
+ <field name="tcp.analysis" showname="SEQ/ACK analysis" size="0" pos="20" show="" value="">
+ <field name="tcp.analysis.acks_frame" showname="This is an ACK to the segment in frame: 3105" size="0" pos="20" show="3105"/>
+ <field name="tcp.analysis.ack_rtt" showname="The RTT to ACK the segment was: 0.004634000 seconds" size="0" pos="20" show="0.004634000"/>
+ <field name="tcp.analysis.initial_rtt" showname="iRTT: 0.000020000 seconds" size="0" pos="20" show="0.000020000"/>
+ <field name="tcp.analysis.bytes_in_flight" showname="Bytes in flight: 205" size="0" pos="20" show="205"/>
+ </field>
+ <field name="tcp.pdu.size" showname="PDU Size: 205" size="205" pos="40" show="205" value="3081ca0201036181c40a0100040004008781baa181b73081b4a0030a0100a10b06092a864882f712010202a2819f04819c60819906092a864886f71201020202006f8189308186a003020105a10302010fa27a3078a003020112a271046f2db61233c561f67b14ba4337ca9dcef2c88e925b29c1d2cfa6d4852ae0ac9c3d140a024f2e8d4c2d7211bd69c753416bfa8160c7317e948e506ce510e46456672b4b42d14a0c83f34bf0f6afb2b073adeed33044cb414ab8e7ff72208b26f402155c562e6ff9201f5d5a7cd9a4a244"/>
+ </proto>
+ <proto name="ldap" showname="Lightweight Directory Access Protocol" size="205" pos="40">
+ <field name="ldap.LDAPMessage_element" showname="LDAPMessage bindResponse(3) success" size="205" pos="40" show="" value="">
+ <field name="ldap.messageID" showname="messageID: 3" size="1" pos="45" show="3" value="03"/>
+ <field name="ldap.protocolOp" showname="protocolOp: bindResponse (1)" size="199" pos="46" show="1" value="6181c40a0100040004008781baa181b73081b4a0030a0100a10b06092a864882f712010202a2819f04819c60819906092a864886f71201020202006f8189308186a003020105a10302010fa27a3078a003020112a271046f2db61233c561f67b14ba4337ca9dcef2c88e925b29c1d2cfa6d4852ae0ac9c3d140a024f2e8d4c2d7211bd69c753416bfa8160c7317e948e506ce510e46456672b4b42d14a0c83f34bf0f6afb2b073adeed33044cb414ab8e7ff72208b26f402155c562e6ff9201f5d5a7cd9a4a244">
+ <field name="ldap.bindResponse_element" showname="bindResponse" size="196" pos="49" show="" value="">
+ <field name="ldap.resultCode" showname="resultCode: success (0)" size="1" pos="51" show="0" value="00"/>
+ <field name="ldap.matchedDN" showname="matchedDN: " size="0" pos="54" show=""/>
+ <field name="ldap.errorMessage" showname="errorMessage: " size="0" pos="56" show=""/>
+ <field name="ldap.serverSaslCreds" showname="serverSaslCreds: a181b73081b4a0030a0100a10b06092a864882f712010202..." size="186" pos="59" show="a1:81:b7:30:81:b4:a0:03:0a:01:00:a1:0b:06:09:2a:86:48:82:f7:12:01:02:02:a2:81:9f:04:81:9c:60:81:99:06:09:2a:86:48:86:f7:12:01:02:02:02:00:6f:81:89:30:81:86:a0:03:02:01:05:a1:03:02:01:0f:a2:7a:30:78:a0:03:02:01:12:a2:71:04:6f:2d:b6:12:33:c5:61:f6:7b:14:ba:43:37:ca:9d:ce:f2:c8:8e:92:5b:29:c1:d2:cf:a6:d4:85:2a:e0:ac:9c:3d:14:0a:02:4f:2e:8d:4c:2d:72:11:bd:69:c7:53:41:6b:fa:81:60:c7:31:7e:94:8e:50:6c:e5:10:e4:64:56:67:2b:4b:42:d1:4a:0c:83:f3:4b:f0:f6:af:b2:b0:73:ad:ee:d3:30:44:cb:41:4a:b8:e7:ff:72:20:8b:26:f4:02:15:5c:56:2e:6f:f9:20:1f:5d:5a:7c:d9:a4:a2:44" value="a181b73081b4a0030a0100a10b06092a864882f712010202a2819f04819c60819906092a864886f71201020202006f8189308186a003020105a10302010fa27a3078a003020112a271046f2db61233c561f67b14ba4337ca9dcef2c88e925b29c1d2cfa6d4852ae0ac9c3d140a024f2e8d4c2d7211bd69c753416bfa8160c7317e948e506ce510e46456672b4b42d14a0c83f34bf0f6afb2b073adeed33044cb414ab8e7ff72208b26f402155c562e6ff9201f5d5a7cd9a4a244"/>
+ <proto name="spnego" showname="Simple Protected Negotiation" size="186" pos="59">
+ <field name="spnego.negTokenTarg_element" showname="negTokenTarg" size="183" pos="62" show="" value="">
+ <field name="spnego.negResult" showname="negResult: accept-completed (0)" size="1" pos="69" show="0" value="00"/>
+ <field name="spnego.supportedMech" showname="supportedMech: 1.2.840.48018.1.2.2 (MS KRB5 - Microsoft Kerberos 5)" size="9" pos="74" show="1.2.840.48018.1.2.2" value="2a864882f712010202"/>
+ <field name="spnego.responseToken" showname="responseToken: 60819906092a864886f71201020202006f8189308186a003..." size="156" pos="89" show="60:81:99:06:09:2a:86:48:86:f7:12:01:02:02:02:00:6f:81:89:30:81:86:a0:03:02:01:05:a1:03:02:01:0f:a2:7a:30:78:a0:03:02:01:12:a2:71:04:6f:2d:b6:12:33:c5:61:f6:7b:14:ba:43:37:ca:9d:ce:f2:c8:8e:92:5b:29:c1:d2:cf:a6:d4:85:2a:e0:ac:9c:3d:14:0a:02:4f:2e:8d:4c:2d:72:11:bd:69:c7:53:41:6b:fa:81:60:c7:31:7e:94:8e:50:6c:e5:10:e4:64:56:67:2b:4b:42:d1:4a:0c:83:f3:4b:f0:f6:af:b2:b0:73:ad:ee:d3:30:44:cb:41:4a:b8:e7:ff:72:20:8b:26:f4:02:15:5c:56:2e:6f:f9:20:1f:5d:5a:7c:d9:a4:a2:44" value="60819906092a864886f71201020202006f8189308186a003020105a10302010fa27a3078a003020112a271046f2db61233c561f67b14ba4337ca9dcef2c88e925b29c1d2cfa6d4852ae0ac9c3d140a024f2e8d4c2d7211bd69c753416bfa8160c7317e948e506ce510e46456672b4b42d14a0c83f34bf0f6afb2b073adeed33044cb414ab8e7ff72208b26f402155c562e6ff9201f5d5a7cd9a4a244"/>
+ <field name="spnego.krb5.blob" showname="krb5_blob: 60819906092a864886f71201020202006f8189308186a003..." size="140" pos="89" show="60:81:99:06:09:2a:86:48:86:f7:12:01:02:02:02:00:6f:81:89:30:81:86:a0:03:02:01:05:a1:03:02:01:0f:a2:7a:30:78:a0:03:02:01:12:a2:71:04:6f:2d:b6:12:33:c5:61:f6:7b:14:ba:43:37:ca:9d:ce:f2:c8:8e:92:5b:29:c1:d2:cf:a6:d4:85:2a:e0:ac:9c:3d:14:0a:02:4f:2e:8d:4c:2d:72:11:bd:69:c7:53:41:6b:fa:81:60:c7:31:7e:94:8e:50:6c:e5:10:e4:64:56:67:2b:4b:42:d1:4a:0c:83:f3:4b:f0:f6:af:b2:b0:73:ad:ee:d3:30:44:cb:41:4a:b8:e7:ff:72:20:8b:26:f4" value="60819906092a864886f71201020202006f8189308186a003020105a10302010fa27a3078a003020112a271046f2db61233c561f67b14ba4337ca9dcef2c88e925b29c1d2cfa6d4852ae0ac9c3d140a024f2e8d4c2d7211bd69c753416bfa8160c7317e948e506ce510e46456672b4b42d14a0c83f34bf0f6afb2b073adeed33044cb414ab8e7ff72208b26f4">
+ <field name="spnego.krb5_oid" showname="KRB5 OID: 1.2.840.113554.1.2.2 (KRB5 - Kerberos 5)" size="9" pos="94" show="1.2.840.113554.1.2.2" value="2a864886f712010202"/>
+ <field name="spnego.krb5.tok_id" showname="krb5_tok_id: KRB5_AP_REP (0x0002)" size="2" pos="103" show="0x00000002" value="0200"/>
+ <proto name="kerberos" showname="Kerberos" size="140" pos="105">
+ <field name="kerberos.ap_rep_element" showname="ap-rep" size="137" pos="108" show="" value="">
+ <field name="kerberos.pvno" showname="pvno: 5" size="1" pos="115" show="5" value="05"/>
+ <field name="kerberos.msg_type" showname="msg-type: krb-ap-rep (15)" size="1" pos="120" show="15" value="0f"/>
+ <field name="kerberos.enc_part_element" showname="enc-part" size="122" pos="123" show="" value="">
+ <field name="kerberos.etype" showname="etype: eTYPE-AES256-CTS-HMAC-SHA1-96 (18)" size="1" pos="129" show="18" value="12"/>
+ <field name="kerberos.cipher" showname="cipher: 2db61233c561f67b14ba4337ca9dcef2c88e925b29c1d2cf..." size="111" pos="134" show="2d:b6:12:33:c5:61:f6:7b:14:ba:43:37:ca:9d:ce:f2:c8:8e:92:5b:29:c1:d2:cf:a6:d4:85:2a:e0:ac:9c:3d:14:0a:02:4f:2e:8d:4c:2d:72:11:bd:69:c7:53:41:6b:fa:81:60:c7:31:7e:94:8e:50:6c:e5:10:e4:64:56:67:2b:4b:42:d1:4a:0c:83:f3:4b:f0:f6:af:b2:b0:73:ad:ee:d3:30:44:cb:41:4a:b8:e7:ff:72:20:8b:26:f4:02:15:5c:56:2e:6f:f9:20:1f:5d:5a:7c:d9:a4:a2:44" value="2db61233c561f67b14ba4337ca9dcef2c88e925b29c1d2cfa6d4852ae0ac9c3d140a024f2e8d4c2d7211bd69c753416bfa8160c7317e948e506ce510e46456672b4b42d14a0c83f34bf0f6afb2b073adeed33044cb414ab8e7ff72208b26f402155c562e6ff9201f5d5a7cd9a4a244"/>
+ </field>
+ </field>
+ </proto>
+ </field>
+ </field>
+ </proto>
+ </field>
+ </field>
+ <field name="ldap.response_to" showname="Response To: 3105" size="0" pos="46" show="3105"/>
+ <field name="ldap.time" showname="Time: 0.004634000 seconds" size="0" pos="46" show="0.004634000"/>
+ </field>
+ </proto>
+</packet>
+
+<packet>
+ <proto name="geninfo" pos="0" showname="General information" size="209">
+ <field name="num" pos="0" show="3113" showname="Number" value="c29" size="209"/>
+ <field name="len" pos="0" show="209" showname="Frame Length" value="d1" size="209"/>
+ <field name="caplen" pos="0" show="209" showname="Captured Length" value="d1" size="209"/>
+ <field name="timestamp" pos="0" show="Feb 10, 2017 14:36:24.775218000 NZDT" showname="Captured Time" value="1486690584.775218000" size="209"/>
+ </proto>
+ <proto name="frame" showname="Frame 3113: 209 bytes on wire (1672 bits), 209 bytes captured (1672 bits)" size="209" pos="0">
+ <field name="frame.encap_type" showname="Encapsulation type: Raw IP (7)" size="0" pos="0" show="7"/>
+ <field name="frame.time" showname="Arrival Time: Feb 10, 2017 14:36:24.775218000 NZDT" size="0" pos="0" show="Feb 10, 2017 14:36:24.775218000 NZDT"/>
+ <field name="frame.offset_shift" showname="Time shift for this packet: 0.000000000 seconds" size="0" pos="0" show="0.000000000"/>
+ <field name="frame.time_epoch" showname="Epoch Time: 1486690584.775218000 seconds" size="0" pos="0" show="1486690584.775218000"/>
+ <field name="frame.time_delta" showname="Time delta from previous captured frame: 0.000137000 seconds" size="0" pos="0" show="0.000137000"/>
+ <field name="frame.time_delta_displayed" showname="Time delta from previous displayed frame: 0.000137000 seconds" size="0" pos="0" show="0.000137000"/>
+ <field name="frame.time_relative" showname="Time since reference or first frame: 8.244767000 seconds" size="0" pos="0" show="8.244767000"/>
+ <field name="frame.number" showname="Frame Number: 3113" size="0" pos="0" show="3113"/>
+ <field name="frame.len" showname="Frame Length: 209 bytes (1672 bits)" size="0" pos="0" show="209"/>
+ <field name="frame.cap_len" showname="Capture Length: 209 bytes (1672 bits)" size="0" pos="0" show="209"/>
+ <field name="frame.marked" showname="Frame is marked: False" size="0" pos="0" show="0"/>
+ <field name="frame.ignored" showname="Frame is ignored: False" size="0" pos="0" show="0"/>
+ <field name="frame.protocols" showname="Protocols in frame: raw:ip:tcp:ldap:gss-api:spnego-krb5" size="0" pos="0" show="raw:ip:tcp:ldap:gss-api:spnego-krb5"/>
+ </proto>
+ <proto name="raw" showname="Raw packet data" size="209" pos="0"/>
+ <proto name="ip" showname="Internet Protocol Version 4, Src: 127.0.0.11, Dst: 127.0.0.21" size="20" pos="0">
+ <field name="ip.version" showname="0100 .... = Version: 4" size="1" pos="0" show="4" value="4" unmaskedvalue="45"/>
+ <field name="ip.hdr_len" showname=".... 0101 = Header Length: 20 bytes" size="1" pos="0" show="5" value="5" unmaskedvalue="45"/>
+ <field name="ip.dsfield" showname="Differentiated Services Field: 0x00 (DSCP: CS0, ECN: Not-ECT)" size="1" pos="1" show="0x00000000" value="00">
+ <field name="ip.dsfield.dscp" showname="0000 00.. = Differentiated Services Codepoint: Default (0)" size="1" pos="1" show="0" value="0" unmaskedvalue="00"/>
+ <field name="ip.dsfield.ecn" showname=".... ..00 = Explicit Congestion Notification: Not ECN-Capable Transport (0)" size="1" pos="1" show="0" value="0" unmaskedvalue="00"/>
+ </field>
+ <field name="ip.len" showname="Total Length: 209" size="2" pos="2" show="209" value="00d1"/>
+ <field name="ip.id" showname="Identification: 0xffff (65535)" size="2" pos="4" show="0x0000ffff" value="ffff"/>
+ <field name="ip.flags" showname="Flags: 0x02 (Don&#x27;t Fragment)" size="1" pos="6" show="0x00000002" value="40">
+ <field name="ip.flags.rb" showname="0... .... = Reserved bit: Not set" size="1" pos="6" show="0" value="40"/>
+ <field name="ip.flags.df" showname=".1.. .... = Don&#x27;t fragment: Set" size="1" pos="6" show="1" value="40"/>
+ <field name="ip.flags.mf" showname="..0. .... = More fragments: Not set" size="1" pos="6" show="0" value="40"/>
+ </field>
+ <field name="ip.frag_offset" showname="Fragment offset: 0" size="2" pos="6" show="0" value="4000"/>
+ <field name="ip.ttl" showname="Time to live: 255" size="1" pos="8" show="255" value="ff"/>
+ <field name="ip.proto" showname="Protocol: TCP (6)" size="1" pos="9" show="6" value="06"/>
+ <field name="ip.checksum" showname="Header checksum: 0x0000 [validation disabled]" size="2" pos="10" show="0x00000000" value="0000">
+ <field name="ip.checksum_good" showname="Good: False" size="2" pos="10" show="0" value="0000"/>
+ <field name="ip.checksum_bad" showname="Bad: False" size="2" pos="10" show="0" value="0000"/>
+ </field>
+ <field name="ip.src" showname="Source: 127.0.0.11" size="4" pos="12" show="127.0.0.11" value="7f00000b"/>
+ <field name="ip.addr" showname="Source or Destination Address: 127.0.0.11" hide="yes" size="4" pos="12" show="127.0.0.11" value="7f00000b"/>
+ <field name="ip.src_host" showname="Source Host: 127.0.0.11" hide="yes" size="4" pos="12" show="127.0.0.11" value="7f00000b"/>
+ <field name="ip.host" showname="Source or Destination Host: 127.0.0.11" hide="yes" size="4" pos="12" show="127.0.0.11" value="7f00000b"/>
+ <field name="ip.dst" showname="Destination: 127.0.0.21" size="4" pos="16" show="127.0.0.21" value="7f000015"/>
+ <field name="ip.addr" showname="Source or Destination Address: 127.0.0.21" hide="yes" size="4" pos="16" show="127.0.0.21" value="7f000015"/>
+ <field name="ip.dst_host" showname="Destination Host: 127.0.0.21" hide="yes" size="4" pos="16" show="127.0.0.21" value="7f000015"/>
+ <field name="ip.host" showname="Source or Destination Host: 127.0.0.21" hide="yes" size="4" pos="16" show="127.0.0.21" value="7f000015"/>
+ <field name="" show="Source GeoIP: Unknown" size="4" pos="12" value="7f00000b"/>
+ <field name="" show="Destination GeoIP: Unknown" size="4" pos="16" value="7f000015"/>
+ </proto>
+ <proto name="tcp" showname="Transmission Control Protocol, Src Port: 14794 (14794), Dst Port: 389 (389), Seq: 6282, Ack: 537, Len: 169" size="20" pos="20">
+ <field name="tcp.srcport" showname="Source Port: 14794" size="2" pos="20" show="14794" value="39ca"/>
+ <field name="tcp.dstport" showname="Destination Port: 389" size="2" pos="22" show="389" value="0185"/>
+ <field name="tcp.port" showname="Source or Destination Port: 14794" hide="yes" size="2" pos="20" show="14794" value="39ca"/>
+ <field name="tcp.port" showname="Source or Destination Port: 389" hide="yes" size="2" pos="22" show="389" value="0185"/>
+ <field name="tcp.stream" showname="Stream index: 60" size="0" pos="20" show="60"/>
+ <field name="tcp.len" showname="TCP Segment Len: 169" size="1" pos="32" show="169" value="50"/>
+ <field name="tcp.seq" showname="Sequence number: 6282 (relative sequence number)" size="4" pos="24" show="6282" value="0000188a"/>
+ <field name="tcp.nxtseq" showname="Next sequence number: 6451 (relative sequence number)" size="0" pos="20" show="6451"/>
+ <field name="tcp.ack" showname="Acknowledgment number: 537 (relative ack number)" size="4" pos="28" show="537" value="00000219"/>
+ <field name="tcp.hdr_len" showname="Header Length: 20 bytes" size="1" pos="32" show="20" value="50"/>
+ <field name="tcp.flags" showname="Flags: 0x018 (PSH, ACK)" size="2" pos="32" show="0x00000018" value="18" unmaskedvalue="5018">
+ <field name="tcp.flags.res" showname="000. .... .... = Reserved: Not set" size="1" pos="32" show="0" value="0" unmaskedvalue="50"/>
+ <field name="tcp.flags.ns" showname="...0 .... .... = Nonce: Not set" size="1" pos="32" show="0" value="0" unmaskedvalue="50"/>
+ <field name="tcp.flags.cwr" showname=".... 0... .... = Congestion Window Reduced (CWR): Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.ecn" showname=".... .0.. .... = ECN-Echo: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.urg" showname=".... ..0. .... = Urgent: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.ack" showname=".... ...1 .... = Acknowledgment: Set" size="1" pos="33" show="1" value="FFFFFFFF" unmaskedvalue="18"/>
+ <field name="tcp.flags.push" showname=".... .... 1... = Push: Set" size="1" pos="33" show="1" value="FFFFFFFF" unmaskedvalue="18"/>
+ <field name="tcp.flags.reset" showname=".... .... .0.. = Reset: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.syn" showname=".... .... ..0. = Syn: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.fin" showname=".... .... ...0 = Fin: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.str" showname="TCP Flags: *******AP***" size="2" pos="32" show="*******AP***" value="5018"/>
+ </field>
+ <field name="tcp.window_size_value" showname="Window size value: 32767" size="2" pos="34" show="32767" value="7fff"/>
+ <field name="tcp.window_size" showname="Calculated window size: 32767" size="2" pos="34" show="32767" value="7fff"/>
+ <field name="tcp.window_size_scalefactor" showname="Window size scaling factor: -2 (no window scaling used)" size="2" pos="34" show="-2" value="7fff"/>
+ <field name="tcp.checksum" showname="Checksum: 0x0000 [validation disabled]" size="2" pos="36" show="0x00000000" value="0000">
+ <field name="tcp.checksum_good" showname="Good Checksum: False" size="2" pos="36" show="0" value="0000"/>
+ <field name="tcp.checksum_bad" showname="Bad Checksum: False" size="2" pos="36" show="0" value="0000"/>
+ </field>
+ <field name="tcp.urgent_pointer" showname="Urgent pointer: 0" size="2" pos="38" show="0" value="0000"/>
+ <field name="tcp.analysis" showname="SEQ/ACK analysis" size="0" pos="20" show="" value="">
+ <field name="tcp.analysis.acks_frame" showname="This is an ACK to the segment in frame: 3110" size="0" pos="20" show="3110"/>
+ <field name="tcp.analysis.ack_rtt" showname="The RTT to ACK the segment was: 0.000240000 seconds" size="0" pos="20" show="0.000240000"/>
+ <field name="tcp.analysis.initial_rtt" showname="iRTT: 0.000020000 seconds" size="0" pos="20" show="0.000020000"/>
+ <field name="tcp.analysis.bytes_in_flight" showname="Bytes in flight: 169" size="0" pos="20" show="169"/>
+ </field>
+ <field name="tcp.pdu.size" showname="PDU Size: 169" size="169" pos="40" show="169" value="000000a5050404ff000c000c00000000327fba42f2b5c0e4f071d599072f1f8530818602010463818004000a01000a0100020100020100010100870b6f626a656374436c61737330600417726f6f74446f6d61696e4e616d696e67436f6e74657874041a636f6e66696775726174696f6e4e616d696e67436f6e746578740413736368656d614e616d696e67436f6e74657874041464656661756c744e616d696e67436f6e74657874"/>
+ </proto>
+ <proto name="ldap" showname="Lightweight Directory Access Protocol" size="169" pos="40">
+ <field name="ldap.sasl_buffer_length" showname="SASL Buffer Length: 165" size="4" pos="40" show="165" value="000000a5"/>
+ <field name="" show="SASL Buffer" size="169" pos="40" value="000000a5050404ff000c000c00000000327fba42f2b5c0e4f071d599072f1f8530818602010463818004000a01000a0100020100020100010100870b6f626a656374436c61737330600417726f6f74446f6d61696e4e616d696e67436f6e74657874041a636f6e66696775726174696f6e4e616d696e67436f6e746578740413736368656d614e616d696e67436f6e74657874041464656661756c744e616d696e67436f6e74657874">
+ <proto name="gss-api" showname="GSS-API Generic Security Service Application Program Interface" size="28" pos="44">
+ <field name="spnego.krb5.blob" showname="krb5_blob: 050404ff000c000c00000000327fba42f2b5c0e4f071d599..." size="28" pos="44" show="05:04:04:ff:00:0c:00:0c:00:00:00:00:32:7f:ba:42:f2:b5:c0:e4:f0:71:d5:99:07:2f:1f:85" value="050404ff000c000c00000000327fba42f2b5c0e4f071d599072f1f85">
+ <field name="spnego.krb5.tok_id" showname="krb5_tok_id: KRB_TOKEN_CFX_WRAP (0x0405)" size="2" pos="44" show="0x00000405" value="0504"/>
+ <field name="spnego.krb5.cfx_flags" showname="krb5_cfx_flags: 0x04, AcceptorSubkey" size="1" pos="46" show="0x00000004" value="04">
+ <field name="spnego.krb5.acceptor_subkey" showname=".... .1.. = AcceptorSubkey: Set" size="1" pos="46" show="1" value="FFFFFFFF" unmaskedvalue="04"/>
+ <field name="spnego.krb5.sealed" showname=".... ..0. = Sealed: Not set" size="1" pos="46" show="0" value="0" unmaskedvalue="04"/>
+ <field name="spnego.krb5.send_by_acceptor" showname=".... ...0 = SendByAcceptor: Not set" size="1" pos="46" show="0" value="0" unmaskedvalue="04"/>
+ </field>
+ <field name="spnego.krb5.filler" showname="krb5_filler: ff" size="1" pos="47" show="ff" value="ff"/>
+ <field name="spnego.krb5.cfx_ec" showname="krb5_cfx_ec: 12" size="2" pos="48" show="12" value="000c"/>
+ <field name="spnego.krb5.cfx_rrc" showname="krb5_cfx_rrc: 12" size="2" pos="50" show="12" value="000c"/>
+ <field name="spnego.krb5.cfx_seq" showname="krb5_cfx_seq: 847231554" size="8" pos="52" show="847231554" value="00000000327fba42"/>
+ <field name="spnego.krb5.sgn_cksum" showname="krb5_sgn_cksum: f2b5c0e4f071d599072f1f85" size="12" pos="60" show="f2:b5:c0:e4:f0:71:d5:99:07:2f:1f:85" value="f2b5c0e4f071d599072f1f85"/>
+ </field>
+ </proto>
+ <field name="" show="GSS-API payload (137 bytes)" size="137" pos="72" value="30818602010463818004000a01000a0100020100020100010100870b6f626a656374436c61737330600417726f6f74446f6d61696e4e616d696e67436f6e74657874041a636f6e66696775726174696f6e4e616d696e67436f6e746578740413736368656d614e616d696e67436f6e74657874041464656661756c744e616d696e67436f6e74657874">
+ <field name="ldap.LDAPMessage_element" showname="LDAPMessage searchRequest(4) &quot;&lt;ROOT&gt;&quot; baseObject" size="137" pos="72" show="" value="">
+ <field name="ldap.messageID" showname="messageID: 4" size="1" pos="77" show="4" value="04"/>
+ <field name="ldap.protocolOp" showname="protocolOp: searchRequest (3)" size="131" pos="78" show="3" value="63818004000a01000a0100020100020100010100870b6f626a656374436c61737330600417726f6f74446f6d61696e4e616d696e67436f6e74657874041a636f6e66696775726174696f6e4e616d696e67436f6e746578740413736368656d614e616d696e67436f6e74657874041464656661756c744e616d696e67436f6e74657874">
+ <field name="ldap.searchRequest_element" showname="searchRequest" size="128" pos="81" show="" value="">
+ <field name="ldap.baseObject" showname="baseObject: " size="0" pos="83" show=""/>
+ <field name="ldap.scope" showname="scope: baseObject (0)" size="1" pos="85" show="0" value="00"/>
+ <field name="ldap.derefAliases" showname="derefAliases: neverDerefAliases (0)" size="1" pos="88" show="0" value="00"/>
+ <field name="ldap.sizeLimit" showname="sizeLimit: 0" size="1" pos="91" show="0" value="00"/>
+ <field name="ldap.timeLimit" showname="timeLimit: 0" size="1" pos="94" show="0" value="00"/>
+ <field name="ldap.typesOnly" showname="typesOnly: False" size="1" pos="97" show="0" value="00"/>
+ <field name="" show="Filter: (objectClass=*)" size="13" pos="98" value="870b6f626a656374436c617373">
+ <field name="ldap.filter" showname="filter: present (7)" size="11" pos="100" show="7" value="6f626a656374436c617373">
+ <field name="ldap.present" showname="present: objectClass" size="11" pos="100" show="objectClass" value="6f626a656374436c617373"/>
+ </field>
+ </field>
+ <field name="ldap.attributes" showname="attributes: 4 items" size="96" pos="113" show="4" value="0417726f6f74446f6d61696e4e616d696e67436f6e74657874041a636f6e66696775726174696f6e4e616d696e67436f6e746578740413736368656d614e616d696e67436f6e74657874041464656661756c744e616d696e67436f6e74657874">
+ <field name="ldap.AttributeDescription" showname="AttributeDescription: rootDomainNamingContext" size="23" pos="115" show="rootDomainNamingContext" value="726f6f74446f6d61696e4e616d696e67436f6e74657874"/>
+ <field name="ldap.AttributeDescription" showname="AttributeDescription: configurationNamingContext" size="26" pos="140" show="configurationNamingContext" value="636f6e66696775726174696f6e4e616d696e67436f6e74657874"/>
+ <field name="ldap.AttributeDescription" showname="AttributeDescription: schemaNamingContext" size="19" pos="168" show="schemaNamingContext" value="736368656d614e616d696e67436f6e74657874"/>
+ <field name="ldap.AttributeDescription" showname="AttributeDescription: defaultNamingContext" size="20" pos="189" show="defaultNamingContext" value="64656661756c744e616d696e67436f6e74657874"/>
+ </field>
+ </field>
+ </field>
+ </field>
+ </field>
+ </field>
+ </proto>
+</packet>
+
+<packet>
+ <proto name="geninfo" pos="0" showname="General information" size="146">
+ <field name="num" pos="0" show="3119" showname="Number" value="c2f" size="146"/>
+ <field name="len" pos="0" show="146" showname="Frame Length" value="92" size="146"/>
+ <field name="caplen" pos="0" show="146" showname="Captured Length" value="92" size="146"/>
+ <field name="timestamp" pos="0" show="Feb 10, 2017 14:36:24.775574000 NZDT" showname="Captured Time" value="1486690584.775574000" size="146"/>
+ </proto>
+ <proto name="frame" showname="Frame 3119: 146 bytes on wire (1168 bits), 146 bytes captured (1168 bits)" size="146" pos="0">
+ <field name="frame.encap_type" showname="Encapsulation type: Raw IP (7)" size="0" pos="0" show="7"/>
+ <field name="frame.time" showname="Arrival Time: Feb 10, 2017 14:36:24.775574000 NZDT" size="0" pos="0" show="Feb 10, 2017 14:36:24.775574000 NZDT"/>
+ <field name="frame.offset_shift" showname="Time shift for this packet: 0.000000000 seconds" size="0" pos="0" show="0.000000000"/>
+ <field name="frame.time_epoch" showname="Epoch Time: 1486690584.775574000 seconds" size="0" pos="0" show="1486690584.775574000"/>
+ <field name="frame.time_delta" showname="Time delta from previous captured frame: 0.000096000 seconds" size="0" pos="0" show="0.000096000"/>
+ <field name="frame.time_delta_displayed" showname="Time delta from previous displayed frame: 0.000096000 seconds" size="0" pos="0" show="0.000096000"/>
+ <field name="frame.time_relative" showname="Time since reference or first frame: 8.245123000 seconds" size="0" pos="0" show="8.245123000"/>
+ <field name="frame.number" showname="Frame Number: 3119" size="0" pos="0" show="3119"/>
+ <field name="frame.len" showname="Frame Length: 146 bytes (1168 bits)" size="0" pos="0" show="146"/>
+ <field name="frame.cap_len" showname="Capture Length: 146 bytes (1168 bits)" size="0" pos="0" show="146"/>
+ <field name="frame.marked" showname="Frame is marked: False" size="0" pos="0" show="0"/>
+ <field name="frame.ignored" showname="Frame is ignored: False" size="0" pos="0" show="0"/>
+ <field name="frame.protocols" showname="Protocols in frame: raw:ip:tcp:ldap:gss-api:spnego-krb5" size="0" pos="0" show="raw:ip:tcp:ldap:gss-api:spnego-krb5"/>
+ </proto>
+ <proto name="raw" showname="Raw packet data" size="146" pos="0"/>
+ <proto name="ip" showname="Internet Protocol Version 4, Src: 127.0.0.11, Dst: 127.0.0.21" size="20" pos="0">
+ <field name="ip.version" showname="0100 .... = Version: 4" size="1" pos="0" show="4" value="4" unmaskedvalue="45"/>
+ <field name="ip.hdr_len" showname=".... 0101 = Header Length: 20 bytes" size="1" pos="0" show="5" value="5" unmaskedvalue="45"/>
+ <field name="ip.dsfield" showname="Differentiated Services Field: 0x00 (DSCP: CS0, ECN: Not-ECT)" size="1" pos="1" show="0x00000000" value="00">
+ <field name="ip.dsfield.dscp" showname="0000 00.. = Differentiated Services Codepoint: Default (0)" size="1" pos="1" show="0" value="0" unmaskedvalue="00"/>
+ <field name="ip.dsfield.ecn" showname=".... ..00 = Explicit Congestion Notification: Not ECN-Capable Transport (0)" size="1" pos="1" show="0" value="0" unmaskedvalue="00"/>
+ </field>
+ <field name="ip.len" showname="Total Length: 146" size="2" pos="2" show="146" value="0092"/>
+ <field name="ip.id" showname="Identification: 0xffff (65535)" size="2" pos="4" show="0x0000ffff" value="ffff"/>
+ <field name="ip.flags" showname="Flags: 0x02 (Don&#x27;t Fragment)" size="1" pos="6" show="0x00000002" value="40">
+ <field name="ip.flags.rb" showname="0... .... = Reserved bit: Not set" size="1" pos="6" show="0" value="40"/>
+ <field name="ip.flags.df" showname=".1.. .... = Don&#x27;t fragment: Set" size="1" pos="6" show="1" value="40"/>
+ <field name="ip.flags.mf" showname="..0. .... = More fragments: Not set" size="1" pos="6" show="0" value="40"/>
+ </field>
+ <field name="ip.frag_offset" showname="Fragment offset: 0" size="2" pos="6" show="0" value="4000"/>
+ <field name="ip.ttl" showname="Time to live: 255" size="1" pos="8" show="255" value="ff"/>
+ <field name="ip.proto" showname="Protocol: TCP (6)" size="1" pos="9" show="6" value="06"/>
+ <field name="ip.checksum" showname="Header checksum: 0x0000 [validation disabled]" size="2" pos="10" show="0x00000000" value="0000">
+ <field name="ip.checksum_good" showname="Good: False" size="2" pos="10" show="0" value="0000"/>
+ <field name="ip.checksum_bad" showname="Bad: False" size="2" pos="10" show="0" value="0000"/>
+ </field>
+ <field name="ip.src" showname="Source: 127.0.0.11" size="4" pos="12" show="127.0.0.11" value="7f00000b"/>
+ <field name="ip.addr" showname="Source or Destination Address: 127.0.0.11" hide="yes" size="4" pos="12" show="127.0.0.11" value="7f00000b"/>
+ <field name="ip.src_host" showname="Source Host: 127.0.0.11" hide="yes" size="4" pos="12" show="127.0.0.11" value="7f00000b"/>
+ <field name="ip.host" showname="Source or Destination Host: 127.0.0.11" hide="yes" size="4" pos="12" show="127.0.0.11" value="7f00000b"/>
+ <field name="ip.dst" showname="Destination: 127.0.0.21" size="4" pos="16" show="127.0.0.21" value="7f000015"/>
+ <field name="ip.addr" showname="Source or Destination Address: 127.0.0.21" hide="yes" size="4" pos="16" show="127.0.0.21" value="7f000015"/>
+ <field name="ip.dst_host" showname="Destination Host: 127.0.0.21" hide="yes" size="4" pos="16" show="127.0.0.21" value="7f000015"/>
+ <field name="ip.host" showname="Source or Destination Host: 127.0.0.21" hide="yes" size="4" pos="16" show="127.0.0.21" value="7f000015"/>
+ <field name="" show="Source GeoIP: Unknown" size="4" pos="12" value="7f00000b"/>
+ <field name="" show="Destination GeoIP: Unknown" size="4" pos="16" value="7f000015"/>
+ </proto>
+ <proto name="tcp" showname="Transmission Control Protocol, Src Port: 14794 (14794), Dst Port: 389 (389), Seq: 6451, Ack: 868, Len: 106" size="20" pos="20">
+ <field name="tcp.srcport" showname="Source Port: 14794" size="2" pos="20" show="14794" value="39ca"/>
+ <field name="tcp.dstport" showname="Destination Port: 389" size="2" pos="22" show="389" value="0185"/>
+ <field name="tcp.port" showname="Source or Destination Port: 14794" hide="yes" size="2" pos="20" show="14794" value="39ca"/>
+ <field name="tcp.port" showname="Source or Destination Port: 389" hide="yes" size="2" pos="22" show="389" value="0185"/>
+ <field name="tcp.stream" showname="Stream index: 60" size="0" pos="20" show="60"/>
+ <field name="tcp.len" showname="TCP Segment Len: 106" size="1" pos="32" show="106" value="50"/>
+ <field name="tcp.seq" showname="Sequence number: 6451 (relative sequence number)" size="4" pos="24" show="6451" value="00001933"/>
+ <field name="tcp.nxtseq" showname="Next sequence number: 6557 (relative sequence number)" size="0" pos="20" show="6557"/>
+ <field name="tcp.ack" showname="Acknowledgment number: 868 (relative ack number)" size="4" pos="28" show="868" value="00000364"/>
+ <field name="tcp.hdr_len" showname="Header Length: 20 bytes" size="1" pos="32" show="20" value="50"/>
+ <field name="tcp.flags" showname="Flags: 0x018 (PSH, ACK)" size="2" pos="32" show="0x00000018" value="18" unmaskedvalue="5018">
+ <field name="tcp.flags.res" showname="000. .... .... = Reserved: Not set" size="1" pos="32" show="0" value="0" unmaskedvalue="50"/>
+ <field name="tcp.flags.ns" showname="...0 .... .... = Nonce: Not set" size="1" pos="32" show="0" value="0" unmaskedvalue="50"/>
+ <field name="tcp.flags.cwr" showname=".... 0... .... = Congestion Window Reduced (CWR): Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.ecn" showname=".... .0.. .... = ECN-Echo: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.urg" showname=".... ..0. .... = Urgent: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.ack" showname=".... ...1 .... = Acknowledgment: Set" size="1" pos="33" show="1" value="FFFFFFFF" unmaskedvalue="18"/>
+ <field name="tcp.flags.push" showname=".... .... 1... = Push: Set" size="1" pos="33" show="1" value="FFFFFFFF" unmaskedvalue="18"/>
+ <field name="tcp.flags.reset" showname=".... .... .0.. = Reset: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.syn" showname=".... .... ..0. = Syn: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.fin" showname=".... .... ...0 = Fin: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.str" showname="TCP Flags: *******AP***" size="2" pos="32" show="*******AP***" value="5018"/>
+ </field>
+ <field name="tcp.window_size_value" showname="Window size value: 32767" size="2" pos="34" show="32767" value="7fff"/>
+ <field name="tcp.window_size" showname="Calculated window size: 32767" size="2" pos="34" show="32767" value="7fff"/>
+ <field name="tcp.window_size_scalefactor" showname="Window size scaling factor: -2 (no window scaling used)" size="2" pos="34" show="-2" value="7fff"/>
+ <field name="tcp.checksum" showname="Checksum: 0x0000 [validation disabled]" size="2" pos="36" show="0x00000000" value="0000">
+ <field name="tcp.checksum_good" showname="Good Checksum: False" size="2" pos="36" show="0" value="0000"/>
+ <field name="tcp.checksum_bad" showname="Bad Checksum: False" size="2" pos="36" show="0" value="0000"/>
+ </field>
+ <field name="tcp.urgent_pointer" showname="Urgent pointer: 0" size="2" pos="38" show="0" value="0000"/>
+ <field name="tcp.analysis" showname="SEQ/ACK analysis" size="0" pos="20" show="" value="">
+ <field name="tcp.analysis.acks_frame" showname="This is an ACK to the segment in frame: 3116" size="0" pos="20" show="3116"/>
+ <field name="tcp.analysis.ack_rtt" showname="The RTT to ACK the segment was: 0.000167000 seconds" size="0" pos="20" show="0.000167000"/>
+ <field name="tcp.analysis.initial_rtt" showname="iRTT: 0.000020000 seconds" size="0" pos="20" show="0.000020000"/>
+ <field name="tcp.analysis.bytes_in_flight" showname="Bytes in flight: 106" size="0" pos="20" show="106"/>
+ </field>
+ <field name="tcp.pdu.size" showname="PDU Size: 106" size="106" pos="40" show="106" value="00000066050404ff000c000c00000000327fba430a655f88ee6b2540ee57965f30480201056343041a44433d73616d62612c44433d6578616d706c652c44433d636f6d0a01000a010002010002010001010087096f626a656374536964300b04096f626a656374536964"/>
+ </proto>
+ <proto name="ldap" showname="Lightweight Directory Access Protocol" size="106" pos="40">
+ <field name="ldap.sasl_buffer_length" showname="SASL Buffer Length: 102" size="4" pos="40" show="102" value="00000066"/>
+ <field name="" show="SASL Buffer" size="106" pos="40" value="00000066050404ff000c000c00000000327fba430a655f88ee6b2540ee57965f30480201056343041a44433d73616d62612c44433d6578616d706c652c44433d636f6d0a01000a010002010002010001010087096f626a656374536964300b04096f626a656374536964">
+ <proto name="gss-api" showname="GSS-API Generic Security Service Application Program Interface" size="28" pos="44">
+ <field name="spnego.krb5.blob" showname="krb5_blob: 050404ff000c000c00000000327fba430a655f88ee6b2540..." size="28" pos="44" show="05:04:04:ff:00:0c:00:0c:00:00:00:00:32:7f:ba:43:0a:65:5f:88:ee:6b:25:40:ee:57:96:5f" value="050404ff000c000c00000000327fba430a655f88ee6b2540ee57965f">
+ <field name="spnego.krb5.tok_id" showname="krb5_tok_id: KRB_TOKEN_CFX_WRAP (0x0405)" size="2" pos="44" show="0x00000405" value="0504"/>
+ <field name="spnego.krb5.cfx_flags" showname="krb5_cfx_flags: 0x04, AcceptorSubkey" size="1" pos="46" show="0x00000004" value="04">
+ <field name="spnego.krb5.acceptor_subkey" showname=".... .1.. = AcceptorSubkey: Set" size="1" pos="46" show="1" value="FFFFFFFF" unmaskedvalue="04"/>
+ <field name="spnego.krb5.sealed" showname=".... ..0. = Sealed: Not set" size="1" pos="46" show="0" value="0" unmaskedvalue="04"/>
+ <field name="spnego.krb5.send_by_acceptor" showname=".... ...0 = SendByAcceptor: Not set" size="1" pos="46" show="0" value="0" unmaskedvalue="04"/>
+ </field>
+ <field name="spnego.krb5.filler" showname="krb5_filler: ff" size="1" pos="47" show="ff" value="ff"/>
+ <field name="spnego.krb5.cfx_ec" showname="krb5_cfx_ec: 12" size="2" pos="48" show="12" value="000c"/>
+ <field name="spnego.krb5.cfx_rrc" showname="krb5_cfx_rrc: 12" size="2" pos="50" show="12" value="000c"/>
+ <field name="spnego.krb5.cfx_seq" showname="krb5_cfx_seq: 847231555" size="8" pos="52" show="847231555" value="00000000327fba43"/>
+ <field name="spnego.krb5.sgn_cksum" showname="krb5_sgn_cksum: 0a655f88ee6b2540ee57965f" size="12" pos="60" show="0a:65:5f:88:ee:6b:25:40:ee:57:96:5f" value="0a655f88ee6b2540ee57965f"/>
+ </field>
+ </proto>
+ <field name="" show="GSS-API payload (74 bytes)" size="74" pos="72" value="30480201056343041a44433d73616d62612c44433d6578616d706c652c44433d636f6d0a01000a010002010002010001010087096f626a656374536964300b04096f626a656374536964">
+ <field name="ldap.LDAPMessage_element" showname="LDAPMessage searchRequest(5) &quot;DC=samba,DC=example,DC=com&quot; baseObject" size="74" pos="72" show="" value="">
+ <field name="ldap.messageID" showname="messageID: 5" size="1" pos="76" show="5" value="05"/>
+ <field name="ldap.protocolOp" showname="protocolOp: searchRequest (3)" size="69" pos="77" show="3" value="6343041a44433d73616d62612c44433d6578616d706c652c44433d636f6d0a01000a010002010002010001010087096f626a656374536964300b04096f626a656374536964">
+ <field name="ldap.searchRequest_element" showname="searchRequest" size="67" pos="79" show="" value="">
+ <field name="ldap.baseObject" showname="baseObject: DC=samba,DC=example,DC=com" size="26" pos="81" show="DC=samba,DC=example,DC=com" value="44433d73616d62612c44433d6578616d706c652c44433d636f6d"/>
+ <field name="ldap.scope" showname="scope: baseObject (0)" size="1" pos="109" show="0" value="00"/>
+ <field name="ldap.derefAliases" showname="derefAliases: neverDerefAliases (0)" size="1" pos="112" show="0" value="00"/>
+ <field name="ldap.sizeLimit" showname="sizeLimit: 0" size="1" pos="115" show="0" value="00"/>
+ <field name="ldap.timeLimit" showname="timeLimit: 0" size="1" pos="118" show="0" value="00"/>
+ <field name="ldap.typesOnly" showname="typesOnly: False" size="1" pos="121" show="0" value="00"/>
+ <field name="" show="Filter: (objectSid=*)" size="11" pos="122" value="87096f626a656374536964">
+ <field name="ldap.filter" showname="filter: present (7)" size="9" pos="124" show="7" value="6f626a656374536964">
+ <field name="ldap.present" showname="present: objectSid" size="9" pos="124" show="objectSid" value="6f626a656374536964"/>
+ </field>
+ </field>
+ <field name="ldap.attributes" showname="attributes: 1 item" size="11" pos="135" show="1" value="04096f626a656374536964">
+ <field name="ldap.AttributeDescription" showname="AttributeDescription: objectSid" size="9" pos="137" show="objectSid" value="6f626a656374536964"/>
+ </field>
+ </field>
+ </field>
+ </field>
+ </field>
+ </field>
+ </proto>
+</packet>
+
+<packet>
+ <proto name="geninfo" pos="0" showname="General information" size="179">
+ <field name="num" pos="0" show="4576" showname="Number" value="11e0" size="179"/>
+ <field name="len" pos="0" show="179" showname="Frame Length" value="b3" size="179"/>
+ <field name="caplen" pos="0" show="179" showname="Captured Length" value="b3" size="179"/>
+ <field name="timestamp" pos="0" show="Feb 10, 2017 14:36:26.238734000 NZDT" showname="Captured Time" value="1486690586.238734000" size="179"/>
+ </proto>
+ <proto name="frame" showname="Frame 4576: 179 bytes on wire (1432 bits), 179 bytes captured (1432 bits)" size="179" pos="0">
+ <field name="frame.encap_type" showname="Encapsulation type: Raw IP (7)" size="0" pos="0" show="7"/>
+ <field name="frame.time" showname="Arrival Time: Feb 10, 2017 14:36:26.238734000 NZDT" size="0" pos="0" show="Feb 10, 2017 14:36:26.238734000 NZDT"/>
+ <field name="frame.offset_shift" showname="Time shift for this packet: 0.000000000 seconds" size="0" pos="0" show="0.000000000"/>
+ <field name="frame.time_epoch" showname="Epoch Time: 1486690586.238734000 seconds" size="0" pos="0" show="1486690586.238734000"/>
+ <field name="frame.time_delta" showname="Time delta from previous captured frame: 0.000072000 seconds" size="0" pos="0" show="0.000072000"/>
+ <field name="frame.time_delta_displayed" showname="Time delta from previous displayed frame: 0.000072000 seconds" size="0" pos="0" show="0.000072000"/>
+ <field name="frame.time_relative" showname="Time since reference or first frame: 9.708283000 seconds" size="0" pos="0" show="9.708283000"/>
+ <field name="frame.number" showname="Frame Number: 4576" size="0" pos="0" show="4576"/>
+ <field name="frame.len" showname="Frame Length: 179 bytes (1432 bits)" size="0" pos="0" show="179"/>
+ <field name="frame.cap_len" showname="Capture Length: 179 bytes (1432 bits)" size="0" pos="0" show="179"/>
+ <field name="frame.marked" showname="Frame is marked: False" size="0" pos="0" show="0"/>
+ <field name="frame.ignored" showname="Frame is ignored: False" size="0" pos="0" show="0"/>
+ <field name="frame.protocols" showname="Protocols in frame: raw:ip:tcp:ldap:gss-api:spnego-krb5" size="0" pos="0" show="raw:ip:tcp:ldap:gss-api:spnego-krb5"/>
+ </proto>
+ <proto name="raw" showname="Raw packet data" size="179" pos="0"/>
+ <proto name="ip" showname="Internet Protocol Version 4, Src: 127.0.0.11, Dst: 127.0.0.21" size="20" pos="0">
+ <field name="ip.version" showname="0100 .... = Version: 4" size="1" pos="0" show="4" value="4" unmaskedvalue="45"/>
+ <field name="ip.hdr_len" showname=".... 0101 = Header Length: 20 bytes" size="1" pos="0" show="5" value="5" unmaskedvalue="45"/>
+ <field name="ip.dsfield" showname="Differentiated Services Field: 0x00 (DSCP: CS0, ECN: Not-ECT)" size="1" pos="1" show="0x00000000" value="00">
+ <field name="ip.dsfield.dscp" showname="0000 00.. = Differentiated Services Codepoint: Default (0)" size="1" pos="1" show="0" value="0" unmaskedvalue="00"/>
+ <field name="ip.dsfield.ecn" showname=".... ..00 = Explicit Congestion Notification: Not ECN-Capable Transport (0)" size="1" pos="1" show="0" value="0" unmaskedvalue="00"/>
+ </field>
+ <field name="ip.len" showname="Total Length: 179" size="2" pos="2" show="179" value="00b3"/>
+ <field name="ip.id" showname="Identification: 0xffff (65535)" size="2" pos="4" show="0x0000ffff" value="ffff"/>
+ <field name="ip.flags" showname="Flags: 0x02 (Don&#x27;t Fragment)" size="1" pos="6" show="0x00000002" value="40">
+ <field name="ip.flags.rb" showname="0... .... = Reserved bit: Not set" size="1" pos="6" show="0" value="40"/>
+ <field name="ip.flags.df" showname=".1.. .... = Don&#x27;t fragment: Set" size="1" pos="6" show="1" value="40"/>
+ <field name="ip.flags.mf" showname="..0. .... = More fragments: Not set" size="1" pos="6" show="0" value="40"/>
+ </field>
+ <field name="ip.frag_offset" showname="Fragment offset: 0" size="2" pos="6" show="0" value="4000"/>
+ <field name="ip.ttl" showname="Time to live: 255" size="1" pos="8" show="255" value="ff"/>
+ <field name="ip.proto" showname="Protocol: TCP (6)" size="1" pos="9" show="6" value="06"/>
+ <field name="ip.checksum" showname="Header checksum: 0x0000 [validation disabled]" size="2" pos="10" show="0x00000000" value="0000">
+ <field name="ip.checksum_good" showname="Good: False" size="2" pos="10" show="0" value="0000"/>
+ <field name="ip.checksum_bad" showname="Bad: False" size="2" pos="10" show="0" value="0000"/>
+ </field>
+ <field name="ip.src" showname="Source: 127.0.0.11" size="4" pos="12" show="127.0.0.11" value="7f00000b"/>
+ <field name="ip.addr" showname="Source or Destination Address: 127.0.0.11" hide="yes" size="4" pos="12" show="127.0.0.11" value="7f00000b"/>
+ <field name="ip.src_host" showname="Source Host: 127.0.0.11" hide="yes" size="4" pos="12" show="127.0.0.11" value="7f00000b"/>
+ <field name="ip.host" showname="Source or Destination Host: 127.0.0.11" hide="yes" size="4" pos="12" show="127.0.0.11" value="7f00000b"/>
+ <field name="ip.dst" showname="Destination: 127.0.0.21" size="4" pos="16" show="127.0.0.21" value="7f000015"/>
+ <field name="ip.addr" showname="Source or Destination Address: 127.0.0.21" hide="yes" size="4" pos="16" show="127.0.0.21" value="7f000015"/>
+ <field name="ip.dst_host" showname="Destination Host: 127.0.0.21" hide="yes" size="4" pos="16" show="127.0.0.21" value="7f000015"/>
+ <field name="ip.host" showname="Source or Destination Host: 127.0.0.21" hide="yes" size="4" pos="16" show="127.0.0.21" value="7f000015"/>
+ <field name="" show="Source GeoIP: Unknown" size="4" pos="12" value="7f00000b"/>
+ <field name="" show="Destination GeoIP: Unknown" size="4" pos="16" value="7f000015"/>
+ </proto>
+ <proto name="tcp" showname="Transmission Control Protocol, Src Port: 14849 (14849), Dst Port: 389 (389), Seq: 6557, Ack: 992, Len: 139" size="20" pos="20">
+ <field name="tcp.srcport" showname="Source Port: 14849" size="2" pos="20" show="14849" value="3a01"/>
+ <field name="tcp.dstport" showname="Destination Port: 389" size="2" pos="22" show="389" value="0185"/>
+ <field name="tcp.port" showname="Source or Destination Port: 14849" hide="yes" size="2" pos="20" show="14849" value="3a01"/>
+ <field name="tcp.port" showname="Source or Destination Port: 389" hide="yes" size="2" pos="22" show="389" value="0185"/>
+ <field name="tcp.stream" showname="Stream index: 92" size="0" pos="20" show="92"/>
+ <field name="tcp.len" showname="TCP Segment Len: 139" size="1" pos="32" show="139" value="50"/>
+ <field name="tcp.seq" showname="Sequence number: 6557 (relative sequence number)" size="4" pos="24" show="6557" value="0000199d"/>
+ <field name="tcp.nxtseq" showname="Next sequence number: 6696 (relative sequence number)" size="0" pos="20" show="6696"/>
+ <field name="tcp.ack" showname="Acknowledgment number: 992 (relative ack number)" size="4" pos="28" show="992" value="000003e0"/>
+ <field name="tcp.hdr_len" showname="Header Length: 20 bytes" size="1" pos="32" show="20" value="50"/>
+ <field name="tcp.flags" showname="Flags: 0x018 (PSH, ACK)" size="2" pos="32" show="0x00000018" value="18" unmaskedvalue="5018">
+ <field name="tcp.flags.res" showname="000. .... .... = Reserved: Not set" size="1" pos="32" show="0" value="0" unmaskedvalue="50"/>
+ <field name="tcp.flags.ns" showname="...0 .... .... = Nonce: Not set" size="1" pos="32" show="0" value="0" unmaskedvalue="50"/>
+ <field name="tcp.flags.cwr" showname=".... 0... .... = Congestion Window Reduced (CWR): Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.ecn" showname=".... .0.. .... = ECN-Echo: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.urg" showname=".... ..0. .... = Urgent: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.ack" showname=".... ...1 .... = Acknowledgment: Set" size="1" pos="33" show="1" value="FFFFFFFF" unmaskedvalue="18"/>
+ <field name="tcp.flags.push" showname=".... .... 1... = Push: Set" size="1" pos="33" show="1" value="FFFFFFFF" unmaskedvalue="18"/>
+ <field name="tcp.flags.reset" showname=".... .... .0.. = Reset: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.syn" showname=".... .... ..0. = Syn: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.fin" showname=".... .... ...0 = Fin: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.str" showname="TCP Flags: *******AP***" size="2" pos="32" show="*******AP***" value="5018"/>
+ </field>
+ <field name="tcp.window_size_value" showname="Window size value: 32767" size="2" pos="34" show="32767" value="7fff"/>
+ <field name="tcp.window_size" showname="Calculated window size: 32767" size="2" pos="34" show="32767" value="7fff"/>
+ <field name="tcp.window_size_scalefactor" showname="Window size scaling factor: -2 (no window scaling used)" size="2" pos="34" show="-2" value="7fff"/>
+ <field name="tcp.checksum" showname="Checksum: 0x0000 [validation disabled]" size="2" pos="36" show="0x00000000" value="0000">
+ <field name="tcp.checksum_good" showname="Good Checksum: False" size="2" pos="36" show="0" value="0000"/>
+ <field name="tcp.checksum_bad" showname="Bad Checksum: False" size="2" pos="36" show="0" value="0000"/>
+ </field>
+ <field name="tcp.urgent_pointer" showname="Urgent pointer: 0" size="2" pos="38" show="0" value="0000"/>
+ <field name="tcp.analysis" showname="SEQ/ACK analysis" size="0" pos="20" show="" value="">
+ <field name="tcp.analysis.acks_frame" showname="This is an ACK to the segment in frame: 4573" size="0" pos="20" show="4573"/>
+ <field name="tcp.analysis.ack_rtt" showname="The RTT to ACK the segment was: 0.000145000 seconds" size="0" pos="20" show="0.000145000"/>
+ <field name="tcp.analysis.initial_rtt" showname="iRTT: 0.000019000 seconds" size="0" pos="20" show="0.000019000"/>
+ <field name="tcp.analysis.bytes_in_flight" showname="Bytes in flight: 139" size="0" pos="20" show="139"/>
+ </field>
+ <field name="tcp.pdu.size" showname="PDU Size: 139" size="139" pos="40" show="139" value="00000087050404ff000c000c000000001b8a1304757134702161c76a250240643069020106636404443c574b475549443d32464241433138373041444531314432393743343030433034464438443543442c44433d73616d62612c44433d6578616d706c652c44433d636f6d3e0a01020a0100020100020100010100870b6f626a656374436c6173733000"/>
+ </proto>
+ <proto name="ldap" showname="Lightweight Directory Access Protocol" size="139" pos="40">
+ <field name="ldap.sasl_buffer_length" showname="SASL Buffer Length: 135" size="4" pos="40" show="135" value="00000087"/>
+ <field name="" show="SASL Buffer" size="139" pos="40" value="00000087050404ff000c000c000000001b8a1304757134702161c76a250240643069020106636404443c574b475549443d32464241433138373041444531314432393743343030433034464438443543442c44433d73616d62612c44433d6578616d706c652c44433d636f6d3e0a01020a0100020100020100010100870b6f626a656374436c6173733000">
+ <proto name="gss-api" showname="GSS-API Generic Security Service Application Program Interface" size="28" pos="44">
+ <field name="spnego.krb5.blob" showname="krb5_blob: 050404ff000c000c000000001b8a1304757134702161c76a..." size="28" pos="44" show="05:04:04:ff:00:0c:00:0c:00:00:00:00:1b:8a:13:04:75:71:34:70:21:61:c7:6a:25:02:40:64" value="050404ff000c000c000000001b8a1304757134702161c76a25024064">
+ <field name="spnego.krb5.tok_id" showname="krb5_tok_id: KRB_TOKEN_CFX_WRAP (0x0405)" size="2" pos="44" show="0x00000405" value="0504"/>
+ <field name="spnego.krb5.cfx_flags" showname="krb5_cfx_flags: 0x04, AcceptorSubkey" size="1" pos="46" show="0x00000004" value="04">
+ <field name="spnego.krb5.acceptor_subkey" showname=".... .1.. = AcceptorSubkey: Set" size="1" pos="46" show="1" value="FFFFFFFF" unmaskedvalue="04"/>
+ <field name="spnego.krb5.sealed" showname=".... ..0. = Sealed: Not set" size="1" pos="46" show="0" value="0" unmaskedvalue="04"/>
+ <field name="spnego.krb5.send_by_acceptor" showname=".... ...0 = SendByAcceptor: Not set" size="1" pos="46" show="0" value="0" unmaskedvalue="04"/>
+ </field>
+ <field name="spnego.krb5.filler" showname="krb5_filler: ff" size="1" pos="47" show="ff" value="ff"/>
+ <field name="spnego.krb5.cfx_ec" showname="krb5_cfx_ec: 12" size="2" pos="48" show="12" value="000c"/>
+ <field name="spnego.krb5.cfx_rrc" showname="krb5_cfx_rrc: 12" size="2" pos="50" show="12" value="000c"/>
+ <field name="spnego.krb5.cfx_seq" showname="krb5_cfx_seq: 462033668" size="8" pos="52" show="462033668" value="000000001b8a1304"/>
+ <field name="spnego.krb5.sgn_cksum" showname="krb5_sgn_cksum: 757134702161c76a25024064" size="12" pos="60" show="75:71:34:70:21:61:c7:6a:25:02:40:64" value="757134702161c76a25024064"/>
+ </field>
+ </proto>
+ <field name="" show="GSS-API payload (107 bytes)" size="107" pos="72" value="3069020106636404443c574b475549443d32464241433138373041444531314432393743343030433034464438443543442c44433d73616d62612c44433d6578616d706c652c44433d636f6d3e0a01020a0100020100020100010100870b6f626a656374436c6173733000">
+ <field name="ldap.LDAPMessage_element" showname="LDAPMessage searchRequest(6) &quot;&lt;WKGUID=2FBAC1870ADE11D297C400C04FD8D5CD,DC=samba,DC=example,DC=com&gt;&quot; wholeSubtree" size="107" pos="72" show="" value="">
+ <field name="ldap.messageID" showname="messageID: 6" size="1" pos="76" show="6" value="06"/>
+ <field name="ldap.protocolOp" showname="protocolOp: searchRequest (3)" size="102" pos="77" show="3" value="636404443c574b475549443d32464241433138373041444531314432393743343030433034464438443543442c44433d73616d62612c44433d6578616d706c652c44433d636f6d3e0a01020a0100020100020100010100870b6f626a656374436c6173733000">
+ <field name="ldap.searchRequest_element" showname="searchRequest" size="100" pos="79" show="" value="">
+ <field name="ldap.baseObject" showname="baseObject: &lt;WKGUID=2FBAC1870ADE11D297C400C04FD8D5CD,DC=samba,DC=example,DC=com&gt;" size="68" pos="81" show="&lt;WKGUID=2FBAC1870ADE11D297C400C04FD8D5CD,DC=samba,DC=example,DC=com&gt;" value="3c574b475549443d32464241433138373041444531314432393743343030433034464438443543442c44433d73616d62612c44433d6578616d706c652c44433d636f6d3e"/>
+ <field name="ldap.scope" showname="scope: wholeSubtree (2)" size="1" pos="151" show="2" value="02"/>
+ <field name="ldap.derefAliases" showname="derefAliases: neverDerefAliases (0)" size="1" pos="154" show="0" value="00"/>
+ <field name="ldap.sizeLimit" showname="sizeLimit: 0" size="1" pos="157" show="0" value="00"/>
+ <field name="ldap.timeLimit" showname="timeLimit: 0" size="1" pos="160" show="0" value="00"/>
+ <field name="ldap.typesOnly" showname="typesOnly: False" size="1" pos="163" show="0" value="00"/>
+ <field name="" show="Filter: (objectClass=*)" size="13" pos="164" value="870b6f626a656374436c617373">
+ <field name="ldap.filter" showname="filter: present (7)" size="11" pos="166" show="7" value="6f626a656374436c617373">
+ <field name="ldap.present" showname="present: objectClass" size="11" pos="166" show="objectClass" value="6f626a656374436c617373"/>
+ </field>
+ </field>
+ <field name="ldap.attributes" showname="attributes: 0 items" size="0" pos="179" show="0"/>
+ </field>
+ </field>
+ </field>
+ </field>
+ </field>
+ </proto>
+</packet>
+
+<packet>
+ <proto name="geninfo" pos="0" showname="General information" size="167">
+ <field name="num" pos="0" show="462" showname="Number" value="1ce" size="167"/>
+ <field name="len" pos="0" show="167" showname="Frame Length" value="a7" size="167"/>
+ <field name="caplen" pos="0" show="167" showname="Captured Length" value="a7" size="167"/>
+ <field name="timestamp" pos="0" show="Feb 13, 2017 10:17:16.150107000 NZDT" showname="Captured Time" value="1486934236.150107000" size="167"/>
+ </proto>
+ <proto name="frame" showname="Frame 462: 167 bytes on wire (1336 bits), 167 bytes captured (1336 bits)" size="167" pos="0">
+ <field name="frame.encap_type" showname="Encapsulation type: Raw IP (7)" size="0" pos="0" show="7"/>
+ <field name="frame.time" showname="Arrival Time: Feb 13, 2017 10:17:16.150107000 NZDT" size="0" pos="0" show="Feb 13, 2017 10:17:16.150107000 NZDT"/>
+ <field name="frame.offset_shift" showname="Time shift for this packet: 0.000000000 seconds" size="0" pos="0" show="0.000000000"/>
+ <field name="frame.time_epoch" showname="Epoch Time: 1486934236.150107000 seconds" size="0" pos="0" show="1486934236.150107000"/>
+ <field name="frame.time_delta" showname="Time delta from previous captured frame: 0.000165000 seconds" size="0" pos="0" show="0.000165000"/>
+ <field name="frame.time_delta_displayed" showname="Time delta from previous displayed frame: 0.000165000 seconds" size="0" pos="0" show="0.000165000"/>
+ <field name="frame.time_relative" showname="Time since reference or first frame: 465.527666000 seconds" size="0" pos="0" show="465.527666000"/>
+ <field name="frame.number" showname="Frame Number: 462" size="0" pos="0" show="462"/>
+ <field name="frame.len" showname="Frame Length: 167 bytes (1336 bits)" size="0" pos="0" show="167"/>
+ <field name="frame.cap_len" showname="Capture Length: 167 bytes (1336 bits)" size="0" pos="0" show="167"/>
+ <field name="frame.marked" showname="Frame is marked: False" size="0" pos="0" show="0"/>
+ <field name="frame.ignored" showname="Frame is ignored: False" size="0" pos="0" show="0"/>
+ <field name="frame.protocols" showname="Protocols in frame: raw:ipv6:tcp:nbss:smb" size="0" pos="0" show="raw:ipv6:tcp:nbss:smb"/>
+ </proto>
+ <proto name="raw" showname="Raw packet data" size="167" pos="0"/>
+ <proto name="ipv6" showname="Internet Protocol Version 6, Src: fd00::5357:5f03, Dst: fd00::5357:5f0b" size="40" pos="0">
+ <field name="ipv6.version" showname="0110 .... = Version: 6" size="1" pos="0" show="6" value="6" unmaskedvalue="60"/>
+ <field name="ip.version" showname="0110 .... = Version: 6 [This field makes the filter match on &quot;ip.version == 6&quot; possible]" hide="yes" size="1" pos="0" show="6" value="6" unmaskedvalue="60"/>
+ <field name="ipv6.tclass" showname=".... 0000 0000 .... .... .... .... .... = Traffic class: 0x00 (DSCP: CS0, ECN: Not-ECT)" size="4" pos="0" show="0x00000000" value="0" unmaskedvalue="60000000">
+ <field name="ipv6.tclass.dscp" showname=".... 0000 00.. .... .... .... .... .... = Differentiated Services Codepoint: Default (0)" size="4" pos="0" show="0" value="0" unmaskedvalue="60000000"/>
+ <field name="ipv6.tclass.ecn" showname=".... .... ..00 .... .... .... .... .... = Explicit Congestion Notification: Not ECN-Capable Transport (0)" size="4" pos="0" show="0" value="0" unmaskedvalue="60000000"/>
+ </field>
+ <field name="ipv6.flow" showname=".... .... .... 0000 0000 0000 0000 0000 = Flowlabel: 0x00000000" size="4" pos="0" show="0x00000000" value="0" unmaskedvalue="60000000"/>
+ <field name="ipv6.plen" showname="Payload length: 167" size="2" pos="4" show="167" value="00a7">
+ <field name="_ws.expert" showname="Expert Info (Warn/Protocol): IPv6 payload length exceeds framing length (127 bytes)" size="0" pos="4">
+ <field name="ipv6.bogus_payload_length" showname="IPv6 payload length exceeds framing length (127 bytes)" size="0" pos="0" show="" value=""/>
+ <field name="_ws.expert.message" showname="Message: IPv6 payload length exceeds framing length (127 bytes)" hide="yes" size="0" pos="0" show="IPv6 payload length exceeds framing length (127 bytes)"/>
+ <field name="_ws.expert.severity" showname="Severity level: Warn" size="0" pos="0" show="0x00600000"/>
+ <field name="_ws.expert.group" showname="Group: Protocol" size="0" pos="0" show="0x09000000"/>
+ </field>
+ </field>
+ <field name="ipv6.nxt" showname="Next header: TCP (6)" size="1" pos="6" show="6" value="06"/>
+ <field name="ipv6.hlim" showname="Hop limit: 0" size="1" pos="7" show="0" value="00"/>
+ <field name="ipv6.src" showname="Source: fd00::5357:5f03" size="16" pos="8" show="fd00::5357:5f03" value="fd000000000000000000000053575f03"/>
+ <field name="ipv6.addr" showname="Source or Destination Address: fd00::5357:5f03" hide="yes" size="16" pos="8" show="fd00::5357:5f03" value="fd000000000000000000000053575f03"/>
+ <field name="ipv6.src_host" showname="Source Host: fd00::5357:5f03" hide="yes" size="16" pos="8" show="fd00::5357:5f03" value="fd000000000000000000000053575f03"/>
+ <field name="ipv6.host" showname="Source or Destination Host: fd00::5357:5f03" hide="yes" size="16" pos="8" show="fd00::5357:5f03" value="fd000000000000000000000053575f03"/>
+ <field name="ipv6.dst" showname="Destination: fd00::5357:5f0b" size="16" pos="24" show="fd00::5357:5f0b" value="fd000000000000000000000053575f0b"/>
+ <field name="ipv6.addr" showname="Source or Destination Address: fd00::5357:5f0b" hide="yes" size="16" pos="24" show="fd00::5357:5f0b" value="fd000000000000000000000053575f0b"/>
+ <field name="ipv6.dst_host" showname="Destination Host: fd00::5357:5f0b" hide="yes" size="16" pos="24" show="fd00::5357:5f0b" value="fd000000000000000000000053575f0b"/>
+ <field name="ipv6.host" showname="Source or Destination Host: fd00::5357:5f0b" hide="yes" size="16" pos="24" show="fd00::5357:5f0b" value="fd000000000000000000000053575f0b"/>
+ <field name="" show="Source GeoIP: Unknown" size="16" pos="8" value="fd000000000000000000000053575f03"/>
+ <field name="" show="Destination GeoIP: Unknown" size="16" pos="24" value="fd000000000000000000000053575f0b"/>
+ </proto>
+ <proto name="tcp" showname="Transmission Control Protocol, Src Port: 139 (139), Dst Port: 31861 (31861), Seq: 822, Ack: 847, Len: 107" size="20" pos="40">
+ <field name="tcp.srcport" showname="Source Port: 139" size="2" pos="40" show="139" value="008b"/>
+ <field name="tcp.dstport" showname="Destination Port: 31861" size="2" pos="42" show="31861" value="7c75"/>
+ <field name="tcp.port" showname="Source or Destination Port: 139" hide="yes" size="2" pos="40" show="139" value="008b"/>
+ <field name="tcp.port" showname="Source or Destination Port: 31861" hide="yes" size="2" pos="42" show="31861" value="7c75"/>
+ <field name="tcp.stream" showname="Stream index: 6" size="0" pos="40" show="6"/>
+ <field name="tcp.len" showname="TCP Segment Len: 107" size="1" pos="52" show="107" value="50"/>
+ <field name="tcp.seq" showname="Sequence number: 822 (relative sequence number)" size="4" pos="44" show="822" value="00000336"/>
+ <field name="tcp.nxtseq" showname="Next sequence number: 929 (relative sequence number)" size="0" pos="40" show="929"/>
+ <field name="tcp.ack" showname="Acknowledgment number: 847 (relative ack number)" size="4" pos="48" show="847" value="0000034f"/>
+ <field name="tcp.hdr_len" showname="Header Length: 20 bytes" size="1" pos="52" show="20" value="50"/>
+ <field name="tcp.flags" showname="Flags: 0x018 (PSH, ACK)" size="2" pos="52" show="0x00000018" value="18" unmaskedvalue="5018">
+ <field name="tcp.flags.res" showname="000. .... .... = Reserved: Not set" size="1" pos="52" show="0" value="0" unmaskedvalue="50"/>
+ <field name="tcp.flags.ns" showname="...0 .... .... = Nonce: Not set" size="1" pos="52" show="0" value="0" unmaskedvalue="50"/>
+ <field name="tcp.flags.cwr" showname=".... 0... .... = Congestion Window Reduced (CWR): Not set" size="1" pos="53" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.ecn" showname=".... .0.. .... = ECN-Echo: Not set" size="1" pos="53" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.urg" showname=".... ..0. .... = Urgent: Not set" size="1" pos="53" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.ack" showname=".... ...1 .... = Acknowledgment: Set" size="1" pos="53" show="1" value="FFFFFFFF" unmaskedvalue="18"/>
+ <field name="tcp.flags.push" showname=".... .... 1... = Push: Set" size="1" pos="53" show="1" value="FFFFFFFF" unmaskedvalue="18"/>
+ <field name="tcp.flags.reset" showname=".... .... .0.. = Reset: Not set" size="1" pos="53" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.syn" showname=".... .... ..0. = Syn: Not set" size="1" pos="53" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.fin" showname=".... .... ...0 = Fin: Not set" size="1" pos="53" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.str" showname="TCP Flags: *******AP***" size="2" pos="52" show="*******AP***" value="5018"/>
+ </field>
+ <field name="tcp.window_size_value" showname="Window size value: 32767" size="2" pos="54" show="32767" value="7fff"/>
+ <field name="tcp.window_size" showname="Calculated window size: 32767" size="2" pos="54" show="32767" value="7fff"/>
+ <field name="tcp.window_size_scalefactor" showname="Window size scaling factor: -2 (no window scaling used)" size="2" pos="54" show="-2" value="7fff"/>
+ <field name="tcp.checksum" showname="Checksum: 0x0000 [validation disabled]" size="2" pos="56" show="0x00000000" value="0000">
+ <field name="tcp.checksum_good" showname="Good Checksum: False" size="2" pos="56" show="0" value="0000"/>
+ <field name="tcp.checksum_bad" showname="Bad Checksum: False" size="2" pos="56" show="0" value="0000"/>
+ </field>
+ <field name="tcp.urgent_pointer" showname="Urgent pointer: 0" size="2" pos="58" show="0" value="0000"/>
+ <field name="tcp.analysis" showname="SEQ/ACK analysis" size="0" pos="40" show="" value="">
+ <field name="tcp.analysis.acks_frame" showname="This is an ACK to the segment in frame: 459" size="0" pos="40" show="459"/>
+ <field name="tcp.analysis.ack_rtt" showname="The RTT to ACK the segment was: 0.000204000 seconds" size="0" pos="40" show="0.000204000"/>
+ <field name="tcp.analysis.initial_rtt" showname="iRTT: 0.000024000 seconds" size="0" pos="40" show="0.000024000"/>
+ <field name="tcp.analysis.bytes_in_flight" showname="Bytes in flight: 107" size="0" pos="40" show="107"/>
+ </field>
+ </proto>
+ <proto name="nbss" showname="NetBIOS Session Service" size="107" pos="60">
+ <field name="nbss.type" showname="Message Type: Session message (0x00)" size="1" pos="60" show="0x00000000" value="00"/>
+ <field name="nbss.length" showname="Length: 103" size="3" pos="61" show="103" value="000067"/>
+ </proto>
+ <proto name="smb" showname="SMB (Server Message Block Protocol)" size="103" pos="64">
+ <field name="" show="SMB Header" size="32" pos="64" value="ff534d42a2000000008803c8000000000000000000000000ac6a6455deec0400">
+ <field name="smb.server_component" showname="Server Component: SMB" size="4" pos="64" show="0x424d53ff" value="ff534d42"/>
+ <field name="smb.response_to" showname="Response to: 459" size="0" pos="64" show="459"/>
+ <field name="smb.time" showname="Time from request: 0.000204000 seconds" size="0" pos="64" show="0.000204000"/>
+ <field name="smb.cmd" showname="SMB Command: NT Create AndX (0xa2)" size="1" pos="68" show="162" value="a2"/>
+ <field name="smb.nt_status" showname="NT Status: STATUS_SUCCESS (0x00000000)" size="4" pos="69" show="0" value="00000000"/>
+ <field name="smb.flags" showname="Flags: 0x88, Request/Response, Case Sensitivity" size="1" pos="73" show="0x00000088" value="88">
+ <field name="smb.flags.response" showname="1... .... = Request/Response: Message is a response to the client/redirector" size="1" pos="73" show="1" value="FFFFFFFF" unmaskedvalue="88"/>
+ <field name="smb.flags.notify" showname=".0.. .... = Notify: Notify client only on open" size="1" pos="73" show="0" value="0" unmaskedvalue="88"/>
+ <field name="smb.flags.oplock" showname="..0. .... = Oplocks: OpLock not requested/granted" size="1" pos="73" show="0" value="0" unmaskedvalue="88"/>
+ <field name="smb.flags.canon" showname="...0 .... = Canonicalized Pathnames: Pathnames are not canonicalized" size="1" pos="73" show="0" value="0" unmaskedvalue="88"/>
+ <field name="smb.flags.caseless" showname=".... 1... = Case Sensitivity: Path names are caseless" size="1" pos="73" show="1" value="FFFFFFFF" unmaskedvalue="88"/>
+ <field name="smb.flags.receive_buffer" showname=".... ..0. = Receive Buffer Posted: Receive buffer has not been posted" size="1" pos="73" show="0" value="0" unmaskedvalue="88"/>
+ <field name="smb.flags.lock" showname=".... ...0 = Lock and Read: Lock&amp;Read, Write&amp;Unlock are not supported" size="1" pos="73" show="0" value="0" unmaskedvalue="88"/>
+ </field>
+ <field name="smb.flags2" showname="Flags2: 0xc803, Unicode Strings, Error Code Type, Extended Security Negotiation, Extended Attributes, Long Names Allowed" size="2" pos="74" show="0x0000c803" value="03c8">
+ <field name="smb.flags2.string" showname="1... .... .... .... = Unicode Strings: Strings are Unicode" size="2" pos="74" show="1" value="FFFFFFFF" unmaskedvalue="03c8"/>
+ <field name="smb.flags2.nt_error" showname=".1.. .... .... .... = Error Code Type: Error codes are NT error codes" size="2" pos="74" show="1" value="FFFFFFFF" unmaskedvalue="03c8"/>
+ <field name="smb.flags2.roe" showname="..0. .... .... .... = Execute-only Reads: Don&#x27;t permit reads if execute-only" size="2" pos="74" show="0" value="0" unmaskedvalue="03c8"/>
+ <field name="smb.flags2.dfs" showname="...0 .... .... .... = Dfs: Don&#x27;t resolve pathnames with Dfs" size="2" pos="74" show="0" value="0" unmaskedvalue="03c8"/>
+ <field name="smb.flags2.esn" showname=".... 1... .... .... = Extended Security Negotiation: Extended security negotiation is supported" size="2" pos="74" show="1" value="FFFFFFFF" unmaskedvalue="03c8"/>
+ <field name="smb.flags2.reparse_path" showname=".... .0.. .... .... = Reparse Path: The request does not use a @GMT reparse path" size="2" pos="74" show="0" value="0" unmaskedvalue="03c8"/>
+ <field name="smb.flags2.long_names_used" showname=".... .... .0.. .... = Long Names Used: Path names in request are not long file names" size="2" pos="74" show="0" value="0" unmaskedvalue="03c8"/>
+ <field name="smb.flags2.sec_sig_required" showname=".... .... ...0 .... = Security Signatures Required: Security signatures are not required" size="2" pos="74" show="0" value="0" unmaskedvalue="03c8"/>
+ <field name="smb.flags2.compressed" showname=".... .... .... 0... = Compressed: Compression is not requested" size="2" pos="74" show="0" value="0" unmaskedvalue="03c8"/>
+ <field name="smb.flags2.sec_sig" showname=".... .... .... .0.. = Security Signatures: Security signatures are not supported" size="2" pos="74" show="0" value="0" unmaskedvalue="03c8"/>
+ <field name="smb.flags2.ea" showname=".... .... .... ..1. = Extended Attributes: Extended attributes are supported" size="2" pos="74" show="1" value="FFFFFFFF" unmaskedvalue="03c8"/>
+ <field name="smb.flags2.long_names_allowed" showname=".... .... .... ...1 = Long Names Allowed: Long file names are allowed in the response" size="2" pos="74" show="1" value="FFFFFFFF" unmaskedvalue="03c8"/>
+ </field>
+ <field name="smb.pid.high" showname="Process ID High: 0" size="2" pos="76" show="0" value="0000"/>
+ <field name="smb.signature" showname="Signature: 0000000000000000" size="8" pos="78" show="00:00:00:00:00:00:00:00" value="0000000000000000"/>
+ <field name="smb.reserved" showname="Reserved: 0000" size="2" pos="86" show="00:00" value="0000"/>
+ <field name="smb.tid" showname="Tree ID: 27308 (\\LOCALNT4DC2\IPC$)" size="2" pos="88" show="27308" value="ac6a">
+ <field name="smb.path" showname="Path: \\LOCALNT4DC2\IPC$" size="0" pos="152" show="\\LOCALNT4DC2\IPC$"/>
+ <field name="smb.fid.mapped_in" showname="Mapped in: 456" size="0" pos="152" show="456"/>
+ </field>
+ <field name="smb.pid" showname="Process ID: 21860" size="2" pos="90" show="21860" value="6455"/>
+ <field name="smb.uid" showname="User ID: 60638" size="2" pos="92" show="60638" value="deec"/>
+ <field name="smb.mid" showname="Multiplex ID: 4" size="2" pos="94" show="4" value="0400"/>
+ </field>
+ <field name="" show="NT Create AndX Response (0xa2)" size="71" pos="96" value="22ff00000000792b01000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000200ff05000000">
+ <field name="smb.wct" showname="Word Count (WCT): 34" size="1" pos="96" show="34" value="22"/>
+ <field name="smb.cmd" showname="AndXCommand: No further commands (0xff)" size="1" pos="97" show="255" value="ff"/>
+ <field name="smb.reserved" showname="Reserved: 00" size="1" pos="98" show="00" value="00"/>
+ <field name="smb.andxoffset" showname="AndXOffset: 0" size="2" pos="99" show="0" value="0000"/>
+ <field name="smb.oplock.level" showname="Oplock level: No oplock granted (0)" size="1" pos="101" show="0" value="00"/>
+ <field name="smb.fid" showname="FID: 0x2b79 (\srvsvc)" size="2" pos="102" show="0x00002b79" value="792b">
+ <field name="smb.fid.opened_in" showname="Opened in: 462" size="0" pos="166" show="462"/>
+ <field name="smb.file" showname="File Name: \srvsvc" size="0" pos="166" show="\srvsvc"/>
+ <field name="smb.create_flags" showname="Create Flags: 0x00000000" size="4" pos="166" show="0x00000000" value="ff534d42">
+ <field name="smb.nt.create.oplock" showname=".... .... .... .... .... .... .... ..0. = Exclusive Oplock: Does NOT request oplock" size="4" pos="64" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.nt.create.batch_oplock" showname=".... .... .... .... .... .... .... .0.. = Batch Oplock: Does NOT request batch oplock" size="4" pos="64" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.nt.create.dir" showname=".... .... .... .... .... .... .... 0... = Create Directory: Target of open can be a file" size="4" pos="64" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.nt.create.ext" showname=".... .... .... .... .... .... ...0 .... = Extended Response: Extended responses NOT required" size="4" pos="64" show="0" value="0" unmaskedvalue="ff534d42"/>
+ </field>
+ <field name="smb.access_mask" showname="Access Mask: 0x0002019f" size="4" pos="166" show="0x0002019f" value="ff534d42">
+ <field name="smb.access.read" showname=".... .... .... .... .... .... .... ...1 = Read: READ access" size="4" pos="64" show="1" value="FFFFFFFF" unmaskedvalue="ff534d42"/>
+ <field name="smb.access.write" showname=".... .... .... .... .... .... .... ..1. = Write: WRITE access" size="4" pos="64" show="1" value="FFFFFFFF" unmaskedvalue="ff534d42"/>
+ <field name="smb.access.append" showname=".... .... .... .... .... .... .... .1.. = Append: APPEND access" size="4" pos="64" show="1" value="FFFFFFFF" unmaskedvalue="ff534d42"/>
+ <field name="smb.access.read_ea" showname=".... .... .... .... .... .... .... 1... = Read EA: READ EXTENDED ATTRIBUTES access" size="4" pos="64" show="1" value="FFFFFFFF" unmaskedvalue="ff534d42"/>
+ <field name="smb.access.write_ea" showname=".... .... .... .... .... .... ...1 .... = Write EA: WRITE EXTENDED ATTRIBUTES access" size="4" pos="64" show="1" value="FFFFFFFF" unmaskedvalue="ff534d42"/>
+ <field name="smb.access.execute" showname=".... .... .... .... .... .... ..0. .... = Execute: NO execute access" size="4" pos="64" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.access.delete_child" showname=".... .... .... .... .... .... .0.. .... = Delete Child: NO delete child access" size="4" pos="64" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.access.read_attributes" showname=".... .... .... .... .... .... 1... .... = Read Attributes: READ ATTRIBUTES access" size="4" pos="64" show="1" value="FFFFFFFF" unmaskedvalue="ff534d42"/>
+ <field name="smb.access.write_attributes" showname=".... .... .... .... .... ...1 .... .... = Write Attributes: WRITE ATTRIBUTES access" size="4" pos="64" show="1" value="FFFFFFFF" unmaskedvalue="ff534d42"/>
+ <field name="smb.access.delete" showname=".... .... .... ...0 .... .... .... .... = Delete: NO delete access" size="4" pos="64" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.access.read_control" showname=".... .... .... ..1. .... .... .... .... = Read Control: READ ACCESS to owner, group and ACL of the SID" size="4" pos="64" show="1" value="FFFFFFFF" unmaskedvalue="ff534d42"/>
+ <field name="smb.access.write_dac" showname=".... .... .... .0.. .... .... .... .... = Write DAC: Owner may NOT write to the DAC" size="4" pos="64" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.access.write_owner" showname=".... .... .... 0... .... .... .... .... = Write Owner: Can NOT write owner (take ownership)" size="4" pos="64" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.access.synchronize" showname=".... .... ...0 .... .... .... .... .... = Synchronize: Can NOT wait on handle to synchronize on completion of I/O" size="4" pos="64" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.access.system_security" showname=".... ...0 .... .... .... .... .... .... = System Security: System security is NOT set" size="4" pos="64" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.access.maximum_allowed" showname=".... ..0. .... .... .... .... .... .... = Maximum Allowed: Maximum allowed is NOT set" size="4" pos="64" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.access.generic_all" showname="...0 .... .... .... .... .... .... .... = Generic All: Generic all is NOT set" size="4" pos="64" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.access.generic_execute" showname="..0. .... .... .... .... .... .... .... = Generic Execute: Generic execute is NOT set" size="4" pos="64" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.access.generic_write" showname=".0.. .... .... .... .... .... .... .... = Generic Write: Generic write is NOT set" size="4" pos="64" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.access.generic_read" showname="0... .... .... .... .... .... .... .... = Generic Read: Generic read is NOT set" size="4" pos="64" show="0" value="0" unmaskedvalue="ff534d42"/>
+ </field>
+ <field name="smb.file_attribute" showname="File Attributes: 0x00000000" size="4" pos="166" show="0x00000000" value="ff534d42">
+ <field name="smb.file_attribute.read_only" showname=".... .... .... .... .... .... .... ...0 = Read Only: NOT read only" size="4" pos="64" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.file_attribute.hidden" showname=".... .... .... .... .... .... .... ..0. = Hidden: NOT hidden" size="4" pos="64" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.file_attribute.system" showname=".... .... .... .... .... .... .... .0.. = System: NOT a system file/dir" size="4" pos="64" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.file_attribute.volume" showname=".... .... .... .... .... .... .... 0... = Volume ID: NOT a volume ID" size="4" pos="64" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.file_attribute.directory" showname=".... .... .... .... .... .... ...0 .... = Directory: NOT a directory" size="4" pos="64" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.file_attribute.archive" showname=".... .... .... .... .... .... ..0. .... = Archive: Has NOT been modified since last archive" size="4" pos="64" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.file_attribute.device" showname=".... .... .... .... .... .... .0.. .... = Device: NOT a device" size="4" pos="64" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.file_attribute.normal" showname=".... .... .... .... .... .... 0... .... = Normal: Has some attribute set" size="4" pos="64" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.file_attribute.temporary" showname=".... .... .... .... .... ...0 .... .... = Temporary: NOT a temporary file" size="4" pos="64" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.file_attribute.sparse" showname=".... .... .... .... .... ..0. .... .... = Sparse: NOT a sparse file" size="4" pos="64" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.file_attribute.reparse" showname=".... .... .... .... .... .0.. .... .... = Reparse Point: Does NOT have an associated reparse point" size="4" pos="64" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.file_attribute.compressed" showname=".... .... .... .... .... 0... .... .... = Compressed: Uncompressed" size="4" pos="64" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.file_attribute.offline" showname=".... .... .... .... ...0 .... .... .... = Offline: Online" size="4" pos="64" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.file_attribute.not_content_indexed" showname=".... .... .... .... ..0. .... .... .... = Content Indexed: NOT content indexed" size="4" pos="64" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.file_attribute.encrypted" showname=".... .... .... .... .0.. .... .... .... = Encrypted: This is NOT an encrypted file" size="4" pos="64" show="0" value="0" unmaskedvalue="ff534d42"/>
+ </field>
+ <field name="smb.share_access" showname="Share Access: 0x00000003, Read, Write" size="4" pos="166" show="0x00000003" value="ff534d42">
+ <field name="smb.share.access.read" showname=".... .... .... .... .... .... .... ...1 = Read: Object can be shared for READ" size="4" pos="64" show="1" value="FFFFFFFF" unmaskedvalue="ff534d42"/>
+ <field name="smb.share.access.write" showname=".... .... .... .... .... .... .... ..1. = Write: Object can be shared for WRITE" size="4" pos="64" show="1" value="FFFFFFFF" unmaskedvalue="ff534d42"/>
+ <field name="smb.share.access.delete" showname=".... .... .... .... .... .... .... .0.. = Delete: Object can NOT be shared for delete" size="4" pos="64" show="0" value="0" unmaskedvalue="ff534d42"/>
+ </field>
+ <field name="smb.create_options" showname="Create Options: 0x00000000" size="4" pos="166" show="0x00000000" value="ff534d42">
+ <field name="smb.nt.create_options.directory" showname=".... .... .... .... .... .... .... ...0 = Directory: File being created/opened must not be a directory" size="4" pos="64" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.nt.create_options.write_through" showname=".... .... .... .... .... .... .... ..0. = Write Through: Writes need not flush buffered data before completing" size="4" pos="64" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.nt.create_options.sequential_only" showname=".... .... .... .... .... .... .... .0.. = Sequential Only: The file might not only be accessed sequentially" size="4" pos="64" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.nt.create_options.intermediate_buffering" showname=".... .... .... .... .... .... .... 0... = Intermediate Buffering: Intermediate buffering is allowed" size="4" pos="64" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.nt.create_options.sync_io_alert" showname=".... .... .... .... .... .... ...0 .... = Sync I/O Alert: Operations NOT necessarily synchronous" size="4" pos="64" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.nt.create_options.sync_io_nonalert" showname=".... .... .... .... .... .... ..0. .... = Sync I/O Nonalert: Operations NOT necessarily synchronous" size="4" pos="64" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.nt.create_options.non_directory" showname=".... .... .... .... .... .... .0.. .... = Non-Directory: File being created/opened must be a directory" size="4" pos="64" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.nt.create_options.create_tree_connection" showname=".... .... .... .... .... .... 0... .... = Create Tree Connection: Create Tree Connections is NOT set" size="4" pos="64" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.nt.create_options.complete_if_oplocked" showname=".... .... .... .... .... ...0 .... .... = Complete If Oplocked: Complete if oplocked is NOT set" size="4" pos="64" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.nt.create_options.no_ea_knowledge" showname=".... .... .... .... .... ..0. .... .... = No EA Knowledge: The client understands extended attributes" size="4" pos="64" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.nt.create_options.eight_dot_three_only" showname=".... .... .... .... .... .0.. .... .... = 8.3 Only: The client understands long file names" size="4" pos="64" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.nt.create_options.random_access" showname=".... .... .... .... .... 0... .... .... = Random Access: The file will not be accessed randomly" size="4" pos="64" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.nt.create_options.delete_on_close" showname=".... .... .... .... ...0 .... .... .... = Delete On Close: The file should not be deleted when it is closed" size="4" pos="64" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.nt.create_options.open_by_fileid" showname=".... .... .... .... ..0. .... .... .... = Open By FileID: OpenByFileID is NOT set" size="4" pos="64" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.nt.create_options.backup_intent" showname=".... .... .... .... .0.. .... .... .... = Backup Intent: This is a normal create" size="4" pos="64" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.nt.create_options.no_compression" showname=".... .... .... .... 0... .... .... .... = No Compression: Compression is allowed for Open/Create" size="4" pos="64" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.nt.create_options.reserve_opfilter" showname=".... .... ...0 .... .... .... .... .... = Reserve Opfilter: Reserve Opfilter is NOT set" size="4" pos="64" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.nt.create_options.open_reparse_point" showname=".... .... ..0. .... .... .... .... .... = Open Reparse Point: Normal open" size="4" pos="64" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.nt.create_options.open_no_recall" showname=".... .... .0.. .... .... .... .... .... = Open No Recall: Open no recall is NOT set" size="4" pos="64" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.nt.create_options.open_for_free_space_query" showname=".... .... 0... .... .... .... .... .... = Open For Free Space query: This is NOT an open for free space query" size="4" pos="64" show="0" value="0" unmaskedvalue="ff534d42"/>
+ </field>
+ <field name="smb.create.disposition" showname="Disposition: Open (if file exists open it, else fail) (1)" size="0" pos="166" show="1"/>
+ </field>
+ <field name="smb.create.action" showname="Create action: The file existed and was opened (1)" size="4" pos="104" show="1" value="01000000"/>
+ <field name="smb.create.time" showname="Created: No time specified (0)" size="8" pos="108" show="Jan 1, 1970 12:00:00.000000000 NZST" value="0000000000000000"/>
+ <field name="smb.access.time" showname="Last Access: No time specified (0)" size="8" pos="116" show="Jan 1, 1970 12:00:00.000000000 NZST" value="0000000000000000"/>
+ <field name="smb.last_write.time" showname="Last Write: No time specified (0)" size="8" pos="124" show="Jan 1, 1970 12:00:00.000000000 NZST" value="0000000000000000"/>
+ <field name="smb.change.time" showname="Change: No time specified (0)" size="8" pos="132" show="Jan 1, 1970 12:00:00.000000000 NZST" value="0000000000000000"/>
+ <field name="smb.file_attribute" showname="File Attributes: 0x00000080" size="4" pos="140" show="0x00000080" value="80000000">
+ <field name="smb.file_attribute.read_only" showname=".... .... .... .... .... .... .... ...0 = Read Only: NOT read only" size="4" pos="140" show="0" value="0" unmaskedvalue="80000000"/>
+ <field name="smb.file_attribute.hidden" showname=".... .... .... .... .... .... .... ..0. = Hidden: NOT hidden" size="4" pos="140" show="0" value="0" unmaskedvalue="80000000"/>
+ <field name="smb.file_attribute.system" showname=".... .... .... .... .... .... .... .0.. = System: NOT a system file/dir" size="4" pos="140" show="0" value="0" unmaskedvalue="80000000"/>
+ <field name="smb.file_attribute.volume" showname=".... .... .... .... .... .... .... 0... = Volume ID: NOT a volume ID" size="4" pos="140" show="0" value="0" unmaskedvalue="80000000"/>
+ <field name="smb.file_attribute.directory" showname=".... .... .... .... .... .... ...0 .... = Directory: NOT a directory" size="4" pos="140" show="0" value="0" unmaskedvalue="80000000"/>
+ <field name="smb.file_attribute.archive" showname=".... .... .... .... .... .... ..0. .... = Archive: Has NOT been modified since last archive" size="4" pos="140" show="0" value="0" unmaskedvalue="80000000"/>
+ <field name="smb.file_attribute.device" showname=".... .... .... .... .... .... .0.. .... = Device: NOT a device" size="4" pos="140" show="0" value="0" unmaskedvalue="80000000"/>
+ <field name="smb.file_attribute.normal" showname=".... .... .... .... .... .... 1... .... = Normal: An ordinary file/dir" size="4" pos="140" show="1" value="FFFFFFFF" unmaskedvalue="80000000"/>
+ <field name="smb.file_attribute.temporary" showname=".... .... .... .... .... ...0 .... .... = Temporary: NOT a temporary file" size="4" pos="140" show="0" value="0" unmaskedvalue="80000000"/>
+ <field name="smb.file_attribute.sparse" showname=".... .... .... .... .... ..0. .... .... = Sparse: NOT a sparse file" size="4" pos="140" show="0" value="0" unmaskedvalue="80000000"/>
+ <field name="smb.file_attribute.reparse" showname=".... .... .... .... .... .0.. .... .... = Reparse Point: Does NOT have an associated reparse point" size="4" pos="140" show="0" value="0" unmaskedvalue="80000000"/>
+ <field name="smb.file_attribute.compressed" showname=".... .... .... .... .... 0... .... .... = Compressed: Uncompressed" size="4" pos="140" show="0" value="0" unmaskedvalue="80000000"/>
+ <field name="smb.file_attribute.offline" showname=".... .... .... .... ...0 .... .... .... = Offline: Online" size="4" pos="140" show="0" value="0" unmaskedvalue="80000000"/>
+ <field name="smb.file_attribute.not_content_indexed" showname=".... .... .... .... ..0. .... .... .... = Content Indexed: NOT content indexed" size="4" pos="140" show="0" value="0" unmaskedvalue="80000000"/>
+ <field name="smb.file_attribute.encrypted" showname=".... .... .... .... .0.. .... .... .... = Encrypted: This is NOT an encrypted file" size="4" pos="140" show="0" value="0" unmaskedvalue="80000000"/>
+ </field>
+ <field name="smb.alloc_size" showname="Allocation Size: 0" size="8" pos="144" show="0" value="0000000000000000"/>
+ <field name="smb.end_of_file" showname="End Of File: 0" size="8" pos="152" show="0" value="0000000000000000"/>
+ <field name="smb.file_type" showname="File Type: Named pipe in message mode (2)" size="2" pos="160" show="2" value="0200"/>
+ <field name="smb.ipc_state" showname="IPC State: 0x05ff, Endpoint: Consumer end of pipe, Pipe Type: Message pipe, Read Mode: Read messages from pipe" size="2" pos="162" show="0x000005ff" value="ff05">
+ <field name="smb.ipc_state.nonblocking" showname="0... .... .... .... = Nonblocking: Reads/writes block if no data available" size="2" pos="162" show="0" value="0" unmaskedvalue="ff05"/>
+ <field name="smb.ipc_state.endpoint" showname=".0.. .... .... .... = Endpoint: Consumer end of pipe (0)" size="2" pos="162" show="0" value="0" unmaskedvalue="ff05"/>
+ <field name="smb.ipc_state.pipe_type" showname=".... 01.. .... .... = Pipe Type: Message pipe (1)" size="2" pos="162" show="1" value="1" unmaskedvalue="ff05"/>
+ <field name="smb.ipc_state.read_mode" showname=".... ..01 .... .... = Read Mode: Read messages from pipe (1)" size="2" pos="162" show="1" value="1" unmaskedvalue="ff05"/>
+ <field name="smb.ipc_state.icount" showname=".... .... 1111 1111 = Icount: 255" size="2" pos="162" show="255" value="FF" unmaskedvalue="ff05"/>
+ </field>
+ <field name="smb.is_directory" showname="Is Directory: This is NOT a directory (0)" size="1" pos="164" show="0" value="00"/>
+ <field name="smb.bcc" showname="Byte Count (BCC): 0" size="2" pos="165" show="0" value="0000"/>
+ </field>
+ </proto>
+</packet>
+
+<packet>
+ <proto name="geninfo" pos="0" showname="General information" size="220">
+ <field name="num" pos="0" show="465" showname="Number" value="1d1" size="220"/>
+ <field name="len" pos="0" show="220" showname="Frame Length" value="dc" size="220"/>
+ <field name="caplen" pos="0" show="220" showname="Captured Length" value="dc" size="220"/>
+ <field name="timestamp" pos="0" show="Feb 13, 2017 10:17:16.150278000 NZDT" showname="Captured Time" value="1486934236.150278000" size="220"/>
+ </proto>
+ <proto name="frame" showname="Frame 465: 220 bytes on wire (1760 bits), 220 bytes captured (1760 bits)" size="220" pos="0">
+ <field name="frame.encap_type" showname="Encapsulation type: Raw IP (7)" size="0" pos="0" show="7"/>
+ <field name="frame.time" showname="Arrival Time: Feb 13, 2017 10:17:16.150278000 NZDT" size="0" pos="0" show="Feb 13, 2017 10:17:16.150278000 NZDT"/>
+ <field name="frame.offset_shift" showname="Time shift for this packet: 0.000000000 seconds" size="0" pos="0" show="0.000000000"/>
+ <field name="frame.time_epoch" showname="Epoch Time: 1486934236.150278000 seconds" size="0" pos="0" show="1486934236.150278000"/>
+ <field name="frame.time_delta" showname="Time delta from previous captured frame: 0.000134000 seconds" size="0" pos="0" show="0.000134000"/>
+ <field name="frame.time_delta_displayed" showname="Time delta from previous displayed frame: 0.000134000 seconds" size="0" pos="0" show="0.000134000"/>
+ <field name="frame.time_relative" showname="Time since reference or first frame: 465.527837000 seconds" size="0" pos="0" show="465.527837000"/>
+ <field name="frame.number" showname="Frame Number: 465" size="0" pos="0" show="465"/>
+ <field name="frame.len" showname="Frame Length: 220 bytes (1760 bits)" size="0" pos="0" show="220"/>
+ <field name="frame.cap_len" showname="Capture Length: 220 bytes (1760 bits)" size="0" pos="0" show="220"/>
+ <field name="frame.marked" showname="Frame is marked: False" size="0" pos="0" show="0"/>
+ <field name="frame.ignored" showname="Frame is ignored: False" size="0" pos="0" show="0"/>
+ <field name="frame.protocols" showname="Protocols in frame: raw:ipv6:tcp:nbss:smb:dcerpc" size="0" pos="0" show="raw:ipv6:tcp:nbss:smb:dcerpc"/>
+ </proto>
+ <proto name="raw" showname="Raw packet data" size="220" pos="0"/>
+ <proto name="ipv6" showname="Internet Protocol Version 6, Src: fd00::5357:5f0b, Dst: fd00::5357:5f03" size="40" pos="0">
+ <field name="ipv6.version" showname="0110 .... = Version: 6" size="1" pos="0" show="6" value="6" unmaskedvalue="60"/>
+ <field name="ip.version" showname="0110 .... = Version: 6 [This field makes the filter match on &quot;ip.version == 6&quot; possible]" hide="yes" size="1" pos="0" show="6" value="6" unmaskedvalue="60"/>
+ <field name="ipv6.tclass" showname=".... 0000 0000 .... .... .... .... .... = Traffic class: 0x00 (DSCP: CS0, ECN: Not-ECT)" size="4" pos="0" show="0x00000000" value="0" unmaskedvalue="60000000">
+ <field name="ipv6.tclass.dscp" showname=".... 0000 00.. .... .... .... .... .... = Differentiated Services Codepoint: Default (0)" size="4" pos="0" show="0" value="0" unmaskedvalue="60000000"/>
+ <field name="ipv6.tclass.ecn" showname=".... .... ..00 .... .... .... .... .... = Explicit Congestion Notification: Not ECN-Capable Transport (0)" size="4" pos="0" show="0" value="0" unmaskedvalue="60000000"/>
+ </field>
+ <field name="ipv6.flow" showname=".... .... .... 0000 0000 0000 0000 0000 = Flowlabel: 0x00000000" size="4" pos="0" show="0x00000000" value="0" unmaskedvalue="60000000"/>
+ <field name="ipv6.plen" showname="Payload length: 220" size="2" pos="4" show="220" value="00dc">
+ <field name="_ws.expert" showname="Expert Info (Warn/Protocol): IPv6 payload length exceeds framing length (180 bytes)" size="0" pos="4">
+ <field name="ipv6.bogus_payload_length" showname="IPv6 payload length exceeds framing length (180 bytes)" size="0" pos="0" show="" value=""/>
+ <field name="_ws.expert.message" showname="Message: IPv6 payload length exceeds framing length (180 bytes)" hide="yes" size="0" pos="0" show="IPv6 payload length exceeds framing length (180 bytes)"/>
+ <field name="_ws.expert.severity" showname="Severity level: Warn" size="0" pos="0" show="0x00600000"/>
+ <field name="_ws.expert.group" showname="Group: Protocol" size="0" pos="0" show="0x09000000"/>
+ </field>
+ </field>
+ <field name="ipv6.nxt" showname="Next header: TCP (6)" size="1" pos="6" show="6" value="06"/>
+ <field name="ipv6.hlim" showname="Hop limit: 0" size="1" pos="7" show="0" value="00"/>
+ <field name="ipv6.src" showname="Source: fd00::5357:5f0b" size="16" pos="8" show="fd00::5357:5f0b" value="fd000000000000000000000053575f0b"/>
+ <field name="ipv6.addr" showname="Source or Destination Address: fd00::5357:5f0b" hide="yes" size="16" pos="8" show="fd00::5357:5f0b" value="fd000000000000000000000053575f0b"/>
+ <field name="ipv6.src_host" showname="Source Host: fd00::5357:5f0b" hide="yes" size="16" pos="8" show="fd00::5357:5f0b" value="fd000000000000000000000053575f0b"/>
+ <field name="ipv6.host" showname="Source or Destination Host: fd00::5357:5f0b" hide="yes" size="16" pos="8" show="fd00::5357:5f0b" value="fd000000000000000000000053575f0b"/>
+ <field name="ipv6.dst" showname="Destination: fd00::5357:5f03" size="16" pos="24" show="fd00::5357:5f03" value="fd000000000000000000000053575f03"/>
+ <field name="ipv6.addr" showname="Source or Destination Address: fd00::5357:5f03" hide="yes" size="16" pos="24" show="fd00::5357:5f03" value="fd000000000000000000000053575f03"/>
+ <field name="ipv6.dst_host" showname="Destination Host: fd00::5357:5f03" hide="yes" size="16" pos="24" show="fd00::5357:5f03" value="fd000000000000000000000053575f03"/>
+ <field name="ipv6.host" showname="Source or Destination Host: fd00::5357:5f03" hide="yes" size="16" pos="24" show="fd00::5357:5f03" value="fd000000000000000000000053575f03"/>
+ <field name="" show="Source GeoIP: Unknown" size="16" pos="8" value="fd000000000000000000000053575f0b"/>
+ <field name="" show="Destination GeoIP: Unknown" size="16" pos="24" value="fd000000000000000000000053575f03"/>
+ </proto>
+ <proto name="tcp" showname="Transmission Control Protocol, Src Port: 31861 (31861), Dst Port: 139 (139), Seq: 847, Ack: 929, Len: 160" size="20" pos="40">
+ <field name="tcp.srcport" showname="Source Port: 31861" size="2" pos="40" show="31861" value="7c75"/>
+ <field name="tcp.dstport" showname="Destination Port: 139" size="2" pos="42" show="139" value="008b"/>
+ <field name="tcp.port" showname="Source or Destination Port: 31861" hide="yes" size="2" pos="40" show="31861" value="7c75"/>
+ <field name="tcp.port" showname="Source or Destination Port: 139" hide="yes" size="2" pos="42" show="139" value="008b"/>
+ <field name="tcp.stream" showname="Stream index: 6" size="0" pos="40" show="6"/>
+ <field name="tcp.len" showname="TCP Segment Len: 160" size="1" pos="52" show="160" value="50"/>
+ <field name="tcp.seq" showname="Sequence number: 847 (relative sequence number)" size="4" pos="44" show="847" value="0000034f"/>
+ <field name="tcp.nxtseq" showname="Next sequence number: 1007 (relative sequence number)" size="0" pos="40" show="1007"/>
+ <field name="tcp.ack" showname="Acknowledgment number: 929 (relative ack number)" size="4" pos="48" show="929" value="000003a1"/>
+ <field name="tcp.hdr_len" showname="Header Length: 20 bytes" size="1" pos="52" show="20" value="50"/>
+ <field name="tcp.flags" showname="Flags: 0x018 (PSH, ACK)" size="2" pos="52" show="0x00000018" value="18" unmaskedvalue="5018">
+ <field name="tcp.flags.res" showname="000. .... .... = Reserved: Not set" size="1" pos="52" show="0" value="0" unmaskedvalue="50"/>
+ <field name="tcp.flags.ns" showname="...0 .... .... = Nonce: Not set" size="1" pos="52" show="0" value="0" unmaskedvalue="50"/>
+ <field name="tcp.flags.cwr" showname=".... 0... .... = Congestion Window Reduced (CWR): Not set" size="1" pos="53" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.ecn" showname=".... .0.. .... = ECN-Echo: Not set" size="1" pos="53" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.urg" showname=".... ..0. .... = Urgent: Not set" size="1" pos="53" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.ack" showname=".... ...1 .... = Acknowledgment: Set" size="1" pos="53" show="1" value="FFFFFFFF" unmaskedvalue="18"/>
+ <field name="tcp.flags.push" showname=".... .... 1... = Push: Set" size="1" pos="53" show="1" value="FFFFFFFF" unmaskedvalue="18"/>
+ <field name="tcp.flags.reset" showname=".... .... .0.. = Reset: Not set" size="1" pos="53" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.syn" showname=".... .... ..0. = Syn: Not set" size="1" pos="53" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.fin" showname=".... .... ...0 = Fin: Not set" size="1" pos="53" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.str" showname="TCP Flags: *******AP***" size="2" pos="52" show="*******AP***" value="5018"/>
+ </field>
+ <field name="tcp.window_size_value" showname="Window size value: 32767" size="2" pos="54" show="32767" value="7fff"/>
+ <field name="tcp.window_size" showname="Calculated window size: 32767" size="2" pos="54" show="32767" value="7fff"/>
+ <field name="tcp.window_size_scalefactor" showname="Window size scaling factor: -2 (no window scaling used)" size="2" pos="54" show="-2" value="7fff"/>
+ <field name="tcp.checksum" showname="Checksum: 0x0000 [validation disabled]" size="2" pos="56" show="0x00000000" value="0000">
+ <field name="tcp.checksum_good" showname="Good Checksum: False" size="2" pos="56" show="0" value="0000"/>
+ <field name="tcp.checksum_bad" showname="Bad Checksum: False" size="2" pos="56" show="0" value="0000"/>
+ </field>
+ <field name="tcp.urgent_pointer" showname="Urgent pointer: 0" size="2" pos="58" show="0" value="0000"/>
+ <field name="tcp.analysis" showname="SEQ/ACK analysis" size="0" pos="40" show="" value="">
+ <field name="tcp.analysis.acks_frame" showname="This is an ACK to the segment in frame: 462" size="0" pos="40" show="462"/>
+ <field name="tcp.analysis.ack_rtt" showname="The RTT to ACK the segment was: 0.000171000 seconds" size="0" pos="40" show="0.000171000"/>
+ <field name="tcp.analysis.initial_rtt" showname="iRTT: 0.000024000 seconds" size="0" pos="40" show="0.000024000"/>
+ <field name="tcp.analysis.bytes_in_flight" showname="Bytes in flight: 160" size="0" pos="40" show="160"/>
+ </field>
+ </proto>
+ <proto name="nbss" showname="NetBIOS Session Service" size="160" pos="60">
+ <field name="nbss.type" showname="Message Type: Session message (0x00)" size="1" pos="60" show="0x00000000" value="00"/>
+ <field name="nbss.length" showname="Length: 156" size="3" pos="61" show="156" value="00009c"/>
+ </proto>
+ <proto name="smb" showname="SMB (Server Message Block Protocol)" size="156" pos="64">
+ <field name="" show="SMB Header" size="32" pos="64" value="ff534d4225000000001843c8000000000000000000000000ac6a6455deec0500">
+ <field name="smb.server_component" showname="Server Component: SMB" size="4" pos="64" show="0x424d53ff" value="ff534d42"/>
+ <field name="smb.cmd" showname="SMB Command: Trans (0x25)" size="1" pos="68" show="37" value="25"/>
+ <field name="smb.nt_status" showname="NT Status: STATUS_SUCCESS (0x00000000)" size="4" pos="69" show="0" value="00000000"/>
+ <field name="smb.flags" showname="Flags: 0x18, Canonicalized Pathnames, Case Sensitivity" size="1" pos="73" show="0x00000018" value="18">
+ <field name="smb.flags.response" showname="0... .... = Request/Response: Message is a request to the server" size="1" pos="73" show="0" value="0" unmaskedvalue="18"/>
+ <field name="smb.flags.notify" showname=".0.. .... = Notify: Notify client only on open" size="1" pos="73" show="0" value="0" unmaskedvalue="18"/>
+ <field name="smb.flags.oplock" showname="..0. .... = Oplocks: OpLock not requested/granted" size="1" pos="73" show="0" value="0" unmaskedvalue="18"/>
+ <field name="smb.flags.canon" showname="...1 .... = Canonicalized Pathnames: Pathnames are canonicalized" size="1" pos="73" show="1" value="FFFFFFFF" unmaskedvalue="18"/>
+ <field name="smb.flags.caseless" showname=".... 1... = Case Sensitivity: Path names are caseless" size="1" pos="73" show="1" value="FFFFFFFF" unmaskedvalue="18"/>
+ <field name="smb.flags.receive_buffer" showname=".... ..0. = Receive Buffer Posted: Receive buffer has not been posted" size="1" pos="73" show="0" value="0" unmaskedvalue="18"/>
+ <field name="smb.flags.lock" showname=".... ...0 = Lock and Read: Lock&amp;Read, Write&amp;Unlock are not supported" size="1" pos="73" show="0" value="0" unmaskedvalue="18"/>
+ </field>
+ <field name="smb.flags2" showname="Flags2: 0xc843, Unicode Strings, Error Code Type, Extended Security Negotiation, Long Names Used, Extended Attributes, Long Names Allowed" size="2" pos="74" show="0x0000c843" value="43c8">
+ <field name="smb.flags2.string" showname="1... .... .... .... = Unicode Strings: Strings are Unicode" size="2" pos="74" show="1" value="FFFFFFFF" unmaskedvalue="43c8"/>
+ <field name="smb.flags2.nt_error" showname=".1.. .... .... .... = Error Code Type: Error codes are NT error codes" size="2" pos="74" show="1" value="FFFFFFFF" unmaskedvalue="43c8"/>
+ <field name="smb.flags2.roe" showname="..0. .... .... .... = Execute-only Reads: Don&#x27;t permit reads if execute-only" size="2" pos="74" show="0" value="0" unmaskedvalue="43c8"/>
+ <field name="smb.flags2.dfs" showname="...0 .... .... .... = Dfs: Don&#x27;t resolve pathnames with Dfs" size="2" pos="74" show="0" value="0" unmaskedvalue="43c8"/>
+ <field name="smb.flags2.esn" showname=".... 1... .... .... = Extended Security Negotiation: Extended security negotiation is supported" size="2" pos="74" show="1" value="FFFFFFFF" unmaskedvalue="43c8"/>
+ <field name="smb.flags2.reparse_path" showname=".... .0.. .... .... = Reparse Path: The request does not use a @GMT reparse path" size="2" pos="74" show="0" value="0" unmaskedvalue="43c8"/>
+ <field name="smb.flags2.long_names_used" showname=".... .... .1.. .... = Long Names Used: Path names in request are long file names" size="2" pos="74" show="1" value="FFFFFFFF" unmaskedvalue="43c8"/>
+ <field name="smb.flags2.sec_sig_required" showname=".... .... ...0 .... = Security Signatures Required: Security signatures are not required" size="2" pos="74" show="0" value="0" unmaskedvalue="43c8"/>
+ <field name="smb.flags2.compressed" showname=".... .... .... 0... = Compressed: Compression is not requested" size="2" pos="74" show="0" value="0" unmaskedvalue="43c8"/>
+ <field name="smb.flags2.sec_sig" showname=".... .... .... .0.. = Security Signatures: Security signatures are not supported" size="2" pos="74" show="0" value="0" unmaskedvalue="43c8"/>
+ <field name="smb.flags2.ea" showname=".... .... .... ..1. = Extended Attributes: Extended attributes are supported" size="2" pos="74" show="1" value="FFFFFFFF" unmaskedvalue="43c8"/>
+ <field name="smb.flags2.long_names_allowed" showname=".... .... .... ...1 = Long Names Allowed: Long file names are allowed in the response" size="2" pos="74" show="1" value="FFFFFFFF" unmaskedvalue="43c8"/>
+ </field>
+ <field name="smb.pid.high" showname="Process ID High: 0" size="2" pos="76" show="0" value="0000"/>
+ <field name="smb.signature" showname="Signature: 0000000000000000" size="8" pos="78" show="00:00:00:00:00:00:00:00" value="0000000000000000"/>
+ <field name="smb.reserved" showname="Reserved: 0000" size="2" pos="86" show="00:00" value="0000"/>
+ <field name="smb.tid" showname="Tree ID: 27308 (\\LOCALNT4DC2\IPC$)" size="2" pos="88" show="27308" value="ac6a">
+ <field name="smb.path" showname="Path: \\LOCALNT4DC2\IPC$" size="0" pos="152" show="\\LOCALNT4DC2\IPC$"/>
+ <field name="smb.fid.mapped_in" showname="Mapped in: 456" size="0" pos="152" show="456"/>
+ </field>
+ <field name="smb.pid" showname="Process ID: 21860" size="2" pos="90" show="21860" value="6455"/>
+ <field name="smb.uid" showname="User ID: 60638" size="2" pos="92" show="60638" value="deec"/>
+ <field name="smb.mid" showname="Multiplex ID: 5" size="2" pos="94" show="5" value="0500"/>
+ </field>
+ <field name="" show="Trans Request (0x25)" size="124" pos="96" value="10000048000000b81000000000000000000000000054004800540002002600792b5900005c0050004900500045005c000000000005000b03100000004800000001000000b810b810000000000100000000000100c84f324b7016d30112785a47bf6ee18803000000045d888aeb1cc9119fe808002b10486002000000">
+ <field name="smb.wct" showname="Word Count (WCT): 16" size="1" pos="96" show="16" value="10"/>
+ <field name="smb.tpc" showname="Total Parameter Count: 0" size="2" pos="97" show="0" value="0000"/>
+ <field name="smb.tdc" showname="Total Data Count: 72" size="2" pos="99" show="72" value="4800"/>
+ <field name="smb.mpc" showname="Max Parameter Count: 0" size="2" pos="101" show="0" value="0000"/>
+ <field name="smb.mdc" showname="Max Data Count: 4280" size="2" pos="103" show="4280" value="b810"/>
+ <field name="smb.msc" showname="Max Setup Count: 0" size="1" pos="105" show="0" value="00"/>
+ <field name="smb.reserved" showname="Reserved: 00" size="1" pos="106" show="00" value="00"/>
+ <field name="smb.transaction.flags" showname="Flags: 0x0000" size="2" pos="107" show="0x00000000" value="0000">
+ <field name="smb.transaction.flags.owt" showname=".... .... .... ..0. = One Way Transaction: Two way transaction" size="2" pos="107" show="0" value="0" unmaskedvalue="0000"/>
+ <field name="smb.transaction.flags.dtid" showname=".... .... .... ...0 = Disconnect TID: Do NOT disconnect TID" size="2" pos="107" show="0" value="0" unmaskedvalue="0000"/>
+ </field>
+ <field name="smb.timeout" showname="Timeout: Return immediately (0)" size="4" pos="109" show="0" value="00000000"/>
+ <field name="smb.reserved" showname="Reserved: 0000" size="2" pos="113" show="00:00" value="0000"/>
+ <field name="smb.pc" showname="Parameter Count: 0" size="2" pos="115" show="0" value="0000"/>
+ <field name="smb.po" showname="Parameter Offset: 84" size="2" pos="117" show="84" value="5400"/>
+ <field name="smb.dc" showname="Data Count: 72" size="2" pos="119" show="72" value="4800"/>
+ <field name="smb.data_offset" showname="Data Offset: 84" size="2" pos="121" show="84" value="5400"/>
+ <field name="smb.sc" showname="Setup Count: 2" size="1" pos="123" show="2" value="02"/>
+ <field name="smb.reserved" showname="Reserved: 00" size="1" pos="124" show="00" value="00"/>
+ <field name="smb.bcc" showname="Byte Count (BCC): 89" size="2" pos="129" show="89" value="5900"/>
+ <field name="smb.trans_name" showname="Transaction Name: \PIPE\" size="14" pos="132" show="\PIPE\" value="5c0050004900500045005c000000"/>
+ <field name="smb.padding" showname="Padding: 0000" size="2" pos="146" show="00:00" value="0000"/>
+ </field>
+ </proto>
+ <proto name="smb_pipe" showname="SMB Pipe Protocol" size="21" pos="125">
+ <field name="smb_pipe.function" showname="Function: TransactNmPipe (0x0026)" size="2" pos="125" show="0x00000026" value="2600"/>
+ <field name="smb.fid" showname="FID: 0x2b79 (\srvsvc)" size="2" pos="127" show="0x00002b79" value="792b">
+ <field name="smb.fid.opened_in" showname="Opened in: 462" size="0" pos="252" show="462"/>
+ <field name="smb.file" showname="File Name: \srvsvc" size="0" pos="252" show="\srvsvc"/>
+ <field name="smb.create_flags" showname="Create Flags: 0x00000000" size="4" pos="252" show="0x00000000" value="2600792b">
+ <field name="smb.nt.create.oplock" showname=".... .... .... .... .... .... .... ..0. = Exclusive Oplock: Does NOT request oplock" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.nt.create.batch_oplock" showname=".... .... .... .... .... .... .... .0.. = Batch Oplock: Does NOT request batch oplock" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.nt.create.dir" showname=".... .... .... .... .... .... .... 0... = Create Directory: Target of open can be a file" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.nt.create.ext" showname=".... .... .... .... .... .... ...0 .... = Extended Response: Extended responses NOT required" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ </field>
+ <field name="smb.access_mask" showname="Access Mask: 0x0002019f" size="4" pos="252" show="0x0002019f" value="2600792b">
+ <field name="smb.access.read" showname=".... .... .... .... .... .... .... ...1 = Read: READ access" size="4" pos="125" show="1" value="FFFFFFFF" unmaskedvalue="2600792b"/>
+ <field name="smb.access.write" showname=".... .... .... .... .... .... .... ..1. = Write: WRITE access" size="4" pos="125" show="1" value="FFFFFFFF" unmaskedvalue="2600792b"/>
+ <field name="smb.access.append" showname=".... .... .... .... .... .... .... .1.. = Append: APPEND access" size="4" pos="125" show="1" value="FFFFFFFF" unmaskedvalue="2600792b"/>
+ <field name="smb.access.read_ea" showname=".... .... .... .... .... .... .... 1... = Read EA: READ EXTENDED ATTRIBUTES access" size="4" pos="125" show="1" value="FFFFFFFF" unmaskedvalue="2600792b"/>
+ <field name="smb.access.write_ea" showname=".... .... .... .... .... .... ...1 .... = Write EA: WRITE EXTENDED ATTRIBUTES access" size="4" pos="125" show="1" value="FFFFFFFF" unmaskedvalue="2600792b"/>
+ <field name="smb.access.execute" showname=".... .... .... .... .... .... ..0. .... = Execute: NO execute access" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.access.delete_child" showname=".... .... .... .... .... .... .0.. .... = Delete Child: NO delete child access" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.access.read_attributes" showname=".... .... .... .... .... .... 1... .... = Read Attributes: READ ATTRIBUTES access" size="4" pos="125" show="1" value="FFFFFFFF" unmaskedvalue="2600792b"/>
+ <field name="smb.access.write_attributes" showname=".... .... .... .... .... ...1 .... .... = Write Attributes: WRITE ATTRIBUTES access" size="4" pos="125" show="1" value="FFFFFFFF" unmaskedvalue="2600792b"/>
+ <field name="smb.access.delete" showname=".... .... .... ...0 .... .... .... .... = Delete: NO delete access" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.access.read_control" showname=".... .... .... ..1. .... .... .... .... = Read Control: READ ACCESS to owner, group and ACL of the SID" size="4" pos="125" show="1" value="FFFFFFFF" unmaskedvalue="2600792b"/>
+ <field name="smb.access.write_dac" showname=".... .... .... .0.. .... .... .... .... = Write DAC: Owner may NOT write to the DAC" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.access.write_owner" showname=".... .... .... 0... .... .... .... .... = Write Owner: Can NOT write owner (take ownership)" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.access.synchronize" showname=".... .... ...0 .... .... .... .... .... = Synchronize: Can NOT wait on handle to synchronize on completion of I/O" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.access.system_security" showname=".... ...0 .... .... .... .... .... .... = System Security: System security is NOT set" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.access.maximum_allowed" showname=".... ..0. .... .... .... .... .... .... = Maximum Allowed: Maximum allowed is NOT set" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.access.generic_all" showname="...0 .... .... .... .... .... .... .... = Generic All: Generic all is NOT set" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.access.generic_execute" showname="..0. .... .... .... .... .... .... .... = Generic Execute: Generic execute is NOT set" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.access.generic_write" showname=".0.. .... .... .... .... .... .... .... = Generic Write: Generic write is NOT set" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.access.generic_read" showname="0... .... .... .... .... .... .... .... = Generic Read: Generic read is NOT set" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ </field>
+ <field name="smb.file_attribute" showname="File Attributes: 0x00000000" size="4" pos="252" show="0x00000000" value="2600792b">
+ <field name="smb.file_attribute.read_only" showname=".... .... .... .... .... .... .... ...0 = Read Only: NOT read only" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.file_attribute.hidden" showname=".... .... .... .... .... .... .... ..0. = Hidden: NOT hidden" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.file_attribute.system" showname=".... .... .... .... .... .... .... .0.. = System: NOT a system file/dir" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.file_attribute.volume" showname=".... .... .... .... .... .... .... 0... = Volume ID: NOT a volume ID" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.file_attribute.directory" showname=".... .... .... .... .... .... ...0 .... = Directory: NOT a directory" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.file_attribute.archive" showname=".... .... .... .... .... .... ..0. .... = Archive: Has NOT been modified since last archive" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.file_attribute.device" showname=".... .... .... .... .... .... .0.. .... = Device: NOT a device" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.file_attribute.normal" showname=".... .... .... .... .... .... 0... .... = Normal: Has some attribute set" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.file_attribute.temporary" showname=".... .... .... .... .... ...0 .... .... = Temporary: NOT a temporary file" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.file_attribute.sparse" showname=".... .... .... .... .... ..0. .... .... = Sparse: NOT a sparse file" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.file_attribute.reparse" showname=".... .... .... .... .... .0.. .... .... = Reparse Point: Does NOT have an associated reparse point" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.file_attribute.compressed" showname=".... .... .... .... .... 0... .... .... = Compressed: Uncompressed" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.file_attribute.offline" showname=".... .... .... .... ...0 .... .... .... = Offline: Online" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.file_attribute.not_content_indexed" showname=".... .... .... .... ..0. .... .... .... = Content Indexed: NOT content indexed" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.file_attribute.encrypted" showname=".... .... .... .... .0.. .... .... .... = Encrypted: This is NOT an encrypted file" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ </field>
+ <field name="smb.share_access" showname="Share Access: 0x00000003, Read, Write" size="4" pos="252" show="0x00000003" value="2600792b">
+ <field name="smb.share.access.read" showname=".... .... .... .... .... .... .... ...1 = Read: Object can be shared for READ" size="4" pos="125" show="1" value="FFFFFFFF" unmaskedvalue="2600792b"/>
+ <field name="smb.share.access.write" showname=".... .... .... .... .... .... .... ..1. = Write: Object can be shared for WRITE" size="4" pos="125" show="1" value="FFFFFFFF" unmaskedvalue="2600792b"/>
+ <field name="smb.share.access.delete" showname=".... .... .... .... .... .... .... .0.. = Delete: Object can NOT be shared for delete" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ </field>
+ <field name="smb.create_options" showname="Create Options: 0x00000000" size="4" pos="252" show="0x00000000" value="2600792b">
+ <field name="smb.nt.create_options.directory" showname=".... .... .... .... .... .... .... ...0 = Directory: File being created/opened must not be a directory" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.nt.create_options.write_through" showname=".... .... .... .... .... .... .... ..0. = Write Through: Writes need not flush buffered data before completing" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.nt.create_options.sequential_only" showname=".... .... .... .... .... .... .... .0.. = Sequential Only: The file might not only be accessed sequentially" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.nt.create_options.intermediate_buffering" showname=".... .... .... .... .... .... .... 0... = Intermediate Buffering: Intermediate buffering is allowed" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.nt.create_options.sync_io_alert" showname=".... .... .... .... .... .... ...0 .... = Sync I/O Alert: Operations NOT necessarily synchronous" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.nt.create_options.sync_io_nonalert" showname=".... .... .... .... .... .... ..0. .... = Sync I/O Nonalert: Operations NOT necessarily synchronous" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.nt.create_options.non_directory" showname=".... .... .... .... .... .... .0.. .... = Non-Directory: File being created/opened must be a directory" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.nt.create_options.create_tree_connection" showname=".... .... .... .... .... .... 0... .... = Create Tree Connection: Create Tree Connections is NOT set" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.nt.create_options.complete_if_oplocked" showname=".... .... .... .... .... ...0 .... .... = Complete If Oplocked: Complete if oplocked is NOT set" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.nt.create_options.no_ea_knowledge" showname=".... .... .... .... .... ..0. .... .... = No EA Knowledge: The client understands extended attributes" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.nt.create_options.eight_dot_three_only" showname=".... .... .... .... .... .0.. .... .... = 8.3 Only: The client understands long file names" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.nt.create_options.random_access" showname=".... .... .... .... .... 0... .... .... = Random Access: The file will not be accessed randomly" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.nt.create_options.delete_on_close" showname=".... .... .... .... ...0 .... .... .... = Delete On Close: The file should not be deleted when it is closed" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.nt.create_options.open_by_fileid" showname=".... .... .... .... ..0. .... .... .... = Open By FileID: OpenByFileID is NOT set" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.nt.create_options.backup_intent" showname=".... .... .... .... .0.. .... .... .... = Backup Intent: This is a normal create" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.nt.create_options.no_compression" showname=".... .... .... .... 0... .... .... .... = No Compression: Compression is allowed for Open/Create" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.nt.create_options.reserve_opfilter" showname=".... .... ...0 .... .... .... .... .... = Reserve Opfilter: Reserve Opfilter is NOT set" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.nt.create_options.open_reparse_point" showname=".... .... ..0. .... .... .... .... .... = Open Reparse Point: Normal open" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.nt.create_options.open_no_recall" showname=".... .... .0.. .... .... .... .... .... = Open No Recall: Open no recall is NOT set" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.nt.create_options.open_for_free_space_query" showname=".... .... 0... .... .... .... .... .... = Open For Free Space query: This is NOT an open for free space query" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ </field>
+ <field name="smb.create.disposition" showname="Disposition: Open (if file exists open it, else fail) (1)" size="0" pos="252" show="1"/>
+ </field>
+ </proto>
+ <proto name="dcerpc" showname="Distributed Computing Environment / Remote Procedure Call (DCE/RPC) Bind, Fragment: Single, FragLen: 72, Call: 1" size="72" pos="148">
+ <field name="dcerpc.ver" showname="Version: 5" size="1" pos="148" show="5" value="05"/>
+ <field name="dcerpc.ver_minor" showname="Version (minor): 0" size="1" pos="149" show="0" value="00"/>
+ <field name="dcerpc.pkt_type" showname="Packet type: Bind (11)" size="1" pos="150" show="11" value="0b"/>
+ <field name="dcerpc.cn_flags" showname="Packet Flags: 0x03" size="1" pos="151" show="0x00000003" value="03">
+ <field name="dcerpc.cn_flags.object" showname="0... .... = Object: Not set" size="1" pos="151" show="0" value="0" unmaskedvalue="03"/>
+ <field name="dcerpc.cn_flags.maybe" showname=".0.. .... = Maybe: Not set" size="1" pos="151" show="0" value="0" unmaskedvalue="03"/>
+ <field name="dcerpc.cn_flags.dne" showname="..0. .... = Did Not Execute: Not set" size="1" pos="151" show="0" value="0" unmaskedvalue="03"/>
+ <field name="dcerpc.cn_flags.mpx" showname="...0 .... = Multiplex: Not set" size="1" pos="151" show="0" value="0" unmaskedvalue="03"/>
+ <field name="dcerpc.cn_flags.reserved" showname=".... 0... = Reserved: Not set" size="1" pos="151" show="0" value="0" unmaskedvalue="03"/>
+ <field name="dcerpc.cn_flags.cancel_pending" showname=".... .0.. = Cancel Pending: Not set" size="1" pos="151" show="0" value="0" unmaskedvalue="03"/>
+ <field name="dcerpc.cn_flags.last_frag" showname=".... ..1. = Last Frag: Set" size="1" pos="151" show="1" value="FFFFFFFF" unmaskedvalue="03"/>
+ <field name="dcerpc.cn_flags.first_frag" showname=".... ...1 = First Frag: Set" size="1" pos="151" show="1" value="FFFFFFFF" unmaskedvalue="03"/>
+ </field>
+ <field name="dcerpc.drep" showname="Data Representation: 10000000" size="4" pos="152" show="10:00:00:00" value="10000000">
+ <field name="dcerpc.drep.byteorder" showname="Byte order: Little-endian (1)" size="1" pos="152" show="1" value="10"/>
+ <field name="dcerpc.drep.character" showname="Character: ASCII (0)" size="1" pos="152" show="0" value="10"/>
+ <field name="dcerpc.drep.fp" showname="Floating-point: IEEE (0)" size="1" pos="153" show="0" value="00"/>
+ </field>
+ <field name="dcerpc.cn_frag_len" showname="Frag Length: 72" size="2" pos="156" show="72" value="4800"/>
+ <field name="dcerpc.cn_auth_len" showname="Auth Length: 0" size="2" pos="158" show="0" value="0000"/>
+ <field name="dcerpc.cn_call_id" showname="Call ID: 1" size="4" pos="160" show="1" value="01000000"/>
+ <field name="dcerpc.cn_max_xmit" showname="Max Xmit Frag: 4280" size="2" pos="164" show="4280" value="b810"/>
+ <field name="dcerpc.cn_max_recv" showname="Max Recv Frag: 4280" size="2" pos="166" show="4280" value="b810"/>
+ <field name="dcerpc.cn_assoc_group" showname="Assoc Group: 0x00000000" size="4" pos="168" show="0x00000000" value="00000000"/>
+ <field name="dcerpc.cn_num_ctx_items" showname="Num Ctx Items: 1" size="1" pos="172" show="1" value="01"/>
+ <field name="dcerpc.cn_ctx_item" showname="Ctx Item[1]: Context ID:0, SRVSVC, 32bit NDR" size="44" pos="176" show="" value="">
+ <field name="dcerpc.cn_ctx_id" showname="Context ID: 0" size="2" pos="176" show="0" value="0000"/>
+ <field name="dcerpc.cn_num_trans_items" showname="Num Trans Items: 1" size="1" pos="178" show="1" value="01"/>
+ <field name="dcerpc.cn_bind_abstract_syntax" showname="Abstract Syntax: SRVSVC V3.0" size="20" pos="180" show="" value="">
+ <field name="dcerpc.cn_bind_to_uuid" showname="Interface: SRVSVC UUID: 4b324fc8-1670-01d3-1278-5a47bf6ee188" size="16" pos="180" show="4b324fc8-1670-01d3-1278-5a47bf6ee188" value="c84f324b7016d30112785a47bf6ee188"/>
+ <field name="dcerpc.cn_bind_if_ver" showname="Interface Ver: 3" size="2" pos="196" show="3" value="0300"/>
+ <field name="dcerpc.cn_bind_if_ver_minor" showname="Interface Ver Minor: 0" size="2" pos="198" show="0" value="0000"/>
+ </field>
+ <field name="dcerpc.cn_bind_trans" showname="Transfer Syntax[1]: 32bit NDR V2" size="20" pos="200" show="" value="">
+ <field name="dcerpc.cn_bind_trans_id" showname="Transfer Syntax: 32bit NDR UUID:8a885d04-1ceb-11c9-9fe8-08002b104860" size="16" pos="200" show="8a885d04-1ceb-11c9-9fe8-08002b104860" value="045d888aeb1cc9119fe808002b104860"/>
+ <field name="dcerpc.cn_bind_trans_ver" showname="ver: 2" size="4" pos="216" show="2" value="02000000"/>
+ </field>
+ </field>
+ </proto>
+</packet>
+
+<packet>
+ <proto name="geninfo" pos="0" showname="General information" size="244">
+ <field name="num" pos="0" show="471" showname="Number" value="1d7" size="244"/>
+ <field name="len" pos="0" show="244" showname="Frame Length" value="f4" size="244"/>
+ <field name="caplen" pos="0" show="244" showname="Captured Length" value="f4" size="244"/>
+ <field name="timestamp" pos="0" show="Feb 13, 2017 10:17:16.201029000 NZDT" showname="Captured Time" value="1486934236.201029000" size="244"/>
+ </proto>
+ <proto name="frame" showname="Frame 471: 244 bytes on wire (1952 bits), 244 bytes captured (1952 bits)" size="244" pos="0">
+ <field name="frame.encap_type" showname="Encapsulation type: Raw IP (7)" size="0" pos="0" show="7"/>
+ <field name="frame.time" showname="Arrival Time: Feb 13, 2017 10:17:16.201029000 NZDT" size="0" pos="0" show="Feb 13, 2017 10:17:16.201029000 NZDT"/>
+ <field name="frame.offset_shift" showname="Time shift for this packet: 0.000000000 seconds" size="0" pos="0" show="0.000000000"/>
+ <field name="frame.time_epoch" showname="Epoch Time: 1486934236.201029000 seconds" size="0" pos="0" show="1486934236.201029000"/>
+ <field name="frame.time_delta" showname="Time delta from previous captured frame: 0.050577000 seconds" size="0" pos="0" show="0.050577000"/>
+ <field name="frame.time_delta_displayed" showname="Time delta from previous displayed frame: 0.050577000 seconds" size="0" pos="0" show="0.050577000"/>
+ <field name="frame.time_relative" showname="Time since reference or first frame: 465.578588000 seconds" size="0" pos="0" show="465.578588000"/>
+ <field name="frame.number" showname="Frame Number: 471" size="0" pos="0" show="471"/>
+ <field name="frame.len" showname="Frame Length: 244 bytes (1952 bits)" size="0" pos="0" show="244"/>
+ <field name="frame.cap_len" showname="Capture Length: 244 bytes (1952 bits)" size="0" pos="0" show="244"/>
+ <field name="frame.marked" showname="Frame is marked: False" size="0" pos="0" show="0"/>
+ <field name="frame.ignored" showname="Frame is ignored: False" size="0" pos="0" show="0"/>
+ <field name="frame.protocols" showname="Protocols in frame: raw:ipv6:tcp:nbss:smb:dcerpc" size="0" pos="0" show="raw:ipv6:tcp:nbss:smb:dcerpc"/>
+ </proto>
+ <proto name="raw" showname="Raw packet data" size="244" pos="0"/>
+ <proto name="ipv6" showname="Internet Protocol Version 6, Src: fd00::5357:5f0b, Dst: fd00::5357:5f03" size="40" pos="0">
+ <field name="ipv6.version" showname="0110 .... = Version: 6" size="1" pos="0" show="6" value="6" unmaskedvalue="60"/>
+ <field name="ip.version" showname="0110 .... = Version: 6 [This field makes the filter match on &quot;ip.version == 6&quot; possible]" hide="yes" size="1" pos="0" show="6" value="6" unmaskedvalue="60"/>
+ <field name="ipv6.tclass" showname=".... 0000 0000 .... .... .... .... .... = Traffic class: 0x00 (DSCP: CS0, ECN: Not-ECT)" size="4" pos="0" show="0x00000000" value="0" unmaskedvalue="60000000">
+ <field name="ipv6.tclass.dscp" showname=".... 0000 00.. .... .... .... .... .... = Differentiated Services Codepoint: Default (0)" size="4" pos="0" show="0" value="0" unmaskedvalue="60000000"/>
+ <field name="ipv6.tclass.ecn" showname=".... .... ..00 .... .... .... .... .... = Explicit Congestion Notification: Not ECN-Capable Transport (0)" size="4" pos="0" show="0" value="0" unmaskedvalue="60000000"/>
+ </field>
+ <field name="ipv6.flow" showname=".... .... .... 0000 0000 0000 0000 0000 = Flowlabel: 0x00000000" size="4" pos="0" show="0x00000000" value="0" unmaskedvalue="60000000"/>
+ <field name="ipv6.plen" showname="Payload length: 244" size="2" pos="4" show="244" value="00f4">
+ <field name="_ws.expert" showname="Expert Info (Warn/Protocol): IPv6 payload length exceeds framing length (204 bytes)" size="0" pos="4">
+ <field name="ipv6.bogus_payload_length" showname="IPv6 payload length exceeds framing length (204 bytes)" size="0" pos="0" show="" value=""/>
+ <field name="_ws.expert.message" showname="Message: IPv6 payload length exceeds framing length (204 bytes)" hide="yes" size="0" pos="0" show="IPv6 payload length exceeds framing length (204 bytes)"/>
+ <field name="_ws.expert.severity" showname="Severity level: Warn" size="0" pos="0" show="0x00600000"/>
+ <field name="_ws.expert.group" showname="Group: Protocol" size="0" pos="0" show="0x09000000"/>
+ </field>
+ </field>
+ <field name="ipv6.nxt" showname="Next header: TCP (6)" size="1" pos="6" show="6" value="06"/>
+ <field name="ipv6.hlim" showname="Hop limit: 0" size="1" pos="7" show="0" value="00"/>
+ <field name="ipv6.src" showname="Source: fd00::5357:5f0b" size="16" pos="8" show="fd00::5357:5f0b" value="fd000000000000000000000053575f0b"/>
+ <field name="ipv6.addr" showname="Source or Destination Address: fd00::5357:5f0b" hide="yes" size="16" pos="8" show="fd00::5357:5f0b" value="fd000000000000000000000053575f0b"/>
+ <field name="ipv6.src_host" showname="Source Host: fd00::5357:5f0b" hide="yes" size="16" pos="8" show="fd00::5357:5f0b" value="fd000000000000000000000053575f0b"/>
+ <field name="ipv6.host" showname="Source or Destination Host: fd00::5357:5f0b" hide="yes" size="16" pos="8" show="fd00::5357:5f0b" value="fd000000000000000000000053575f0b"/>
+ <field name="ipv6.dst" showname="Destination: fd00::5357:5f03" size="16" pos="24" show="fd00::5357:5f03" value="fd000000000000000000000053575f03"/>
+ <field name="ipv6.addr" showname="Source or Destination Address: fd00::5357:5f03" hide="yes" size="16" pos="24" show="fd00::5357:5f03" value="fd000000000000000000000053575f03"/>
+ <field name="ipv6.dst_host" showname="Destination Host: fd00::5357:5f03" hide="yes" size="16" pos="24" show="fd00::5357:5f03" value="fd000000000000000000000053575f03"/>
+ <field name="ipv6.host" showname="Source or Destination Host: fd00::5357:5f03" hide="yes" size="16" pos="24" show="fd00::5357:5f03" value="fd000000000000000000000053575f03"/>
+ <field name="" show="Source GeoIP: Unknown" size="16" pos="8" value="fd000000000000000000000053575f0b"/>
+ <field name="" show="Destination GeoIP: Unknown" size="16" pos="24" value="fd000000000000000000000053575f03"/>
+ </proto>
+ <proto name="tcp" showname="Transmission Control Protocol, Src Port: 31861 (31861), Dst Port: 139 (139), Seq: 1007, Ack: 1057, Len: 184" size="20" pos="40">
+ <field name="tcp.srcport" showname="Source Port: 31861" size="2" pos="40" show="31861" value="7c75"/>
+ <field name="tcp.dstport" showname="Destination Port: 139" size="2" pos="42" show="139" value="008b"/>
+ <field name="tcp.port" showname="Source or Destination Port: 31861" hide="yes" size="2" pos="40" show="31861" value="7c75"/>
+ <field name="tcp.port" showname="Source or Destination Port: 139" hide="yes" size="2" pos="42" show="139" value="008b"/>
+ <field name="tcp.stream" showname="Stream index: 6" size="0" pos="40" show="6"/>
+ <field name="tcp.len" showname="TCP Segment Len: 184" size="1" pos="52" show="184" value="50"/>
+ <field name="tcp.seq" showname="Sequence number: 1007 (relative sequence number)" size="4" pos="44" show="1007" value="000003ef"/>
+ <field name="tcp.nxtseq" showname="Next sequence number: 1191 (relative sequence number)" size="0" pos="40" show="1191"/>
+ <field name="tcp.ack" showname="Acknowledgment number: 1057 (relative ack number)" size="4" pos="48" show="1057" value="00000421"/>
+ <field name="tcp.hdr_len" showname="Header Length: 20 bytes" size="1" pos="52" show="20" value="50"/>
+ <field name="tcp.flags" showname="Flags: 0x018 (PSH, ACK)" size="2" pos="52" show="0x00000018" value="18" unmaskedvalue="5018">
+ <field name="tcp.flags.res" showname="000. .... .... = Reserved: Not set" size="1" pos="52" show="0" value="0" unmaskedvalue="50"/>
+ <field name="tcp.flags.ns" showname="...0 .... .... = Nonce: Not set" size="1" pos="52" show="0" value="0" unmaskedvalue="50"/>
+ <field name="tcp.flags.cwr" showname=".... 0... .... = Congestion Window Reduced (CWR): Not set" size="1" pos="53" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.ecn" showname=".... .0.. .... = ECN-Echo: Not set" size="1" pos="53" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.urg" showname=".... ..0. .... = Urgent: Not set" size="1" pos="53" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.ack" showname=".... ...1 .... = Acknowledgment: Set" size="1" pos="53" show="1" value="FFFFFFFF" unmaskedvalue="18"/>
+ <field name="tcp.flags.push" showname=".... .... 1... = Push: Set" size="1" pos="53" show="1" value="FFFFFFFF" unmaskedvalue="18"/>
+ <field name="tcp.flags.reset" showname=".... .... .0.. = Reset: Not set" size="1" pos="53" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.syn" showname=".... .... ..0. = Syn: Not set" size="1" pos="53" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.fin" showname=".... .... ...0 = Fin: Not set" size="1" pos="53" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.str" showname="TCP Flags: *******AP***" size="2" pos="52" show="*******AP***" value="5018"/>
+ </field>
+ <field name="tcp.window_size_value" showname="Window size value: 32767" size="2" pos="54" show="32767" value="7fff"/>
+ <field name="tcp.window_size" showname="Calculated window size: 32767" size="2" pos="54" show="32767" value="7fff"/>
+ <field name="tcp.window_size_scalefactor" showname="Window size scaling factor: -2 (no window scaling used)" size="2" pos="54" show="-2" value="7fff"/>
+ <field name="tcp.checksum" showname="Checksum: 0x0000 [validation disabled]" size="2" pos="56" show="0x00000000" value="0000">
+ <field name="tcp.checksum_good" showname="Good Checksum: False" size="2" pos="56" show="0" value="0000"/>
+ <field name="tcp.checksum_bad" showname="Bad Checksum: False" size="2" pos="56" show="0" value="0000"/>
+ </field>
+ <field name="tcp.urgent_pointer" showname="Urgent pointer: 0" size="2" pos="58" show="0" value="0000"/>
+ <field name="tcp.analysis" showname="SEQ/ACK analysis" size="0" pos="40" show="" value="">
+ <field name="tcp.analysis.acks_frame" showname="This is an ACK to the segment in frame: 468" size="0" pos="40" show="468"/>
+ <field name="tcp.analysis.ack_rtt" showname="The RTT to ACK the segment was: 0.050606000 seconds" size="0" pos="40" show="0.050606000"/>
+ <field name="tcp.analysis.initial_rtt" showname="iRTT: 0.000024000 seconds" size="0" pos="40" show="0.000024000"/>
+ <field name="tcp.analysis.bytes_in_flight" showname="Bytes in flight: 184" size="0" pos="40" show="184"/>
+ </field>
+ </proto>
+ <proto name="nbss" showname="NetBIOS Session Service" size="184" pos="60">
+ <field name="nbss.type" showname="Message Type: Session message (0x00)" size="1" pos="60" show="0x00000000" value="00"/>
+ <field name="nbss.length" showname="Length: 180" size="3" pos="61" show="180" value="0000b4"/>
+ </proto>
+ <proto name="smb" showname="SMB (Server Message Block Protocol)" size="180" pos="64">
+ <field name="" show="SMB Header" size="32" pos="64" value="ff534d4225000000001843c8000000000000000000000000ac6a6455deec0600">
+ <field name="smb.server_component" showname="Server Component: SMB" size="4" pos="64" show="0x424d53ff" value="ff534d42"/>
+ <field name="smb.cmd" showname="SMB Command: Trans (0x25)" size="1" pos="68" show="37" value="25"/>
+ <field name="smb.nt_status" showname="NT Status: STATUS_SUCCESS (0x00000000)" size="4" pos="69" show="0" value="00000000"/>
+ <field name="smb.flags" showname="Flags: 0x18, Canonicalized Pathnames, Case Sensitivity" size="1" pos="73" show="0x00000018" value="18">
+ <field name="smb.flags.response" showname="0... .... = Request/Response: Message is a request to the server" size="1" pos="73" show="0" value="0" unmaskedvalue="18"/>
+ <field name="smb.flags.notify" showname=".0.. .... = Notify: Notify client only on open" size="1" pos="73" show="0" value="0" unmaskedvalue="18"/>
+ <field name="smb.flags.oplock" showname="..0. .... = Oplocks: OpLock not requested/granted" size="1" pos="73" show="0" value="0" unmaskedvalue="18"/>
+ <field name="smb.flags.canon" showname="...1 .... = Canonicalized Pathnames: Pathnames are canonicalized" size="1" pos="73" show="1" value="FFFFFFFF" unmaskedvalue="18"/>
+ <field name="smb.flags.caseless" showname=".... 1... = Case Sensitivity: Path names are caseless" size="1" pos="73" show="1" value="FFFFFFFF" unmaskedvalue="18"/>
+ <field name="smb.flags.receive_buffer" showname=".... ..0. = Receive Buffer Posted: Receive buffer has not been posted" size="1" pos="73" show="0" value="0" unmaskedvalue="18"/>
+ <field name="smb.flags.lock" showname=".... ...0 = Lock and Read: Lock&amp;Read, Write&amp;Unlock are not supported" size="1" pos="73" show="0" value="0" unmaskedvalue="18"/>
+ </field>
+ <field name="smb.flags2" showname="Flags2: 0xc843, Unicode Strings, Error Code Type, Extended Security Negotiation, Long Names Used, Extended Attributes, Long Names Allowed" size="2" pos="74" show="0x0000c843" value="43c8">
+ <field name="smb.flags2.string" showname="1... .... .... .... = Unicode Strings: Strings are Unicode" size="2" pos="74" show="1" value="FFFFFFFF" unmaskedvalue="43c8"/>
+ <field name="smb.flags2.nt_error" showname=".1.. .... .... .... = Error Code Type: Error codes are NT error codes" size="2" pos="74" show="1" value="FFFFFFFF" unmaskedvalue="43c8"/>
+ <field name="smb.flags2.roe" showname="..0. .... .... .... = Execute-only Reads: Don&#x27;t permit reads if execute-only" size="2" pos="74" show="0" value="0" unmaskedvalue="43c8"/>
+ <field name="smb.flags2.dfs" showname="...0 .... .... .... = Dfs: Don&#x27;t resolve pathnames with Dfs" size="2" pos="74" show="0" value="0" unmaskedvalue="43c8"/>
+ <field name="smb.flags2.esn" showname=".... 1... .... .... = Extended Security Negotiation: Extended security negotiation is supported" size="2" pos="74" show="1" value="FFFFFFFF" unmaskedvalue="43c8"/>
+ <field name="smb.flags2.reparse_path" showname=".... .0.. .... .... = Reparse Path: The request does not use a @GMT reparse path" size="2" pos="74" show="0" value="0" unmaskedvalue="43c8"/>
+ <field name="smb.flags2.long_names_used" showname=".... .... .1.. .... = Long Names Used: Path names in request are long file names" size="2" pos="74" show="1" value="FFFFFFFF" unmaskedvalue="43c8"/>
+ <field name="smb.flags2.sec_sig_required" showname=".... .... ...0 .... = Security Signatures Required: Security signatures are not required" size="2" pos="74" show="0" value="0" unmaskedvalue="43c8"/>
+ <field name="smb.flags2.compressed" showname=".... .... .... 0... = Compressed: Compression is not requested" size="2" pos="74" show="0" value="0" unmaskedvalue="43c8"/>
+ <field name="smb.flags2.sec_sig" showname=".... .... .... .0.. = Security Signatures: Security signatures are not supported" size="2" pos="74" show="0" value="0" unmaskedvalue="43c8"/>
+ <field name="smb.flags2.ea" showname=".... .... .... ..1. = Extended Attributes: Extended attributes are supported" size="2" pos="74" show="1" value="FFFFFFFF" unmaskedvalue="43c8"/>
+ <field name="smb.flags2.long_names_allowed" showname=".... .... .... ...1 = Long Names Allowed: Long file names are allowed in the response" size="2" pos="74" show="1" value="FFFFFFFF" unmaskedvalue="43c8"/>
+ </field>
+ <field name="smb.pid.high" showname="Process ID High: 0" size="2" pos="76" show="0" value="0000"/>
+ <field name="smb.signature" showname="Signature: 0000000000000000" size="8" pos="78" show="00:00:00:00:00:00:00:00" value="0000000000000000"/>
+ <field name="smb.reserved" showname="Reserved: 0000" size="2" pos="86" show="00:00" value="0000"/>
+ <field name="smb.tid" showname="Tree ID: 27308 (\\LOCALNT4DC2\IPC$)" size="2" pos="88" show="27308" value="ac6a">
+ <field name="smb.path" showname="Path: \\LOCALNT4DC2\IPC$" size="0" pos="152" show="\\LOCALNT4DC2\IPC$"/>
+ <field name="smb.fid.mapped_in" showname="Mapped in: 456" size="0" pos="152" show="456"/>
+ </field>
+ <field name="smb.pid" showname="Process ID: 21860" size="2" pos="90" show="21860" value="6455"/>
+ <field name="smb.uid" showname="User ID: 60638" size="2" pos="92" show="60638" value="deec"/>
+ <field name="smb.mid" showname="Multiplex ID: 6" size="2" pos="94" show="6" value="0600"/>
+ </field>
+ <field name="" show="Trans Request (0x25)" size="148" pos="96" value="10000060000000b81000000000000000000000000054006000540002002600792b7100005c0050004900500045005c0000000000050000031000000060000000020000004800000000000f00000002000c000000000000000c0000004c004f00430041004c004e005400340044004300320000000100000001000000040002000000000000000000ffffffff0800020000000000">
+ <field name="smb.wct" showname="Word Count (WCT): 16" size="1" pos="96" show="16" value="10"/>
+ <field name="smb.tpc" showname="Total Parameter Count: 0" size="2" pos="97" show="0" value="0000"/>
+ <field name="smb.tdc" showname="Total Data Count: 96" size="2" pos="99" show="96" value="6000"/>
+ <field name="smb.mpc" showname="Max Parameter Count: 0" size="2" pos="101" show="0" value="0000"/>
+ <field name="smb.mdc" showname="Max Data Count: 4280" size="2" pos="103" show="4280" value="b810"/>
+ <field name="smb.msc" showname="Max Setup Count: 0" size="1" pos="105" show="0" value="00"/>
+ <field name="smb.reserved" showname="Reserved: 00" size="1" pos="106" show="00" value="00"/>
+ <field name="smb.transaction.flags" showname="Flags: 0x0000" size="2" pos="107" show="0x00000000" value="0000">
+ <field name="smb.transaction.flags.owt" showname=".... .... .... ..0. = One Way Transaction: Two way transaction" size="2" pos="107" show="0" value="0" unmaskedvalue="0000"/>
+ <field name="smb.transaction.flags.dtid" showname=".... .... .... ...0 = Disconnect TID: Do NOT disconnect TID" size="2" pos="107" show="0" value="0" unmaskedvalue="0000"/>
+ </field>
+ <field name="smb.timeout" showname="Timeout: Return immediately (0)" size="4" pos="109" show="0" value="00000000"/>
+ <field name="smb.reserved" showname="Reserved: 0000" size="2" pos="113" show="00:00" value="0000"/>
+ <field name="smb.pc" showname="Parameter Count: 0" size="2" pos="115" show="0" value="0000"/>
+ <field name="smb.po" showname="Parameter Offset: 84" size="2" pos="117" show="84" value="5400"/>
+ <field name="smb.dc" showname="Data Count: 96" size="2" pos="119" show="96" value="6000"/>
+ <field name="smb.data_offset" showname="Data Offset: 84" size="2" pos="121" show="84" value="5400"/>
+ <field name="smb.sc" showname="Setup Count: 2" size="1" pos="123" show="2" value="02"/>
+ <field name="smb.reserved" showname="Reserved: 00" size="1" pos="124" show="00" value="00"/>
+ <field name="smb.bcc" showname="Byte Count (BCC): 113" size="2" pos="129" show="113" value="7100"/>
+ <field name="smb.trans_name" showname="Transaction Name: \PIPE\" size="14" pos="132" show="\PIPE\" value="5c0050004900500045005c000000"/>
+ <field name="smb.padding" showname="Padding: 0000" size="2" pos="146" show="00:00" value="0000"/>
+ </field>
+ </proto>
+ <proto name="smb_pipe" showname="SMB Pipe Protocol" size="21" pos="125">
+ <field name="smb_pipe.function" showname="Function: TransactNmPipe (0x0026)" size="2" pos="125" show="0x00000026" value="2600"/>
+ <field name="smb.fid" showname="FID: 0x2b79 (\srvsvc)" size="2" pos="127" show="0x00002b79" value="792b">
+ <field name="smb.fid.opened_in" showname="Opened in: 462" size="0" pos="252" show="462"/>
+ <field name="smb.file" showname="File Name: \srvsvc" size="0" pos="252" show="\srvsvc"/>
+ <field name="smb.create_flags" showname="Create Flags: 0x00000000" size="4" pos="252" show="0x00000000" value="2600792b">
+ <field name="smb.nt.create.oplock" showname=".... .... .... .... .... .... .... ..0. = Exclusive Oplock: Does NOT request oplock" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.nt.create.batch_oplock" showname=".... .... .... .... .... .... .... .0.. = Batch Oplock: Does NOT request batch oplock" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.nt.create.dir" showname=".... .... .... .... .... .... .... 0... = Create Directory: Target of open can be a file" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.nt.create.ext" showname=".... .... .... .... .... .... ...0 .... = Extended Response: Extended responses NOT required" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ </field>
+ <field name="smb.access_mask" showname="Access Mask: 0x0002019f" size="4" pos="252" show="0x0002019f" value="2600792b">
+ <field name="smb.access.read" showname=".... .... .... .... .... .... .... ...1 = Read: READ access" size="4" pos="125" show="1" value="FFFFFFFF" unmaskedvalue="2600792b"/>
+ <field name="smb.access.write" showname=".... .... .... .... .... .... .... ..1. = Write: WRITE access" size="4" pos="125" show="1" value="FFFFFFFF" unmaskedvalue="2600792b"/>
+ <field name="smb.access.append" showname=".... .... .... .... .... .... .... .1.. = Append: APPEND access" size="4" pos="125" show="1" value="FFFFFFFF" unmaskedvalue="2600792b"/>
+ <field name="smb.access.read_ea" showname=".... .... .... .... .... .... .... 1... = Read EA: READ EXTENDED ATTRIBUTES access" size="4" pos="125" show="1" value="FFFFFFFF" unmaskedvalue="2600792b"/>
+ <field name="smb.access.write_ea" showname=".... .... .... .... .... .... ...1 .... = Write EA: WRITE EXTENDED ATTRIBUTES access" size="4" pos="125" show="1" value="FFFFFFFF" unmaskedvalue="2600792b"/>
+ <field name="smb.access.execute" showname=".... .... .... .... .... .... ..0. .... = Execute: NO execute access" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.access.delete_child" showname=".... .... .... .... .... .... .0.. .... = Delete Child: NO delete child access" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.access.read_attributes" showname=".... .... .... .... .... .... 1... .... = Read Attributes: READ ATTRIBUTES access" size="4" pos="125" show="1" value="FFFFFFFF" unmaskedvalue="2600792b"/>
+ <field name="smb.access.write_attributes" showname=".... .... .... .... .... ...1 .... .... = Write Attributes: WRITE ATTRIBUTES access" size="4" pos="125" show="1" value="FFFFFFFF" unmaskedvalue="2600792b"/>
+ <field name="smb.access.delete" showname=".... .... .... ...0 .... .... .... .... = Delete: NO delete access" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.access.read_control" showname=".... .... .... ..1. .... .... .... .... = Read Control: READ ACCESS to owner, group and ACL of the SID" size="4" pos="125" show="1" value="FFFFFFFF" unmaskedvalue="2600792b"/>
+ <field name="smb.access.write_dac" showname=".... .... .... .0.. .... .... .... .... = Write DAC: Owner may NOT write to the DAC" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.access.write_owner" showname=".... .... .... 0... .... .... .... .... = Write Owner: Can NOT write owner (take ownership)" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.access.synchronize" showname=".... .... ...0 .... .... .... .... .... = Synchronize: Can NOT wait on handle to synchronize on completion of I/O" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.access.system_security" showname=".... ...0 .... .... .... .... .... .... = System Security: System security is NOT set" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.access.maximum_allowed" showname=".... ..0. .... .... .... .... .... .... = Maximum Allowed: Maximum allowed is NOT set" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.access.generic_all" showname="...0 .... .... .... .... .... .... .... = Generic All: Generic all is NOT set" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.access.generic_execute" showname="..0. .... .... .... .... .... .... .... = Generic Execute: Generic execute is NOT set" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.access.generic_write" showname=".0.. .... .... .... .... .... .... .... = Generic Write: Generic write is NOT set" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.access.generic_read" showname="0... .... .... .... .... .... .... .... = Generic Read: Generic read is NOT set" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ </field>
+ <field name="smb.file_attribute" showname="File Attributes: 0x00000000" size="4" pos="252" show="0x00000000" value="2600792b">
+ <field name="smb.file_attribute.read_only" showname=".... .... .... .... .... .... .... ...0 = Read Only: NOT read only" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.file_attribute.hidden" showname=".... .... .... .... .... .... .... ..0. = Hidden: NOT hidden" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.file_attribute.system" showname=".... .... .... .... .... .... .... .0.. = System: NOT a system file/dir" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.file_attribute.volume" showname=".... .... .... .... .... .... .... 0... = Volume ID: NOT a volume ID" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.file_attribute.directory" showname=".... .... .... .... .... .... ...0 .... = Directory: NOT a directory" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.file_attribute.archive" showname=".... .... .... .... .... .... ..0. .... = Archive: Has NOT been modified since last archive" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.file_attribute.device" showname=".... .... .... .... .... .... .0.. .... = Device: NOT a device" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.file_attribute.normal" showname=".... .... .... .... .... .... 0... .... = Normal: Has some attribute set" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.file_attribute.temporary" showname=".... .... .... .... .... ...0 .... .... = Temporary: NOT a temporary file" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.file_attribute.sparse" showname=".... .... .... .... .... ..0. .... .... = Sparse: NOT a sparse file" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.file_attribute.reparse" showname=".... .... .... .... .... .0.. .... .... = Reparse Point: Does NOT have an associated reparse point" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.file_attribute.compressed" showname=".... .... .... .... .... 0... .... .... = Compressed: Uncompressed" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.file_attribute.offline" showname=".... .... .... .... ...0 .... .... .... = Offline: Online" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.file_attribute.not_content_indexed" showname=".... .... .... .... ..0. .... .... .... = Content Indexed: NOT content indexed" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.file_attribute.encrypted" showname=".... .... .... .... .0.. .... .... .... = Encrypted: This is NOT an encrypted file" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ </field>
+ <field name="smb.share_access" showname="Share Access: 0x00000003, Read, Write" size="4" pos="252" show="0x00000003" value="2600792b">
+ <field name="smb.share.access.read" showname=".... .... .... .... .... .... .... ...1 = Read: Object can be shared for READ" size="4" pos="125" show="1" value="FFFFFFFF" unmaskedvalue="2600792b"/>
+ <field name="smb.share.access.write" showname=".... .... .... .... .... .... .... ..1. = Write: Object can be shared for WRITE" size="4" pos="125" show="1" value="FFFFFFFF" unmaskedvalue="2600792b"/>
+ <field name="smb.share.access.delete" showname=".... .... .... .... .... .... .... .0.. = Delete: Object can NOT be shared for delete" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ </field>
+ <field name="smb.create_options" showname="Create Options: 0x00000000" size="4" pos="252" show="0x00000000" value="2600792b">
+ <field name="smb.nt.create_options.directory" showname=".... .... .... .... .... .... .... ...0 = Directory: File being created/opened must not be a directory" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.nt.create_options.write_through" showname=".... .... .... .... .... .... .... ..0. = Write Through: Writes need not flush buffered data before completing" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.nt.create_options.sequential_only" showname=".... .... .... .... .... .... .... .0.. = Sequential Only: The file might not only be accessed sequentially" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.nt.create_options.intermediate_buffering" showname=".... .... .... .... .... .... .... 0... = Intermediate Buffering: Intermediate buffering is allowed" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.nt.create_options.sync_io_alert" showname=".... .... .... .... .... .... ...0 .... = Sync I/O Alert: Operations NOT necessarily synchronous" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.nt.create_options.sync_io_nonalert" showname=".... .... .... .... .... .... ..0. .... = Sync I/O Nonalert: Operations NOT necessarily synchronous" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.nt.create_options.non_directory" showname=".... .... .... .... .... .... .0.. .... = Non-Directory: File being created/opened must be a directory" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.nt.create_options.create_tree_connection" showname=".... .... .... .... .... .... 0... .... = Create Tree Connection: Create Tree Connections is NOT set" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.nt.create_options.complete_if_oplocked" showname=".... .... .... .... .... ...0 .... .... = Complete If Oplocked: Complete if oplocked is NOT set" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.nt.create_options.no_ea_knowledge" showname=".... .... .... .... .... ..0. .... .... = No EA Knowledge: The client understands extended attributes" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.nt.create_options.eight_dot_three_only" showname=".... .... .... .... .... .0.. .... .... = 8.3 Only: The client understands long file names" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.nt.create_options.random_access" showname=".... .... .... .... .... 0... .... .... = Random Access: The file will not be accessed randomly" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.nt.create_options.delete_on_close" showname=".... .... .... .... ...0 .... .... .... = Delete On Close: The file should not be deleted when it is closed" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.nt.create_options.open_by_fileid" showname=".... .... .... .... ..0. .... .... .... = Open By FileID: OpenByFileID is NOT set" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.nt.create_options.backup_intent" showname=".... .... .... .... .0.. .... .... .... = Backup Intent: This is a normal create" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.nt.create_options.no_compression" showname=".... .... .... .... 0... .... .... .... = No Compression: Compression is allowed for Open/Create" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.nt.create_options.reserve_opfilter" showname=".... .... ...0 .... .... .... .... .... = Reserve Opfilter: Reserve Opfilter is NOT set" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.nt.create_options.open_reparse_point" showname=".... .... ..0. .... .... .... .... .... = Open Reparse Point: Normal open" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.nt.create_options.open_no_recall" showname=".... .... .0.. .... .... .... .... .... = Open No Recall: Open no recall is NOT set" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ <field name="smb.nt.create_options.open_for_free_space_query" showname=".... .... 0... .... .... .... .... .... = Open For Free Space query: This is NOT an open for free space query" size="4" pos="125" show="0" value="0" unmaskedvalue="2600792b"/>
+ </field>
+ <field name="smb.create.disposition" showname="Disposition: Open (if file exists open it, else fail) (1)" size="0" pos="252" show="1"/>
+ </field>
+ </proto>
+ <proto name="dcerpc" showname="Distributed Computing Environment / Remote Procedure Call (DCE/RPC) Request, Fragment: Single, FragLen: 96, Call: 2, Ctx: 0" size="96" pos="148">
+ <field name="dcerpc.ver" showname="Version: 5" size="1" pos="148" show="5" value="05"/>
+ <field name="dcerpc.ver_minor" showname="Version (minor): 0" size="1" pos="149" show="0" value="00"/>
+ <field name="dcerpc.pkt_type" showname="Packet type: Request (0)" size="1" pos="150" show="0" value="00"/>
+ <field name="dcerpc.cn_flags" showname="Packet Flags: 0x03" size="1" pos="151" show="0x00000003" value="03">
+ <field name="dcerpc.cn_flags.object" showname="0... .... = Object: Not set" size="1" pos="151" show="0" value="0" unmaskedvalue="03"/>
+ <field name="dcerpc.cn_flags.maybe" showname=".0.. .... = Maybe: Not set" size="1" pos="151" show="0" value="0" unmaskedvalue="03"/>
+ <field name="dcerpc.cn_flags.dne" showname="..0. .... = Did Not Execute: Not set" size="1" pos="151" show="0" value="0" unmaskedvalue="03"/>
+ <field name="dcerpc.cn_flags.mpx" showname="...0 .... = Multiplex: Not set" size="1" pos="151" show="0" value="0" unmaskedvalue="03"/>
+ <field name="dcerpc.cn_flags.reserved" showname=".... 0... = Reserved: Not set" size="1" pos="151" show="0" value="0" unmaskedvalue="03"/>
+ <field name="dcerpc.cn_flags.cancel_pending" showname=".... .0.. = Cancel Pending: Not set" size="1" pos="151" show="0" value="0" unmaskedvalue="03"/>
+ <field name="dcerpc.cn_flags.last_frag" showname=".... ..1. = Last Frag: Set" size="1" pos="151" show="1" value="FFFFFFFF" unmaskedvalue="03"/>
+ <field name="dcerpc.cn_flags.first_frag" showname=".... ...1 = First Frag: Set" size="1" pos="151" show="1" value="FFFFFFFF" unmaskedvalue="03"/>
+ </field>
+ <field name="dcerpc.drep" showname="Data Representation: 10000000" size="4" pos="152" show="10:00:00:00" value="10000000">
+ <field name="dcerpc.drep.byteorder" showname="Byte order: Little-endian (1)" size="1" pos="152" show="1" value="10"/>
+ <field name="dcerpc.drep.character" showname="Character: ASCII (0)" size="1" pos="152" show="0" value="10"/>
+ <field name="dcerpc.drep.fp" showname="Floating-point: IEEE (0)" size="1" pos="153" show="0" value="00"/>
+ </field>
+ <field name="dcerpc.cn_frag_len" showname="Frag Length: 96" size="2" pos="156" show="96" value="6000"/>
+ <field name="dcerpc.cn_auth_len" showname="Auth Length: 0" size="2" pos="158" show="0" value="0000"/>
+ <field name="dcerpc.cn_call_id" showname="Call ID: 2" size="4" pos="160" show="2" value="02000000"/>
+ <field name="dcerpc.cn_alloc_hint" showname="Alloc hint: 72" size="4" pos="164" show="72" value="48000000"/>
+ <field name="dcerpc.cn_ctx_id" showname="Context ID: 0" size="2" pos="168" show="0" value="0000"/>
+ <field name="dcerpc.opnum" showname="Opnum: 15" size="2" pos="170" show="15" value="0f00"/>
+ </proto>
+ <proto name="srvsvc" showname="Server Service, NetShareEnumAll" size="72" pos="172">
+ <field name="srvsvc.opnum" showname="Operation: NetShareEnumAll (15)" size="0" pos="172" show="15"/>
+ <field name="" show="Pointer to Server Unc (uint16)" size="40" pos="172" value="000002000c000000000000000c0000004c004f00430041004c004e00540034004400430032000000">
+ <field name="dcerpc.referent_id" showname="Referent ID: 0x00020000" size="4" pos="172" show="0x00020000" value="00000200"/>
+ <field name="dcerpc.array.max_count" showname="Max Count: 12" size="4" pos="176" show="12" value="0c000000"/>
+ <field name="dcerpc.array.offset" showname="Offset: 0" size="4" pos="180" show="0" value="00000000"/>
+ <field name="dcerpc.array.actual_count" showname="Actual Count: 12" size="4" pos="184" show="12" value="0c000000"/>
+ <field name="srvsvc.srvsvc_NetShareEnumAll.server_unc" showname="Server Unc: LOCALNT4DC2" size="24" pos="188" show="LOCALNT4DC2" value="4c004f00430041004c004e00540034004400430032000000"/>
+ </field>
+ <field name="" show="Pointer to Level (uint32)" size="4" pos="212" value="01000000">
+ <field name="srvsvc.srvsvc_NetShareEnumAll.level" showname="Level: 1" size="4" pos="212" show="1" value="01000000"/>
+ </field>
+ <field name="" show="Pointer to Ctr (srvsvc_NetShareCtr)" size="16" pos="216" value="01000000040002000000000000000000">
+ <field name="" show="srvsvc_NetShareCtr" size="8" pos="216" value="0100000004000200">
+ <field name="srvsvc.srvsvc_NetShareEnumAll.ctr" showname="Ctr" size="4" pos="216" show="" value=""/>
+ <field name="" show="Pointer to Ctr1 (srvsvc_NetShareCtr1)" size="8" pos="220" value="0400020000000000">
+ <field name="dcerpc.referent_id" showname="Referent ID: 0x00020004" size="4" pos="220" show="0x00020004" value="04000200"/>
+ <field name="srvsvc.srvsvc_NetShareCtr.ctr1" showname="Ctr1" size="8" pos="224" show="" value="">
+ <field name="srvsvc.srvsvc_NetShareCtr1.count" showname="Count: 0" size="4" pos="224" show="0" value="00000000"/>
+ <field name="dcerpc.null_pointer" showname="NULL Pointer: Pointer to Array (srvsvc_NetShareInfo1)" size="4" pos="228" show="00:00:00:00" value="00000000"/>
+ </field>
+ </field>
+ </field>
+ </field>
+ <field name="srvsvc.srvsvc_NetShareEnumAll.max_buffer" showname="Max Buffer: 4294967295" size="4" pos="232" show="4294967295" value="ffffffff"/>
+ <field name="" show="Pointer to Resume Handle (uint32)" size="8" pos="236" value="0800020000000000">
+ <field name="dcerpc.referent_id" showname="Referent ID: 0x00020008" size="4" pos="236" show="0x00020008" value="08000200"/>
+ <field name="srvsvc.srvsvc_NetShareEnumAll.resume_handle" showname="Resume Handle: 0" size="4" pos="240" show="0" value="00000000"/>
+ </field>
+ </proto>
+</packet>
+
+<packet>
+ <proto name="geninfo" pos="0" showname="General information" size="222">
+ <field name="num" pos="0" show="523" showname="Number" value="20b" size="222"/>
+ <field name="len" pos="0" show="222" showname="Frame Length" value="de" size="222"/>
+ <field name="caplen" pos="0" show="222" showname="Captured Length" value="de" size="222"/>
+ <field name="timestamp" pos="0" show="Feb 13, 2017 10:17:17.552194000 NZDT" showname="Captured Time" value="1486934237.552194000" size="222"/>
+ </proto>
+ <proto name="frame" showname="Frame 523: 222 bytes on wire (1776 bits), 222 bytes captured (1776 bits)" size="222" pos="0">
+ <field name="frame.encap_type" showname="Encapsulation type: Raw IP (7)" size="0" pos="0" show="7"/>
+ <field name="frame.time" showname="Arrival Time: Feb 13, 2017 10:17:17.552194000 NZDT" size="0" pos="0" show="Feb 13, 2017 10:17:17.552194000 NZDT"/>
+ <field name="frame.offset_shift" showname="Time shift for this packet: 0.000000000 seconds" size="0" pos="0" show="0.000000000"/>
+ <field name="frame.time_epoch" showname="Epoch Time: 1486934237.552194000 seconds" size="0" pos="0" show="1486934237.552194000"/>
+ <field name="frame.time_delta" showname="Time delta from previous captured frame: 0.000068000 seconds" size="0" pos="0" show="0.000068000"/>
+ <field name="frame.time_delta_displayed" showname="Time delta from previous displayed frame: 0.000068000 seconds" size="0" pos="0" show="0.000068000"/>
+ <field name="frame.time_relative" showname="Time since reference or first frame: 466.929753000 seconds" size="0" pos="0" show="466.929753000"/>
+ <field name="frame.number" showname="Frame Number: 523" size="0" pos="0" show="523"/>
+ <field name="frame.len" showname="Frame Length: 222 bytes (1776 bits)" size="0" pos="0" show="222"/>
+ <field name="frame.cap_len" showname="Capture Length: 222 bytes (1776 bits)" size="0" pos="0" show="222"/>
+ <field name="frame.marked" showname="Frame is marked: False" size="0" pos="0" show="0"/>
+ <field name="frame.ignored" showname="Frame is ignored: False" size="0" pos="0" show="0"/>
+ <field name="frame.protocols" showname="Protocols in frame: raw:ip:udp:nbdgm:smb:browser" size="0" pos="0" show="raw:ip:udp:nbdgm:smb:browser"/>
+ </proto>
+ <proto name="raw" showname="Raw packet data" size="222" pos="0"/>
+ <proto name="ip" showname="Internet Protocol Version 4, Src: 127.0.0.3, Dst: 127.0.0.21" size="20" pos="0">
+ <field name="ip.version" showname="0100 .... = Version: 4" size="1" pos="0" show="4" value="4" unmaskedvalue="45"/>
+ <field name="ip.hdr_len" showname=".... 0101 = Header Length: 20 bytes" size="1" pos="0" show="5" value="5" unmaskedvalue="45"/>
+ <field name="ip.dsfield" showname="Differentiated Services Field: 0x00 (DSCP: CS0, ECN: Not-ECT)" size="1" pos="1" show="0x00000000" value="00">
+ <field name="ip.dsfield.dscp" showname="0000 00.. = Differentiated Services Codepoint: Default (0)" size="1" pos="1" show="0" value="0" unmaskedvalue="00"/>
+ <field name="ip.dsfield.ecn" showname=".... ..00 = Explicit Congestion Notification: Not ECN-Capable Transport (0)" size="1" pos="1" show="0" value="0" unmaskedvalue="00"/>
+ </field>
+ <field name="ip.len" showname="Total Length: 222" size="2" pos="2" show="222" value="00de"/>
+ <field name="ip.id" showname="Identification: 0xffff (65535)" size="2" pos="4" show="0x0000ffff" value="ffff"/>
+ <field name="ip.flags" showname="Flags: 0x02 (Don&#x27;t Fragment)" size="1" pos="6" show="0x00000002" value="40">
+ <field name="ip.flags.rb" showname="0... .... = Reserved bit: Not set" size="1" pos="6" show="0" value="40"/>
+ <field name="ip.flags.df" showname=".1.. .... = Don&#x27;t fragment: Set" size="1" pos="6" show="1" value="40"/>
+ <field name="ip.flags.mf" showname="..0. .... = More fragments: Not set" size="1" pos="6" show="0" value="40"/>
+ </field>
+ <field name="ip.frag_offset" showname="Fragment offset: 0" size="2" pos="6" show="0" value="4000"/>
+ <field name="ip.ttl" showname="Time to live: 255" size="1" pos="8" show="255" value="ff"/>
+ <field name="ip.proto" showname="Protocol: UDP (17)" size="1" pos="9" show="17" value="11"/>
+ <field name="ip.checksum" showname="Header checksum: 0x0000 [validation disabled]" size="2" pos="10" show="0x00000000" value="0000">
+ <field name="ip.checksum_good" showname="Good: False" size="2" pos="10" show="0" value="0000"/>
+ <field name="ip.checksum_bad" showname="Bad: False" size="2" pos="10" show="0" value="0000"/>
+ </field>
+ <field name="ip.src" showname="Source: 127.0.0.3" size="4" pos="12" show="127.0.0.3" value="7f000003"/>
+ <field name="ip.addr" showname="Source or Destination Address: 127.0.0.3" hide="yes" size="4" pos="12" show="127.0.0.3" value="7f000003"/>
+ <field name="ip.src_host" showname="Source Host: 127.0.0.3" hide="yes" size="4" pos="12" show="127.0.0.3" value="7f000003"/>
+ <field name="ip.host" showname="Source or Destination Host: 127.0.0.3" hide="yes" size="4" pos="12" show="127.0.0.3" value="7f000003"/>
+ <field name="ip.dst" showname="Destination: 127.0.0.21" size="4" pos="16" show="127.0.0.21" value="7f000015"/>
+ <field name="ip.addr" showname="Source or Destination Address: 127.0.0.21" hide="yes" size="4" pos="16" show="127.0.0.21" value="7f000015"/>
+ <field name="ip.dst_host" showname="Destination Host: 127.0.0.21" hide="yes" size="4" pos="16" show="127.0.0.21" value="7f000015"/>
+ <field name="ip.host" showname="Source or Destination Host: 127.0.0.21" hide="yes" size="4" pos="16" show="127.0.0.21" value="7f000015"/>
+ <field name="" show="Source GeoIP: Unknown" size="4" pos="12" value="7f000003"/>
+ <field name="" show="Destination GeoIP: Unknown" size="4" pos="16" value="7f000015"/>
+ </proto>
+ <proto name="udp" showname="User Datagram Protocol, Src Port: 138 (138), Dst Port: 138 (138)" size="8" pos="20">
+ <field name="udp.srcport" showname="Source Port: 138" size="2" pos="20" show="138" value="008a"/>
+ <field name="udp.dstport" showname="Destination Port: 138" size="2" pos="22" show="138" value="008a"/>
+ <field name="udp.port" showname="Source or Destination Port: 138" hide="yes" size="2" pos="20" show="138" value="008a"/>
+ <field name="udp.port" showname="Source or Destination Port: 138" hide="yes" size="2" pos="22" show="138" value="008a"/>
+ <field name="udp.length" showname="Length: 202" size="2" pos="24" show="202" value="00ca"/>
+ <field name="udp.checksum" showname="Checksum: 0x0000 (none)" size="2" pos="26" show="0x00000000" value="0000">
+ <field name="udp.checksum_good" showname="Good Checksum: False" size="2" pos="26" show="0" value="0000"/>
+ <field name="udp.checksum_bad" showname="Bad Checksum: False" size="2" pos="26" show="0" value="0000"/>
+ </field>
+ <field name="udp.stream" showname="Stream index: 30" size="0" pos="28" show="30"/>
+ </proto>
+ <proto name="nbdgm" showname="NetBIOS Datagram Service" size="82" pos="28">
+ <field name="nbdgm.type" showname="Message Type: Direct_group datagram (17)" size="1" pos="28" show="17" value="11"/>
+ <field name="nbdgm.next" showname="More fragments follow: No" size="1" pos="29" show="0" value="0a"/>
+ <field name="nbdgm.first" showname="This is first fragment: Yes" size="1" pos="29" show="1" value="0a"/>
+ <field name="nbdgm.node_type" showname="Node Type: M node (2)" size="1" pos="29" show="2" value="0a"/>
+ <field name="nbdgm.dgram_id" showname="Datagram ID: 0x023d" size="2" pos="30" show="0x0000023d" value="023d"/>
+ <field name="nbdgm.src.ip" showname="Source IP: 127.0.0.3" size="4" pos="32" show="127.0.0.3" value="7f000003"/>
+ <field name="nbdgm.src.port" showname="Source Port: 138" size="2" pos="36" show="138" value="008a"/>
+ <field name="nbdgm.dgram_len" showname="Datagram length: 180 bytes" size="2" pos="38" show="180" value="00b4"/>
+ <field name="nbdgm.pkt_offset" showname="Packet offset: 0 bytes" size="2" pos="40" show="0" value="0000"/>
+ <field name="nbdgm.source_name" showname="Source name: LOCALNT4DC2&lt;00&gt; (Workstation/Redirector)" size="34" pos="42" show="LOCALNT4DC2&lt;00&gt;" value="20454d455045444542454d454f464544454545454444434341434143414341414100"/>
+ <field name="nbdgm.destination_name" showname="Destination name: SAMBA-TEST&lt;1e&gt; (Browser Election Service)" size="34" pos="76" show="SAMBA-TEST&lt;1e&gt;" value="2046444542454e45434542434e464545464644464543414341434143414341424f00"/>
+ </proto>
+ <proto name="smb" showname="SMB (Server Message Block Protocol)" size="112" pos="110">
+ <field name="" show="SMB Header" size="32" pos="110" value="ff534d4225000000000000000000000000000000000000000000000000000000">
+ <field name="smb.server_component" showname="Server Component: SMB" size="4" pos="110" show="0x424d53ff" value="ff534d42"/>
+ <field name="smb.cmd" showname="SMB Command: Trans (0x25)" size="1" pos="114" show="37" value="25"/>
+ <field name="smb.error_class" showname="Error Class: Success (0x00)" size="1" pos="115" show="0x00000000" value="00"/>
+ <field name="smb.reserved" showname="Reserved: 00" size="1" pos="116" show="00" value="00"/>
+ <field name="smb.error_code" showname="Error Code: No Error" size="2" pos="117" show="0x00000000" value="0000"/>
+ <field name="smb.flags" showname="Flags: 0x00" size="1" pos="119" show="0x00000000" value="00">
+ <field name="smb.flags.response" showname="0... .... = Request/Response: Message is a request to the server" size="1" pos="119" show="0" value="0" unmaskedvalue="00"/>
+ <field name="smb.flags.notify" showname=".0.. .... = Notify: Notify client only on open" size="1" pos="119" show="0" value="0" unmaskedvalue="00"/>
+ <field name="smb.flags.oplock" showname="..0. .... = Oplocks: OpLock not requested/granted" size="1" pos="119" show="0" value="0" unmaskedvalue="00"/>
+ <field name="smb.flags.canon" showname="...0 .... = Canonicalized Pathnames: Pathnames are not canonicalized" size="1" pos="119" show="0" value="0" unmaskedvalue="00"/>
+ <field name="smb.flags.caseless" showname=".... 0... = Case Sensitivity: Path names are case sensitive" size="1" pos="119" show="0" value="0" unmaskedvalue="00"/>
+ <field name="smb.flags.receive_buffer" showname=".... ..0. = Receive Buffer Posted: Receive buffer has not been posted" size="1" pos="119" show="0" value="0" unmaskedvalue="00"/>
+ <field name="smb.flags.lock" showname=".... ...0 = Lock and Read: Lock&amp;Read, Write&amp;Unlock are not supported" size="1" pos="119" show="0" value="0" unmaskedvalue="00"/>
+ </field>
+ <field name="smb.flags2" showname="Flags2: 0x0000" size="2" pos="120" show="0x00000000" value="0000">
+ <field name="smb.flags2.string" showname="0... .... .... .... = Unicode Strings: Strings are ASCII" size="2" pos="120" show="0" value="0" unmaskedvalue="0000"/>
+ <field name="smb.flags2.nt_error" showname=".0.. .... .... .... = Error Code Type: Error codes are DOS error codes" size="2" pos="120" show="0" value="0" unmaskedvalue="0000"/>
+ <field name="smb.flags2.roe" showname="..0. .... .... .... = Execute-only Reads: Don&#x27;t permit reads if execute-only" size="2" pos="120" show="0" value="0" unmaskedvalue="0000"/>
+ <field name="smb.flags2.dfs" showname="...0 .... .... .... = Dfs: Don&#x27;t resolve pathnames with Dfs" size="2" pos="120" show="0" value="0" unmaskedvalue="0000"/>
+ <field name="smb.flags2.esn" showname=".... 0... .... .... = Extended Security Negotiation: Extended security negotiation is not supported" size="2" pos="120" show="0" value="0" unmaskedvalue="0000"/>
+ <field name="smb.flags2.reparse_path" showname=".... .0.. .... .... = Reparse Path: The request does not use a @GMT reparse path" size="2" pos="120" show="0" value="0" unmaskedvalue="0000"/>
+ <field name="smb.flags2.long_names_used" showname=".... .... .0.. .... = Long Names Used: Path names in request are not long file names" size="2" pos="120" show="0" value="0" unmaskedvalue="0000"/>
+ <field name="smb.flags2.sec_sig_required" showname=".... .... ...0 .... = Security Signatures Required: Security signatures are not required" size="2" pos="120" show="0" value="0" unmaskedvalue="0000"/>
+ <field name="smb.flags2.compressed" showname=".... .... .... 0... = Compressed: Compression is not requested" size="2" pos="120" show="0" value="0" unmaskedvalue="0000"/>
+ <field name="smb.flags2.sec_sig" showname=".... .... .... .0.. = Security Signatures: Security signatures are not supported" size="2" pos="120" show="0" value="0" unmaskedvalue="0000"/>
+ <field name="smb.flags2.ea" showname=".... .... .... ..0. = Extended Attributes: Extended attributes are not supported" size="2" pos="120" show="0" value="0" unmaskedvalue="0000"/>
+ <field name="smb.flags2.long_names_allowed" showname=".... .... .... ...0 = Long Names Allowed: Long file names are not allowed in the response" size="2" pos="120" show="0" value="0" unmaskedvalue="0000"/>
+ </field>
+ <field name="smb.pid.high" showname="Process ID High: 0" size="2" pos="122" show="0" value="0000"/>
+ <field name="smb.signature" showname="Signature: 0000000000000000" size="8" pos="124" show="00:00:00:00:00:00:00:00" value="0000000000000000"/>
+ <field name="smb.reserved" showname="Reserved: 0000" size="2" pos="132" show="00:00" value="0000"/>
+ <field name="smb.tid" showname="Tree ID: 0" size="2" pos="134" show="0" value="0000"/>
+ <field name="smb.pid" showname="Process ID: 0" size="2" pos="136" show="0" value="0000"/>
+ <field name="smb.uid" showname="User ID: 0" size="2" pos="138" show="0" value="0000"/>
+ <field name="smb.mid" showname="Multiplex ID: 0" size="2" pos="140" show="0" value="0000"/>
+ </field>
+ <field name="" show="Trans Request (0x25)" size="80" pos="142" value="1100001a000000000000000000000000000000000000001a00560003000100010002002b005c4d41494c534c4f545c42524f5753450008018a0f011470170000000000004c4f43414c4e543444433200">
+ <field name="smb.wct" showname="Word Count (WCT): 17" size="1" pos="142" show="17" value="11"/>
+ <field name="smb.tpc" showname="Total Parameter Count: 0" size="2" pos="143" show="0" value="0000"/>
+ <field name="smb.tdc" showname="Total Data Count: 26" size="2" pos="145" show="26" value="1a00"/>
+ <field name="smb.mpc" showname="Max Parameter Count: 0" size="2" pos="147" show="0" value="0000"/>
+ <field name="smb.mdc" showname="Max Data Count: 0" size="2" pos="149" show="0" value="0000"/>
+ <field name="smb.msc" showname="Max Setup Count: 0" size="1" pos="151" show="0" value="00"/>
+ <field name="smb.reserved" showname="Reserved: 00" size="1" pos="152" show="00" value="00"/>
+ <field name="smb.transaction.flags" showname="Flags: 0x0000" size="2" pos="153" show="0x00000000" value="0000">
+ <field name="smb.transaction.flags.owt" showname=".... .... .... ..0. = One Way Transaction: Two way transaction" size="2" pos="153" show="0" value="0" unmaskedvalue="0000"/>
+ <field name="smb.transaction.flags.dtid" showname=".... .... .... ...0 = Disconnect TID: Do NOT disconnect TID" size="2" pos="153" show="0" value="0" unmaskedvalue="0000"/>
+ </field>
+ <field name="smb.timeout" showname="Timeout: Return immediately (0)" size="4" pos="155" show="0" value="00000000"/>
+ <field name="smb.reserved" showname="Reserved: 0000" size="2" pos="159" show="00:00" value="0000"/>
+ <field name="smb.pc" showname="Parameter Count: 0" size="2" pos="161" show="0" value="0000"/>
+ <field name="smb.po" showname="Parameter Offset: 0" size="2" pos="163" show="0" value="0000"/>
+ <field name="smb.dc" showname="Data Count: 26" size="2" pos="165" show="26" value="1a00"/>
+ <field name="smb.data_offset" showname="Data Offset: 86" size="2" pos="167" show="86" value="5600"/>
+ <field name="smb.sc" showname="Setup Count: 3" size="1" pos="169" show="3" value="03"/>
+ <field name="smb.reserved" showname="Reserved: 00" size="1" pos="170" show="00" value="00"/>
+ <field name="smb.bcc" showname="Byte Count (BCC): 43" size="2" pos="177" show="43" value="2b00"/>
+ <field name="smb.trans_name" showname="Transaction Name: \MAILSLOT\BROWSE" size="17" pos="179" show="\MAILSLOT\BROWSE" value="5c4d41494c534c4f545c42524f57534500"/>
+ </field>
+ </proto>
+ <proto name="mailslot" showname="SMB MailSlot Protocol" size="25" pos="171">
+ <field name="mailslot.opcode" showname="Opcode: Write Mail Slot (1)" size="2" pos="171" show="1" value="0100"/>
+ <field name="mailslot.priority" showname="Priority: 1" size="2" pos="173" show="1" value="0100"/>
+ <field name="mailslot.class" showname="Class: Unreliable &amp; Broadcast (2)" size="2" pos="175" show="2" value="0200"/>
+ <field name="mailslot.size" showname="Size: 43" size="2" pos="177" show="43" value="2b00"/>
+ <field name="mailslot.name" showname="Mailslot Name: \MAILSLOT\BROWSE" size="17" pos="179" show="\MAILSLOT\BROWSE" value="5c4d41494c534c4f545c42524f57534500"/>
+ </proto>
+ <proto name="browser" showname="Microsoft Windows Browser Protocol" size="26" pos="196">
+ <field name="browser.command" showname="Command: Browser Election Request (0x08)" size="1" pos="196" show="0x00000008" value="08"/>
+ <field name="browser.election.version" showname="Election Version: 1" size="1" pos="197" show="1" value="01"/>
+ <field name="browser.election.criteria" showname="Election Criteria: 0x14010f8a" size="4" pos="198" show="0x14010f8a" value="8a0f0114">
+ <field name="browser.election.desire" showname="Election Desire: 0x8a, Standby, Domain Master, NT" size="1" pos="198" show="0x0000008a" value="8a">
+ <field name="browser.election.desire.backup" showname=".... ...0 = Backup: NOT Backup Browse Server" size="1" pos="198" show="0" value="0" unmaskedvalue="8a"/>
+ <field name="browser.election.desire.standby" showname=".... ..1. = Standby: Standby Browse Server" size="1" pos="198" show="1" value="FFFFFFFF" unmaskedvalue="8a"/>
+ <field name="browser.election.desire.master" showname=".... .0.. = Master: NOT Master Browser" size="1" pos="198" show="0" value="0" unmaskedvalue="8a"/>
+ <field name="browser.election.desire.domain_master" showname=".... 1... = Domain Master: Domain Master Browse Server" size="1" pos="198" show="1" value="FFFFFFFF" unmaskedvalue="8a"/>
+ <field name="browser.election.desire.wins" showname="..0. .... = WINS: NOT WINS Client" size="1" pos="198" show="0" value="0" unmaskedvalue="8a"/>
+ <field name="browser.election.desire.nt" showname="1... .... = NT: Windows NT Advanced Server" size="1" pos="198" show="1" value="FFFFFFFF" unmaskedvalue="8a"/>
+ </field>
+ <field name="browser.proto_major" showname="Browser Protocol Major Version: 15" size="1" pos="199" show="15" value="0f"/>
+ <field name="browser.proto_minor" showname="Browser Protocol Minor Version: 1" size="1" pos="200" show="1" value="01"/>
+ <field name="browser.election.os" showname="Election OS: 0x14, NT Workstation" size="1" pos="201" show="0x00000014" value="14">
+ <field name="browser.election.os.wfw" showname=".... ...0 = WfW: Not Windows for Workgroups" size="1" pos="201" show="0" value="0" unmaskedvalue="14"/>
+ <field name="browser.election.os.ntw" showname="...1 .... = NT Workstation: Windows NT Workstation" size="1" pos="201" show="1" value="FFFFFFFF" unmaskedvalue="14"/>
+ <field name="browser.election.os.nts" showname="..0. .... = NT Server: Not Windows NT Server" size="1" pos="201" show="0" value="0" unmaskedvalue="14"/>
+ </field>
+ </field>
+ <field name="browser.uptime" showname="Uptime: 6 seconds" size="4" pos="202" show="6000" value="70170000"/>
+ <field name="browser.server" showname="Server Name: LOCALNT4DC2" size="12" pos="210" show="LOCALNT4DC2" value="4c4f43414c4e543444433200"/>
+ </proto>
+</packet>
+
+
+<packet>
+ <proto name="geninfo" pos="0" showname="General information" size="128">
+ <field name="num" pos="0" show="50351" showname="Number" value="c4af" size="128"/>
+ <field name="len" pos="0" show="128" showname="Frame Length" value="80" size="128"/>
+ <field name="caplen" pos="0" show="128" showname="Captured Length" value="80" size="128"/>
+ <field name="timestamp" pos="0" show="Feb 10, 2017 14:37:58.178692000 NZDT" showname="Captured Time" value="1486690678.178692000" size="128"/>
+ </proto>
+ <proto name="frame" showname="Frame 50351: 128 bytes on wire (1024 bits), 128 bytes captured (1024 bits)" size="128" pos="0">
+ <field name="frame.encap_type" showname="Encapsulation type: Raw IP (7)" size="0" pos="0" show="7"/>
+ <field name="frame.time" showname="Arrival Time: Feb 10, 2017 14:37:58.178692000 NZDT" size="0" pos="0" show="Feb 10, 2017 14:37:58.178692000 NZDT"/>
+ <field name="frame.offset_shift" showname="Time shift for this packet: 0.000000000 seconds" size="0" pos="0" show="0.000000000"/>
+ <field name="frame.time_epoch" showname="Epoch Time: 1486690678.178692000 seconds" size="0" pos="0" show="1486690678.178692000"/>
+ <field name="frame.time_delta" showname="Time delta from previous captured frame: 0.043192000 seconds" size="0" pos="0" show="0.043192000"/>
+ <field name="frame.time_delta_displayed" showname="Time delta from previous displayed frame: 0.043192000 seconds" size="0" pos="0" show="0.043192000"/>
+ <field name="frame.time_relative" showname="Time since reference or first frame: 101.648241000 seconds" size="0" pos="0" show="101.648241000"/>
+ <field name="frame.number" showname="Frame Number: 50351" size="0" pos="0" show="50351"/>
+ <field name="frame.len" showname="Frame Length: 128 bytes (1024 bits)" size="0" pos="0" show="128"/>
+ <field name="frame.cap_len" showname="Capture Length: 128 bytes (1024 bits)" size="0" pos="0" show="128"/>
+ <field name="frame.marked" showname="Frame is marked: False" size="0" pos="0" show="0"/>
+ <field name="frame.ignored" showname="Frame is ignored: False" size="0" pos="0" show="0"/>
+ <field name="frame.protocols" showname="Protocols in frame: raw:ip:tcp:nbss:smb:dcerpc" size="0" pos="0" show="raw:ip:tcp:nbss:smb:dcerpc"/>
+ </proto>
+ <proto name="raw" showname="Raw packet data" size="128" pos="0"/>
+ <proto name="ip" showname="Internet Protocol Version 4, Src: 127.0.0.30, Dst: 127.0.0.27" size="20" pos="0">
+ <field name="ip.version" showname="0100 .... = Version: 4" size="1" pos="0" show="4" value="4" unmaskedvalue="45"/>
+ <field name="ip.hdr_len" showname=".... 0101 = Header Length: 20 bytes" size="1" pos="0" show="5" value="5" unmaskedvalue="45"/>
+ <field name="ip.dsfield" showname="Differentiated Services Field: 0x00 (DSCP: CS0, ECN: Not-ECT)" size="1" pos="1" show="0x00000000" value="00">
+ <field name="ip.dsfield.dscp" showname="0000 00.. = Differentiated Services Codepoint: Default (0)" size="1" pos="1" show="0" value="0" unmaskedvalue="00"/>
+ <field name="ip.dsfield.ecn" showname=".... ..00 = Explicit Congestion Notification: Not ECN-Capable Transport (0)" size="1" pos="1" show="0" value="0" unmaskedvalue="00"/>
+ </field>
+ <field name="ip.len" showname="Total Length: 128" size="2" pos="2" show="128" value="0080"/>
+ <field name="ip.id" showname="Identification: 0xffff (65535)" size="2" pos="4" show="0x0000ffff" value="ffff"/>
+ <field name="ip.flags" showname="Flags: 0x02 (Don&#x27;t Fragment)" size="1" pos="6" show="0x00000002" value="40">
+ <field name="ip.flags.rb" showname="0... .... = Reserved bit: Not set" size="1" pos="6" show="0" value="40"/>
+ <field name="ip.flags.df" showname=".1.. .... = Don&#x27;t fragment: Set" size="1" pos="6" show="1" value="40"/>
+ <field name="ip.flags.mf" showname="..0. .... = More fragments: Not set" size="1" pos="6" show="0" value="40"/>
+ </field>
+ <field name="ip.frag_offset" showname="Fragment offset: 0" size="2" pos="6" show="0" value="4000"/>
+ <field name="ip.ttl" showname="Time to live: 255" size="1" pos="8" show="255" value="ff"/>
+ <field name="ip.proto" showname="Protocol: TCP (6)" size="1" pos="9" show="6" value="06"/>
+ <field name="ip.checksum" showname="Header checksum: 0x0000 [validation disabled]" size="2" pos="10" show="0x00000000" value="0000">
+ <field name="ip.checksum_good" showname="Good: False" size="2" pos="10" show="0" value="0000"/>
+ <field name="ip.checksum_bad" showname="Bad: False" size="2" pos="10" show="0" value="0000"/>
+ </field>
+ <field name="ip.src" showname="Source: 127.0.0.30" size="4" pos="12" show="127.0.0.30" value="7f00001e"/>
+ <field name="ip.addr" showname="Source or Destination Address: 127.0.0.30" hide="yes" size="4" pos="12" show="127.0.0.30" value="7f00001e"/>
+ <field name="ip.src_host" showname="Source Host: 127.0.0.30" hide="yes" size="4" pos="12" show="127.0.0.30" value="7f00001e"/>
+ <field name="ip.host" showname="Source or Destination Host: 127.0.0.30" hide="yes" size="4" pos="12" show="127.0.0.30" value="7f00001e"/>
+ <field name="ip.dst" showname="Destination: 127.0.0.27" size="4" pos="16" show="127.0.0.27" value="7f00001b"/>
+ <field name="ip.addr" showname="Source or Destination Address: 127.0.0.27" hide="yes" size="4" pos="16" show="127.0.0.27" value="7f00001b"/>
+ <field name="ip.dst_host" showname="Destination Host: 127.0.0.27" hide="yes" size="4" pos="16" show="127.0.0.27" value="7f00001b"/>
+ <field name="ip.host" showname="Source or Destination Host: 127.0.0.27" hide="yes" size="4" pos="16" show="127.0.0.27" value="7f00001b"/>
+ <field name="" show="Source GeoIP: Unknown" size="4" pos="12" value="7f00001e"/>
+ <field name="" show="Destination GeoIP: Unknown" size="4" pos="16" value="7f00001b"/>
+ </proto>
+ <proto name="tcp" showname="Transmission Control Protocol, Src Port: 445 (445), Dst Port: 17919 (17919), Seq: 1815, Ack: 3639, Len: 88" size="20" pos="20">
+ <field name="tcp.srcport" showname="Source Port: 445" size="2" pos="20" show="445" value="01bd"/>
+ <field name="tcp.dstport" showname="Destination Port: 17919" size="2" pos="22" show="17919" value="45ff"/>
+ <field name="tcp.port" showname="Source or Destination Port: 445" hide="yes" size="2" pos="20" show="445" value="01bd"/>
+ <field name="tcp.port" showname="Source or Destination Port: 17919" hide="yes" size="2" pos="22" show="17919" value="45ff"/>
+ <field name="tcp.stream" showname="Stream index: 1177" size="0" pos="20" show="1177"/>
+ <field name="tcp.len" showname="TCP Segment Len: 88" size="1" pos="32" show="88" value="50"/>
+ <field name="tcp.seq" showname="Sequence number: 1815 (relative sequence number)" size="4" pos="24" show="1815" value="00000717"/>
+ <field name="tcp.nxtseq" showname="Next sequence number: 1903 (relative sequence number)" size="0" pos="20" show="1903"/>
+ <field name="tcp.ack" showname="Acknowledgment number: 3639 (relative ack number)" size="4" pos="28" show="3639" value="00000e37"/>
+ <field name="tcp.hdr_len" showname="Header Length: 20 bytes" size="1" pos="32" show="20" value="50"/>
+ <field name="tcp.flags" showname="Flags: 0x018 (PSH, ACK)" size="2" pos="32" show="0x00000018" value="18" unmaskedvalue="5018">
+ <field name="tcp.flags.res" showname="000. .... .... = Reserved: Not set" size="1" pos="32" show="0" value="0" unmaskedvalue="50"/>
+ <field name="tcp.flags.ns" showname="...0 .... .... = Nonce: Not set" size="1" pos="32" show="0" value="0" unmaskedvalue="50"/>
+ <field name="tcp.flags.cwr" showname=".... 0... .... = Congestion Window Reduced (CWR): Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.ecn" showname=".... .0.. .... = ECN-Echo: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.urg" showname=".... ..0. .... = Urgent: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.ack" showname=".... ...1 .... = Acknowledgment: Set" size="1" pos="33" show="1" value="FFFFFFFF" unmaskedvalue="18"/>
+ <field name="tcp.flags.push" showname=".... .... 1... = Push: Set" size="1" pos="33" show="1" value="FFFFFFFF" unmaskedvalue="18"/>
+ <field name="tcp.flags.reset" showname=".... .... .0.. = Reset: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.syn" showname=".... .... ..0. = Syn: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.fin" showname=".... .... ...0 = Fin: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.str" showname="TCP Flags: *******AP***" size="2" pos="32" show="*******AP***" value="5018"/>
+ </field>
+ <field name="tcp.window_size_value" showname="Window size value: 32767" size="2" pos="34" show="32767" value="7fff"/>
+ <field name="tcp.window_size" showname="Calculated window size: 32767" size="2" pos="34" show="32767" value="7fff"/>
+ <field name="tcp.window_size_scalefactor" showname="Window size scaling factor: -2 (no window scaling used)" size="2" pos="34" show="-2" value="7fff"/>
+ <field name="tcp.checksum" showname="Checksum: 0x0000 [validation disabled]" size="2" pos="36" show="0x00000000" value="0000">
+ <field name="tcp.checksum_good" showname="Good Checksum: False" size="2" pos="36" show="0" value="0000"/>
+ <field name="tcp.checksum_bad" showname="Bad Checksum: False" size="2" pos="36" show="0" value="0000"/>
+ </field>
+ <field name="tcp.urgent_pointer" showname="Urgent pointer: 0" size="2" pos="38" show="0" value="0000"/>
+ <field name="tcp.analysis" showname="SEQ/ACK analysis" size="0" pos="20" show="" value="">
+ <field name="tcp.analysis.acks_frame" showname="This is an ACK to the segment in frame: 50348" size="0" pos="20" show="50348"/>
+ <field name="tcp.analysis.ack_rtt" showname="The RTT to ACK the segment was: 0.043267000 seconds" size="0" pos="20" show="0.043267000"/>
+ <field name="tcp.analysis.initial_rtt" showname="iRTT: 0.000015000 seconds" size="0" pos="20" show="0.000015000"/>
+ <field name="tcp.analysis.bytes_in_flight" showname="Bytes in flight: 88" size="0" pos="20" show="88"/>
+ </field>
+ </proto>
+ <proto name="nbss" showname="NetBIOS Session Service" size="88" pos="40">
+ <field name="nbss.type" showname="Message Type: Session message (0x00)" size="1" pos="40" show="0x00000000" value="00"/>
+ <field name="nbss.length" showname="Length: 84" size="3" pos="41" show="84" value="000054"/>
+ </proto>
+ <proto name="smb" showname="SMB (Server Message Block Protocol)" size="84" pos="44">
+ <field name="" show="SMB Header" size="32" pos="44" value="ff534d4225000000008817c8000045cbaf6829ae71c20000d95d0000c67c0b00">
+ <field name="smb.server_component" showname="Server Component: SMB" size="4" pos="44" show="0x424d53ff" value="ff534d42"/>
+ <field name="smb.response_to" showname="Response to: 50348" size="0" pos="44" show="50348"/>
+ <field name="smb.time" showname="Time from request: 0.043267000 seconds" size="0" pos="44" show="0.043267000"/>
+ <field name="smb.cmd" showname="SMB Command: Trans (0x25)" size="1" pos="48" show="37" value="25"/>
+ <field name="smb.nt_status" showname="NT Status: STATUS_SUCCESS (0x00000000)" size="4" pos="49" show="0" value="00000000"/>
+ <field name="smb.flags" showname="Flags: 0x88, Request/Response, Case Sensitivity" size="1" pos="53" show="0x00000088" value="88">
+ <field name="smb.flags.response" showname="1... .... = Request/Response: Message is a response to the client/redirector" size="1" pos="53" show="1" value="FFFFFFFF" unmaskedvalue="88"/>
+ <field name="smb.flags.notify" showname=".0.. .... = Notify: Notify client only on open" size="1" pos="53" show="0" value="0" unmaskedvalue="88"/>
+ <field name="smb.flags.oplock" showname="..0. .... = Oplocks: OpLock not requested/granted" size="1" pos="53" show="0" value="0" unmaskedvalue="88"/>
+ <field name="smb.flags.canon" showname="...0 .... = Canonicalized Pathnames: Pathnames are not canonicalized" size="1" pos="53" show="0" value="0" unmaskedvalue="88"/>
+ <field name="smb.flags.caseless" showname=".... 1... = Case Sensitivity: Path names are caseless" size="1" pos="53" show="1" value="FFFFFFFF" unmaskedvalue="88"/>
+ <field name="smb.flags.receive_buffer" showname=".... ..0. = Receive Buffer Posted: Receive buffer has not been posted" size="1" pos="53" show="0" value="0" unmaskedvalue="88"/>
+ <field name="smb.flags.lock" showname=".... ...0 = Lock and Read: Lock&amp;Read, Write&amp;Unlock are not supported" size="1" pos="53" show="0" value="0" unmaskedvalue="88"/>
+ </field>
+ <field name="smb.flags2" showname="Flags2: 0xc817, Unicode Strings, Error Code Type, Extended Security Negotiation, Security Signatures Required, Security Signatures, Extended Attributes, Long Names Allowed" size="2" pos="54" show="0x0000c817" value="17c8">
+ <field name="smb.flags2.string" showname="1... .... .... .... = Unicode Strings: Strings are Unicode" size="2" pos="54" show="1" value="FFFFFFFF" unmaskedvalue="17c8"/>
+ <field name="smb.flags2.nt_error" showname=".1.. .... .... .... = Error Code Type: Error codes are NT error codes" size="2" pos="54" show="1" value="FFFFFFFF" unmaskedvalue="17c8"/>
+ <field name="smb.flags2.roe" showname="..0. .... .... .... = Execute-only Reads: Don&#x27;t permit reads if execute-only" size="2" pos="54" show="0" value="0" unmaskedvalue="17c8"/>
+ <field name="smb.flags2.dfs" showname="...0 .... .... .... = Dfs: Don&#x27;t resolve pathnames with Dfs" size="2" pos="54" show="0" value="0" unmaskedvalue="17c8"/>
+ <field name="smb.flags2.esn" showname=".... 1... .... .... = Extended Security Negotiation: Extended security negotiation is supported" size="2" pos="54" show="1" value="FFFFFFFF" unmaskedvalue="17c8"/>
+ <field name="smb.flags2.reparse_path" showname=".... .0.. .... .... = Reparse Path: The request does not use a @GMT reparse path" size="2" pos="54" show="0" value="0" unmaskedvalue="17c8"/>
+ <field name="smb.flags2.long_names_used" showname=".... .... .0.. .... = Long Names Used: Path names in request are not long file names" size="2" pos="54" show="0" value="0" unmaskedvalue="17c8"/>
+ <field name="smb.flags2.sec_sig_required" showname=".... .... ...1 .... = Security Signatures Required: Security signatures are required" size="2" pos="54" show="1" value="FFFFFFFF" unmaskedvalue="17c8"/>
+ <field name="smb.flags2.compressed" showname=".... .... .... 0... = Compressed: Compression is not requested" size="2" pos="54" show="0" value="0" unmaskedvalue="17c8"/>
+ <field name="smb.flags2.sec_sig" showname=".... .... .... .1.. = Security Signatures: Security signatures are supported" size="2" pos="54" show="1" value="FFFFFFFF" unmaskedvalue="17c8"/>
+ <field name="smb.flags2.ea" showname=".... .... .... ..1. = Extended Attributes: Extended attributes are supported" size="2" pos="54" show="1" value="FFFFFFFF" unmaskedvalue="17c8"/>
+ <field name="smb.flags2.long_names_allowed" showname=".... .... .... ...1 = Long Names Allowed: Long file names are allowed in the response" size="2" pos="54" show="1" value="FFFFFFFF" unmaskedvalue="17c8"/>
+ </field>
+ <field name="smb.pid.high" showname="Process ID High: 0" size="2" pos="56" show="0" value="0000"/>
+ <field name="smb.signature" showname="Signature: 45cbaf6829ae71c2" size="8" pos="58" show="45:cb:af:68:29:ae:71:c2" value="45cbaf6829ae71c2"/>
+ <field name="smb.reserved" showname="Reserved: 0000" size="2" pos="66" show="00:00" value="0000"/>
+ <field name="smb.tid" showname="Tree ID: 24025 (\\ADDC.ADDOM.SAMBA.EXAMPLE.COM\IPC$)" size="2" pos="68" show="24025" value="d95d">
+ <field name="smb.path" showname="Path: \\ADDC.ADDOM.SAMBA.EXAMPLE.COM\IPC$" size="0" pos="112" show="\\ADDC.ADDOM.SAMBA.EXAMPLE.COM\IPC$"/>
+ <field name="smb.fid.mapped_in" showname="Mapped in: 50252" size="0" pos="112" show="50252"/>
+ </field>
+ <field name="smb.pid" showname="Process ID: 0" size="2" pos="70" show="0" value="0000"/>
+ <field name="smb.uid" showname="User ID: 31942" size="2" pos="72" show="31942" value="c67c"/>
+ <field name="smb.mid" showname="Multiplex ID: 11" size="2" pos="74" show="11" value="0b00"/>
+ </field>
+ <field name="" show="Trans Response (0x25)" size="52" pos="76" value="0a00001c0000000000380000001c003800000000001d000005000203100000001c00000007000000040000000000000000000000">
+ <field name="smb.fid" showname="FID: 0x2ea1 (\lsarpc)" size="0" pos="76" show="0x00002ea1">
+ <field name="smb.fid.opened_in" showname="Opened in: 50258" size="0" pos="120" show="50258"/>
+ <field name="smb.file" showname="File Name: \lsarpc" size="0" pos="120" show="\lsarpc"/>
+ <field name="smb.create_flags" showname="Create Flags: 0x00000000" size="4" pos="120" show="0x00000000" value="ff534d42">
+ <field name="smb.nt.create.oplock" showname=".... .... .... .... .... .... .... ..0. = Exclusive Oplock: Does NOT request oplock" size="4" pos="44" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.nt.create.batch_oplock" showname=".... .... .... .... .... .... .... .0.. = Batch Oplock: Does NOT request batch oplock" size="4" pos="44" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.nt.create.dir" showname=".... .... .... .... .... .... .... 0... = Create Directory: Target of open can be a file" size="4" pos="44" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.nt.create.ext" showname=".... .... .... .... .... .... ...0 .... = Extended Response: Extended responses NOT required" size="4" pos="44" show="0" value="0" unmaskedvalue="ff534d42"/>
+ </field>
+ <field name="smb.access_mask" showname="Access Mask: 0x0002019f" size="4" pos="120" show="0x0002019f" value="ff534d42">
+ <field name="smb.access.read" showname=".... .... .... .... .... .... .... ...1 = Read: READ access" size="4" pos="44" show="1" value="FFFFFFFF" unmaskedvalue="ff534d42"/>
+ <field name="smb.access.write" showname=".... .... .... .... .... .... .... ..1. = Write: WRITE access" size="4" pos="44" show="1" value="FFFFFFFF" unmaskedvalue="ff534d42"/>
+ <field name="smb.access.append" showname=".... .... .... .... .... .... .... .1.. = Append: APPEND access" size="4" pos="44" show="1" value="FFFFFFFF" unmaskedvalue="ff534d42"/>
+ <field name="smb.access.read_ea" showname=".... .... .... .... .... .... .... 1... = Read EA: READ EXTENDED ATTRIBUTES access" size="4" pos="44" show="1" value="FFFFFFFF" unmaskedvalue="ff534d42"/>
+ <field name="smb.access.write_ea" showname=".... .... .... .... .... .... ...1 .... = Write EA: WRITE EXTENDED ATTRIBUTES access" size="4" pos="44" show="1" value="FFFFFFFF" unmaskedvalue="ff534d42"/>
+ <field name="smb.access.execute" showname=".... .... .... .... .... .... ..0. .... = Execute: NO execute access" size="4" pos="44" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.access.delete_child" showname=".... .... .... .... .... .... .0.. .... = Delete Child: NO delete child access" size="4" pos="44" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.access.read_attributes" showname=".... .... .... .... .... .... 1... .... = Read Attributes: READ ATTRIBUTES access" size="4" pos="44" show="1" value="FFFFFFFF" unmaskedvalue="ff534d42"/>
+ <field name="smb.access.write_attributes" showname=".... .... .... .... .... ...1 .... .... = Write Attributes: WRITE ATTRIBUTES access" size="4" pos="44" show="1" value="FFFFFFFF" unmaskedvalue="ff534d42"/>
+ <field name="smb.access.delete" showname=".... .... .... ...0 .... .... .... .... = Delete: NO delete access" size="4" pos="44" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.access.read_control" showname=".... .... .... ..1. .... .... .... .... = Read Control: READ ACCESS to owner, group and ACL of the SID" size="4" pos="44" show="1" value="FFFFFFFF" unmaskedvalue="ff534d42"/>
+ <field name="smb.access.write_dac" showname=".... .... .... .0.. .... .... .... .... = Write DAC: Owner may NOT write to the DAC" size="4" pos="44" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.access.write_owner" showname=".... .... .... 0... .... .... .... .... = Write Owner: Can NOT write owner (take ownership)" size="4" pos="44" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.access.synchronize" showname=".... .... ...0 .... .... .... .... .... = Synchronize: Can NOT wait on handle to synchronize on completion of I/O" size="4" pos="44" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.access.system_security" showname=".... ...0 .... .... .... .... .... .... = System Security: System security is NOT set" size="4" pos="44" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.access.maximum_allowed" showname=".... ..0. .... .... .... .... .... .... = Maximum Allowed: Maximum allowed is NOT set" size="4" pos="44" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.access.generic_all" showname="...0 .... .... .... .... .... .... .... = Generic All: Generic all is NOT set" size="4" pos="44" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.access.generic_execute" showname="..0. .... .... .... .... .... .... .... = Generic Execute: Generic execute is NOT set" size="4" pos="44" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.access.generic_write" showname=".0.. .... .... .... .... .... .... .... = Generic Write: Generic write is NOT set" size="4" pos="44" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.access.generic_read" showname="0... .... .... .... .... .... .... .... = Generic Read: Generic read is NOT set" size="4" pos="44" show="0" value="0" unmaskedvalue="ff534d42"/>
+ </field>
+ <field name="smb.file_attribute" showname="File Attributes: 0x00000000" size="4" pos="120" show="0x00000000" value="ff534d42">
+ <field name="smb.file_attribute.read_only" showname=".... .... .... .... .... .... .... ...0 = Read Only: NOT read only" size="4" pos="44" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.file_attribute.hidden" showname=".... .... .... .... .... .... .... ..0. = Hidden: NOT hidden" size="4" pos="44" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.file_attribute.system" showname=".... .... .... .... .... .... .... .0.. = System: NOT a system file/dir" size="4" pos="44" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.file_attribute.volume" showname=".... .... .... .... .... .... .... 0... = Volume ID: NOT a volume ID" size="4" pos="44" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.file_attribute.directory" showname=".... .... .... .... .... .... ...0 .... = Directory: NOT a directory" size="4" pos="44" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.file_attribute.archive" showname=".... .... .... .... .... .... ..0. .... = Archive: Has NOT been modified since last archive" size="4" pos="44" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.file_attribute.device" showname=".... .... .... .... .... .... .0.. .... = Device: NOT a device" size="4" pos="44" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.file_attribute.normal" showname=".... .... .... .... .... .... 0... .... = Normal: Has some attribute set" size="4" pos="44" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.file_attribute.temporary" showname=".... .... .... .... .... ...0 .... .... = Temporary: NOT a temporary file" size="4" pos="44" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.file_attribute.sparse" showname=".... .... .... .... .... ..0. .... .... = Sparse: NOT a sparse file" size="4" pos="44" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.file_attribute.reparse" showname=".... .... .... .... .... .0.. .... .... = Reparse Point: Does NOT have an associated reparse point" size="4" pos="44" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.file_attribute.compressed" showname=".... .... .... .... .... 0... .... .... = Compressed: Uncompressed" size="4" pos="44" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.file_attribute.offline" showname=".... .... .... .... ...0 .... .... .... = Offline: Online" size="4" pos="44" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.file_attribute.not_content_indexed" showname=".... .... .... .... ..0. .... .... .... = Content Indexed: NOT content indexed" size="4" pos="44" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.file_attribute.encrypted" showname=".... .... .... .... .0.. .... .... .... = Encrypted: This is NOT an encrypted file" size="4" pos="44" show="0" value="0" unmaskedvalue="ff534d42"/>
+ </field>
+ <field name="smb.share_access" showname="Share Access: 0x00000003, Read, Write" size="4" pos="120" show="0x00000003" value="ff534d42">
+ <field name="smb.share.access.read" showname=".... .... .... .... .... .... .... ...1 = Read: Object can be shared for READ" size="4" pos="44" show="1" value="FFFFFFFF" unmaskedvalue="ff534d42"/>
+ <field name="smb.share.access.write" showname=".... .... .... .... .... .... .... ..1. = Write: Object can be shared for WRITE" size="4" pos="44" show="1" value="FFFFFFFF" unmaskedvalue="ff534d42"/>
+ <field name="smb.share.access.delete" showname=".... .... .... .... .... .... .... .0.. = Delete: Object can NOT be shared for delete" size="4" pos="44" show="0" value="0" unmaskedvalue="ff534d42"/>
+ </field>
+ <field name="smb.create_options" showname="Create Options: 0x00000000" size="4" pos="120" show="0x00000000" value="ff534d42">
+ <field name="smb.nt.create_options.directory" showname=".... .... .... .... .... .... .... ...0 = Directory: File being created/opened must not be a directory" size="4" pos="44" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.nt.create_options.write_through" showname=".... .... .... .... .... .... .... ..0. = Write Through: Writes need not flush buffered data before completing" size="4" pos="44" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.nt.create_options.sequential_only" showname=".... .... .... .... .... .... .... .0.. = Sequential Only: The file might not only be accessed sequentially" size="4" pos="44" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.nt.create_options.intermediate_buffering" showname=".... .... .... .... .... .... .... 0... = Intermediate Buffering: Intermediate buffering is allowed" size="4" pos="44" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.nt.create_options.sync_io_alert" showname=".... .... .... .... .... .... ...0 .... = Sync I/O Alert: Operations NOT necessarily synchronous" size="4" pos="44" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.nt.create_options.sync_io_nonalert" showname=".... .... .... .... .... .... ..0. .... = Sync I/O Nonalert: Operations NOT necessarily synchronous" size="4" pos="44" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.nt.create_options.non_directory" showname=".... .... .... .... .... .... .0.. .... = Non-Directory: File being created/opened must be a directory" size="4" pos="44" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.nt.create_options.create_tree_connection" showname=".... .... .... .... .... .... 0... .... = Create Tree Connection: Create Tree Connections is NOT set" size="4" pos="44" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.nt.create_options.complete_if_oplocked" showname=".... .... .... .... .... ...0 .... .... = Complete If Oplocked: Complete if oplocked is NOT set" size="4" pos="44" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.nt.create_options.no_ea_knowledge" showname=".... .... .... .... .... ..0. .... .... = No EA Knowledge: The client understands extended attributes" size="4" pos="44" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.nt.create_options.eight_dot_three_only" showname=".... .... .... .... .... .0.. .... .... = 8.3 Only: The client understands long file names" size="4" pos="44" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.nt.create_options.random_access" showname=".... .... .... .... .... 0... .... .... = Random Access: The file will not be accessed randomly" size="4" pos="44" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.nt.create_options.delete_on_close" showname=".... .... .... .... ...0 .... .... .... = Delete On Close: The file should not be deleted when it is closed" size="4" pos="44" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.nt.create_options.open_by_fileid" showname=".... .... .... .... ..0. .... .... .... = Open By FileID: OpenByFileID is NOT set" size="4" pos="44" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.nt.create_options.backup_intent" showname=".... .... .... .... .0.. .... .... .... = Backup Intent: This is a normal create" size="4" pos="44" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.nt.create_options.no_compression" showname=".... .... .... .... 0... .... .... .... = No Compression: Compression is allowed for Open/Create" size="4" pos="44" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.nt.create_options.reserve_opfilter" showname=".... .... ...0 .... .... .... .... .... = Reserve Opfilter: Reserve Opfilter is NOT set" size="4" pos="44" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.nt.create_options.open_reparse_point" showname=".... .... ..0. .... .... .... .... .... = Open Reparse Point: Normal open" size="4" pos="44" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.nt.create_options.open_no_recall" showname=".... .... .0.. .... .... .... .... .... = Open No Recall: Open no recall is NOT set" size="4" pos="44" show="0" value="0" unmaskedvalue="ff534d42"/>
+ <field name="smb.nt.create_options.open_for_free_space_query" showname=".... .... 0... .... .... .... .... .... = Open For Free Space query: This is NOT an open for free space query" size="4" pos="44" show="0" value="0" unmaskedvalue="ff534d42"/>
+ </field>
+ <field name="smb.create.disposition" showname="Disposition: Open (if file exists open it, else fail) (1)" size="0" pos="120" show="1"/>
+ </field>
+ <field name="smb.wct" showname="Word Count (WCT): 10" size="1" pos="76" show="10" value="0a"/>
+ <field name="smb.tpc" showname="Total Parameter Count: 0" size="2" pos="77" show="0" value="0000"/>
+ <field name="smb.tdc" showname="Total Data Count: 28" size="2" pos="79" show="28" value="1c00"/>
+ <field name="smb.reserved" showname="Reserved: 0000" size="2" pos="81" show="00:00" value="0000"/>
+ <field name="smb.pc" showname="Parameter Count: 0" size="2" pos="83" show="0" value="0000"/>
+ <field name="smb.po" showname="Parameter Offset: 56" size="2" pos="85" show="56" value="3800"/>
+ <field name="smb.pd" showname="Parameter Displacement: 0" size="2" pos="87" show="0" value="0000"/>
+ <field name="smb.dc" showname="Data Count: 28" size="2" pos="89" show="28" value="1c00"/>
+ <field name="smb.data_offset" showname="Data Offset: 56" size="2" pos="91" show="56" value="3800"/>
+ <field name="smb.data_disp" showname="Data Displacement: 0" size="2" pos="93" show="0" value="0000"/>
+ <field name="smb.sc" showname="Setup Count: 0" size="1" pos="95" show="0" value="00"/>
+ <field name="smb.reserved" showname="Reserved: 00" size="1" pos="96" show="00" value="00"/>
+ <field name="smb.bcc" showname="Byte Count (BCC): 29" size="2" pos="97" show="29" value="1d00"/>
+ <field name="smb.padding" showname="Padding: 00" size="1" pos="99" show="00" value="00"/>
+ </field>
+ </proto>
+ <proto name="smb_pipe" showname="SMB Pipe Protocol" size="0" pos="0">
+ <field name="smb_pipe.function" showname="Function: TransactNmPipe (0x0026)" size="0" pos="0" show="0x00000026"/>
+ <field name="smb.fid" showname="FID: 0x2ea1 (\lsarpc)" size="0" pos="100" show="0x00002ea1">
+ <field name="smb.fid.opened_in" showname="Opened in: 50258" size="0" pos="100" show="50258"/>
+ <field name="smb.file" showname="File Name: \lsarpc" size="0" pos="100" show="\lsarpc"/>
+ <field name="smb.create_flags" showname="Create Flags: 0x00000000" size="4" pos="100" show="0x00000000" value="05000203">
+ <field name="smb.nt.create.oplock" showname=".... .... .... .... .... .... .... ..0. = Exclusive Oplock: Does NOT request oplock" size="4" pos="100" show="0" value="0" unmaskedvalue="05000203"/>
+ <field name="smb.nt.create.batch_oplock" showname=".... .... .... .... .... .... .... .0.. = Batch Oplock: Does NOT request batch oplock" size="4" pos="100" show="0" value="0" unmaskedvalue="05000203"/>
+ <field name="smb.nt.create.dir" showname=".... .... .... .... .... .... .... 0... = Create Directory: Target of open can be a file" size="4" pos="100" show="0" value="0" unmaskedvalue="05000203"/>
+ <field name="smb.nt.create.ext" showname=".... .... .... .... .... .... ...0 .... = Extended Response: Extended responses NOT required" size="4" pos="100" show="0" value="0" unmaskedvalue="05000203"/>
+ </field>
+ <field name="smb.access_mask" showname="Access Mask: 0x0002019f" size="4" pos="100" show="0x0002019f" value="05000203">
+ <field name="smb.access.read" showname=".... .... .... .... .... .... .... ...1 = Read: READ access" size="4" pos="100" show="1" value="FFFFFFFF" unmaskedvalue="05000203"/>
+ <field name="smb.access.write" showname=".... .... .... .... .... .... .... ..1. = Write: WRITE access" size="4" pos="100" show="1" value="FFFFFFFF" unmaskedvalue="05000203"/>
+ <field name="smb.access.append" showname=".... .... .... .... .... .... .... .1.. = Append: APPEND access" size="4" pos="100" show="1" value="FFFFFFFF" unmaskedvalue="05000203"/>
+ <field name="smb.access.read_ea" showname=".... .... .... .... .... .... .... 1... = Read EA: READ EXTENDED ATTRIBUTES access" size="4" pos="100" show="1" value="FFFFFFFF" unmaskedvalue="05000203"/>
+ <field name="smb.access.write_ea" showname=".... .... .... .... .... .... ...1 .... = Write EA: WRITE EXTENDED ATTRIBUTES access" size="4" pos="100" show="1" value="FFFFFFFF" unmaskedvalue="05000203"/>
+ <field name="smb.access.execute" showname=".... .... .... .... .... .... ..0. .... = Execute: NO execute access" size="4" pos="100" show="0" value="0" unmaskedvalue="05000203"/>
+ <field name="smb.access.delete_child" showname=".... .... .... .... .... .... .0.. .... = Delete Child: NO delete child access" size="4" pos="100" show="0" value="0" unmaskedvalue="05000203"/>
+ <field name="smb.access.read_attributes" showname=".... .... .... .... .... .... 1... .... = Read Attributes: READ ATTRIBUTES access" size="4" pos="100" show="1" value="FFFFFFFF" unmaskedvalue="05000203"/>
+ <field name="smb.access.write_attributes" showname=".... .... .... .... .... ...1 .... .... = Write Attributes: WRITE ATTRIBUTES access" size="4" pos="100" show="1" value="FFFFFFFF" unmaskedvalue="05000203"/>
+ <field name="smb.access.delete" showname=".... .... .... ...0 .... .... .... .... = Delete: NO delete access" size="4" pos="100" show="0" value="0" unmaskedvalue="05000203"/>
+ <field name="smb.access.read_control" showname=".... .... .... ..1. .... .... .... .... = Read Control: READ ACCESS to owner, group and ACL of the SID" size="4" pos="100" show="1" value="FFFFFFFF" unmaskedvalue="05000203"/>
+ <field name="smb.access.write_dac" showname=".... .... .... .0.. .... .... .... .... = Write DAC: Owner may NOT write to the DAC" size="4" pos="100" show="0" value="0" unmaskedvalue="05000203"/>
+ <field name="smb.access.write_owner" showname=".... .... .... 0... .... .... .... .... = Write Owner: Can NOT write owner (take ownership)" size="4" pos="100" show="0" value="0" unmaskedvalue="05000203"/>
+ <field name="smb.access.synchronize" showname=".... .... ...0 .... .... .... .... .... = Synchronize: Can NOT wait on handle to synchronize on completion of I/O" size="4" pos="100" show="0" value="0" unmaskedvalue="05000203"/>
+ <field name="smb.access.system_security" showname=".... ...0 .... .... .... .... .... .... = System Security: System security is NOT set" size="4" pos="100" show="0" value="0" unmaskedvalue="05000203"/>
+ <field name="smb.access.maximum_allowed" showname=".... ..0. .... .... .... .... .... .... = Maximum Allowed: Maximum allowed is NOT set" size="4" pos="100" show="0" value="0" unmaskedvalue="05000203"/>
+ <field name="smb.access.generic_all" showname="...0 .... .... .... .... .... .... .... = Generic All: Generic all is NOT set" size="4" pos="100" show="0" value="0" unmaskedvalue="05000203"/>
+ <field name="smb.access.generic_execute" showname="..0. .... .... .... .... .... .... .... = Generic Execute: Generic execute is NOT set" size="4" pos="100" show="0" value="0" unmaskedvalue="05000203"/>
+ <field name="smb.access.generic_write" showname=".0.. .... .... .... .... .... .... .... = Generic Write: Generic write is NOT set" size="4" pos="100" show="0" value="0" unmaskedvalue="05000203"/>
+ <field name="smb.access.generic_read" showname="0... .... .... .... .... .... .... .... = Generic Read: Generic read is NOT set" size="4" pos="100" show="0" value="0" unmaskedvalue="05000203"/>
+ </field>
+ <field name="smb.file_attribute" showname="File Attributes: 0x00000000" size="4" pos="100" show="0x00000000" value="05000203">
+ <field name="smb.file_attribute.read_only" showname=".... .... .... .... .... .... .... ...0 = Read Only: NOT read only" size="4" pos="100" show="0" value="0" unmaskedvalue="05000203"/>
+ <field name="smb.file_attribute.hidden" showname=".... .... .... .... .... .... .... ..0. = Hidden: NOT hidden" size="4" pos="100" show="0" value="0" unmaskedvalue="05000203"/>
+ <field name="smb.file_attribute.system" showname=".... .... .... .... .... .... .... .0.. = System: NOT a system file/dir" size="4" pos="100" show="0" value="0" unmaskedvalue="05000203"/>
+ <field name="smb.file_attribute.volume" showname=".... .... .... .... .... .... .... 0... = Volume ID: NOT a volume ID" size="4" pos="100" show="0" value="0" unmaskedvalue="05000203"/>
+ <field name="smb.file_attribute.directory" showname=".... .... .... .... .... .... ...0 .... = Directory: NOT a directory" size="4" pos="100" show="0" value="0" unmaskedvalue="05000203"/>
+ <field name="smb.file_attribute.archive" showname=".... .... .... .... .... .... ..0. .... = Archive: Has NOT been modified since last archive" size="4" pos="100" show="0" value="0" unmaskedvalue="05000203"/>
+ <field name="smb.file_attribute.device" showname=".... .... .... .... .... .... .0.. .... = Device: NOT a device" size="4" pos="100" show="0" value="0" unmaskedvalue="05000203"/>
+ <field name="smb.file_attribute.normal" showname=".... .... .... .... .... .... 0... .... = Normal: Has some attribute set" size="4" pos="100" show="0" value="0" unmaskedvalue="05000203"/>
+ <field name="smb.file_attribute.temporary" showname=".... .... .... .... .... ...0 .... .... = Temporary: NOT a temporary file" size="4" pos="100" show="0" value="0" unmaskedvalue="05000203"/>
+ <field name="smb.file_attribute.sparse" showname=".... .... .... .... .... ..0. .... .... = Sparse: NOT a sparse file" size="4" pos="100" show="0" value="0" unmaskedvalue="05000203"/>
+ <field name="smb.file_attribute.compressed" showname=".... .... .... .... .... 0... .... .... = Compressed: Uncompressed" size="4" pos="100" show="0" value="0" unmaskedvalue="05000203"/>
+ <field name="smb.file_attribute.offline" showname=".... .... .... .... ...0 .... .... .... = Offline: Online" size="4" pos="100" show="0" value="0" unmaskedvalue="05000203"/>
+ <field name="smb.file_attribute.not_content_indexed" showname=".... .... .... .... ..0. .... .... .... = Content Indexed: NOT content indexed" size="4" pos="100" show="0" value="0" unmaskedvalue="05000203"/>
+ <field name="smb.file_attribute.encrypted" showname=".... .... .... .... .0.. .... .... .... = Encrypted: This is NOT an encrypted file" size="4" pos="100" show="0" value="0" unmaskedvalue="05000203"/>
+ </field>
+ <field name="smb.share_access" showname="Share Access: 0x00000003, Read, Write" size="4" pos="100" show="0x00000003" value="05000203">
+ <field name="smb.share.access.read" showname=".... .... .... .... .... .... .... ...1 = Read: Object can be shared for READ" size="4" pos="100" show="1" value="FFFFFFFF" unmaskedvalue="05000203"/>
+ <field name="smb.share.access.write" showname=".... .... .... .... .... .... .... ..1. = Write: Object can be shared for WRITE" size="4" pos="100" show="1" value="FFFFFFFF" unmaskedvalue="05000203"/>
+ <field name="smb.share.access.delete" showname=".... .... .... .... .... .... .... .0.. = Delete: Object can NOT be shared for delete" size="4" pos="100" show="0" value="0" unmaskedvalue="05000203"/>
+ </field>
+ <field name="smb.create_options" showname="Create Options: 0x00000000" size="4" pos="100" show="0x00000000" value="05000203">
+ <field name="smb.nt.create_options.directory" showname=".... .... .... .... .... .... .... ...0 = Directory: File being created/opened must not be a directory" size="4" pos="100" show="0" value="0" unmaskedvalue="05000203"/>
+ <field name="smb.nt.create_options.write_through" showname=".... .... .... .... .... .... .... ..0. = Write Through: Writes need not flush buffered data before completing" size="4" pos="100" show="0" value="0" unmaskedvalue="05000203"/>
+ <field name="smb.nt.create_options.sequential_only" showname=".... .... .... .... .... .... .... .0.. = Sequential Only: The file might not only be accessed sequentially" size="4" pos="100" show="0" value="0" unmaskedvalue="05000203"/>
+ <field name="smb.nt.create_options.intermediate_buffering" showname=".... .... .... .... .... .... .... 0... = Intermediate Buffering: Intermediate buffering is allowed" size="4" pos="100" show="0" value="0" unmaskedvalue="05000203"/>
+ <field name="smb.nt.create_options.sync_io_alert" showname=".... .... .... .... .... .... ...0 .... = Sync I/O Alert: Operations NOT necessarily synchronous" size="4" pos="100" show="0" value="0" unmaskedvalue="05000203"/>
+ <field name="smb.nt.create_options.sync_io_nonalert" showname=".... .... .... .... .... .... ..0. .... = Sync I/O Nonalert: Operations NOT necessarily synchronous" size="4" pos="100" show="0" value="0" unmaskedvalue="05000203"/>
+ <field name="smb.nt.create_options.non_directory" showname=".... .... .... .... .... .... .0.. .... = Non-Directory: File being created/opened must be a directory" size="4" pos="100" show="0" value="0" unmaskedvalue="05000203"/>
+ <field name="smb.nt.create_options.create_tree_connection" showname=".... .... .... .... .... .... 0... .... = Create Tree Connection: Create Tree Connections is NOT set" size="4" pos="100" show="0" value="0" unmaskedvalue="05000203"/>
+ <field name="smb.nt.create_options.complete_if_oplocked" showname=".... .... .... .... .... ...0 .... .... = Complete If Oplocked: Complete if oplocked is NOT set" size="4" pos="100" show="0" value="0" unmaskedvalue="05000203"/>
+ <field name="smb.nt.create_options.no_ea_knowledge" showname=".... .... .... .... .... ..0. .... .... = No EA Knowledge: The client understands extended attributes" size="4" pos="100" show="0" value="0" unmaskedvalue="05000203"/>
+ <field name="smb.nt.create_options.eight_dot_three_only" showname=".... .... .... .... .... .0.. .... .... = 8.3 Only: The client understands long file names" size="4" pos="100" show="0" value="0" unmaskedvalue="05000203"/>
+ <field name="smb.nt.create_options.random_access" showname=".... .... .... .... .... 0... .... .... = Random Access: The file will not be accessed randomly" size="4" pos="100" show="0" value="0" unmaskedvalue="05000203"/>
+ <field name="smb.nt.create_options.delete_on_close" showname=".... .... .... .... ...0 .... .... .... = Delete On Close: The file should not be deleted when it is closed" size="4" pos="100" show="0" value="0" unmaskedvalue="05000203"/>
+ <field name="smb.nt.create_options.open_by_fileid" showname=".... .... .... .... ..0. .... .... .... = Open By FileID: OpenByFileID is NOT set" size="4" pos="100" show="0" value="0" unmaskedvalue="05000203"/>
+ <field name="smb.nt.create_options.backup_intent" showname=".... .... .... .... .0.. .... .... .... = Backup Intent: This is a normal create" size="4" pos="100" show="0" value="0" unmaskedvalue="05000203"/>
+ <field name="smb.nt.create_options.no_compression" showname=".... .... .... .... 0... .... .... .... = No Compression: Compression is allowed for Open/Create" size="4" pos="100" show="0" value="0" unmaskedvalue="05000203"/>
+ <field name="smb.nt.create_options.reserve_opfilter" showname=".... .... ...0 .... .... .... .... .... = Reserve Opfilter: Reserve Opfilter is NOT set" size="4" pos="100" show="0" value="0" unmaskedvalue="05000203"/>
+ <field name="smb.nt.create_options.open_reparse_point" showname=".... .... ..0. .... .... .... .... .... = Open Reparse Point: Normal open" size="4" pos="100" show="0" value="0" unmaskedvalue="05000203"/>
+ <field name="smb.nt.create_options.open_no_recall" showname=".... .... .0.. .... .... .... .... .... = Open No Recall: Open no recall is NOT set" size="4" pos="100" show="0" value="0" unmaskedvalue="05000203"/>
+ <field name="smb.nt.create_options.open_for_free_space_query" showname=".... .... 0... .... .... .... .... .... = Open For Free Space query: This is NOT an open for free space query" size="4" pos="100" show="0" value="0" unmaskedvalue="05000203"/>
+ </field>
+ <field name="smb.create.disposition" showname="Disposition: Open (if file exists open it, else fail) (1)" size="0" pos="100" show="1"/>
+ </field>
+ </proto>
+ <proto name="dcerpc" showname="Distributed Computing Environment / Remote Procedure Call (DCE/RPC) Response, Fragment: Single, FragLen: 28, Call: 7, Ctx: 0, [Req: #50348]" size="28" pos="100">
+ <field name="dcerpc.ver" showname="Version: 5" size="1" pos="100" show="5" value="05"/>
+ <field name="dcerpc.ver_minor" showname="Version (minor): 0" size="1" pos="101" show="0" value="00"/>
+ <field name="dcerpc.pkt_type" showname="Packet type: Response (2)" size="1" pos="102" show="2" value="02"/>
+ <field name="dcerpc.cn_flags" showname="Packet Flags: 0x03" size="1" pos="103" show="0x00000003" value="03">
+ <field name="dcerpc.cn_flags.object" showname="0... .... = Object: Not set" size="1" pos="103" show="0" value="0" unmaskedvalue="03"/>
+ <field name="dcerpc.cn_flags.maybe" showname=".0.. .... = Maybe: Not set" size="1" pos="103" show="0" value="0" unmaskedvalue="03"/>
+ <field name="dcerpc.cn_flags.dne" showname="..0. .... = Did Not Execute: Not set" size="1" pos="103" show="0" value="0" unmaskedvalue="03"/>
+ <field name="dcerpc.cn_flags.mpx" showname="...0 .... = Multiplex: Not set" size="1" pos="103" show="0" value="0" unmaskedvalue="03"/>
+ <field name="dcerpc.cn_flags.reserved" showname=".... 0... = Reserved: Not set" size="1" pos="103" show="0" value="0" unmaskedvalue="03"/>
+ <field name="dcerpc.cn_flags.cancel_pending" showname=".... .0.. = Cancel Pending: Not set" size="1" pos="103" show="0" value="0" unmaskedvalue="03"/>
+ <field name="dcerpc.cn_flags.last_frag" showname=".... ..1. = Last Frag: Set" size="1" pos="103" show="1" value="FFFFFFFF" unmaskedvalue="03"/>
+ <field name="dcerpc.cn_flags.first_frag" showname=".... ...1 = First Frag: Set" size="1" pos="103" show="1" value="FFFFFFFF" unmaskedvalue="03"/>
+ </field>
+ <field name="dcerpc.drep" showname="Data Representation: 10000000" size="4" pos="104" show="10:00:00:00" value="10000000">
+ <field name="dcerpc.drep.byteorder" showname="Byte order: Little-endian (1)" size="1" pos="104" show="1" value="10"/>
+ <field name="dcerpc.drep.character" showname="Character: ASCII (0)" size="1" pos="104" show="0" value="10"/>
+ <field name="dcerpc.drep.fp" showname="Floating-point: IEEE (0)" size="1" pos="105" show="0" value="00"/>
+ </field>
+ <field name="dcerpc.cn_frag_len" showname="Frag Length: 28" size="2" pos="108" show="28" value="1c00"/>
+ <field name="dcerpc.cn_auth_len" showname="Auth Length: 0" size="2" pos="110" show="0" value="0000"/>
+ <field name="dcerpc.cn_call_id" showname="Call ID: 7" size="4" pos="112" show="7" value="07000000"/>
+ <field name="dcerpc.cn_alloc_hint" showname="Alloc hint: 4" size="4" pos="116" show="4" value="04000000"/>
+ <field name="dcerpc.cn_ctx_id" showname="Context ID: 0" size="2" pos="120" show="0" value="0000"/>
+ <field name="dcerpc.cn_cancel_count" showname="Cancel count: 0" size="1" pos="122" show="0" value="00"/>
+ <field name="dcerpc.opnum" showname="Opnum: 27" size="0" pos="100" show="27"/>
+ <field name="dcerpc.request_in" showname="Request in frame: 50348" size="0" pos="100" show="50348"/>
+ <field name="dcerpc.time" showname="Time from request: 0.043267000 seconds" size="0" pos="124" show="0.043267000"/>
+ </proto>
+ <proto name="lsarpc" showname="Local Security Authority, lsa_SetInformationTrustedDomain" size="4" pos="124">
+ <field name="lsarpc.opnum" showname="Operation: lsa_SetInformationTrustedDomain (27)" size="0" pos="124" show="27"/>
+ <field name="dcerpc.request_in" showname="Request in frame: 50348" size="0" pos="124" show="50348"/>
+ <field name="lsarpc.status" showname="NT Error: STATUS_SUCCESS (0x00000000)" size="4" pos="124" show="0x00000000" value="00000000"/>
+ </proto>
+</packet>
+
+<packet>
+ <proto name="geninfo" pos="0" showname="General information" size="196">
+ <field name="num" pos="0" show="50491" showname="Number" value="c53b" size="196"/>
+ <field name="len" pos="0" show="196" showname="Frame Length" value="c4" size="196"/>
+ <field name="caplen" pos="0" show="196" showname="Captured Length" value="c4" size="196"/>
+ <field name="timestamp" pos="0" show="Feb 10, 2017 14:37:59.853951000 NZDT" showname="Captured Time" value="1486690679.853951000" size="196"/>
+ </proto>
+ <proto name="frame" showname="Frame 50491: 196 bytes on wire (1568 bits), 196 bytes captured (1568 bits)" size="196" pos="0">
+ <field name="frame.encap_type" showname="Encapsulation type: Raw IP (7)" size="0" pos="0" show="7"/>
+ <field name="frame.time" showname="Arrival Time: Feb 10, 2017 14:37:59.853951000 NZDT" size="0" pos="0" show="Feb 10, 2017 14:37:59.853951000 NZDT"/>
+ <field name="frame.offset_shift" showname="Time shift for this packet: 0.000000000 seconds" size="0" pos="0" show="0.000000000"/>
+ <field name="frame.time_epoch" showname="Epoch Time: 1486690679.853951000 seconds" size="0" pos="0" show="1486690679.853951000"/>
+ <field name="frame.time_delta" showname="Time delta from previous captured frame: 0.000093000 seconds" size="0" pos="0" show="0.000093000"/>
+ <field name="frame.time_delta_displayed" showname="Time delta from previous displayed frame: 0.000093000 seconds" size="0" pos="0" show="0.000093000"/>
+ <field name="frame.time_relative" showname="Time since reference or first frame: 103.323500000 seconds" size="0" pos="0" show="103.323500000"/>
+ <field name="frame.number" showname="Frame Number: 50491" size="0" pos="0" show="50491"/>
+ <field name="frame.len" showname="Frame Length: 196 bytes (1568 bits)" size="0" pos="0" show="196"/>
+ <field name="frame.cap_len" showname="Capture Length: 196 bytes (1568 bits)" size="0" pos="0" show="196"/>
+ <field name="frame.marked" showname="Frame is marked: False" size="0" pos="0" show="0"/>
+ <field name="frame.ignored" showname="Frame is ignored: False" size="0" pos="0" show="0"/>
+ <field name="frame.protocols" showname="Protocols in frame: raw:ip:tcp:dcerpc" size="0" pos="0" show="raw:ip:tcp:dcerpc"/>
+ </proto>
+ <proto name="raw" showname="Raw packet data" size="196" pos="0"/>
+ <proto name="ip" showname="Internet Protocol Version 4, Src: 127.0.0.27, Dst: 127.0.0.30" size="20" pos="0">
+ <field name="ip.version" showname="0100 .... = Version: 4" size="1" pos="0" show="4" value="4" unmaskedvalue="45"/>
+ <field name="ip.hdr_len" showname=".... 0101 = Header Length: 20 bytes" size="1" pos="0" show="5" value="5" unmaskedvalue="45"/>
+ <field name="ip.dsfield" showname="Differentiated Services Field: 0x00 (DSCP: CS0, ECN: Not-ECT)" size="1" pos="1" show="0x00000000" value="00">
+ <field name="ip.dsfield.dscp" showname="0000 00.. = Differentiated Services Codepoint: Default (0)" size="1" pos="1" show="0" value="0" unmaskedvalue="00"/>
+ <field name="ip.dsfield.ecn" showname=".... ..00 = Explicit Congestion Notification: Not ECN-Capable Transport (0)" size="1" pos="1" show="0" value="0" unmaskedvalue="00"/>
+ </field>
+ <field name="ip.len" showname="Total Length: 196" size="2" pos="2" show="196" value="00c4"/>
+ <field name="ip.id" showname="Identification: 0xffff (65535)" size="2" pos="4" show="0x0000ffff" value="ffff"/>
+ <field name="ip.flags" showname="Flags: 0x02 (Don&#x27;t Fragment)" size="1" pos="6" show="0x00000002" value="40">
+ <field name="ip.flags.rb" showname="0... .... = Reserved bit: Not set" size="1" pos="6" show="0" value="40"/>
+ <field name="ip.flags.df" showname=".1.. .... = Don&#x27;t fragment: Set" size="1" pos="6" show="1" value="40"/>
+ <field name="ip.flags.mf" showname="..0. .... = More fragments: Not set" size="1" pos="6" show="0" value="40"/>
+ </field>
+ <field name="ip.frag_offset" showname="Fragment offset: 0" size="2" pos="6" show="0" value="4000"/>
+ <field name="ip.ttl" showname="Time to live: 255" size="1" pos="8" show="255" value="ff"/>
+ <field name="ip.proto" showname="Protocol: TCP (6)" size="1" pos="9" show="6" value="06"/>
+ <field name="ip.checksum" showname="Header checksum: 0x0000 [validation disabled]" size="2" pos="10" show="0x00000000" value="0000">
+ <field name="ip.checksum_good" showname="Good: False" size="2" pos="10" show="0" value="0000"/>
+ <field name="ip.checksum_bad" showname="Bad: False" size="2" pos="10" show="0" value="0000"/>
+ </field>
+ <field name="ip.src" showname="Source: 127.0.0.27" size="4" pos="12" show="127.0.0.27" value="7f00001b"/>
+ <field name="ip.addr" showname="Source or Destination Address: 127.0.0.27" hide="yes" size="4" pos="12" show="127.0.0.27" value="7f00001b"/>
+ <field name="ip.src_host" showname="Source Host: 127.0.0.27" hide="yes" size="4" pos="12" show="127.0.0.27" value="7f00001b"/>
+ <field name="ip.host" showname="Source or Destination Host: 127.0.0.27" hide="yes" size="4" pos="12" show="127.0.0.27" value="7f00001b"/>
+ <field name="ip.dst" showname="Destination: 127.0.0.30" size="4" pos="16" show="127.0.0.30" value="7f00001e"/>
+ <field name="ip.addr" showname="Source or Destination Address: 127.0.0.30" hide="yes" size="4" pos="16" show="127.0.0.30" value="7f00001e"/>
+ <field name="ip.dst_host" showname="Destination Host: 127.0.0.30" hide="yes" size="4" pos="16" show="127.0.0.30" value="7f00001e"/>
+ <field name="ip.host" showname="Source or Destination Host: 127.0.0.30" hide="yes" size="4" pos="16" show="127.0.0.30" value="7f00001e"/>
+ <field name="" show="Source GeoIP: Unknown" size="4" pos="12" value="7f00001b"/>
+ <field name="" show="Destination GeoIP: Unknown" size="4" pos="16" value="7f00001e"/>
+ </proto>
+ <proto name="tcp" showname="Transmission Control Protocol, Src Port: 17934 (17934), Dst Port: 135 (135), Seq: 73, Ack: 61, Len: 156" size="20" pos="20">
+ <field name="tcp.srcport" showname="Source Port: 17934" size="2" pos="20" show="17934" value="460e"/>
+ <field name="tcp.dstport" showname="Destination Port: 135" size="2" pos="22" show="135" value="0087"/>
+ <field name="tcp.port" showname="Source or Destination Port: 17934" hide="yes" size="2" pos="20" show="17934" value="460e"/>
+ <field name="tcp.port" showname="Source or Destination Port: 135" hide="yes" size="2" pos="22" show="135" value="0087"/>
+ <field name="tcp.stream" showname="Stream index: 1183" size="0" pos="20" show="1183"/>
+ <field name="tcp.len" showname="TCP Segment Len: 156" size="1" pos="32" show="156" value="50"/>
+ <field name="tcp.seq" showname="Sequence number: 73 (relative sequence number)" size="4" pos="24" show="73" value="00000049"/>
+ <field name="tcp.nxtseq" showname="Next sequence number: 229 (relative sequence number)" size="0" pos="20" show="229"/>
+ <field name="tcp.ack" showname="Acknowledgment number: 61 (relative ack number)" size="4" pos="28" show="61" value="0000003d"/>
+ <field name="tcp.hdr_len" showname="Header Length: 20 bytes" size="1" pos="32" show="20" value="50"/>
+ <field name="tcp.flags" showname="Flags: 0x018 (PSH, ACK)" size="2" pos="32" show="0x00000018" value="18" unmaskedvalue="5018">
+ <field name="tcp.flags.res" showname="000. .... .... = Reserved: Not set" size="1" pos="32" show="0" value="0" unmaskedvalue="50"/>
+ <field name="tcp.flags.ns" showname="...0 .... .... = Nonce: Not set" size="1" pos="32" show="0" value="0" unmaskedvalue="50"/>
+ <field name="tcp.flags.cwr" showname=".... 0... .... = Congestion Window Reduced (CWR): Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.ecn" showname=".... .0.. .... = ECN-Echo: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.urg" showname=".... ..0. .... = Urgent: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.ack" showname=".... ...1 .... = Acknowledgment: Set" size="1" pos="33" show="1" value="FFFFFFFF" unmaskedvalue="18"/>
+ <field name="tcp.flags.push" showname=".... .... 1... = Push: Set" size="1" pos="33" show="1" value="FFFFFFFF" unmaskedvalue="18"/>
+ <field name="tcp.flags.reset" showname=".... .... .0.. = Reset: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.syn" showname=".... .... ..0. = Syn: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.fin" showname=".... .... ...0 = Fin: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.str" showname="TCP Flags: *******AP***" size="2" pos="32" show="*******AP***" value="5018"/>
+ </field>
+ <field name="tcp.window_size_value" showname="Window size value: 32767" size="2" pos="34" show="32767" value="7fff"/>
+ <field name="tcp.window_size" showname="Calculated window size: 32767" size="2" pos="34" show="32767" value="7fff"/>
+ <field name="tcp.window_size_scalefactor" showname="Window size scaling factor: -2 (no window scaling used)" size="2" pos="34" show="-2" value="7fff"/>
+ <field name="tcp.checksum" showname="Checksum: 0x0000 [validation disabled]" size="2" pos="36" show="0x00000000" value="0000">
+ <field name="tcp.checksum_good" showname="Good Checksum: False" size="2" pos="36" show="0" value="0000"/>
+ <field name="tcp.checksum_bad" showname="Bad Checksum: False" size="2" pos="36" show="0" value="0000"/>
+ </field>
+ <field name="tcp.urgent_pointer" showname="Urgent pointer: 0" size="2" pos="38" show="0" value="0000"/>
+ <field name="tcp.analysis" showname="SEQ/ACK analysis" size="0" pos="20" show="" value="">
+ <field name="tcp.analysis.acks_frame" showname="This is an ACK to the segment in frame: 50487" size="0" pos="20" show="50487"/>
+ <field name="tcp.analysis.ack_rtt" showname="The RTT to ACK the segment was: 0.000158000 seconds" size="0" pos="20" show="0.000158000"/>
+ <field name="tcp.analysis.initial_rtt" showname="iRTT: 0.000013000 seconds" size="0" pos="20" show="0.000013000"/>
+ <field name="tcp.analysis.bytes_in_flight" showname="Bytes in flight: 156" size="0" pos="20" show="156"/>
+ </field>
+ </proto>
+ <proto name="dcerpc" showname="Distributed Computing Environment / Remote Procedure Call (DCE/RPC) Request, Fragment: Single, FragLen: 156, Call: 9, Ctx: 0" size="156" pos="40">
+ <field name="dcerpc.ver" showname="Version: 5" size="1" pos="40" show="5" value="05"/>
+ <field name="dcerpc.ver_minor" showname="Version (minor): 0" size="1" pos="41" show="0" value="00"/>
+ <field name="dcerpc.pkt_type" showname="Packet type: Request (0)" size="1" pos="42" show="0" value="00"/>
+ <field name="dcerpc.cn_flags" showname="Packet Flags: 0x03" size="1" pos="43" show="0x00000003" value="03">
+ <field name="dcerpc.cn_flags.object" showname="0... .... = Object: Not set" size="1" pos="43" show="0" value="0" unmaskedvalue="03"/>
+ <field name="dcerpc.cn_flags.maybe" showname=".0.. .... = Maybe: Not set" size="1" pos="43" show="0" value="0" unmaskedvalue="03"/>
+ <field name="dcerpc.cn_flags.dne" showname="..0. .... = Did Not Execute: Not set" size="1" pos="43" show="0" value="0" unmaskedvalue="03"/>
+ <field name="dcerpc.cn_flags.mpx" showname="...0 .... = Multiplex: Not set" size="1" pos="43" show="0" value="0" unmaskedvalue="03"/>
+ <field name="dcerpc.cn_flags.reserved" showname=".... 0... = Reserved: Not set" size="1" pos="43" show="0" value="0" unmaskedvalue="03"/>
+ <field name="dcerpc.cn_flags.cancel_pending" showname=".... .0.. = Cancel Pending: Not set" size="1" pos="43" show="0" value="0" unmaskedvalue="03"/>
+ <field name="dcerpc.cn_flags.last_frag" showname=".... ..1. = Last Frag: Set" size="1" pos="43" show="1" value="FFFFFFFF" unmaskedvalue="03"/>
+ <field name="dcerpc.cn_flags.first_frag" showname=".... ...1 = First Frag: Set" size="1" pos="43" show="1" value="FFFFFFFF" unmaskedvalue="03"/>
+ </field>
+ <field name="dcerpc.drep" showname="Data Representation: 10000000" size="4" pos="44" show="10:00:00:00" value="10000000">
+ <field name="dcerpc.drep.byteorder" showname="Byte order: Little-endian (1)" size="1" pos="44" show="1" value="10"/>
+ <field name="dcerpc.drep.character" showname="Character: ASCII (0)" size="1" pos="44" show="0" value="10"/>
+ <field name="dcerpc.drep.fp" showname="Floating-point: IEEE (0)" size="1" pos="45" show="0" value="00"/>
+ </field>
+ <field name="dcerpc.cn_frag_len" showname="Frag Length: 156" size="2" pos="48" show="156" value="9c00"/>
+ <field name="dcerpc.cn_auth_len" showname="Auth Length: 0" size="2" pos="50" show="0" value="0000"/>
+ <field name="dcerpc.cn_call_id" showname="Call ID: 9" size="4" pos="52" show="9" value="09000000"/>
+ <field name="dcerpc.cn_alloc_hint" showname="Alloc hint: 132" size="4" pos="56" show="132" value="84000000"/>
+ <field name="dcerpc.cn_ctx_id" showname="Context ID: 0" size="2" pos="60" show="0" value="0000"/>
+ <field name="dcerpc.opnum" showname="Opnum: 3" size="2" pos="62" show="3" value="0300"/>
+ </proto>
+ <proto name="epm" showname="DCE/RPC Endpoint Mapper, Map" size="132" pos="64">
+ <field name="epm.opnum" showname="Operation: Map (3)" size="0" pos="64" show="3"/>
+ <field name="" show="UUID pointer:" size="20" pos="64" value="01000000785634123412cdabef0001234567cffb">
+ <field name="dcerpc.referent_id" showname="Referent ID: 0x00000001" size="4" pos="64" show="0x00000001" value="01000000"/>
+ <field name="epm.uuid" showname="UUID: 12345678-1234-abcd-ef00-01234567cffb" size="16" pos="68" show="12345678-1234-abcd-ef00-01234567cffb" value="785634123412cdabef0001234567cffb"/>
+ </field>
+ <field name="" show="Tower pointer:" size="87" pos="84" value="020000004b0000004b000000050013000d785634123412cdabef0001234567cffb01000200000013000d045d888aeb1cc9119fe808002b10486002000200000001000b0200000001000702000087010009040000000000">
+ <field name="dcerpc.referent_id" showname="Referent ID: 0x00000002" size="4" pos="84" show="0x00000002" value="02000000"/>
+ <field name="epm.tower.len" showname="Length: 75" size="4" pos="88" show="75" value="4b000000"/>
+ <field name="epm.tower.len" showname="Length: 75" size="4" pos="92" show="75" value="4b000000"/>
+ <field name="epm.tower.num_floors" showname="Number of floors: 5" size="2" pos="96" show="5" value="0500"/>
+ <field name="" show="Floor 1 UUID: RPC_NETLOGON" size="25" pos="98" value="13000d785634123412cdabef0001234567cffb010002000000">
+ <field name="epm.tower.lhs.len" showname="LHS Length: 19" size="2" pos="98" show="19" value="1300"/>
+ <field name="epm.tower.proto_id" showname="Protocol: UUID (0x0d)" size="1" pos="100" show="0x0000000d" value="0d"/>
+ <field name="epm.uuid" showname="UUID: RPC_NETLOGON (12345678-1234-abcd-ef00-01234567cffb)" size="16" pos="101" show="12345678-1234-abcd-ef00-01234567cffb" value="785634123412cdabef0001234567cffb"/>
+ <field name="epm.uuid_version" showname="Version: 1.00" size="2" pos="117" show="256" value="0100"/>
+ <field name="epm.tower.rhs.len" showname="RHS Length: 2" size="2" pos="119" show="2" value="0200"/>
+ <field name="epm.ver_min" showname="Version Minor: 0" size="2" pos="121" show="0" value="0000"/>
+ </field>
+ <field name="" show="Floor 2 UUID: 32bit NDR" size="25" pos="123" value="13000d045d888aeb1cc9119fe808002b104860020002000000">
+ <field name="epm.tower.lhs.len" showname="LHS Length: 19" size="2" pos="123" show="19" value="1300"/>
+ <field name="epm.tower.proto_id" showname="Protocol: UUID (0x0d)" size="1" pos="125" show="0x0000000d" value="0d"/>
+ <field name="epm.uuid" showname="UUID: 32bit NDR (8a885d04-1ceb-11c9-9fe8-08002b104860)" size="16" pos="126" show="8a885d04-1ceb-11c9-9fe8-08002b104860" value="045d888aeb1cc9119fe808002b104860"/>
+ <field name="epm.uuid_version" showname="Version: 2.00" size="2" pos="142" show="512" value="0200"/>
+ <field name="epm.tower.rhs.len" showname="RHS Length: 2" size="2" pos="144" show="2" value="0200"/>
+ <field name="epm.ver_min" showname="Version Minor: 0" size="2" pos="146" show="0" value="0000"/>
+ </field>
+ <field name="" show="Floor 3 RPC connection-oriented protocol" size="7" pos="148" value="01000b02000000">
+ <field name="epm.tower.lhs.len" showname="LHS Length: 1" size="2" pos="148" show="1" value="0100"/>
+ <field name="epm.tower.proto_id" showname="Protocol: RPC connection-oriented protocol (0x0b)" size="1" pos="150" show="0x0000000b" value="0b"/>
+ <field name="epm.tower.rhs.len" showname="RHS Length: 2" size="2" pos="151" show="2" value="0200"/>
+ </field>
+ <field name="" show="Floor 4 TCP Port:135" size="7" pos="155" value="01000702000087">
+ <field name="epm.tower.lhs.len" showname="LHS Length: 1" size="2" pos="155" show="1" value="0100"/>
+ <field name="epm.tower.proto_id" showname="Protocol: DOD TCP (0x07)" size="1" pos="157" show="0x00000007" value="07"/>
+ <field name="epm.tower.rhs.len" showname="RHS Length: 2" size="2" pos="158" show="2" value="0200"/>
+ <field name="epm.proto.ip" showname="IP: 0.0.0.0" size="4" pos="167" show="0.0.0.0" value="00000000"/>
+ </field>
+ </field>
+ <field name="epm.hnd" showname="Handle: 0000000000000000000000000000000000000000" size="20" pos="172" show="00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00" value="0000000000000000000000000000000000000000"/>
+ <field name="epm.max_towers" showname="Max Towers: 1" size="4" pos="192" show="1" value="01000000"/>
+ </proto>
+</packet>
+
+<packet>
+ <proto name="geninfo" pos="0" showname="General information" size="172">
+ <field name="num" pos="0" show="50520" showname="Number" value="c558" size="172"/>
+ <field name="len" pos="0" show="172" showname="Frame Length" value="ac" size="172"/>
+ <field name="caplen" pos="0" show="172" showname="Captured Length" value="ac" size="172"/>
+ <field name="timestamp" pos="0" show="Feb 10, 2017 14:37:59.854842000 NZDT" showname="Captured Time" value="1486690679.854842000" size="172"/>
+ </proto>
+ <proto name="frame" showname="Frame 50520: 172 bytes on wire (1376 bits), 172 bytes captured (1376 bits)" size="172" pos="0">
+ <field name="frame.encap_type" showname="Encapsulation type: Raw IP (7)" size="0" pos="0" show="7"/>
+ <field name="frame.time" showname="Arrival Time: Feb 10, 2017 14:37:59.854842000 NZDT" size="0" pos="0" show="Feb 10, 2017 14:37:59.854842000 NZDT"/>
+ <field name="frame.offset_shift" showname="Time shift for this packet: 0.000000000 seconds" size="0" pos="0" show="0.000000000"/>
+ <field name="frame.time_epoch" showname="Epoch Time: 1486690679.854842000 seconds" size="0" pos="0" show="1486690679.854842000"/>
+ <field name="frame.time_delta" showname="Time delta from previous captured frame: 0.000094000 seconds" size="0" pos="0" show="0.000094000"/>
+ <field name="frame.time_delta_displayed" showname="Time delta from previous displayed frame: 0.000094000 seconds" size="0" pos="0" show="0.000094000"/>
+ <field name="frame.time_relative" showname="Time since reference or first frame: 103.324391000 seconds" size="0" pos="0" show="103.324391000"/>
+ <field name="frame.number" showname="Frame Number: 50520" size="0" pos="0" show="50520"/>
+ <field name="frame.len" showname="Frame Length: 172 bytes (1376 bits)" size="0" pos="0" show="172"/>
+ <field name="frame.cap_len" showname="Capture Length: 172 bytes (1376 bits)" size="0" pos="0" show="172"/>
+ <field name="frame.marked" showname="Frame is marked: False" size="0" pos="0" show="0"/>
+ <field name="frame.ignored" showname="Frame is ignored: False" size="0" pos="0" show="0"/>
+ <field name="frame.protocols" showname="Protocols in frame: raw:ip:tcp:dcerpc" size="0" pos="0" show="raw:ip:tcp:dcerpc"/>
+ </proto>
+ <proto name="raw" showname="Raw packet data" size="172" pos="0"/>
+ <proto name="ip" showname="Internet Protocol Version 4, Src: 127.0.0.27, Dst: 127.0.0.30" size="20" pos="0">
+ <field name="ip.version" showname="0100 .... = Version: 4" size="1" pos="0" show="4" value="4" unmaskedvalue="45"/>
+ <field name="ip.hdr_len" showname=".... 0101 = Header Length: 20 bytes" size="1" pos="0" show="5" value="5" unmaskedvalue="45"/>
+ <field name="ip.dsfield" showname="Differentiated Services Field: 0x00 (DSCP: CS0, ECN: Not-ECT)" size="1" pos="1" show="0x00000000" value="00">
+ <field name="ip.dsfield.dscp" showname="0000 00.. = Differentiated Services Codepoint: Default (0)" size="1" pos="1" show="0" value="0" unmaskedvalue="00"/>
+ <field name="ip.dsfield.ecn" showname=".... ..00 = Explicit Congestion Notification: Not ECN-Capable Transport (0)" size="1" pos="1" show="0" value="0" unmaskedvalue="00"/>
+ </field>
+ <field name="ip.len" showname="Total Length: 172" size="2" pos="2" show="172" value="00ac"/>
+ <field name="ip.id" showname="Identification: 0xffff (65535)" size="2" pos="4" show="0x0000ffff" value="ffff"/>
+ <field name="ip.flags" showname="Flags: 0x02 (Don&#x27;t Fragment)" size="1" pos="6" show="0x00000002" value="40">
+ <field name="ip.flags.rb" showname="0... .... = Reserved bit: Not set" size="1" pos="6" show="0" value="40"/>
+ <field name="ip.flags.df" showname=".1.. .... = Don&#x27;t fragment: Set" size="1" pos="6" show="1" value="40"/>
+ <field name="ip.flags.mf" showname="..0. .... = More fragments: Not set" size="1" pos="6" show="0" value="40"/>
+ </field>
+ <field name="ip.frag_offset" showname="Fragment offset: 0" size="2" pos="6" show="0" value="4000"/>
+ <field name="ip.ttl" showname="Time to live: 255" size="1" pos="8" show="255" value="ff"/>
+ <field name="ip.proto" showname="Protocol: TCP (6)" size="1" pos="9" show="6" value="06"/>
+ <field name="ip.checksum" showname="Header checksum: 0x0000 [validation disabled]" size="2" pos="10" show="0x00000000" value="0000">
+ <field name="ip.checksum_good" showname="Good: False" size="2" pos="10" show="0" value="0000"/>
+ <field name="ip.checksum_bad" showname="Bad: False" size="2" pos="10" show="0" value="0000"/>
+ </field>
+ <field name="ip.src" showname="Source: 127.0.0.27" size="4" pos="12" show="127.0.0.27" value="7f00001b"/>
+ <field name="ip.addr" showname="Source or Destination Address: 127.0.0.27" hide="yes" size="4" pos="12" show="127.0.0.27" value="7f00001b"/>
+ <field name="ip.src_host" showname="Source Host: 127.0.0.27" hide="yes" size="4" pos="12" show="127.0.0.27" value="7f00001b"/>
+ <field name="ip.host" showname="Source or Destination Host: 127.0.0.27" hide="yes" size="4" pos="12" show="127.0.0.27" value="7f00001b"/>
+ <field name="ip.dst" showname="Destination: 127.0.0.30" size="4" pos="16" show="127.0.0.30" value="7f00001e"/>
+ <field name="ip.addr" showname="Source or Destination Address: 127.0.0.30" hide="yes" size="4" pos="16" show="127.0.0.30" value="7f00001e"/>
+ <field name="ip.dst_host" showname="Destination Host: 127.0.0.30" hide="yes" size="4" pos="16" show="127.0.0.30" value="7f00001e"/>
+ <field name="ip.host" showname="Source or Destination Host: 127.0.0.30" hide="yes" size="4" pos="16" show="127.0.0.30" value="7f00001e"/>
+ <field name="" show="Source GeoIP: Unknown" size="4" pos="12" value="7f00001b"/>
+ <field name="" show="Destination GeoIP: Unknown" size="4" pos="16" value="7f00001e"/>
+ </proto>
+ <proto name="tcp" showname="Transmission Control Protocol, Src Port: 17935 (17935), Dst Port: 1026 (1026), Seq: 73, Ack: 61, Len: 132" size="20" pos="20">
+ <field name="tcp.srcport" showname="Source Port: 17935" size="2" pos="20" show="17935" value="460f"/>
+ <field name="tcp.dstport" showname="Destination Port: 1026" size="2" pos="22" show="1026" value="0402"/>
+ <field name="tcp.port" showname="Source or Destination Port: 17935" hide="yes" size="2" pos="20" show="17935" value="460f"/>
+ <field name="tcp.port" showname="Source or Destination Port: 1026" hide="yes" size="2" pos="22" show="1026" value="0402"/>
+ <field name="tcp.stream" showname="Stream index: 1184" size="0" pos="20" show="1184"/>
+ <field name="tcp.len" showname="TCP Segment Len: 132" size="1" pos="32" show="132" value="50"/>
+ <field name="tcp.seq" showname="Sequence number: 73 (relative sequence number)" size="4" pos="24" show="73" value="00000049"/>
+ <field name="tcp.nxtseq" showname="Next sequence number: 205 (relative sequence number)" size="0" pos="20" show="205"/>
+ <field name="tcp.ack" showname="Acknowledgment number: 61 (relative ack number)" size="4" pos="28" show="61" value="0000003d"/>
+ <field name="tcp.hdr_len" showname="Header Length: 20 bytes" size="1" pos="32" show="20" value="50"/>
+ <field name="tcp.flags" showname="Flags: 0x018 (PSH, ACK)" size="2" pos="32" show="0x00000018" value="18" unmaskedvalue="5018">
+ <field name="tcp.flags.res" showname="000. .... .... = Reserved: Not set" size="1" pos="32" show="0" value="0" unmaskedvalue="50"/>
+ <field name="tcp.flags.ns" showname="...0 .... .... = Nonce: Not set" size="1" pos="32" show="0" value="0" unmaskedvalue="50"/>
+ <field name="tcp.flags.cwr" showname=".... 0... .... = Congestion Window Reduced (CWR): Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.ecn" showname=".... .0.. .... = ECN-Echo: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.urg" showname=".... ..0. .... = Urgent: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.ack" showname=".... ...1 .... = Acknowledgment: Set" size="1" pos="33" show="1" value="FFFFFFFF" unmaskedvalue="18"/>
+ <field name="tcp.flags.push" showname=".... .... 1... = Push: Set" size="1" pos="33" show="1" value="FFFFFFFF" unmaskedvalue="18"/>
+ <field name="tcp.flags.reset" showname=".... .... .0.. = Reset: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.syn" showname=".... .... ..0. = Syn: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.fin" showname=".... .... ...0 = Fin: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.str" showname="TCP Flags: *******AP***" size="2" pos="32" show="*******AP***" value="5018"/>
+ </field>
+ <field name="tcp.window_size_value" showname="Window size value: 32767" size="2" pos="34" show="32767" value="7fff"/>
+ <field name="tcp.window_size" showname="Calculated window size: 32767" size="2" pos="34" show="32767" value="7fff"/>
+ <field name="tcp.window_size_scalefactor" showname="Window size scaling factor: -2 (no window scaling used)" size="2" pos="34" show="-2" value="7fff"/>
+ <field name="tcp.checksum" showname="Checksum: 0x0000 [validation disabled]" size="2" pos="36" show="0x00000000" value="0000">
+ <field name="tcp.checksum_good" showname="Good Checksum: False" size="2" pos="36" show="0" value="0000"/>
+ <field name="tcp.checksum_bad" showname="Bad Checksum: False" size="2" pos="36" show="0" value="0000"/>
+ </field>
+ <field name="tcp.urgent_pointer" showname="Urgent pointer: 0" size="2" pos="38" show="0" value="0000"/>
+ <field name="tcp.analysis" showname="SEQ/ACK analysis" size="0" pos="20" show="" value="">
+ <field name="tcp.analysis.acks_frame" showname="This is an ACK to the segment in frame: 50516" size="0" pos="20" show="50516"/>
+ <field name="tcp.analysis.ack_rtt" showname="The RTT to ACK the segment was: 0.000138000 seconds" size="0" pos="20" show="0.000138000"/>
+ <field name="tcp.analysis.initial_rtt" showname="iRTT: 0.000011000 seconds" size="0" pos="20" show="0.000011000"/>
+ <field name="tcp.analysis.bytes_in_flight" showname="Bytes in flight: 132" size="0" pos="20" show="132"/>
+ </field>
+ </proto>
+ <proto name="dcerpc" showname="Distributed Computing Environment / Remote Procedure Call (DCE/RPC) Request, Fragment: Single, FragLen: 132, Call: 11, Ctx: 0" size="132" pos="40">
+ <field name="dcerpc.ver" showname="Version: 5" size="1" pos="40" show="5" value="05"/>
+ <field name="dcerpc.ver_minor" showname="Version (minor): 0" size="1" pos="41" show="0" value="00"/>
+ <field name="dcerpc.pkt_type" showname="Packet type: Request (0)" size="1" pos="42" show="0" value="00"/>
+ <field name="dcerpc.cn_flags" showname="Packet Flags: 0x03" size="1" pos="43" show="0x00000003" value="03">
+ <field name="dcerpc.cn_flags.object" showname="0... .... = Object: Not set" size="1" pos="43" show="0" value="0" unmaskedvalue="03"/>
+ <field name="dcerpc.cn_flags.maybe" showname=".0.. .... = Maybe: Not set" size="1" pos="43" show="0" value="0" unmaskedvalue="03"/>
+ <field name="dcerpc.cn_flags.dne" showname="..0. .... = Did Not Execute: Not set" size="1" pos="43" show="0" value="0" unmaskedvalue="03"/>
+ <field name="dcerpc.cn_flags.mpx" showname="...0 .... = Multiplex: Not set" size="1" pos="43" show="0" value="0" unmaskedvalue="03"/>
+ <field name="dcerpc.cn_flags.reserved" showname=".... 0... = Reserved: Not set" size="1" pos="43" show="0" value="0" unmaskedvalue="03"/>
+ <field name="dcerpc.cn_flags.cancel_pending" showname=".... .0.. = Cancel Pending: Not set" size="1" pos="43" show="0" value="0" unmaskedvalue="03"/>
+ <field name="dcerpc.cn_flags.last_frag" showname=".... ..1. = Last Frag: Set" size="1" pos="43" show="1" value="FFFFFFFF" unmaskedvalue="03"/>
+ <field name="dcerpc.cn_flags.first_frag" showname=".... ...1 = First Frag: Set" size="1" pos="43" show="1" value="FFFFFFFF" unmaskedvalue="03"/>
+ </field>
+ <field name="dcerpc.drep" showname="Data Representation: 10000000" size="4" pos="44" show="10:00:00:00" value="10000000">
+ <field name="dcerpc.drep.byteorder" showname="Byte order: Little-endian (1)" size="1" pos="44" show="1" value="10"/>
+ <field name="dcerpc.drep.character" showname="Character: ASCII (0)" size="1" pos="44" show="0" value="10"/>
+ <field name="dcerpc.drep.fp" showname="Floating-point: IEEE (0)" size="1" pos="45" show="0" value="00"/>
+ </field>
+ <field name="dcerpc.cn_frag_len" showname="Frag Length: 132" size="2" pos="48" show="132" value="8400"/>
+ <field name="dcerpc.cn_auth_len" showname="Auth Length: 0" size="2" pos="50" show="0" value="0000"/>
+ <field name="dcerpc.cn_call_id" showname="Call ID: 11" size="4" pos="52" show="11" value="0b000000"/>
+ <field name="dcerpc.cn_alloc_hint" showname="Alloc hint: 108" size="4" pos="56" show="108" value="6c000000"/>
+ <field name="dcerpc.cn_ctx_id" showname="Context ID: 0" size="2" pos="60" show="0" value="0000"/>
+ <field name="dcerpc.opnum" showname="Opnum: 4" size="2" pos="62" show="4" value="0400"/>
+ </proto>
+ <proto name="rpc_netlogon" showname="Microsoft Network Logon, NetrServerReqChallenge" size="108" pos="64">
+ <field name="netlogon.opnum" showname="Operation: NetrServerReqChallenge (4)" size="0" pos="64" show="4"/>
+ <field name="" show="Server Handle: \\addc.addom.samba.example.com" size="78" pos="64" value="000002001f000000000000001f0000005c005c0061006400640063002e006100640064006f006d002e00730061006d00620061002e006500780061006d0070006c0065002e0063006f006d000000">
+ <field name="dcerpc.referent_id" showname="Referent ID: 0x00020000" size="4" pos="64" show="0x00020000" value="00000200"/>
+ <field name="dcerpc.array.max_count" showname="Max Count: 31" size="4" pos="68" show="31" value="1f000000"/>
+ <field name="dcerpc.array.offset" showname="Offset: 0" size="4" pos="72" show="0" value="00000000"/>
+ <field name="dcerpc.array.actual_count" showname="Actual Count: 31" size="4" pos="76" show="31" value="1f000000"/>
+ <field name="netlogon.handle" showname="Handle: \\addc.addom.samba.example.com" size="62" pos="80" show="\\addc.addom.samba.example.com" value="5c005c0061006400640063002e006100640064006f006d002e00730061006d00620061002e006500780061006d0070006c0065002e0063006f006d000000"/>
+ </field>
+ <field name="" show="Computer Name: DC7" size="22" pos="142" value="00000400000000000000040000004400430037000000">
+ <field name="dcerpc.array.max_count" showname="Max Count: 4" size="4" pos="144" show="4" value="04000000"/>
+ <field name="dcerpc.array.offset" showname="Offset: 0" size="4" pos="148" show="0" value="00000000"/>
+ <field name="dcerpc.array.actual_count" showname="Actual Count: 4" size="4" pos="152" show="4" value="04000000"/>
+ <field name="netlogon.computer_name" showname="Computer Name: DC7" size="8" pos="156" show="DC7" value="4400430037000000"/>
+ </field>
+ <field name="netlogon.clientchallenge" showname="Client Challenge: 8dcc6ac9d5c32b44" size="8" pos="164" show="8d:cc:6a:c9:d5:c3:2b:44" value="8dcc6ac9d5c32b44"/>
+ </proto>
+</packet>
+
+<packet>
+ <proto name="geninfo" pos="0" showname="General information" size="133">
+ <field name="num" pos="0" show="685" showname="Number" value="2ad" size="133"/>
+ <field name="len" pos="0" show="133" showname="Frame Length" value="85" size="133"/>
+ <field name="caplen" pos="0" show="133" showname="Captured Length" value="85" size="133"/>
+ <field name="timestamp" pos="0" show="Feb 16, 2017 11:26:26.858394000 NZDT" showname="Captured Time" value="1487197586.858394000" size="133"/>
+ </proto>
+ <proto name="frame" showname="Frame 685: 133 bytes on wire (1064 bits), 133 bytes captured (1064 bits)" size="133" pos="0">
+ <field name="frame.encap_type" showname="Encapsulation type: Raw IP (7)" size="0" pos="0" show="7"/>
+ <field name="frame.time" showname="Arrival Time: Feb 16, 2017 11:26:26.858394000 NZDT" size="0" pos="0" show="Feb 16, 2017 11:26:26.858394000 NZDT"/>
+ <field name="frame.offset_shift" showname="Time shift for this packet: 0.000000000 seconds" size="0" pos="0" show="0.000000000"/>
+ <field name="frame.time_epoch" showname="Epoch Time: 1487197586.858394000 seconds" size="0" pos="0" show="1487197586.858394000"/>
+ <field name="frame.time_delta" showname="Time delta from previous captured frame: 0.000149000 seconds" size="0" pos="0" show="0.000149000"/>
+ <field name="frame.time_delta_displayed" showname="Time delta from previous displayed frame: 0.000149000 seconds" size="0" pos="0" show="0.000149000"/>
+ <field name="frame.time_relative" showname="Time since reference or first frame: 30.699100000 seconds" size="0" pos="0" show="30.699100000"/>
+ <field name="frame.number" showname="Frame Number: 685" size="0" pos="0" show="685"/>
+ <field name="frame.len" showname="Frame Length: 133 bytes (1064 bits)" size="0" pos="0" show="133"/>
+ <field name="frame.cap_len" showname="Capture Length: 133 bytes (1064 bits)" size="0" pos="0" show="133"/>
+ <field name="frame.marked" showname="Frame is marked: False" size="0" pos="0" show="0"/>
+ <field name="frame.ignored" showname="Frame is ignored: False" size="0" pos="0" show="0"/>
+ <field name="frame.protocols" showname="Protocols in frame: raw:ip:udp:cldap" size="0" pos="0" show="raw:ip:udp:cldap"/>
+ </proto>
+ <proto name="raw" showname="Raw packet data" size="133" pos="0"/>
+ <proto name="ip" showname="Internet Protocol Version 4, Src: 127.0.0.26, Dst: 127.0.0.30" size="20" pos="0">
+ <field name="ip.version" showname="0100 .... = Version: 4" size="1" pos="0" show="4" value="4" unmaskedvalue="45"/>
+ <field name="ip.hdr_len" showname=".... 0101 = Header Length: 20 bytes" size="1" pos="0" show="5" value="5" unmaskedvalue="45"/>
+ <field name="ip.dsfield" showname="Differentiated Services Field: 0x00 (DSCP: CS0, ECN: Not-ECT)" size="1" pos="1" show="0x00000000" value="00">
+ <field name="ip.dsfield.dscp" showname="0000 00.. = Differentiated Services Codepoint: Default (0)" size="1" pos="1" show="0" value="0" unmaskedvalue="00"/>
+ <field name="ip.dsfield.ecn" showname=".... ..00 = Explicit Congestion Notification: Not ECN-Capable Transport (0)" size="1" pos="1" show="0" value="0" unmaskedvalue="00"/>
+ </field>
+ <field name="ip.len" showname="Total Length: 133" size="2" pos="2" show="133" value="0085"/>
+ <field name="ip.id" showname="Identification: 0xffff (65535)" size="2" pos="4" show="0x0000ffff" value="ffff"/>
+ <field name="ip.flags" showname="Flags: 0x02 (Don&#x27;t Fragment)" size="1" pos="6" show="0x00000002" value="40">
+ <field name="ip.flags.rb" showname="0... .... = Reserved bit: Not set" size="1" pos="6" show="0" value="40"/>
+ <field name="ip.flags.df" showname=".1.. .... = Don&#x27;t fragment: Set" size="1" pos="6" show="1" value="40"/>
+ <field name="ip.flags.mf" showname="..0. .... = More fragments: Not set" size="1" pos="6" show="0" value="40"/>
+ </field>
+ <field name="ip.frag_offset" showname="Fragment offset: 0" size="2" pos="6" show="0" value="4000"/>
+ <field name="ip.ttl" showname="Time to live: 255" size="1" pos="8" show="255" value="ff"/>
+ <field name="ip.proto" showname="Protocol: UDP (17)" size="1" pos="9" show="17" value="11"/>
+ <field name="ip.checksum" showname="Header checksum: 0x0000 [validation disabled]" size="2" pos="10" show="0x00000000" value="0000">
+ <field name="ip.checksum_good" showname="Good: False" size="2" pos="10" show="0" value="0000"/>
+ <field name="ip.checksum_bad" showname="Bad: False" size="2" pos="10" show="0" value="0000"/>
+ </field>
+ <field name="ip.src" showname="Source: 127.0.0.26" size="4" pos="12" show="127.0.0.26" value="7f00001a"/>
+ <field name="ip.addr" showname="Source or Destination Address: 127.0.0.26" hide="yes" size="4" pos="12" show="127.0.0.26" value="7f00001a"/>
+ <field name="ip.src_host" showname="Source Host: 127.0.0.26" hide="yes" size="4" pos="12" show="127.0.0.26" value="7f00001a"/>
+ <field name="ip.host" showname="Source or Destination Host: 127.0.0.26" hide="yes" size="4" pos="12" show="127.0.0.26" value="7f00001a"/>
+ <field name="ip.dst" showname="Destination: 127.0.0.30" size="4" pos="16" show="127.0.0.30" value="7f00001e"/>
+ <field name="ip.addr" showname="Source or Destination Address: 127.0.0.30" hide="yes" size="4" pos="16" show="127.0.0.30" value="7f00001e"/>
+ <field name="ip.dst_host" showname="Destination Host: 127.0.0.30" hide="yes" size="4" pos="16" show="127.0.0.30" value="7f00001e"/>
+ <field name="ip.host" showname="Source or Destination Host: 127.0.0.30" hide="yes" size="4" pos="16" show="127.0.0.30" value="7f00001e"/>
+ <field name="" show="Source GeoIP: Unknown" size="4" pos="12" value="7f00001a"/>
+ <field name="" show="Destination GeoIP: Unknown" size="4" pos="16" value="7f00001e"/>
+ </proto>
+ <proto name="udp" showname="User Datagram Protocol, Src Port: 31981 (31981), Dst Port: 389 (389)" size="8" pos="20">
+ <field name="udp.srcport" showname="Source Port: 31981" size="2" pos="20" show="31981" value="7ced"/>
+ <field name="udp.dstport" showname="Destination Port: 389" size="2" pos="22" show="389" value="0185"/>
+ <field name="udp.port" showname="Source or Destination Port: 31981" hide="yes" size="2" pos="20" show="31981" value="7ced"/>
+ <field name="udp.port" showname="Source or Destination Port: 389" hide="yes" size="2" pos="22" show="389" value="0185"/>
+ <field name="udp.length" showname="Length: 113" size="2" pos="24" show="113" value="0071"/>
+ <field name="udp.checksum" showname="Checksum: 0x0000 (none)" size="2" pos="26" show="0x00000000" value="0000">
+ <field name="udp.checksum_good" showname="Good Checksum: False" size="2" pos="26" show="0" value="0000"/>
+ <field name="udp.checksum_bad" showname="Bad Checksum: False" size="2" pos="26" show="0" value="0000"/>
+ </field>
+ <field name="udp.stream" showname="Stream index: 66" size="0" pos="28" show="66"/>
+ </proto>
+ <proto name="cldap" showname="Connectionless Lightweight Directory Access Protocol" size="105" pos="28">
+ <field name="ldap.LDAPMessage_element" showname="LDAPMessage searchRequest(20287) &quot;&lt;ROOT&gt;&quot; baseObject" size="105" pos="28" show="" value="">
+ <field name="ldap.messageID" showname="messageID: 20287" size="2" pos="32" show="20287" value="4f3f"/>
+ <field name="ldap.protocolOp" showname="protocolOp: searchRequest (3)" size="99" pos="34" show="3" value="636104000a01000a0100020100020100010100a042a30d04054e74566572040406000000a3240409446e73446f6d61696e04176164646f6d2e73616d62612e6578616d706c652e636f6da30b0403414143040400000000300a04084e65744c6f676f6e">
+ <field name="ldap.searchRequest_element" showname="searchRequest" size="97" pos="36" show="" value="">
+ <field name="ldap.baseObject" showname="baseObject: " size="0" pos="38" show=""/>
+ <field name="ldap.scope" showname="scope: baseObject (0)" size="1" pos="40" show="0" value="00"/>
+ <field name="ldap.derefAliases" showname="derefAliases: neverDerefAliases (0)" size="1" pos="43" show="0" value="00"/>
+ <field name="ldap.sizeLimit" showname="sizeLimit: 0" size="1" pos="46" show="0" value="00"/>
+ <field name="ldap.timeLimit" showname="timeLimit: 0" size="1" pos="49" show="0" value="00"/>
+ <field name="ldap.typesOnly" showname="typesOnly: False" size="1" pos="52" show="0" value="00"/>
+ <field name="" show="Filter: (&amp;(&amp;(NtVer=0x00000006)(DnsDomain=addom.samba.example.com))(AAC=00:00:00:00))" size="68" pos="53" value="a042a30d04054e74566572040406000000a3240409446e73446f6d61696e04176164646f6d2e73616d62612e6578616d706c652e636f6da30b0403414143040400000000">
+ <field name="ldap.filter" showname="filter: and (0)" size="66" pos="55" show="0" value="a30d04054e74566572040406000000a3240409446e73446f6d61696e04176164646f6d2e73616d62612e6578616d706c652e636f6da30b0403414143040400000000">
+ <field name="" show="and: (&amp;(&amp;(NtVer=0x00000006)(DnsDomain=addom.samba.example.com))(AAC=00:00:00:00))" size="66" pos="55" value="a30d04054e74566572040406000000a3240409446e73446f6d61696e04176164646f6d2e73616d62612e6578616d706c652e636f6da30b0403414143040400000000">
+ <field name="ldap.and" showname="and: 3 items" size="66" pos="55" show="3" value="a30d04054e74566572040406000000a3240409446e73446f6d61696e04176164646f6d2e73616d62612e6578616d706c652e636f6da30b0403414143040400000000">
+ <field name="" show="Filter: (NtVer=0x00000006)" size="15" pos="55" value="a30d04054e74566572040406000000">
+ <field name="ldap.and_item" showname="and item: equalityMatch (3)" size="13" pos="57" show="3" value="04054e74566572040406000000">
+ <field name="ldap.equalityMatch_element" showname="equalityMatch" size="13" pos="57" show="" value="">
+ <field name="ldap.attributeDesc" showname="attributeDesc: NtVer" size="5" pos="59" show="NtVer" value="4e74566572"/>
+ <field name="mscldap.ntver.flags" showname="Version Flags: 0x00000006, V5: Client requested version 5 netlogon response, V5EX: Client requested version 5 extended netlogon response" size="4" pos="66" show="0x00000006" value="06000000">
+ <field name="mscldap.ntver.searchflags.v1" showname=".... .... .... .... .... .... .... ...0 = V1: Version 1 netlogon response not requested" size="4" pos="66" show="0" value="0" unmaskedvalue="06000000"/>
+ <field name="mscldap.ntver.searchflags.v5" showname=".... .... .... .... .... .... .... ..1. = V5: Client requested version 5 netlogon response" size="4" pos="66" show="1" value="FFFFFFFF" unmaskedvalue="06000000"/>
+ <field name="mscldap.ntver.searchflags.v5ex" showname=".... .... .... .... .... .... .... .1.. = V5EX: Client requested version 5 extended netlogon response" size="4" pos="66" show="1" value="FFFFFFFF" unmaskedvalue="06000000"/>
+ <field name="mscldap.ntver.searchflags.v5ep" showname=".... .... .... .... .... .... .... 0... = V5EP: IP address of server not requested" size="4" pos="66" show="0" value="0" unmaskedvalue="06000000"/>
+ <field name="mscldap.ntver.searchflags.vcs" showname=".... .... .... .... .... .... ...0 .... = VCS: Closest site information not requested" size="4" pos="66" show="0" value="0" unmaskedvalue="06000000"/>
+ <field name="mscldap.ntver.searchflags.vnt4" showname=".... ...0 .... .... .... .... .... .... = VNT4: Only full AD DS requested" size="4" pos="66" show="0" value="0" unmaskedvalue="06000000"/>
+ <field name="mscldap.ntver.searchflags.vpdc" showname="...0 .... .... .... .... .... .... .... = VPDC: Primary Domain Controller not requested" size="4" pos="66" show="0" value="0" unmaskedvalue="06000000"/>
+ <field name="mscldap.ntver.searchflags.vip" showname="..0. .... .... .... .... .... .... .... = VIP: IP details not requested (obsolete)" size="4" pos="66" show="0" value="0" unmaskedvalue="06000000"/>
+ <field name="mscldap.ntver.searchflags.vl" showname=".0.. .... .... .... .... .... .... .... = VL: Client is not the local machine" size="4" pos="66" show="0" value="0" unmaskedvalue="06000000"/>
+ <field name="mscldap.ntver.searchflags.vgc" showname="0... .... .... .... .... .... .... .... = VGC: Global Catalog not requested" size="4" pos="66" show="0" value="0" unmaskedvalue="06000000"/>
+ </field>
+ </field>
+ </field>
+ </field>
+ <field name="" show="Filter: (DnsDomain=addom.samba.example.com)" size="38" pos="70" value="a3240409446e73446f6d61696e04176164646f6d2e73616d62612e6578616d706c652e636f6d">
+ <field name="ldap.and_item" showname="and item: equalityMatch (3)" size="36" pos="72" show="3" value="0409446e73446f6d61696e04176164646f6d2e73616d62612e6578616d706c652e636f6d">
+ <field name="ldap.equalityMatch_element" showname="equalityMatch" size="36" pos="72" show="" value="">
+ <field name="ldap.attributeDesc" showname="attributeDesc: DnsDomain" size="9" pos="74" show="DnsDomain" value="446e73446f6d61696e"/>
+ <field name="ldap.assertionValue" showname="assertionValue: addom.samba.example.com" size="23" pos="85" show="addom.samba.example.com" value="6164646f6d2e73616d62612e6578616d706c652e636f6d"/>
+ </field>
+ </field>
+ </field>
+ <field name="" show="Filter: (AAC=00:00:00:00)" size="13" pos="108" value="a30b0403414143040400000000">
+ <field name="ldap.and_item" showname="and item: equalityMatch (3)" size="11" pos="110" show="3" value="0403414143040400000000">
+ <field name="ldap.equalityMatch_element" showname="equalityMatch" size="11" pos="110" show="" value="">
+ <field name="ldap.attributeDesc" showname="attributeDesc: AAC" size="3" pos="112" show="AAC" value="414143"/>
+ <field name="ldap.assertionValue" showname="assertionValue: 00:00:00:00" size="4" pos="117" show="00:00:00:00" value="00000000"/>
+ </field>
+ </field>
+ </field>
+ </field>
+ </field>
+ </field>
+ </field>
+ <field name="ldap.attributes" showname="attributes: 1 item" size="10" pos="123" show="1" value="04084e65744c6f676f6e">
+ <field name="ldap.AttributeDescription" showname="AttributeDescription: NetLogon" size="8" pos="125" show="NetLogon" value="4e65744c6f676f6e"/>
+ </field>
+ </field>
+ </field>
+ </field>
+ </proto>
+</packet>
+
+<packet>
+ <proto name="geninfo" pos="0" showname="General information" size="226">
+ <field name="num" pos="0" show="698" showname="Number" value="2ba" size="226"/>
+ <field name="len" pos="0" show="226" showname="Frame Length" value="e2" size="226"/>
+ <field name="caplen" pos="0" show="226" showname="Captured Length" value="e2" size="226"/>
+ <field name="timestamp" pos="0" show="Feb 16, 2017 11:26:26.864862000 NZDT" showname="Captured Time" value="1487197586.864862000" size="226"/>
+ </proto>
+ <proto name="frame" showname="Frame 698: 226 bytes on wire (1808 bits), 226 bytes captured (1808 bits)" size="226" pos="0">
+ <field name="frame.encap_type" showname="Encapsulation type: Raw IP (7)" size="0" pos="0" show="7"/>
+ <field name="frame.time" showname="Arrival Time: Feb 16, 2017 11:26:26.864862000 NZDT" size="0" pos="0" show="Feb 16, 2017 11:26:26.864862000 NZDT"/>
+ <field name="frame.offset_shift" showname="Time shift for this packet: 0.000000000 seconds" size="0" pos="0" show="0.000000000"/>
+ <field name="frame.time_epoch" showname="Epoch Time: 1487197586.864862000 seconds" size="0" pos="0" show="1487197586.864862000"/>
+ <field name="frame.time_delta" showname="Time delta from previous captured frame: 0.000059000 seconds" size="0" pos="0" show="0.000059000"/>
+ <field name="frame.time_delta_displayed" showname="Time delta from previous displayed frame: 0.000059000 seconds" size="0" pos="0" show="0.000059000"/>
+ <field name="frame.time_relative" showname="Time since reference or first frame: 30.705568000 seconds" size="0" pos="0" show="30.705568000"/>
+ <field name="frame.number" showname="Frame Number: 698" size="0" pos="0" show="698"/>
+ <field name="frame.len" showname="Frame Length: 226 bytes (1808 bits)" size="0" pos="0" show="226"/>
+ <field name="frame.cap_len" showname="Capture Length: 226 bytes (1808 bits)" size="0" pos="0" show="226"/>
+ <field name="frame.marked" showname="Frame is marked: False" size="0" pos="0" show="0"/>
+ <field name="frame.ignored" showname="Frame is ignored: False" size="0" pos="0" show="0"/>
+ <field name="frame.protocols" showname="Protocols in frame: raw:ip:tcp:nbss:smb2" size="0" pos="0" show="raw:ip:tcp:nbss:smb2"/>
+ </proto>
+ <proto name="raw" showname="Raw packet data" size="226" pos="0"/>
+ <proto name="ip" showname="Internet Protocol Version 4, Src: 127.0.0.26, Dst: 127.0.0.30" size="20" pos="0">
+ <field name="ip.version" showname="0100 .... = Version: 4" size="1" pos="0" show="4" value="4" unmaskedvalue="45"/>
+ <field name="ip.hdr_len" showname=".... 0101 = Header Length: 20 bytes" size="1" pos="0" show="5" value="5" unmaskedvalue="45"/>
+ <field name="ip.dsfield" showname="Differentiated Services Field: 0x00 (DSCP: CS0, ECN: Not-ECT)" size="1" pos="1" show="0x00000000" value="00">
+ <field name="ip.dsfield.dscp" showname="0000 00.. = Differentiated Services Codepoint: Default (0)" size="1" pos="1" show="0" value="0" unmaskedvalue="00"/>
+ <field name="ip.dsfield.ecn" showname=".... ..00 = Explicit Congestion Notification: Not ECN-Capable Transport (0)" size="1" pos="1" show="0" value="0" unmaskedvalue="00"/>
+ </field>
+ <field name="ip.len" showname="Total Length: 226" size="2" pos="2" show="226" value="00e2"/>
+ <field name="ip.id" showname="Identification: 0xffff (65535)" size="2" pos="4" show="0x0000ffff" value="ffff"/>
+ <field name="ip.flags" showname="Flags: 0x02 (Don&#x27;t Fragment)" size="1" pos="6" show="0x00000002" value="40">
+ <field name="ip.flags.rb" showname="0... .... = Reserved bit: Not set" size="1" pos="6" show="0" value="40"/>
+ <field name="ip.flags.df" showname=".1.. .... = Don&#x27;t fragment: Set" size="1" pos="6" show="1" value="40"/>
+ <field name="ip.flags.mf" showname="..0. .... = More fragments: Not set" size="1" pos="6" show="0" value="40"/>
+ </field>
+ <field name="ip.frag_offset" showname="Fragment offset: 0" size="2" pos="6" show="0" value="4000"/>
+ <field name="ip.ttl" showname="Time to live: 255" size="1" pos="8" show="255" value="ff"/>
+ <field name="ip.proto" showname="Protocol: TCP (6)" size="1" pos="9" show="6" value="06"/>
+ <field name="ip.checksum" showname="Header checksum: 0x0000 [validation disabled]" size="2" pos="10" show="0x00000000" value="0000">
+ <field name="ip.checksum_good" showname="Good: False" size="2" pos="10" show="0" value="0000"/>
+ <field name="ip.checksum_bad" showname="Bad: False" size="2" pos="10" show="0" value="0000"/>
+ </field>
+ <field name="ip.src" showname="Source: 127.0.0.26" size="4" pos="12" show="127.0.0.26" value="7f00001a"/>
+ <field name="ip.addr" showname="Source or Destination Address: 127.0.0.26" hide="yes" size="4" pos="12" show="127.0.0.26" value="7f00001a"/>
+ <field name="ip.src_host" showname="Source Host: 127.0.0.26" hide="yes" size="4" pos="12" show="127.0.0.26" value="7f00001a"/>
+ <field name="ip.host" showname="Source or Destination Host: 127.0.0.26" hide="yes" size="4" pos="12" show="127.0.0.26" value="7f00001a"/>
+ <field name="ip.dst" showname="Destination: 127.0.0.30" size="4" pos="16" show="127.0.0.30" value="7f00001e"/>
+ <field name="ip.addr" showname="Source or Destination Address: 127.0.0.30" hide="yes" size="4" pos="16" show="127.0.0.30" value="7f00001e"/>
+ <field name="ip.dst_host" showname="Destination Host: 127.0.0.30" hide="yes" size="4" pos="16" show="127.0.0.30" value="7f00001e"/>
+ <field name="ip.host" showname="Source or Destination Host: 127.0.0.30" hide="yes" size="4" pos="16" show="127.0.0.30" value="7f00001e"/>
+ <field name="" show="Source GeoIP: Unknown" size="4" pos="12" value="7f00001a"/>
+ <field name="" show="Destination GeoIP: Unknown" size="4" pos="16" value="7f00001e"/>
+ </proto>
+ <proto name="tcp" showname="Transmission Control Protocol, Src Port: 31980 (31980), Dst Port: 445 (445), Seq: 89, Ack: 229, Len: 186" size="20" pos="20">
+ <field name="tcp.srcport" showname="Source Port: 31980" size="2" pos="20" show="31980" value="7cec"/>
+ <field name="tcp.dstport" showname="Destination Port: 445" size="2" pos="22" show="445" value="01bd"/>
+ <field name="tcp.port" showname="Source or Destination Port: 31980" hide="yes" size="2" pos="20" show="31980" value="7cec"/>
+ <field name="tcp.port" showname="Source or Destination Port: 445" hide="yes" size="2" pos="22" show="445" value="01bd"/>
+ <field name="tcp.stream" showname="Stream index: 12" size="0" pos="20" show="12"/>
+ <field name="tcp.len" showname="TCP Segment Len: 186" size="1" pos="32" show="186" value="50"/>
+ <field name="tcp.seq" showname="Sequence number: 89 (relative sequence number)" size="4" pos="24" show="89" value="00000059"/>
+ <field name="tcp.nxtseq" showname="Next sequence number: 275 (relative sequence number)" size="0" pos="20" show="275"/>
+ <field name="tcp.ack" showname="Acknowledgment number: 229 (relative ack number)" size="4" pos="28" show="229" value="000000e5"/>
+ <field name="tcp.hdr_len" showname="Header Length: 20 bytes" size="1" pos="32" show="20" value="50"/>
+ <field name="tcp.flags" showname="Flags: 0x018 (PSH, ACK)" size="2" pos="32" show="0x00000018" value="18" unmaskedvalue="5018">
+ <field name="tcp.flags.res" showname="000. .... .... = Reserved: Not set" size="1" pos="32" show="0" value="0" unmaskedvalue="50"/>
+ <field name="tcp.flags.ns" showname="...0 .... .... = Nonce: Not set" size="1" pos="32" show="0" value="0" unmaskedvalue="50"/>
+ <field name="tcp.flags.cwr" showname=".... 0... .... = Congestion Window Reduced (CWR): Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.ecn" showname=".... .0.. .... = ECN-Echo: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.urg" showname=".... ..0. .... = Urgent: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.ack" showname=".... ...1 .... = Acknowledgment: Set" size="1" pos="33" show="1" value="FFFFFFFF" unmaskedvalue="18"/>
+ <field name="tcp.flags.push" showname=".... .... 1... = Push: Set" size="1" pos="33" show="1" value="FFFFFFFF" unmaskedvalue="18"/>
+ <field name="tcp.flags.reset" showname=".... .... .0.. = Reset: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.syn" showname=".... .... ..0. = Syn: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.fin" showname=".... .... ...0 = Fin: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.str" showname="TCP Flags: *******AP***" size="2" pos="32" show="*******AP***" value="5018"/>
+ </field>
+ <field name="tcp.window_size_value" showname="Window size value: 32767" size="2" pos="34" show="32767" value="7fff"/>
+ <field name="tcp.window_size" showname="Calculated window size: 32767" size="2" pos="34" show="32767" value="7fff"/>
+ <field name="tcp.window_size_scalefactor" showname="Window size scaling factor: -2 (no window scaling used)" size="2" pos="34" show="-2" value="7fff"/>
+ <field name="tcp.checksum" showname="Checksum: 0x0000 [validation disabled]" size="2" pos="36" show="0x00000000" value="0000">
+ <field name="tcp.checksum_good" showname="Good Checksum: False" size="2" pos="36" show="0" value="0000"/>
+ <field name="tcp.checksum_bad" showname="Bad Checksum: False" size="2" pos="36" show="0" value="0000"/>
+ </field>
+ <field name="tcp.urgent_pointer" showname="Urgent pointer: 0" size="2" pos="38" show="0" value="0000"/>
+ <field name="tcp.analysis" showname="SEQ/ACK analysis" size="0" pos="20" show="" value="">
+ <field name="tcp.analysis.acks_frame" showname="This is an ACK to the segment in frame: 695" size="0" pos="20" show="695"/>
+ <field name="tcp.analysis.ack_rtt" showname="The RTT to ACK the segment was: 0.000105000 seconds" size="0" pos="20" show="0.000105000"/>
+ <field name="tcp.analysis.initial_rtt" showname="iRTT: 0.000014000 seconds" size="0" pos="20" show="0.000014000"/>
+ <field name="tcp.analysis.bytes_in_flight" showname="Bytes in flight: 186" size="0" pos="20" show="186"/>
+ <field name="tcp.analysis.flags" showname="TCP Analysis Flags" size="0" pos="20" show="" value="">
+ <field name="_ws.expert" showname="Expert Info (Warn/Sequence): ACKed segment that wasn&#x27;t captured (common at capture start)" size="0" pos="20">
+ <field name="tcp.analysis.ack_lost_segment" showname="ACKed segment that wasn&#x27;t captured (common at capture start)" size="0" pos="0" show="" value=""/>
+ <field name="_ws.expert.message" showname="Message: ACKed segment that wasn&#x27;t captured (common at capture start)" hide="yes" size="0" pos="0" show="ACKed segment that wasn&#x27;t captured (common at capture start)"/>
+ <field name="_ws.expert.severity" showname="Severity level: Warn" size="0" pos="0" show="0x00600000"/>
+ <field name="_ws.expert.group" showname="Group: Sequence" size="0" pos="0" show="0x02000000"/>
+ </field>
+ </field>
+ </field>
+ </proto>
+ <proto name="nbss" showname="NetBIOS Session Service" size="186" pos="40">
+ <field name="nbss.type" showname="Message Type: Session message (0x00)" size="1" pos="40" show="0x00000000" value="00"/>
+ <field name="nbss.length" showname="Length: 182" size="3" pos="41" show="182" value="0000b6"/>
+ </proto>
+ <proto name="smb2" showname="SMB2 (Server Message Block Protocol version 2)" size="182" pos="44">
+ <field name="" show="SMB2 Header" size="64" pos="44" value="fe534d42400000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000">
+ <field name="smb2.server_component_smb2" showname="Server Component: SMB2" size="4" pos="44" show="" value=""/>
+ <field name="smb2.header_len" showname="Header Length: 64" size="2" pos="48" show="64" value="4000"/>
+ <field name="smb2.credit.charge" showname="Credit Charge: 0" size="2" pos="50" show="0" value="0000"/>
+ <field name="smb2.channel_sequence" showname="Channel Sequence: 0" size="2" pos="52" show="0" value="0000"/>
+ <field name="smb2.reserved" showname="Reserved: 0000" size="2" pos="54" show="00:00" value="0000"/>
+ <field name="smb2.cmd" showname="Command: Negotiate Protocol (0)" size="2" pos="56" show="0" value="0000"/>
+ <field name="smb2.credits.requested" showname="Credits requested: 0" size="2" pos="58" show="0" value="0000"/>
+ <field name="smb2.flags" showname="Flags: 0x00000000" size="4" pos="60" show="0x00000000" value="00000000">
+ <field name="smb2.flags.response" showname=".... .... .... .... .... .... .... ...0 = Response: This is a REQUEST" size="4" pos="60" show="0" value="0" unmaskedvalue="00000000"/>
+ <field name="smb2.flags.async" showname=".... .... .... .... .... .... .... ..0. = Async command: This is a SYNC command" size="4" pos="60" show="0" value="0" unmaskedvalue="00000000"/>
+ <field name="smb2.flags.chained" showname=".... .... .... .... .... .... .... .0.. = Chained: This pdu is NOT a chained command" size="4" pos="60" show="0" value="0" unmaskedvalue="00000000"/>
+ <field name="smb2.flags.signature" showname=".... .... .... .... .... .... .... 0... = Signing: This pdu is NOT signed" size="4" pos="60" show="0" value="0" unmaskedvalue="00000000"/>
+ <field name="smb2.flags.dfs" showname="...0 .... .... .... .... .... .... .... = DFS operation: This is a normal operation" size="4" pos="60" show="0" value="0" unmaskedvalue="00000000"/>
+ <field name="smb2.flags.replay" showname="..0. .... .... .... .... .... .... .... = Replay operation: This is NOT a replay operation" size="4" pos="60" show="0" value="0" unmaskedvalue="00000000"/>
+ </field>
+ <field name="smb2.chain_offset" showname="Chain Offset: 0x00000000" size="4" pos="64" show="0x00000000" value="00000000"/>
+ <field name="smb2.msg_id" showname="Message ID: 1" size="8" pos="68" show="1" value="0100000000000000"/>
+ <field name="smb2.pid" showname="Process Id: 0x00000000" size="4" pos="76" show="0x00000000" value="00000000"/>
+ <field name="smb2.tid" showname="Tree Id: 0x00000000" size="4" pos="80" show="0x00000000" value="00000000"/>
+ <field name="smb2.sesid" showname="Session Id: 0x0000000000000000" size="8" pos="84" show="0x0000000000000000" value="0000000000000000"/>
+ <field name="smb2.signature" showname="Signature: 00000000000000000000000000000000" size="16" pos="92" show="00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00" value="00000000000000000000000000000000"/>
+ </field>
+ <field name="" show="Negotiate Protocol Request (0x00)" size="118" pos="108" value="24000800030000007f000000ee2b90e954001a488a0332bc9e697f2d780000000200000002021002220224020003020310031103000000000100260000000000010020000100dfcf45249723e007a592511728de102521de99235cfee4ef893464068a2f67a200000200060000000000020001000200">
+ <field name="smb2.buffer_code" showname="StructureSize: 0x0024" size="2" pos="108" show="0x00000024" value="2400">
+ <field name="smb2.buffer_code.length" showname="0000 0000 0010 010. = Fixed Part Length: 18" size="2" pos="108" show="18" value="12" unmaskedvalue="2400"/>
+ <field name="smb2.buffer_code.dynamic" showname=".... .... .... ...0 = Dynamic Part: False" size="2" pos="108" show="0" value="0" unmaskedvalue="2400"/>
+ </field>
+ <field name="smb2.dialect_count" showname="Dialect count: 8" size="2" pos="110" show="8" value="0800"/>
+ <field name="smb2.sec_mode" showname="Security mode: 0x03, Signing enabled, Signing required" size="1" pos="112" show="0x00000003" value="03">
+ <field name="smb2.sec_mode.sign_enabled" showname=".... ...1 = Signing enabled: True" size="1" pos="112" show="1" value="FFFFFFFF" unmaskedvalue="03"/>
+ <field name="smb2.sec_mode.sign_required" showname=".... ..1. = Signing required: True" size="1" pos="112" show="1" value="FFFFFFFF" unmaskedvalue="03"/>
+ </field>
+ <field name="smb2.reserved" showname="Reserved: 0000" size="2" pos="114" show="00:00" value="0000"/>
+ <field name="smb2.capabilities" showname="Capabilities: 0x0000007f, DFS, LEASING, LARGE MTU, MULTI CHANNEL, PERSISTENT HANDLES, DIRECTORY LEASING, ENCRYPTION" size="4" pos="116" show="0x0000007f" value="7f000000">
+ <field name="smb2.capabilities.dfs" showname=".... .... .... .... .... .... .... ...1 = DFS: This host supports DFS" size="4" pos="116" show="1" value="FFFFFFFF" unmaskedvalue="7f000000"/>
+ <field name="smb2.capabilities.leasing" showname=".... .... .... .... .... .... .... ..1. = LEASING: This host supports LEASING" size="4" pos="116" show="1" value="FFFFFFFF" unmaskedvalue="7f000000"/>
+ <field name="smb2.capabilities.large_mtu" showname=".... .... .... .... .... .... .... .1.. = LARGE MTU: This host supports LARGE_MTU" size="4" pos="116" show="1" value="FFFFFFFF" unmaskedvalue="7f000000"/>
+ <field name="smb2.capabilities.multi_channel" showname=".... .... .... .... .... .... .... 1... = MULTI CHANNEL: This host supports MULTI CHANNEL" size="4" pos="116" show="1" value="FFFFFFFF" unmaskedvalue="7f000000"/>
+ <field name="smb2.capabilities.persistent_handles" showname=".... .... .... .... .... .... ...1 .... = PERSISTENT HANDLES: This host supports PERSISTENT HANDLES" size="4" pos="116" show="1" value="FFFFFFFF" unmaskedvalue="7f000000"/>
+ <field name="smb2.capabilities.directory_leasing" showname=".... .... .... .... .... .... ..1. .... = DIRECTORY LEASING: This host supports DIRECTORY LEASING" size="4" pos="116" show="1" value="FFFFFFFF" unmaskedvalue="7f000000"/>
+ <field name="smb2.capabilities.encryption" showname=".... .... .... .... .... .... .1.. .... = ENCRYPTION: This host supports ENCRYPTION" size="4" pos="116" show="1" value="FFFFFFFF" unmaskedvalue="7f000000"/>
+ </field>
+ <field name="smb2.client_guid" showname="Client Guid: e9902bee-0054-481a-8a03-32bc9e697f2d" size="16" pos="120" show="e9902bee-0054-481a-8a03-32bc9e697f2d" value="ee2b90e954001a488a0332bc9e697f2d"/>
+ <field name="smb2.negotiate_context.offset" showname="NegotiateContextOffset: 0x0078" size="4" pos="136" show="0x00000078" value="78000000"/>
+ <field name="smb2.negotiate_context.count" showname="NegotiateContextCount: 2" size="2" pos="140" show="2" value="0200"/>
+ <field name="smb2.reserved" showname="Reserved: 0000" size="2" pos="142" show="00:00" value="0000"/>
+ <field name="smb2.dialect" showname="Dialect: 0x0202" size="2" pos="144" show="0x00000202" value="0202"/>
+ <field name="smb2.dialect" showname="Dialect: 0x0210" size="2" pos="146" show="0x00000210" value="1002"/>
+ <field name="smb2.dialect" showname="Dialect: 0x0222" size="2" pos="148" show="0x00000222" value="2202"/>
+ <field name="smb2.dialect" showname="Dialect: 0x0224" size="2" pos="150" show="0x00000224" value="2402"/>
+ <field name="smb2.dialect" showname="Dialect: 0x0300" size="2" pos="152" show="0x00000300" value="0003"/>
+ <field name="smb2.dialect" showname="Dialect: 0x0302" size="2" pos="154" show="0x00000302" value="0203"/>
+ <field name="smb2.dialect" showname="Dialect: 0x0310" size="2" pos="156" show="0x00000310" value="1003"/>
+ <field name="smb2.dialect" showname="Dialect: 0x0311" size="2" pos="158" show="0x00000311" value="1103"/>
+ <field name="" show="Negotiate Context: SMB2_PREAUTH_INTEGRITY_CAPABILITIES " size="46" pos="164" value="0100260000000000010020000100dfcf45249723e007a592511728de102521de99235cfee4ef893464068a2f67a2">
+ <field name="smb2.negotiate_context.type" showname="Type: SMB2_PREAUTH_INTEGRITY_CAPABILITIES (0x0001)" size="2" pos="164" show="0x00000001" value="0100"/>
+ <field name="smb2.negotiate_context.data_length" showname="DataLength: 38" size="2" pos="166" show="38" value="2600"/>
+ <field name="smb2.reserved" showname="Reserved: 00000000" size="4" pos="168" show="00:00:00:00" value="00000000"/>
+ <field name="smb2.unknown" showname="unknown: 010020000100dfcf45249723e007a592511728de102521de..." size="38" pos="172" show="01:00:20:00:01:00:df:cf:45:24:97:23:e0:07:a5:92:51:17:28:de:10:25:21:de:99:23:5c:fe:e4:ef:89:34:64:06:8a:2f:67:a2" value="010020000100dfcf45249723e007a592511728de102521de99235cfee4ef893464068a2f67a2"/>
+ </field>
+ <field name="" show="Negotiate Context: SMB2_ENCRYPTION_CAPABILITIES " size="14" pos="212" value="0200060000000000020001000200">
+ <field name="smb2.negotiate_context.type" showname="Type: SMB2_ENCRYPTION_CAPABILITIES (0x0002)" size="2" pos="212" show="0x00000002" value="0200"/>
+ <field name="smb2.negotiate_context.data_length" showname="DataLength: 6" size="2" pos="214" show="6" value="0600"/>
+ <field name="smb2.reserved" showname="Reserved: 00000000" size="4" pos="216" show="00:00:00:00" value="00000000"/>
+ <field name="smb2.unknown" showname="unknown: 020001000200" size="6" pos="220" show="02:00:01:00:02:00" value="020001000200"/>
+ </field>
+ </field>
+ </proto>
+</packet>
+
+<packet>
+ <proto name="geninfo" pos="0" showname="General information" size="96">
+ <field name="num" pos="0" show="1166" showname="Number" value="48e" size="96"/>
+ <field name="len" pos="0" show="96" showname="Frame Length" value="60" size="96"/>
+ <field name="caplen" pos="0" show="96" showname="Captured Length" value="60" size="96"/>
+ <field name="timestamp" pos="0" show="Feb 16, 2017 11:26:28.515337000 NZDT" showname="Captured Time" value="1487197588.515337000" size="96"/>
+ </proto>
+ <proto name="frame" showname="Frame 1166: 96 bytes on wire (768 bits), 96 bytes captured (768 bits)" size="96" pos="0">
+ <field name="frame.encap_type" showname="Encapsulation type: Raw IP (7)" size="0" pos="0" show="7"/>
+ <field name="frame.time" showname="Arrival Time: Feb 16, 2017 11:26:28.515337000 NZDT" size="0" pos="0" show="Feb 16, 2017 11:26:28.515337000 NZDT"/>
+ <field name="frame.offset_shift" showname="Time shift for this packet: 0.000000000 seconds" size="0" pos="0" show="0.000000000"/>
+ <field name="frame.time_epoch" showname="Epoch Time: 1487197588.515337000 seconds" size="0" pos="0" show="1487197588.515337000"/>
+ <field name="frame.time_delta" showname="Time delta from previous captured frame: 0.000045000 seconds" size="0" pos="0" show="0.000045000"/>
+ <field name="frame.time_delta_displayed" showname="Time delta from previous displayed frame: 0.000045000 seconds" size="0" pos="0" show="0.000045000"/>
+ <field name="frame.time_relative" showname="Time since reference or first frame: 32.356043000 seconds" size="0" pos="0" show="32.356043000"/>
+ <field name="frame.number" showname="Frame Number: 1166" size="0" pos="0" show="1166"/>
+ <field name="frame.len" showname="Frame Length: 96 bytes (768 bits)" size="0" pos="0" show="96"/>
+ <field name="frame.cap_len" showname="Capture Length: 96 bytes (768 bits)" size="0" pos="0" show="96"/>
+ <field name="frame.marked" showname="Frame is marked: False" size="0" pos="0" show="0"/>
+ <field name="frame.ignored" showname="Frame is ignored: False" size="0" pos="0" show="0"/>
+ <field name="frame.protocols" showname="Protocols in frame: raw:ip:udp:dns" size="0" pos="0" show="raw:ip:udp:dns"/>
+ </proto>
+ <proto name="raw" showname="Raw packet data" size="96" pos="0"/>
+ <proto name="ip" showname="Internet Protocol Version 4, Src: 127.0.0.26, Dst: 0.0.0.0" size="20" pos="0">
+ <field name="ip.version" showname="0100 .... = Version: 4" size="1" pos="0" show="4" value="4" unmaskedvalue="45"/>
+ <field name="ip.hdr_len" showname=".... 0101 = Header Length: 20 bytes" size="1" pos="0" show="5" value="5" unmaskedvalue="45"/>
+ <field name="ip.dsfield" showname="Differentiated Services Field: 0x00 (DSCP: CS0, ECN: Not-ECT)" size="1" pos="1" show="0x00000000" value="00">
+ <field name="ip.dsfield.dscp" showname="0000 00.. = Differentiated Services Codepoint: Default (0)" size="1" pos="1" show="0" value="0" unmaskedvalue="00"/>
+ <field name="ip.dsfield.ecn" showname=".... ..00 = Explicit Congestion Notification: Not ECN-Capable Transport (0)" size="1" pos="1" show="0" value="0" unmaskedvalue="00"/>
+ </field>
+ <field name="ip.len" showname="Total Length: 96" size="2" pos="2" show="96" value="0060"/>
+ <field name="ip.id" showname="Identification: 0xffff (65535)" size="2" pos="4" show="0x0000ffff" value="ffff"/>
+ <field name="ip.flags" showname="Flags: 0x02 (Don&#x27;t Fragment)" size="1" pos="6" show="0x00000002" value="40">
+ <field name="ip.flags.rb" showname="0... .... = Reserved bit: Not set" size="1" pos="6" show="0" value="40"/>
+ <field name="ip.flags.df" showname=".1.. .... = Don&#x27;t fragment: Set" size="1" pos="6" show="1" value="40"/>
+ <field name="ip.flags.mf" showname="..0. .... = More fragments: Not set" size="1" pos="6" show="0" value="40"/>
+ </field>
+ <field name="ip.frag_offset" showname="Fragment offset: 0" size="2" pos="6" show="0" value="4000"/>
+ <field name="ip.ttl" showname="Time to live: 255" size="1" pos="8" show="255" value="ff"/>
+ <field name="ip.proto" showname="Protocol: UDP (17)" size="1" pos="9" show="17" value="11"/>
+ <field name="ip.checksum" showname="Header checksum: 0x0000 [validation disabled]" size="2" pos="10" show="0x00000000" value="0000">
+ <field name="ip.checksum_good" showname="Good: False" size="2" pos="10" show="0" value="0000"/>
+ <field name="ip.checksum_bad" showname="Bad: False" size="2" pos="10" show="0" value="0000"/>
+ </field>
+ <field name="ip.src" showname="Source: 127.0.0.26" size="4" pos="12" show="127.0.0.26" value="7f00001a"/>
+ <field name="ip.addr" showname="Source or Destination Address: 127.0.0.26" hide="yes" size="4" pos="12" show="127.0.0.26" value="7f00001a"/>
+ <field name="ip.src_host" showname="Source Host: 127.0.0.26" hide="yes" size="4" pos="12" show="127.0.0.26" value="7f00001a"/>
+ <field name="ip.host" showname="Source or Destination Host: 127.0.0.26" hide="yes" size="4" pos="12" show="127.0.0.26" value="7f00001a"/>
+ <field name="ip.dst" showname="Destination: 0.0.0.0" size="4" pos="16" show="0.0.0.0" value="00000000"/>
+ <field name="ip.addr" showname="Source or Destination Address: 0.0.0.0" hide="yes" size="4" pos="16" show="0.0.0.0" value="00000000"/>
+ <field name="ip.dst_host" showname="Destination Host: 0.0.0.0" hide="yes" size="4" pos="16" show="0.0.0.0" value="00000000"/>
+ <field name="ip.host" showname="Source or Destination Host: 0.0.0.0" hide="yes" size="4" pos="16" show="0.0.0.0" value="00000000"/>
+ <field name="" show="Source GeoIP: Unknown" size="4" pos="12" value="7f00001a"/>
+ <field name="" show="Destination GeoIP: Unknown" size="4" pos="16" value="00000000"/>
+ </proto>
+ <proto name="udp" showname="User Datagram Protocol, Src Port: 31989 (31989), Dst Port: 53 (53)" size="8" pos="20">
+ <field name="udp.srcport" showname="Source Port: 31989" size="2" pos="20" show="31989" value="7cf5"/>
+ <field name="udp.dstport" showname="Destination Port: 53" size="2" pos="22" show="53" value="0035"/>
+ <field name="udp.port" showname="Source or Destination Port: 31989" hide="yes" size="2" pos="20" show="31989" value="7cf5"/>
+ <field name="udp.port" showname="Source or Destination Port: 53" hide="yes" size="2" pos="22" show="53" value="0035"/>
+ <field name="udp.length" showname="Length: 76" size="2" pos="24" show="76" value="004c"/>
+ <field name="udp.checksum" showname="Checksum: 0x0000 (none)" size="2" pos="26" show="0x00000000" value="0000">
+ <field name="udp.checksum_good" showname="Good Checksum: False" size="2" pos="26" show="0" value="0000"/>
+ <field name="udp.checksum_bad" showname="Bad Checksum: False" size="2" pos="26" show="0" value="0000"/>
+ </field>
+ <field name="udp.stream" showname="Stream index: 76" size="0" pos="28" show="76"/>
+ </proto>
+ <proto name="dns" showname="Domain Name System (query)" size="68" pos="28">
+ <field name="dns.id" showname="Transaction ID: 0x1b5d" size="2" pos="28" show="0x00001b5d" value="1b5d"/>
+ <field name="dns.flags" showname="Flags: 0x2800 Dynamic update" size="2" pos="30" show="0x00002800" value="2800">
+ <field name="dns.flags.response" showname="0... .... .... .... = Response: Message is a query" size="2" pos="30" show="0" value="0" unmaskedvalue="2800"/>
+ <field name="dns.flags.opcode" showname=".010 1... .... .... = Opcode: Dynamic update (5)" size="2" pos="30" show="5" value="5" unmaskedvalue="2800"/>
+ <field name="dns.flags.truncated" showname=".... ..0. .... .... = Truncated: Message is not truncated" size="2" pos="30" show="0" value="0" unmaskedvalue="2800"/>
+ <field name="dns.flags.recdesired" showname=".... ...0 .... .... = Recursion desired: Don&#x27;t do query recursively" size="2" pos="30" show="0" value="0" unmaskedvalue="2800"/>
+ <field name="dns.flags.z" showname=".... .... .0.. .... = Z: reserved (0)" size="2" pos="30" show="0" value="0" unmaskedvalue="2800"/>
+ <field name="dns.flags.checkdisable" showname=".... .... ...0 .... = Non-authenticated data: Unacceptable" size="2" pos="30" show="0" value="0" unmaskedvalue="2800"/>
+ </field>
+ <field name="dns.count.zones" showname="Zones: 1" size="2" pos="32" show="1" value="0001"/>
+ <field name="dns.count.prerequisites" showname="Prerequisites: 0" size="2" pos="34" show="0" value="0000"/>
+ <field name="dns.count.updates" showname="Updates: 1" size="2" pos="36" show="1" value="0001"/>
+ <field name="dns.count.add_rr" showname="Additional RRs: 0" size="2" pos="38" show="0" value="0000"/>
+ <field name="" show="Zone" size="27" pos="40" value="0973616d626132303033076578616d706c6503636f6d0000060001">
+ <field name="" show="samba2003.example.com: type SOA, class IN" size="27" pos="40" value="0973616d626132303033076578616d706c6503636f6d0000060001">
+ <field name="dns.qry.name" showname="Name: samba2003.example.com" size="23" pos="40" show="samba2003.example.com" value="0973616d626132303033076578616d706c6503636f6d00"/>
+ <field name="dns.qry.name.len" showname="Name Length: 21" size="23" pos="40" show="21" value="0973616d626132303033076578616d706c6503636f6d00"/>
+ <field name="dns.count.labels" showname="Label Count: 3" size="23" pos="40" show="3" value="0973616d626132303033076578616d706c6503636f6d00"/>
+ <field name="dns.qry.type" showname="Type: SOA (Start Of a zone of Authority) (6)" size="2" pos="63" show="6" value="0006"/>
+ <field name="dns.qry.class" showname="Class: IN (0x0001)" size="2" pos="65" show="0x00000001" value="0001"/>
+ </field>
+ </field>
+ <field name="" show="Updates" size="29" pos="67" value="0a636e616d655f74657374c00c0005000100000384000603646336c00c">
+ <field name="" show="cname_test.samba2003.example.com: type CNAME, class IN, cname dc6.samba2003.example.com" size="29" pos="67" value="0a636e616d655f74657374c00c0005000100000384000603646336c00c">
+ <field name="dns.resp.name" showname="Name: cname_test.samba2003.example.com" size="13" pos="67" show="cname_test.samba2003.example.com" value="0a636e616d655f74657374c00c"/>
+ <field name="dns.resp.type" showname="Type: CNAME (Canonical NAME for an alias) (5)" size="2" pos="80" show="5" value="0005"/>
+ <field name="dns.resp.class" showname="Class: IN (0x0001)" size="2" pos="82" show="0x00000001" value="0001"/>
+ <field name="dns.resp.ttl" showname="Time to live: 900" size="4" pos="84" show="900" value="00000384"/>
+ <field name="dns.resp.len" showname="Data length: 6" size="2" pos="88" show="6" value="0006"/>
+ <field name="dns.cname" showname="CNAME: dc6.samba2003.example.com" size="6" pos="90" show="dc6.samba2003.example.com" value="03646336c00c"/>
+ </field>
+ </field>
+ </proto>
+</packet>
+
+<packet>
+ <proto name="geninfo" pos="0" showname="General information" size="96">
+ <field name="num" pos="0" show="1167" showname="Number" value="48f" size="96"/>
+ <field name="len" pos="0" show="96" showname="Frame Length" value="60" size="96"/>
+ <field name="caplen" pos="0" show="96" showname="Captured Length" value="60" size="96"/>
+ <field name="timestamp" pos="0" show="Feb 16, 2017 11:26:28.911149000 NZDT" showname="Captured Time" value="1487197588.911149000" size="96"/>
+ </proto>
+ <proto name="frame" showname="Frame 1167: 96 bytes on wire (768 bits), 96 bytes captured (768 bits)" size="96" pos="0">
+ <field name="frame.encap_type" showname="Encapsulation type: Raw IP (7)" size="0" pos="0" show="7"/>
+ <field name="frame.time" showname="Arrival Time: Feb 16, 2017 11:26:28.911149000 NZDT" size="0" pos="0" show="Feb 16, 2017 11:26:28.911149000 NZDT"/>
+ <field name="frame.offset_shift" showname="Time shift for this packet: 0.000000000 seconds" size="0" pos="0" show="0.000000000"/>
+ <field name="frame.time_epoch" showname="Epoch Time: 1487197588.911149000 seconds" size="0" pos="0" show="1487197588.911149000"/>
+ <field name="frame.time_delta" showname="Time delta from previous captured frame: 0.395812000 seconds" size="0" pos="0" show="0.395812000"/>
+ <field name="frame.time_delta_displayed" showname="Time delta from previous displayed frame: 0.395812000 seconds" size="0" pos="0" show="0.395812000"/>
+ <field name="frame.time_relative" showname="Time since reference or first frame: 32.751855000 seconds" size="0" pos="0" show="32.751855000"/>
+ <field name="frame.number" showname="Frame Number: 1167" size="0" pos="0" show="1167"/>
+ <field name="frame.len" showname="Frame Length: 96 bytes (768 bits)" size="0" pos="0" show="96"/>
+ <field name="frame.cap_len" showname="Capture Length: 96 bytes (768 bits)" size="0" pos="0" show="96"/>
+ <field name="frame.marked" showname="Frame is marked: False" size="0" pos="0" show="0"/>
+ <field name="frame.ignored" showname="Frame is ignored: False" size="0" pos="0" show="0"/>
+ <field name="frame.protocols" showname="Protocols in frame: raw:ip:udp:dns" size="0" pos="0" show="raw:ip:udp:dns"/>
+ </proto>
+ <proto name="raw" showname="Raw packet data" size="96" pos="0"/>
+ <proto name="ip" showname="Internet Protocol Version 4, Src: 0.0.0.0, Dst: 127.0.0.26" size="20" pos="0">
+ <field name="ip.version" showname="0100 .... = Version: 4" size="1" pos="0" show="4" value="4" unmaskedvalue="45"/>
+ <field name="ip.hdr_len" showname=".... 0101 = Header Length: 20 bytes" size="1" pos="0" show="5" value="5" unmaskedvalue="45"/>
+ <field name="ip.dsfield" showname="Differentiated Services Field: 0x00 (DSCP: CS0, ECN: Not-ECT)" size="1" pos="1" show="0x00000000" value="00">
+ <field name="ip.dsfield.dscp" showname="0000 00.. = Differentiated Services Codepoint: Default (0)" size="1" pos="1" show="0" value="0" unmaskedvalue="00"/>
+ <field name="ip.dsfield.ecn" showname=".... ..00 = Explicit Congestion Notification: Not ECN-Capable Transport (0)" size="1" pos="1" show="0" value="0" unmaskedvalue="00"/>
+ </field>
+ <field name="ip.len" showname="Total Length: 96" size="2" pos="2" show="96" value="0060"/>
+ <field name="ip.id" showname="Identification: 0xffff (65535)" size="2" pos="4" show="0x0000ffff" value="ffff"/>
+ <field name="ip.flags" showname="Flags: 0x02 (Don&#x27;t Fragment)" size="1" pos="6" show="0x00000002" value="40">
+ <field name="ip.flags.rb" showname="0... .... = Reserved bit: Not set" size="1" pos="6" show="0" value="40"/>
+ <field name="ip.flags.df" showname=".1.. .... = Don&#x27;t fragment: Set" size="1" pos="6" show="1" value="40"/>
+ <field name="ip.flags.mf" showname="..0. .... = More fragments: Not set" size="1" pos="6" show="0" value="40"/>
+ </field>
+ <field name="ip.frag_offset" showname="Fragment offset: 0" size="2" pos="6" show="0" value="4000"/>
+ <field name="ip.ttl" showname="Time to live: 255" size="1" pos="8" show="255" value="ff"/>
+ <field name="ip.proto" showname="Protocol: UDP (17)" size="1" pos="9" show="17" value="11"/>
+ <field name="ip.checksum" showname="Header checksum: 0x0000 [validation disabled]" size="2" pos="10" show="0x00000000" value="0000">
+ <field name="ip.checksum_good" showname="Good: False" size="2" pos="10" show="0" value="0000"/>
+ <field name="ip.checksum_bad" showname="Bad: False" size="2" pos="10" show="0" value="0000"/>
+ </field>
+ <field name="ip.src" showname="Source: 0.0.0.0" size="4" pos="12" show="0.0.0.0" value="00000000"/>
+ <field name="ip.addr" showname="Source or Destination Address: 0.0.0.0" hide="yes" size="4" pos="12" show="0.0.0.0" value="00000000"/>
+ <field name="ip.src_host" showname="Source Host: 0.0.0.0" hide="yes" size="4" pos="12" show="0.0.0.0" value="00000000"/>
+ <field name="ip.host" showname="Source or Destination Host: 0.0.0.0" hide="yes" size="4" pos="12" show="0.0.0.0" value="00000000"/>
+ <field name="ip.dst" showname="Destination: 127.0.0.26" size="4" pos="16" show="127.0.0.26" value="7f00001a"/>
+ <field name="ip.addr" showname="Source or Destination Address: 127.0.0.26" hide="yes" size="4" pos="16" show="127.0.0.26" value="7f00001a"/>
+ <field name="ip.dst_host" showname="Destination Host: 127.0.0.26" hide="yes" size="4" pos="16" show="127.0.0.26" value="7f00001a"/>
+ <field name="ip.host" showname="Source or Destination Host: 127.0.0.26" hide="yes" size="4" pos="16" show="127.0.0.26" value="7f00001a"/>
+ <field name="" show="Source GeoIP: Unknown" size="4" pos="12" value="00000000"/>
+ <field name="" show="Destination GeoIP: Unknown" size="4" pos="16" value="7f00001a"/>
+ </proto>
+ <proto name="udp" showname="User Datagram Protocol, Src Port: 53 (53), Dst Port: 31989 (31989)" size="8" pos="20">
+ <field name="udp.srcport" showname="Source Port: 53" size="2" pos="20" show="53" value="0035"/>
+ <field name="udp.dstport" showname="Destination Port: 31989" size="2" pos="22" show="31989" value="7cf5"/>
+ <field name="udp.port" showname="Source or Destination Port: 53" hide="yes" size="2" pos="20" show="53" value="0035"/>
+ <field name="udp.port" showname="Source or Destination Port: 31989" hide="yes" size="2" pos="22" show="31989" value="7cf5"/>
+ <field name="udp.length" showname="Length: 76" size="2" pos="24" show="76" value="004c"/>
+ <field name="udp.checksum" showname="Checksum: 0x0000 (none)" size="2" pos="26" show="0x00000000" value="0000">
+ <field name="udp.checksum_good" showname="Good Checksum: False" size="2" pos="26" show="0" value="0000"/>
+ <field name="udp.checksum_bad" showname="Bad Checksum: False" size="2" pos="26" show="0" value="0000"/>
+ </field>
+ <field name="udp.stream" showname="Stream index: 76" size="0" pos="28" show="76"/>
+ </proto>
+ <proto name="dns" showname="Domain Name System (response)" size="68" pos="28">
+ <field name="dns.response_to" showname="Request In: 1166" size="0" pos="28" show="1166"/>
+ <field name="dns.time" showname="Time: 0.395812000 seconds" size="0" pos="28" show="0.395812000"/>
+ <field name="dns.id" showname="Transaction ID: 0x1b5d" size="2" pos="28" show="0x00001b5d" value="1b5d"/>
+ <field name="dns.flags" showname="Flags: 0xa880 Dynamic update response, No error" size="2" pos="30" show="0x0000a880" value="a880">
+ <field name="dns.flags.response" showname="1... .... .... .... = Response: Message is a response" size="2" pos="30" show="1" value="FFFFFFFF" unmaskedvalue="a880"/>
+ <field name="dns.flags.opcode" showname=".010 1... .... .... = Opcode: Dynamic update (5)" size="2" pos="30" show="5" value="5" unmaskedvalue="a880"/>
+ <field name="dns.flags.authoritative" showname=".... .0.. .... .... = Authoritative: Server is not an authority for domain" size="2" pos="30" show="0" value="0" unmaskedvalue="a880"/>
+ <field name="dns.flags.truncated" showname=".... ..0. .... .... = Truncated: Message is not truncated" size="2" pos="30" show="0" value="0" unmaskedvalue="a880"/>
+ <field name="dns.flags.recdesired" showname=".... ...0 .... .... = Recursion desired: Don&#x27;t do query recursively" size="2" pos="30" show="0" value="0" unmaskedvalue="a880"/>
+ <field name="dns.flags.recavail" showname=".... .... 1... .... = Recursion available: Server can do recursive queries" size="2" pos="30" show="1" value="FFFFFFFF" unmaskedvalue="a880"/>
+ <field name="dns.flags.z" showname=".... .... .0.. .... = Z: reserved (0)" size="2" pos="30" show="0" value="0" unmaskedvalue="a880"/>
+ <field name="dns.flags.authenticated" showname=".... .... ..0. .... = Answer authenticated: Answer/authority portion was not authenticated by the server" size="2" pos="30" show="0" value="0" unmaskedvalue="a880"/>
+ <field name="dns.flags.checkdisable" showname=".... .... ...0 .... = Non-authenticated data: Unacceptable" size="2" pos="30" show="0" value="0" unmaskedvalue="a880"/>
+ <field name="dns.flags.rcode" showname=".... .... .... 0000 = Reply code: No error (0)" size="2" pos="30" show="0" value="0" unmaskedvalue="a880"/>
+ </field>
+ <field name="dns.count.zones" showname="Zones: 1" size="2" pos="32" show="1" value="0001"/>
+ <field name="dns.count.prerequisites" showname="Prerequisites: 0" size="2" pos="34" show="0" value="0000"/>
+ <field name="dns.count.updates" showname="Updates: 1" size="2" pos="36" show="1" value="0001"/>
+ <field name="dns.count.add_rr" showname="Additional RRs: 0" size="2" pos="38" show="0" value="0000"/>
+ <field name="" show="Zone" size="27" pos="40" value="0973616d626132303033076578616d706c6503636f6d0000060001">
+ <field name="" show="samba2003.example.com: type SOA, class IN" size="27" pos="40" value="0973616d626132303033076578616d706c6503636f6d0000060001">
+ <field name="dns.qry.name" showname="Name: samba2003.example.com" size="23" pos="40" show="samba2003.example.com" value="0973616d626132303033076578616d706c6503636f6d00"/>
+ <field name="dns.qry.name.len" showname="Name Length: 21" size="23" pos="40" show="21" value="0973616d626132303033076578616d706c6503636f6d00"/>
+ <field name="dns.count.labels" showname="Label Count: 3" size="23" pos="40" show="3" value="0973616d626132303033076578616d706c6503636f6d00"/>
+ <field name="dns.qry.type" showname="Type: SOA (Start Of a zone of Authority) (6)" size="2" pos="63" show="6" value="0006"/>
+ <field name="dns.qry.class" showname="Class: IN (0x0001)" size="2" pos="65" show="0x00000001" value="0001"/>
+ </field>
+ </field>
+ <field name="" show="Updates" size="29" pos="67" value="0a636e616d655f74657374c00c0005000100000384000603646336c00c">
+ <field name="" show="cname_test.samba2003.example.com: type CNAME, class IN, cname dc6.samba2003.example.com" size="29" pos="67" value="0a636e616d655f74657374c00c0005000100000384000603646336c00c">
+ <field name="dns.resp.name" showname="Name: cname_test.samba2003.example.com" size="13" pos="67" show="cname_test.samba2003.example.com" value="0a636e616d655f74657374c00c"/>
+ <field name="dns.resp.type" showname="Type: CNAME (Canonical NAME for an alias) (5)" size="2" pos="80" show="5" value="0005"/>
+ <field name="dns.resp.class" showname="Class: IN (0x0001)" size="2" pos="82" show="0x00000001" value="0001"/>
+ <field name="dns.resp.ttl" showname="Time to live: 900" size="4" pos="84" show="900" value="00000384"/>
+ <field name="dns.resp.len" showname="Data length: 6" size="2" pos="88" show="6" value="0006"/>
+ <field name="dns.cname" showname="CNAME: dc6.samba2003.example.com" size="6" pos="90" show="dc6.samba2003.example.com" value="03646336c00c"/>
+ </field>
+ </field>
+ </proto>
+</packet>
+
+<packet>
+ <proto name="geninfo" pos="0" showname="General information" size="328">
+ <field name="num" pos="0" show="1380" showname="Number" value="564" size="328"/>
+ <field name="len" pos="0" show="328" showname="Frame Length" value="148" size="328"/>
+ <field name="caplen" pos="0" show="328" showname="Captured Length" value="148" size="328"/>
+ <field name="timestamp" pos="0" show="Feb 16, 2017 11:26:29.619792000 NZDT" showname="Captured Time" value="1487197589.619792000" size="328"/>
+ </proto>
+ <proto name="frame" showname="Frame 1380: 328 bytes on wire (2624 bits), 328 bytes captured (2624 bits)" size="328" pos="0">
+ <field name="frame.encap_type" showname="Encapsulation type: Raw IP (7)" size="0" pos="0" show="7"/>
+ <field name="frame.time" showname="Arrival Time: Feb 16, 2017 11:26:29.619792000 NZDT" size="0" pos="0" show="Feb 16, 2017 11:26:29.619792000 NZDT"/>
+ <field name="frame.offset_shift" showname="Time shift for this packet: 0.000000000 seconds" size="0" pos="0" show="0.000000000"/>
+ <field name="frame.time_epoch" showname="Epoch Time: 1487197589.619792000 seconds" size="0" pos="0" show="1487197589.619792000"/>
+ <field name="frame.time_delta" showname="Time delta from previous captured frame: 0.000186000 seconds" size="0" pos="0" show="0.000186000"/>
+ <field name="frame.time_delta_displayed" showname="Time delta from previous displayed frame: 0.000186000 seconds" size="0" pos="0" show="0.000186000"/>
+ <field name="frame.time_relative" showname="Time since reference or first frame: 33.460498000 seconds" size="0" pos="0" show="33.460498000"/>
+ <field name="frame.number" showname="Frame Number: 1380" size="0" pos="0" show="1380"/>
+ <field name="frame.len" showname="Frame Length: 328 bytes (2624 bits)" size="0" pos="0" show="328"/>
+ <field name="frame.cap_len" showname="Capture Length: 328 bytes (2624 bits)" size="0" pos="0" show="328"/>
+ <field name="frame.marked" showname="Frame is marked: False" size="0" pos="0" show="0"/>
+ <field name="frame.ignored" showname="Frame is ignored: False" size="0" pos="0" show="0"/>
+ <field name="frame.protocols" showname="Protocols in frame: raw:ip:tcp:dcerpc:ntlmssp" size="0" pos="0" show="raw:ip:tcp:dcerpc:ntlmssp"/>
+ </proto>
+ <proto name="raw" showname="Raw packet data" size="328" pos="0"/>
+ <proto name="ip" showname="Internet Protocol Version 4, Src: 127.0.0.26, Dst: 127.0.0.26" size="20" pos="0">
+ <field name="ip.version" showname="0100 .... = Version: 4" size="1" pos="0" show="4" value="4" unmaskedvalue="45"/>
+ <field name="ip.hdr_len" showname=".... 0101 = Header Length: 20 bytes" size="1" pos="0" show="5" value="5" unmaskedvalue="45"/>
+ <field name="ip.dsfield" showname="Differentiated Services Field: 0x00 (DSCP: CS0, ECN: Not-ECT)" size="1" pos="1" show="0x00000000" value="00">
+ <field name="ip.dsfield.dscp" showname="0000 00.. = Differentiated Services Codepoint: Default (0)" size="1" pos="1" show="0" value="0" unmaskedvalue="00"/>
+ <field name="ip.dsfield.ecn" showname=".... ..00 = Explicit Congestion Notification: Not ECN-Capable Transport (0)" size="1" pos="1" show="0" value="0" unmaskedvalue="00"/>
+ </field>
+ <field name="ip.len" showname="Total Length: 328" size="2" pos="2" show="328" value="0148"/>
+ <field name="ip.id" showname="Identification: 0xffff (65535)" size="2" pos="4" show="0x0000ffff" value="ffff"/>
+ <field name="ip.flags" showname="Flags: 0x02 (Don&#x27;t Fragment)" size="1" pos="6" show="0x00000002" value="40">
+ <field name="ip.flags.rb" showname="0... .... = Reserved bit: Not set" size="1" pos="6" show="0" value="40"/>
+ <field name="ip.flags.df" showname=".1.. .... = Don&#x27;t fragment: Set" size="1" pos="6" show="1" value="40"/>
+ <field name="ip.flags.mf" showname="..0. .... = More fragments: Not set" size="1" pos="6" show="0" value="40"/>
+ </field>
+ <field name="ip.frag_offset" showname="Fragment offset: 0" size="2" pos="6" show="0" value="4000"/>
+ <field name="ip.ttl" showname="Time to live: 255" size="1" pos="8" show="255" value="ff"/>
+ <field name="ip.proto" showname="Protocol: TCP (6)" size="1" pos="9" show="6" value="06"/>
+ <field name="ip.checksum" showname="Header checksum: 0x0000 [validation disabled]" size="2" pos="10" show="0x00000000" value="0000">
+ <field name="ip.checksum_good" showname="Good: False" size="2" pos="10" show="0" value="0000"/>
+ <field name="ip.checksum_bad" showname="Bad: False" size="2" pos="10" show="0" value="0000"/>
+ </field>
+ <field name="ip.src" showname="Source: 127.0.0.26" size="4" pos="12" show="127.0.0.26" value="7f00001a"/>
+ <field name="ip.addr" showname="Source or Destination Address: 127.0.0.26" hide="yes" size="4" pos="12" show="127.0.0.26" value="7f00001a"/>
+ <field name="ip.src_host" showname="Source Host: 127.0.0.26" hide="yes" size="4" pos="12" show="127.0.0.26" value="7f00001a"/>
+ <field name="ip.host" showname="Source or Destination Host: 127.0.0.26" hide="yes" size="4" pos="12" show="127.0.0.26" value="7f00001a"/>
+ <field name="ip.dst" showname="Destination: 127.0.0.26" size="4" pos="16" show="127.0.0.26" value="7f00001a"/>
+ <field name="ip.addr" showname="Source or Destination Address: 127.0.0.26" hide="yes" size="4" pos="16" show="127.0.0.26" value="7f00001a"/>
+ <field name="ip.dst_host" showname="Destination Host: 127.0.0.26" hide="yes" size="4" pos="16" show="127.0.0.26" value="7f00001a"/>
+ <field name="ip.host" showname="Source or Destination Host: 127.0.0.26" hide="yes" size="4" pos="16" show="127.0.0.26" value="7f00001a"/>
+ <field name="" show="Source GeoIP: Unknown" size="4" pos="12" value="7f00001a"/>
+ <field name="" show="Destination GeoIP: Unknown" size="4" pos="16" value="7f00001a"/>
+ </proto>
+ <proto name="tcp" showname="Transmission Control Protocol, Src Port: 32030 (32030), Dst Port: 49152 (49152), Seq: 799, Ack: 439, Len: 288" size="20" pos="20">
+ <field name="tcp.srcport" showname="Source Port: 32030" size="2" pos="20" show="32030" value="7d1e"/>
+ <field name="tcp.dstport" showname="Destination Port: 49152" size="2" pos="22" show="49152" value="c000"/>
+ <field name="tcp.port" showname="Source or Destination Port: 32030" hide="yes" size="2" pos="20" show="32030" value="7d1e"/>
+ <field name="tcp.port" showname="Source or Destination Port: 49152" hide="yes" size="2" pos="22" show="49152" value="c000"/>
+ <field name="tcp.stream" showname="Stream index: 29" size="0" pos="20" show="29"/>
+ <field name="tcp.len" showname="TCP Segment Len: 288" size="1" pos="32" show="288" value="50"/>
+ <field name="tcp.seq" showname="Sequence number: 799 (relative sequence number)" size="4" pos="24" show="799" value="0000031f"/>
+ <field name="tcp.nxtseq" showname="Next sequence number: 1087 (relative sequence number)" size="0" pos="20" show="1087"/>
+ <field name="tcp.ack" showname="Acknowledgment number: 439 (relative ack number)" size="4" pos="28" show="439" value="000001b7"/>
+ <field name="tcp.hdr_len" showname="Header Length: 20 bytes" size="1" pos="32" show="20" value="50"/>
+ <field name="tcp.flags" showname="Flags: 0x018 (PSH, ACK)" size="2" pos="32" show="0x00000018" value="18" unmaskedvalue="5018">
+ <field name="tcp.flags.res" showname="000. .... .... = Reserved: Not set" size="1" pos="32" show="0" value="0" unmaskedvalue="50"/>
+ <field name="tcp.flags.ns" showname="...0 .... .... = Nonce: Not set" size="1" pos="32" show="0" value="0" unmaskedvalue="50"/>
+ <field name="tcp.flags.cwr" showname=".... 0... .... = Congestion Window Reduced (CWR): Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.ecn" showname=".... .0.. .... = ECN-Echo: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.urg" showname=".... ..0. .... = Urgent: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.ack" showname=".... ...1 .... = Acknowledgment: Set" size="1" pos="33" show="1" value="FFFFFFFF" unmaskedvalue="18"/>
+ <field name="tcp.flags.push" showname=".... .... 1... = Push: Set" size="1" pos="33" show="1" value="FFFFFFFF" unmaskedvalue="18"/>
+ <field name="tcp.flags.reset" showname=".... .... .0.. = Reset: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.syn" showname=".... .... ..0. = Syn: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.fin" showname=".... .... ...0 = Fin: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.str" showname="TCP Flags: *******AP***" size="2" pos="32" show="*******AP***" value="5018"/>
+ </field>
+ <field name="tcp.window_size_value" showname="Window size value: 32767" size="2" pos="34" show="32767" value="7fff"/>
+ <field name="tcp.window_size" showname="Calculated window size: 32767" size="2" pos="34" show="32767" value="7fff"/>
+ <field name="tcp.window_size_scalefactor" showname="Window size scaling factor: -2 (no window scaling used)" size="2" pos="34" show="-2" value="7fff"/>
+ <field name="tcp.checksum" showname="Checksum: 0x0000 [validation disabled]" size="2" pos="36" show="0x00000000" value="0000">
+ <field name="tcp.checksum_good" showname="Good Checksum: False" size="2" pos="36" show="0" value="0000"/>
+ <field name="tcp.checksum_bad" showname="Bad Checksum: False" size="2" pos="36" show="0" value="0000"/>
+ </field>
+ <field name="tcp.urgent_pointer" showname="Urgent pointer: 0" size="2" pos="38" show="0" value="0000"/>
+ <field name="tcp.analysis" showname="SEQ/ACK analysis" size="0" pos="20" show="" value="">
+ <field name="tcp.analysis.acks_frame" showname="This is an ACK to the segment in frame: 1377" size="0" pos="20" show="1377"/>
+ <field name="tcp.analysis.ack_rtt" showname="The RTT to ACK the segment was: 0.000209000 seconds" size="0" pos="20" show="0.000209000"/>
+ <field name="tcp.analysis.initial_rtt" showname="iRTT: 0.000014000 seconds" size="0" pos="20" show="0.000014000"/>
+ <field name="tcp.analysis.bytes_in_flight" showname="Bytes in flight: 288" size="0" pos="20" show="288"/>
+ </field>
+ </proto>
+ <proto name="dcerpc" showname="Distributed Computing Environment / Remote Procedure Call (DCE/RPC) Request, Fragment: Single, FragLen: 288, Call: 2, Ctx: 0" size="288" pos="40">
+ <field name="dcerpc.ver" showname="Version: 5" size="1" pos="40" show="5" value="05"/>
+ <field name="dcerpc.ver_minor" showname="Version (minor): 0" size="1" pos="41" show="0" value="00"/>
+ <field name="dcerpc.pkt_type" showname="Packet type: Request (0)" size="1" pos="42" show="0" value="00"/>
+ <field name="dcerpc.cn_flags" showname="Packet Flags: 0x03" size="1" pos="43" show="0x00000003" value="03">
+ <field name="dcerpc.cn_flags.object" showname="0... .... = Object: Not set" size="1" pos="43" show="0" value="0" unmaskedvalue="03"/>
+ <field name="dcerpc.cn_flags.maybe" showname=".0.. .... = Maybe: Not set" size="1" pos="43" show="0" value="0" unmaskedvalue="03"/>
+ <field name="dcerpc.cn_flags.dne" showname="..0. .... = Did Not Execute: Not set" size="1" pos="43" show="0" value="0" unmaskedvalue="03"/>
+ <field name="dcerpc.cn_flags.mpx" showname="...0 .... = Multiplex: Not set" size="1" pos="43" show="0" value="0" unmaskedvalue="03"/>
+ <field name="dcerpc.cn_flags.reserved" showname=".... 0... = Reserved: Not set" size="1" pos="43" show="0" value="0" unmaskedvalue="03"/>
+ <field name="dcerpc.cn_flags.cancel_pending" showname=".... .0.. = Cancel Pending: Not set" size="1" pos="43" show="0" value="0" unmaskedvalue="03"/>
+ <field name="dcerpc.cn_flags.last_frag" showname=".... ..1. = Last Frag: Set" size="1" pos="43" show="1" value="FFFFFFFF" unmaskedvalue="03"/>
+ <field name="dcerpc.cn_flags.first_frag" showname=".... ...1 = First Frag: Set" size="1" pos="43" show="1" value="FFFFFFFF" unmaskedvalue="03"/>
+ </field>
+ <field name="dcerpc.drep" showname="Data Representation: 10000000" size="4" pos="44" show="10:00:00:00" value="10000000">
+ <field name="dcerpc.drep.byteorder" showname="Byte order: Little-endian (1)" size="1" pos="44" show="1" value="10"/>
+ <field name="dcerpc.drep.character" showname="Character: ASCII (0)" size="1" pos="44" show="0" value="10"/>
+ <field name="dcerpc.drep.fp" showname="Floating-point: IEEE (0)" size="1" pos="45" show="0" value="00"/>
+ </field>
+ <field name="dcerpc.cn_frag_len" showname="Frag Length: 288" size="2" pos="48" show="288" value="2001"/>
+ <field name="dcerpc.cn_auth_len" showname="Auth Length: 16" size="2" pos="50" show="16" value="1000"/>
+ <field name="dcerpc.cn_call_id" showname="Call ID: 2" size="4" pos="52" show="2" value="02000000"/>
+ <field name="dcerpc.cn_alloc_hint" showname="Alloc hint: 236" size="4" pos="56" show="236" value="ec000000"/>
+ <field name="dcerpc.cn_ctx_id" showname="Context ID: 0" size="2" pos="60" show="0" value="0000"/>
+ <field name="dcerpc.opnum" showname="Opnum: 9" size="2" pos="62" show="9" value="0900"/>
+ <field name="dcerpc.auth_type" showname="Auth type: SPNEGO (9)" size="1" pos="304" show="9" value="09"/>
+ <field name="dcerpc.auth_level" showname="Auth level: Packet integrity (5)" size="1" pos="305" show="5" value="05"/>
+ <field name="dcerpc.auth_pad_len" showname="Auth pad len: 4" size="1" pos="306" show="4" value="04"/>
+ <field name="dcerpc.auth_rsrvd" showname="Auth Rsrvd: 0" size="1" pos="307" show="0" value="00"/>
+ <field name="dcerpc.auth_ctx_id" showname="Auth Context ID: 1" size="4" pos="308" show="1" value="01000000"/>
+ <field name="dcerpc.auth_padding" showname="Auth Padding: 00000000" size="4" pos="300" show="00:00:00:00" value="00000000"/>
+ <proto name="gss-api" showname="GSS-API Generic Security Service Application Program Interface" size="28" pos="312">
+ <field name="ntlmssp.verf" showname="NTLMSSP Verifier" size="16" pos="312" show="" value="">
+ <field name="ntlmssp.verf.vers" showname="Version Number: 1" size="4" pos="312" show="1" value="01000000"/>
+ <field name="ntlmssp.verf.body" showname="Verifier Body: 9f7b95490561ec3101000000" size="12" pos="316" show="9f:7b:95:49:05:61:ec:31:01:00:00:00" value="9f7b95490561ec3101000000"/>
+ </field>
+ </proto>
+ </proto>
+ <proto name="dnsserver" showname="DNS Server, DnssrvUpdateRecord2" size="236" pos="64">
+ <field name="dnsserver.opnum" showname="Operation: DnssrvUpdateRecord2 (9)" size="0" pos="64" show="9"/>
+ <field name="" show="Long frame" size="236" pos="64" value="0000070000000000000002000b000000000000000b0000003100320037002e0030002e0030002e0032003600000000000400020016000000000000001600000073616d6261323030332e6578616d706c652e636f6d000000260000000000000026000000727063656d707479746578747265632e73616d6261323030332e6578616d706c652e636f6d000000080002000000000000001000f000000001000000840300000000000000000000000000008ae3137102f43671010004000100000002402800a4c2ab504d57b3409d66ee4fd5fba07605000000045d888aeb1cc9119fe808002b10486002000000">
+ <field name="_ws.expert" showname="Expert Info (Warn/Protocol): Long frame" size="0" pos="64">
+ <field name="dcerpc.long_frame" showname="Long frame" size="0" pos="0" show="" value=""/>
+ <field name="_ws.expert.message" showname="Message: Long frame" hide="yes" size="0" pos="0" show="Long frame"/>
+ <field name="_ws.expert.severity" showname="Severity level: Warn" size="0" pos="0" show="0x00600000"/>
+ <field name="_ws.expert.group" showname="Group: Protocol" size="0" pos="0" show="0x09000000"/>
+ </field>
+ </field>
+ </proto>
+</packet>
+
+<packet>
+ <proto name="geninfo" pos="0" showname="General information" size="160">
+ <field name="num" pos="0" show="496" showname="Number" value="1f0" size="160"/>
+ <field name="len" pos="0" show="160" showname="Frame Length" value="a0" size="160"/>
+ <field name="caplen" pos="0" show="160" showname="Captured Length" value="a0" size="160"/>
+ <field name="timestamp" pos="0" show="Feb 16, 2017 12:18:10.757022000 NZDT" showname="Captured Time" value="1487200690.757022000" size="160"/>
+ </proto>
+ <proto name="frame" showname="Frame 496: 160 bytes on wire (1280 bits), 160 bytes captured (1280 bits)" size="160" pos="0">
+ <field name="frame.encap_type" showname="Encapsulation type: Raw IP (7)" size="0" pos="0" show="7"/>
+ <field name="frame.time" showname="Arrival Time: Feb 16, 2017 12:18:10.757022000 NZDT" size="0" pos="0" show="Feb 16, 2017 12:18:10.757022000 NZDT"/>
+ <field name="frame.offset_shift" showname="Time shift for this packet: 0.000000000 seconds" size="0" pos="0" show="0.000000000"/>
+ <field name="frame.time_epoch" showname="Epoch Time: 1487200690.757022000 seconds" size="0" pos="0" show="1487200690.757022000"/>
+ <field name="frame.time_delta" showname="Time delta from previous captured frame: 0.000083000 seconds" size="0" pos="0" show="0.000083000"/>
+ <field name="frame.time_delta_displayed" showname="Time delta from previous displayed frame: 0.000083000 seconds" size="0" pos="0" show="0.000083000"/>
+ <field name="frame.time_relative" showname="Time since reference or first frame: 5.519298000 seconds" size="0" pos="0" show="5.519298000"/>
+ <field name="frame.number" showname="Frame Number: 496" size="0" pos="0" show="496"/>
+ <field name="frame.len" showname="Frame Length: 160 bytes (1280 bits)" size="0" pos="0" show="160"/>
+ <field name="frame.cap_len" showname="Capture Length: 160 bytes (1280 bits)" size="0" pos="0" show="160"/>
+ <field name="frame.marked" showname="Frame is marked: False" size="0" pos="0" show="0"/>
+ <field name="frame.ignored" showname="Frame is ignored: False" size="0" pos="0" show="0"/>
+ <field name="frame.protocols" showname="Protocols in frame: raw:ip:tcp:nbss:smb:dcerpc" size="0" pos="0" show="raw:ip:tcp:nbss:smb:dcerpc"/>
+ </proto>
+ <proto name="raw" showname="Raw packet data" size="160" pos="0"/>
+ <proto name="ip" showname="Internet Protocol Version 4, Src: 127.0.0.11, Dst: 127.0.0.21" size="20" pos="0">
+ <field name="ip.version" showname="0100 .... = Version: 4" size="1" pos="0" show="4" value="4" unmaskedvalue="45"/>
+ <field name="ip.hdr_len" showname=".... 0101 = Header Length: 20 bytes" size="1" pos="0" show="5" value="5" unmaskedvalue="45"/>
+ <field name="ip.dsfield" showname="Differentiated Services Field: 0x00 (DSCP: CS0, ECN: Not-ECT)" size="1" pos="1" show="0x00000000" value="00">
+ <field name="ip.dsfield.dscp" showname="0000 00.. = Differentiated Services Codepoint: Default (0)" size="1" pos="1" show="0" value="0" unmaskedvalue="00"/>
+ <field name="ip.dsfield.ecn" showname=".... ..00 = Explicit Congestion Notification: Not ECN-Capable Transport (0)" size="1" pos="1" show="0" value="0" unmaskedvalue="00"/>
+ </field>
+ <field name="ip.len" showname="Total Length: 160" size="2" pos="2" show="160" value="00a0"/>
+ <field name="ip.id" showname="Identification: 0xffff (65535)" size="2" pos="4" show="0x0000ffff" value="ffff"/>
+ <field name="ip.flags" showname="Flags: 0x02 (Don&#x27;t Fragment)" size="1" pos="6" show="0x00000002" value="40">
+ <field name="ip.flags.rb" showname="0... .... = Reserved bit: Not set" size="1" pos="6" show="0" value="40"/>
+ <field name="ip.flags.df" showname=".1.. .... = Don&#x27;t fragment: Set" size="1" pos="6" show="1" value="40"/>
+ <field name="ip.flags.mf" showname="..0. .... = More fragments: Not set" size="1" pos="6" show="0" value="40"/>
+ </field>
+ <field name="ip.frag_offset" showname="Fragment offset: 0" size="2" pos="6" show="0" value="4000"/>
+ <field name="ip.ttl" showname="Time to live: 255" size="1" pos="8" show="255" value="ff"/>
+ <field name="ip.proto" showname="Protocol: TCP (6)" size="1" pos="9" show="6" value="06"/>
+ <field name="ip.checksum" showname="Header checksum: 0x0000 [validation disabled]" size="2" pos="10" show="0x00000000" value="0000">
+ <field name="ip.checksum_good" showname="Good: False" size="2" pos="10" show="0" value="0000"/>
+ <field name="ip.checksum_bad" showname="Bad: False" size="2" pos="10" show="0" value="0000"/>
+ </field>
+ <field name="ip.src" showname="Source: 127.0.0.11" size="4" pos="12" show="127.0.0.11" value="7f00000b"/>
+ <field name="ip.addr" showname="Source or Destination Address: 127.0.0.11" hide="yes" size="4" pos="12" show="127.0.0.11" value="7f00000b"/>
+ <field name="ip.src_host" showname="Source Host: 127.0.0.11" hide="yes" size="4" pos="12" show="127.0.0.11" value="7f00000b"/>
+ <field name="ip.host" showname="Source or Destination Host: 127.0.0.11" hide="yes" size="4" pos="12" show="127.0.0.11" value="7f00000b"/>
+ <field name="ip.dst" showname="Destination: 127.0.0.21" size="4" pos="16" show="127.0.0.21" value="7f000015"/>
+ <field name="ip.addr" showname="Source or Destination Address: 127.0.0.21" hide="yes" size="4" pos="16" show="127.0.0.21" value="7f000015"/>
+ <field name="ip.dst_host" showname="Destination Host: 127.0.0.21" hide="yes" size="4" pos="16" show="127.0.0.21" value="7f000015"/>
+ <field name="ip.host" showname="Source or Destination Host: 127.0.0.21" hide="yes" size="4" pos="16" show="127.0.0.21" value="7f000015"/>
+ <field name="" show="Source GeoIP: Unknown" size="4" pos="12" value="7f00000b"/>
+ <field name="" show="Destination GeoIP: Unknown" size="4" pos="16" value="7f000015"/>
+ </proto>
+ <proto name="tcp" showname="Transmission Control Protocol, Src Port: 19094 (19094), Dst Port: 445 (445), Seq: 2889, Ack: 1672, Len: 120" size="20" pos="20">
+ <field name="tcp.srcport" showname="Source Port: 19094" size="2" pos="20" show="19094" value="4a96"/>
+ <field name="tcp.dstport" showname="Destination Port: 445" size="2" pos="22" show="445" value="01bd"/>
+ <field name="tcp.port" showname="Source or Destination Port: 19094" hide="yes" size="2" pos="20" show="19094" value="4a96"/>
+ <field name="tcp.port" showname="Source or Destination Port: 445" hide="yes" size="2" pos="22" show="445" value="01bd"/>
+ <field name="tcp.stream" showname="Stream index: 10" size="0" pos="20" show="10"/>
+ <field name="tcp.len" showname="TCP Segment Len: 120" size="1" pos="32" show="120" value="50"/>
+ <field name="tcp.seq" showname="Sequence number: 2889 (relative sequence number)" size="4" pos="24" show="2889" value="00000b49"/>
+ <field name="tcp.nxtseq" showname="Next sequence number: 3009 (relative sequence number)" size="0" pos="20" show="3009"/>
+ <field name="tcp.ack" showname="Acknowledgment number: 1672 (relative ack number)" size="4" pos="28" show="1672" value="00000688"/>
+ <field name="tcp.hdr_len" showname="Header Length: 20 bytes" size="1" pos="32" show="20" value="50"/>
+ <field name="tcp.flags" showname="Flags: 0x018 (PSH, ACK)" size="2" pos="32" show="0x00000018" value="18" unmaskedvalue="5018">
+ <field name="tcp.flags.res" showname="000. .... .... = Reserved: Not set" size="1" pos="32" show="0" value="0" unmaskedvalue="50"/>
+ <field name="tcp.flags.ns" showname="...0 .... .... = Nonce: Not set" size="1" pos="32" show="0" value="0" unmaskedvalue="50"/>
+ <field name="tcp.flags.cwr" showname=".... 0... .... = Congestion Window Reduced (CWR): Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.ecn" showname=".... .0.. .... = ECN-Echo: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.urg" showname=".... ..0. .... = Urgent: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.ack" showname=".... ...1 .... = Acknowledgment: Set" size="1" pos="33" show="1" value="FFFFFFFF" unmaskedvalue="18"/>
+ <field name="tcp.flags.push" showname=".... .... 1... = Push: Set" size="1" pos="33" show="1" value="FFFFFFFF" unmaskedvalue="18"/>
+ <field name="tcp.flags.reset" showname=".... .... .0.. = Reset: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.syn" showname=".... .... ..0. = Syn: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.fin" showname=".... .... ...0 = Fin: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.str" showname="TCP Flags: *******AP***" size="2" pos="32" show="*******AP***" value="5018"/>
+ </field>
+ <field name="tcp.window_size_value" showname="Window size value: 32767" size="2" pos="34" show="32767" value="7fff"/>
+ <field name="tcp.window_size" showname="Calculated window size: 32767" size="2" pos="34" show="32767" value="7fff"/>
+ <field name="tcp.window_size_scalefactor" showname="Window size scaling factor: -2 (no window scaling used)" size="2" pos="34" show="-2" value="7fff"/>
+ <field name="tcp.checksum" showname="Checksum: 0x0000 [validation disabled]" size="2" pos="36" show="0x00000000" value="0000">
+ <field name="tcp.checksum_good" showname="Good Checksum: False" size="2" pos="36" show="0" value="0000"/>
+ <field name="tcp.checksum_bad" showname="Bad Checksum: False" size="2" pos="36" show="0" value="0000"/>
+ </field>
+ <field name="tcp.urgent_pointer" showname="Urgent pointer: 0" size="2" pos="38" show="0" value="0000"/>
+ <field name="tcp.analysis" showname="SEQ/ACK analysis" size="0" pos="20" show="" value="">
+ <field name="tcp.analysis.acks_frame" showname="This is an ACK to the segment in frame: 493" size="0" pos="20" show="493"/>
+ <field name="tcp.analysis.ack_rtt" showname="The RTT to ACK the segment was: 0.000154000 seconds" size="0" pos="20" show="0.000154000"/>
+ <field name="tcp.analysis.initial_rtt" showname="iRTT: 0.000013000 seconds" size="0" pos="20" show="0.000013000"/>
+ <field name="tcp.analysis.bytes_in_flight" showname="Bytes in flight: 120" size="0" pos="20" show="120"/>
+ </field>
+ </proto>
+ <proto name="nbss" showname="NetBIOS Session Service" size="120" pos="40">
+ <field name="nbss.type" showname="Message Type: Session message (0x00)" size="1" pos="40" show="0x00000000" value="00"/>
+ <field name="nbss.length" showname="Length: 116" size="3" pos="41" show="116" value="000074"/>
+ </proto>
+ <proto name="smb" showname="SMB (Server Message Block Protocol)" size="116" pos="44">
+ <field name="" show="SMB Header" size="32" pos="44" value="ff534d4225000000001857c80000644f31ab2d1ec497000002e9000025190a00">
+ <field name="smb.server_component" showname="Server Component: SMB" size="4" pos="44" show="0x424d53ff" value="ff534d42"/>
+ <field name="smb.cmd" showname="SMB Command: Trans (0x25)" size="1" pos="48" show="37" value="25"/>
+ <field name="smb.nt_status" showname="NT Status: STATUS_SUCCESS (0x00000000)" size="4" pos="49" show="0" value="00000000"/>
+ <field name="smb.flags" showname="Flags: 0x18, Canonicalized Pathnames, Case Sensitivity" size="1" pos="53" show="0x00000018" value="18">
+ <field name="smb.flags.response" showname="0... .... = Request/Response: Message is a request to the server" size="1" pos="53" show="0" value="0" unmaskedvalue="18"/>
+ <field name="smb.flags.notify" showname=".0.. .... = Notify: Notify client only on open" size="1" pos="53" show="0" value="0" unmaskedvalue="18"/>
+ <field name="smb.flags.oplock" showname="..0. .... = Oplocks: OpLock not requested/granted" size="1" pos="53" show="0" value="0" unmaskedvalue="18"/>
+ <field name="smb.flags.canon" showname="...1 .... = Canonicalized Pathnames: Pathnames are canonicalized" size="1" pos="53" show="1" value="FFFFFFFF" unmaskedvalue="18"/>
+ <field name="smb.flags.caseless" showname=".... 1... = Case Sensitivity: Path names are caseless" size="1" pos="53" show="1" value="FFFFFFFF" unmaskedvalue="18"/>
+ <field name="smb.flags.receive_buffer" showname=".... ..0. = Receive Buffer Posted: Receive buffer has not been posted" size="1" pos="53" show="0" value="0" unmaskedvalue="18"/>
+ <field name="smb.flags.lock" showname=".... ...0 = Lock and Read: Lock&amp;Read, Write&amp;Unlock are not supported" size="1" pos="53" show="0" value="0" unmaskedvalue="18"/>
+ </field>
+ <field name="smb.flags2" showname="Flags2: 0xc857, Unicode Strings, Error Code Type, Extended Security Negotiation, Long Names Used, Security Signatures Required, Security Signatures, Extended Attributes, Long Names Allowed" size="2" pos="54" show="0x0000c857" value="57c8">
+ <field name="smb.flags2.string" showname="1... .... .... .... = Unicode Strings: Strings are Unicode" size="2" pos="54" show="1" value="FFFFFFFF" unmaskedvalue="57c8"/>
+ <field name="smb.flags2.nt_error" showname=".1.. .... .... .... = Error Code Type: Error codes are NT error codes" size="2" pos="54" show="1" value="FFFFFFFF" unmaskedvalue="57c8"/>
+ <field name="smb.flags2.roe" showname="..0. .... .... .... = Execute-only Reads: Don&#x27;t permit reads if execute-only" size="2" pos="54" show="0" value="0" unmaskedvalue="57c8"/>
+ <field name="smb.flags2.dfs" showname="...0 .... .... .... = Dfs: Don&#x27;t resolve pathnames with Dfs" size="2" pos="54" show="0" value="0" unmaskedvalue="57c8"/>
+ <field name="smb.flags2.esn" showname=".... 1... .... .... = Extended Security Negotiation: Extended security negotiation is supported" size="2" pos="54" show="1" value="FFFFFFFF" unmaskedvalue="57c8"/>
+ <field name="smb.flags2.reparse_path" showname=".... .0.. .... .... = Reparse Path: The request does not use a @GMT reparse path" size="2" pos="54" show="0" value="0" unmaskedvalue="57c8"/>
+ <field name="smb.flags2.long_names_used" showname=".... .... .1.. .... = Long Names Used: Path names in request are long file names" size="2" pos="54" show="1" value="FFFFFFFF" unmaskedvalue="57c8"/>
+ <field name="smb.flags2.sec_sig_required" showname=".... .... ...1 .... = Security Signatures Required: Security signatures are required" size="2" pos="54" show="1" value="FFFFFFFF" unmaskedvalue="57c8"/>
+ <field name="smb.flags2.compressed" showname=".... .... .... 0... = Compressed: Compression is not requested" size="2" pos="54" show="0" value="0" unmaskedvalue="57c8"/>
+ <field name="smb.flags2.sec_sig" showname=".... .... .... .1.. = Security Signatures: Security signatures are supported" size="2" pos="54" show="1" value="FFFFFFFF" unmaskedvalue="57c8"/>
+ <field name="smb.flags2.ea" showname=".... .... .... ..1. = Extended Attributes: Extended attributes are supported" size="2" pos="54" show="1" value="FFFFFFFF" unmaskedvalue="57c8"/>
+ <field name="smb.flags2.long_names_allowed" showname=".... .... .... ...1 = Long Names Allowed: Long file names are allowed in the response" size="2" pos="54" show="1" value="FFFFFFFF" unmaskedvalue="57c8"/>
+ </field>
+ <field name="smb.pid.high" showname="Process ID High: 0" size="2" pos="56" show="0" value="0000"/>
+ <field name="smb.signature" showname="Signature: 644f31ab2d1ec497" size="8" pos="58" show="64:4f:31:ab:2d:1e:c4:97" value="644f31ab2d1ec497"/>
+ <field name="smb.reserved" showname="Reserved: 0000" size="2" pos="66" show="00:00" value="0000"/>
+ <field name="smb.tid" showname="Tree ID: 59650 (\\LOCALDC\IPC$)" size="2" pos="68" show="59650" value="02e9">
+ <field name="smb.path" showname="Path: \\LOCALDC\IPC$" size="0" pos="112" show="\\LOCALDC\IPC$"/>
+ <field name="smb.fid.mapped_in" showname="Mapped in: 451" size="0" pos="112" show="451"/>
+ </field>
+ <field name="smb.pid" showname="Process ID: 0" size="2" pos="70" show="0" value="0000"/>
+ <field name="smb.uid" showname="User ID: 6437" size="2" pos="72" show="6437" value="2519"/>
+ <field name="smb.mid" showname="Multiplex ID: 10" size="2" pos="74" show="10" value="0a00"/>
+ </field>
+ <field name="" show="Trans Request (0x25)" size="84" pos="76" value="10000020000000b8100000000000000000000000005400200054000200260002003100005c0050004900500045005c00000000000500000310000000200000000200000008000000000000000000000000000002">
+ <field name="smb.wct" showname="Word Count (WCT): 16" size="1" pos="76" show="16" value="10"/>
+ <field name="smb.tpc" showname="Total Parameter Count: 0" size="2" pos="77" show="0" value="0000"/>
+ <field name="smb.tdc" showname="Total Data Count: 32" size="2" pos="79" show="32" value="2000"/>
+ <field name="smb.mpc" showname="Max Parameter Count: 0" size="2" pos="81" show="0" value="0000"/>
+ <field name="smb.mdc" showname="Max Data Count: 4280" size="2" pos="83" show="4280" value="b810"/>
+ <field name="smb.msc" showname="Max Setup Count: 0" size="1" pos="85" show="0" value="00"/>
+ <field name="smb.reserved" showname="Reserved: 00" size="1" pos="86" show="00" value="00"/>
+ <field name="smb.transaction.flags" showname="Flags: 0x0000" size="2" pos="87" show="0x00000000" value="0000">
+ <field name="smb.transaction.flags.owt" showname=".... .... .... ..0. = One Way Transaction: Two way transaction" size="2" pos="87" show="0" value="0" unmaskedvalue="0000"/>
+ <field name="smb.transaction.flags.dtid" showname=".... .... .... ...0 = Disconnect TID: Do NOT disconnect TID" size="2" pos="87" show="0" value="0" unmaskedvalue="0000"/>
+ </field>
+ <field name="smb.timeout" showname="Timeout: Return immediately (0)" size="4" pos="89" show="0" value="00000000"/>
+ <field name="smb.reserved" showname="Reserved: 0000" size="2" pos="93" show="00:00" value="0000"/>
+ <field name="smb.pc" showname="Parameter Count: 0" size="2" pos="95" show="0" value="0000"/>
+ <field name="smb.po" showname="Parameter Offset: 84" size="2" pos="97" show="84" value="5400"/>
+ <field name="smb.dc" showname="Data Count: 32" size="2" pos="99" show="32" value="2000"/>
+ <field name="smb.data_offset" showname="Data Offset: 84" size="2" pos="101" show="84" value="5400"/>
+ <field name="smb.sc" showname="Setup Count: 2" size="1" pos="103" show="2" value="02"/>
+ <field name="smb.reserved" showname="Reserved: 00" size="1" pos="104" show="00" value="00"/>
+ <field name="smb.bcc" showname="Byte Count (BCC): 49" size="2" pos="109" show="49" value="3100"/>
+ <field name="smb.trans_name" showname="Transaction Name: \PIPE\" size="14" pos="112" show="\PIPE\" value="5c0050004900500045005c000000"/>
+ <field name="smb.padding" showname="Padding: 0000" size="2" pos="126" show="00:00" value="0000"/>
+ </field>
+ </proto>
+ <proto name="smb_pipe" showname="SMB Pipe Protocol" size="21" pos="105">
+ <field name="smb_pipe.function" showname="Function: TransactNmPipe (0x0026)" size="2" pos="105" show="0x00000026" value="2600"/>
+ <field name="smb.fid" showname="FID: 0x0002 (\samr)" size="2" pos="107" show="0x00000002" value="0200">
+ <field name="smb.fid.opened_in" showname="Opened in: 487" size="0" pos="212" show="487"/>
+ <field name="smb.file" showname="File Name: \samr" size="0" pos="212" show="\samr"/>
+ <field name="smb.create_flags" showname="Create Flags: 0x00000000" size="4" pos="212" show="0x00000000" value="26000200">
+ <field name="smb.nt.create.oplock" showname=".... .... .... .... .... .... .... ..0. = Exclusive Oplock: Does NOT request oplock" size="4" pos="105" show="0" value="0" unmaskedvalue="26000200"/>
+ <field name="smb.nt.create.batch_oplock" showname=".... .... .... .... .... .... .... .0.. = Batch Oplock: Does NOT request batch oplock" size="4" pos="105" show="0" value="0" unmaskedvalue="26000200"/>
+ <field name="smb.nt.create.dir" showname=".... .... .... .... .... .... .... 0... = Create Directory: Target of open can be a file" size="4" pos="105" show="0" value="0" unmaskedvalue="26000200"/>
+ <field name="smb.nt.create.ext" showname=".... .... .... .... .... .... ...0 .... = Extended Response: Extended responses NOT required" size="4" pos="105" show="0" value="0" unmaskedvalue="26000200"/>
+ </field>
+ <field name="smb.access_mask" showname="Access Mask: 0x0002019f" size="4" pos="212" show="0x0002019f" value="26000200">
+ <field name="smb.access.read" showname=".... .... .... .... .... .... .... ...1 = Read: READ access" size="4" pos="105" show="1" value="FFFFFFFF" unmaskedvalue="26000200"/>
+ <field name="smb.access.write" showname=".... .... .... .... .... .... .... ..1. = Write: WRITE access" size="4" pos="105" show="1" value="FFFFFFFF" unmaskedvalue="26000200"/>
+ <field name="smb.access.append" showname=".... .... .... .... .... .... .... .1.. = Append: APPEND access" size="4" pos="105" show="1" value="FFFFFFFF" unmaskedvalue="26000200"/>
+ <field name="smb.access.read_ea" showname=".... .... .... .... .... .... .... 1... = Read EA: READ EXTENDED ATTRIBUTES access" size="4" pos="105" show="1" value="FFFFFFFF" unmaskedvalue="26000200"/>
+ <field name="smb.access.write_ea" showname=".... .... .... .... .... .... ...1 .... = Write EA: WRITE EXTENDED ATTRIBUTES access" size="4" pos="105" show="1" value="FFFFFFFF" unmaskedvalue="26000200"/>
+ <field name="smb.access.execute" showname=".... .... .... .... .... .... ..0. .... = Execute: NO execute access" size="4" pos="105" show="0" value="0" unmaskedvalue="26000200"/>
+ <field name="smb.access.delete_child" showname=".... .... .... .... .... .... .0.. .... = Delete Child: NO delete child access" size="4" pos="105" show="0" value="0" unmaskedvalue="26000200"/>
+ <field name="smb.access.read_attributes" showname=".... .... .... .... .... .... 1... .... = Read Attributes: READ ATTRIBUTES access" size="4" pos="105" show="1" value="FFFFFFFF" unmaskedvalue="26000200"/>
+ <field name="smb.access.write_attributes" showname=".... .... .... .... .... ...1 .... .... = Write Attributes: WRITE ATTRIBUTES access" size="4" pos="105" show="1" value="FFFFFFFF" unmaskedvalue="26000200"/>
+ <field name="smb.access.delete" showname=".... .... .... ...0 .... .... .... .... = Delete: NO delete access" size="4" pos="105" show="0" value="0" unmaskedvalue="26000200"/>
+ <field name="smb.access.read_control" showname=".... .... .... ..1. .... .... .... .... = Read Control: READ ACCESS to owner, group and ACL of the SID" size="4" pos="105" show="1" value="FFFFFFFF" unmaskedvalue="26000200"/>
+ <field name="smb.access.write_dac" showname=".... .... .... .0.. .... .... .... .... = Write DAC: Owner may NOT write to the DAC" size="4" pos="105" show="0" value="0" unmaskedvalue="26000200"/>
+ <field name="smb.access.write_owner" showname=".... .... .... 0... .... .... .... .... = Write Owner: Can NOT write owner (take ownership)" size="4" pos="105" show="0" value="0" unmaskedvalue="26000200"/>
+ <field name="smb.access.synchronize" showname=".... .... ...0 .... .... .... .... .... = Synchronize: Can NOT wait on handle to synchronize on completion of I/O" size="4" pos="105" show="0" value="0" unmaskedvalue="26000200"/>
+ <field name="smb.access.system_security" showname=".... ...0 .... .... .... .... .... .... = System Security: System security is NOT set" size="4" pos="105" show="0" value="0" unmaskedvalue="26000200"/>
+ <field name="smb.access.maximum_allowed" showname=".... ..0. .... .... .... .... .... .... = Maximum Allowed: Maximum allowed is NOT set" size="4" pos="105" show="0" value="0" unmaskedvalue="26000200"/>
+ <field name="smb.access.generic_all" showname="...0 .... .... .... .... .... .... .... = Generic All: Generic all is NOT set" size="4" pos="105" show="0" value="0" unmaskedvalue="26000200"/>
+ <field name="smb.access.generic_execute" showname="..0. .... .... .... .... .... .... .... = Generic Execute: Generic execute is NOT set" size="4" pos="105" show="0" value="0" unmaskedvalue="26000200"/>
+ <field name="smb.access.generic_write" showname=".0.. .... .... .... .... .... .... .... = Generic Write: Generic write is NOT set" size="4" pos="105" show="0" value="0" unmaskedvalue="26000200"/>
+ <field name="smb.access.generic_read" showname="0... .... .... .... .... .... .... .... = Generic Read: Generic read is NOT set" size="4" pos="105" show="0" value="0" unmaskedvalue="26000200"/>
+ </field>
+ <field name="smb.file_attribute" showname="File Attributes: 0x00000000" size="4" pos="212" show="0x00000000" value="26000200">
+ <field name="smb.file_attribute.read_only" showname=".... .... .... .... .... .... .... ...0 = Read Only: NOT read only" size="4" pos="105" show="0" value="0" unmaskedvalue="26000200"/>
+ <field name="smb.file_attribute.hidden" showname=".... .... .... .... .... .... .... ..0. = Hidden: NOT hidden" size="4" pos="105" show="0" value="0" unmaskedvalue="26000200"/>
+ <field name="smb.file_attribute.system" showname=".... .... .... .... .... .... .... .0.. = System: NOT a system file/dir" size="4" pos="105" show="0" value="0" unmaskedvalue="26000200"/>
+ <field name="smb.file_attribute.volume" showname=".... .... .... .... .... .... .... 0... = Volume ID: NOT a volume ID" size="4" pos="105" show="0" value="0" unmaskedvalue="26000200"/>
+ <field name="smb.file_attribute.directory" showname=".... .... .... .... .... .... ...0 .... = Directory: NOT a directory" size="4" pos="105" show="0" value="0" unmaskedvalue="26000200"/>
+ <field name="smb.file_attribute.archive" showname=".... .... .... .... .... .... ..0. .... = Archive: Has NOT been modified since last archive" size="4" pos="105" show="0" value="0" unmaskedvalue="26000200"/>
+ <field name="smb.file_attribute.device" showname=".... .... .... .... .... .... .0.. .... = Device: NOT a device" size="4" pos="105" show="0" value="0" unmaskedvalue="26000200"/>
+ <field name="smb.file_attribute.normal" showname=".... .... .... .... .... .... 0... .... = Normal: Has some attribute set" size="4" pos="105" show="0" value="0" unmaskedvalue="26000200"/>
+ <field name="smb.file_attribute.temporary" showname=".... .... .... .... .... ...0 .... .... = Temporary: NOT a temporary file" size="4" pos="105" show="0" value="0" unmaskedvalue="26000200"/>
+ <field name="smb.file_attribute.sparse" showname=".... .... .... .... .... ..0. .... .... = Sparse: NOT a sparse file" size="4" pos="105" show="0" value="0" unmaskedvalue="26000200"/>
+ <field name="smb.file_attribute.reparse" showname=".... .... .... .... .... .0.. .... .... = Reparse Point: Does NOT have an associated reparse point" size="4" pos="105" show="0" value="0" unmaskedvalue="26000200"/>
+ <field name="smb.file_attribute.compressed" showname=".... .... .... .... .... 0... .... .... = Compressed: Uncompressed" size="4" pos="105" show="0" value="0" unmaskedvalue="26000200"/>
+ <field name="smb.file_attribute.offline" showname=".... .... .... .... ...0 .... .... .... = Offline: Online" size="4" pos="105" show="0" value="0" unmaskedvalue="26000200"/>
+ <field name="smb.file_attribute.not_content_indexed" showname=".... .... .... .... ..0. .... .... .... = Content Indexed: NOT content indexed" size="4" pos="105" show="0" value="0" unmaskedvalue="26000200"/>
+ <field name="smb.file_attribute.encrypted" showname=".... .... .... .... .0.. .... .... .... = Encrypted: This is NOT an encrypted file" size="4" pos="105" show="0" value="0" unmaskedvalue="26000200"/>
+ </field>
+ <field name="smb.share_access" showname="Share Access: 0x00000003, Read, Write" size="4" pos="212" show="0x00000003" value="26000200">
+ <field name="smb.share.access.read" showname=".... .... .... .... .... .... .... ...1 = Read: Object can be shared for READ" size="4" pos="105" show="1" value="FFFFFFFF" unmaskedvalue="26000200"/>
+ <field name="smb.share.access.write" showname=".... .... .... .... .... .... .... ..1. = Write: Object can be shared for WRITE" size="4" pos="105" show="1" value="FFFFFFFF" unmaskedvalue="26000200"/>
+ <field name="smb.share.access.delete" showname=".... .... .... .... .... .... .... .0.. = Delete: Object can NOT be shared for delete" size="4" pos="105" show="0" value="0" unmaskedvalue="26000200"/>
+ </field>
+ <field name="smb.create_options" showname="Create Options: 0x00000000" size="4" pos="212" show="0x00000000" value="26000200">
+ <field name="smb.nt.create_options.directory" showname=".... .... .... .... .... .... .... ...0 = Directory: File being created/opened must not be a directory" size="4" pos="105" show="0" value="0" unmaskedvalue="26000200"/>
+ <field name="smb.nt.create_options.write_through" showname=".... .... .... .... .... .... .... ..0. = Write Through: Writes need not flush buffered data before completing" size="4" pos="105" show="0" value="0" unmaskedvalue="26000200"/>
+ <field name="smb.nt.create_options.sequential_only" showname=".... .... .... .... .... .... .... .0.. = Sequential Only: The file might not only be accessed sequentially" size="4" pos="105" show="0" value="0" unmaskedvalue="26000200"/>
+ <field name="smb.nt.create_options.intermediate_buffering" showname=".... .... .... .... .... .... .... 0... = Intermediate Buffering: Intermediate buffering is allowed" size="4" pos="105" show="0" value="0" unmaskedvalue="26000200"/>
+ <field name="smb.nt.create_options.sync_io_alert" showname=".... .... .... .... .... .... ...0 .... = Sync I/O Alert: Operations NOT necessarily synchronous" size="4" pos="105" show="0" value="0" unmaskedvalue="26000200"/>
+ <field name="smb.nt.create_options.sync_io_nonalert" showname=".... .... .... .... .... .... ..0. .... = Sync I/O Nonalert: Operations NOT necessarily synchronous" size="4" pos="105" show="0" value="0" unmaskedvalue="26000200"/>
+ <field name="smb.nt.create_options.non_directory" showname=".... .... .... .... .... .... .0.. .... = Non-Directory: File being created/opened must be a directory" size="4" pos="105" show="0" value="0" unmaskedvalue="26000200"/>
+ <field name="smb.nt.create_options.create_tree_connection" showname=".... .... .... .... .... .... 0... .... = Create Tree Connection: Create Tree Connections is NOT set" size="4" pos="105" show="0" value="0" unmaskedvalue="26000200"/>
+ <field name="smb.nt.create_options.complete_if_oplocked" showname=".... .... .... .... .... ...0 .... .... = Complete If Oplocked: Complete if oplocked is NOT set" size="4" pos="105" show="0" value="0" unmaskedvalue="26000200"/>
+ <field name="smb.nt.create_options.no_ea_knowledge" showname=".... .... .... .... .... ..0. .... .... = No EA Knowledge: The client understands extended attributes" size="4" pos="105" show="0" value="0" unmaskedvalue="26000200"/>
+ <field name="smb.nt.create_options.eight_dot_three_only" showname=".... .... .... .... .... .0.. .... .... = 8.3 Only: The client understands long file names" size="4" pos="105" show="0" value="0" unmaskedvalue="26000200"/>
+ <field name="smb.nt.create_options.random_access" showname=".... .... .... .... .... 0... .... .... = Random Access: The file will not be accessed randomly" size="4" pos="105" show="0" value="0" unmaskedvalue="26000200"/>
+ <field name="smb.nt.create_options.delete_on_close" showname=".... .... .... .... ...0 .... .... .... = Delete On Close: The file should not be deleted when it is closed" size="4" pos="105" show="0" value="0" unmaskedvalue="26000200"/>
+ <field name="smb.nt.create_options.open_by_fileid" showname=".... .... .... .... ..0. .... .... .... = Open By FileID: OpenByFileID is NOT set" size="4" pos="105" show="0" value="0" unmaskedvalue="26000200"/>
+ <field name="smb.nt.create_options.backup_intent" showname=".... .... .... .... .0.. .... .... .... = Backup Intent: This is a normal create" size="4" pos="105" show="0" value="0" unmaskedvalue="26000200"/>
+ <field name="smb.nt.create_options.no_compression" showname=".... .... .... .... 0... .... .... .... = No Compression: Compression is allowed for Open/Create" size="4" pos="105" show="0" value="0" unmaskedvalue="26000200"/>
+ <field name="smb.nt.create_options.reserve_opfilter" showname=".... .... ...0 .... .... .... .... .... = Reserve Opfilter: Reserve Opfilter is NOT set" size="4" pos="105" show="0" value="0" unmaskedvalue="26000200"/>
+ <field name="smb.nt.create_options.open_reparse_point" showname=".... .... ..0. .... .... .... .... .... = Open Reparse Point: Normal open" size="4" pos="105" show="0" value="0" unmaskedvalue="26000200"/>
+ <field name="smb.nt.create_options.open_no_recall" showname=".... .... .0.. .... .... .... .... .... = Open No Recall: Open no recall is NOT set" size="4" pos="105" show="0" value="0" unmaskedvalue="26000200"/>
+ <field name="smb.nt.create_options.open_for_free_space_query" showname=".... .... 0... .... .... .... .... .... = Open For Free Space query: This is NOT an open for free space query" size="4" pos="105" show="0" value="0" unmaskedvalue="26000200"/>
+ </field>
+ <field name="smb.create.disposition" showname="Disposition: Open (if file exists open it, else fail) (1)" size="0" pos="212" show="1"/>
+ </field>
+ </proto>
+ <proto name="dcerpc" showname="Distributed Computing Environment / Remote Procedure Call (DCE/RPC) Request, Fragment: Single, FragLen: 32, Call: 2, Ctx: 0" size="32" pos="128">
+ <field name="dcerpc.ver" showname="Version: 5" size="1" pos="128" show="5" value="05"/>
+ <field name="dcerpc.ver_minor" showname="Version (minor): 0" size="1" pos="129" show="0" value="00"/>
+ <field name="dcerpc.pkt_type" showname="Packet type: Request (0)" size="1" pos="130" show="0" value="00"/>
+ <field name="dcerpc.cn_flags" showname="Packet Flags: 0x03" size="1" pos="131" show="0x00000003" value="03">
+ <field name="dcerpc.cn_flags.object" showname="0... .... = Object: Not set" size="1" pos="131" show="0" value="0" unmaskedvalue="03"/>
+ <field name="dcerpc.cn_flags.maybe" showname=".0.. .... = Maybe: Not set" size="1" pos="131" show="0" value="0" unmaskedvalue="03"/>
+ <field name="dcerpc.cn_flags.dne" showname="..0. .... = Did Not Execute: Not set" size="1" pos="131" show="0" value="0" unmaskedvalue="03"/>
+ <field name="dcerpc.cn_flags.mpx" showname="...0 .... = Multiplex: Not set" size="1" pos="131" show="0" value="0" unmaskedvalue="03"/>
+ <field name="dcerpc.cn_flags.reserved" showname=".... 0... = Reserved: Not set" size="1" pos="131" show="0" value="0" unmaskedvalue="03"/>
+ <field name="dcerpc.cn_flags.cancel_pending" showname=".... .0.. = Cancel Pending: Not set" size="1" pos="131" show="0" value="0" unmaskedvalue="03"/>
+ <field name="dcerpc.cn_flags.last_frag" showname=".... ..1. = Last Frag: Set" size="1" pos="131" show="1" value="FFFFFFFF" unmaskedvalue="03"/>
+ <field name="dcerpc.cn_flags.first_frag" showname=".... ...1 = First Frag: Set" size="1" pos="131" show="1" value="FFFFFFFF" unmaskedvalue="03"/>
+ </field>
+ <field name="dcerpc.drep" showname="Data Representation: 10000000" size="4" pos="132" show="10:00:00:00" value="10000000">
+ <field name="dcerpc.drep.byteorder" showname="Byte order: Little-endian (1)" size="1" pos="132" show="1" value="10"/>
+ <field name="dcerpc.drep.character" showname="Character: ASCII (0)" size="1" pos="132" show="0" value="10"/>
+ <field name="dcerpc.drep.fp" showname="Floating-point: IEEE (0)" size="1" pos="133" show="0" value="00"/>
+ </field>
+ <field name="dcerpc.cn_frag_len" showname="Frag Length: 32" size="2" pos="136" show="32" value="2000"/>
+ <field name="dcerpc.cn_auth_len" showname="Auth Length: 0" size="2" pos="138" show="0" value="0000"/>
+ <field name="dcerpc.cn_call_id" showname="Call ID: 2" size="4" pos="140" show="2" value="02000000"/>
+ <field name="dcerpc.cn_alloc_hint" showname="Alloc hint: 8" size="4" pos="144" show="8" value="08000000"/>
+ <field name="dcerpc.cn_ctx_id" showname="Context ID: 0" size="2" pos="148" show="0" value="0000"/>
+ <field name="dcerpc.opnum" showname="Opnum: 0" size="2" pos="150" show="0" value="0000"/>
+ </proto>
+ <proto name="samr" showname="SAMR (pidl), Connect" size="8" pos="152">
+ <field name="samr.opnum" showname="Operation: Connect (0)" size="0" pos="152" show="0"/>
+ <field name="dcerpc.null_pointer" showname="NULL Pointer: Pointer to System Name (uint16)" size="4" pos="152" show="00:00:00:00" value="00000000"/>
+ <field name="samr.connect.access_mask" showname="Access Mask: 0x02000000" size="4" pos="156" show="0x02000000" value="00000002">
+ <field name="" show="Generic rights: 0x00000000" size="4" pos="156" value="00000002">
+ <field name="nt.access_mask.generic_read" showname="0... .... .... .... .... .... .... .... = Generic read: Not set" size="4" pos="156" show="0" value="0" unmaskedvalue="00000002"/>
+ <field name="nt.access_mask.generic_write" showname=".0.. .... .... .... .... .... .... .... = Generic write: Not set" size="4" pos="156" show="0" value="0" unmaskedvalue="00000002"/>
+ <field name="nt.access_mask.generic_execute" showname="..0. .... .... .... .... .... .... .... = Generic execute: Not set" size="4" pos="156" show="0" value="0" unmaskedvalue="00000002"/>
+ <field name="nt.access_mask.generic_all" showname="...0 .... .... .... .... .... .... .... = Generic all: Not set" size="4" pos="156" show="0" value="0" unmaskedvalue="00000002"/>
+ </field>
+ <field name="nt.access_mask.maximum_allowed" showname=".... ..1. .... .... .... .... .... .... = Maximum allowed: Set" size="4" pos="156" show="1" value="FFFFFFFF" unmaskedvalue="00000002"/>
+ <field name="nt.access_mask.access_sacl" showname=".... .... 0... .... .... .... .... .... = Access SACL: Not set" size="4" pos="156" show="0" value="0" unmaskedvalue="00000002"/>
+ <field name="" show="Standard rights: 0x00000000" size="4" pos="156" value="00000002">
+ <field name="nt.access_mask.synchronise" showname=".... .... ...0 .... .... .... .... .... = Synchronise: Not set" size="4" pos="156" show="0" value="0" unmaskedvalue="00000002"/>
+ <field name="nt.access_mask.write_owner" showname=".... .... .... 0... .... .... .... .... = Write owner: Not set" size="4" pos="156" show="0" value="0" unmaskedvalue="00000002"/>
+ <field name="nt.access_mask.write_dac" showname=".... .... .... .0.. .... .... .... .... = Write DAC: Not set" size="4" pos="156" show="0" value="0" unmaskedvalue="00000002"/>
+ <field name="nt.access_mask.read_control" showname=".... .... .... ..0. .... .... .... .... = Read control: Not set" size="4" pos="156" show="0" value="0" unmaskedvalue="00000002"/>
+ <field name="nt.access_mask.delete" showname=".... .... .... ...0 .... .... .... .... = Delete: Not set" size="4" pos="156" show="0" value="0" unmaskedvalue="00000002"/>
+ </field>
+ <field name="" show="SAMR Connect specific rights: 0x00000000" size="4" pos="156" value="00000002">
+ <field name="samr.samr_ConnectAccessMask.SAMR_ACCESS_LOOKUP_DOMAIN" showname=".... .... .... .... .... .... ..0. .... = Samr Access Lookup Domain: SAMR_ACCESS_LOOKUP_DOMAIN is NOT SET" size="4" pos="156" show="0" value="0" unmaskedvalue="00000002"/>
+ <field name="samr.samr_ConnectAccessMask.SAMR_ACCESS_ENUM_DOMAINS" showname=".... .... .... .... .... .... ...0 .... = Samr Access Enum Domains: SAMR_ACCESS_ENUM_DOMAINS is NOT SET" size="4" pos="156" show="0" value="0" unmaskedvalue="00000002"/>
+ <field name="samr.samr_ConnectAccessMask.SAMR_ACCESS_CREATE_DOMAIN" showname=".... .... .... .... .... .... .... 0... = Samr Access Create Domain: SAMR_ACCESS_CREATE_DOMAIN is NOT SET" size="4" pos="156" show="0" value="0" unmaskedvalue="00000002"/>
+ <field name="samr.samr_ConnectAccessMask.SAMR_ACCESS_INITIALIZE_SERVER" showname=".... .... .... .... .... .... .... .0.. = Samr Access Initialize Server: SAMR_ACCESS_INITIALIZE_SERVER is NOT SET" size="4" pos="156" show="0" value="0" unmaskedvalue="00000002"/>
+ <field name="samr.samr_ConnectAccessMask.SAMR_ACCESS_SHUTDOWN_SERVER" showname=".... .... .... .... .... .... .... ..0. = Samr Access Shutdown Server: SAMR_ACCESS_SHUTDOWN_SERVER is NOT SET" size="4" pos="156" show="0" value="0" unmaskedvalue="00000002"/>
+ <field name="samr.samr_ConnectAccessMask.SAMR_ACCESS_CONNECT_TO_SERVER" showname=".... .... .... .... .... .... .... ...0 = Samr Access Connect To Server: SAMR_ACCESS_CONNECT_TO_SERVER is NOT SET" size="4" pos="156" show="0" value="0" unmaskedvalue="00000002"/>
+ </field>
+ </field>
+ </proto>
+</packet>
+
+<packet>
+ <proto name="geninfo" pos="0" showname="General information" size="244">
+ <field name="num" pos="0" show="618" showname="Number" value="26a" size="244"/>
+ <field name="len" pos="0" show="244" showname="Frame Length" value="f4" size="244"/>
+ <field name="caplen" pos="0" show="244" showname="Captured Length" value="f4" size="244"/>
+ <field name="timestamp" pos="0" show="Feb 16, 2017 12:18:11.039416000 NZDT" showname="Captured Time" value="1487200691.039416000" size="244"/>
+ </proto>
+ <proto name="frame" showname="Frame 618: 244 bytes on wire (1952 bits), 244 bytes captured (1952 bits)" size="244" pos="0">
+ <field name="frame.encap_type" showname="Encapsulation type: Raw IP (7)" size="0" pos="0" show="7"/>
+ <field name="frame.time" showname="Arrival Time: Feb 16, 2017 12:18:11.039416000 NZDT" size="0" pos="0" show="Feb 16, 2017 12:18:11.039416000 NZDT"/>
+ <field name="frame.offset_shift" showname="Time shift for this packet: 0.000000000 seconds" size="0" pos="0" show="0.000000000"/>
+ <field name="frame.time_epoch" showname="Epoch Time: 1487200691.039416000 seconds" size="0" pos="0" show="1487200691.039416000"/>
+ <field name="frame.time_delta" showname="Time delta from previous captured frame: 0.000200000 seconds" size="0" pos="0" show="0.000200000"/>
+ <field name="frame.time_delta_displayed" showname="Time delta from previous displayed frame: 0.000200000 seconds" size="0" pos="0" show="0.000200000"/>
+ <field name="frame.time_relative" showname="Time since reference or first frame: 5.801692000 seconds" size="0" pos="0" show="5.801692000"/>
+ <field name="frame.number" showname="Frame Number: 618" size="0" pos="0" show="618"/>
+ <field name="frame.len" showname="Frame Length: 244 bytes (1952 bits)" size="0" pos="0" show="244"/>
+ <field name="frame.cap_len" showname="Capture Length: 244 bytes (1952 bits)" size="0" pos="0" show="244"/>
+ <field name="frame.marked" showname="Frame is marked: False" size="0" pos="0" show="0"/>
+ <field name="frame.ignored" showname="Frame is ignored: False" size="0" pos="0" show="0"/>
+ <field name="frame.protocols" showname="Protocols in frame: raw:ip:tcp:dcerpc:spnego-krb5:spnego-krb5" size="0" pos="0" show="raw:ip:tcp:dcerpc:spnego-krb5:spnego-krb5"/>
+ </proto>
+ <proto name="raw" showname="Raw packet data" size="244" pos="0"/>
+ <proto name="ip" showname="Internet Protocol Version 4, Src: 127.0.0.11, Dst: 127.0.0.21" size="20" pos="0">
+ <field name="ip.version" showname="0100 .... = Version: 4" size="1" pos="0" show="4" value="4" unmaskedvalue="45"/>
+ <field name="ip.hdr_len" showname=".... 0101 = Header Length: 20 bytes" size="1" pos="0" show="5" value="5" unmaskedvalue="45"/>
+ <field name="ip.dsfield" showname="Differentiated Services Field: 0x00 (DSCP: CS0, ECN: Not-ECT)" size="1" pos="1" show="0x00000000" value="00">
+ <field name="ip.dsfield.dscp" showname="0000 00.. = Differentiated Services Codepoint: Default (0)" size="1" pos="1" show="0" value="0" unmaskedvalue="00"/>
+ <field name="ip.dsfield.ecn" showname=".... ..00 = Explicit Congestion Notification: Not ECN-Capable Transport (0)" size="1" pos="1" show="0" value="0" unmaskedvalue="00"/>
+ </field>
+ <field name="ip.len" showname="Total Length: 244" size="2" pos="2" show="244" value="00f4"/>
+ <field name="ip.id" showname="Identification: 0xffff (65535)" size="2" pos="4" show="0x0000ffff" value="ffff"/>
+ <field name="ip.flags" showname="Flags: 0x02 (Don&#x27;t Fragment)" size="1" pos="6" show="0x00000002" value="40">
+ <field name="ip.flags.rb" showname="0... .... = Reserved bit: Not set" size="1" pos="6" show="0" value="40"/>
+ <field name="ip.flags.df" showname=".1.. .... = Don&#x27;t fragment: Set" size="1" pos="6" show="1" value="40"/>
+ <field name="ip.flags.mf" showname="..0. .... = More fragments: Not set" size="1" pos="6" show="0" value="40"/>
+ </field>
+ <field name="ip.frag_offset" showname="Fragment offset: 0" size="2" pos="6" show="0" value="4000"/>
+ <field name="ip.ttl" showname="Time to live: 255" size="1" pos="8" show="255" value="ff"/>
+ <field name="ip.proto" showname="Protocol: TCP (6)" size="1" pos="9" show="6" value="06"/>
+ <field name="ip.checksum" showname="Header checksum: 0x0000 [validation disabled]" size="2" pos="10" show="0x00000000" value="0000">
+ <field name="ip.checksum_good" showname="Good: False" size="2" pos="10" show="0" value="0000"/>
+ <field name="ip.checksum_bad" showname="Bad: False" size="2" pos="10" show="0" value="0000"/>
+ </field>
+ <field name="ip.src" showname="Source: 127.0.0.11" size="4" pos="12" show="127.0.0.11" value="7f00000b"/>
+ <field name="ip.addr" showname="Source or Destination Address: 127.0.0.11" hide="yes" size="4" pos="12" show="127.0.0.11" value="7f00000b"/>
+ <field name="ip.src_host" showname="Source Host: 127.0.0.11" hide="yes" size="4" pos="12" show="127.0.0.11" value="7f00000b"/>
+ <field name="ip.host" showname="Source or Destination Host: 127.0.0.11" hide="yes" size="4" pos="12" show="127.0.0.11" value="7f00000b"/>
+ <field name="ip.dst" showname="Destination: 127.0.0.21" size="4" pos="16" show="127.0.0.21" value="7f000015"/>
+ <field name="ip.addr" showname="Source or Destination Address: 127.0.0.21" hide="yes" size="4" pos="16" show="127.0.0.21" value="7f000015"/>
+ <field name="ip.dst_host" showname="Destination Host: 127.0.0.21" hide="yes" size="4" pos="16" show="127.0.0.21" value="7f000015"/>
+ <field name="ip.host" showname="Source or Destination Host: 127.0.0.21" hide="yes" size="4" pos="16" show="127.0.0.21" value="7f000015"/>
+ <field name="" show="Source GeoIP: Unknown" size="4" pos="12" value="7f00000b"/>
+ <field name="" show="Destination GeoIP: Unknown" size="4" pos="16" value="7f000015"/>
+ </proto>
+ <proto name="tcp" showname="Transmission Control Protocol, Src Port: 19098 (19098), Dst Port: 49152 (49152), Seq: 1870, Ack: 367, Len: 204" size="20" pos="20">
+ <field name="tcp.srcport" showname="Source Port: 19098" size="2" pos="20" show="19098" value="4a9a"/>
+ <field name="tcp.dstport" showname="Destination Port: 49152" size="2" pos="22" show="49152" value="c000"/>
+ <field name="tcp.port" showname="Source or Destination Port: 19098" hide="yes" size="2" pos="20" show="19098" value="4a9a"/>
+ <field name="tcp.port" showname="Source or Destination Port: 49152" hide="yes" size="2" pos="22" show="49152" value="c000"/>
+ <field name="tcp.stream" showname="Stream index: 14" size="0" pos="20" show="14"/>
+ <field name="tcp.len" showname="TCP Segment Len: 204" size="1" pos="32" show="204" value="50"/>
+ <field name="tcp.seq" showname="Sequence number: 1870 (relative sequence number)" size="4" pos="24" show="1870" value="0000074e"/>
+ <field name="tcp.nxtseq" showname="Next sequence number: 2074 (relative sequence number)" size="0" pos="20" show="2074"/>
+ <field name="tcp.ack" showname="Acknowledgment number: 367 (relative ack number)" size="4" pos="28" show="367" value="0000016f"/>
+ <field name="tcp.hdr_len" showname="Header Length: 20 bytes" size="1" pos="32" show="20" value="50"/>
+ <field name="tcp.flags" showname="Flags: 0x018 (PSH, ACK)" size="2" pos="32" show="0x00000018" value="18" unmaskedvalue="5018">
+ <field name="tcp.flags.res" showname="000. .... .... = Reserved: Not set" size="1" pos="32" show="0" value="0" unmaskedvalue="50"/>
+ <field name="tcp.flags.ns" showname="...0 .... .... = Nonce: Not set" size="1" pos="32" show="0" value="0" unmaskedvalue="50"/>
+ <field name="tcp.flags.cwr" showname=".... 0... .... = Congestion Window Reduced (CWR): Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.ecn" showname=".... .0.. .... = ECN-Echo: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.urg" showname=".... ..0. .... = Urgent: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.ack" showname=".... ...1 .... = Acknowledgment: Set" size="1" pos="33" show="1" value="FFFFFFFF" unmaskedvalue="18"/>
+ <field name="tcp.flags.push" showname=".... .... 1... = Push: Set" size="1" pos="33" show="1" value="FFFFFFFF" unmaskedvalue="18"/>
+ <field name="tcp.flags.reset" showname=".... .... .0.. = Reset: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.syn" showname=".... .... ..0. = Syn: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.fin" showname=".... .... ...0 = Fin: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.str" showname="TCP Flags: *******AP***" size="2" pos="32" show="*******AP***" value="5018"/>
+ </field>
+ <field name="tcp.window_size_value" showname="Window size value: 32767" size="2" pos="34" show="32767" value="7fff"/>
+ <field name="tcp.window_size" showname="Calculated window size: 32767" size="2" pos="34" show="32767" value="7fff"/>
+ <field name="tcp.window_size_scalefactor" showname="Window size scaling factor: -2 (no window scaling used)" size="2" pos="34" show="-2" value="7fff"/>
+ <field name="tcp.checksum" showname="Checksum: 0x0000 [validation disabled]" size="2" pos="36" show="0x00000000" value="0000">
+ <field name="tcp.checksum_good" showname="Good Checksum: False" size="2" pos="36" show="0" value="0000"/>
+ <field name="tcp.checksum_bad" showname="Bad Checksum: False" size="2" pos="36" show="0" value="0000"/>
+ </field>
+ <field name="tcp.urgent_pointer" showname="Urgent pointer: 0" size="2" pos="38" show="0" value="0000"/>
+ <field name="tcp.analysis" showname="SEQ/ACK analysis" size="0" pos="20" show="" value="">
+ <field name="tcp.analysis.acks_frame" showname="This is an ACK to the segment in frame: 615" size="0" pos="20" show="615"/>
+ <field name="tcp.analysis.ack_rtt" showname="The RTT to ACK the segment was: 0.000257000 seconds" size="0" pos="20" show="0.000257000"/>
+ <field name="tcp.analysis.initial_rtt" showname="iRTT: 0.000055000 seconds" size="0" pos="20" show="0.000055000"/>
+ <field name="tcp.analysis.bytes_in_flight" showname="Bytes in flight: 204" size="0" pos="20" show="204"/>
+ </field>
+ </proto>
+ <proto name="dcerpc" showname="Distributed Computing Environment / Remote Procedure Call (DCE/RPC) Request, Fragment: Single, FragLen: 204, Call: 2, Ctx: 0" size="204" pos="40">
+ <field name="dcerpc.ver" showname="Version: 5" size="1" pos="40" show="5" value="05"/>
+ <field name="dcerpc.ver_minor" showname="Version (minor): 0" size="1" pos="41" show="0" value="00"/>
+ <field name="dcerpc.pkt_type" showname="Packet type: Request (0)" size="1" pos="42" show="0" value="00"/>
+ <field name="dcerpc.cn_flags" showname="Packet Flags: 0x03" size="1" pos="43" show="0x00000003" value="03">
+ <field name="dcerpc.cn_flags.object" showname="0... .... = Object: Not set" size="1" pos="43" show="0" value="0" unmaskedvalue="03"/>
+ <field name="dcerpc.cn_flags.maybe" showname=".0.. .... = Maybe: Not set" size="1" pos="43" show="0" value="0" unmaskedvalue="03"/>
+ <field name="dcerpc.cn_flags.dne" showname="..0. .... = Did Not Execute: Not set" size="1" pos="43" show="0" value="0" unmaskedvalue="03"/>
+ <field name="dcerpc.cn_flags.mpx" showname="...0 .... = Multiplex: Not set" size="1" pos="43" show="0" value="0" unmaskedvalue="03"/>
+ <field name="dcerpc.cn_flags.reserved" showname=".... 0... = Reserved: Not set" size="1" pos="43" show="0" value="0" unmaskedvalue="03"/>
+ <field name="dcerpc.cn_flags.cancel_pending" showname=".... .0.. = Cancel Pending: Not set" size="1" pos="43" show="0" value="0" unmaskedvalue="03"/>
+ <field name="dcerpc.cn_flags.last_frag" showname=".... ..1. = Last Frag: Set" size="1" pos="43" show="1" value="FFFFFFFF" unmaskedvalue="03"/>
+ <field name="dcerpc.cn_flags.first_frag" showname=".... ...1 = First Frag: Set" size="1" pos="43" show="1" value="FFFFFFFF" unmaskedvalue="03"/>
+ </field>
+ <field name="dcerpc.drep" showname="Data Representation: 10000000" size="4" pos="44" show="10:00:00:00" value="10000000">
+ <field name="dcerpc.drep.byteorder" showname="Byte order: Little-endian (1)" size="1" pos="44" show="1" value="10"/>
+ <field name="dcerpc.drep.character" showname="Character: ASCII (0)" size="1" pos="44" show="0" value="10"/>
+ <field name="dcerpc.drep.fp" showname="Floating-point: IEEE (0)" size="1" pos="45" show="0" value="00"/>
+ </field>
+ <field name="dcerpc.cn_frag_len" showname="Frag Length: 204" size="2" pos="48" show="204" value="cc00"/>
+ <field name="dcerpc.cn_auth_len" showname="Auth Length: 76" size="2" pos="50" show="76" value="4c00"/>
+ <field name="dcerpc.cn_call_id" showname="Call ID: 2" size="4" pos="52" show="2" value="02000000"/>
+ <field name="dcerpc.cn_alloc_hint" showname="Alloc hint: 84" size="4" pos="56" show="84" value="54000000"/>
+ <field name="dcerpc.cn_ctx_id" showname="Context ID: 0" size="2" pos="60" show="0" value="0000"/>
+ <field name="dcerpc.opnum" showname="Opnum: 0" size="2" pos="62" show="0" value="0000"/>
+ <field name="dcerpc.auth_type" showname="Auth type: SPNEGO (9)" size="1" pos="160" show="9" value="09"/>
+ <field name="dcerpc.auth_level" showname="Auth level: Packet privacy (6)" size="1" pos="161" show="6" value="06"/>
+ <field name="dcerpc.auth_pad_len" showname="Auth pad len: 12" size="1" pos="162" show="12" value="0c"/>
+ <field name="dcerpc.auth_rsrvd" showname="Auth Rsrvd: 0" size="1" pos="163" show="0" value="00"/>
+ <field name="dcerpc.auth_ctx_id" showname="Auth Context ID: 1" size="4" pos="164" show="1" value="01000000"/>
+ <proto name="gss-api" showname="GSS-API Generic Security Service Application Program Interface" size="76" pos="168">
+ <field name="spnego.krb5.blob" showname="krb5_blob: 050406ff0010001c000000000bcbcd947efcdcdd031c9af0..." size="76" pos="168" show="05:04:06:ff:00:10:00:1c:00:00:00:00:0b:cb:cd:94:7e:fc:dc:dd:03:1c:9a:f0:b0:c9:a0:30:4e:3f:5a:f9:bd:9f:63:82:c8:cb:eb:fe:08:6b:0f:7d:93:b3:30:d3:6c:90:51:24:3a:38:c9:aa:26:c0:0c:5a:a0:a2:7b:1d:10:4b:31:f6:34:4a:cd:24:f8:c2:c9:73" value="050406ff0010001c000000000bcbcd947efcdcdd031c9af0b0c9a0304e3f5af9bd9f6382c8cbebfe086b0f7d93b330d36c9051243a38c9aa26c00c5aa0a27b1d104b31f6344acd24f8c2c973">
+ <field name="spnego.krb5.tok_id" showname="krb5_tok_id: KRB_TOKEN_CFX_WRAP (0x0405)" size="2" pos="168" show="0x00000405" value="0504"/>
+ <field name="spnego.krb5.cfx_flags" showname="krb5_cfx_flags: 0x06, AcceptorSubkey, Sealed" size="1" pos="170" show="0x00000006" value="06">
+ <field name="spnego.krb5.acceptor_subkey" showname=".... .1.. = AcceptorSubkey: Set" size="1" pos="170" show="1" value="FFFFFFFF" unmaskedvalue="06"/>
+ <field name="spnego.krb5.sealed" showname=".... ..1. = Sealed: Set" size="1" pos="170" show="1" value="FFFFFFFF" unmaskedvalue="06"/>
+ <field name="spnego.krb5.send_by_acceptor" showname=".... ...0 = SendByAcceptor: Not set" size="1" pos="170" show="0" value="0" unmaskedvalue="06"/>
+ </field>
+ <field name="spnego.krb5.filler" showname="krb5_filler: ff" size="1" pos="171" show="ff" value="ff"/>
+ <field name="spnego.krb5.cfx_ec" showname="krb5_cfx_ec: 16" size="2" pos="172" show="16" value="0010"/>
+ <field name="spnego.krb5.cfx_rrc" showname="krb5_cfx_rrc: 28" size="2" pos="174" show="28" value="001c"/>
+ <field name="spnego.krb5.cfx_seq" showname="krb5_cfx_seq: 197905812" size="8" pos="176" show="197905812" value="000000000bcbcd94"/>
+ <field name="spnego.krb5.sgn_cksum" showname="krb5_sgn_cksum: 7efcdcdd031c9af0b0c9a0304e3f5af9bd9f6382c8cbebfe..." size="60" pos="184" show="7e:fc:dc:dd:03:1c:9a:f0:b0:c9:a0:30:4e:3f:5a:f9:bd:9f:63:82:c8:cb:eb:fe:08:6b:0f:7d:93:b3:30:d3:6c:90:51:24:3a:38:c9:aa:26:c0:0c:5a:a0:a2:7b:1d:10:4b:31:f6:34:4a:cd:24:f8:c2:c9:73" value="7efcdcdd031c9af0b0c9a0304e3f5af9bd9f6382c8cbebfe086b0f7d93b330d36c9051243a38c9aa26c00c5aa0a27b1d104b31f6344acd24f8c2c973"/>
+ </field>
+ </proto>
+ </proto>
+ <proto name="drsuapi" showname="DRSUAPI, DsBind" size="96" pos="64">
+ <field name="drsuapi.opnum" showname="Operation: DsBind (0)" size="0" pos="64" show="0"/>
+ <field name="dcerpc.encrypted_stub_data" showname="Encrypted stub data: 536b3d9d2cae12c8cfef430800028a405e0c5b0dd1ab3a67..." size="96" pos="64" show="53:6b:3d:9d:2c:ae:12:c8:cf:ef:43:08:00:02:8a:40:5e:0c:5b:0d:d1:ab:3a:67:7b:bf:dc:66:7d:f1:90:ff:c6:6b:04:07:07:e2:7e:20:ca:73:41:fc:bf:0e:16:07:00:31:88:a1:7d:13:54:99:50:55:29:f1:ce:07:e8:92:78:69:63:7c:f2:60:2a:fd:7b:60:49:5d:7e:bf:dc:b2:2b:b7:47:86:6e:c8:51:1c:53:bb:73:35:dc:5c:b1:b0" value="536b3d9d2cae12c8cfef430800028a405e0c5b0dd1ab3a677bbfdc667df190ffc66b040707e27e20ca7341fcbf0e1607003188a17d135499505529f1ce07e8927869637cf2602afd7b60495d7ebfdcb22bb747866ec8511c53bb7335dc5cb1b0"/>
+ </proto>
+</packet>
+
+<packet>
+ <proto name="geninfo" pos="0" showname="General information" size="314">
+ <field name="num" pos="0" show="1971790" showname="Number" value="1e164e" size="314"/>
+ <field name="len" pos="0" show="314" showname="Frame Length" value="13a" size="314"/>
+ <field name="caplen" pos="0" show="314" showname="Captured Length" value="13a" size="314"/>
+ <field name="timestamp" pos="0" show="Feb 13, 2017 10:23:04.809271000 NZDT" showname="Captured Time" value="1486934584.809271000" size="314"/>
+ </proto>
+ <proto name="frame" showname="Frame 1971790: 314 bytes on wire (2512 bits), 314 bytes captured (2512 bits)" size="314" pos="0">
+ <field name="frame.encap_type" showname="Encapsulation type: Raw IP (7)" size="0" pos="0" show="7"/>
+ <field name="frame.time" showname="Arrival Time: Feb 13, 2017 10:23:04.809271000 NZDT" size="0" pos="0" show="Feb 13, 2017 10:23:04.809271000 NZDT"/>
+ <field name="frame.offset_shift" showname="Time shift for this packet: 0.000000000 seconds" size="0" pos="0" show="0.000000000"/>
+ <field name="frame.time_epoch" showname="Epoch Time: 1486934584.809271000 seconds" size="0" pos="0" show="1486934584.809271000"/>
+ <field name="frame.time_delta" showname="Time delta from previous captured frame: 0.053481000 seconds" size="0" pos="0" show="0.053481000"/>
+ <field name="frame.time_delta_displayed" showname="Time delta from previous displayed frame: 0.053481000 seconds" size="0" pos="0" show="0.053481000"/>
+ <field name="frame.time_relative" showname="Time since reference or first frame: 814.186830000 seconds" size="0" pos="0" show="814.186830000"/>
+ <field name="frame.number" showname="Frame Number: 1971790" size="0" pos="0" show="1971790"/>
+ <field name="frame.len" showname="Frame Length: 314 bytes (2512 bits)" size="0" pos="0" show="314"/>
+ <field name="frame.cap_len" showname="Capture Length: 314 bytes (2512 bits)" size="0" pos="0" show="314"/>
+ <field name="frame.marked" showname="Frame is marked: False" size="0" pos="0" show="0"/>
+ <field name="frame.ignored" showname="Frame is ignored: False" size="0" pos="0" show="0"/>
+ <field name="frame.protocols" showname="Protocols in frame: raw:ip:udp:nbdgm:smb:smb_netlogon" size="0" pos="0" show="raw:ip:udp:nbdgm:smb:smb_netlogon"/>
+ </proto>
+ <proto name="raw" showname="Raw packet data" size="314" pos="0"/>
+ <proto name="ip" showname="Internet Protocol Version 4, Src: 127.0.0.4, Dst: 127.0.0.3" size="20" pos="0">
+ <field name="ip.version" showname="0100 .... = Version: 4" size="1" pos="0" show="4" value="4" unmaskedvalue="45"/>
+ <field name="ip.hdr_len" showname=".... 0101 = Header Length: 20 bytes" size="1" pos="0" show="5" value="5" unmaskedvalue="45"/>
+ <field name="ip.dsfield" showname="Differentiated Services Field: 0x00 (DSCP: CS0, ECN: Not-ECT)" size="1" pos="1" show="0x00000000" value="00">
+ <field name="ip.dsfield.dscp" showname="0000 00.. = Differentiated Services Codepoint: Default (0)" size="1" pos="1" show="0" value="0" unmaskedvalue="00"/>
+ <field name="ip.dsfield.ecn" showname=".... ..00 = Explicit Congestion Notification: Not ECN-Capable Transport (0)" size="1" pos="1" show="0" value="0" unmaskedvalue="00"/>
+ </field>
+ <field name="ip.len" showname="Total Length: 314" size="2" pos="2" show="314" value="013a"/>
+ <field name="ip.id" showname="Identification: 0xffff (65535)" size="2" pos="4" show="0x0000ffff" value="ffff"/>
+ <field name="ip.flags" showname="Flags: 0x02 (Don&#x27;t Fragment)" size="1" pos="6" show="0x00000002" value="40">
+ <field name="ip.flags.rb" showname="0... .... = Reserved bit: Not set" size="1" pos="6" show="0" value="40"/>
+ <field name="ip.flags.df" showname=".1.. .... = Don&#x27;t fragment: Set" size="1" pos="6" show="1" value="40"/>
+ <field name="ip.flags.mf" showname="..0. .... = More fragments: Not set" size="1" pos="6" show="0" value="40"/>
+ </field>
+ <field name="ip.frag_offset" showname="Fragment offset: 0" size="2" pos="6" show="0" value="4000"/>
+ <field name="ip.ttl" showname="Time to live: 255" size="1" pos="8" show="255" value="ff"/>
+ <field name="ip.proto" showname="Protocol: UDP (17)" size="1" pos="9" show="17" value="11"/>
+ <field name="ip.checksum" showname="Header checksum: 0x0000 [validation disabled]" size="2" pos="10" show="0x00000000" value="0000">
+ <field name="ip.checksum_good" showname="Good: False" size="2" pos="10" show="0" value="0000"/>
+ <field name="ip.checksum_bad" showname="Bad: False" size="2" pos="10" show="0" value="0000"/>
+ </field>
+ <field name="ip.src" showname="Source: 127.0.0.4" size="4" pos="12" show="127.0.0.4" value="7f000004"/>
+ <field name="ip.addr" showname="Source or Destination Address: 127.0.0.4" hide="yes" size="4" pos="12" show="127.0.0.4" value="7f000004"/>
+ <field name="ip.src_host" showname="Source Host: 127.0.0.4" hide="yes" size="4" pos="12" show="127.0.0.4" value="7f000004"/>
+ <field name="ip.host" showname="Source or Destination Host: 127.0.0.4" hide="yes" size="4" pos="12" show="127.0.0.4" value="7f000004"/>
+ <field name="ip.dst" showname="Destination: 127.0.0.3" size="4" pos="16" show="127.0.0.3" value="7f000003"/>
+ <field name="ip.addr" showname="Source or Destination Address: 127.0.0.3" hide="yes" size="4" pos="16" show="127.0.0.3" value="7f000003"/>
+ <field name="ip.dst_host" showname="Destination Host: 127.0.0.3" hide="yes" size="4" pos="16" show="127.0.0.3" value="7f000003"/>
+ <field name="ip.host" showname="Source or Destination Host: 127.0.0.3" hide="yes" size="4" pos="16" show="127.0.0.3" value="7f000003"/>
+ <field name="" show="Source GeoIP: Unknown" size="4" pos="12" value="7f000004"/>
+ <field name="" show="Destination GeoIP: Unknown" size="4" pos="16" value="7f000003"/>
+ </proto>
+ <proto name="udp" showname="User Datagram Protocol, Src Port: 138 (138), Dst Port: 138 (138)" size="8" pos="20">
+ <field name="udp.srcport" showname="Source Port: 138" size="2" pos="20" show="138" value="008a"/>
+ <field name="udp.dstport" showname="Destination Port: 138" size="2" pos="22" show="138" value="008a"/>
+ <field name="udp.port" showname="Source or Destination Port: 138" hide="yes" size="2" pos="20" show="138" value="008a"/>
+ <field name="udp.port" showname="Source or Destination Port: 138" hide="yes" size="2" pos="22" show="138" value="008a"/>
+ <field name="udp.length" showname="Length: 294" size="2" pos="24" show="294" value="0126"/>
+ <field name="udp.checksum" showname="Checksum: 0x0000 (none)" size="2" pos="26" show="0x00000000" value="0000">
+ <field name="udp.checksum_good" showname="Good Checksum: False" size="2" pos="26" show="0" value="0000"/>
+ <field name="udp.checksum_bad" showname="Bad Checksum: False" size="2" pos="26" show="0" value="0000"/>
+ </field>
+ <field name="udp.stream" showname="Stream index: 322" size="0" pos="28" show="322"/>
+ </proto>
+ <proto name="nbdgm" showname="NetBIOS Datagram Service" size="82" pos="28">
+ <field name="nbdgm.type" showname="Message Type: Direct_group datagram (17)" size="1" pos="28" show="17" value="11"/>
+ <field name="nbdgm.next" showname="More fragments follow: No" size="1" pos="29" show="0" value="0a"/>
+ <field name="nbdgm.first" showname="This is first fragment: Yes" size="1" pos="29" show="1" value="0a"/>
+ <field name="nbdgm.node_type" showname="Node Type: M node (2)" size="1" pos="29" show="2" value="0a"/>
+ <field name="nbdgm.dgram_id" showname="Datagram ID: 0x7172" size="2" pos="30" show="0x00007172" value="7172"/>
+ <field name="nbdgm.src.ip" showname="Source IP: 127.0.0.4" size="4" pos="32" show="127.0.0.4" value="7f000004"/>
+ <field name="nbdgm.src.port" showname="Source Port: 138" size="2" pos="36" show="138" value="008a"/>
+ <field name="nbdgm.dgram_len" showname="Datagram length: 272 bytes" size="2" pos="38" show="272" value="0110"/>
+ <field name="nbdgm.pkt_offset" showname="Packet offset: 0 bytes" size="2" pos="40" show="0" value="0000"/>
+ <field name="nbdgm.source_name" showname="Source name: LOCALNT4MEMBER3&lt;00&gt; (Workstation/Redirector)" size="34" pos="42" show="LOCALNT4MEMBER3&lt;00&gt;" value="20454d455045444542454d454f46454445454e4546454e4543454646434444414100"/>
+ <field name="nbdgm.destination_name" showname="Destination name: SAMBA-TEST&lt;1c&gt; (Domain Controllers)" size="34" pos="76" show="SAMBA-TEST&lt;1c&gt;" value="2046444542454e45434542434e464545464644464543414341434143414341424d00"/>
+ </proto>
+ <proto name="smb" showname="SMB (Server Message Block Protocol)" size="204" pos="110">
+ <field name="" show="SMB Header" size="32" pos="110" value="ff534d4225000000000000000000000000000000000000000000000000000000">
+ <field name="smb.server_component" showname="Server Component: SMB" size="4" pos="110" show="0x424d53ff" value="ff534d42"/>
+ <field name="smb.cmd" showname="SMB Command: Trans (0x25)" size="1" pos="114" show="37" value="25"/>
+ <field name="smb.error_class" showname="Error Class: Success (0x00)" size="1" pos="115" show="0x00000000" value="00"/>
+ <field name="smb.reserved" showname="Reserved: 00" size="1" pos="116" show="00" value="00"/>
+ <field name="smb.error_code" showname="Error Code: No Error" size="2" pos="117" show="0x00000000" value="0000"/>
+ <field name="smb.flags" showname="Flags: 0x00" size="1" pos="119" show="0x00000000" value="00">
+ <field name="smb.flags.response" showname="0... .... = Request/Response: Message is a request to the server" size="1" pos="119" show="0" value="0" unmaskedvalue="00"/>
+ <field name="smb.flags.notify" showname=".0.. .... = Notify: Notify client only on open" size="1" pos="119" show="0" value="0" unmaskedvalue="00"/>
+ <field name="smb.flags.oplock" showname="..0. .... = Oplocks: OpLock not requested/granted" size="1" pos="119" show="0" value="0" unmaskedvalue="00"/>
+ <field name="smb.flags.canon" showname="...0 .... = Canonicalized Pathnames: Pathnames are not canonicalized" size="1" pos="119" show="0" value="0" unmaskedvalue="00"/>
+ <field name="smb.flags.caseless" showname=".... 0... = Case Sensitivity: Path names are case sensitive" size="1" pos="119" show="0" value="0" unmaskedvalue="00"/>
+ <field name="smb.flags.receive_buffer" showname=".... ..0. = Receive Buffer Posted: Receive buffer has not been posted" size="1" pos="119" show="0" value="0" unmaskedvalue="00"/>
+ <field name="smb.flags.lock" showname=".... ...0 = Lock and Read: Lock&amp;Read, Write&amp;Unlock are not supported" size="1" pos="119" show="0" value="0" unmaskedvalue="00"/>
+ </field>
+ <field name="smb.flags2" showname="Flags2: 0x0000" size="2" pos="120" show="0x00000000" value="0000">
+ <field name="smb.flags2.string" showname="0... .... .... .... = Unicode Strings: Strings are ASCII" size="2" pos="120" show="0" value="0" unmaskedvalue="0000"/>
+ <field name="smb.flags2.nt_error" showname=".0.. .... .... .... = Error Code Type: Error codes are DOS error codes" size="2" pos="120" show="0" value="0" unmaskedvalue="0000"/>
+ <field name="smb.flags2.roe" showname="..0. .... .... .... = Execute-only Reads: Don&#x27;t permit reads if execute-only" size="2" pos="120" show="0" value="0" unmaskedvalue="0000"/>
+ <field name="smb.flags2.dfs" showname="...0 .... .... .... = Dfs: Don&#x27;t resolve pathnames with Dfs" size="2" pos="120" show="0" value="0" unmaskedvalue="0000"/>
+ <field name="smb.flags2.esn" showname=".... 0... .... .... = Extended Security Negotiation: Extended security negotiation is not supported" size="2" pos="120" show="0" value="0" unmaskedvalue="0000"/>
+ <field name="smb.flags2.reparse_path" showname=".... .0.. .... .... = Reparse Path: The request does not use a @GMT reparse path" size="2" pos="120" show="0" value="0" unmaskedvalue="0000"/>
+ <field name="smb.flags2.long_names_used" showname=".... .... .0.. .... = Long Names Used: Path names in request are not long file names" size="2" pos="120" show="0" value="0" unmaskedvalue="0000"/>
+ <field name="smb.flags2.sec_sig_required" showname=".... .... ...0 .... = Security Signatures Required: Security signatures are not required" size="2" pos="120" show="0" value="0" unmaskedvalue="0000"/>
+ <field name="smb.flags2.compressed" showname=".... .... .... 0... = Compressed: Compression is not requested" size="2" pos="120" show="0" value="0" unmaskedvalue="0000"/>
+ <field name="smb.flags2.sec_sig" showname=".... .... .... .0.. = Security Signatures: Security signatures are not supported" size="2" pos="120" show="0" value="0" unmaskedvalue="0000"/>
+ <field name="smb.flags2.ea" showname=".... .... .... ..0. = Extended Attributes: Extended attributes are not supported" size="2" pos="120" show="0" value="0" unmaskedvalue="0000"/>
+ <field name="smb.flags2.long_names_allowed" showname=".... .... .... ...0 = Long Names Allowed: Long file names are not allowed in the response" size="2" pos="120" show="0" value="0" unmaskedvalue="0000"/>
+ </field>
+ <field name="smb.pid.high" showname="Process ID High: 0" size="2" pos="122" show="0" value="0000"/>
+ <field name="smb.signature" showname="Signature: 0000000000000000" size="8" pos="124" show="00:00:00:00:00:00:00:00" value="0000000000000000"/>
+ <field name="smb.reserved" showname="Reserved: 0000" size="2" pos="132" show="00:00" value="0000"/>
+ <field name="smb.tid" showname="Tree ID: 0" size="2" pos="134" show="0" value="0000"/>
+ <field name="smb.pid" showname="Process ID: 0" size="2" pos="136" show="0" value="0000"/>
+ <field name="smb.uid" showname="User ID: 0" size="2" pos="138" show="0" value="0000"/>
+ <field name="smb.mid" showname="Multiplex ID: 0" size="2" pos="140" show="0" value="0000"/>
+ </field>
+ <field name="" show="Trans Request (0x25)" size="172" pos="142" value="110000710000000000000000000000000000000000000071005b00030001000000020087005c4d41494c534c4f545c4e45545c4e544c4f474f4e00120000004c004f00430041004c004e00540034004d0045004d00420045005200330000004c004f00430041004c004e00540034004d0045004d004200450052003300240000005c4d41494c534c4f545c4e45545c4745544443333030303037460080000000000000000b000000ffffffff">
+ <field name="smb.wct" showname="Word Count (WCT): 17" size="1" pos="142" show="17" value="11"/>
+ <field name="smb.tpc" showname="Total Parameter Count: 0" size="2" pos="143" show="0" value="0000"/>
+ <field name="smb.tdc" showname="Total Data Count: 113" size="2" pos="145" show="113" value="7100"/>
+ <field name="smb.mpc" showname="Max Parameter Count: 0" size="2" pos="147" show="0" value="0000"/>
+ <field name="smb.mdc" showname="Max Data Count: 0" size="2" pos="149" show="0" value="0000"/>
+ <field name="smb.msc" showname="Max Setup Count: 0" size="1" pos="151" show="0" value="00"/>
+ <field name="smb.reserved" showname="Reserved: 00" size="1" pos="152" show="00" value="00"/>
+ <field name="smb.transaction.flags" showname="Flags: 0x0000" size="2" pos="153" show="0x00000000" value="0000">
+ <field name="smb.transaction.flags.owt" showname=".... .... .... ..0. = One Way Transaction: Two way transaction" size="2" pos="153" show="0" value="0" unmaskedvalue="0000"/>
+ <field name="smb.transaction.flags.dtid" showname=".... .... .... ...0 = Disconnect TID: Do NOT disconnect TID" size="2" pos="153" show="0" value="0" unmaskedvalue="0000"/>
+ </field>
+ <field name="smb.timeout" showname="Timeout: Return immediately (0)" size="4" pos="155" show="0" value="00000000"/>
+ <field name="smb.reserved" showname="Reserved: 0000" size="2" pos="159" show="00:00" value="0000"/>
+ <field name="smb.pc" showname="Parameter Count: 0" size="2" pos="161" show="0" value="0000"/>
+ <field name="smb.po" showname="Parameter Offset: 0" size="2" pos="163" show="0" value="0000"/>
+ <field name="smb.dc" showname="Data Count: 113" size="2" pos="165" show="113" value="7100"/>
+ <field name="smb.data_offset" showname="Data Offset: 91" size="2" pos="167" show="91" value="5b00"/>
+ <field name="smb.sc" showname="Setup Count: 3" size="1" pos="169" show="3" value="03"/>
+ <field name="smb.reserved" showname="Reserved: 00" size="1" pos="170" show="00" value="00"/>
+ <field name="smb.bcc" showname="Byte Count (BCC): 135" size="2" pos="177" show="135" value="8700"/>
+ <field name="smb.trans_name" showname="Transaction Name: \MAILSLOT\NET\NTLOGON" size="22" pos="179" show="\MAILSLOT\NET\NTLOGON" value="5c4d41494c534c4f545c4e45545c4e544c4f474f4e00"/>
+ </field>
+ </proto>
+ <proto name="mailslot" showname="SMB MailSlot Protocol" size="30" pos="171">
+ <field name="mailslot.opcode" showname="Opcode: Write Mail Slot (1)" size="2" pos="171" show="1" value="0100"/>
+ <field name="mailslot.priority" showname="Priority: 0" size="2" pos="173" show="0" value="0000"/>
+ <field name="mailslot.class" showname="Class: Unreliable &amp; Broadcast (2)" size="2" pos="175" show="2" value="0200"/>
+ <field name="mailslot.size" showname="Size: 135" size="2" pos="177" show="135" value="8700"/>
+ <field name="mailslot.name" showname="Mailslot Name: \MAILSLOT\NET\NTLOGON" size="22" pos="179" show="\MAILSLOT\NET\NTLOGON" value="5c4d41494c534c4f545c4e45545c4e544c4f474f4e00"/>
+ </proto>
+ <proto name="smb_netlogon" showname="Microsoft Windows Logon Protocol (Old)" size="113" pos="201">
+ <field name="smb_netlogon.command" showname="Command: SAM LOGON request from client (0x12)" size="1" pos="201" show="0x00000012" value="12"/>
+ <field name="smb_netlogon.request_count" showname="Request Count: 0" size="2" pos="203" show="0" value="0000"/>
+ <field name="smb_netlogon.unicode_computer_name" showname="Unicode Computer Name: LOCALNT4MEMBER3" size="32" pos="205" show="LOCALNT4MEMBER3" value="4c004f00430041004c004e00540034004d0045004d0042004500520033000000"/>
+ <field name="smb_netlogon.user_name" showname="User Name: LOCALNT4MEMBER3$" size="34" pos="237" show="LOCALNT4MEMBER3$" value="4c004f00430041004c004e00540034004d0045004d00420045005200330024000000"/>
+ <field name="smb_netlogon.mailslot_name" showname="Mailslot Name: \MAILSLOT\NET\GETDC300007F" size="27" pos="271" show="\MAILSLOT\NET\GETDC300007F" value="5c4d41494c534c4f545c4e45545c47455444433330303030374600"/>
+ <field name="smb_netlogon.flags" showname="Account control: 0x00000080, Workstation Trust" size="4" pos="298" show="0x00000080" value="80000000">
+ <field name="smb_netlogon.flags.autolock" showname=".... .... .... .... .... .0.. .... .... = Autolock: User account NOT auto-locked" size="4" pos="298" show="0" value="0" unmaskedvalue="80000000"/>
+ <field name="smb_netlogon.flags.expire" showname=".... .... .... .... .... ..0. .... .... = Expire: User password will expire" size="4" pos="298" show="0" value="0" unmaskedvalue="80000000"/>
+ <field name="smb_netlogon.flags.server" showname=".... .... .... .... .... ...0 .... .... = Server Trust: NOT a Server Trust user account" size="4" pos="298" show="0" value="0" unmaskedvalue="80000000"/>
+ <field name="smb_netlogon.flags.workstation" showname=".... .... .... .... .... .... 1... .... = Workstation Trust: Workstation Trust user account" size="4" pos="298" show="1" value="FFFFFFFF" unmaskedvalue="80000000"/>
+ <field name="smb_netlogon.flags.interdomain" showname=".... .... .... .... .... .... .0.. .... = Interdomain Trust: NOT a Inter-domain Trust user account" size="4" pos="298" show="0" value="0" unmaskedvalue="80000000"/>
+ <field name="smb_netlogon.flags.mns" showname=".... .... .... .... .... .... ..0. .... = MNS User: NOT a MNS Logon user account" size="4" pos="298" show="0" value="0" unmaskedvalue="80000000"/>
+ <field name="smb_netlogon.flags.normal" showname=".... .... .... .... .... .... ...0 .... = Normal User: NOT a normal user account" size="4" pos="298" show="0" value="0" unmaskedvalue="80000000"/>
+ <field name="smb_netlogon.flags.temp_dup" showname=".... .... .... .... .... .... .... 0... = Temp Duplicate User: NOT a temp duplicate user account" size="4" pos="298" show="0" value="0" unmaskedvalue="80000000"/>
+ <field name="smb_netlogon.flags.password" showname=".... .... .... .... .... .... .... .0.. = Password: Password required" size="4" pos="298" show="0" value="0" unmaskedvalue="80000000"/>
+ <field name="smb_netlogon.flags.homedir" showname=".... .... .... .... .... .... .... ..0. = Homedir: Homedir required" size="4" pos="298" show="0" value="0" unmaskedvalue="80000000"/>
+ <field name="smb_netlogon.flags.enabled" showname=".... .... .... .... .... .... .... ...0 = Enabled: User account disabled" size="4" pos="298" show="0" value="0" unmaskedvalue="80000000"/>
+ </field>
+ <field name="smb_netlogon.domain_sid_size" showname="Domain SID Size: 0" size="4" pos="302" show="0" value="00000000"/>
+ <field name="smb_netlogon.nt_version" showname="NT Version: 11" size="4" pos="306" show="11" value="0b000000"/>
+ <field name="smb_netlogon.lmnt_token" showname="LMNT Token: 0xffff (Windows NT Networking)" size="2" pos="310" show="0x0000ffff" value="ffff"/>
+ <field name="smb_netlogon.lm_token" showname="LM20 Token: 0xffff (LanMan 2.0 or higher)" size="2" pos="312" show="0x0000ffff" value="ffff"/>
+ </proto>
+</packet>
+
+<packet>
+ <proto name="geninfo" pos="0" showname="General information" size="248">
+ <field name="num" pos="0" show="64697" showname="Number" value="fcb9" size="248"/>
+ <field name="len" pos="0" show="248" showname="Frame Length" value="f8" size="248"/>
+ <field name="caplen" pos="0" show="248" showname="Captured Length" value="f8" size="248"/>
+ <field name="timestamp" pos="0" show="Feb 10, 2017 14:38:39.940434000 NZDT" showname="Captured Time" value="1486690719.940434000" size="248"/>
+ </proto>
+ <proto name="frame" showname="Frame 64697: 248 bytes on wire (1984 bits), 248 bytes captured (1984 bits)" size="248" pos="0">
+ <field name="frame.encap_type" showname="Encapsulation type: Raw IP (7)" size="0" pos="0" show="7"/>
+ <field name="frame.time" showname="Arrival Time: Feb 10, 2017 14:38:39.940434000 NZDT" size="0" pos="0" show="Feb 10, 2017 14:38:39.940434000 NZDT"/>
+ <field name="frame.offset_shift" showname="Time shift for this packet: 0.000000000 seconds" size="0" pos="0" show="0.000000000"/>
+ <field name="frame.time_epoch" showname="Epoch Time: 1486690719.940434000 seconds" size="0" pos="0" show="1486690719.940434000"/>
+ <field name="frame.time_delta" showname="Time delta from previous captured frame: 0.000173000 seconds" size="0" pos="0" show="0.000173000"/>
+ <field name="frame.time_delta_displayed" showname="Time delta from previous displayed frame: 0.000173000 seconds" size="0" pos="0" show="0.000173000"/>
+ <field name="frame.time_relative" showname="Time since reference or first frame: 143.409983000 seconds" size="0" pos="0" show="143.409983000"/>
+ <field name="frame.number" showname="Frame Number: 64697" size="0" pos="0" show="64697"/>
+ <field name="frame.len" showname="Frame Length: 248 bytes (1984 bits)" size="0" pos="0" show="248"/>
+ <field name="frame.cap_len" showname="Capture Length: 248 bytes (1984 bits)" size="0" pos="0" show="248"/>
+ <field name="frame.marked" showname="Frame is marked: False" size="0" pos="0" show="0"/>
+ <field name="frame.ignored" showname="Frame is ignored: False" size="0" pos="0" show="0"/>
+ <field name="frame.protocols" showname="Protocols in frame: raw:ip:tcp:ldap:gss-api:spnego-krb5" size="0" pos="0" show="raw:ip:tcp:ldap:gss-api:spnego-krb5"/>
+ </proto>
+ <proto name="raw" showname="Raw packet data" size="248" pos="0"/>
+ <proto name="ip" showname="Internet Protocol Version 4, Src: 127.0.0.11, Dst: 127.0.0.21" size="20" pos="0">
+ <field name="ip.version" showname="0100 .... = Version: 4" size="1" pos="0" show="4" value="4" unmaskedvalue="45"/>
+ <field name="ip.hdr_len" showname=".... 0101 = Header Length: 20 bytes" size="1" pos="0" show="5" value="5" unmaskedvalue="45"/>
+ <field name="ip.dsfield" showname="Differentiated Services Field: 0x00 (DSCP: CS0, ECN: Not-ECT)" size="1" pos="1" show="0x00000000" value="00">
+ <field name="ip.dsfield.dscp" showname="0000 00.. = Differentiated Services Codepoint: Default (0)" size="1" pos="1" show="0" value="0" unmaskedvalue="00"/>
+ <field name="ip.dsfield.ecn" showname=".... ..00 = Explicit Congestion Notification: Not ECN-Capable Transport (0)" size="1" pos="1" show="0" value="0" unmaskedvalue="00"/>
+ </field>
+ <field name="ip.len" showname="Total Length: 248" size="2" pos="2" show="248" value="00f8"/>
+ <field name="ip.id" showname="Identification: 0xffff (65535)" size="2" pos="4" show="0x0000ffff" value="ffff"/>
+ <field name="ip.flags" showname="Flags: 0x02 (Don&#x27;t Fragment)" size="1" pos="6" show="0x00000002" value="40">
+ <field name="ip.flags.rb" showname="0... .... = Reserved bit: Not set" size="1" pos="6" show="0" value="40"/>
+ <field name="ip.flags.df" showname=".1.. .... = Don&#x27;t fragment: Set" size="1" pos="6" show="1" value="40"/>
+ <field name="ip.flags.mf" showname="..0. .... = More fragments: Not set" size="1" pos="6" show="0" value="40"/>
+ </field>
+ <field name="ip.frag_offset" showname="Fragment offset: 0" size="2" pos="6" show="0" value="4000"/>
+ <field name="ip.ttl" showname="Time to live: 255" size="1" pos="8" show="255" value="ff"/>
+ <field name="ip.proto" showname="Protocol: TCP (6)" size="1" pos="9" show="6" value="06"/>
+ <field name="ip.checksum" showname="Header checksum: 0x0000 [validation disabled]" size="2" pos="10" show="0x00000000" value="0000">
+ <field name="ip.checksum_good" showname="Good: False" size="2" pos="10" show="0" value="0000"/>
+ <field name="ip.checksum_bad" showname="Bad: False" size="2" pos="10" show="0" value="0000"/>
+ </field>
+ <field name="ip.src" showname="Source: 127.0.0.11" size="4" pos="12" show="127.0.0.11" value="7f00000b"/>
+ <field name="ip.addr" showname="Source or Destination Address: 127.0.0.11" hide="yes" size="4" pos="12" show="127.0.0.11" value="7f00000b"/>
+ <field name="ip.src_host" showname="Source Host: 127.0.0.11" hide="yes" size="4" pos="12" show="127.0.0.11" value="7f00000b"/>
+ <field name="ip.host" showname="Source or Destination Host: 127.0.0.11" hide="yes" size="4" pos="12" show="127.0.0.11" value="7f00000b"/>
+ <field name="ip.dst" showname="Destination: 127.0.0.21" size="4" pos="16" show="127.0.0.21" value="7f000015"/>
+ <field name="ip.addr" showname="Source or Destination Address: 127.0.0.21" hide="yes" size="4" pos="16" show="127.0.0.21" value="7f000015"/>
+ <field name="ip.dst_host" showname="Destination Host: 127.0.0.21" hide="yes" size="4" pos="16" show="127.0.0.21" value="7f000015"/>
+ <field name="ip.host" showname="Source or Destination Host: 127.0.0.21" hide="yes" size="4" pos="16" show="127.0.0.21" value="7f000015"/>
+ <field name="" show="Source GeoIP: Unknown" size="4" pos="12" value="7f00000b"/>
+ <field name="" show="Destination GeoIP: Unknown" size="4" pos="16" value="7f000015"/>
+ </proto>
+ <proto name="tcp" showname="Transmission Control Protocol, Src Port: 18512 (18512), Dst Port: 389 (389), Seq: 7674, Ack: 11148, Len: 208" size="20" pos="20">
+ <field name="tcp.srcport" showname="Source Port: 18512" size="2" pos="20" show="18512" value="4850"/>
+ <field name="tcp.dstport" showname="Destination Port: 389" size="2" pos="22" show="389" value="0185"/>
+ <field name="tcp.port" showname="Source or Destination Port: 18512" hide="yes" size="2" pos="20" show="18512" value="4850"/>
+ <field name="tcp.port" showname="Source or Destination Port: 389" hide="yes" size="2" pos="22" show="389" value="0185"/>
+ <field name="tcp.stream" showname="Stream index: 1400" size="0" pos="20" show="1400"/>
+ <field name="tcp.len" showname="TCP Segment Len: 208" size="1" pos="32" show="208" value="50"/>
+ <field name="tcp.seq" showname="Sequence number: 7674 (relative sequence number)" size="4" pos="24" show="7674" value="00001dfa"/>
+ <field name="tcp.nxtseq" showname="Next sequence number: 7882 (relative sequence number)" size="0" pos="20" show="7882"/>
+ <field name="tcp.ack" showname="Acknowledgment number: 11148 (relative ack number)" size="4" pos="28" show="11148" value="00002b8c"/>
+ <field name="tcp.hdr_len" showname="Header Length: 20 bytes" size="1" pos="32" show="20" value="50"/>
+ <field name="tcp.flags" showname="Flags: 0x018 (PSH, ACK)" size="2" pos="32" show="0x00000018" value="18" unmaskedvalue="5018">
+ <field name="tcp.flags.res" showname="000. .... .... = Reserved: Not set" size="1" pos="32" show="0" value="0" unmaskedvalue="50"/>
+ <field name="tcp.flags.ns" showname="...0 .... .... = Nonce: Not set" size="1" pos="32" show="0" value="0" unmaskedvalue="50"/>
+ <field name="tcp.flags.cwr" showname=".... 0... .... = Congestion Window Reduced (CWR): Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.ecn" showname=".... .0.. .... = ECN-Echo: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.urg" showname=".... ..0. .... = Urgent: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.ack" showname=".... ...1 .... = Acknowledgment: Set" size="1" pos="33" show="1" value="FFFFFFFF" unmaskedvalue="18"/>
+ <field name="tcp.flags.push" showname=".... .... 1... = Push: Set" size="1" pos="33" show="1" value="FFFFFFFF" unmaskedvalue="18"/>
+ <field name="tcp.flags.reset" showname=".... .... .0.. = Reset: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.syn" showname=".... .... ..0. = Syn: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.fin" showname=".... .... ...0 = Fin: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.str" showname="TCP Flags: *******AP***" size="2" pos="32" show="*******AP***" value="5018"/>
+ </field>
+ <field name="tcp.window_size_value" showname="Window size value: 32767" size="2" pos="34" show="32767" value="7fff"/>
+ <field name="tcp.window_size" showname="Calculated window size: 32767" size="2" pos="34" show="32767" value="7fff"/>
+ <field name="tcp.window_size_scalefactor" showname="Window size scaling factor: -2 (no window scaling used)" size="2" pos="34" show="-2" value="7fff"/>
+ <field name="tcp.checksum" showname="Checksum: 0x0000 [validation disabled]" size="2" pos="36" show="0x00000000" value="0000">
+ <field name="tcp.checksum_good" showname="Good Checksum: False" size="2" pos="36" show="0" value="0000"/>
+ <field name="tcp.checksum_bad" showname="Bad Checksum: False" size="2" pos="36" show="0" value="0000"/>
+ </field>
+ <field name="tcp.urgent_pointer" showname="Urgent pointer: 0" size="2" pos="38" show="0" value="0000"/>
+ <field name="tcp.analysis" showname="SEQ/ACK analysis" size="0" pos="20" show="" value="">
+ <field name="tcp.analysis.acks_frame" showname="This is an ACK to the segment in frame: 64694" size="0" pos="20" show="64694"/>
+ <field name="tcp.analysis.ack_rtt" showname="The RTT to ACK the segment was: 0.000241000 seconds" size="0" pos="20" show="0.000241000"/>
+ <field name="tcp.analysis.initial_rtt" showname="iRTT: 0.000019000 seconds" size="0" pos="20" show="0.000019000"/>
+ <field name="tcp.analysis.bytes_in_flight" showname="Bytes in flight: 208" size="0" pos="20" show="208"/>
+ </field>
+ <field name="tcp.pdu.size" showname="PDU Size: 208" size="208" pos="40" show="208" value="000000cc050404ff000c000c0000000034b876cafa1236459c941cdfeb431f613081ad0201346681a7043c636e3d6c6461707465737432636f6d70757465722c636e3d636f6d7075746572732c44433d73616d62612c44433d6578616d706c652c44433d636f6d306730650a010230600414736572766963655072696e636970616c4e616d6531480416686f73742f6c6461707465737432636f6d70757465720416686f73742f6c6461707465737432636f6d70757465720416636966732f6c6461707465737432636f6d7075746572"/>
+ </proto>
+ <proto name="ldap" showname="Lightweight Directory Access Protocol" size="208" pos="40">
+ <field name="ldap.sasl_buffer_length" showname="SASL Buffer Length: 204" size="4" pos="40" show="204" value="000000cc"/>
+ <field name="" show="SASL Buffer" size="208" pos="40" value="000000cc050404ff000c000c0000000034b876cafa1236459c941cdfeb431f613081ad0201346681a7043c636e3d6c6461707465737432636f6d70757465722c636e3d636f6d7075746572732c44433d73616d62612c44433d6578616d706c652c44433d636f6d306730650a010230600414736572766963655072696e636970616c4e616d6531480416686f73742f6c6461707465737432636f6d70757465720416686f73742f6c6461707465737432636f6d70757465720416636966732f6c6461707465737432636f6d7075746572">
+ <proto name="gss-api" showname="GSS-API Generic Security Service Application Program Interface" size="28" pos="44">
+ <field name="spnego.krb5.blob" showname="krb5_blob: 050404ff000c000c0000000034b876cafa1236459c941cdf..." size="28" pos="44" show="05:04:04:ff:00:0c:00:0c:00:00:00:00:34:b8:76:ca:fa:12:36:45:9c:94:1c:df:eb:43:1f:61" value="050404ff000c000c0000000034b876cafa1236459c941cdfeb431f61">
+ <field name="spnego.krb5.tok_id" showname="krb5_tok_id: KRB_TOKEN_CFX_WRAP (0x0405)" size="2" pos="44" show="0x00000405" value="0504"/>
+ <field name="spnego.krb5.cfx_flags" showname="krb5_cfx_flags: 0x04, AcceptorSubkey" size="1" pos="46" show="0x00000004" value="04">
+ <field name="spnego.krb5.acceptor_subkey" showname=".... .1.. = AcceptorSubkey: Set" size="1" pos="46" show="1" value="FFFFFFFF" unmaskedvalue="04"/>
+ <field name="spnego.krb5.sealed" showname=".... ..0. = Sealed: Not set" size="1" pos="46" show="0" value="0" unmaskedvalue="04"/>
+ <field name="spnego.krb5.send_by_acceptor" showname=".... ...0 = SendByAcceptor: Not set" size="1" pos="46" show="0" value="0" unmaskedvalue="04"/>
+ </field>
+ <field name="spnego.krb5.filler" showname="krb5_filler: ff" size="1" pos="47" show="ff" value="ff"/>
+ <field name="spnego.krb5.cfx_ec" showname="krb5_cfx_ec: 12" size="2" pos="48" show="12" value="000c"/>
+ <field name="spnego.krb5.cfx_rrc" showname="krb5_cfx_rrc: 12" size="2" pos="50" show="12" value="000c"/>
+ <field name="spnego.krb5.cfx_seq" showname="krb5_cfx_seq: 884504266" size="8" pos="52" show="884504266" value="0000000034b876ca"/>
+ <field name="spnego.krb5.sgn_cksum" showname="krb5_sgn_cksum: fa1236459c941cdfeb431f61" size="12" pos="60" show="fa:12:36:45:9c:94:1c:df:eb:43:1f:61" value="fa1236459c941cdfeb431f61"/>
+ </field>
+ </proto>
+ <field name="" show="GSS-API payload (176 bytes)" size="176" pos="72" value="3081ad0201346681a7043c636e3d6c6461707465737432636f6d70757465722c636e3d636f6d7075746572732c44433d73616d62612c44433d6578616d706c652c44433d636f6d306730650a010230600414736572766963655072696e636970616c4e616d6531480416686f73742f6c6461707465737432636f6d70757465720416686f73742f6c6461707465737432636f6d70757465720416636966732f6c6461707465737432636f6d7075746572">
+ <field name="ldap.LDAPMessage_element" showname="LDAPMessage modifyRequest(52) &quot;cn=ldaptest2computer,cn=computers,DC=samba,DC=example,DC=com&quot;" size="176" pos="72" show="" value="">
+ <field name="ldap.messageID" showname="messageID: 52" size="1" pos="77" show="52" value="34"/>
+ <field name="ldap.protocolOp" showname="protocolOp: modifyRequest (6)" size="170" pos="78" show="6" value="6681a7043c636e3d6c6461707465737432636f6d70757465722c636e3d636f6d7075746572732c44433d73616d62612c44433d6578616d706c652c44433d636f6d306730650a010230600414736572766963655072696e636970616c4e616d6531480416686f73742f6c6461707465737432636f6d70757465720416686f73742f6c6461707465737432636f6d70757465720416636966732f6c6461707465737432636f6d7075746572">
+ <field name="ldap.modifyRequest_element" showname="modifyRequest" size="167" pos="81" show="" value="">
+ <field name="ldap.object" showname="object: cn=ldaptest2computer,cn=computers,DC=samba,DC=example,DC=com" size="60" pos="83" show="cn=ldaptest2computer,cn=computers,DC=samba,DC=example,DC=com" value="636e3d6c6461707465737432636f6d70757465722c636e3d636f6d7075746572732c44433d73616d62612c44433d6578616d706c652c44433d636f6d"/>
+ <field name="ldap.modification" showname="modification: 1 item" size="103" pos="145" show="1" value="30650a010230600414736572766963655072696e636970616c4e616d6531480416686f73742f6c6461707465737432636f6d70757465720416686f73742f6c6461707465737432636f6d70757465720416636966732f6c6461707465737432636f6d7075746572">
+ <field name="ldap.modification_item_element" showname="modification item" size="103" pos="145" show="" value="">
+ <field name="ldap.operation" showname="operation: replace (2)" size="1" pos="149" show="2" value="02"/>
+ <field name="ldap.modification_element" showname="modification servicePrincipalName" size="98" pos="150" show="" value="">
+ <field name="ldap.type" showname="type: servicePrincipalName" size="20" pos="154" show="servicePrincipalName" value="736572766963655072696e636970616c4e616d65"/>
+ <field name="ldap.vals" showname="vals: 3 items" size="72" pos="176" show="3" value="0416686f73742f6c6461707465737432636f6d70757465720416686f73742f6c6461707465737432636f6d70757465720416636966732f6c6461707465737432636f6d7075746572">
+ <field name="ldap.AttributeValue" showname="AttributeValue: host/ldaptest2computer" size="22" pos="178" show="68:6f:73:74:2f:6c:64:61:70:74:65:73:74:32:63:6f:6d:70:75:74:65:72" value="686f73742f6c6461707465737432636f6d7075746572"/>
+ <field name="ldap.AttributeValue" showname="AttributeValue: host/ldaptest2computer" size="22" pos="202" show="68:6f:73:74:2f:6c:64:61:70:74:65:73:74:32:63:6f:6d:70:75:74:65:72" value="686f73742f6c6461707465737432636f6d7075746572"/>
+ <field name="ldap.AttributeValue" showname="AttributeValue: cifs/ldaptest2computer" size="22" pos="226" show="63:69:66:73:2f:6c:64:61:70:74:65:73:74:32:63:6f:6d:70:75:74:65:72" value="636966732f6c6461707465737432636f6d7075746572"/>
+ </field>
+ </field>
+ </field>
+ </field>
+ </field>
+ </field>
+ </field>
+ </field>
+ </field>
+ </proto>
+</packet>
+
+<packet>
+ <proto name="geninfo" pos="0" showname="General information" size="95">
+ <field name="num" pos="0" show="51638" showname="Number" value="c9b6" size="95"/>
+ <field name="len" pos="0" show="95" showname="Frame Length" value="5f" size="95"/>
+ <field name="caplen" pos="0" show="95" showname="Captured Length" value="5f" size="95"/>
+ <field name="timestamp" pos="0" show="Feb 10, 2017 14:38:02.579057000 NZDT" showname="Captured Time" value="1486690682.579057000" size="95"/>
+ </proto>
+ <proto name="frame" showname="Frame 51638: 95 bytes on wire (760 bits), 95 bytes captured (760 bits)" size="95" pos="0">
+ <field name="frame.encap_type" showname="Encapsulation type: Raw IP (7)" size="0" pos="0" show="7"/>
+ <field name="frame.time" showname="Arrival Time: Feb 10, 2017 14:38:02.579057000 NZDT" size="0" pos="0" show="Feb 10, 2017 14:38:02.579057000 NZDT"/>
+ <field name="frame.offset_shift" showname="Time shift for this packet: 0.000000000 seconds" size="0" pos="0" show="0.000000000"/>
+ <field name="frame.time_epoch" showname="Epoch Time: 1486690682.579057000 seconds" size="0" pos="0" show="1486690682.579057000"/>
+ <field name="frame.time_delta" showname="Time delta from previous captured frame: 0.000038000 seconds" size="0" pos="0" show="0.000038000"/>
+ <field name="frame.time_delta_displayed" showname="Time delta from previous displayed frame: 0.000038000 seconds" size="0" pos="0" show="0.000038000"/>
+ <field name="frame.time_relative" showname="Time since reference or first frame: 106.048606000 seconds" size="0" pos="0" show="106.048606000"/>
+ <field name="frame.number" showname="Frame Number: 51638" size="0" pos="0" show="51638"/>
+ <field name="frame.len" showname="Frame Length: 95 bytes (760 bits)" size="0" pos="0" show="95"/>
+ <field name="frame.cap_len" showname="Capture Length: 95 bytes (760 bits)" size="0" pos="0" show="95"/>
+ <field name="frame.marked" showname="Frame is marked: False" size="0" pos="0" show="0"/>
+ <field name="frame.ignored" showname="Frame is ignored: False" size="0" pos="0" show="0"/>
+ <field name="frame.protocols" showname="Protocols in frame: raw:ip:tcp:ldap" size="0" pos="0" show="raw:ip:tcp:ldap"/>
+ </proto>
+ <proto name="raw" showname="Raw packet data" size="95" pos="0"/>
+ <proto name="ip" showname="Internet Protocol Version 4, Src: 127.0.0.11, Dst: 127.0.0.21" size="20" pos="0">
+ <field name="ip.version" showname="0100 .... = Version: 4" size="1" pos="0" show="4" value="4" unmaskedvalue="45"/>
+ <field name="ip.hdr_len" showname=".... 0101 = Header Length: 20 bytes" size="1" pos="0" show="5" value="5" unmaskedvalue="45"/>
+ <field name="ip.dsfield" showname="Differentiated Services Field: 0x00 (DSCP: CS0, ECN: Not-ECT)" size="1" pos="1" show="0x00000000" value="00">
+ <field name="ip.dsfield.dscp" showname="0000 00.. = Differentiated Services Codepoint: Default (0)" size="1" pos="1" show="0" value="0" unmaskedvalue="00"/>
+ <field name="ip.dsfield.ecn" showname=".... ..00 = Explicit Congestion Notification: Not ECN-Capable Transport (0)" size="1" pos="1" show="0" value="0" unmaskedvalue="00"/>
+ </field>
+ <field name="ip.len" showname="Total Length: 95" size="2" pos="2" show="95" value="005f"/>
+ <field name="ip.id" showname="Identification: 0xffff (65535)" size="2" pos="4" show="0x0000ffff" value="ffff"/>
+ <field name="ip.flags" showname="Flags: 0x02 (Don&#x27;t Fragment)" size="1" pos="6" show="0x00000002" value="40">
+ <field name="ip.flags.rb" showname="0... .... = Reserved bit: Not set" size="1" pos="6" show="0" value="40"/>
+ <field name="ip.flags.df" showname=".1.. .... = Don&#x27;t fragment: Set" size="1" pos="6" show="1" value="40"/>
+ <field name="ip.flags.mf" showname="..0. .... = More fragments: Not set" size="1" pos="6" show="0" value="40"/>
+ </field>
+ <field name="ip.frag_offset" showname="Fragment offset: 0" size="2" pos="6" show="0" value="4000"/>
+ <field name="ip.ttl" showname="Time to live: 255" size="1" pos="8" show="255" value="ff"/>
+ <field name="ip.proto" showname="Protocol: TCP (6)" size="1" pos="9" show="6" value="06"/>
+ <field name="ip.checksum" showname="Header checksum: 0x0000 [validation disabled]" size="2" pos="10" show="0x00000000" value="0000">
+ <field name="ip.checksum_good" showname="Good: False" size="2" pos="10" show="0" value="0000"/>
+ <field name="ip.checksum_bad" showname="Bad: False" size="2" pos="10" show="0" value="0000"/>
+ </field>
+ <field name="ip.src" showname="Source: 127.0.0.11" size="4" pos="12" show="127.0.0.11" value="7f00000b"/>
+ <field name="ip.addr" showname="Source or Destination Address: 127.0.0.11" hide="yes" size="4" pos="12" show="127.0.0.11" value="7f00000b"/>
+ <field name="ip.src_host" showname="Source Host: 127.0.0.11" hide="yes" size="4" pos="12" show="127.0.0.11" value="7f00000b"/>
+ <field name="ip.host" showname="Source or Destination Host: 127.0.0.11" hide="yes" size="4" pos="12" show="127.0.0.11" value="7f00000b"/>
+ <field name="ip.dst" showname="Destination: 127.0.0.21" size="4" pos="16" show="127.0.0.21" value="7f000015"/>
+ <field name="ip.addr" showname="Source or Destination Address: 127.0.0.21" hide="yes" size="4" pos="16" show="127.0.0.21" value="7f000015"/>
+ <field name="ip.dst_host" showname="Destination Host: 127.0.0.21" hide="yes" size="4" pos="16" show="127.0.0.21" value="7f000015"/>
+ <field name="ip.host" showname="Source or Destination Host: 127.0.0.21" hide="yes" size="4" pos="16" show="127.0.0.21" value="7f000015"/>
+ <field name="" show="Source GeoIP: Unknown" size="4" pos="12" value="7f00000b"/>
+ <field name="" show="Destination GeoIP: Unknown" size="4" pos="16" value="7f000015"/>
+ </proto>
+ <proto name="tcp" showname="Transmission Control Protocol, Src Port: 18036 (18036), Dst Port: 389 (389), Seq: 1, Ack: 1, Len: 55" size="20" pos="20">
+ <field name="tcp.srcport" showname="Source Port: 18036" size="2" pos="20" show="18036" value="4674"/>
+ <field name="tcp.dstport" showname="Destination Port: 389" size="2" pos="22" show="389" value="0185"/>
+ <field name="tcp.port" showname="Source or Destination Port: 18036" hide="yes" size="2" pos="20" show="18036" value="4674"/>
+ <field name="tcp.port" showname="Source or Destination Port: 389" hide="yes" size="2" pos="22" show="389" value="0185"/>
+ <field name="tcp.stream" showname="Stream index: 1207" size="0" pos="20" show="1207"/>
+ <field name="tcp.len" showname="TCP Segment Len: 55" size="1" pos="32" show="55" value="50"/>
+ <field name="tcp.seq" showname="Sequence number: 1 (relative sequence number)" size="4" pos="24" show="1" value="00000001"/>
+ <field name="tcp.nxtseq" showname="Next sequence number: 56 (relative sequence number)" size="0" pos="20" show="56"/>
+ <field name="tcp.ack" showname="Acknowledgment number: 1 (relative ack number)" size="4" pos="28" show="1" value="00000001"/>
+ <field name="tcp.hdr_len" showname="Header Length: 20 bytes" size="1" pos="32" show="20" value="50"/>
+ <field name="tcp.flags" showname="Flags: 0x018 (PSH, ACK)" size="2" pos="32" show="0x00000018" value="18" unmaskedvalue="5018">
+ <field name="tcp.flags.res" showname="000. .... .... = Reserved: Not set" size="1" pos="32" show="0" value="0" unmaskedvalue="50"/>
+ <field name="tcp.flags.ns" showname="...0 .... .... = Nonce: Not set" size="1" pos="32" show="0" value="0" unmaskedvalue="50"/>
+ <field name="tcp.flags.cwr" showname=".... 0... .... = Congestion Window Reduced (CWR): Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.ecn" showname=".... .0.. .... = ECN-Echo: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.urg" showname=".... ..0. .... = Urgent: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.ack" showname=".... ...1 .... = Acknowledgment: Set" size="1" pos="33" show="1" value="FFFFFFFF" unmaskedvalue="18"/>
+ <field name="tcp.flags.push" showname=".... .... 1... = Push: Set" size="1" pos="33" show="1" value="FFFFFFFF" unmaskedvalue="18"/>
+ <field name="tcp.flags.reset" showname=".... .... .0.. = Reset: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.syn" showname=".... .... ..0. = Syn: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.fin" showname=".... .... ...0 = Fin: Not set" size="1" pos="33" show="0" value="0" unmaskedvalue="18"/>
+ <field name="tcp.flags.str" showname="TCP Flags: *******AP***" size="2" pos="32" show="*******AP***" value="5018"/>
+ </field>
+ <field name="tcp.window_size_value" showname="Window size value: 32767" size="2" pos="34" show="32767" value="7fff"/>
+ <field name="tcp.window_size" showname="Calculated window size: 32767" size="2" pos="34" show="32767" value="7fff"/>
+ <field name="tcp.window_size_scalefactor" showname="Window size scaling factor: -2 (no window scaling used)" size="2" pos="34" show="-2" value="7fff"/>
+ <field name="tcp.checksum" showname="Checksum: 0x0000 [validation disabled]" size="2" pos="36" show="0x00000000" value="0000">
+ <field name="tcp.checksum_good" showname="Good Checksum: False" size="2" pos="36" show="0" value="0000"/>
+ <field name="tcp.checksum_bad" showname="Bad Checksum: False" size="2" pos="36" show="0" value="0000"/>
+ </field>
+ <field name="tcp.urgent_pointer" showname="Urgent pointer: 0" size="2" pos="38" show="0" value="0000"/>
+ <field name="tcp.analysis" showname="SEQ/ACK analysis" size="0" pos="20" show="" value="">
+ <field name="tcp.analysis.initial_rtt" showname="iRTT: 0.000073000 seconds" size="0" pos="20" show="0.000073000"/>
+ <field name="tcp.analysis.bytes_in_flight" showname="Bytes in flight: 55" size="0" pos="20" show="55"/>
+ </field>
+ <field name="tcp.pdu.size" showname="PDU Size: 55" size="55" pos="40" show="55" value="30350201016030020103041f41646d696e6973747261746f724053414d42412e4558414d504c452e434f4d800a6c6f6344437061737331"/>
+ </proto>
+ <proto name="ldap" showname="Lightweight Directory Access Protocol" size="55" pos="40">
+ <field name="ldap.LDAPMessage_element" showname="LDAPMessage bindRequest(1) &quot;Administrator@SAMBA.EXAMPLE.COM&quot; simple" size="55" pos="40" show="" value="">
+ <field name="ldap.messageID" showname="messageID: 1" size="1" pos="44" show="1" value="01"/>
+ <field name="ldap.protocolOp" showname="protocolOp: bindRequest (0)" size="50" pos="45" show="0" value="6030020103041f41646d696e6973747261746f724053414d42412e4558414d504c452e434f4d800a6c6f6344437061737331">
+ <field name="ldap.bindRequest_element" showname="bindRequest" size="48" pos="47" show="" value="">
+ <field name="ldap.version" showname="version: 3" size="1" pos="49" show="3" value="03"/>
+ <field name="ldap.name" showname="name: Administrator@SAMBA.EXAMPLE.COM" size="31" pos="52" show="Administrator@SAMBA.EXAMPLE.COM" value="41646d696e6973747261746f724053414d42412e4558414d504c452e434f4d"/>
+ <field name="ldap.authentication" showname="authentication: simple (0)" size="10" pos="85" show="0" value="6c6f6344437061737331">
+ <field name="ldap.simple" showname="simple: 6c6f6344437061737331" size="10" pos="85" show="6c:6f:63:44:43:70:61:73:73:31" value="6c6f6344437061737331"/>
+ </field>
+ </field>
+ </field>
+ </field>
+ </proto>
+</packet>
+
+
+</pdml>
diff --git a/python/samba/tests/blackbox/traffic_learner.py b/python/samba/tests/blackbox/traffic_learner.py
new file mode 100644
index 0000000..ac941ce
--- /dev/null
+++ b/python/samba/tests/blackbox/traffic_learner.py
@@ -0,0 +1,71 @@
+# Black box tests for script/traffic_learner
+#
+# Copyright (C) Catalyst IT Ltd. 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""Blackbox tests for traffic_learner"""
+
+import os
+import json
+import tempfile
+from samba.emulate import traffic
+
+from samba.tests import BlackboxTestCase
+
+LEARNER = "script/traffic_learner"
+DATA_DIR = "python/samba/tests/blackbox/testdata"
+
+
+class TrafficLearnerTests(BlackboxTestCase):
+
+ def test_no_output_file(self):
+ """Run the script with no output file specified. Should fail."""
+ self.check_exit_code(LEARNER, 1)
+
+ def test_model_generation(self):
+ """Ensure a model is generated from a summary file and it is
+ correct"""
+
+ with self.mktemp() as output:
+ summary = os.path.join(DATA_DIR, "traffic-sample-very-short.txt")
+ command = "%s %s --out %s" % (LEARNER, summary, output)
+ self.check_run(command)
+
+ expected_fn = os.path.join(DATA_DIR, "traffic_learner.expected")
+ expected = traffic.TrafficModel()
+ f=open(expected_fn)
+ expected.load(f)
+ f.close()
+
+ f=open(output)
+ actual = traffic.TrafficModel()
+ actual.load(f)
+ f.close()
+
+ actual_ngrams = {k: sorted(v) for k, v in actual.ngrams.items()}
+ expected_ngrams = {k: sorted(v) for k, v in expected.ngrams.items()}
+
+ self.assertEqual(expected_ngrams, actual_ngrams)
+
+ actual_details = {k: sorted(v) for k, v in actual.query_details.items()}
+ expected_details = {k: sorted(v) for k, v in expected.query_details.items()}
+ self.assertEqual(expected_details, actual_details)
+ self.assertEqual(expected.cumulative_duration, actual.cumulative_duration)
+ self.assertEqual(expected.packet_rate, actual.packet_rate)
+
+ with open(expected_fn) as f1, open(output) as f2:
+ expected_json = json.load(f1)
+ actual_json = json.load(f2)
+ self.assertEqual(expected_json, actual_json)
diff --git a/python/samba/tests/blackbox/traffic_replay.py b/python/samba/tests/blackbox/traffic_replay.py
new file mode 100644
index 0000000..835c248
--- /dev/null
+++ b/python/samba/tests/blackbox/traffic_replay.py
@@ -0,0 +1,100 @@
+# Black box tests for script/traffic_replay
+#
+# Copyright (C) Catalyst IT Ltd. 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""Blackbox tests for traffic_replay"""
+
+import os
+import tempfile
+
+from samba.tests import BlackboxTestCase
+
+DATA_DIR = "python/samba/tests/blackbox/testdata"
+SCRIPT = "script/traffic_replay"
+FIXED = "--fixed-password=trafficreplay01%"
+SERVER = os.environ["SERVER"]
+PASSWORD = os.environ["PASSWORD"]
+USER = os.environ["USERNAME"]
+CREDS = "-U%s%%%s" % (USER, PASSWORD)
+MODEL = os.path.join(DATA_DIR, "traffic-sample-very-short.model")
+EXPECTED_OUTPUT = os.path.join(DATA_DIR, "traffic_replay-%s.expected")
+
+
+class TrafficLearnerTests(BlackboxTestCase):
+
+ def tearDown(self):
+ options = "--clean-up"
+ command = "%s %s %s %s" % (SCRIPT, options, CREDS, SERVER)
+ self.check_run(command)
+
+ def test_generate_users_only(self):
+ """Ensure the generate users only option functions correctly
+ """
+ options = ("--generate-users-only --number-of-users 20 "
+ "--number-of-groups 5 --average-groups-per-user 2")
+ command = "%s %s %s %s %s" % (
+ SCRIPT, options, FIXED, CREDS, SERVER)
+ self.check_run(command)
+ command = "%s %s %s %s %s %s" % (
+ SCRIPT, MODEL, options, FIXED, CREDS, SERVER)
+ self.check_run(command)
+
+ def test_summary_generation(self):
+ """Ensure a summary file is generated and the contents are correct"""
+
+ for i, opts in enumerate((["--random-seed=3"],
+ ["--random-seed=4"],
+ ["--random-seed=3",
+ "--conversation-persistence=0.5"],
+ ["--random-seed=3",
+ "--old-scale",
+ "--conversation-persistence=0.95"],
+ )):
+ with self.mktemp() as output:
+ command = ([SCRIPT, MODEL,
+ "--traffic-summary", output,
+ "-D1", "-S0.1"] +
+ opts +
+ [FIXED, CREDS, SERVER])
+ self.check_run(command)
+ expected = open(EXPECTED_OUTPUT % i).read()
+ actual = open(output).read()
+ self.assertStringsEqual(expected, actual)
+
+ def test_summary_replay_no_fixed(self):
+ """Ensure a summary file with no fixed password fails
+ """
+ command = [SCRIPT, MODEL, CREDS, SERVER]
+ self.check_exit_code(command, 1)
+
+ def test_model_replay(self):
+ """Ensure a model can be replayed against a DC
+ """
+ command = [SCRIPT, MODEL,
+ FIXED,
+ '-D2', '-S0.1',
+ CREDS, SERVER]
+ self.check_run(command)
+
+ def test_generate_users_only_no_password(self):
+        """Ensure the generate users only fails if no fixed_password supplied
+ """
+ options = ("--generate-users-only --number-of-users 20 "
+ "--number-of-groups 5 --average-groups-per-user 2")
+ command = "%s %s %s %s" % (SCRIPT, options, CREDS, SERVER)
+ self.check_exit_code(command, 1)
+ command = "%s %s %s %s %s" % (SCRIPT, MODEL, options, CREDS, SERVER)
+ self.check_exit_code(command, 1)
diff --git a/python/samba/tests/blackbox/traffic_summary.py b/python/samba/tests/blackbox/traffic_summary.py
new file mode 100644
index 0000000..b895083
--- /dev/null
+++ b/python/samba/tests/blackbox/traffic_summary.py
@@ -0,0 +1,53 @@
+# Black box tests for script/traffic_summary.pl
+#
+# Copyright (C) Catalyst IT Ltd. 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""Blackbox tests for traffic_summary"""
+
+import os
+import subprocess
+import tempfile
+
+from samba.tests import BlackboxTestCase
+
+SCRIPT = "script/traffic_summary.pl"
+DATA_DIR = "python/samba/tests/blackbox/testdata"
+INPUT = os.path.join(DATA_DIR, "traffic_summary.pdml")
+EXPECTED_FN = os.path.join(DATA_DIR, "traffic_summary.expected")
+
+
+class TrafficSummaryTests(BlackboxTestCase):
+
+ def check_twig(self):
+ """Check that perl XML::Twig module is installed.
+ Traffic summary depends on this module being installed.
+ """
+ line = "perl -MXML::Twig -e 1"
+ p = subprocess.Popen(line, shell=True)
+ retcode = p.wait()
+ return (retcode == 0)
+
+ def test_traffic_summary(self):
+ if not self.check_twig():
+ self.skipTest("Perl module XML::Twig is not installed")
+
+ with self.mktemp() as output:
+ command = "%s %s >%s" % (SCRIPT, INPUT, output)
+ print(command)
+ self.check_run(command)
+ expected = open(EXPECTED_FN).readlines()
+ actual = open(output).readlines()
+ self.assertEqual(expected, actual)
diff --git a/python/samba/tests/common.py b/python/samba/tests/common.py
new file mode 100644
index 0000000..1a7d9ad
--- /dev/null
+++ b/python/samba/tests/common.py
@@ -0,0 +1,66 @@
+# Unix SMB/CIFS implementation. Tests for common.py routines
+# Copyright (C) Andrew Tridgell 2011
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for samba.common"""
+
+import samba
+import os
+import samba.tests
+from samba.common import normalise_int32
+from samba.samdb import dsdb_Dn
+
+
+class CommonTests(samba.tests.TestCaseInTempDir):
+
+ def test_normalise_int32(self):
+ self.assertEqual('17', normalise_int32(17))
+ self.assertEqual('17', normalise_int32('17'))
+ self.assertEqual('-123', normalise_int32('-123'))
+ self.assertEqual('-1294967296', normalise_int32('3000000000'))
+
+ def test_dsdb_Dn_binary(self):
+ url = self.tempdir + "/test_dsdb_Dn_binary.ldb"
+ sam = samba.Ldb(url=url)
+ dn1 = dsdb_Dn(sam, "DC=foo,DC=bar")
+ dn2 = dsdb_Dn(sam, "B:8:0000000D:<GUID=b3f0ec29-17f4-452a-b002-963e1909d101>;DC=samba,DC=example,DC=com")
+ self.assertEqual(dn2.binary, "0000000D")
+ self.assertEqual(13, dn2.get_binary_integer())
+ os.unlink(url)
+
+ def test_dsdb_Dn_sorted(self):
+ url = self.tempdir + "/test_dsdb_Dn_sorted.ldb"
+ sam = samba.Ldb(url=url)
+ try:
+ dn1 = dsdb_Dn(sam, "B:8:0000000D:<GUID=b3f0ec29-17f4-452a-b002-963e1909d101>;OU=dn1,DC=samba,DC=example,DC=com")
+ dn2 = dsdb_Dn(sam, "B:8:0000000C:<GUID=b3f0ec29-17f4-452a-b002-963e1909d101>;OU=dn1,DC=samba,DC=example,DC=com")
+ dn3 = dsdb_Dn(sam, "B:8:0000000F:<GUID=00000000-17f4-452a-b002-963e1909d101>;OU=dn3,DC=samba,DC=example,DC=com")
+ dn4 = dsdb_Dn(sam, "B:8:00000000:<GUID=ffffffff-17f4-452a-b002-963e1909d101>;OU=dn4,DC=samba,DC=example,DC=com")
+ dn5 = dsdb_Dn(sam, "<GUID=ffffffff-27f4-452a-b002-963e1909d101>;OU=dn5,DC=samba,DC=example,DC=com")
+ dn6 = dsdb_Dn(sam, "<GUID=00000000-27f4-452a-b002-963e1909d101>;OU=dn6,DC=samba,DC=example,DC=com")
+ unsorted_links14 = [dn1, dn2, dn3, dn4]
+ sorted_vals14 = [str(dn) for dn in sorted(unsorted_links14)]
+ self.assertEqual(sorted_vals14[0], str(dn3))
+ self.assertEqual(sorted_vals14[1], str(dn2))
+ self.assertEqual(sorted_vals14[2], str(dn1))
+ self.assertEqual(sorted_vals14[3], str(dn4))
+ unsorted_links56 = [dn5, dn6]
+ sorted_vals56 = [str(dn) for dn in sorted(unsorted_links56)]
+ self.assertEqual(sorted_vals56[0], str(dn6))
+ self.assertEqual(sorted_vals56[1], str(dn5))
+ finally:
+ del sam
+ os.unlink(url)
diff --git a/python/samba/tests/complex_expressions.py b/python/samba/tests/complex_expressions.py
new file mode 100644
index 0000000..4cb6330
--- /dev/null
+++ b/python/samba/tests/complex_expressions.py
@@ -0,0 +1,487 @@
+# -*- coding: utf-8 -*-
+
+# Copyright Andrew Bartlett 2018
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import optparse
+import samba
+import samba.getopt as options
+import sys
+import os
+import time
+from samba.auth import system_session
+from samba.tests import TestCase
+import ldb
+
+ERRCODE_ENTRY_EXISTS = 68
+ERRCODE_OPERATIONS_ERROR = 1
+ERRCODE_INVALID_VALUE = 21
+ERRCODE_CLASS_VIOLATION = 65
+
+parser = optparse.OptionParser("{0} <host>".format(sys.argv[0]))
+sambaopts = options.SambaOptions(parser)
+
+# use command line creds if available
+credopts = options.CredentialsOptions(parser)
+parser.add_option_group(credopts)
+parser.add_option("-v", action="store_true", dest="verbose",
+ help="print successful expression outputs")
+opts, args = parser.parse_args()
+
+if len(args) < 1:
+ parser.print_usage()
+ sys.exit(1)
+
+lp = sambaopts.get_loadparm()
+creds = credopts.get_credentials(lp)
+
+# Set properly at end of file.
+host = None
+
+global ou_count
+ou_count = 0
+
+
+class ComplexExpressionTests(TestCase):
+ # Using setUpClass instead of setup because we're not modifying any
+ # records in the tests
+ @classmethod
+ def setUpClass(cls):
+ super().setUpClass()
+ cls.samdb = samba.samdb.SamDB(host, lp=lp,
+ session_info=system_session(),
+ credentials=creds)
+
+ ou_name = "ComplexExprTest"
+ cls.base_dn = "OU={0},{1}".format(ou_name, cls.samdb.domain_dn())
+
+ try:
+ cls.samdb.delete(cls.base_dn, ["tree_delete:1"])
+ except:
+ pass
+
+ try:
+ cls.samdb.create_ou(cls.base_dn)
+ except ldb.LdbError as e:
+ if e.args[0] == ERRCODE_ENTRY_EXISTS:
+ print(('test ou {ou} already exists. Delete with '
+ '"samba-tool group delete OU={ou} '
+ '--force-subtree-delete"').format(ou=ou_name))
+ raise e
+
+ cls.name_template = "testuser{0}"
+ cls.default_n = 10
+
+ # These fields are carefully hand-picked from the schema. They have
+ # syntax and handling appropriate for our test structure.
+ cls.largeint_f = "accountExpires"
+ cls.str_f = "accountNameHistory"
+ cls.int_f = "flags"
+ cls.enum_f = "preferredDeliveryMethod"
+ cls.time_f = "msTSExpireDate"
+ cls.ranged_int_f = "countryCode"
+
+ @classmethod
+ def tearDownClass(cls):
+ cls.samdb.delete(cls.base_dn, ["tree_delete:1"])
+
+ # Make test OU containing users with field=val for each val
+ def make_test_objects(self, field, vals):
+ global ou_count
+ ou_count += 1
+ ou_dn = "OU=testou{0},{1}".format(ou_count, self.base_dn)
+ self.samdb.create_ou(ou_dn)
+
+ ldap_objects = [{"dn": "CN=testuser{0},{1}".format(n, ou_dn),
+ "name": self.name_template.format(n),
+ "objectClass": "user",
+ field: n}
+ for n in vals]
+
+ for ldap_object in ldap_objects:
+ # It's useful to keep appropriate python types in the ldap_object
+ # dict but samdb's 'add' function expects strings.
+ stringed_ldap_object = {k: str(v)
+ for (k, v) in ldap_object.items()}
+ try:
+ self.samdb.add(stringed_ldap_object)
+ except ldb.LdbError as e:
+ print("failed to add %s" % (stringed_ldap_object))
+ raise e
+
+ return ou_dn, ldap_objects
+
+ # Run search expr and print out time. This function should be used for
+ # almost all searching.
+ def time_ldap_search(self, expr, dn):
+ time_taken = 0
+ try:
+ start_time = time.time()
+ res = self.samdb.search(base=dn,
+ scope=ldb.SCOPE_SUBTREE,
+ expression=expr)
+ time_taken = time.time() - start_time
+ except Exception as e:
+ print("failed expr " + expr)
+ raise e
+ print("{0} took {1}s".format(expr, time_taken))
+ return res, time_taken
+
+ # Take an ldap expression and an equivalent python expression.
+ # Run and time the ldap expression and compare the result to the python
+ # expression run over a list of ldap_object dicts.
+ def assertLDAPQuery(self, ldap_expr, ou_dn, py_expr, ldap_objects):
+
+ # run (and time) the LDAP search expression over the DB
+ res, time_taken = self.time_ldap_search(ldap_expr, ou_dn)
+ results = {str(row.get('name')[0]) for row in res}
+
+ # build the set of expected results by evaluating the python-equivalent
+ # of the search expression over the same set of objects
+ expected_results = set()
+ for ldap_object in ldap_objects:
+ try:
+ final_expr = py_expr.format(**ldap_object)
+ except KeyError:
+ # If the format on the py_expr hits a key error, then
+ # ldap_object doesn't have the field, so it shouldn't match.
+ continue
+
+ if eval(final_expr):
+ expected_results.add(str(ldap_object['name']))
+
+ self.assertEqual(results, expected_results)
+
+ if opts.verbose:
+ ldap_object_names = {l['name'] for l in ldap_objects}
+ excluded = ldap_object_names - results
+ excluded = "\n ".join(excluded) or "[NOTHING]"
+ returned = "\n ".join(expected_results) or "[NOTHING]"
+
+ print("PASS: Expression {0} took {1}s and returned:"
+ "\n {2}\n"
+ "Excluded:\n {3}\n".format(ldap_expr,
+ time_taken,
+ returned,
+ excluded))
+
+ # Basic integer range test
+ def test_int_range(self, field=None):
+ n = self.default_n
+ field = field or self.int_f
+ ou_dn, ldap_objects = self.make_test_objects(field, range(n))
+
+ expr = "(&(%s>=%s)(%s<=%s))" % (field, n-1, field, n+1)
+ py_expr = "%d <= {%s} <= %d" % (n-1, field, n+1)
+ self.assertLDAPQuery(expr, ou_dn, py_expr, ldap_objects)
+
+ half_n = int(n/2)
+
+ expr = "(%s<=%s)" % (field, half_n)
+ py_expr = "{%s} <= %d" % (field, half_n)
+ self.assertLDAPQuery(expr, ou_dn, py_expr, ldap_objects)
+
+ expr = "(%s>=%s)" % (field, half_n)
+ py_expr = "{%s} >= %d" % (field, half_n)
+ self.assertLDAPQuery(expr, ou_dn, py_expr, ldap_objects)
+
+ # Same test again for largeint and enum
+ def test_largeint_range(self):
+ self.test_int_range(self.largeint_f)
+
+ def test_enum_range(self):
+ self.test_int_range(self.enum_f)
+
+ # Special range test for integer field with upper and lower bounds defined.
+ # The bounds are checked on insertion, not search, so we should be able
+ # to compare to a constant that is outside bounds.
+ def test_ranged_int_range(self):
+ field = self.ranged_int_f
+ ubound = 2**16
+ width = 8
+
+ vals = list(range(ubound-width, ubound))
+ ou_dn, ldap_objects = self.make_test_objects(field, vals)
+
+ # Check <= value above overflow returns all vals
+ expr = "(%s<=%d)" % (field, ubound+5)
+ py_expr = "{%s} <= %d" % (field, ubound+5)
+ self.assertLDAPQuery(expr, ou_dn, py_expr, ldap_objects)
+
+ # Test range also works for time fields
+ def test_time_range(self):
+ n = self.default_n
+ field = self.time_f
+ n = self.default_n
+ width = int(n/2)
+
+ base_time = 20050116175514
+ time_range = [base_time + t for t in range(-width, width)]
+ time_range = [str(t) + ".0Z" for t in time_range]
+ ou_dn, ldap_objects = self.make_test_objects(field, time_range)
+
+ expr = "(%s<=%s)" % (field, str(base_time) + ".0Z")
+ py_expr = 'int("{%s}"[:-3]) <= %d' % (field, base_time)
+ self.assertLDAPQuery(expr, ou_dn, py_expr, ldap_objects)
+
+ expr = "(&(%s>=%s)(%s<=%s))" % (field, str(base_time-1) + ".0Z",
+ field, str(base_time+1) + ".0Z")
+ py_expr = '%d <= int("{%s}"[:-3]) <= %d' % (base_time-1,
+ field,
+ base_time+1)
+ self.assertLDAPQuery(expr, ou_dn, py_expr, ldap_objects)
+
+ # Run each comparison op on a simple test set. Time taken will be printed.
+ def test_int_single_cmp_op_speeds(self, field=None):
+ n = self.default_n
+ field = field or self.int_f
+ ou_dn, ldap_objects = self.make_test_objects(field, range(n))
+
+ comp_ops = ['=', '<=', '>=']
+ py_comp_ops = ['==', '<=', '>=']
+ exprs = ["(%s%s%d)" % (field, c, n) for c in comp_ops]
+ py_exprs = ["{%s}%s%d" % (field, c, n) for c in py_comp_ops]
+
+ for expr, py_expr in zip(exprs, py_exprs):
+ self.assertLDAPQuery(expr, ou_dn, py_expr, ldap_objects)
+
+ def test_largeint_single_cmp_op_speeds(self):
+ self.test_int_single_cmp_op_speeds(self.largeint_f)
+
+ def test_enum_single_cmp_op_speeds(self):
+ self.test_int_single_cmp_op_speeds(self.enum_f)
+
+ # Check strings are ordered using a naive ordering.
+ def test_str_ordering(self):
+ field = self.str_f
+ a_ord = ord('A')
+ n = 10
+ str_range = ['abc{0}d'.format(chr(c)) for c in range(a_ord, a_ord+n)]
+ ou_dn, ldap_objects = self.make_test_objects(field, str_range)
+ half_n = int(a_ord + n/2)
+
+ # Basic <= and >= statements
+ expr = "(%s>=abc%s)" % (field, chr(half_n))
+ py_expr = "'{%s}' >= 'abc%s'" % (field, chr(half_n))
+ self.assertLDAPQuery(expr, ou_dn, py_expr, ldap_objects)
+
+ expr = "(%s<=abc%s)" % (field, chr(half_n))
+ py_expr = "'{%s}' <= 'abc%s'" % (field, chr(half_n))
+ self.assertLDAPQuery(expr, ou_dn, py_expr, ldap_objects)
+
+ # String range
+ expr = "(&(%s>=abc%s)(%s<=abc%s))" % (field, chr(half_n-2),
+ field, chr(half_n+2))
+ py_expr = "'abc%s' <= '{%s}' <= 'abc%s'" % (chr(half_n-2),
+ field,
+ chr(half_n+2))
+ self.assertLDAPQuery(expr, ou_dn, py_expr, ldap_objects)
+
+ # Integers treated as string
+ expr = "(%s>=1)" % (field)
+ py_expr = "'{%s}' >= '1'" % (field)
+ self.assertLDAPQuery(expr, ou_dn, py_expr, ldap_objects)
+
+ # Windows returns nothing for invalid expressions. Expected fail on samba.
+ def test_invalid_expressions(self, field=None):
+ field = field or self.int_f
+ n = self.default_n
+ ou_dn, ldap_objects = self.make_test_objects(field, list(range(n)))
+ int_expressions = ["(%s>=abc)",
+ "(%s<=abc)",
+ "(%s=abc)"]
+
+ for expr in int_expressions:
+ expr = expr % (field)
+ self.assertLDAPQuery(expr, ou_dn, "False", ldap_objects)
+
+ def test_largeint_invalid_expressions(self):
+ self.test_invalid_expressions(self.largeint_f)
+
+ def test_enum_invalid_expressions(self):
+ self.test_invalid_expressions(self.enum_f)
+
+ def test_case_insensitive(self):
+ str_range = ["äbc"+str(n) for n in range(10)]
+ ou_dn, ldap_objects = self.make_test_objects(self.str_f, str_range)
+
+ expr = "(%s=äbc1)" % (self.str_f)
+ pyexpr = '"{%s}"=="äbc1"' % (self.str_f)
+ self.assertLDAPQuery(expr, ou_dn, pyexpr, ldap_objects)
+
+ expr = "(%s=ÄbC1)" % (self.str_f)
+ self.assertLDAPQuery(expr, ou_dn, pyexpr, ldap_objects)
+
+ # Check negative numbers can be entered and compared
+ def test_negative_cmp(self, field=None):
+ field = field or self.int_f
+ width = 6
+ around_zero = list(range(-width, width))
+ ou_dn, ldap_objects = self.make_test_objects(field, around_zero)
+
+ expr = "(%s>=-3)" % (field)
+ py_expr = "{%s} >= -3" % (field)
+ self.assertLDAPQuery(expr, ou_dn, py_expr, ldap_objects)
+
+ def test_negative_cmp_largeint(self):
+ self.test_negative_cmp(self.largeint_f)
+
+ def test_negative_cmp_enum(self):
+ self.test_negative_cmp(self.enum_f)
+
+ # Check behaviour on insertion and comparison of zero-prefixed numbers.
+ # Samba errors on insertion, Windows strips the leading zeroes.
+ def test_zero_prefix(self, field=None):
+ field = field or self.int_f
+
+ # Test comparison with 0-prefixed constants.
+ n = self.default_n
+ ou_dn, ldap_objects = self.make_test_objects(field, list(range(n)))
+
+ expr = "(%s>=00%d)" % (field, n/2)
+ py_expr = "{%s} >= %d" % (field, n/2)
+ self.assertLDAPQuery(expr, ou_dn, py_expr, ldap_objects)
+
+ # Delete the test OU so we don't mix it up with the next one.
+ self.samdb.delete(ou_dn, ["tree_delete:1"])
+
+ # Try inserting 0-prefixed numbers, check it fails.
+ zero_pref_nums = ['00'+str(num) for num in range(n)]
+ try:
+ ou_dn, ldap_objects = self.make_test_objects(field, zero_pref_nums)
+ except ldb.LdbError as e:
+ if e.args[0] != ERRCODE_INVALID_VALUE:
+ raise e
+ return
+
+ # Samba doesn't get this far - the exception is raised. Windows allows
+ # the insertion and removes the leading 0s as tested below.
+ # Either behaviour is fine.
+ print("LDAP allowed insertion of 0-prefixed nums for field " + field)
+
+ res = self.samdb.search(base=ou_dn,
+ scope=ldb.SCOPE_SUBTREE,
+ expression="(objectClass=user)")
+ returned_nums = [str(r.get(field)[0]) for r in res]
+ expect = [str(n) for n in range(n)]
+ self.assertEqual(set(returned_nums), set(expect))
+
+ expr = "(%s>=%d)" % (field, n/2)
+ py_expr = "{%s} >= %d" % (field, n/2)
+ for ldap_object in ldap_objects:
+ ldap_object[field] = int(ldap_object[field])
+
+ self.assertLDAPQuery(expr, ou_dn, py_expr, ldap_objects)
+
+ def test_zero_prefix_largeint(self):
+ self.test_zero_prefix(self.largeint_f)
+
+ def test_zero_prefix_enum(self):
+ self.test_zero_prefix(self.enum_f)
+
+ # Check integer overflow is handled as best it can be.
+ def test_int_overflow(self, field=None, of=None):
+ field = field or self.int_f
+ of = of or 2**31-1
+ width = 8
+
+ vals = list(range(of-width, of+width))
+ ou_dn, ldap_objects = self.make_test_objects(field, vals)
+
+ # Check ">=overflow" doesn't return vals past overflow
+ expr = "(%s>=%d)" % (field, of-3)
+ py_expr = "%d <= {%s} <= %d" % (of-3, field, of)
+ self.assertLDAPQuery(expr, ou_dn, py_expr, ldap_objects)
+
+ # "<=overflow" returns everything
+ expr = "(%s<=%d)" % (field, of)
+ py_expr = "True"
+ self.assertLDAPQuery(expr, ou_dn, py_expr, ldap_objects)
+
+ # Values past overflow should be negative
+ expr = "(&(%s<=%d)(%s>=0))" % (field, of, field)
+ py_expr = "{%s} <= %d" % (field, of)
+ self.assertLDAPQuery(expr, ou_dn, py_expr, ldap_objects)
+ expr = "(%s<=0)" % (field)
+ py_expr = "{%s} >= %d" % (field, of+1)
+ self.assertLDAPQuery(expr, ou_dn, py_expr, ldap_objects)
+
+ # Get the values back out and check vals past overflow are negative.
+ res = self.samdb.search(base=ou_dn,
+ scope=ldb.SCOPE_SUBTREE,
+ expression="(objectClass=user)")
+ returned_nums = [str(r.get(field)[0]) for r in res]
+
+ # Note: range(a,b) == [a..b-1] (confusing)
+ up_to_overflow = list(range(of-width, of+1))
+ negatives = list(range(-of-1, -of+width-2))
+
+ expect = [str(n) for n in up_to_overflow + negatives]
+ self.assertEqual(set(returned_nums), set(expect))
+
+ def test_enum_overflow(self):
+ self.test_int_overflow(self.enum_f, 2**31-1)
+
+ # Check cmp works on uSNChanged. We can't insert uSNChanged vals, they get
+ # added automatically so we'll just insert some objects and go with what
+ # we get.
+ def test_usnchanged(self):
+ field = "uSNChanged"
+ n = 10
+ # Note we can't actually set uSNChanged via LDAP (LDB ignores it),
+ # so the input val range doesn't matter here
+ ou_dn, _ = self.make_test_objects(field, list(range(n)))
+
+ # Get the assigned uSNChanged values
+ res = self.samdb.search(base=ou_dn,
+ scope=ldb.SCOPE_SUBTREE,
+ expression="(objectClass=user)")
+
+ # Our vals got ignored so make ldap_objects from search result
+ ldap_objects = [{'name': str(r['name'][0]),
+ field: int(r[field][0])}
+ for r in res]
+
+ # Get the median val and use as the number in the test search expr.
+ nums = [l[field] for l in ldap_objects]
+ nums = list(sorted(nums))
+ search_num = nums[int(len(nums)/2)]
+
+ expr = "(&(%s<=%d)(objectClass=user))" % (field, search_num)
+ py_expr = "{%s} <= %d" % (field, search_num)
+ self.assertLDAPQuery(expr, ou_dn, py_expr, ldap_objects)
+
+ expr = "(&(%s>=%d)(objectClass=user))" % (field, search_num)
+ py_expr = "{%s} >= %d" % (field, search_num)
+ self.assertLDAPQuery(expr, ou_dn, py_expr, ldap_objects)
+
+
+# If we're called independently then import subunit, get host from first
+# arg and run. Otherwise, subunit ran us so just set host from env.
+# We always try to run over LDAP rather than direct file, so that
+# search timings are not impacted by opening and closing the tdb file.
+if __name__ == "__main__":
+ from samba.tests.subunitrun import TestProgram
+ host = args[0]
+
+ if "://" not in host:
+ if os.path.isfile(host):
+ host = "tdb://%s" % host
+ else:
+ host = "ldap://%s" % host
+ TestProgram(module=__name__)
+else:
+ host = "ldap://" + os.getenv("SERVER")
diff --git a/python/samba/tests/compression.py b/python/samba/tests/compression.py
new file mode 100644
index 0000000..0b42100
--- /dev/null
+++ b/python/samba/tests/compression.py
@@ -0,0 +1,210 @@
+# Unix SMB/CIFS implementation.
+# Copyright © Catalyst
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+
+from samba.tests import TestCase
+from samba import compression
+
+
+TEST_DIR = "testdata/compression"
+
+
class BaseCompressionTest(TestCase):
    """Shared machinery for the LZXpress round-trip tests.

    Subclasses provide the ``compress``/``decompress`` callables and the
    ``compressed_dir``/``compressed_suffix`` attributes that locate
    their pre-compressed test files under TEST_DIR.
    """
    def round_trip(self, data, size_delta=0):
        """Compress, decompress, assert equality with original.

        If size_delta is None, no size is given to decompress. This
        should fail with the Huffman variant and succeed with plain.
        Otherwise size_delta is added to the given size; if negative,
        we'd expect a failure, with plain compression a positive delta
        will succeed.
        """

        compressed = self.compress(data)
        if size_delta is None:
            decompressed = self.decompress(compressed)
        else:
            decomp_size = len(data) + size_delta
            decompressed = self.decompress(compressed, decomp_size)

        # decompress always returns bytes, so compare a str original
        # against its encoded form (presumably the bindings encode str
        # input the same way -- confirm).
        if isinstance(data, str):
            data = data.encode()

        self.assertEqual(data, decompressed)
        return compressed

    def decompress_file(self, fn):
        # Decompress a stored compressed file and compare against its
        # known-good decompressed counterpart.
        decomp_fn = os.path.join(TEST_DIR,
                                 "decompressed",
                                 fn + ".decomp")
        comp_fn = os.path.join(TEST_DIR,
                               self.compressed_dir,
                               fn + self.compressed_suffix)

        with open(decomp_fn, 'rb') as f:
            decomp_expected = f.read()
        with open(comp_fn, 'rb') as f:
            comp = f.read()

        # The expected length is passed in: both codecs accept a target
        # size, and Huffman requires one.
        decompressed = self.decompress(comp, len(decomp_expected))

        self.assertEqual(decomp_expected, decompressed)
+
+
class LzxpressPlainCompressionTest(BaseCompressionTest):
    """Round-trip tests for the LZXpress plain (non-Huffman) codec."""
    # These are C-level functions stored as class attributes, so
    # self.compress(data) passes only data (no bound self).
    compress = compression.plain_compress
    decompress = compression.plain_decompress
    compressed_dir = "compressed-plain"
    compressed_suffix = ".lzplain"

    def test_round_trip_aaa_str(self):
        s = 'a' * 150000
        self.round_trip(s)

    def test_round_trip_aaa_bytes(self):
        s = b'a' * 150000
        self.round_trip(s)

    def test_round_trip_aaa_short(self):
        s = b'a' * 150000

        # this'll fail because the match for 'aaa...' will run
        # past the end of the buffer
        self.assertRaises(compression.CompressionError,
                          self.round_trip, s, -1)

    def test_round_trip_aaa_long(self):
        s = b'a' * 150000
        # this *won't* fail because although the data will run out
        # before the buffer is full, LZXpress plain does not care
        # about that.
        try:
            self.round_trip(s, 1)
        except compression.CompressionError as e:
            self.fail(f"failed to decompress with {e}")

    def test_round_trip_aaab_short(self):
        s = b'a' * 150000 + b'b'

        # this will *partially* succeed, because the buffer will fill
        # up at a break in the decompression (not mid-match), and
        # lzxpress plain does not mind that. However self.round_trip
        # also makes an assertion that the original data equals the
        # decompressed result, and it won't because the decompressed
        # result is one byte shorter.
        self.assertRaises(AssertionError,
                          self.round_trip, s, -1)

    def test_round_trip_aaab_unstated(self):
        s = b'a' * 150000 + b'b'

        # this will succeed, because with no target size given, we
        # guess a large buffer in the python bindings.
        try:
            self.round_trip(s)
        except compression.CompressionError as e:
            self.fail(f"failed to decompress with {e}")

    def test_round_trip_30mb(self):
        s = b'abc' * 10000000
        # This will decompress into a string bigger than the python
        # bindings are willing to speculatively allocate, so will fail
        # to decompress.
        with self.assertRaises(compression.CompressionError):
            self.round_trip(s, None)

        # but it will be fine if we use the length
        try:
            self.round_trip(s, 0)
        except compression.CompressionError as e:
            self.fail(f"failed to decompress with {e}")

    def test_files(self):
        # We don't go through the whole set, which are already tested
        # by lib/compression/tests/test_lzxpress_plain.c
        for fn in ("slow-33d90a24e70515b14cd0",
                   "midsummer-nights-dream.txt"):
            self.decompress_file(fn)

    def test_empty_round_trip(self):
        # not symmetrical with Huffman, this doesn't fail
        self.round_trip('')
+
+
class LzxpressHuffmanCompressionTest(BaseCompressionTest):
    """Round-trip tests for the LZXpress Huffman codec, which requires
    the exact decompressed size to be supplied."""
    # C-level functions stored as class attributes; self.compress(data)
    # passes only data (no bound self).
    compress = compression.huffman_compress
    decompress = compression.huffman_decompress
    compressed_dir = "compressed-huffman"
    compressed_suffix = ".lzhuff"

    def test_round_trip_aaa_str(self):
        s = 'a' * 150000
        self.round_trip(s)

    def test_round_trip_aaa_bytes(self):
        s = b'a' * 150000
        self.round_trip(s)

    def test_round_trip_aaa_short(self):
        s = b'a' * 150000

        # this'll fail because the match for 'aaa...' will run
        # past the end of the buffer
        self.assertRaises(compression.CompressionError,
                          self.round_trip, s, -1)

    def test_round_trip_aaa_long(self):
        s = b'a' * 150000

        # this'll fail because the data will run out before the buffer
        # is full.
        self.assertRaises(compression.CompressionError,
                          self.round_trip, s, 1)

    def test_round_trip_aaab_short(self):
        s = b'a' * 150000 + b'b'

        # this *could* be allowed to succeed, because even though we
        # give it the wrong size, we know the decompression will not
        # flow over the end of the buffer, The behaviour here appears
        # to be implementation dependent -- the decompressor has the
        # option of saying 'whatever' and continuing. We are probably
        # stricter than Windows.
        self.assertRaises(compression.CompressionError,
                          self.round_trip, s, -1)

    def test_round_trip_aaab_unstated(self):
        s = b'a' * 150000 + b'b'

        # For the Huffman algorithm, the length is really an essential
        # part of the compression data, and the bindings will reject a
        # call without it. This happens at the argument parsing stage,
        # so is a TypeError (i.e. wrongly typed function call), not a
        # CompressionError.
        self.assertRaises(TypeError,
                          self.round_trip, s, None)

    def test_files(self):
        # We don't go through the whole set, which are already tested
        # by lib/compression/tests/test_lzx_huffman.c
        for fn in ("slow-33d90a24e70515b14cd0",
                   "midsummer-nights-dream.txt"):
            self.decompress_file(fn)

    def test_empty_round_trip(self):
        # unlike plain, Huffman refuses to round-trip the empty string
        with self.assertRaises(compression.CompressionError):
            self.round_trip('')
diff --git a/python/samba/tests/conditional_ace_assembler.py b/python/samba/tests/conditional_ace_assembler.py
new file mode 100644
index 0000000..486601b
--- /dev/null
+++ b/python/samba/tests/conditional_ace_assembler.py
@@ -0,0 +1,227 @@
+# Unix SMB/CIFS implementation.
+# Copyright © Catalyst IT 2023
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+"""Fine-grained control over conditional ACE contents.
+
+This deliberately allows you to do broken things that SDDL doesn't.
+
+- token sequences that make no real sense
+- sequences that make sense which SDDL can't encode
+- strings that aren't proper utf-16
+- etc.
+"""
+
+import struct
+from samba.dcerpc import security, conditional_ace as ca
+from samba.ndr import ndr_pack
+
+
class Composite:
    """A composite token: member tokens concatenated behind a one-byte
    token tag and a little-endian 32-bit byte-length header."""
    token = ca.CONDITIONAL_ACE_TOKEN_COMPOSITE

    def __init__(self, *tokens):
        # Coerce each member through the same DWIM rules as assemble().
        self.members = [dwim_one_token(t) for t in tokens]

    def __bytes__(self):
        payload = b''.join(bytes(m) for m in self.members)
        return (bytes([self.token]) +
                struct.pack("<I", len(payload)) +
                payload)
+
+
class Int:
    """A conditional-ACE integer literal: a 64-bit value followed by the
    sign and base metadata bytes (defaults match SDDL's plain decimal)."""
    def __init__(self, value,
                 bits=ca.CONDITIONAL_ACE_TOKEN_INT64,
                 base=ca.CONDITIONAL_ACE_INT_BASE_10,
                 sign=ca.CONDITIONAL_ACE_INT_SIGN_NONE):
        self.value = value
        self.bits = int(bits)
        self.base = int(base)
        self.sign = int(sign)

    def __bytes__(self):
        # tag byte, signed 64-bit value, then sign and base bytes.
        return struct.pack('<BqBB', self.bits, self.value,
                           self.sign, self.base)
+
+
class String:
    """A unicode string token.

    A str value is encoded as UTF-16-LE; any other iterable of byte
    values is taken verbatim, which allows inserting arbitrary (even
    invalid UTF-16) raw bytes.
    """
    token = ca.CONDITIONAL_ACE_TOKEN_UNICODE

    def __init__(self, value):
        if isinstance(value, str):
            value = value.encode('utf-16-le')
        self.value = list(value)

    def __bytes__(self):
        payload = bytes(self.value)
        return struct.pack('<BI', self.token, len(payload)) + payload
+
+
class LocalAttr(String):
    # token type for a bare local attribute name (e.g. "x")
    token = ca.CONDITIONAL_ACE_LOCAL_ATTRIBUTE


class UserAttr(String):
    # token type for an @User. attribute name
    token = ca.CONDITIONAL_ACE_USER_ATTRIBUTE


class DeviceAttr(String):
    # token type for an @Device. attribute name
    token = ca.CONDITIONAL_ACE_DEVICE_ATTRIBUTE


class ResourceAttr(String):
    # token type for an @Resource. attribute name
    token = ca.CONDITIONAL_ACE_RESOURCE_ATTRIBUTE
+
+
class ByteString:
    """An octet-string token.

    Takes an iterable of byte values (0-255), or a str which is encoded
    as UTF-8.
    """
    token = ca.CONDITIONAL_ACE_TOKEN_OCTET_STRING

    def __init__(self, value):
        if isinstance(value, str):
            value = value.encode()
        # bytes() itself raises ValueError for any value outside 0-255,
        # so no separate range check is needed. The previous
        # max()/min() check was unreachable for bad values and crashed
        # with "arg is an empty sequence" for a legitimate empty string.
        self.value = bytes(value)

    def __bytes__(self):
        header = struct.pack('<BI', self.token, len(self.value))
        return header + self.value
+
+
class SID:
    """A SID literal token, wrapping the NDR-packed security SID."""
    token = ca.CONDITIONAL_ACE_TOKEN_SID

    def __init__(self, sidstring):
        # dom_sid, matching the rest of this module; 'domsid' is not a
        # name the security bindings provide.
        self.sid = security.dom_sid(sidstring)

    def __bytes__(self):
        value = ndr_pack(self.sid)
        # '<BI', not 'B<I': the byte-order character must come first in
        # a struct format string ('B<I' raises struct.error at runtime).
        header = struct.pack('<BI', self.token, len(value))
        return header + value
+
+
class Token:
    """Wrap a raw token byte for direct insertion, like
    Token(ca.CONDITIONAL_ACE_TOKEN_COMPOSITE)
    """
    def __init__(self, v):
        self.token = v

    def __bytes__(self):
        return bytes((self.token,))
+
+
def _add_tokens():
    """Export every CONDITIONAL_ACE_TOKEN_* constant as a module-level
    Token object named after its suffix (EQUAL, AND, ...)."""
    prefix = 'CONDITIONAL_ACE_TOKEN_'
    for attr in dir(ca):
        if attr.startswith(prefix):
            globals()[attr[len(prefix):]] = Token(getattr(ca, attr))

_add_tokens()
+
+
def dwim_one_token(t):
    """Coerce a plain Python value into the matching token object.

    ints become Int, strs become String, bytes become ByteString, and
    tuples become Composites (recursively). Anything else is assumed to
    already be a token and is passed through unchanged.
    """
    for pytype, wrapper in ((int, Int),
                            (str, String),
                            (bytes, ByteString)):
        if isinstance(t, pytype):
            return wrapper(t)
    if isinstance(t, tuple):
        return Composite(*t)
    return t
+
+
def assemble(*tokens):
    """Assemble tokens into a conditional ACE program blob.

    The result is the 'artx' magic followed by each token's encoding,
    zero-padded to a multiple of four bytes.
    """
    program = b'artx'
    # Warn about a likely mistake: a single list/tuple/set argument
    # becomes one composite token, which is rarely what was meant.
    # (We must inspect tokens[0] -- 'tokens' itself is always a tuple,
    # so testing it would fire for every single-argument call.)
    if len(tokens) == 1 and isinstance(tokens[0], (list, tuple, set)):
        print("WARNING: single argument container will become a composite. "
              "you might have meant 'assemble(*args)', not 'assemble(args)'")

    for t in tokens:
        t = dwim_one_token(t)
        program += bytes(t)

    # Pad with up to three zero bytes, then trim back to an exact
    # multiple of four. Slicing with [:-(len & 3)] is wrong: when the
    # length is already aligned that is [:-0] == [:0], an empty blob.
    program += b'\x00\x00\x00'
    program = program[:len(program) & ~3]

    return program
+
+
def assemble_ace(tokens=(),
                 type=security.SEC_ACE_TYPE_ACCESS_ALLOWED_CALLBACK,
                 trustee=None,
                 flags=None,
                 object=None,
                 access_mask=None):
    """Build a security.ace, attaching the assembled conditional-ACE
    program as the coda.

    `type` may be a SEC_ACE_TYPE_* constant or one of the SDDL type
    strings below. Unset optional fields keep whatever defaults
    security.ace() provides. (The tokens default is a tuple rather
    than the shared-mutable-default [] -- it is only read, but the
    tuple is safer.)
    """
    type_strings = {
        'XA': security.SEC_ACE_TYPE_ACCESS_ALLOWED_CALLBACK,
        'XD': security.SEC_ACE_TYPE_ACCESS_DENIED_CALLBACK,
        'ZA': security.SEC_ACE_TYPE_ACCESS_ALLOWED_CALLBACK_OBJECT,
        # this can also make plain ACEs
        'A': security.SEC_ACE_TYPE_ACCESS_ALLOWED,
        'D': security.SEC_ACE_TYPE_ACCESS_DENIED,
    }

    a = security.ace()
    a.type = type_strings.get(type, type)
    if trustee is not None:
        a.trustee = trustee
    if flags is not None:
        a.flags = flags
    if object is not None:
        a.object = object
    # Previously access_mask was accepted but silently dropped, so e.g.
    # the trailing allow ACE in assemble_sd() had a mask of 0.
    if access_mask is not None:
        a.access_mask = access_mask
    if tokens:
        a.coda = assemble(*tokens)
    return a
+
+
def assemble_sd(base_sddl='D:',
                add_trailing_allow_ace=False,
                domain_sid=None,
                **ace_args):
    """Make a security descriptor using the base_sddl, then add the
    assembled conditional ACE on the end of its DACL. If
    add_trailing_allow_ace is true, an allow ace matching
    '(A;;0x1ff;;;WD)' is added to the end, allowing successful deny
    ACEs to be detected.
    """
    if domain_sid is None:
        domain_sid = security.dom_sid('S-1-2-3-4')

    sd = security.descriptor.from_sddl(base_sddl, domain_sid)
    sd.dacl_add(assemble_ace(**ace_args))

    if add_trailing_allow_ace:
        # A deny ACE only shows its effect if a wide-ranging allow ACE
        # follows it; otherwise access fails regardless.
        sd.dacl_add(
            assemble_ace(type='A',
                         trustee=security.dom_sid(security.SID_WORLD),
                         access_mask=security.SEC_FILE_ALL))

    return sd
diff --git a/python/samba/tests/conditional_ace_bytes.py b/python/samba/tests/conditional_ace_bytes.py
new file mode 100644
index 0000000..f7e7cfe
--- /dev/null
+++ b/python/samba/tests/conditional_ace_bytes.py
@@ -0,0 +1,95 @@
+# Unix SMB/CIFS implementation.
+# Copyright © Catalyst IT 2023
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+"""Fine-grained control over conditional ACE contents.
+
+This deliberately allows you to do broken things that SDDL doesn't.
+
+- token sequences that make no real sense
+- sequences that make sense which SDDL can't encode
+- strings that aren't proper utf-16
+- etc.
+"""
+
+from samba.tests import DynamicTestCase, TestCase
+from samba.tests import conditional_ace_assembler as caa
+from samba.tests.token_factory import token as Token
+from samba.dcerpc import security
+from samba.ndr import ndr_pack
+from samba import NTSTATUSError
+from samba.ntstatus import NT_STATUS_ACCESS_DENIED
+from samba.colour import colourdiff
+
+
class ConditionalAceBytesBase(TestCase):
    """Compare hand-assembled conditional ACE byte blobs against the
    bytes that Samba's SDDL parser produces for equivalent expressions,
    optionally following up with an access check."""
    maxDiff = 0

    @classmethod
    def setUpClass(cls):
        cls.domain_sid = security.dom_sid("S-1-2-3")
        cls.token = Token(sids=['WD', 'AA'],
                          device_claims={"colour": ["orange", "blue"]})

    @classmethod
    def setUpDynamicTestCases(cls):
        # Each data row is (assembly, sddl, access_desired, name).
        for i, row in enumerate(cls.data):
            assembly, sddl, access_desired, name = row
            if name is None:
                name = sddl
            name = f'{i+1:03}-{name}'
            if len(name) > 150:
                name = f"{name[:125]}+{len(name) - 125}-more-characters"

            cls.generate_dynamic_test('test_assembly',
                                      name, assembly, sddl, access_desired)

    def _test_assembly_with_args(self, assembly, sddl_ref, access_desired):
        sd_bytes = caa.assemble(*assembly)
        if sddl_ref is None:
            raise ValueError("for this test we need reference SDDL")

        sddl_ref_full = f'D:(XA;;;;;WD;{sddl_ref})'
        sd_ref = security.descriptor.from_sddl(sddl_ref_full, self.domain_sid)
        sd_ref_bytes = ndr_pack(sd_ref)
        # Everything from the 'artx' magic onwards is the conditional
        # ACE program -- the part we assembled by hand.
        header, artx, conditions = sd_ref_bytes.partition(b'artx')
        ref_bytes = artx + conditions
        print(colourdiff(sd_bytes, ref_bytes))

        self.assertEqual(sd_bytes, ref_bytes)

        if access_desired is not None:
            try:
                # was access_check(sd, ...), but 'sd' is not defined
                # anywhere in this method: use the parsed reference
                # descriptor.
                # NOTE(review): access_check may need to come from
                # samba.security rather than samba.dcerpc.security --
                # confirm against the imports of this file.
                granted = security.access_check(sd_ref, self.token,
                                                access_desired)
            except NTSTATUSError as e:
                if e.args[0] != NT_STATUS_ACCESS_DENIED:
                    raise
                if self.allow:
                    self.fail(f"{assembly}: access was denied")
                # Denied, as a deny test expects. Returning here also
                # avoids falling through to the assertEqual below with
                # 'granted' unbound.
                return
            self.assertEqual(granted, access_desired)

        else:
            if not self.allow:
                self.fail(f"{assembly}: unexpected access")
+
@DynamicTestCase
class ConditionalAceAssemblySDDL(ConditionalAceBytesBase):
    """Allow-side cases: each row is (assembly, sddl, access_desired,
    name), where the assembly is a postfix token sequence whose bytes
    must equal what the SDDL parser emits for the reference expression."""
    allow = True
    data = [
        ((caa.LocalAttr("x"), 41, caa.EQUAL,
          caa.LocalAttr("x"), caa.DeviceAttr("x"), caa.GREATER_THAN,
          caa.AND),
         "((x == 41) && (x > @device.x))",
         None, None),
    ]
diff --git a/python/samba/tests/conditional_ace_claims.py b/python/samba/tests/conditional_ace_claims.py
new file mode 100644
index 0000000..881f875
--- /dev/null
+++ b/python/samba/tests/conditional_ace_claims.py
@@ -0,0 +1,901 @@
+# Unix SMB/CIFS implementation.
+# Copyright © Catalyst IT 2023
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for Conditional ACEs, claims, and security tokens."""
+
+import random
+from samba.dcerpc import security
+from samba.security import access_check
+from samba.tests.token_factory import token as Token
+from samba.tests.token_factory import list_to_claim
+from samba.dcerpc.security import CLAIM_SECURITY_ATTRIBUTE_RELATIVE_V1
+from samba.tests import TestCase, DynamicTestCase, get_env_dir
+from samba.colour import c_RED
+import os
+from samba import NTSTATUSError
+from samba.ntstatus import NT_STATUS_ACCESS_DENIED
+
+DEFAULT_ACCESS = security.SEC_FILE_ALL
+DEFAULT_ACCESS2 = (security.SEC_STD_READ_CONTROL |
+ security.SEC_ADS_LIST |
+ security.SEC_ADS_READ_PROP)
+
+
def write_c_test_on_failure(f):
    """This is a function decorator that writes a function for
    /libcli/security/tests/test_run_conditional_ace.c that runs the
    equivalent test. Why?! Because iterating over a test to debug the
    failure is slower in Python tests, but adding new tests is faster
    in Python. So the flow goes like this:

    1. add python tests, run them
    2. if nothing fails, goto 1
    3. copy the test_something() text into test_run_conditional_ace.c,
       rename it, and add it to main().
    4. `make bin/test_run_conditional_ace && rr bin/test_run_conditional_ace`
    5. `rr replay`

    and you're away. You can also just work from the Python, but a few
    runs of `make -j` after touching something in libcli/security will
    make you see why this exists.

    You might be thinking that this surely took longer to write than
    waiting 100 times for a 30 second compile, but that misses the
    point that debugging needs to be ergonomic and fun.
    """
    from json import dumps as q  # JSON quoting is C quoting, more or less

    # NOTE(review): this decorates bound test methods, so the first
    # positional argument ('name') is actually 'self'; it is passed
    # straight through to f and never used as a name here.
    def wrapper(name, token, sddl, access_desired):
        try:
            f(name, token, sddl, access_desired)
        except Exception:
            # On any failure, print an equivalent C test body built from
            # the token dict and SDDL, then re-raise the original error.
            print()
            print('static void test_something(void **state)')
            print('{')
            print('\tINIT();')
            for s in ('sids', 'device_sids'):
                if s in token:
                    macro = ('user_sids' if s == 'sids' else s).upper()
                    v = ', '.join(q(x) for x in token[s])
                    print(f'\t{macro}({v});')
            for s in ('user_claims', 'device_claims'):
                if s in token:
                    macro = s.upper()
                    # NOTE: this loop rebinds 'name' (harmless -- the
                    # outer value is not used again below).
                    for name, values in token[s].items():
                        if isinstance(values,
                                      CLAIM_SECURITY_ATTRIBUTE_RELATIVE_V1):
                            # pre-built claim objects can't be rendered
                            # as C literals; emit a placeholder.
                            v = '...'
                        else:
                            if not isinstance(values, (list, tuple)):
                                values = [values]
                            v = ', '.join(q(x) for x in values)
                            v = q(f"{v}")
                        print(f'\t{macro}({q(name)}, {v});')
            print(f'\tSD({q(sddl)});')
            # Choose the C-side assertion macro from the wrapped
            # function's own name.
            if 'allow' in f.__name__:
                print(f'\tALLOW_CHECK({access_desired:#x});')
            else:
                print(f'\tDENY_CHECK({access_desired:#x});')
            print('}')
            print()
            raise
    return wrapper
+
+
class ConditionalAceClaimsBase(TestCase):
    """Drive access_check() over (token, SDDL, desired-access) rows.

    Subclasses set ``allow`` and ``data``; each row becomes a dynamic
    test expecting the access check to succeed (test_allow) or to be
    denied (test_deny).
    """
    maxDiff = 0

    @classmethod
    def setUpDynamicTestCases(cls):
        cls.domain_sid = security.dom_sid("S-1-22-333-4444")
        seen = set()

        for i, row in enumerate(cls.data):
            token, sddl, access_desired = row
            name = f'{i+1:03}-{token}-{sddl}-{access_desired}'
            if len(name) > 150:
                name = f"{name[:125]}+{len(name) - 125}-more-characters"

            # NOTE(review): a duplicate name is reported but still
            # added, so the later row silently replaces the earlier
            # dynamic test.
            if name in seen:
                print(f"seen {row} after {len(seen)}")
            seen.add(name)

            if cls.allow:
                cls.generate_dynamic_test('test_allow',
                                          name, token, sddl, access_desired)
            else:
                cls.generate_dynamic_test('test_deny',
                                          name, token, sddl, access_desired)

        # Optionally dump all SDDL strings as fuzzing seeds.
        fuzz_seed_dir = get_env_dir('SAMBA_WRITE_FUZZ_STRINGS_DIR')
        if fuzz_seed_dir is not None:
            cls._write_sddl_strings_for_fuzz_seeds(fuzz_seed_dir)

    @classmethod
    def _write_sddl_strings_for_fuzz_seeds(cls, fuzz_seed_dir):
        """write all the SDDL strings we have into a directory as individual
        files, using a naming convention beloved of fuzzing engines.

        To run this set an environment variable; see
        cls.setUpDynamicTestCases(), above.

        Note this will only run in subclasses annotated with @DynamicTestCase.
        """
        from hashlib import md5
        for _, sddl, _ in cls.data:
            name = md5(sddl.encode()).hexdigest()
            with open(os.path.join(fuzz_seed_dir, name), 'w') as f:
                f.write(sddl)

    @write_c_test_on_failure
    def _test_allow_with_args(self, _token, sddl, access_desired):
        # A dict token spec is materialised through the token factory;
        # an already-built token object is used as-is.
        if isinstance(_token, dict):
            token = Token(**_token)
        else:
            token = _token
        sd = security.descriptor.from_sddl(sddl, self.domain_sid)
        try:
            granted = access_check(sd, token, access_desired)
        except NTSTATUSError as e:
            print(c_RED(sddl))
            print(c_RED(_token))
            if e.args[0] != NT_STATUS_ACCESS_DENIED:
                raise
            self.fail("access was denied")

        self.assertEqual(granted, access_desired)

    @write_c_test_on_failure
    def _test_deny_with_args(self, token, sddl, access_desired):
        if isinstance(token, dict):
            token = Token(**token)
        sd = security.descriptor.from_sddl(sddl, self.domain_sid)
        try:
            granted = access_check(sd, token, access_desired)
        except NTSTATUSError as e:
            # access denied is the expected outcome for a deny test.
            if e.args[0] == NT_STATUS_ACCESS_DENIED:
                return
            self.fail(f"failed with {e}, not access denied")

        self.fail("access allowed")
+
+
@DynamicTestCase
class AllowTests(ConditionalAceClaimsBase):
    """Rows are (token-spec dict, SDDL, desired access); every row is
    expected to result in the desired access being granted."""
    name = "allow"
    allow = True
    data = [
        ( # device_claims
            {'sids': ['WD', 'AA'],
             'device_claims': {"colour":["orange", "blue"]}},
            ('D:(XA;;0x1f;;;AA;'
             '(@Device.colour == {"orange", "blue"}))'),
            0x10),
        ( # device_claims, int >=
            {'sids': ['WD', 'AA'],
             'device_claims': {"legs": 4}},
            ('D:(XA;;0x1f;;;AA;(@Device.legs >= 1))'),
            0x10),
        ( # device_claims, int
            {'sids': ['WD', 'AA'],
             'device_claims': {"legs": 1}},
            ('D:(XA;;0x1f;;;AA;(@Device.legs == 1))'),
            0x10),
        ( # device_member_of && member_of
            {'sids': ['WD', 'AA'],
             'device_sids': ['BA', 'BG']},
            ("D:(XA;;0x1f;;;AA;"
             "(Device_Member_of{SID(BA)} && Member_of{SID(WD)}))"),
            0x10),
        ( # device_member_of || member_of, both true
            {'sids': ['WD', 'AA'],
             'device_sids': ['BA', 'BG']},
            ("D:(XA;;0x1f;;;AA;"
             "(Device_Member_of{SID(AA)} || Member_of{SID(WD)}))"),
            0x10),
        ( # device_member_of || member_of, second true
            {'sids': ['WD', 'AA'],
             'device_sids': ['BA', 'BG']},
            ("D:(XA;;0x1f;;;AA;"
             "(Device_Member_of{SID(AA)} || Member_of{SID(WD)}))"),
            0x10),
        ( # device_member_of || member_of, first true
            {'sids': ['WD', 'AA'],
             'device_sids': ['BA', 'BG']},
            ("D:(XA;;0x1f;;;AA;"
             "(Device_Member_of{SID(BG)} || Member_of{SID(WR)}))"),
            0x10),
        ( # single SID, Member_of_Any
            {'sids': ['S-1-222-333']},
            ("D:(XA;;0x1ff;;;S-1-222-333;(Member_of_Any{SID(S-1-222-333)}))"),
            0x1),
        ({'sids': ['S-1-1-0']}, "O:S-1-1-0D:(A;;0x1ff;;;WD)", DEFAULT_ACCESS),
        ({'sids': ['S-1-1-0']},
         "O:S-1-1-0D:(XA;;0x1ff;;;WD;(Member_of{SID(S-1-1-0)}))",
         DEFAULT_ACCESS),
        ({'sids': ['S-1-1-0', 'S-1-222-333']},
         "O:S-1-1-0D:(XA;;0x1ff;;;WD;(Member_of{SID(S-1-1-0)}))",
         DEFAULT_ACCESS),
        ({'sids': ['WD', 'S-1-222-333']},
         "O:S-1-1-0D:(XA;;0x1ff;;;WD;(Member_of{SID(S-1-1-0)}))",
         DEFAULT_ACCESS),
        ( # a single SID, not a composite
            {'sids': ['S-1-1-0', 'S-1-222-333']},
            "O:S-1-1-0D:(XA;;0x1ff;;;WD;(Member_of SID(S-1-1-0)))",
            DEFAULT_ACCESS),
        ( # a single SID, not a composite, without space after Member_of
            {'sids': ['S-1-1-0', 'S-1-222-333']},
            "O:S-1-1-0D:(XA;;0x1ff;;;WD;(Member_of\nSID(S-1-1-0)))",
            DEFAULT_ACCESS),
        ( # a single SID, not a composite, Member_of_Any
            {'sids': ['S-1-1-0', 'S-1-222-333']},
            "O:S-1-1-0D:(XA;;0x1ff;;;WD;(Member_of_Any SID(S-1-1-0)))",
            DEFAULT_ACCESS),
        ( # Member_of_Any
            {'sids': ['S-1-1-0', 'S-1-222-333']},
            "O:S-1-1-0D:(XA;;0x1;;;WD;(Member_of_Any{SID(AS),SID(WD)}))",
            0x1),
        ({'sids': ['S-1-1-0', 'S-1-222-333']},
         ("O:S-1-1-0D:"
          "(XA;;0x1ff;;;WD;(Member_of_Any{SID(S-1-1-0), SID(S-1-222-333)}))"),
         DEFAULT_ACCESS),
        ({'sids': ['S-1-1-0', 'S-1-222-333']},
         ("O:S-1-1-0D:"
          "(XA;;0x1ff;;;WD;(Member_of_Any{SID(S-1-1-334), SID(S-1-222-333)}))"),
         DEFAULT_ACCESS),
        ({'sids': ['S-1-1-0', 'S-1-222-333']},
         ("D:(XA;;0x1ff;;;WD;(Member_of_Any{SID(S-1-222-333)}))"),
         DEFAULT_ACCESS),
        ({'sids': ['S-1-77-88-99', 'AA']},
         "D:(XA;;0x1f;;;AA;(Member_of{SID(S-1-77-88-99)}))",
         0x10),
        ( # device_member_of
            {'sids': ['WD', 'AA'],
             'device_sids': ['BA', 'BG']},
            "D:(XA;;0x1f;;;AA;(Device_Member_of{SID(BA)}))",
            0x10),
        ( # device_member_of
            {'sids': ['WD', 'AA'],
             'device_sids': ['BA', 'BG']},
            "D:(XA;;0x1f;;;AA;(Device_Member_of{SID(BA)}))",
            0x10),
        ( # not (!) member_of
            {'sids': ['WD', 'AA'],
             'device_sids': ['BA', 'BG']},
            "D:(XA;;0x1f;;;AA;(! (Member_of{SID(BA)})))",
            0x10),
        ( # not not (!!) member_of
            {'sids': ['WD', 'AA'],
             'device_sids': ['BA', 'BG']},
            "D:(XA;;0x1f;;;AA;(!(! (Member_of{SID(AA)}))))",
            0x10),
        ( # not * 8 (!!!! !!!!) member_of
            {'sids': ['WD', 'AA'],
             'device_sids': ['BA', 'BG']},
            "D:(XA;;0x1f;;;AA;(!(!(!(!(!(!(!(!( Member_of{SID(AA)}))))))))))",
            0x10),
        ( # not * 9 (!!! !!! !!!) member_of
            {'sids': ['WD', 'AA'],
             'device_sids': ['BA', 'BG']},
            "D:(XA;;0x1f;;;AA;(!(!(!( !(!(!( !(!(!(Member_of{SID(BA)})))))))))))",
            0x10),
        ( # not * 9 (!!! !!! !!!) Not_Member_of
            {'sids': ['WD', 'AA'],
             'device_sids': ['BA', 'BG']},
            ("D:(XA;;0x1f;;;AA;"
             "(!(!(!( !(!(!( !(!(!( Not_Member_of{SID(AA)})))))))))))"),
            0x10),
        ( #resource ACE
            {'sids': ['WD', 'AA'],
             'device_claims': {"colour": ["blue"]}},
            ('D:(XA;;0x1f;;;AA;(@Device.colour Contains @Resource.colour))'
             'S:(RA;;;;;WD;("colour",TS,0,"blue"))'),
            0x10),
        ( #resource ACE ==
            {'sids': ['WD', 'AA'],
             'device_claims': {"colour": ["blue"]}},
            ('D:(XA;;0x1f;;;AA;(@Device.colour == @Resource.colour))'
             'S:(RA;;;;;WD;("colour",TS,0,"blue"))'),
            0x10),
        ( # device_claims, comparing single to single
            {'sids': ['WD', 'AA'],
             'device_claims': {"colour": "blue"}},
            ('D:(XA;;0x1f;;;AA;(@Device.colour == "blue"))'),
            0x10),
        ( # device_claims == user_claims
            {'sids': ['WD', 'AA'],
             'user_claims': {"colour": "blue"},
             'device_claims': {"colour": "blue"}},
            ('D:(XA;;0x1f;;;AA;(@User.colour == @Device.colour))'),
            0x10),
        ( #resource ACE multi
            {'sids': ['WD', 'AA'],
             'device_claims': {"colour": ["blue", "red"]}},
            ('D:(XA;;0x1f;;;AA;(@Device.colour Contains @Resource.colour))'
             'S:(RA;;;;;WD;("colour",TS,0,"blue", "red"))'),
            0x10),
    ]
+
+
@DynamicTestCase
class DenyTests(ConditionalAceClaimsBase):
    """Rows are (token-spec dict, SDDL, desired access); every row is
    expected to end in access being denied."""
    # NOTE(review): 'name' is "allow" here too -- this looks like a
    # copy-paste from AllowTests; confirm nothing keys on it.
    name = "allow"
    allow = False
    data = [
        ({}, "", DEFAULT_ACCESS),
        ({'sids': ['S-1-1-0']}, "O:S-1-1-0D:(A;;0x1fe;;;WD)", DEFAULT_ACCESS),
        ({}, "O:WDD:(A;;GACR;;;CO)", DEFAULT_ACCESS),
        ({'sids': ['S-1-1-0', 'S-1-222-444']},
         ("D:(XA;;0x1ff;;;WD;(Member_of_Any{SID(S-1-222-333)}))"),
         0x1),
        ( # Without explicit 'everyone' SID in list of SIDs, this is
          # denied because the ACE SID 'WD' doesn't match.
            {'sids': ['S-1-222-333']},
            ("D:(XA;;0x1ff;;;WD;(Member_of_Any{SID(S-1-222-333)}))"),
            0x1),
        ( # device_member_of && member_of, both false
            {'sids': ['WD', 'AA'],
             'device_sids': ['BA', 'BG']},
            ("D:(XA;;0x1f;;;AA;"
             "(Device_Member_of{SID(AA)} && Member_of{SID(WR)}))"),
            0x10),
        ( # device_member_of && member_of, first false
            {'sids': ['WD', 'AA'],
             'device_sids': ['BA', 'BG']},
            ("D:(XA;;0x1f;;;AA;"
             "(Device_Member_of{SID(AA)} && Member_of{SID(WD)}))"),
            0x10),
        ( # device_member_of && member_of, second false
            {'sids': ['WD', 'AA'],
             'device_sids': ['BA', 'BG']},
            ("D:(XA;;0x1f;;;AA;"
             "(Device_Member_of{SID(BA)} && Member_of{SID(BA)}))"),
            0x10),
        ( # device_member_of || member_of, both false
            {'sids': ['WD', 'AA'],
             'device_sids': ['BA', 'BG']},
            ("D:(XA;;0x1f;;;AA;"
             "(Device_Member_of{SID(AA)} || Member_of{SID(WR)}))"),
            0x10),
        ( # device_claims, comparing composite to single
            {'sids': ['WD', 'AA'],
             'device_claims': {"colour": ["orange", "blue"]}},
            ('D:(XA;;0x1f;;;AA;(@Device.colour == "blue"))'),
            0x10),
        ( # not (!) member_of
            {'sids': ['WD', 'AA'],
             'device_sids': ['BA', 'BG']},
            "D:(XA;;0x1f;;;AA;(! (Member_of{SID(AA)})))",
            0x10),
        ( # not not (!!) member_of
            {'sids': ['WD', 'AA'],
             'device_sids': ['BA', 'BG']},
            "D:(XA;;0x1f;;;AA;(!(!( Member_of{SID(BA)}))))",
            0x10),
        ( # not * 8 (!!!! !!!!) member_of
            {'sids': ['WD', 'AA'],
             'device_sids': ['BA', 'BG']},
            "D:(XA;;0x1f;;;AA;(!(!( !(!( !(!( !(!(Member_of{SID(BA)}))))))))))",
            0x10),
        ( # not * 3 (!!!) member_of
            {'sids': ['WD', 'AA'],
             'device_sids': ['BA', 'BG']},
            "D:(XA;;0x1f;;;AA;(!(!(!(Member_of{SID(AA)})))))",
            0x10),
        ( # not * 3 (!!!) Not_Member_of
            {'sids': ['WD', 'AA'],
             'device_sids': ['BA', 'BG']},
            "D:(XA;;0x1f;;;AA;(!(!(!(Not_Member_of{SID(BA)})))))",
            0x10),
    ]
+
+
+def _int_range(n, n_dupes=0, random_seed=None):
+ """Makes a list of stringified integers.
+
+ If n_unique is specified and less than n, there will be that many unique
+ values (and hence some duplicates). If random_seed is set, the list will be
+ shuffled.
+ """
+ claims = [str(x) for x in range(n)]
+
+ if random_seed is None:
+ if n_dupes:
+ claims *= 1 + (n + n_dupes) // n
+ return claims[:n + n_dupes]
+
+ random.seed(random_seed)
+ for i in range(n_dupes):
+ # this purposefully skews the distribution.
+ claims.append(random.choice(claims))
+
+ random.shuffle(claims)
+ return claims
+
+
def _str_range(n, n_dupes=0, random_seed=None, mix_case=False):
    """Create a list of strings with somewhat controllable disorder."""
    claims = ['a' + i for i in _int_range(n, n_dupes, random_seed)]

    if mix_case:
        if random_seed is None:
            random.seed(0)
        # One random() draw per element, upper-casing about half.
        claims = [c.upper() if random.random() < 0.5 else c
                  for c in claims]

    return claims
+
+
def claim_str_range(*args, name="foo", case_sensitive=False, **kwargs):
    """String value range as a CLAIM_SECURITY_ATTRIBUTE_RELATIVE_V1"""
    return list_to_claim(name,
                         _str_range(*args, **kwargs),
                         case_sensitive=case_sensitive)
+
+
def claim_int_range(*args, name="foo", case_sensitive=False, **kwargs):
    """Int value range as a CLAIM_SECURITY_ATTRIBUTE_RELATIVE_V1"""
    return list_to_claim(name,
                         _int_range(*args, **kwargs),
                         case_sensitive=case_sensitive)
+
+
def ra_str_range(*args, name="foo", case_sensitive=False, **kwargs):
    """Make a string claim as a resource attribute"""
    values = '","'.join(_str_range(*args, **kwargs))
    flags = 2 if case_sensitive else 0
    return f'(RA;;;;;WD;("{name}",TS,{flags},"{values}"))'
+
+
def ra_int_range(*args, name="foo", unsigned=False, **kwargs):
    """Return an integer claim range as a resource attribute."""
    values = ','.join(_int_range(*args, **kwargs))
    claim_type = "TU" if unsigned else "TI"
    return f'(RA;;;;;WD;("{name}",{claim_type},0,{values}))'
+
+
def composite_int(*args, **kwargs):
    """Integer conditional ACE composite"""
    return '{%s}' % ', '.join(_int_range(*args, **kwargs))
+
+
def composite_str(*args, **kwargs):
    """String conditional ACE composite"""
    return '{"%s"}' % '", "'.join(_str_range(*args, **kwargs))
+
+
@DynamicTestCase
class ConditionalAceLargeComposites(ConditionalAceClaimsBase):
    """Here we are dynamically generating claims and composites with large numbers
    of members, and using them in comparisons. Sometimes the comparisons are
    meant to fail, and sometimes not.
    """
    # NOTE(review): unittest truncates any failure diff longer than
    # maxDiff, so 0 suppresses diff output entirely; set to None for
    # unlimited diffs when debugging a failure.
    maxDiff = 0

    @classmethod
    def setUpDynamicTestCases(cls):
        """Generate one test method per row of cls.data.

        Each row is (name, allow, token, sddl):
          name:  human-readable label (a 1-based index prefix is added
                 here so generated tests sort in table order)
          allow: True if access should be granted, False if denied
          token: security-token description; a default sid list is
                 supplied for rows that do not set one
          sddl:  the security descriptor string to evaluate
        The trailing 0x10 argument is the access mask forwarded to the
        generated test (presumably the access being requested — confirm
        against ConditionalAceClaimsBase).
        """
        cls.domain_sid = security.dom_sid("S-1-22-333-4444")
        for i, row in enumerate(cls.data):
            name, allow, token, sddl = row
            name = f'{i+1:03}-{name}'
            if 'sids' not in token:
                token['sids'] = ['AU', 'WD']
            if allow:
                cls.generate_dynamic_test('test_allow',
                                          name, token, sddl, 0x10)
            else:
                cls.generate_dynamic_test('test_deny',
                                          name, token, sddl, 0x10)

        # Optionally dump the generated SDDL strings as fuzzing seeds.
        fuzz_seed_dir = get_env_dir('SAMBA_WRITE_FUZZ_STRINGS_DIR')
        if fuzz_seed_dir is not None:
            cls._write_sddl_strings_for_fuzz_seeds(fuzz_seed_dir)


    # (name, allow, token, sddl) rows; see setUpDynamicTestCases().
    # The claim/composite helpers are evaluated once, at class-body
    # time, with fixed seeds, so the table is deterministic.
    data = [
        (
            "90-disorderly-strings-claim-vs-claim-case-sensitive-with-dupes",
            False,
            {'user_claims': {"c": claim_str_range(90,
                                                  random_seed=2),
                             "d": claim_str_range(90, 90,
                                                  case_sensitive=True,
                                                  random_seed=3)}},
            ('D:(XA;;FA;;;WD;(@USER.c == @USER.d))')
        ),
        (
            # this one currently fails before we get to compare_composites()
            "0-vs-0",
            True,
            {'user_claims': {"c": claim_str_range(0)}},
            ('D:(XA;;FA;;;WD;(@USER.c == @USER.c))')
        ),
        (
            "50-orderly-strings",
            True,
            {'user_claims': {"c": claim_str_range(50)}},
            (f'D:(XA;;FA;;;WD;(@USER.c == {composite_str(50)}))')
        ),
        (
            "50-disorderly-strings-same-disorder",
            True,
            {'user_claims': {"c": claim_str_range(50, random_seed=1)}},
            (f'D:(XA;;FA;;;WD;(@USER.c == {composite_str(50, random_seed=1)}))')
        ),
        (
            "200-disorderly-strings",
            True,
            {'user_claims': {"c": claim_str_range(200, random_seed=1)}},
            (f'D:(XA;;FA;;;WD;(@USER.c == {composite_str(200, random_seed=2)}))')
        ),
        (
            "50-orderly-vs-disorderly-strings",
            True,
            {'user_claims': {"c": claim_str_range(50)}},
            (f'D:(XA;;FA;;;WD;(@USER.c == {composite_str(50, random_seed=1)}))')
        ),
        (
            "50-disorderly-vs-orderly-strings",
            True,
            {'user_claims': {"c": claim_str_range(50, random_seed=1)}},
            (f'D:(XA;;FA;;;WD;(@USER.c == {composite_str(50)}))')
        ),
        (
            "99-orderly-strings",
            True,
            {'user_claims': {"c": claim_str_range(99)}},
            (f'D:(XA;;FA;;;WD;(@USER.c == {composite_str(99)}))')
        ),
        (
            "99-disorderly-strings",
            True,
            {'user_claims': {"c": claim_str_range(99, random_seed=1)}},
            (f'D:(XA;;FA;;;WD;(@USER.c == {composite_str(99, random_seed=2)}))')
        ),
        (
            "99-orderly-vs-disorderly-strings",
            True,
            {'user_claims': {"c": claim_str_range(99)}},
            (f'D:(XA;;FA;;;WD;(@USER.c == {composite_str(99, random_seed=1)}))')
        ),
        (
            "99-disorderly-vs-orderly-strings",
            True,
            {'user_claims': {"c": claim_str_range(99, random_seed=1)}},
            (f'D:(XA;;FA;;;WD;(@USER.c == {composite_str(99)}))')
        ),
        (
            "39-orderly-strings-vs-39+60-dupes",
            True,
            {'user_claims': {"c": claim_str_range(39)}},
            (f'D:(XA;;FA;;;WD;(@USER.c == {composite_str(39, 60)}))')
        ),
        (
            "39-disorderly-strings-vs-39+60-dupes",
            True,
            {'user_claims': {"c": claim_str_range(39, random_seed=1)}},
            (f'D:(XA;;FA;;;WD;(@USER.c == {composite_str(39, 60, random_seed=1)}))')
        ),
        (
            "39-orderly-vs-disorderly-strings-vs-39+60-dupes",
            True,
            {'user_claims': {"c": claim_str_range(39)}},
            (f'D:(XA;;FA;;;WD;(@USER.c == {composite_str(39, 60, random_seed=1)}))')
        ),
        (
            "39-disorderly-vs-orderly-strings-vs-39+60-dupes",
            True,
            {'user_claims': {"c": claim_str_range(39, random_seed=1)}},
            (f'D:(XA;;FA;;;WD;(@USER.c == {composite_str(39, 60)}))')
        ),
        (
            "3-orderly-strings-vs-3+60-dupes",
            True,
            {'user_claims': {"c": claim_str_range(3)}},
            (f'D:(XA;;FA;;;WD;(@USER.c == {composite_str(3, 60)}))')
        ),
        (
            "3-disorderly-strings-vs-3+60-dupes",
            True,
            {'user_claims': {"c": claim_str_range(3, random_seed=1)}},
            (f'D:(XA;;FA;;;WD;(@USER.c == {composite_str(3, 60, random_seed=1)}))')
        ),
        (
            "3-orderly-vs-disorderly-strings-vs-3+60-dupes",
            True,
            {'user_claims': {"c": claim_str_range(3)}},
            (f'D:(XA;;FA;;;WD;(@USER.c == {composite_str(3, 60, random_seed=1)}))')
        ),
        (
            "3-disorderly-vs-orderly-strings-vs-3+60-dupes",
            True,
            {'user_claims': {"c": claim_str_range(3, random_seed=1)}},
            (f'D:(XA;;FA;;;WD;(@USER.c == {composite_str(3, 60)}))')
        ),
        (
            "3-orderly-strings-vs-3+61-dupes",
            True,
            {'user_claims': {"c": claim_str_range(3)}},
            (f'D:(XA;;FA;;;WD;(@USER.c == {composite_str(3, 61)}))')
        ),

        (
            "63-orderly-strings-vs-62+1-dupe",
            False,
            {'user_claims': {"c": claim_str_range(63)}},
            (f'D:(XA;;FA;;;WD;(@USER.c == {composite_str(62, 1)}))')
        ),
        (
            "102+1-dupe-vs-102+1-dupe",
            False,
            # this is an invalid claim
            {'user_claims': {"c": claim_str_range(102, 1)}},
            (f'D:(XA;;FA;;;WD;(@USER.c == {composite_str(102, 1)}))')
        ),
        (
            "0-vs-1",
            False,
            {'user_claims': {"c": claim_str_range(0),
                             "d": claim_str_range(1)}},
            ('D:(XA;;FA;;;WD;(@USER.c == @USER.d))')
        ),
        (
            "2+1-dupe-vs-2+1-dupe",
            False,
            {'user_claims': {"c": claim_str_range(2, 1)}},
            (f'D:(XA;;FA;;;WD;(@USER.c == {composite_str(2, 1)}))')
        ),
        (
            "63-disorderly-strings-vs-62+1-dupe",
            False,
            {'user_claims': {"c": claim_str_range(63, random_seed=1)}},
            (f'D:(XA;;FA;;;WD;(@USER.c == {composite_str(62, 1, random_seed=1)}))')
        ),
        (
            "63-disorderly-strings-vs-63+800-dupe",
            True,
            {'user_claims': {"c": claim_str_range(63, random_seed=1)}},
            (f'D:(XA;;FA;;;WD;(@USER.c == {composite_str(63, 800, random_seed=1)}))')
        ),
        (
            "63-disorderly-strings-vs-62+800-dupe",
            False,
            {'user_claims': {"c": claim_str_range(63, random_seed=1)}},
            (f'D:(XA;;FA;;;WD;(@USER.c == {composite_str(62, 800, random_seed=1)}))')
        ),
        (
            "9-orderly-strings",
            True,
            {'user_claims': {"c": claim_str_range(9)}},
            (f'D:(XA;;FA;;;WD;(@USER.c == {composite_str(9)}))')
        ),
        (
            "9-orderly-strings-claim-vs-itself",
            True,
            {'user_claims': {"c": claim_str_range(9)}},
            ('D:(XA;;FA;;;WD;(@USER.c == @USER.c))')
        ),
        (
            "300-orderly-strings-claim-vs-itself",
            True,
            {'user_claims': {"c": claim_str_range(300)}},
            ('D:(XA;;FA;;;WD;(@USER.c == @USER.c))')
        ),
        (
            "900-disorderly-strings-claim-vs-claim",
            True,
            {'user_claims': {"c": claim_str_range(900, random_seed=1),
                             "d": claim_str_range(900, random_seed=1)}},
            ('D:(XA;;FA;;;WD;(@USER.c == @USER.d))')
        ),
        (
            "9-orderly-strings-claim-mixed-case-vs-claim-case-sensitive",
            False,
            {'user_claims': {"c": claim_str_range(9, mix_case=True),
                             "d": claim_str_range(9, case_sensitive=True)}},
            ('D:(XA;;FA;;;WD;(@USER.c == @USER.d))')
        ),
        (
            "9-disorderly-strings-claim-vs-claim-case-sensitive-mixed-case",
            False,
            {'user_claims': {"c": claim_str_range(9, random_seed=1),
                             "d": claim_str_range(9,
                                                  mix_case=True,
                                                  case_sensitive=True)}},
            ('D:(XA;;FA;;;WD;(@USER.c == @USER.d))')
        ),
        (
            "9-disorderly-strings-claim-vs-claim-case-sensitive-both-mixed-case",
            False,
            {'user_claims': {"c": claim_str_range(9,
                                                  mix_case=True,
                                                  random_seed=1),
                             "d": claim_str_range(9,
                                                  mix_case=True,
                                                  case_sensitive=True)}},
            ('D:(XA;;FA;;;WD;(@USER.c == @USER.d))')
        ),
        (
            "9-disorderly-strings-claim-vs-claim-case-sensitive-ne",
            True,
            {'user_claims': {"c": claim_str_range(9, random_seed=1),
                             "d": claim_str_range(9,
                                                  mix_case=True,
                                                  case_sensitive=True)}},
            ('D:(XA;;FA;;;WD;(@USER.c != @USER.d))')
        ),

        (
            "5-disorderly-strings-claim-vs-claim-case-sensitive-with-dupes-all-mixed-case",
            False,
            {'user_claims': {"c": claim_str_range(5,
                                                  mix_case=True,
                                                  random_seed=2),
                             "d": claim_str_range(5, 5,
                                                  mix_case=True,
                                                  random_seed=3,
                                                  case_sensitive=True)}},
            ('D:(XA;;FA;;;WD;(@USER.c == @USER.d))')
        ),
        (
            "90-disorderly-strings-claim-vs-int-claim",
            False,
            {'user_claims': {"c": claim_str_range(90,
                                                  random_seed=2),
                             "d": claim_int_range(90,
                                                  random_seed=3)}},
            ('D:(XA;;FA;;;WD;(@USER.c == @USER.d))')
        ),
        (
            "90-disorderly-ints-claim-vs-string-claim",
            False,
            {'user_claims': {"c": claim_int_range(90,
                                                  random_seed=2),
                             "d": claim_str_range(90,
                                                  random_seed=3)}},
            ('D:(XA;;FA;;;WD;(@USER.c == @USER.d))')
        ),
        (
            "9-disorderly-strings-vs-9+90-dupes",
            True,
            {'user_claims': {"c": claim_str_range(9, random_seed=1)}},
            (f'D:(XA;;FA;;;WD;(@USER.c == {composite_str(9, 90, random_seed=1)}))')
        ),
        (
            "9-disorderly-strings-vs-9+90-dupes-case-sensitive",
            True,
            {'user_claims': {"c": claim_str_range(9, random_seed=1, case_sensitive=True)}},
            (f'D:(XA;;FA;;;WD;(@USER.c == {composite_str(9, 90, random_seed=2)}))')
        ),
        (
            "9-disorderly-strings-vs-9+90-dupes-mixed-case",
            True,
            {'user_claims': {"c": claim_str_range(9, random_seed=1, mix_case=True)}},
            (f'D:(XA;;FA;;;WD;(@USER.c == {composite_str(9, 90, random_seed=2, mix_case=True)}))')
        ),
        (
            "9-disorderly-strings-vs-9+90-dupes-mixed-case-case-sensitive",
            False,
            {'user_claims': {"c": claim_str_range(9, random_seed=1, mix_case=True,
                                                  case_sensitive=True)}},
            (f'D:(XA;;FA;;;WD;(@USER.c == {composite_str(9, 90, random_seed=2, mix_case=True)}))')
        ),
        (
            "99-disorderly-strings-vs-9+90-dupes-mixed-case",
            False,
            {'user_claims': {"c": claim_str_range(99, random_seed=1, mix_case=True)}},
            (f'D:(XA;;FA;;;WD;(@USER.c == {composite_str(9, 90, random_seed=2, mix_case=True)}))')
        ),

        (
            "RA-99-disorderly-strings-vs-9+90-dupes-mixed-case",
            False,
            {},
            ('D:(XA;;FA;;;WD;(@RESOURCE.c == '
             f'{composite_str(9, 90, random_seed=1, mix_case=True)}))'
             f'S:{ra_str_range(99, random_seed=2, mix_case=True)}'
             )
        ),
        (
            "RA-9+90-dupes-disorderly-strings-vs-9+90-dupes-mixed-case",
            False,
            {},
            ('D:(XA;;FA;;;WD;(@RESOURCE.c == '
             f'{composite_str(9, 90, random_seed=1, mix_case=True)}))'
             f'S:{ra_str_range(9, 90, random_seed=2, mix_case=True)}'
             )
        ),
        (
            "90-disorderly-strings-claim-vs-missing-claim",
            False,
            {'user_claims': {"c": claim_str_range(90,
                                                  random_seed=2)}},
            ('D:(XA;;FA;;;WD;(@USER.c == @USER.d))')
        ),
        (
            "missing-claim-vs-90-disorderly-strings",
            False,
            {'user_claims': {"c": claim_str_range(90,
                                                  random_seed=2)}},
            ('D:(XA;;FA;;;WD;(@USER.z == @USER.c))')
        ),

        (
            "RA-9-disorderly-strings-vs-9-mixed-case",
            False,
            {'user_claims': {"c": claim_str_range(9,
                                                  random_seed=1,
                                                  mix_case=True),
                             }
             },
            ('D:(XA;;FA;;;WD;(@RESOURCE.c == @User.c))'
             f'S:{ra_str_range(9, random_seed=2, mix_case=True)}'
             )
        ),

        (
            "9-disorderly-strings-vs-9-RA-mixed-case",
            False,
            {'user_claims': {"c": claim_str_range(9,
                                                  random_seed=1,
                                                  mix_case=True),
                             }
             },
            ('D:(XA;;FA;;;WD;(@user.c == @resource.c))'
             f'S:{ra_str_range(9, random_seed=2, mix_case=True)}'
             )
        ),

        (
            "RA-29-disorderly-strings-vs-29-mixed-case",
            False,
            {'user_claims': {"c": claim_str_range(29,
                                                  random_seed=1,
                                                  mix_case=True),
                             }
             },
            ('D:(XA;;FA;;;WD;(@RESOURCE.c == @User.c))'
             f'S:{ra_str_range(29, random_seed=2, mix_case=True)}'
             )
        ),
        (
            "0-vs-0-ne",
            False,
            {'user_claims': {"c": claim_str_range(0)}},
            ('D:(XA;;FA;;;WD;(@USER.c != @USER.c))')
        ),
        (
            "1-vs-1",
            True,
            {'user_claims': {"c": claim_str_range(1)}},
            ('D:(XA;;FA;;;WD;(@USER.c == @USER.c))')
        ),
        (
            "1-vs-1-ne",
            False,
            {'user_claims': {"c": claim_str_range(1)}},
            ('D:(XA;;FA;;;WD;(@USER.c != @USER.c))')
        ),
    ]
diff --git a/python/samba/tests/core.py b/python/samba/tests/core.py
new file mode 100644
index 0000000..9f53473
--- /dev/null
+++ b/python/samba/tests/core.py
@@ -0,0 +1,83 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007-2008
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Samba Python tests."""
+
+import ldb
+import os
+import samba
+from samba import arcfour_encrypt, string_to_byte_array
+from samba.tests import TestCase, TestCaseInTempDir
+
+
class SubstituteVarTestCase(TestCase):
    """Checks for samba.substitute_var() template expansion."""

    def test_empty(self):
        result = samba.substitute_var("", {})
        self.assertEqual("", result)

    def test_nothing(self):
        result = samba.substitute_var("foo bar", {"bar": "bla"})
        self.assertEqual("foo bar", result)

    def test_replace(self):
        result = samba.substitute_var("foo ${bar}", {"bar": "bla"})
        self.assertEqual("foo bla", result)

    def test_broken(self):
        # An unterminated ${ is passed through untouched.
        result = samba.substitute_var("foo ${bdkjfhsdkfh sdkfh ",
                                      {"bar": "bla"})
        self.assertEqual("foo ${bdkjfhsdkfh sdkfh ", result)

    def test_unknown_var(self):
        # Variables with no binding are left as-is.
        result = samba.substitute_var("foo ${bla} gsff", {"bar": "bla"})
        self.assertEqual("foo ${bla} gsff", result)

    def test_check_all_substituted(self):
        # No leftover ${...} markers: should pass silently.
        samba.check_all_substituted("nothing to see here")
        # A remaining marker must raise.
        self.assertRaises(Exception, samba.check_all_substituted,
                          "Not substituted: ${FOOBAR}")
+
+
class ArcfourTestCase(TestCase):
    """Sanity check for the arcfour_encrypt() binding."""

    def test_arcfour_direct(self):
        # Known-answer test: RC4 of 'abcdefghi' under key '12345678'.
        expected = b'\xda\x91Z\xb0l\xd7\xb9\xcf\x99'
        actual = arcfour_encrypt(b'12345678', b'abcdefghi')
        self.assertEqual(expected, actual)
+
+
class StringToByteArrayTestCase(TestCase):
    """Sanity check for the string_to_byte_array() binding."""

    def test_byte_array(self):
        data = '\xda\x91Z\xb0l\xd7\xb9\xcf\x99'
        self.assertEqual([218, 145, 90, 176, 108, 215, 185, 207, 153],
                         string_to_byte_array(data))
+
+
class LdbExtensionTests(TestCaseInTempDir):
    """Tests for the samba.Ldb convenience wrapper."""

    def test_searchone(self):
        path = os.path.join(self.tempdir, "searchone.ldb")
        db = samba.Ldb(path)
        try:
            db.add({"dn": "foo=dc", "bar": "bla"})
            result = db.searchone(basedn=ldb.Dn(db, "foo=dc"),
                                  attribute="bar")
            self.assertEqual(b"bla", result)
        finally:
            # Drop the handle before unlinking so the temp dir can be
            # removed cleanly.
            del db
            os.unlink(path)
diff --git a/python/samba/tests/cred_opt.py b/python/samba/tests/cred_opt.py
new file mode 100644
index 0000000..0adb915
--- /dev/null
+++ b/python/samba/tests/cred_opt.py
@@ -0,0 +1,155 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) David Mulder <dmulder@suse.com> 2019
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for cred option parsing.
+
+"""
+
+import optparse
+import os
+from contextlib import contextmanager
+from samba.getopt import CredentialsOptions, SambaOptions
+import samba.tests
+import setproctitle
+import sys
+
# Option string whose secret value must never appear in the process title.
password_opt = '--password=super_secret_password'
# What the redacted option looks like in the process title: the value is
# blanked, leaving '--password ' followed by a space.
clear_password_opt = '--password '
+
@contextmanager
def auth_fle_opt(auth_file_path, long_opt=True):
    """Temporarily add an authentication-file option to sys.argv.

    With long_opt=True a single '--authentication-file=PATH' argument is
    appended; otherwise the short form '-A' plus the path. The original
    argv is restored on exit, even if the body raises.
    """
    saved_argv = list(sys.argv)
    try:
        if long_opt:
            sys.argv.append('--authentication-file=%s' % auth_file_path)
        else:
            sys.argv.extend(['-A', auth_file_path])
        yield
    finally:
        sys.argv = saved_argv
+
class CredentialsOptionsTests(samba.tests.TestCase):
    """Check that the --password value is redacted from the process
    title when the option groups are constructed."""

    def setUp(self):
        super().setUp()
        self.old_proctitle = setproctitle.getproctitle()
        # Snapshot argv so tearDown can fully undo our changes.
        self.old_argv = list(sys.argv)

        # We must append two options to get the " " we look for in the
        # test after the redacted password
        sys.argv.extend([password_opt, "--realm=samba.org"])

    def test_clear_proctitle_password(self):
        parser = optparse.OptionParser()

        # The password burning is on the SambaOptions __init__()
        sambaopts = SambaOptions(parser)
        parser.add_option_group(sambaopts)
        credopts = CredentialsOptions(parser)
        parser.add_option_group(credopts)
        (opts, args) = parser.parse_args()
        self.assertNotIn(password_opt, setproctitle.getproctitle())
        self.assertIn(clear_password_opt, setproctitle.getproctitle())

    def tearDown(self):
        super().tearDown()
        setproctitle.setproctitle(self.old_proctitle)
        # BUG fix: setUp() appended TWO arguments but the old code
        # popped only one, leaking password_opt into argv for any
        # later test.  Restore the snapshot instead.
        sys.argv = self.old_argv
+
class AuthenticationFileTests(samba.tests.TestCaseInTempDir):
    """Tests for loading credentials via -A/--authentication-file."""

    def setUp(self):
        super().setUp()

        self.parser = optparse.OptionParser()
        self.credopts = CredentialsOptions(self.parser)
        self.parser.add_option_group(self.credopts)

        self.auth_file_name = os.path.join(self.tempdir, 'auth.txt')

        self.realm = 'realm.example.com'
        self.domain = 'dom'
        self.password = 'pass'
        self.username = 'user'

        # 'x' mode refuses to overwrite an existing file.  The context
        # manager guarantees the handle is closed even if a write
        # raises (the original code leaked the handle on error).
        with open(self.auth_file_name, 'x') as auth_file:
            auth_file.write('realm=%s\n' % self.realm)
            auth_file.write('domain=%s\n' % self.domain)
            auth_file.write('username=%s\n' % self.username)
            auth_file.write('password=%s\n' % self.password)

    def tearDown(self):
        super().tearDown()

        os.unlink(self.auth_file_name)

    def _assert_file_loaded(self):
        """The credentials reflect everything written in setUp()."""
        credopts = self.credopts
        creds = credopts.creds

        self.assertFalse(credopts.ask_for_password)
        self.assertFalse(credopts.machine_pass)

        self.assertEqual(self.username, creds.get_username())
        self.assertEqual(self.password, creds.get_password())
        self.assertEqual(self.domain.upper(), creds.get_domain())
        self.assertEqual(self.realm.upper(), creds.get_realm())

    def _assert_file_not_loaded(self):
        """A missing auth file leaves the credentials unset and falls
        back to prompting for a password."""
        credopts = self.credopts
        creds = credopts.creds

        self.assertTrue(credopts.ask_for_password)
        self.assertFalse(credopts.machine_pass)

        self.assertIsNone(creds.get_username())
        self.assertIsNone(creds.get_password())
        self.assertIsNone(creds.get_domain())
        self.assertIsNone(creds.get_realm())

    def test_long_option_valid_path(self):
        with auth_fle_opt(self.auth_file_name):
            self.parser.parse_args()
            self._assert_file_loaded()

    def test_long_option_invalid_path(self):
        with auth_fle_opt(self.auth_file_name + '.dontexist'):
            self.parser.parse_args()
            self._assert_file_not_loaded()

    def test_short_option_valid_path(self):
        with auth_fle_opt(self.auth_file_name, long_opt=False):
            self.parser.parse_args()
            self._assert_file_loaded()

    def test_short_option_invalid_path(self):
        with auth_fle_opt(self.auth_file_name + '.dontexist', long_opt=False):
            self.parser.parse_args()
            self._assert_file_not_loaded()
diff --git a/python/samba/tests/credentials.py b/python/samba/tests/credentials.py
new file mode 100644
index 0000000..f9781f8
--- /dev/null
+++ b/python/samba/tests/credentials.py
@@ -0,0 +1,501 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for the Credentials Python bindings.
+
+Note that this just tests the bindings work. It does not intend to test
+the functionality, that's already done in other tests.
+"""
+
+from samba import credentials
+import samba.tests
+import os
+import binascii
+from samba.dcerpc import misc
+
+
+class CredentialsTests(samba.tests.TestCaseInTempDir):
+
+ def setUp(self):
+ super().setUp()
+ self.creds = credentials.Credentials()
+
+ def test_set_username(self):
+ self.creds.set_username("somebody")
+ self.assertEqual("somebody", self.creds.get_username())
+
+ def test_set_password(self):
+ self.creds.set_password("S3CreT")
+ self.assertEqual("S3CreT", self.creds.get_password())
+
+ def test_set_utf16_password(self):
+ password = 'S3cRet'
+ passbytes = password.encode('utf-16-le')
+ self.assertTrue(self.creds.set_utf16_password(passbytes))
+ self.assertEqual(password, self.creds.get_password())
+
+ def test_set_old_password(self):
+ self.assertEqual(None, self.creds.get_old_password())
+ self.assertTrue(self.creds.set_old_password("S3c0ndS3CreT"))
+ self.assertEqual("S3c0ndS3CreT", self.creds.get_old_password())
+
+ def test_set_old_utf16_password(self):
+ password = '0ldS3cRet'
+ passbytes = password.encode('utf-16-le')
+ self.assertTrue(self.creds.set_old_utf16_password(passbytes))
+ self.assertEqual(password, self.creds.get_old_password())
+
+ def test_set_domain(self):
+ self.creds.set_domain("ABMAS")
+ self.assertEqual("ABMAS", self.creds.get_domain())
+ self.assertEqual(self.creds.get_principal(), None)
+
+ def test_set_realm(self):
+ self.creds.set_realm("myrealm")
+ self.assertEqual("MYREALM", self.creds.get_realm())
+ self.assertEqual(self.creds.get_principal(), None)
+
+ def test_parse_string_anon(self):
+ self.creds.parse_string("%")
+ self.assertEqual("", self.creds.get_username())
+ self.assertEqual(None, self.creds.get_password())
+
+ def test_parse_string_empty_pw(self):
+ self.creds.parse_string("someone%")
+ self.assertEqual("someone", self.creds.get_username())
+ self.assertEqual("", self.creds.get_password())
+
+ def test_parse_string_none_pw(self):
+ self.creds.parse_string("someone")
+ self.assertEqual("someone", self.creds.get_username())
+ self.assertEqual(None, self.creds.get_password())
+
+ def test_parse_string_user_pw_domain(self):
+ self.creds.parse_string("dom\\someone%secr")
+ self.assertEqual("someone", self.creds.get_username())
+ self.assertEqual("secr", self.creds.get_password())
+ self.assertEqual("DOM", self.creds.get_domain())
+
+ def test_bind_dn(self):
+ self.assertEqual(None, self.creds.get_bind_dn())
+ self.creds.set_bind_dn("dc=foo,cn=bar")
+ self.assertEqual("dc=foo,cn=bar", self.creds.get_bind_dn())
+
+ def test_is_anon(self):
+ self.creds.set_username("")
+ self.assertTrue(self.creds.is_anonymous())
+ self.creds.set_username("somebody")
+ self.assertFalse(self.creds.is_anonymous())
+ self.creds.set_anonymous()
+ self.assertTrue(self.creds.is_anonymous())
+
+ def test_workstation(self):
+ # FIXME: This is uninitialised, it should be None
+ #self.assertEqual(None, self.creds.get_workstation())
+ self.creds.set_workstation("myworksta")
+ self.assertEqual("myworksta", self.creds.get_workstation())
+
+ def test_secure_channel_type(self):
+ self.assertEqual(misc.SEC_CHAN_NULL,
+ self.creds.get_secure_channel_type())
+ self.creds.set_secure_channel_type(misc.SEC_CHAN_BDC)
+ self.assertEqual(misc.SEC_CHAN_BDC,
+ self.creds.get_secure_channel_type())
+
+ def test_get_nt_hash(self):
+ password = "geheim"
+ hex_nthash = "c2ae1fe6e648846352453e816f2aeb93"
+ self.creds.set_password(password)
+ self.assertEqual(password, self.creds.get_password())
+ self.assertEqual(binascii.a2b_hex(hex_nthash),
+ self.creds.get_nt_hash())
+
+ def test_get_ntlm_response(self):
+ password = "SecREt01"
+ hex_challenge = "0123456789abcdef"
+ hex_nthash = "cd06ca7c7e10c99b1d33b7485a2ed808"
+ hex_session_key = "3f373ea8e4af954f14faa506f8eebdc4"
+ hex_ntlm_response = "25a98c1c31e81847466b29b2df4680f39958fb8c213a9cc6"
+ self.creds.set_username("fred")
+ self.creds.set_domain("nurk")
+ self.creds.set_password(password)
+ self.assertEqual(password, self.creds.get_password())
+ self.assertEqual(binascii.a2b_hex(hex_nthash),
+ self.creds.get_nt_hash())
+ response = self.creds.get_ntlm_response(flags=credentials.CLI_CRED_NTLM_AUTH,
+ challenge=binascii.a2b_hex(hex_challenge))
+
+ self.assertEqual(response["nt_response"], binascii.a2b_hex(hex_ntlm_response))
+ self.assertEqual(response["nt_session_key"], binascii.a2b_hex(hex_session_key))
+ self.assertEqual(response["flags"], credentials.CLI_CRED_NTLM_AUTH)
+
+ def test_get_nt_hash_string(self):
+ self.creds.set_password_will_be_nt_hash(True)
+ hex_nthash = "c2ae1fe6e648846352453e816f2aeb93"
+ self.creds.set_password(hex_nthash)
+ self.assertEqual(None, self.creds.get_password())
+ self.assertEqual(binascii.a2b_hex(hex_nthash),
+ self.creds.get_nt_hash())
+
+ def test_set_cmdline_callbacks(self):
+ self.creds.set_cmdline_callbacks()
+
+ def test_authentication_requested(self):
+ self.creds.set_username("")
+ self.assertFalse(self.creds.authentication_requested())
+ self.creds.set_username("somebody")
+ self.assertTrue(self.creds.authentication_requested())
+
+ def test_wrong_password(self):
+ self.assertFalse(self.creds.wrong_password())
+
+ def test_guess(self):
+ creds = credentials.Credentials()
+ lp = samba.tests.env_loadparm()
+ os.environ["USER"] = "env_user"
+ creds.guess(lp)
+ self.assertEqual(creds.get_username(), "env_user")
+ self.assertEqual(creds.get_domain(), lp.get("workgroup").upper())
+ self.assertEqual(creds.get_realm(), None)
+ self.assertEqual(creds.get_principal(), "env_user@%s" % creds.get_domain())
+ self.assertEqual(creds.is_anonymous(), False)
+ self.assertEqual(creds.authentication_requested(), False)
+
+ def test_set_anonymous(self):
+ creds = credentials.Credentials()
+ lp = samba.tests.env_loadparm()
+ os.environ["USER"] = "env_user"
+ creds.guess(lp)
+ creds.set_anonymous()
+ self.assertEqual(creds.get_username(), "")
+ self.assertEqual(creds.get_domain(), "")
+ self.assertEqual(creds.get_realm(), None)
+ self.assertEqual(creds.get_principal(), None)
+ self.assertEqual(creds.is_anonymous(), True)
+ self.assertEqual(creds.authentication_requested(), False)
+
+ def test_parse_file_1(self):
+ realm = "realm.example.com"
+ domain = "dom"
+ password = "pass"
+ username = "user"
+
+ passwd_file_name = os.path.join(self.tempdir, "parse_file")
+ passwd_file_fd = open(passwd_file_name, 'x')
+ passwd_file_fd.write("realm=%s\n" % realm)
+ passwd_file_fd.write("domain=%s\n" % domain)
+ passwd_file_fd.write("username=%s\n" % username)
+ passwd_file_fd.write("password=%s\n" % password)
+ passwd_file_fd.close()
+ self.creds.parse_file(passwd_file_name)
+ self.assertEqual(self.creds.get_username(), username)
+ self.assertEqual(self.creds.get_password(), password)
+ self.assertEqual(self.creds.get_domain(), domain.upper())
+ self.assertEqual(self.creds.get_realm(), realm.upper())
+ self.assertEqual(self.creds.get_principal(), "%s@%s" % (username, realm.upper()))
+ self.assertEqual(self.creds.is_anonymous(), False)
+ self.assertEqual(self.creds.authentication_requested(), True)
+ os.unlink(passwd_file_name)
+
+ def test_parse_file_2(self):
+ realm = "realm.example.com"
+ domain = "dom"
+ password = "pass"
+ username = "user"
+
+ passwd_file_name = os.path.join(self.tempdir, "parse_file")
+ passwd_file_fd = open(passwd_file_name, 'x')
+ passwd_file_fd.write("realm=%s\n" % realm)
+ passwd_file_fd.write("domain=%s\n" % domain)
+ passwd_file_fd.write("username=%s\\%s\n" % (domain, username))
+ passwd_file_fd.write("password=%s\n" % password)
+ passwd_file_fd.close()
+ self.creds.parse_file(passwd_file_name)
+ self.assertEqual(self.creds.get_username(), username)
+ self.assertEqual(self.creds.get_password(), password)
+ self.assertEqual(self.creds.get_domain(), domain.upper())
+ self.assertEqual(self.creds.get_realm(), realm.upper())
+ self.assertEqual(self.creds.get_principal(), "%s@%s" % (username, realm.upper()))
+ self.assertEqual(self.creds.is_anonymous(), False)
+ self.assertEqual(self.creds.authentication_requested(), True)
+ os.unlink(passwd_file_name)
+
+ def test_parse_file_3(self):
+ realm = "realm.example.com"
+ domain = "domain"
+ password = "password"
+ username = "username"
+
+ userdom = "userdom"
+
+ passwd_file_name = os.path.join(self.tempdir, "parse_file")
+ passwd_file_fd = open(passwd_file_name, 'x')
+ passwd_file_fd.write("realm=%s\n" % realm)
+ passwd_file_fd.write("domain=%s\n" % domain)
+ passwd_file_fd.write("username=%s/%s\n" % (userdom, username))
+ passwd_file_fd.write("password=%s\n" % password)
+ passwd_file_fd.close()
+ self.creds.parse_file(passwd_file_name)
+ self.assertEqual(self.creds.get_username(), username)
+ self.assertEqual(self.creds.get_password(), password)
+ self.assertEqual(self.creds.get_domain(), userdom.upper())
+ self.assertEqual(self.creds.get_realm(), userdom.upper())
+ self.assertEqual(self.creds.get_principal(), "%s@%s" % (username, userdom.upper()))
+ self.assertEqual(self.creds.is_anonymous(), False)
+ self.assertEqual(self.creds.authentication_requested(), True)
+ os.unlink(passwd_file_name)
+
+ def test_parse_file_4(self):
+ password = "password"
+ username = "username"
+
+ userdom = "userdom"
+
+ passwd_file_name = os.path.join(self.tempdir, "parse_file")
+ passwd_file_fd = open(passwd_file_name, 'x')
+ passwd_file_fd.write("username=%s\\%s%%%s\n" % (userdom, username, password))
+ passwd_file_fd.write("realm=ignorerealm\n")
+ passwd_file_fd.write("domain=ignoredomain\n")
+ passwd_file_fd.write("password=ignorepassword\n")
+ passwd_file_fd.close()
+ self.creds.parse_file(passwd_file_name)
+ self.assertEqual(self.creds.get_username(), username)
+ self.assertEqual(self.creds.get_password(), password)
+ self.assertEqual(self.creds.get_domain(), userdom.upper())
+ self.assertEqual(self.creds.get_realm(), userdom.upper())
+ self.assertEqual(self.creds.get_principal(), "%s@%s" % (username, userdom.upper()))
+ self.assertEqual(self.creds.is_anonymous(), False)
+ self.assertEqual(self.creds.authentication_requested(), True)
+ os.unlink(passwd_file_name)
+
+ def test_parse_file_5(self):
+ password = "password"
+ username = "username"
+
+ userdom = "userdom"
+
+ passwd_file_name = os.path.join(self.tempdir, "parse_file")
+ passwd_file_fd = open(passwd_file_name, 'x')
+ passwd_file_fd.write("realm=ignorerealm\n")
+ passwd_file_fd.write("username=%s\\%s%%%s\n" % (userdom, username, password))
+ passwd_file_fd.write("domain=ignoredomain\n")
+ passwd_file_fd.write("password=ignorepassword\n")
+ passwd_file_fd.close()
+ self.creds.parse_file(passwd_file_name)
+ self.assertEqual(self.creds.get_username(), username)
+ self.assertEqual(self.creds.get_password(), password)
+ self.assertEqual(self.creds.get_domain(), userdom.upper())
+ self.assertEqual(self.creds.get_realm(), userdom.upper())
+ self.assertEqual(self.creds.get_principal(), "%s@%s" % (username, userdom.upper()))
+ self.assertEqual(self.creds.is_anonymous(), False)
+ self.assertEqual(self.creds.authentication_requested(), True)
+ os.unlink(passwd_file_name)
+
+ def test_parse_username_0(self):
+ creds = credentials.Credentials()
+ lp = samba.tests.env_loadparm()
+ os.environ["USER"] = "env_user"
+ creds.guess(lp)
+ creds.parse_string("user")
+ self.assertEqual(creds.get_username(), "user")
+ self.assertEqual(creds.get_domain(), lp.get("workgroup").upper())
+ self.assertEqual(creds.get_realm(), None)
+ self.assertEqual(creds.get_principal(), "user@%s" % lp.get("workgroup").upper())
+ self.assertEqual(creds.is_anonymous(), False)
+ self.assertEqual(creds.authentication_requested(), True)
+
+ def test_parse_username_1(self):
+ creds = credentials.Credentials()
+ lp = samba.tests.env_loadparm()
+ os.environ["USER"] = "env_user"
+ creds.guess(lp)
+ realm = "realm.example.com"
+ creds.set_realm(realm, credentials.SMB_CONF)
+ creds.parse_string("user")
+ self.assertEqual(creds.get_username(), "user")
+ self.assertEqual(creds.get_domain(), lp.get("workgroup").upper())
+ self.assertEqual(creds.get_realm(), realm.upper())
+ self.assertEqual(creds.get_principal(), "user@%s" % realm.upper())
+ self.assertEqual(creds.is_anonymous(), False)
+ self.assertEqual(creds.authentication_requested(), True)
+
+ def test_parse_username_with_domain_0(self):
+ creds = credentials.Credentials()
+ lp = samba.tests.env_loadparm()
+ os.environ["USER"] = "env_user"
+ creds.guess(lp)
+ creds.parse_string("domain\\user")
+ self.assertEqual(creds.get_username(), "user")
+ self.assertEqual(creds.get_domain(), "DOMAIN")
+ self.assertEqual(creds.get_realm(), None)
+ self.assertEqual(creds.get_principal(), "user@DOMAIN")
+ self.assertEqual(creds.is_anonymous(), False)
+ self.assertEqual(creds.authentication_requested(), True)
+
+ def test_parse_username_with_domain_1(self):
+ creds = credentials.Credentials()
+ lp = samba.tests.env_loadparm()
+ os.environ["USER"] = "env_user"
+ creds.guess(lp)
+ realm = "realm.example.com"
+ creds.set_realm(realm, credentials.SMB_CONF)
+ self.assertEqual(creds.get_username(), "env_user")
+ self.assertEqual(creds.get_domain(), lp.get("workgroup").upper())
+ self.assertEqual(creds.get_realm(), realm.upper())
+ self.assertEqual(creds.get_principal(), "env_user@%s" % realm.upper())
+ creds.set_principal("unknown@realm.example.com")
+ self.assertEqual(creds.get_username(), "env_user")
+ self.assertEqual(creds.get_domain(), lp.get("workgroup").upper())
+ self.assertEqual(creds.get_realm(), realm.upper())
+ self.assertEqual(creds.get_principal(), "unknown@realm.example.com")
+ creds.parse_string("domain\\user")
+ self.assertEqual(creds.get_username(), "user")
+ self.assertEqual(creds.get_domain(), "DOMAIN")
+ self.assertEqual(creds.get_realm(), realm.upper())
+ self.assertEqual(creds.get_principal(), "user@DOMAIN")
+ self.assertEqual(creds.is_anonymous(), False)
+ self.assertEqual(creds.authentication_requested(), True)
+
+ def test_parse_username_with_domain_2(self):
+ creds = credentials.Credentials()
+ lp = samba.tests.env_loadparm()
+ os.environ["USER"] = "env_user"
+ creds.guess(lp)
+ realm = "realm.example.com"
+ creds.set_realm(realm, credentials.SPECIFIED)
+ self.assertEqual(creds.get_username(), "env_user")
+ self.assertEqual(creds.get_domain(), lp.get("workgroup").upper())
+ self.assertEqual(creds.get_realm(), realm.upper())
+ self.assertEqual(creds.get_principal(), "env_user@%s" % realm.upper())
+ creds.set_principal("unknown@realm.example.com")
+ self.assertEqual(creds.get_username(), "env_user")
+ self.assertEqual(creds.get_domain(), lp.get("workgroup").upper())
+ self.assertEqual(creds.get_realm(), realm.upper())
+ self.assertEqual(creds.get_principal(), "unknown@realm.example.com")
+ creds.parse_string("domain\\user")
+ self.assertEqual(creds.get_username(), "user")
+ self.assertEqual(creds.get_domain(), "DOMAIN")
+ self.assertEqual(creds.get_realm(), "DOMAIN")
+ self.assertEqual(creds.get_principal(), "user@DOMAIN")
+ self.assertEqual(creds.is_anonymous(), False)
+ self.assertEqual(creds.authentication_requested(), True)
+
+ def test_parse_username_with_realm(self):
+ creds = credentials.Credentials()
+ lp = samba.tests.env_loadparm()
+ os.environ["USER"] = "env_user"
+ creds.guess(lp)
+ creds.parse_string("user@samba.org")
+ self.assertEqual(creds.get_username(), "user@samba.org")
+ self.assertEqual(creds.get_domain(), "")
+ self.assertEqual(creds.get_realm(), "SAMBA.ORG")
+ self.assertEqual(creds.get_principal(), "user@samba.org")
+ self.assertEqual(creds.is_anonymous(), False)
+ self.assertEqual(creds.authentication_requested(), True)
+
+ def test_parse_username_pw(self):
+ creds = credentials.Credentials()
+ lp = samba.tests.env_loadparm()
+ os.environ["USER"] = "env_user"
+ creds.guess(lp)
+ creds.parse_string("user%pass")
+ self.assertEqual(creds.get_username(), "user")
+ self.assertEqual(creds.get_password(), "pass")
+ self.assertEqual(creds.get_domain(), lp.get("workgroup"))
+ self.assertEqual(creds.get_realm(), None)
+ self.assertEqual(creds.get_principal(), "user@%s" % lp.get("workgroup"))
+ self.assertEqual(creds.is_anonymous(), False)
+ self.assertEqual(creds.authentication_requested(), True)
+
+ def test_parse_username_with_domain_pw(self):
+ creds = credentials.Credentials()
+ lp = samba.tests.env_loadparm()
+ os.environ["USER"] = "env_user"
+ creds.guess(lp)
+ creds.parse_string("domain\\user%pass")
+ self.assertEqual(creds.get_username(), "user")
+ self.assertEqual(creds.get_domain(), "DOMAIN")
+ self.assertEqual(creds.get_password(), "pass")
+ self.assertEqual(creds.get_realm(), None)
+ self.assertEqual(creds.get_principal(), "user@DOMAIN")
+ self.assertEqual(creds.is_anonymous(), False)
+ self.assertEqual(creds.authentication_requested(), True)
+
+ def test_parse_username_with_realm_pw(self):
+ creds = credentials.Credentials()
+ lp = samba.tests.env_loadparm()
+ os.environ["USER"] = "env_user"
+ creds.guess(lp)
+ creds.parse_string("user@samba.org%pass")
+ self.assertEqual(creds.get_username(), "user@samba.org")
+ self.assertEqual(creds.get_domain(), "")
+ self.assertEqual(creds.get_password(), "pass")
+ self.assertEqual(creds.get_realm(), "SAMBA.ORG")
+ self.assertEqual(creds.get_principal(), "user@samba.org")
+ self.assertEqual(creds.is_anonymous(), False)
+ self.assertEqual(creds.authentication_requested(), True)
+
+ def test_smb_signing(self):
+ creds = credentials.Credentials()
+ self.assertEqual(creds.get_smb_signing(), credentials.SMB_SIGNING_DEFAULT)
+ creds.set_smb_signing(credentials.SMB_SIGNING_REQUIRED)
+ self.assertEqual(creds.get_smb_signing(), credentials.SMB_SIGNING_REQUIRED)
+
+ def test_smb_signing_set_conf(self):
+ lp = samba.tests.env_loadparm()
+
+ creds = credentials.Credentials()
+ creds.set_conf(lp)
+ self.assertEqual(creds.get_smb_signing(), credentials.SMB_SIGNING_DEFAULT)
+ creds.set_smb_signing(credentials.SMB_SIGNING_OFF)
+ self.assertEqual(creds.get_smb_signing(), credentials.SMB_SIGNING_OFF)
+ creds.set_conf(lp)
+ self.assertEqual(creds.get_smb_signing(), credentials.SMB_SIGNING_OFF)
+
+ def test_smb_ipc_signing(self):
+ creds = credentials.Credentials()
+ self.assertEqual(creds.get_smb_ipc_signing(), credentials.SMB_SIGNING_REQUIRED)
+ creds.set_smb_ipc_signing(credentials.SMB_SIGNING_OFF)
+ self.assertEqual(creds.get_smb_ipc_signing(), credentials.SMB_SIGNING_OFF)
+
+ def test_smb_ipc_signing_set_conf(self):
+ lp = samba.tests.env_loadparm()
+
+ creds = credentials.Credentials()
+ creds.set_conf(lp)
+ self.assertEqual(creds.get_smb_ipc_signing(), credentials.SMB_SIGNING_REQUIRED)
+ creds.set_smb_ipc_signing(credentials.SMB_SIGNING_OFF)
+ self.assertEqual(creds.get_smb_ipc_signing(), credentials.SMB_SIGNING_OFF)
+ creds.set_conf(lp)
+ self.assertEqual(creds.get_smb_ipc_signing(), credentials.SMB_SIGNING_OFF)
+
+ def test_smb_encryption(self):
+ creds = credentials.Credentials()
+ self.assertEqual(creds.get_smb_encryption(), credentials.SMB_ENCRYPTION_DEFAULT)
+ creds.set_smb_encryption(credentials.SMB_ENCRYPTION_REQUIRED)
+ self.assertEqual(creds.get_smb_encryption(), credentials.SMB_ENCRYPTION_REQUIRED)
+
+ def test_smb_encryption_set_conf(self):
+ lp = samba.tests.env_loadparm()
+
+ creds = credentials.Credentials()
+ creds.set_conf(lp)
+ self.assertEqual(creds.get_smb_encryption(), credentials.SMB_ENCRYPTION_DEFAULT)
+ creds.set_smb_encryption(credentials.SMB_ENCRYPTION_OFF)
+ self.assertEqual(creds.get_smb_encryption(), credentials.SMB_ENCRYPTION_OFF)
+ creds.set_conf(lp)
+ self.assertEqual(creds.get_smb_encryption(), credentials.SMB_ENCRYPTION_OFF)
diff --git a/python/samba/tests/dcerpc/__init__.py b/python/samba/tests/dcerpc/__init__.py
new file mode 100644
index 0000000..b8df5a2
--- /dev/null
+++ b/python/samba/tests/dcerpc/__init__.py
@@ -0,0 +1,19 @@
+# -*- coding: utf-8 -*-
+#
+# Unix SMB/CIFS implementation.
+# Copyright © Jelmer Vernooij <jelmer@samba.org> 2008
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""Tests for the DCE/RPC Python bindings."""
diff --git a/python/samba/tests/dcerpc/array.py b/python/samba/tests/dcerpc/array.py
new file mode 100644
index 0000000..073d2c2
--- /dev/null
+++ b/python/samba/tests/dcerpc/array.py
@@ -0,0 +1,206 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2016
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for array handling in PIDL generated bindings samba.dcerpc.*"""
+
+from samba.dcerpc import drsblobs
+import samba.tests
+from samba.ndr import ndr_unpack, ndr_pack
+import talloc
+import gc
+
+
+class ArrayTests(samba.tests.TestCase):
+
+ def setUp(self):
+ super().setUp()
+ talloc.enable_null_tracking()
+ self.startup_blocks = talloc.total_blocks()
+
+ def tearDown(self):
+ super().tearDown()
+ gc.collect()
+ if talloc.total_blocks() != self.startup_blocks:
+ talloc.report_full()
+ self.fail("it appears we are leaking memory")
+
+ def test_array_from_python(self):
+ rmd = drsblobs.replPropertyMetaDataBlob()
+
+ rmd.version = 1
+ rmd.ctr = drsblobs.replPropertyMetaDataCtr1()
+ rmd.ctr.count = 3
+
+ rmd1 = drsblobs.replPropertyMetaData1()
+ rmd1.attid = 1
+ rmd1.version = 2
+
+ rmd2 = drsblobs.replPropertyMetaData1()
+ rmd2.attid = 2
+ rmd2.version = 2
+
+ rmd3 = drsblobs.replPropertyMetaData1()
+ rmd3.attid = 3
+ rmd3.version = 2
+
+ rmd.ctr.array = [rmd1, rmd2, rmd3]
+ gc.collect()
+
+ self.assertIsNotNone(rmd)
+ self.assertEqual(rmd.version, 1)
+ self.assertIsNotNone(rmd.ctr)
+ self.assertEqual(rmd.ctr.count, 3)
+ self.assertEqual(len(rmd.ctr.array), rmd.ctr.count)
+ self.assertIsNotNone(rmd.ctr.array[0])
+ self.assertEqual(rmd.ctr.array[0].attid, 1)
+
+ def test_array_with_exception(self):
+ try:
+ rmd = drsblobs.replPropertyMetaDataBlob()
+
+ rmd.version = 1
+ rmd.ctr = drsblobs.replPropertyMetaDataCtr1()
+ rmd.ctr.count = 3
+
+ rmd1 = drsblobs.replPropertyMetaData1()
+ rmd1.attid = 1
+ rmd1.version = 2
+
+ rmd2 = drsblobs.replPropertyMetaData1()
+ rmd2.attid = 2
+ rmd2.version = 2
+
+ rmd3 = drsblobs.replPropertyMetaData1()
+ rmd3.attid = 3
+ rmd3.version = 2
+
+ rmd.ctr.array = [rmd1, rmd2, rmd3]
+
+ gc.collect()
+
+ self.assertIsNotNone(rmd)
+ self.assertEqual(rmd.version, 1)
+ self.assertIsNotNone(rmd.ctr)
+ self.assertEqual(rmd.ctr.count, 3)
+ self.assertEqual(len(rmd.ctr.array), rmd.ctr.count)
+ self.assertIsNotNone(rmd.ctr.array[0])
+ self.assertEqual(rmd.ctr.array[0].attid, 1)
+
+ raise Exception()
+ except:
+ pass
+
+ def test_array_from_python_function(self):
+ def get_rmd():
+ rmd = drsblobs.replPropertyMetaDataBlob()
+
+ rmd.version = 1
+ rmd.ctr = drsblobs.replPropertyMetaDataCtr1()
+ rmd.ctr.count = 3
+
+ rmd1 = drsblobs.replPropertyMetaData1()
+ rmd1.attid = 1
+ rmd1.version = 2
+
+ rmd2 = drsblobs.replPropertyMetaData1()
+ rmd2.attid = 2
+ rmd2.version = 2
+
+ rmd3 = drsblobs.replPropertyMetaData1()
+ rmd3.attid = 3
+ rmd3.version = 2
+
+ rmd.ctr.array = [rmd1, rmd2, rmd3]
+ return rmd
+
+ rmd = get_rmd()
+ gc.collect()
+ self.assertIsNotNone(rmd)
+ self.assertEqual(rmd.version, 1)
+ self.assertIsNotNone(rmd.ctr)
+ self.assertEqual(rmd.ctr.count, 3)
+ self.assertEqual(len(rmd.ctr.array), rmd.ctr.count)
+ self.assertIsNotNone(rmd.ctr.array[0])
+ self.assertEqual(rmd.ctr.array[0].attid, 1)
+
+ def test_array_from_ndr(self):
+ rmd = drsblobs.replPropertyMetaDataBlob()
+
+ rmd.version = 1
+ rmd.ctr = drsblobs.replPropertyMetaDataCtr1()
+ rmd.ctr.count = 3
+
+ rmd1 = drsblobs.replPropertyMetaData1()
+ rmd1.attid = 1
+ rmd1.version = 2
+
+ rmd2 = drsblobs.replPropertyMetaData1()
+ rmd2.attid = 2
+ rmd2.version = 2
+
+ rmd3 = drsblobs.replPropertyMetaData1()
+ rmd3.attid = 3
+ rmd3.version = 2
+
+ rmd.ctr.array = [rmd1, rmd2, rmd3]
+
+ packed = ndr_pack(rmd)
+ gc.collect()
+
+ rmd_unpacked = ndr_unpack(drsblobs.replPropertyMetaDataBlob, packed)
+ self.assertIsNotNone(rmd_unpacked)
+ self.assertEqual(rmd_unpacked.version, 1)
+ self.assertIsNotNone(rmd_unpacked.ctr)
+ self.assertEqual(rmd_unpacked.ctr.count, 3)
+ self.assertEqual(len(rmd_unpacked.ctr.array), rmd_unpacked.ctr.count)
+ self.assertIsNotNone(rmd_unpacked.ctr.array[0])
+ self.assertEqual(rmd_unpacked.ctr.array[0].attid, 1)
+
+ self.assertEqual(rmd.ctr.array[0].attid,
+ rmd_unpacked.ctr.array[0].attid)
+
+ def test_array_delete(self):
+ rmd = drsblobs.replPropertyMetaDataBlob()
+
+ rmd.version = 1
+ rmd.ctr = drsblobs.replPropertyMetaDataCtr1()
+ rmd.ctr.count = 3
+
+ rmd1 = drsblobs.replPropertyMetaData1()
+ rmd1.attid = 1
+ rmd1.version = 2
+
+ rmd2 = drsblobs.replPropertyMetaData1()
+ rmd2.attid = 2
+ rmd2.version = 2
+
+ rmd3 = drsblobs.replPropertyMetaData1()
+ rmd3.attid = 3
+ rmd3.version = 2
+
+ rmd.ctr.array = [rmd1, rmd2, rmd3]
+ try:
+ del rmd1.version
+ self.fail("succeeded in deleting rmd1.version")
+ except AttributeError as e:
+ pass
+
+ try:
+ del rmd.ctr.array
+ self.fail("succeeded in deleting rmd.ctr.array")
+ except AttributeError as e:
+ pass
diff --git a/python/samba/tests/dcerpc/bare.py b/python/samba/tests/dcerpc/bare.py
new file mode 100644
index 0000000..6229652
--- /dev/null
+++ b/python/samba/tests/dcerpc/bare.py
@@ -0,0 +1,61 @@
+# -*- coding: utf-8 -*-
+#
+# Unix SMB/CIFS implementation.
+# Copyright © Jelmer Vernooij <jelmer@samba.org> 2008
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for samba.tests.dcerpc.bare."""
+
+import os
+from samba.dcerpc import ClientConnection
+import samba.tests
+
+
+class BareTestCase(samba.tests.TestCase):
+
+ def test_bare(self):
+ # Connect to the echo pipe
+ x = ClientConnection("ncalrpc:localhost[DEFAULT]",
+ ("60a15ec5-4de8-11d7-a637-005056a20182", 1),
+ lp_ctx=samba.tests.env_loadparm())
+ self.assertEqual(b"\x01\x00\x00\x00", x.request(0, chr(0) * 4))
+
+ def test_two_contexts(self):
+ x = ClientConnection("ncalrpc:localhost[DEFAULT]",
+ ("12345778-1234-abcd-ef00-0123456789ac", 1),
+ lp_ctx=samba.tests.env_loadparm())
+ y = ClientConnection("ncalrpc:localhost",
+ ("60a15ec5-4de8-11d7-a637-005056a20182", 1),
+ basis_connection=x, lp_ctx=samba.tests.env_loadparm())
+ self.assertEqual(24, len(x.request(0, chr(0) * 8)))
+ self.assertEqual(b"\x01\x00\x00\x00", y.request(0, chr(0) * 4))
+
+ def test_bare_tcp(self):
+ # Connect to the echo pipe
+ x = ClientConnection("ncacn_ip_tcp:%s" % os.environ["SERVER"],
+ ("60a15ec5-4de8-11d7-a637-005056a20182", 1),
+ lp_ctx=samba.tests.env_loadparm())
+ self.assertEqual(b"\x01\x00\x00\x00", x.request(0, chr(0) * 4))
+
+ def test_two_contexts_tcp(self):
+ x = ClientConnection("ncacn_ip_tcp:%s" % os.environ["SERVER"],
+ ("12345778-1234-abcd-ef00-0123456789ac", 1),
+ lp_ctx=samba.tests.env_loadparm())
+ y = ClientConnection("ncacn_ip_tcp:%s" % os.environ["SERVER"],
+ ("60a15ec5-4de8-11d7-a637-005056a20182", 1),
+ basis_connection=x, lp_ctx=samba.tests.env_loadparm())
+ self.assertEqual(24, len(x.request(0, chr(0) * 8)))
+ self.assertEqual(b"\x01\x00\x00\x00", y.request(0, chr(0) * 4))
diff --git a/python/samba/tests/dcerpc/binding.py b/python/samba/tests/dcerpc/binding.py
new file mode 100644
index 0000000..1ad1f29
--- /dev/null
+++ b/python/samba/tests/dcerpc/binding.py
@@ -0,0 +1,101 @@
+#
+# Unix SMB/CIFS implementation.
+# Copyright (c) 2020 Andreas Schneider <asn@samba.org>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for samba.dcerpc, credentials and binding strings"""
+
+import samba.tests
+from samba.tests import RpcInterfaceTestCase, TestCase
+from samba.dcerpc import lsa
+import samba.dcerpc.security as security
+from samba.credentials import Credentials, SMB_ENCRYPTION_REQUIRED, SMB_ENCRYPTION_OFF
+from samba import NTSTATUSError
+
+class RpcBindingTests(RpcInterfaceTestCase):
+
+ def get_user_creds(self):
+ c = Credentials()
+ c.guess()
+ domain = samba.tests.env_get_var_value('DOMAIN')
+ username = samba.tests.env_get_var_value('USERNAME')
+ password = samba.tests.env_get_var_value('PASSWORD')
+ c.set_domain(domain)
+ c.set_username(username)
+ c.set_password(password)
+ return c
+
+ def test_smb3_dcerpc_no_encryption(self):
+ creds = self.get_user_creds()
+ creds.set_smb_encryption(SMB_ENCRYPTION_OFF)
+
+ lp = self.get_loadparm()
+ lp.set('client ipc max protocol', 'SMB3')
+ lp.set('client ipc min protocol', 'SMB3')
+
+ binding_string = ("ncacn_np:%s" % (samba.tests.env_get_var_value('SERVER')))
+ lsa_conn = lsa.lsarpc(binding_string, lp, creds)
+ self.assertFalse(lsa_conn.transport_encrypted())
+
+ objectAttr = lsa.ObjectAttribute()
+ objectAttr.sec_qos = lsa.QosInfo()
+
+ pol_handle = lsa_conn.OpenPolicy2('',
+ objectAttr,
+ security.SEC_FLAG_MAXIMUM_ALLOWED)
+ self.assertIsNotNone(pol_handle)
+
+ def test_smb3_dcerpc_encryption(self):
+ creds = self.get_user_creds()
+ creds.set_smb_encryption(SMB_ENCRYPTION_REQUIRED)
+
+ lp = self.get_loadparm()
+ lp.set('client ipc max protocol', 'SMB3')
+ lp.set('client ipc min protocol', 'SMB3')
+
+ binding_string = ("ncacn_np:%s" % (samba.tests.env_get_var_value('SERVER')))
+ lsa_conn = lsa.lsarpc(binding_string, lp, creds)
+ self.assertTrue(lsa_conn.transport_encrypted())
+
+ objectAttr = lsa.ObjectAttribute()
+ objectAttr.sec_qos = lsa.QosInfo()
+
+ pol_handle = lsa_conn.OpenPolicy2('',
+ objectAttr,
+ security.SEC_FLAG_MAXIMUM_ALLOWED)
+ self.assertIsNotNone(pol_handle)
+
+ def test_smb2_dcerpc_encryption(self):
+ creds = self.get_user_creds()
+ creds.set_smb_encryption(SMB_ENCRYPTION_REQUIRED)
+
+ lp = self.get_loadparm()
+ lp.set('client ipc max protocol', 'SMB2')
+ lp.set('client ipc min protocol', 'SMB2')
+
+ binding_string = ("ncacn_np:%s" % (samba.tests.env_get_var_value('SERVER')))
+ self.assertRaises(NTSTATUSError, lsa.lsarpc, binding_string, lp, creds)
+
+ def test_smb1_dcerpc_encryption(self):
+ creds = self.get_user_creds()
+ creds.set_smb_encryption(SMB_ENCRYPTION_REQUIRED)
+
+ lp = self.get_loadparm()
+ lp.set('client ipc max protocol', 'NT1')
+ lp.set('client ipc min protocol', 'NT1')
+
+ binding_string = ("ncacn_np:%s" % (samba.tests.env_get_var_value('SERVER')))
+ self.assertRaises(NTSTATUSError, lsa.lsarpc, binding_string, lp, creds)
diff --git a/python/samba/tests/dcerpc/createtrustrelax.py b/python/samba/tests/dcerpc/createtrustrelax.py
new file mode 100644
index 0000000..5dcb937
--- /dev/null
+++ b/python/samba/tests/dcerpc/createtrustrelax.py
@@ -0,0 +1,129 @@
+# Unix SMB/CIFS implementation.
+#
+# Copyright (C) Andrew Bartlett 2011
+# Copyright (C) Isaac Boukris 2020
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for the CreateTrustedDomainRelax wrapper"""
+
+import os
+import samba
+from samba.tests import TestCase
+from samba.dcerpc import lsa, security, drsblobs
+from samba.credentials import Credentials, SMB_ENCRYPTION_REQUIRED, SMB_ENCRYPTION_OFF
+from samba.trust_utils import CreateTrustedDomainRelax
+
+class CreateTrustedDomainRelaxTest(TestCase):
+
+ def get_user_creds(self):
+ c = Credentials()
+ c.guess()
+ domain = samba.tests.env_get_var_value('DOMAIN')
+ username = samba.tests.env_get_var_value('USERNAME')
+ password = samba.tests.env_get_var_value('PASSWORD')
+ c.set_domain(domain)
+ c.set_username(username)
+ c.set_password(password)
+ return c
+
+ def _create_trust_relax(self, smbencrypt=True):
+ creds = self.get_user_creds()
+
+ if smbencrypt:
+ creds.set_smb_encryption(SMB_ENCRYPTION_REQUIRED)
+ else:
+ creds.set_smb_encryption(SMB_ENCRYPTION_OFF)
+
+ lp = self.get_loadparm()
+
+ binding_string = ("ncacn_np:%s" % (samba.tests.env_get_var_value('SERVER')))
+ lsa_conn = lsa.lsarpc(binding_string, lp, creds)
+
+ if smbencrypt:
+ self.assertTrue(lsa_conn.transport_encrypted())
+ else:
+ self.assertFalse(lsa_conn.transport_encrypted())
+
+ objectAttr = lsa.ObjectAttribute()
+ objectAttr.sec_qos = lsa.QosInfo()
+
+ pol_handle = lsa_conn.OpenPolicy2('',
+ objectAttr,
+ security.SEC_FLAG_MAXIMUM_ALLOWED)
+ self.assertIsNotNone(pol_handle)
+
+ name = lsa.String()
+ name.string = "tests.samba.example.com"
+ try:
+ info = lsa_conn.QueryTrustedDomainInfoByName(pol_handle, name,
+ lsa.LSA_TRUSTED_DOMAIN_INFO_FULL_INFO)
+
+ lsa_conn.DeleteTrustedDomain(pol_handle, info.info_ex.sid)
+ except RuntimeError:
+ pass
+
+ info = lsa.TrustDomainInfoInfoEx()
+ info.domain_name.string = name.string
+ info.netbios_name.string = "createtrustrelax"
+ info.sid = security.dom_sid("S-1-5-21-538490383-3740119673-95748416")
+ info.trust_direction = lsa.LSA_TRUST_DIRECTION_INBOUND | lsa.LSA_TRUST_DIRECTION_OUTBOUND
+ info.trust_type = lsa.LSA_TRUST_TYPE_UPLEVEL
+ info.trust_attributes = lsa.LSA_TRUST_ATTRIBUTE_FOREST_TRANSITIVE
+
+ password_blob = samba.string_to_byte_array("password".encode('utf-16-le'))
+
+ clear_value = drsblobs.AuthInfoClear()
+ clear_value.size = len(password_blob)
+ clear_value.password = password_blob
+
+ clear_authentication_information = drsblobs.AuthenticationInformation()
+ clear_authentication_information.LastUpdateTime = 0
+ clear_authentication_information.AuthType = lsa.TRUST_AUTH_TYPE_CLEAR
+ clear_authentication_information.AuthInfo = clear_value
+
+ authentication_information_array = drsblobs.AuthenticationInformationArray()
+ authentication_information_array.count = 1
+ authentication_information_array.array = [clear_authentication_information]
+
+ outgoing = drsblobs.trustAuthInOutBlob()
+ outgoing.count = 1
+ outgoing.current = authentication_information_array
+
+ trustdom_handle = None
+ try:
+ trustdom_handle = CreateTrustedDomainRelax(lsa_conn,
+ pol_handle,
+ info,
+ security.SEC_STD_DELETE,
+ outgoing,
+ outgoing)
+ except samba.NTSTATUSError as nt:
+ raise AssertionError(nt)
+ except OSError as e:
+ if smbencrypt:
+ raise AssertionError(e)
+
+ if smbencrypt:
+ self.assertIsNotNone(trustdom_handle)
+ lsa_conn.DeleteTrustedDomain(pol_handle, info.sid)
+ else:
+ self.assertIsNone(trustdom_handle)
+
+ def test_create_trust_relax_encrypt(self):
+ self._create_trust_relax(True)
+
+ def test_create_trust_relax_no_enc(self):
+ self._create_trust_relax(False)
diff --git a/python/samba/tests/dcerpc/dnsserver.py b/python/samba/tests/dcerpc/dnsserver.py
new file mode 100644
index 0000000..13c9af8
--- /dev/null
+++ b/python/samba/tests/dcerpc/dnsserver.py
@@ -0,0 +1,1314 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Amitay Isaacs <amitay@gmail.com> 2011
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for samba.dcerpc.dnsserver"""
+
+import os
+import ldb
+
+from samba.auth import system_session
+from samba.samdb import SamDB
+from samba.ndr import ndr_unpack
+from samba.dcerpc import dnsp, dnsserver, security
+from samba.tests import RpcInterfaceTestCase, env_get_var_value
+from samba.dnsserver import record_from_string, flag_from_string, ARecord
+from samba import sd_utils, descriptor
+from samba import WERRORError, werror
+
+
+class DnsserverTests(RpcInterfaceTestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ super().setUpClass()
+
+ good_dns = ["SAMDOM.EXAMPLE.COM",
+ "1.EXAMPLE.COM",
+ "%sEXAMPLE.COM" % ("1." * 100),
+ "EXAMPLE",
+ "\n.COM",
+ "!@#$%^&*()_",
+ "HIGH\xFFBYTE",
+ "@.EXAMPLE.COM",
+ "."]
+ bad_dns = ["...",
+ ".EXAMPLE.COM",
+ ".EXAMPLE.",
+ "",
+ "SAMDOM..EXAMPLE.COM"]
+
+ good_mx = ["SAMDOM.EXAMPLE.COM 65535"]
+ bad_mx = []
+
+ good_srv = ["SAMDOM.EXAMPLE.COM 65535 65535 65535"]
+ bad_srv = []
+
+ for bad_dn in bad_dns:
+ bad_mx.append("%s 1" % bad_dn)
+ bad_srv.append("%s 0 0 0" % bad_dn)
+ for good_dn in good_dns:
+ good_mx.append("%s 1" % good_dn)
+ good_srv.append("%s 0 0 0" % good_dn)
+
+ cls.good_records = {
+ "A": ["192.168.0.1",
+ "255.255.255.255"],
+ "AAAA": ["1234:5678:9ABC:DEF0:0000:0000:0000:0000",
+ "0000:0000:0000:0000:0000:0000:0000:0000",
+ "1234:5678:9ABC:DEF0:1234:5678:9ABC:DEF0",
+ "1234:1234:1234::",
+ "1234:1234:1234:1234:1234::",
+ "1234:5678:9ABC:DEF0::",
+ "0000:0000::0000",
+ "1234::5678:9ABC:0000:0000:0000:0000",
+ "::1",
+ "::",
+ "1:1:1:1:1:1:1:1"],
+ "PTR": good_dns,
+ "CNAME": good_dns,
+ "NS": good_dns,
+ "MX": good_mx,
+ "SRV": good_srv,
+ "TXT": ["text", "", "@#!", "\n"]
+ }
+
+ cls.bad_records = {
+ "A": ["192.168.0.500",
+ "255.255.255.255/32"],
+ "AAAA": ["GGGG:1234:5678:9ABC:0000:0000:0000:0000",
+ "0000:0000:0000:0000:0000:0000:0000:0000/1",
+ "AAAA:AAAA:AAAA:AAAA:G000:0000:0000:1234",
+ "1234:5678:9ABC:DEF0:1234:5678:9ABC:DEF0:1234",
+ "1234:5678:9ABC:DEF0:1234:5678:9ABC",
+ "1111::1111::1111"],
+ "PTR": bad_dns,
+ "CNAME": bad_dns,
+ "NS": bad_dns,
+ "MX": bad_mx,
+ "SRV": bad_srv
+ }
+
+ # Because we use uint16_t for these numbers, we can't
+ # actually create these records.
+ invalid_mx = ["SAMDOM.EXAMPLE.COM -1",
+ "SAMDOM.EXAMPLE.COM 65536",
+ "%s 1" % ("A" * 256)]
+ invalid_srv = ["SAMDOM.EXAMPLE.COM 0 65536 0",
+ "SAMDOM.EXAMPLE.COM 0 0 65536",
+ "SAMDOM.EXAMPLE.COM 65536 0 0"]
+ cls.invalid_records = {
+ "MX": invalid_mx,
+ "SRV": invalid_srv
+ }
+
+ def setUp(self):
+ super().setUp()
+ self.server = os.environ["DC_SERVER"]
+ self.zone = env_get_var_value("REALM").lower()
+ self.conn = dnsserver.dnsserver("ncacn_ip_tcp:%s[sign]" % (self.server),
+ self.get_loadparm(),
+ self.get_credentials())
+
+ self.samdb = SamDB(url="ldap://%s" % os.environ["DC_SERVER_IP"],
+ lp=self.get_loadparm(),
+ session_info=system_session(),
+ credentials=self.get_credentials())
+
+ self.custom_zone = "zone"
+ zone_create_info = dnsserver.DNS_RPC_ZONE_CREATE_INFO_LONGHORN()
+ zone_create_info.pszZoneName = self.custom_zone
+ zone_create_info.dwZoneType = dnsp.DNS_ZONE_TYPE_PRIMARY
+ zone_create_info.fAging = 0
+ zone_create_info.fDsIntegrated = 1
+ zone_create_info.fLoadExisting = 1
+ zone_create_info.dwDpFlags = dnsserver.DNS_DP_DOMAIN_DEFAULT
+
+ self.conn.DnssrvOperation2(dnsserver.DNS_CLIENT_VERSION_LONGHORN,
+ 0,
+ self.server,
+ None,
+ 0,
+ 'ZoneCreate',
+ dnsserver.DNSSRV_TYPEID_ZONE_CREATE,
+ zone_create_info)
+
+ def tearDown(self):
+ self.conn.DnssrvOperation2(dnsserver.DNS_CLIENT_VERSION_LONGHORN,
+ 0,
+ self.server,
+ self.custom_zone,
+ 0,
+ 'DeleteZoneFromDs',
+ dnsserver.DNSSRV_TYPEID_NULL,
+ None)
+ super().tearDown()
+
+ def test_enum_is_sorted(self):
+ """
+ Confirm the zone is sorted
+ """
+
+ record_str = "192.168.50.50"
+ record_type_str = "A"
+ self.add_record(self.custom_zone, "atestrecord-1", record_type_str, record_str)
+ self.add_record(self.custom_zone, "atestrecord-2", record_type_str, record_str)
+ self.add_record(self.custom_zone, "atestrecord-3", record_type_str, record_str)
+ self.add_record(self.custom_zone, "atestrecord-4", record_type_str, record_str)
+ self.add_record(self.custom_zone, "atestrecord-0", record_type_str, record_str)
+
+ # This becomes an extra A on the zone itself by server-side magic
+ self.add_record(self.custom_zone, self.custom_zone, record_type_str, record_str)
+
+ _, result = self.conn.DnssrvEnumRecords2(dnsserver.DNS_CLIENT_VERSION_LONGHORN,
+ 0,
+ self.server,
+ self.custom_zone,
+ "@",
+ None,
+ flag_from_string(record_type_str),
+ dnsserver.DNS_RPC_VIEW_AUTHORITY_DATA,
+ None,
+ None)
+
+ self.assertEqual(len(result.rec), 6)
+ self.assertEqual(result.rec[0].dnsNodeName.str, "")
+ self.assertEqual(result.rec[1].dnsNodeName.str, "atestrecord-0")
+ self.assertEqual(result.rec[2].dnsNodeName.str, "atestrecord-1")
+ self.assertEqual(result.rec[3].dnsNodeName.str, "atestrecord-2")
+ self.assertEqual(result.rec[4].dnsNodeName.str, "atestrecord-3")
+ self.assertEqual(result.rec[5].dnsNodeName.str, "atestrecord-4")
+
def test_enum_is_sorted_with_zone_dup(self):
    """
    Confirm enumeration returns records in sorted order, even when a
    node carries the same name as the zone itself.
    """
    ip = "192.168.50.50"
    rtype = "A"
    for suffix in (1, 2, 3, 4, 0):
        self.add_record(self.custom_zone, "atestrecord-%d" % suffix, rtype, ip)

    # This triggers a bug in old Samba
    dup_name = self.custom_zone + "1"
    self.add_record(self.custom_zone, dup_name, rtype, ip)

    dn, _ = self.get_record_from_db(self.custom_zone, dup_name)

    # Rename the node so its first DN component equals the zone name,
    # creating the problematic duplicate.
    renamed = ldb.Dn(self.samdb, str(dn))
    renamed.set_component(0, "dc", self.custom_zone)
    self.samdb.rename(dn, renamed)

    _, result = self.conn.DnssrvEnumRecords2(dnsserver.DNS_CLIENT_VERSION_LONGHORN,
                                             0,
                                             self.server,
                                             self.custom_zone,
                                             "@",
                                             None,
                                             flag_from_string(rtype),
                                             dnsserver.DNS_RPC_VIEW_AUTHORITY_DATA,
                                             None,
                                             None)

    self.assertEqual(len(result.rec), 7)
    for idx, expected in enumerate(["", "atestrecord-0", "atestrecord-1",
                                    "atestrecord-2", "atestrecord-3",
                                    "atestrecord-4"]):
        self.assertEqual(result.rec[idx].dnsNodeName.str, expected)

    # Windows doesn't reload the zone fast enough, but doesn't
    # have the bug anyway; it will sort last on both names (where
    # it should).
    if result.rec[6].dnsNodeName.str != dup_name:
        self.assertEqual(result.rec[6].dnsNodeName.str, self.custom_zone)
+
def test_enum_is_sorted_children_prefix_first(self):
    """
    Confirm that enumerating under a child prefix returns the selected
    prefix first — but assert no more than that, as Samba is flappy
    for the full sort.
    """
    ip = "192.168.50.50"
    rtype = "A"
    for suffix in (1, 2, 3, 4, 0):
        self.add_record(self.custom_zone, "atestrecord-%d.a.b" % suffix, rtype, ip)

    # Not expected to be returned
    self.add_record(self.custom_zone, "atestrecord-0.b.b", rtype, ip)

    _, result = self.conn.DnssrvEnumRecords2(dnsserver.DNS_CLIENT_VERSION_LONGHORN,
                                             0,
                                             self.server,
                                             self.custom_zone,
                                             "a.b",
                                             None,
                                             flag_from_string(rtype),
                                             dnsserver.DNS_RPC_VIEW_AUTHORITY_DATA,
                                             None,
                                             None)

    self.assertEqual(len(result.rec), 6)
    self.assertEqual(result.rec[0].dnsNodeName.str, "")
+
def test_enum_is_sorted_children(self):
    """
    Confirm child-node enumeration comes back in sorted order.
    """
    ip = "192.168.50.50"
    rtype = "A"
    for suffix in (1, 2, 3, 4, 0):
        self.add_record(self.custom_zone, "atestrecord-%d.a.b" % suffix, rtype, ip)

    # Not expected to be returned
    self.add_record(self.custom_zone, "atestrecord-0.b.b", rtype, ip)

    _, result = self.conn.DnssrvEnumRecords2(dnsserver.DNS_CLIENT_VERSION_LONGHORN,
                                             0,
                                             self.server,
                                             self.custom_zone,
                                             "a.b",
                                             None,
                                             flag_from_string(rtype),
                                             dnsserver.DNS_RPC_VIEW_AUTHORITY_DATA,
                                             None,
                                             None)

    self.assertEqual(len(result.rec), 6)
    for idx, expected in enumerate(["", "atestrecord-0", "atestrecord-1",
                                    "atestrecord-2", "atestrecord-3",
                                    "atestrecord-4"]):
        self.assertEqual(result.rec[idx].dnsNodeName.str, expected)
+
# This test fails against Samba (but passes against Windows),
# because Samba does not return the record when we enum records.
# Records can be given DNS_RANK_NONE when the zone they are in
# does not have DNS_ZONE_TYPE_PRIMARY. Since such records can be
# deleted, however, we do not consider this urgent to fix and
# so this test is a knownfail.
def test_rank_none(self):
    """
    See what happens when a record's rank is set to DNS_RANK_NONE.
    """
    ip = "192.168.50.50"
    rtype = "A"
    self.add_record(self.custom_zone, "testrecord", rtype, ip)

    dn, record = self.get_record_from_db(self.custom_zone, "testrecord")
    record.rank = 0  # DNS_RANK_NONE
    if self.samdb.dns_replace_by_dn(dn, [record]) is not None:
        self.fail("Unable to update dns record to have DNS_RANK_NONE.")

    self.assert_num_records(self.custom_zone, "testrecord", rtype)
    # A duplicate add must still fail, and the delete must still work.
    self.add_record(self.custom_zone, "testrecord", rtype, ip, assertion=False)
    self.delete_record(self.custom_zone, "testrecord", rtype, ip)
    self.assert_num_records(self.custom_zone, "testrecord", rtype, 0)
+
def test_dns_tombstoned_zero_timestamp(self):
    """What happens with a zero EntombedTime tombstone?"""
    # A zero-timestamp tombstone record has a special meaning for
    # dns_common_replace(), the function exposed by
    # samdb.dns_replace_by_dn(). That function is *NOT* a general
    # purpose record replacement helper but a specialised part of the
    # dns update mechanism (for both DLZ and internal).
    #
    # In the earlier stages of handling updates, a record that needs
    # to be deleted is set to be a tombstone with a zero timestamp.
    # dns_common_replace() notices this specific marker and, if there
    # are no other records, marks the node as tombstoned, adding a
    # "real" tombstone in the process.
    #
    # If the tombstone has a non-zero timestamp (see the next test),
    # dns_common_replace() decides the node is already tombstoned and
    # takes no action.
    #
    # NOTE(review): the upstream comment here trailed off mid-sentence
    # ("changing the wType appears to") — the historical rationale is
    # unconfirmed.

    record_str = "192.168.50.50"
    self.add_record(self.custom_zone, "testrecord", 'A', record_str)

    dn, record = self.get_record_from_db(self.custom_zone, "testrecord")
    record.wType = dnsp.DNS_TYPE_TOMBSTONE
    record.data = 0
    self.samdb.dns_replace_by_dn(dn, [record])

    # No A record should remain, and one TOMBSTONE record should exist.
    self.assert_num_records(self.custom_zone, "testrecord", 'A', 0)
    # We can't assert tombstone counts via RPC: RPC has no tombstones
    # (it has "DNS_TYPE_ZERO" instead), and tombstones don't show up
    # under DNS_TYPE_ALL either.
    self.assert_num_records(self.custom_zone, "testrecord", 'ALL', 0)

    # But LDAP can see it:
    records = self.ldap_get_records(self.custom_zone, "testrecord")
    self.assertEqual(len(records), 1)
    tombstone = records[0]
    self.assertEqual(tombstone.wType, dnsp.DNS_TYPE_TOMBSTONE)
    self.assertGreater(tombstone.data, 1e17)  # ~ October 1916

    # This delete should fail, because there are no A records left.
    self.delete_record(self.custom_zone, "testrecord", 'A', record_str,
                       assertion=False)
+
def test_dns_tombstoned_nonzero_timestamp(self):
    """See what happens when we set a record to be tombstoned with an
    EntombedTime timestamp.
    """
    # Because this tombstone has a non-zero EntombedTime,
    # dns_common_replace() decides the node was already tombstoned,
    # that there is nothing to be done, and leaves the A record
    # where it was.
    ip = "192.168.50.50"
    self.add_record(self.custom_zone, "testrecord", 'A', ip)

    dn, record = self.get_record_from_db(self.custom_zone, "testrecord")
    record.wType = dnsp.DNS_TYPE_TOMBSTONE
    record.data = 0x123456789A
    self.samdb.dns_replace_by_dn(dn, [record])

    # The A record should survive, with no TOMBSTONE added.
    self.assert_num_records(self.custom_zone, "testrecord", 'A', 1)
    self.assert_num_records(self.custom_zone, "testrecord", 'TOMBSTONE', 0)
    # Deleting the surviving A record should therefore succeed.
    self.delete_record(self.custom_zone, "testrecord", 'A', ip,
                       assertion=True)
    self.assert_num_records(self.custom_zone, "testrecord", 'TOMBSTONE', 0)
    self.assert_num_records(self.custom_zone, "testrecord", 'A', 0)
+
def get_record_from_db(self, zone_name, record_name):
    """
    Look up a dnsNode record directly over LDAP.

    Returns (dn, DnssrvRpcRecord) for the first dnsNode whose DN
    contains record_name inside the matching zone, or None when no
    such node exists (callers such as check_params rely on the None
    return).

    Raises AssertionError when the zone itself cannot be found.
    """
    zones = self.samdb.search(base="DC=DomainDnsZones,%s" % self.samdb.get_default_basedn(), scope=ldb.SCOPE_SUBTREE,
                              expression="(objectClass=dnsZone)",
                              attrs=["cn"])

    zone_dn = None
    for zone in zones:
        # Substring match on the stringified DN, mirroring how the
        # rest of this suite identifies zones.
        if "DC=%s," % zone_name in str(zone.dn):
            zone_dn = zone.dn
            break

    if zone_dn is None:
        raise AssertionError("Couldn't find zone '%s'." % zone_name)

    records = self.samdb.search(base=zone_dn, scope=ldb.SCOPE_SUBTREE,
                                expression="(objectClass=dnsNode)",
                                attrs=["dnsRecord"])

    for old_packed_record in records:
        if record_name in str(old_packed_record.dn):
            rec = ndr_unpack(dnsp.DnssrvRpcRecord, old_packed_record["dnsRecord"][0])
            return (old_packed_record.dn, rec)

    # Fixed: make the not-found case an explicit, documented None
    # instead of silently falling off the end of the function.
    return None
+
def ldap_get_records(self, zone, name):
    """Fetch and unpack all dnsRecord blobs for a node via LDAP."""
    zone_dn = (f"DC={zone},CN=MicrosoftDNS,DC=DomainDNSZones,"
               f"{self.samdb.get_default_basedn()}")

    nodes = self.samdb.search(base=zone_dn,
                              scope=ldb.SCOPE_SUBTREE,
                              expression=f"(&(objectClass=dnsNode)(name={name}))",
                              attrs=["dnsRecord"])

    return [ndr_unpack(dnsp.DnssrvRpcRecord, blob)
            for blob in nodes[0].get('dnsRecord')]
+
def test_duplicate_matching(self):
    """
    Make sure that records which should be duplicates of each other,
    or distinct from each other, behave as expected.
    """
    distinct_dns = [("SAMDOM.EXAMPLE.COM",
                     "SAMDOM.EXAMPLE.CO",
                     "EXAMPLE.COM", "SAMDOM.EXAMPLE")]
    duplicate_dns = [("SAMDOM.EXAMPLE.COM", "samdom.example.com", "SAMDOM.example.COM"),
                     ("EXAMPLE.", "EXAMPLE")]

    # Every tuple holds entries which should be considered duplicates
    # of one another.
    duplicates = {
        "AAAA": [("AAAA::", "aaaa::"),
                 ("AAAA::", "AAAA:0000::"),
                 ("AAAA::", "AAAA:0000:0000:0000:0000:0000:0000:0000"),
                 ("AAAA::", "AAAA:0:0:0:0:0:0:0"),
                 ("0123::", "123::"),
                 ("::", "::0", "0000:0000:0000:0000:0000:0000:0000:0000")],
    }

    # Every tuple holds entries which should be considered distinct
    # from one another.
    distinct = {
        "A": [("192.168.1.0", "192.168.1.1", "192.168.2.0", "192.169.1.0", "193.168.1.0")],
        "AAAA": [("AAAA::1234:5678:9ABC", "::AAAA:1234:5678:9ABC"),
                 ("1000::", "::1000"),
                 ("::1", "::11", "::1111"),
                 ("1234::", "0234::")],
        "SRV": [("SAMDOM.EXAMPLE.COM 1 1 1", "SAMDOM.EXAMPLE.COM 1 1 0", "SAMDOM.EXAMPLE.COM 1 0 1",
                 "SAMDOM.EXAMPLE.COM 0 1 1", "SAMDOM.EXAMPLE.COM 2 1 0", "SAMDOM.EXAMPLE.COM 2 2 2")],
        "MX": [("SAMDOM.EXAMPLE.COM 1", "SAMDOM.EXAMPLE.COM 0")],
        "TXT": [("A RECORD", "B RECORD", "a record")]
    }

    # DNS-name-valued types all share the same duplicate/distinct sets.
    for rtype in ("PTR", "CNAME", "NS"):
        distinct[rtype] = distinct_dns
        duplicates[rtype] = duplicate_dns

    for rtype, dup_tuples in duplicates.items():
        for dup_tuple in dup_tuples:
            # Add duplicates: everything after the first must fail,
            # leaving exactly one record.
            self.add_record(self.custom_zone, "testrecord", rtype, dup_tuple[0])
            for value in dup_tuple:
                self.add_record(self.custom_zone, "testrecord", rtype, value, assertion=False)
                self.assert_num_records(self.custom_zone, "testrecord", rtype)
            self.delete_record(self.custom_zone, "testrecord", rtype, dup_tuple[0])

            # Repeatedly add the first duplicate and delete it via each
            # equivalent spelling; every delete must succeed.
            for value in dup_tuple:
                self.add_record(self.custom_zone, "testrecord", rtype, dup_tuple[0])
                self.delete_record(self.custom_zone, "testrecord", rtype, value)

    for rtype, distinct_tuples in distinct.items():
        for distinct_tuple in distinct_tuples:
            # All members of a distinct tuple must be addable side by side.
            for count, value in enumerate(distinct_tuple, start=1):
                try:
                    self.add_record(self.custom_zone, "testrecord", rtype, value)
                    # All records so far should have been added.
                    self.assert_num_records(self.custom_zone, "testrecord", rtype, expected_num=count)
                except AssertionError as e:
                    raise AssertionError("Failed to add %s, which should be distinct from all others in the set. "
                                         "Original error: %s\nDistinct set: %s." % (value, e, distinct_tuple))
            for value in distinct_tuple:
                self.delete_record(self.custom_zone, "testrecord", rtype, value)
            # CNAMEs should not have been added, since they conflict.
            if rtype == 'CNAME':
                continue

            # Add the first distinct value and check that removing any
            # *other* member fails. Windows fails this test, probably
            # due to weird tombstoning behavior.
            self.add_record(self.custom_zone, "testrecord", rtype, distinct_tuple[0])
            for value in distinct_tuple:
                if value == distinct_tuple[0]:
                    continue
                try:
                    self.delete_record(self.custom_zone, "testrecord", rtype, value, assertion=False)
                except AssertionError as e:
                    raise AssertionError("Managed to remove %s by attempting to remove %s. Original error: %s"
                                         % (distinct_tuple[0], value, e))
            self.delete_record(self.custom_zone, "testrecord", rtype, distinct_tuple[0])
+
def test_accept_valid_commands(self):
    """
    Make sure a variety of valid records can be added, queried and
    deleted.
    """
    for rtype, values in self.good_records.items():
        for value in values:
            self.add_record(self.custom_zone, "testrecord", rtype, value)
            self.assert_num_records(self.custom_zone, "testrecord", rtype)
            self.delete_record(self.custom_zone, "testrecord", rtype, value)
+
def check_params(self, wDataLength, rank, flags, dwTtlSeconds, dwReserved, data,
                 wType, dwTimeStamp=0, zone="zone", rec_name="testrecord"):
    """
    Fetch rec_name from zone over LDAP and compare every stored field
    against the expected values.
    """
    found = self.get_record_from_db(zone, rec_name)
    self.assertIsNotNone(found, "Expected record %s but was not found over LDAP." % data)
    _, rec = found
    self.assertEqual(wDataLength, rec.wDataLength, "Unexpected data length for record %s. Got %s, expected %s." % (data, rec.wDataLength, wDataLength))
    self.assertEqual(rank, rec.rank, "Unexpected rank for record %s. Got %s, expected %s." % (data, rec.rank, rank))
    self.assertEqual(flags, rec.flags, "Unexpected flags for record %s. Got %s, expected %s." % (data, rec.flags, flags))
    self.assertEqual(dwTtlSeconds, rec.dwTtlSeconds, "Unexpected time to live for record %s. Got %s, expected %s." % (data, rec.dwTtlSeconds, dwTtlSeconds))
    self.assertEqual(dwReserved, rec.dwReserved, "Unexpected dwReserved for record %s. Got %s, expected %s." % (data, rec.dwReserved, dwReserved))
    # Data comparison is case-insensitive (e.g. AAAA hex digits).
    self.assertEqual(data.lower(), rec.data.lower(), "Unexpected data for record %s. Got %s, expected %s." % (data, rec.data.lower(), data.lower()))
    self.assertEqual(wType, rec.wType, "Unexpected wType for record %s. Got %s, expected %s." % (data, rec.wType, wType))
    self.assertEqual(dwTimeStamp, rec.dwTimeStamp, "Unexpected timestamp for record %s. Got %s, expected %s." % (data, rec.dwTimeStamp, dwTimeStamp))
+
def test_record_params(self):
    """
    Make sure that records added to the database carry reasonable
    parameters (data length, rank, flags, TTL, reserved, data, wType).
    """
    cases = [
        ("A", "192.168.50.50",
         (4, 240, 0, 900, 0, "192.168.50.50", 1)),
        ("AAAA", "AAAA:AAAA::",
         (16, 240, 0, 900, 0, "AAAA:AAAA:0000:0000:0000:0000:0000:0000", 28)),
        ("CNAME", "cnamedest",
         (13, 240, 0, 900, 0, "cnamedest", 5)),
    ]
    for rtype, value, expected_params in cases:
        self.add_record(self.custom_zone, "testrecord", rtype, value)
        self.check_params(*expected_params)
        self.delete_record(self.custom_zone, "testrecord", rtype, value)
+
def test_reject_invalid_commands(self):
    """
    Make sure a variety of invalid records cannot be added, and that
    valid records cannot be updated to invalid ones.

    Failures are counted so every combination is exercised; the test
    fails at the end if anything misbehaved.
    """
    num_failures = 0
    for rtype, bad_values in self.bad_records.items():
        for bad_value in bad_values:
            # The bad add should fail; since the record then doesn't
            # exist, querying and deleting it should fail too.
            try:
                self.add_record(self.custom_zone, "testrecord", rtype, bad_value, assertion=False)
                self.assert_num_records(self.custom_zone, "testrecord", rtype, expected_num=0)
                self.delete_record(self.custom_zone, "testrecord", rtype, bad_value, assertion=False)
            except AssertionError as e:
                print(e)
                num_failures += 1

    # Updating a valid record to an invalid one must also fail.
    for rtype, bad_values in self.bad_records.items():
        for bad_value in bad_values:
            good_value = self.good_records[rtype][0]
            self.add_record(self.custom_zone, "testrecord", rtype, good_value)
            try:
                self.add_record(self.custom_zone, "testrecord", rtype, bad_value, assertion=False)
            except AssertionError as e:
                print(e)
                num_failures += 1
            self.delete_record(self.custom_zone, "testrecord", rtype, good_value)

    self.assertTrue(num_failures == 0, "Failed to reject invalid commands. Total failures: %d." % num_failures)
+
def test_add_duplicate_different_type(self):
    """
    Attempt to add some values which have the same name as
    existing ones, just a different type.

    Failures are counted rather than aborting the test, so every
    type pairing is exercised; the test fails at the end if any
    combination misbehaved.
    """
    num_failures = 0
    for record_type_str_1 in self.good_records:
        record1 = self.good_records[record_type_str_1][0]
        self.add_record(self.custom_zone, "testrecord", record_type_str_1, record1)
        for record_type_str_2 in self.good_records:
            if record_type_str_1 == record_type_str_2:
                continue

            record2 = self.good_records[record_type_str_2][0]

            has_a = record_type_str_1 == 'A' or record_type_str_2 == 'A'
            has_aaaa = record_type_str_1 == 'AAAA' or record_type_str_2 == 'AAAA'
            has_cname = record_type_str_1 == 'CNAME' or record_type_str_2 == 'CNAME'
            has_ptr = record_type_str_1 == 'PTR' or record_type_str_2 == 'PTR'
            has_mx = record_type_str_1 == 'MX' or record_type_str_2 == 'MX'
            has_srv = record_type_str_1 == 'SRV' or record_type_str_2 == 'SRV'
            has_txt = record_type_str_1 == 'TXT' or record_type_str_2 == 'TXT'

            # If we attempt to add any record except A or AAAA when we already have an NS record,
            # the add should fail.
            add_error_ok = False
            if record_type_str_1 == 'NS' and not has_a and not has_aaaa:
                add_error_ok = True
            # If we attempt to add a CNAME when an A, PTR or MX record exists, the add should fail.
            if record_type_str_2 == 'CNAME' and (has_ptr or has_mx or has_a or has_aaaa):
                add_error_ok = True
            # If we have a CNAME, adding an A, AAAA, SRV or TXT record should fail.
            # If we have an A, AAAA, SRV or TXT record, adding a CNAME should fail.
            if has_cname and (has_a or has_aaaa or has_srv or has_txt):
                add_error_ok = True

            try:
                self.add_record(self.custom_zone, "testrecord", record_type_str_2, record2)
                if add_error_ok:
                    num_failures += 1
                    print("Expected error when adding %s while a %s existed."
                          % (record_type_str_2, record_type_str_1))
            except AssertionError:
                # Fixed: the exception value was bound but never used.
                if not add_error_ok:
                    num_failures += 1
                    print("Didn't expect error when adding %s while a %s existed."
                          % (record_type_str_2, record_type_str_1))

            if not add_error_ok:
                # In the "normal" case, we expect the add to work and us to have one of each type of record afterwards.
                expected_num_type_1 = 1
                expected_num_type_2 = 1

                # If we have an MX record, a PTR record should replace it when added.
                # If we have a PTR record, an MX record should replace it when added.
                if has_ptr and has_mx:
                    expected_num_type_1 = 0

                # If we have a CNAME, SRV or TXT record, a PTR or MX record should replace it when added.
                if (has_cname or has_srv or has_txt) and (record_type_str_2 == 'PTR' or record_type_str_2 == 'MX'):
                    expected_num_type_1 = 0

                if (record_type_str_1 == 'NS' and (has_a or has_aaaa)):
                    expected_num_type_2 = 0

                try:
                    self.assert_num_records(self.custom_zone, "testrecord", record_type_str_1, expected_num=expected_num_type_1)
                except AssertionError:
                    num_failures += 1
                    print("Expected %s %s records after adding a %s record and a %s record already existed."
                          % (expected_num_type_1, record_type_str_1, record_type_str_2, record_type_str_1))
                try:
                    self.assert_num_records(self.custom_zone, "testrecord", record_type_str_2, expected_num=expected_num_type_2)
                except AssertionError:
                    num_failures += 1
                    print("Expected %s %s records after adding a %s record and a %s record already existed."
                          % (expected_num_type_2, record_type_str_2, record_type_str_2, record_type_str_1))

            # Best-effort cleanup: record 2 may legitimately be absent.
            try:
                self.delete_record(self.custom_zone, "testrecord", record_type_str_2, record2)
            except AssertionError:
                pass

        self.delete_record(self.custom_zone, "testrecord", record_type_str_1, record1)

    self.assertTrue(num_failures == 0, "Failed collision and replacement behavior. Total failures: %d." % num_failures)
+
# Windows fails this test in the same way we do.
def _test_cname(self):
    """
    Test some special properties of CNAME records.
    """
    # RFC 1912: when a CNAME record exists, there must not be any
    # other records with the same alias.
    cname_record = self.good_records["CNAME"][1]
    self.add_record(self.custom_zone, "testrecord", "CNAME", cname_record)

    for rtype, values in self.good_records.items():
        self.add_record(self.custom_zone, "testrecord", rtype, values[0], assertion=False)
        self.assert_num_records(self.custom_zone, "testrecord", rtype, expected_num=0)

    # RFC 2181: MX & NS records must not be allowed to point at a
    # CNAME alias.
    self.add_record(self.custom_zone, "mxrec", "MX", "testrecord 1", assertion=False)
    self.add_record(self.custom_zone, "nsrec", "NS", "testrecord", assertion=False)

    self.delete_record(self.custom_zone, "testrecord", "CNAME", cname_record)
+
def test_add_duplicate_value(self):
    """
    Make sure duplicate values of any type cannot be added.
    """
    for rtype, values in self.good_records.items():
        value = values[0]

        self.add_record(self.custom_zone, "testrecord", rtype, value)
        # A second identical add must fail, leaving one record behind.
        self.add_record(self.custom_zone, "testrecord", rtype, value, assertion=False)
        self.assert_num_records(self.custom_zone, "testrecord", rtype)
        self.delete_record(self.custom_zone, "testrecord", rtype, value)
+
def test_add_similar_value(self):
    """
    Attempt to add values with the same name and type in the same
    zone. This should work, and should result in both values
    existing (except with some types, which are skipped below).
    """
    for record_type_str in self.good_records:
        for i in range(1, len(self.good_records[record_type_str])):
            record1 = self.good_records[record_type_str][i - 1]
            record2 = self.good_records[record_type_str][i]

            # We expect CNAME records to override one another, as
            # an alias can only map to one CNAME record.
            # Also, on Windows, when the empty string is added and
            # another record is added afterwards, the empty string
            # will be silently overridden by the new one, so it
            # fails this test for the empty string. So skip CNAMEs
            # entirely.
            if record_type_str == 'CNAME':
                continue

            # Fixed: only non-CNAME types reach this point, so both
            # records always coexist. (The previous
            # "1 if record_type_str == 'CNAME' else 2" expression was
            # dead code after the skip above.)
            expected_num = 2

            self.add_record(self.custom_zone, "testrecord", record_type_str, record1)
            self.add_record(self.custom_zone, "testrecord", record_type_str, record2)
            self.assert_num_records(self.custom_zone, "testrecord", record_type_str, expected_num=expected_num)
            self.delete_record(self.custom_zone, "testrecord", record_type_str, record1)
            self.delete_record(self.custom_zone, "testrecord", record_type_str, record2)
+
def assert_record(self, zone, name, record_type_str, expected_record_str,
                  assertion=True, client_version=dnsserver.DNS_CLIENT_VERSION_LONGHORN):
    """
    Assert that the given record of the given type is present
    (assertion=True) or absent (assertion=False) in the given zone.
    """
    try:
        _, result = self.query_records(zone, name, record_type_str)
    except RuntimeError:
        if assertion:
            raise AssertionError("Record '%s' of type '%s' was not present when it should have been."
                                 % (expected_record_str, record_type_str))
        return

    found = any(record.data == expected_record_str
                for record in result.rec[0].records)

    if found and not assertion:
        raise AssertionError("Record '%s' of type '%s' was present when it shouldn't have been." % (expected_record_str, record_type_str))
    if not found and assertion:
        raise AssertionError("Record '%s' of type '%s' was not present when it should have been." % (expected_record_str, record_type_str))
+
def assert_num_records(self, zone, name, record_type_str, expected_num=1,
                       client_version=dnsserver.DNS_CLIENT_VERSION_LONGHORN):
    """
    Assert that exactly expected_num records of the given type exist
    under the given name in the given zone. A query failure is treated
    as zero records.
    """
    try:
        _, result = self.query_records(zone, name, record_type_str)
    except RuntimeError:
        if not expected_num == 0:
            raise AssertionError("There were no records of type '%s' with the name '%s' when %d were expected."
                                 % (record_type_str, name, expected_num))
        return
    num_results = len(result.rec[0].records)
    if not num_results == expected_num:
        raise AssertionError("There were %d records of type '%s' with the name '%s' when %d were expected."
                             % (num_results, record_type_str, name, expected_num))
+
def query_records(self, zone, name, record_type_str, client_version=dnsserver.DNS_CLIENT_VERSION_LONGHORN):
    """Enumerate matching records over RPC; returns the raw
    (buffer length, result) pair from DnssrvEnumRecords2."""
    select_flags = (dnsserver.DNS_RPC_VIEW_AUTHORITY_DATA |
                    dnsserver.DNS_RPC_VIEW_NO_CHILDREN)
    return self.conn.DnssrvEnumRecords2(client_version,
                                        0,
                                        self.server,
                                        zone,
                                        name,
                                        None,
                                        flag_from_string(record_type_str),
                                        select_flags,
                                        None,
                                        None)
+
def add_record(self, zone, name, record_type_str, record_str,
               assertion=True, client_version=dnsserver.DNS_CLIENT_VERSION_LONGHORN):
    """
    Attempt to add a mapping from the given name to a record of the
    given type in the given zone, asserting success (assertion=True)
    or failure (assertion=False). Existing records with the same name
    may be updated by this call.
    """
    add_rec_buf = dnsserver.DNS_RPC_RECORD_BUF()
    add_rec_buf.rec = record_from_string(record_type_str, record_str, sep=' ')

    try:
        self.conn.DnssrvUpdateRecord2(client_version,
                                      0,
                                      self.server,
                                      zone,
                                      name,
                                      add_rec_buf,
                                      None)
    except RuntimeError as e:
        if assertion:
            raise AssertionError("Failed to add record '%s' of type '%s', which should have succeeded. Error was '%s'."
                                 % (record_str, record_type_str, str(e)))
    else:
        if not assertion:
            raise AssertionError("Successfully added record '%s' of type '%s', which should have failed."
                                 % (record_str, record_type_str))
+
def delete_record(self, zone, name, record_type_str, record_str,
                  assertion=True, client_version=dnsserver.DNS_CLIENT_VERSION_LONGHORN):
    """
    Attempt to delete a record with the given name, value and type
    from the given zone, asserting success (assertion=True) or
    failure (assertion=False).
    """
    del_rec_buf = dnsserver.DNS_RPC_RECORD_BUF()
    del_rec_buf.rec = record_from_string(record_type_str, record_str, sep=' ')

    try:
        self.conn.DnssrvUpdateRecord2(client_version,
                                      0,
                                      self.server,
                                      zone,
                                      name,
                                      None,
                                      del_rec_buf)
    except RuntimeError as e:
        if assertion:
            raise AssertionError("Failed to delete record '%s' of type '%s', which should have succeeded. Error was '%s'." % (record_str, record_type_str, str(e)))
    else:
        if not assertion:
            raise AssertionError("Successfully deleted record '%s' of type '%s', which should have failed." % (record_str, record_type_str))
+
def test_query2(self):
    """Check that DnssrvQuery2 returns the ServerInfo typeid matching
    each client version."""
    cases = (
        (dnsserver.DNS_CLIENT_VERSION_W2K,
         dnsserver.DNSSRV_TYPEID_SERVER_INFO_W2K),
        (dnsserver.DNS_CLIENT_VERSION_DOTNET,
         dnsserver.DNSSRV_TYPEID_SERVER_INFO_DOTNET),
        (dnsserver.DNS_CLIENT_VERSION_LONGHORN,
         dnsserver.DNSSRV_TYPEID_SERVER_INFO),
    )
    for client_version, expected_typeid in cases:
        typeid, result = self.conn.DnssrvQuery2(client_version,
                                                0,
                                                self.server,
                                                None,
                                                'ServerInfo')
        self.assertEqual(expected_typeid, typeid)
+
+
# This test is to confirm that we do not support multizone operations,
# which are designated by a non-zero dwContext value (the 3rd argument
# to DnssrvOperation).
def test_operation_invalid(self):
    """A non-zero dwContext must yield ZONE_DOES_NOT_EXIST."""
    non_zone = 'a-zone-that-does-not-exist'
    name_and_param = dnsserver.DNS_RPC_NAME_AND_PARAM()
    name_and_param.pszNodeName = 'AllowUpdate'
    name_and_param.dwParam = dnsp.DNS_ZONE_UPDATE_SECURE
    try:
        self.conn.DnssrvOperation(self.server,
                                  non_zone,
                                  1,
                                  'ResetDwordProperty',
                                  dnsserver.DNSSRV_TYPEID_NAME_AND_PARAM,
                                  name_and_param)
    except WERRORError as e:
        if e.args[0] == werror.WERR_DNS_ERROR_ZONE_DOES_NOT_EXIST:
            return

    # We should always encounter a DOES_NOT_EXIST error.
    self.fail()
+
# This test is to confirm that we do not support multizone operations,
# which are designated by a non-zero dwContext value (the 5th argument
# to DnssrvOperation2).
def test_operation2_invalid(self):
    """A non-zero dwContext must yield ZONE_DOES_NOT_EXIST."""
    non_zone = 'a-zone-that-does-not-exist'
    name_and_param = dnsserver.DNS_RPC_NAME_AND_PARAM()
    name_and_param.pszNodeName = 'AllowUpdate'
    name_and_param.dwParam = dnsp.DNS_ZONE_UPDATE_SECURE
    try:
        self.conn.DnssrvOperation2(dnsserver.DNS_CLIENT_VERSION_LONGHORN,
                                   0,
                                   self.server,
                                   non_zone,
                                   1,
                                   'ResetDwordProperty',
                                   dnsserver.DNSSRV_TYPEID_NAME_AND_PARAM,
                                   name_and_param)
    except WERRORError as e:
        if e.args[0] == werror.WERR_DNS_ERROR_ZONE_DOES_NOT_EXIST:
            return

    # We should always encounter a DOES_NOT_EXIST error.
    self.fail()
+
def test_operation2(self):
    """Create a reverse zone via DnssrvOperation2, confirm it shows up
    in EnumZones, delete it, and confirm it is gone."""
    client_version = dnsserver.DNS_CLIENT_VERSION_LONGHORN
    rev_zone = '1.168.192.in-addr.arpa'

    zone_create = dnsserver.DNS_RPC_ZONE_CREATE_INFO_LONGHORN()
    zone_create.pszZoneName = rev_zone
    zone_create.dwZoneType = dnsp.DNS_ZONE_TYPE_PRIMARY
    zone_create.fAllowUpdate = dnsp.DNS_ZONE_UPDATE_SECURE
    zone_create.fAging = 0
    zone_create.dwDpFlags = dnsserver.DNS_DP_DOMAIN_DEFAULT

    # Create zone
    self.conn.DnssrvOperation2(client_version,
                               0,
                               self.server,
                               None,
                               0,
                               'ZoneCreate',
                               dnsserver.DNSSRV_TYPEID_ZONE_CREATE,
                               zone_create)

    request_filter = (dnsserver.DNS_ZONE_REQUEST_REVERSE |
                      dnsserver.DNS_ZONE_REQUEST_PRIMARY)

    def count_reverse_zones():
        # One EnumZones round trip, returning the reverse-zone count.
        _, zones = self.conn.DnssrvComplexOperation2(client_version,
                                                     0,
                                                     self.server,
                                                     None,
                                                     'EnumZones',
                                                     dnsserver.DNSSRV_TYPEID_DWORD,
                                                     request_filter)
        return zones.dwZoneCount

    self.assertEqual(1, count_reverse_zones())

    # Delete zone
    self.conn.DnssrvOperation2(client_version,
                               0,
                               self.server,
                               rev_zone,
                               0,
                               'DeleteZoneFromDs',
                               dnsserver.DNSSRV_TYPEID_NULL,
                               None)

    self.assertEqual(0, count_reverse_zones())
+
def test_complexoperation2(self):
    """EnumZones via DnssrvComplexOperation2: 3 forward primary zones
    and no reverse primary zones are expected."""
    client_version = dnsserver.DNS_CLIENT_VERSION_LONGHORN
    cases = (
        (dnsserver.DNS_ZONE_REQUEST_FORWARD |
         dnsserver.DNS_ZONE_REQUEST_PRIMARY, 3),
        (dnsserver.DNS_ZONE_REQUEST_REVERSE |
         dnsserver.DNS_ZONE_REQUEST_PRIMARY, 0),
    )
    for request_filter, expected_count in cases:
        typeid, zones = self.conn.DnssrvComplexOperation2(client_version,
                                                          0,
                                                          self.server,
                                                          None,
                                                          'EnumZones',
                                                          dnsserver.DNSSRV_TYPEID_DWORD,
                                                          request_filter)
        self.assertEqual(dnsserver.DNSSRV_TYPEID_ZONE_LIST, typeid)
        self.assertEqual(expected_count, zones.dwZoneCount)
+
def test_enumrecords2(self):
    """Enumerate the NS root hints (with additional data) and check
    the expected entry count."""
    client_version = dnsserver.DNS_CLIENT_VERSION_LONGHORN
    select_flags = (dnsserver.DNS_RPC_VIEW_ROOT_HINT_DATA |
                    dnsserver.DNS_RPC_VIEW_ADDITIONAL_DATA)
    _, roothints = self.conn.DnssrvEnumRecords2(client_version,
                                                0,
                                                self.server,
                                                '..RootHints',
                                                '.',
                                                None,
                                                dnsp.DNS_TYPE_NS,
                                                select_flags,
                                                None,
                                                None)
    self.assertEqual(14, roothints.count)  # 1 NS + 13 A records (a-m)
+
def test_updaterecords2(self):
    """Exercise DnssrvUpdateRecord2: add, replace and delete an A
    record, verifying the visible state after each step."""
    version = dnsserver.DNS_CLIENT_VERSION_LONGHORN
    rtype = dnsp.DNS_TYPE_A
    flags = dnsserver.DNS_RPC_VIEW_AUTHORITY_DATA
    name = 'dummy'
    first = ARecord('1.2.3.4')
    second = ARecord('5.6.7.8')

    def enum():
        # Fetch the current records under `name`.
        return self.conn.DnssrvEnumRecords2(version,
                                            0,
                                            self.server,
                                            self.zone,
                                            name,
                                            None,
                                            rtype,
                                            flags,
                                            None,
                                            None)

    def update(add_rec, del_rec):
        # Issue a single DnssrvUpdateRecord2, wrapping the optional
        # add/delete records in DNS_RPC_RECORD_BUFs.
        add_buf = None
        if add_rec is not None:
            add_buf = dnsserver.DNS_RPC_RECORD_BUF()
            add_buf.rec = add_rec
        del_buf = None
        if del_rec is not None:
            del_buf = dnsserver.DNS_RPC_RECORD_BUF()
            del_buf.rec = del_rec
        self.conn.DnssrvUpdateRecord2(version,
                                      0,
                                      self.server,
                                      self.zone,
                                      name,
                                      add_buf,
                                      del_buf)

    # Add record
    update(first, None)
    _, result = enum()
    self.assertEqual(1, result.count)
    self.assertEqual(1, result.rec[0].wRecordCount)
    self.assertEqual(dnsp.DNS_TYPE_A, result.rec[0].records[0].wType)
    self.assertEqual('1.2.3.4', result.rec[0].records[0].data)

    # Update record (add and delete in one call)
    update(second, first)
    _, result = enum()
    self.assertEqual(1, result.count)
    self.assertEqual(1, result.rec[0].wRecordCount)
    self.assertEqual(dnsp.DNS_TYPE_A, result.rec[0].records[0].wType)
    self.assertEqual('5.6.7.8', result.rec[0].records[0].data)

    # Delete record; enumerating afterwards must fail
    update(None, second)
    self.assertRaises(RuntimeError, self.conn.DnssrvEnumRecords2,
                      version,
                      0,
                      self.server,
                      self.zone,
                      name,
                      None,
                      rtype,
                      flags,
                      None,
                      None)
+
+ # The following tests do not pass against Samba because the owner and
+ # group are not consistent with Windows, as well as some ACEs.
+ #
+ # The following ACE are also required for 2012R2:
+ #
+ # (OA;CIIO;WP;ea1b7b93-5e48-46d5-bc6c-4df4fda78a35;bf967a86-0de6-11d0-a285-00aa003049e2;PS)
+ # (OA;OICI;RPWP;3f78c3e5-f79a-46bd-a0b8-9d18116ddc79;;PS)"
+ #
+ # [TPM + Allowed-To-Act-On-Behalf-Of-Other-Identity]
+ def test_security_descriptor_msdcs_zone(self):
+ """
+ Make sure that security descriptors of the msdcs zone is
+ as expected.
+ """
+
+ zones = self.samdb.search(base="DC=ForestDnsZones,%s" % self.samdb.get_default_basedn(),
+ scope=ldb.SCOPE_SUBTREE,
+ expression="(&(objectClass=dnsZone)(name=_msdcs*))",
+ attrs=["nTSecurityDescriptor", "objectClass"])
+ self.assertEqual(len(zones), 1)
+ self.assertIn("nTSecurityDescriptor", zones[0])
+ tmp = zones[0]["nTSecurityDescriptor"][0]
+ utils = sd_utils.SDUtils(self.samdb)
+ sd = ndr_unpack(security.descriptor, tmp)
+
+ domain_sid = security.dom_sid(self.samdb.get_domain_sid())
+
+ res = self.samdb.search(base=self.samdb.get_default_basedn(), scope=ldb.SCOPE_SUBTREE,
+ expression="(sAMAccountName=DnsAdmins)",
+ attrs=["objectSid"])
+
+ dns_admin = str(ndr_unpack(security.dom_sid, res[0]['objectSid'][0]))
+
+ packed_sd = descriptor.sddl2binary("O:SYG:BA"
+ "D:AI(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;DA)"
+ "(A;;CC;;;AU)"
+ "(A;;RPLCLORC;;;WD)"
+ "(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)"
+ "(A;CI;RPWPCRCCDCLCRCWOWDSDDTSW;;;ED)",
+ domain_sid, {"DnsAdmins": dns_admin})
+ expected_sd = descriptor.get_clean_sd(ndr_unpack(security.descriptor, packed_sd))
+
+ diff = descriptor.get_diff_sds(expected_sd, sd, domain_sid)
+ self.assertEqual(diff, '', "SD of msdcs zone different to expected.\n"
+ "Difference was:\n%s\nExpected: %s\nGot: %s" %
+ (diff, expected_sd.as_sddl(utils.domain_sid),
+ sd.as_sddl(utils.domain_sid)))
+
+ def test_security_descriptor_forest_zone(self):
+ """
+ Make sure that security descriptors of forest dns zones are
+ as expected.
+ """
+ forest_zone = "test_forest_zone"
+ zone_create_info = dnsserver.DNS_RPC_ZONE_CREATE_INFO_LONGHORN()
+ zone_create_info.dwZoneType = dnsp.DNS_ZONE_TYPE_PRIMARY
+ zone_create_info.fAging = 0
+ zone_create_info.fDsIntegrated = 1
+ zone_create_info.fLoadExisting = 1
+
+ zone_create_info.pszZoneName = forest_zone
+ zone_create_info.dwDpFlags = dnsserver.DNS_DP_FOREST_DEFAULT
+
+ self.conn.DnssrvOperation2(dnsserver.DNS_CLIENT_VERSION_LONGHORN,
+ 0,
+ self.server,
+ None,
+ 0,
+ 'ZoneCreate',
+ dnsserver.DNSSRV_TYPEID_ZONE_CREATE,
+ zone_create_info)
+
+ partition_dn = self.samdb.get_default_basedn()
+ partition_dn.add_child("DC=ForestDnsZones")
+ zones = self.samdb.search(base=partition_dn, scope=ldb.SCOPE_SUBTREE,
+ expression="(name=%s)" % forest_zone,
+ attrs=["nTSecurityDescriptor"])
+ self.assertEqual(len(zones), 1)
+ current_dn = zones[0].dn
+ self.assertIn("nTSecurityDescriptor", zones[0])
+ tmp = zones[0]["nTSecurityDescriptor"][0]
+ utils = sd_utils.SDUtils(self.samdb)
+ sd = ndr_unpack(security.descriptor, tmp)
+
+ domain_sid = security.dom_sid(self.samdb.get_domain_sid())
+
+ res = self.samdb.search(base=self.samdb.get_default_basedn(),
+ scope=ldb.SCOPE_SUBTREE,
+ expression="(sAMAccountName=DnsAdmins)",
+ attrs=["objectSid"])
+
+ dns_admin = str(ndr_unpack(security.dom_sid, res[0]['objectSid'][0]))
+
+ packed_sd = descriptor.sddl2binary("O:DAG:DA"
+ "D:AI(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;DA)"
+ "(A;;CC;;;AU)"
+ "(A;;RPLCLORC;;;WD)"
+ "(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)"
+ "(A;CI;RPWPCRCCDCLCRCWOWDSDDTSW;;;ED)",
+ domain_sid, {"DnsAdmins": dns_admin})
+ expected_sd = descriptor.get_clean_sd(ndr_unpack(security.descriptor, packed_sd))
+
+ packed_msdns = descriptor.get_dns_forest_microsoft_dns_descriptor(domain_sid,
+ {"DnsAdmins": dns_admin})
+ expected_msdns_sd = descriptor.get_clean_sd(ndr_unpack(security.descriptor, packed_msdns))
+
+ packed_part_sd = descriptor.get_dns_partition_descriptor(domain_sid)
+ expected_part_sd = descriptor.get_clean_sd(ndr_unpack(security.descriptor,
+ packed_part_sd))
+ try:
+ msdns_dn = ldb.Dn(self.samdb, "CN=MicrosoftDNS,%s" % str(partition_dn))
+ security_desc_dict = [(current_dn.get_linearized(), expected_sd),
+ (msdns_dn.get_linearized(), expected_msdns_sd),
+ (partition_dn.get_linearized(), expected_part_sd)]
+
+ for (key, sec_desc) in security_desc_dict:
+ zones = self.samdb.search(base=key, scope=ldb.SCOPE_BASE,
+ attrs=["nTSecurityDescriptor"])
+ self.assertIn("nTSecurityDescriptor", zones[0])
+ tmp = zones[0]["nTSecurityDescriptor"][0]
+ utils = sd_utils.SDUtils(self.samdb)
+
+ sd = ndr_unpack(security.descriptor, tmp)
+ diff = descriptor.get_diff_sds(sec_desc, sd, domain_sid)
+
+ self.assertEqual(diff, '', "Security descriptor of forest DNS zone with DN '%s' different to expected. Difference was:\n%s\nExpected: %s\nGot: %s"
+ % (key, diff, sec_desc.as_sddl(utils.domain_sid), sd.as_sddl(utils.domain_sid)))
+
+ finally:
+ self.conn.DnssrvOperation2(dnsserver.DNS_CLIENT_VERSION_LONGHORN,
+ 0,
+ self.server,
+ forest_zone,
+ 0,
+ 'DeleteZoneFromDs',
+ dnsserver.DNSSRV_TYPEID_NULL,
+ None)
+
+ def test_security_descriptor_domain_zone(self):
+ """
+ Make sure that security descriptors of domain dns zones are
+ as expected.
+ """
+
+ partition_dn = self.samdb.get_default_basedn()
+ partition_dn.add_child("DC=DomainDnsZones")
+ zones = self.samdb.search(base=partition_dn, scope=ldb.SCOPE_SUBTREE,
+ expression="(name=%s)" % self.custom_zone,
+ attrs=["nTSecurityDescriptor"])
+ self.assertEqual(len(zones), 1)
+ current_dn = zones[0].dn
+ self.assertIn("nTSecurityDescriptor", zones[0])
+ tmp = zones[0]["nTSecurityDescriptor"][0]
+ utils = sd_utils.SDUtils(self.samdb)
+ sd = ndr_unpack(security.descriptor, tmp)
+ sddl = sd.as_sddl(utils.domain_sid)
+
+ domain_sid = security.dom_sid(self.samdb.get_domain_sid())
+
+ res = self.samdb.search(base=self.samdb.get_default_basedn(), scope=ldb.SCOPE_SUBTREE,
+ expression="(sAMAccountName=DnsAdmins)",
+ attrs=["objectSid"])
+
+ dns_admin = str(ndr_unpack(security.dom_sid, res[0]['objectSid'][0]))
+
+ packed_sd = descriptor.sddl2binary("O:DAG:DA"
+ "D:AI(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;DA)"
+ "(A;;CC;;;AU)"
+ "(A;;RPLCLORC;;;WD)"
+ "(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)"
+ "(A;CI;RPWPCRCCDCLCRCWOWDSDDTSW;;;ED)",
+ domain_sid, {"DnsAdmins": dns_admin})
+ expected_sd = descriptor.get_clean_sd(ndr_unpack(security.descriptor, packed_sd))
+
+ packed_msdns = descriptor.get_dns_domain_microsoft_dns_descriptor(domain_sid,
+ {"DnsAdmins": dns_admin})
+ expected_msdns_sd = descriptor.get_clean_sd(ndr_unpack(security.descriptor, packed_msdns))
+
+ packed_part_sd = descriptor.get_dns_partition_descriptor(domain_sid)
+ expected_part_sd = descriptor.get_clean_sd(ndr_unpack(security.descriptor,
+ packed_part_sd))
+
+ msdns_dn = ldb.Dn(self.samdb, "CN=MicrosoftDNS,%s" % str(partition_dn))
+ security_desc_dict = [(current_dn.get_linearized(), expected_sd),
+ (msdns_dn.get_linearized(), expected_msdns_sd),
+ (partition_dn.get_linearized(), expected_part_sd)]
+
+ for (key, sec_desc) in security_desc_dict:
+ zones = self.samdb.search(base=key, scope=ldb.SCOPE_BASE,
+ attrs=["nTSecurityDescriptor"])
+ self.assertIn("nTSecurityDescriptor", zones[0])
+ tmp = zones[0]["nTSecurityDescriptor"][0]
+ utils = sd_utils.SDUtils(self.samdb)
+
+ sd = ndr_unpack(security.descriptor, tmp)
+ diff = descriptor.get_diff_sds(sec_desc, sd, domain_sid)
+
+ self.assertEqual(diff, '', "Security descriptor of domain DNS zone with DN '%s' different to expected. Difference was:\n%s\nExpected: %s\nGot: %s"
+ % (key, diff, sec_desc.as_sddl(utils.domain_sid), sd.as_sddl(utils.domain_sid)))
diff --git a/python/samba/tests/dcerpc/integer.py b/python/samba/tests/dcerpc/integer.py
new file mode 100644
index 0000000..69a6a09
--- /dev/null
+++ b/python/samba/tests/dcerpc/integer.py
@@ -0,0 +1,250 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2015
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for integer handling in PIDL generated bindings samba.dcerpc.*"""
+
+from samba.dcerpc import server_id, misc, srvsvc, samr
+import samba.tests
+
+
+class IntegerTests(samba.tests.TestCase):
+
+ def test_uint32_into_hyper(self):
+ s = server_id.server_id()
+ s.unique_id = server_id.NONCLUSTER_VNN
+ self.assertEqual(s.unique_id, 0xFFFFFFFF)
+
+ def test_int_into_hyper(self):
+ s = server_id.server_id()
+ s.unique_id = 1
+ self.assertEqual(s.unique_id, 1)
+
+ def test_negative_int_into_hyper(self):
+ s = server_id.server_id()
+
+ def assign():
+ s.unique_id = -1
+ self.assertRaises(OverflowError, assign)
+
+ def test_hyper_into_uint32(self):
+ s = server_id.server_id()
+
+ def assign():
+ s.vnn = server_id.SERVERID_UNIQUE_ID_NOT_TO_VERIFY
+ self.assertRaises(OverflowError, assign)
+
+ def test_hyper_into_int32(self):
+ s = srvsvc.NetRemoteTODInfo()
+
+ def assign():
+ s.timezone = server_id.SERVERID_UNIQUE_ID_NOT_TO_VERIFY
+ self.assertRaises(OverflowError, assign)
+
+ def test_int_into_int32(self):
+ s = srvsvc.NetRemoteTODInfo()
+ s.timezone = 5
+ self.assertEqual(s.timezone, 5)
+
+ def test_uint32_into_int32(self):
+ s = srvsvc.NetRemoteTODInfo()
+
+ def assign():
+ s.timezone = server_id.NONCLUSTER_VNN
+ self.assertRaises(OverflowError, assign)
+
+ def test_long_into_int32(self):
+ s = srvsvc.NetRemoteTODInfo()
+ # here we force python2 to convert its 32/64 bit python int into
+ # an arbitrarily long python long, then reduce the number back
+ # down to something that would fit in an int anyway. In a pure
+ # python2 world, you could achieve the same thing by writing
+ # s.timezone = 5L
+ # but that is a syntax error in py3.
+ s.timezone = (5 << 65) >> 65
+ self.assertEqual(s.timezone, 5)
+
+ def test_larger_long_int_into_int32(self):
+ s = srvsvc.NetRemoteTODInfo()
+
+ def assign():
+ s.timezone = 2147483648
+ self.assertRaises(OverflowError, assign)
+
+ def test_larger_int_into_int32(self):
+ s = srvsvc.NetRemoteTODInfo()
+ s.timezone = 2147483647
+ self.assertEqual(s.timezone, 2147483647)
+
+ def test_float_into_int32(self):
+ s = srvsvc.NetRemoteTODInfo()
+
+ def assign():
+ s.timezone = 2.5
+ self.assertRaises(TypeError, assign)
+
+ def test_int_float_into_int32(self):
+ s = srvsvc.NetRemoteTODInfo()
+
+ def assign():
+ s.timezone = 2.0
+ self.assertRaises(TypeError, assign)
+
+ def test_negative_int_into_int32(self):
+ s = srvsvc.NetRemoteTODInfo()
+ s.timezone = -2147483648
+ self.assertEqual(s.timezone, -2147483648)
+
+ def test_negative_into_uint32(self):
+ s = server_id.server_id()
+
+ def assign():
+ s.vnn = -1
+ self.assertRaises(OverflowError, assign)
+
+ def test_hyper_into_uint16(self):
+ g = misc.GUID()
+
+ def assign():
+ g.time_mid = server_id.SERVERID_UNIQUE_ID_NOT_TO_VERIFY
+ self.assertRaises(OverflowError, assign)
+
+ def test_int_into_uint16(self):
+ g = misc.GUID()
+
+ def assign():
+ g.time_mid = 200000
+ self.assertRaises(OverflowError, assign)
+
+ def test_negative_int_into_uint16(self):
+ g = misc.GUID()
+
+ def assign():
+ g.time_mid = -2
+ self.assertRaises(OverflowError, assign)
+
+ def test_enum_into_uint16(self):
+ g = misc.GUID()
+ g.time_mid = misc.SEC_CHAN_DOMAIN
+ self.assertEqual(g.time_mid, misc.SEC_CHAN_DOMAIN)
+
+ def test_bitmap_into_uint16(self):
+ g = misc.GUID()
+ g.time_mid = misc.SV_TYPE_WFW
+ self.assertEqual(g.time_mid, misc.SV_TYPE_WFW)
+
+ def test_overflow_bitmap_into_uint16(self):
+ g = misc.GUID()
+
+ def assign():
+ g.time_mid = misc.SV_TYPE_LOCAL_LIST_ONLY
+ self.assertRaises(OverflowError, assign)
+
+ def test_overflow_bitmap_into_uint16_2(self):
+ g = misc.GUID()
+
+ def assign():
+ g.time_mid = misc.SV_TYPE_DOMAIN_ENUM
+ self.assertRaises(OverflowError, assign)
+
+ def test_hyper_into_int64(self):
+ s = samr.DomInfo1()
+
+ def assign():
+ s.max_password_age = server_id.SERVERID_UNIQUE_ID_NOT_TO_VERIFY
+ self.assertRaises(OverflowError, assign)
+
+ def test_int_into_int64(self):
+ s = samr.DomInfo1()
+ s.max_password_age = 5
+ self.assertEqual(s.max_password_age, 5)
+
+ def test_negative_int_into_int64(self):
+ s = samr.DomInfo1()
+ s.max_password_age = -5
+ self.assertEqual(s.max_password_age, -5)
+
+ def test_larger_int_into_int64(self):
+ s = samr.DomInfo1()
+ s.max_password_age = server_id.NONCLUSTER_VNN
+ self.assertEqual(s.max_password_age, 0xFFFFFFFF)
+
+ def test_larger_negative_int_into_int64(self):
+ s = samr.DomInfo1()
+ s.max_password_age = -2147483649
+ self.assertEqual(s.max_password_age, -2147483649)
+
+ def test_int_list_over_list(self):
+ g = misc.GUID()
+ g.node = [5, 0, 5, 0, 7, 4]
+ self.assertEqual(g.node[0], 5)
+
+ def test_long_int_list_over_uint8_list(self):
+ g = misc.GUID()
+ g.node = [5, 0, 5, 0, 7, 4]
+ self.assertEqual(g.node[0], 5)
+
+ def test_negative_list_over_uint8_list(self):
+ g = misc.GUID()
+
+ def assign():
+ g.node = [-1, 0, 5, 0, 7, 4]
+ self.assertRaises(OverflowError, assign)
+
+ def test_overflow_list_over_uint8_list(self):
+ g = misc.GUID()
+
+ def assign():
+ g.node = [256, 0, 5, 0, 7, 4]
+ self.assertRaises(OverflowError, assign)
+
+ def test_short_list_over_uint8_list(self):
+ g = misc.GUID()
+
+ def assign():
+ g.node = [5, 0, 5]
+ self.assertRaises(TypeError, assign)
+
+ def test_long_list_over_uint8_list(self):
+ g = misc.GUID()
+
+ def assign():
+ g.node = [5, 0, 5, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF]
+ self.assertRaises(TypeError, assign)
+
+ # Due to our PIDL bindings generating a python List, modifications
+ # to a list of non-objects are not reflected in the C list
+ # (modifications objects in lists of objects work because the
+ # objects are modified), so changes essentially vanish and are not
+ # type checked either.
+ def test_assign_into_uint8_list(self):
+ g = misc.GUID()
+ g.node[1] = 5
+ self.assertEqual(g.node[1], 5)
+
+ def test_negative_into_uint8_list(self):
+ g = misc.GUID()
+
+ def assign():
+ g.node[1] = -1
+ self.assertRaises(OverflowError, assign)
+
+ def test_overflow_into_uint8_list(self):
+ g = misc.GUID()
+
+ def assign():
+ g.node[1] = 256
+ self.assertRaises(OverflowError, assign)
diff --git a/python/samba/tests/dcerpc/lsa.py b/python/samba/tests/dcerpc/lsa.py
new file mode 100644
index 0000000..355bb1f
--- /dev/null
+++ b/python/samba/tests/dcerpc/lsa.py
@@ -0,0 +1,333 @@
+# -*- coding: utf-8 -*-
+#
+# Unix SMB/CIFS implementation.
+# Copyright © Andrew Bartlett <abartlet@samba.org> 2021
+# Copyright (C) Catalyst IT Ltd. 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for samba.dcerpc.lsa."""
+
+from samba.dcerpc import lsa
+from samba.credentials import Credentials
+from samba.tests import TestCase
+from samba.dcerpc.security import dom_sid
+from samba import NTSTATUSError
+from samba.ntstatus import NT_STATUS_ACCESS_DENIED
+import samba.tests
+
+class LsaTests(TestCase):
+
+ def setUp(self):
+ self.lp = self.get_loadparm()
+ self.server = samba.tests.env_get_var_value('SERVER')
+
+ def test_lsa_LookupSids3_multiple(self):
+ machine_creds = Credentials()
+ machine_creds.guess(self.lp)
+ machine_creds.set_machine_account()
+
+ c = lsa.lsarpc(
+ "ncacn_ip_tcp:%s[schannel,seal]" % self.server,
+ self.lp,
+ machine_creds)
+
+ sids = lsa.SidArray()
+ sid = lsa.SidPtr()
+ # Need a set
+ x = dom_sid("S-1-5-7")
+ sid.sid = x
+ sids.sids = [sid]
+ sids.num_sids = 1
+ names = lsa.TransNameArray2()
+ level = lsa.LSA_LOOKUP_NAMES_ALL
+ count = 0
+ lookup_options = lsa.LSA_LOOKUP_OPTION_SEARCH_ISOLATED_NAMES
+ client_revision = lsa.LSA_CLIENT_REVISION_2
+
+ # We want to run LookupSids3 multiple times on the same
+ # connection as we have code to re-use the sam.ldb and we need
+ # to check things work for the second request.
+ (domains, names, count) = c.LookupSids3(sids, names, level, count, lookup_options, client_revision)
+ self.assertEqual(count, 1)
+ self.assertEqual(names.count, 1)
+ self.assertEqual(names.names[0].name.string,
+ "ANONYMOUS LOGON")
+ (domains2, names2, count2) = c.LookupSids3(sids, names, level, count, lookup_options, client_revision)
+ self.assertEqual(count2, 1)
+ self.assertEqual(names2.count, 1)
+ self.assertEqual(names2.names[0].name.string,
+ "ANONYMOUS LOGON")
+
+ # Just looking for any exceptions in the last couple of loops
+ c.LookupSids3(sids, names, level, count, lookup_options, client_revision)
+ c.LookupSids3(sids, names, level, count, lookup_options, client_revision)
+
+ def test_lsa_LookupSids3_multiple_conns(self):
+ machine_creds = Credentials()
+ machine_creds.guess(self.lp)
+ machine_creds.set_machine_account()
+
+ c = lsa.lsarpc(
+ "ncacn_ip_tcp:%s[schannel,seal]" % self.server,
+ self.lp,
+ machine_creds)
+
+ sids = lsa.SidArray()
+ sid = lsa.SidPtr()
+ # Need a set
+ x = dom_sid("S-1-5-7")
+ sid.sid = x
+ sids.sids = [sid]
+ sids.num_sids = 1
+ names = lsa.TransNameArray2()
+ level = lsa.LSA_LOOKUP_NAMES_ALL
+ count = 0
+ lookup_options = lsa.LSA_LOOKUP_OPTION_SEARCH_ISOLATED_NAMES
+ client_revision = lsa.LSA_CLIENT_REVISION_2
+
+ # We want to run LookupSids3, and then again on a new
+ # connection to show that we don't have an issue with the DB
+ # being tied to the wrong connection.
+ (domains, names, count) = c.LookupSids3(sids,
+ names,
+ level,
+ count,
+ lookup_options,
+ client_revision)
+ self.assertEqual(count, 1)
+ self.assertEqual(names.count, 1)
+ self.assertEqual(names.names[0].name.string,
+ "ANONYMOUS LOGON")
+
+ c = lsa.lsarpc(
+ "ncacn_ip_tcp:%s[schannel,seal]" % self.server,
+ self.lp,
+ machine_creds)
+
+ (domains, names, count) = c.LookupSids3(sids,
+ names,
+ level,
+ count,
+ lookup_options,
+ client_revision)
+ self.assertEqual(count, 1)
+ self.assertEqual(names.count, 1)
+ self.assertEqual(names.names[0].name.string,
+ "ANONYMOUS LOGON")
+
+
+ def test_lsa_LookupNames4_LookupSids3_multiple(self):
+ """
+ Test by going back and forward between real DB lookups
+ name->sid->name to ensure the sam.ldb handle is fine once
+ shared
+ """
+
+ machine_creds = Credentials()
+ machine_creds.guess(self.lp)
+ machine_creds.set_machine_account()
+
+ c_normal = lsa.lsarpc(
+ "ncacn_np:%s[seal]" % self.server,
+ self.lp,
+ machine_creds)
+
+ username, domain = c_normal.GetUserName(None, None, None)
+
+ c = lsa.lsarpc(
+ "ncacn_ip_tcp:%s[schannel,seal]" % self.server,
+ self.lp,
+ machine_creds)
+
+ sids = lsa.TransSidArray3()
+ names = [username]
+ level = lsa.LSA_LOOKUP_NAMES_ALL
+ count = 0
+ lookup_options = lsa.LSA_LOOKUP_OPTION_SEARCH_ISOLATED_NAMES
+ client_revision = lsa.LSA_CLIENT_REVISION_2
+ (domains, sids, count) = c.LookupNames4(names,
+ sids,
+ level,
+ count,
+ lookup_options,
+ client_revision)
+
+ # Another lookup on the same connection, will re-used the
+ # server-side implicit state handle on the connection
+ (domains, sids, count) = c.LookupNames4(names,
+ sids,
+ level,
+ count,
+ lookup_options,
+ client_revision)
+
+ self.assertEqual(count, 1)
+ self.assertEqual(sids.count, 1)
+
+ # Now look the SIDs back up
+ names = lsa.TransNameArray2()
+ sid = lsa.SidPtr()
+ sid.sid = sids.sids[0].sid
+ lookup_sids = lsa.SidArray()
+ lookup_sids.sids = [sid]
+ lookup_sids.num_sids = 1
+ level = lsa.LSA_LOOKUP_NAMES_ALL
+ count = 1
+ lookup_options = 0
+ client_revision = lsa.LSA_CLIENT_REVISION_2
+
+ (domains, names, count) = c.LookupSids3(lookup_sids,
+ names,
+ level,
+ count,
+ lookup_options,
+ client_revision)
+ self.assertEqual(count, 1)
+ self.assertEqual(names.count, 1)
+ self.assertEqual(names.names[0].name.string,
+ username.string)
+
+ # And once more just to be sure, just checking for a fault
+ sids = lsa.TransSidArray3()
+ names = [username]
+ level = lsa.LSA_LOOKUP_NAMES_ALL
+ count = 0
+ lookup_options = lsa.LSA_LOOKUP_OPTION_SEARCH_ISOLATED_NAMES
+ client_revision = lsa.LSA_CLIENT_REVISION_2
+ (domains, sids, count) = c.LookupNames4(names,
+ sids,
+ level,
+ count,
+ lookup_options,
+ client_revision)
+
+
+ def test_lsa_LookupNames4_multiple_conns(self):
+ """
+ Test by going back and forward between real DB lookups
+ name->sid->name to ensure the sam.ldb handle is fine once
+ shared
+ """
+
+ machine_creds = Credentials()
+ machine_creds.guess(self.lp)
+ machine_creds.set_machine_account()
+
+ c_normal = lsa.lsarpc(
+ "ncacn_np:%s[seal]" % self.server,
+ self.lp,
+ machine_creds)
+
+ username, domain = c_normal.GetUserName(None, None, None)
+
+ c = lsa.lsarpc(
+ "ncacn_ip_tcp:%s[schannel,seal]" % self.server,
+ self.lp,
+ machine_creds)
+
+ sids = lsa.TransSidArray3()
+ names = [username]
+ level = lsa.LSA_LOOKUP_NAMES_ALL
+ count = 0
+ lookup_options = lsa.LSA_LOOKUP_OPTION_SEARCH_ISOLATED_NAMES
+ client_revision = lsa.LSA_CLIENT_REVISION_2
+ (domains, sids, count) = c.LookupNames4(names,
+ sids,
+ level,
+ count,
+ lookup_options,
+ client_revision)
+
+ c = lsa.lsarpc(
+ "ncacn_ip_tcp:%s[schannel,seal]" % self.server,
+ self.lp,
+ machine_creds)
+
+ sids = lsa.TransSidArray3()
+ names = [username]
+ level = lsa.LSA_LOOKUP_NAMES_ALL
+ count = 0
+ lookup_options = lsa.LSA_LOOKUP_OPTION_SEARCH_ISOLATED_NAMES
+ client_revision = lsa.LSA_CLIENT_REVISION_2
+ (domains, sids, count) = c.LookupNames4(names,
+ sids,
+ level,
+ count,
+ lookup_options,
+ client_revision)
+
+ def test_lsa_LookupNames4_without_schannel(self):
+
+ machine_creds = Credentials()
+ machine_creds.guess(self.lp)
+ machine_creds.set_machine_account()
+
+ c_normal = lsa.lsarpc(
+ "ncacn_np:%s[seal]" % self.server,
+ self.lp,
+ machine_creds)
+
+ username, domain = c_normal.GetUserName(None, None, None)
+
+ sids = lsa.TransSidArray3()
+ names = [username]
+ level = lsa.LSA_LOOKUP_NAMES_ALL
+ count = 0
+ lookup_options = lsa.LSA_LOOKUP_OPTION_SEARCH_ISOLATED_NAMES
+ client_revision = lsa.LSA_CLIENT_REVISION_2
+
+ with self.assertRaises(NTSTATUSError) as e:
+ c_normal.LookupNames4(names,
+ sids,
+ level,
+ count,
+ lookup_options,
+ client_revision)
+ if (e.exception.args[0] != NT_STATUS_ACCESS_DENIED):
+ raise AssertionError("LookupNames4 without schannel must fail with ACCESS_DENIED")
+
+ def test_lsa_LookupSids3_without_schannel(self):
+ machine_creds = Credentials()
+ machine_creds.guess(self.lp)
+ machine_creds.set_machine_account()
+
+ c = lsa.lsarpc(
+ "ncacn_ip_tcp:%s[seal]" % self.server,
+ self.lp,
+ machine_creds)
+
+ sids = lsa.SidArray()
+ sid = lsa.SidPtr()
+ # Need a set
+ x = dom_sid("S-1-5-7")
+ sid.sid = x
+ sids.sids = [sid]
+ sids.num_sids = 1
+ names = lsa.TransNameArray2()
+ level = lsa.LSA_LOOKUP_NAMES_ALL
+ count = 0
+ lookup_options = lsa.LSA_LOOKUP_OPTION_SEARCH_ISOLATED_NAMES
+ client_revision = lsa.LSA_CLIENT_REVISION_2
+
+ with self.assertRaises(NTSTATUSError) as e:
+ c.LookupSids3(sids,
+ names,
+ level,
+ count,
+ lookup_options,
+ client_revision)
+ if (e.exception.args[0] != NT_STATUS_ACCESS_DENIED):
+ raise AssertionError("LookupSids3 without schannel must fail with ACCESS_DENIED")
diff --git a/python/samba/tests/dcerpc/mdssvc.py b/python/samba/tests/dcerpc/mdssvc.py
new file mode 100644
index 0000000..1d53676
--- /dev/null
+++ b/python/samba/tests/dcerpc/mdssvc.py
@@ -0,0 +1,194 @@
+#
+# Unix SMB/CIFS implementation.
+# Copyright Ralph Boehme <slow@samba.org> 2019
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for samba.dcerpc.mdssvc"""
+
+import os
+import time
+import threading
+import logging
+import json
+from http.server import HTTPServer, BaseHTTPRequestHandler
+from samba.dcerpc import mdssvc
+from samba.tests import RpcInterfaceTestCase
+from samba.samba3 import mdscli
+from samba.logger import get_samba_logger
+
+logger = get_samba_logger(name=__name__)
+
+testfiles = [
+ "foo",
+ "bar",
+ "x+x",
+ "x*x",
+ "x=x",
+ "x'x",
+ "x?x",
+ "x\"x",
+ "x\\x",
+ "x(x",
+ "x x",
+]
+
+class MdssvcHTTPRequestHandler(BaseHTTPRequestHandler):
+ def do_POST(self):
+ content_length = int(self.headers['content-length'])
+ body = self.rfile.read(content_length)
+
+ actual_json = json.loads((body))
+ expected_json = json.loads(self.server.json_in)
+
+ if actual_json != expected_json:
+ logger.error("Bad request, expected:\n%s\nGot:\n%s\n" % (expected_json, actual_json))
+ self.send_error(400,
+ "Bad request",
+ "Expected: %s\n"
+ "Got: %s\n" %
+ (expected_json, actual_json))
+ return
+
+ resp = bytes(self.server.json_out, encoding="utf-8")
+
+ self.send_response(200)
+ self.send_header('content-type', 'application/json; charset=UTF-8')
+ self.send_header('content-length', len(resp))
+ self.end_headers()
+ self.wfile.write(resp)
+
+class MdssvcTests(RpcInterfaceTestCase):
+
+ def setUp(self):
+ super().setUp()
+
+ self.pipe = mdssvc.mdssvc('ncacn_np:fileserver[/pipe/mdssvc]', self.get_loadparm())
+
+ self.server = HTTPServer(('10.53.57.35', 8080),
+ MdssvcHTTPRequestHandler,
+ bind_and_activate=False)
+
+ self.t = threading.Thread(target=MdssvcTests.http_server, args=(self,))
+ self.t.setDaemon(True)
+ self.t.start()
+ self.sharepath = os.environ["LOCAL_PATH"]
+ time.sleep(1)
+
+ conn = mdscli.conn(self.pipe, 'spotlight', '/foo')
+ self.fakepath = conn.sharepath()
+ conn.disconnect(self.pipe)
+
+ for file in testfiles:
+ f = open("%s/%s" % (self.sharepath, file), "w")
+ f.close()
+
+ def tearDown(self):
+ super().tearDown()
+ for file in testfiles:
+ os.remove("%s/%s" % (self.sharepath, file))
+
+ def http_server(self):
+ self.server.server_bind()
+ self.server.server_activate()
+ self.server.serve_forever()
+
+ def run_test(self, query, expect, json_in, json_out):
+ self.server.json_in = json_in.replace("%BASEPATH%", self.sharepath)
+ self.server.json_out = json_out.replace("%BASEPATH%", self.sharepath)
+
+ self.conn = mdscli.conn(self.pipe, 'spotlight', '/foo')
+ search = self.conn.search(self.pipe, query, self.fakepath)
+
+ # Give it some time, the get_results() below returns immediately
+ # what's available, so if we ask to soon, we might get back no results
+ # as the server is still processing the request
+ time.sleep(1)
+
+ results = search.get_results(self.pipe)
+ self.assertEqual(results, expect)
+
+ search.close(self.pipe)
+ self.conn.disconnect(self.pipe)
+
+ def test_mdscli_search(self):
+ exp_json_query = r'''{
+ "from": 0, "size": 50, "_source": ["path.real"],
+ "query": {
+ "query_string": {
+ "query": "(samba*) AND path.real.fulltext:\"%BASEPATH%\""
+ }
+ }
+ }'''
+ fake_json_response = '''{
+ "hits" : {
+ "total" : { "value" : 2},
+ "hits" : [
+ {"_source" : {"path" : {"real" : "%BASEPATH%/foo"}}},
+ {"_source" : {"path" : {"real" : "%BASEPATH%/bar"}}}
+ ]
+ }
+ }'''
+ exp_results = ["foo", "bar"]
+ self.run_test('*=="samba*"', exp_results, exp_json_query, fake_json_response)
+
+ def test_mdscli_search_escapes(self):
+ sl_query = (
+ r'kMDItemFSName=="x+x"||'
+ r'kMDItemFSName=="x\*x"||'
+ r'kMDItemFSName=="x=x"||'
+ 'kMDItemFSName=="x\'x"||'
+ r'kMDItemFSName=="x?x"||'
+ r'kMDItemFSName=="x x"||'
+ r'kMDItemFSName=="x(x"||'
+ r'kMDItemFSName=="x\"x"||'
+ r'kMDItemFSName=="x\\x"'
+ )
+ exp_json_query = r'''{
+ "from": 0, "size": 50, "_source": ["path.real"],
+ "query": {
+ "query_string": {
+ "query": "(file.filename:x\\+x OR file.filename:x\\*x OR file.filename:x=x OR file.filename:x'x OR file.filename:x\\?x OR file.filename:x\\ x OR file.filename:x\\(x OR file.filename:x\\\"x OR file.filename:x\\\\x) AND path.real.fulltext:\"%BASEPATH%\""
+ }
+ }
+ }'''
+ fake_json_response = r'''{
+ "hits" : {
+ "total" : {"value" : 9},
+ "hits" : [
+ {"_source" : {"path" : {"real" : "%BASEPATH%/x+x"}}},
+ {"_source" : {"path" : {"real" : "%BASEPATH%/x*x"}}},
+ {"_source" : {"path" : {"real" : "%BASEPATH%/x=x"}}},
+ {"_source" : {"path" : {"real" : "%BASEPATH%/x'x"}}},
+ {"_source" : {"path" : {"real" : "%BASEPATH%/x?x"}}},
+ {"_source" : {"path" : {"real" : "%BASEPATH%/x x"}}},
+ {"_source" : {"path" : {"real" : "%BASEPATH%/x(x"}}},
+ {"_source" : {"path" : {"real" : "%BASEPATH%/x\"x"}}},
+ {"_source" : {"path" : {"real" : "%BASEPATH%/x\\x"}}}
+ ]
+ }
+ }'''
+ exp_results = [
+ r"x+x",
+ r"x*x",
+ r"x=x",
+ r"x'x",
+ r"x?x",
+ r"x x",
+ r"x(x",
+ "x\"x",
+ r"x\x",
+ ]
+ self.run_test(sl_query, exp_results, exp_json_query, fake_json_response)
diff --git a/python/samba/tests/dcerpc/misc.py b/python/samba/tests/dcerpc/misc.py
new file mode 100644
index 0000000..6b58e94
--- /dev/null
+++ b/python/samba/tests/dcerpc/misc.py
@@ -0,0 +1,101 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for samba.dcerpc.misc."""
+
+from samba.dcerpc import misc
+import samba.tests
+from samba.common import cmp
+
+text1 = "76f53846-a7c2-476a-ae2c-20e2b80d7b34"
+text2 = "344edffa-330a-4b39-b96e-2c34da52e8b1"
+text3 = "00112233-4455-6677-8899-aabbccddeeff"
+
+
+class GUIDTests(samba.tests.TestCase):
+
+ def test_str(self):
+ guid = misc.GUID(text1)
+ self.assertEqual(text1, str(guid))
+
+ def test_repr(self):
+ guid = misc.GUID(text1)
+ self.assertEqual("GUID('%s')" % text1, repr(guid))
+
+ def test_compare_different(self):
+ guid1 = misc.GUID(text1)
+ guid2 = misc.GUID(text2)
+ self.assertFalse(guid1 == guid2)
+ self.assertGreater(guid1, guid2)
+ self.assertTrue(cmp(guid1, guid2) > 0)
+
+ def test_compare_same(self):
+ guid1 = misc.GUID(text1)
+ guid2 = misc.GUID(text1)
+ self.assertTrue(guid1 == guid2)
+ self.assertEqual(guid1, guid2)
+ self.assertEqual(0, cmp(guid1, guid2))
+
+ def test_valid_formats(self):
+ fmts = [
+ "00112233-4455-6677-8899-aabbccddeeff", # 36
+ b"00112233-4455-6677-8899-aabbccddeeff", # 36 as bytes
+ "{00112233-4455-6677-8899-aabbccddeeff}", # 38
+
+ "33221100554477668899aabbccddeeff", # 32
+ b"33221100554477668899aabbccddeeff", # 32 as bytes
+
+ # 16 as hex bytes
+ b"\x33\x22\x11\x00\x55\x44\x77\x66\x88\x99\xaa\xbb\xcc\xdd\xee\xff"
+ ]
+ for fmt in fmts:
+ guid = misc.GUID(fmt)
+ self.assertEqual(text3, str(guid))
+
+ def test_invalid_formats(self):
+ fmts = [
+ "00112233-4455-6677-8899-aabbccddee", # 34
+ "{33221100554477668899aabbccddeeff}",
+ "33221100554477668899aabbccddee", # 30
+ "\\x33\\x22\\x11\\x00\\x55\\x44\\x77\\x66\\x88\\x99\\xaa\\xbb\\xcc\\xdd\\xee\\xff",
+ r"\x33\x22\x11\x00\x55\x44\x77\x66\x88\x99\xaa\xbb\xcc\xdd\xee\xff",
+ ]
+ for fmt in fmts:
+ try:
+ misc.GUID(fmt)
+ except samba.NTSTATUSError:
+ # invalid formats should get this error
+ continue
+ else:
+ # otherwise, test fail
+ self.fail()
+
+
+class PolicyHandleTests(samba.tests.TestCase):
+
+ def test_init(self):
+ x = misc.policy_handle(text1, 1)
+ self.assertEqual(1, x.handle_type)
+ self.assertEqual(text1, str(x.uuid))
+
+ def test_repr(self):
+ x = misc.policy_handle(text1, 42)
+ self.assertEqual("policy_handle(%d, '%s')" % (42, text1), repr(x))
+
+ def test_str(self):
+ x = misc.policy_handle(text1, 42)
+ self.assertEqual("%d, %s" % (42, text1), str(x))
diff --git a/python/samba/tests/dcerpc/raw_protocol.py b/python/samba/tests/dcerpc/raw_protocol.py
new file mode 100755
index 0000000..fa5a042
--- /dev/null
+++ b/python/samba/tests/dcerpc/raw_protocol.py
@@ -0,0 +1,7514 @@
+#!/usr/bin/env python3
+# Unix SMB/CIFS implementation.
+# Copyright (C) Stefan Metzmacher 2014,2015
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import sys
+import os
+import time
+
+sys.path.insert(0, "bin/python")
+os.environ["PYTHONUNBUFFERED"] = "1"
+
+import samba.dcerpc.dcerpc as dcerpc
+import samba.dcerpc.base as base
+import samba.dcerpc.misc as misc
+import samba.dcerpc.epmapper
+import samba.dcerpc.mgmt
+import samba.dcerpc.netlogon
+import samba.dcerpc.lsa
+import struct
+from samba import gensec
+from samba.tests.dcerpc.raw_testcase import RawDCERPCTest
+from samba.ntstatus import (
+ NT_STATUS_SUCCESS
+)
+
+global_ndr_print = False
+global_hexdump = False
+
+
+class TestDCERPC_BIND(RawDCERPCTest):
+
+ def setUp(self):
+ super().setUp()
+ self.do_ndr_print = global_ndr_print
+ self.do_hexdump = global_hexdump
+
+ def _test_no_auth_request_bind_pfc_flags(self, req_pfc_flags, rep_pfc_flags):
+ ndr32 = base.transfer_syntax_ndr()
+
+ tsf1_list = [ndr32]
+ ctx1 = dcerpc.ctx_list()
+ ctx1.context_id = 1
+ ctx1.num_transfer_syntaxes = len(tsf1_list)
+ ctx1.abstract_syntax = samba.dcerpc.mgmt.abstract_syntax()
+ ctx1.transfer_syntaxes = tsf1_list
+
+ req = self.generate_bind(call_id=0, pfc_flags=req_pfc_flags, ctx_list=[ctx1])
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_BIND_ACK, req.call_id,
+ pfc_flags=rep_pfc_flags, auth_length=0)
+ self.assertEqual(rep.u.max_xmit_frag, req.u.max_xmit_frag)
+ self.assertEqual(rep.u.max_recv_frag, req.u.max_recv_frag)
+ self.assertNotEqual(rep.u.assoc_group_id, req.u.assoc_group_id)
+ self.assertEqual(rep.u.secondary_address_size, 4)
+ self.assertEqual(rep.u.secondary_address, "%d" % self.tcp_port)
+ self.assertPadding(rep.u._pad1, 2)
+ self.assertEqual(rep.u.num_results, 1)
+ self.assertEqual(rep.u.ctx_list[0].result,
+ dcerpc.DCERPC_BIND_ACK_RESULT_ACCEPTANCE)
+ self.assertEqual(rep.u.ctx_list[0].reason,
+ dcerpc.DCERPC_BIND_ACK_REASON_NOT_SPECIFIED)
+ self.assertNDRSyntaxEquals(rep.u.ctx_list[0].syntax, ndr32)
+ self.assertEqual(rep.u.auth_info, b'\0' * 0)
+
+ # And now try a request
+ req = self.generate_request(call_id=1,
+ context_id=ctx1.context_id,
+ opnum=0,
+ stub=b"")
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_RESPONSE, req.call_id,
+ auth_length=0)
+ self.assertNotEqual(rep.u.alloc_hint, 0)
+ self.assertEqual(rep.u.context_id, req.u.context_id)
+ self.assertEqual(rep.u.cancel_count, 0)
+ self.assertGreaterEqual(len(rep.u.stub_and_verifier), rep.u.alloc_hint)
+
+ def _test_no_auth_request_alter_pfc_flags(self, req_pfc_flags, rep_pfc_flags):
+ ndr32 = base.transfer_syntax_ndr()
+
+ tsf1_list = [ndr32]
+ ctx1 = dcerpc.ctx_list()
+ ctx1.context_id = 1
+ ctx1.num_transfer_syntaxes = len(tsf1_list)
+ ctx1.abstract_syntax = samba.dcerpc.mgmt.abstract_syntax()
+ ctx1.transfer_syntaxes = tsf1_list
+
+ req = self.generate_bind(call_id=0, ctx_list=[ctx1])
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_BIND_ACK, req.call_id,
+ auth_length=0)
+ self.assertEqual(rep.u.max_xmit_frag, req.u.max_xmit_frag)
+ self.assertEqual(rep.u.max_recv_frag, req.u.max_recv_frag)
+ self.assertNotEqual(rep.u.assoc_group_id, req.u.assoc_group_id)
+ self.assertEqual(rep.u.secondary_address_size, 4)
+ self.assertEqual(rep.u.secondary_address, "%d" % self.tcp_port)
+ self.assertPadding(rep.u._pad1, 2)
+ self.assertEqual(rep.u.num_results, 1)
+ self.assertEqual(rep.u.ctx_list[0].result,
+ dcerpc.DCERPC_BIND_ACK_RESULT_ACCEPTANCE)
+ self.assertEqual(rep.u.ctx_list[0].reason,
+ dcerpc.DCERPC_BIND_ACK_REASON_NOT_SPECIFIED)
+ self.assertNDRSyntaxEquals(rep.u.ctx_list[0].syntax, ndr32)
+ self.assertEqual(rep.u.auth_info, b'\0' * 0)
+
+        # And now try an alter context
+ req = self.generate_alter(call_id=0, pfc_flags=req_pfc_flags, ctx_list=[ctx1])
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_ALTER_RESP, req.call_id,
+ pfc_flags=rep_pfc_flags, auth_length=0)
+ self.assertEqual(rep.u.max_xmit_frag, req.u.max_xmit_frag)
+ self.assertEqual(rep.u.max_recv_frag, req.u.max_recv_frag)
+ self.assertNotEqual(rep.u.assoc_group_id, req.u.assoc_group_id)
+ self.assertEqual(rep.u.secondary_address_size, 0)
+ self.assertEqual(rep.u.secondary_address, "")
+ self.assertPadding(rep.u._pad1, 2)
+ self.assertEqual(rep.u.num_results, 1)
+ self.assertEqual(rep.u.ctx_list[0].result,
+ dcerpc.DCERPC_BIND_ACK_RESULT_ACCEPTANCE)
+ self.assertEqual(rep.u.ctx_list[0].reason,
+ dcerpc.DCERPC_BIND_ACK_REASON_NOT_SPECIFIED)
+ self.assertNDRSyntaxEquals(rep.u.ctx_list[0].syntax, ndr32)
+ self.assertEqual(rep.u.auth_info, b'\0' * 0)
+
+ # And now try a request
+ req = self.generate_request(call_id=1,
+ context_id=ctx1.context_id,
+ opnum=0,
+ stub=b"")
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_RESPONSE, req.call_id,
+ auth_length=0)
+ self.assertNotEqual(rep.u.alloc_hint, 0)
+ self.assertEqual(rep.u.context_id, req.u.context_id)
+ self.assertEqual(rep.u.cancel_count, 0)
+ self.assertGreaterEqual(len(rep.u.stub_and_verifier), rep.u.alloc_hint)
+
+ def test_no_auth_request(self):
+ return self._test_no_auth_request_bind_pfc_flags(
+ req_pfc_flags=0 |
+ dcerpc.DCERPC_PFC_FLAG_FIRST |
+ dcerpc.DCERPC_PFC_FLAG_LAST,
+ rep_pfc_flags=0 |
+ dcerpc.DCERPC_PFC_FLAG_FIRST |
+ dcerpc.DCERPC_PFC_FLAG_LAST)
+
+ def test_no_auth_request_bind_pfc_00(self):
+ return self._test_no_auth_request_bind_pfc_flags(
+ req_pfc_flags=0 |
+ 0,
+ rep_pfc_flags=0 |
+ dcerpc.DCERPC_PFC_FLAG_FIRST |
+ dcerpc.DCERPC_PFC_FLAG_LAST)
+
+ def test_no_auth_request_bind_pfc_FIRST(self):
+ return self._test_no_auth_request_bind_pfc_flags(
+ req_pfc_flags=0 |
+ dcerpc.DCERPC_PFC_FLAG_FIRST |
+ 0,
+ rep_pfc_flags=0 |
+ dcerpc.DCERPC_PFC_FLAG_FIRST |
+ dcerpc.DCERPC_PFC_FLAG_LAST)
+
+ def test_no_auth_request_bind_pfc_LAST(self):
+ return self._test_no_auth_request_bind_pfc_flags(
+ req_pfc_flags=0 |
+ dcerpc.DCERPC_PFC_FLAG_LAST |
+ 0,
+ rep_pfc_flags=0 |
+ dcerpc.DCERPC_PFC_FLAG_FIRST |
+ dcerpc.DCERPC_PFC_FLAG_LAST)
+
+ def test_no_auth_request_bind_pfc_HDR_SIGNING(self):
+ return self._test_no_auth_request_bind_pfc_flags(
+ req_pfc_flags=0 |
+ dcerpc.DCERPC_PFC_FLAG_SUPPORT_HEADER_SIGN |
+ 0,
+ rep_pfc_flags=0 |
+ dcerpc.DCERPC_PFC_FLAG_FIRST |
+ dcerpc.DCERPC_PFC_FLAG_LAST |
+ dcerpc.DCERPC_PFC_FLAG_SUPPORT_HEADER_SIGN)
+
+ def test_no_auth_request_bind_pfc_08(self):
+ return self._test_no_auth_request_bind_pfc_flags(
+ req_pfc_flags=0 |
+ 8 |
+ 0,
+ rep_pfc_flags=0 |
+ dcerpc.DCERPC_PFC_FLAG_FIRST |
+ dcerpc.DCERPC_PFC_FLAG_LAST)
+
+ def test_no_auth_request_bind_pfc_CONC_MPX(self):
+ return self._test_no_auth_request_bind_pfc_flags(
+ req_pfc_flags=0 |
+ dcerpc.DCERPC_PFC_FLAG_CONC_MPX |
+ 0,
+ rep_pfc_flags=0 |
+ dcerpc.DCERPC_PFC_FLAG_FIRST |
+ dcerpc.DCERPC_PFC_FLAG_LAST |
+ dcerpc.DCERPC_PFC_FLAG_CONC_MPX)
+
+ def test_no_auth_request_bind_pfc_DID_NOT_EXECUTE(self):
+ return self._test_no_auth_request_bind_pfc_flags(
+ req_pfc_flags=0 |
+ dcerpc.DCERPC_PFC_FLAG_DID_NOT_EXECUTE |
+ 0,
+ rep_pfc_flags=0 |
+ dcerpc.DCERPC_PFC_FLAG_FIRST |
+ dcerpc.DCERPC_PFC_FLAG_LAST)
+
+ def test_no_auth_request_bind_pfc_MAYBE(self):
+ return self._test_no_auth_request_bind_pfc_flags(
+ req_pfc_flags=0 |
+ dcerpc.DCERPC_PFC_FLAG_MAYBE |
+ 0,
+ rep_pfc_flags=0 |
+ dcerpc.DCERPC_PFC_FLAG_FIRST |
+ dcerpc.DCERPC_PFC_FLAG_LAST)
+
+ def test_no_auth_request_bind_pfc_OBJECT_UUID(self):
+ return self._test_no_auth_request_bind_pfc_flags(
+ req_pfc_flags=0 |
+ dcerpc.DCERPC_PFC_FLAG_OBJECT_UUID |
+ 0,
+ rep_pfc_flags=0 |
+ dcerpc.DCERPC_PFC_FLAG_FIRST |
+ dcerpc.DCERPC_PFC_FLAG_LAST)
+
+ # TODO: doesn't announce DCERPC_PFC_FLAG_SUPPORT_HEADER_SIGN
+ # without authentication
+ # TODO: doesn't announce DCERPC_PFC_FLAG_CONC_MPX
+ # by default
+ def _test_no_auth_request_bind_pfc_ff(self):
+ return self._test_no_auth_request_bind_pfc_flags(
+ req_pfc_flags=0 |
+ 0xff |
+ 0,
+ rep_pfc_flags=0 |
+ dcerpc.DCERPC_PFC_FLAG_FIRST |
+ dcerpc.DCERPC_PFC_FLAG_LAST |
+ dcerpc.DCERPC_PFC_FLAG_SUPPORT_HEADER_SIGN |
+ dcerpc.DCERPC_PFC_FLAG_CONC_MPX)
+
+ def test_no_auth_request_alter_pfc_00(self):
+ return self._test_no_auth_request_alter_pfc_flags(
+ req_pfc_flags=0 |
+ 0,
+ rep_pfc_flags=0 |
+ dcerpc.DCERPC_PFC_FLAG_FIRST |
+ dcerpc.DCERPC_PFC_FLAG_LAST)
+
+ def test_no_auth_request_alter_pfc_FIRST(self):
+ return self._test_no_auth_request_alter_pfc_flags(
+ req_pfc_flags=0 |
+ dcerpc.DCERPC_PFC_FLAG_FIRST |
+ 0,
+ rep_pfc_flags=0 |
+ dcerpc.DCERPC_PFC_FLAG_FIRST |
+ dcerpc.DCERPC_PFC_FLAG_LAST)
+
+ def test_no_auth_request_alter_pfc_LAST(self):
+ return self._test_no_auth_request_alter_pfc_flags(
+ req_pfc_flags=0 |
+ dcerpc.DCERPC_PFC_FLAG_LAST |
+ 0,
+ rep_pfc_flags=0 |
+ dcerpc.DCERPC_PFC_FLAG_FIRST |
+ dcerpc.DCERPC_PFC_FLAG_LAST)
+
+ def test_no_auth_request_alter_pfc_HDR_SIGNING(self):
+ return self._test_no_auth_request_alter_pfc_flags(
+ req_pfc_flags=0 |
+ dcerpc.DCERPC_PFC_FLAG_SUPPORT_HEADER_SIGN |
+ 0,
+ rep_pfc_flags=0 |
+ dcerpc.DCERPC_PFC_FLAG_FIRST |
+ dcerpc.DCERPC_PFC_FLAG_LAST |
+ dcerpc.DCERPC_PFC_FLAG_SUPPORT_HEADER_SIGN)
+
+ def test_no_auth_request_alter_pfc_08(self):
+ return self._test_no_auth_request_alter_pfc_flags(
+ req_pfc_flags=0 |
+ 8 |
+ 0,
+ rep_pfc_flags=0 |
+ dcerpc.DCERPC_PFC_FLAG_FIRST |
+ dcerpc.DCERPC_PFC_FLAG_LAST)
+
+ def test_no_auth_request_alter_pfc_CONC_MPX(self):
+ return self._test_no_auth_request_alter_pfc_flags(
+ req_pfc_flags=0 |
+ dcerpc.DCERPC_PFC_FLAG_CONC_MPX |
+ 0,
+ rep_pfc_flags=0 |
+ dcerpc.DCERPC_PFC_FLAG_FIRST |
+ dcerpc.DCERPC_PFC_FLAG_LAST)
+
+ def test_no_auth_request_alter_pfc_DID_NOT_EXECUTE(self):
+ return self._test_no_auth_request_alter_pfc_flags(
+ req_pfc_flags=0 |
+ dcerpc.DCERPC_PFC_FLAG_DID_NOT_EXECUTE |
+ 0,
+ rep_pfc_flags=0 |
+ dcerpc.DCERPC_PFC_FLAG_FIRST |
+ dcerpc.DCERPC_PFC_FLAG_LAST)
+
+ def test_no_auth_request_alter_pfc_MAYBE(self):
+ return self._test_no_auth_request_alter_pfc_flags(
+ req_pfc_flags=0 |
+ dcerpc.DCERPC_PFC_FLAG_MAYBE |
+ 0,
+ rep_pfc_flags=0 |
+ dcerpc.DCERPC_PFC_FLAG_FIRST |
+ dcerpc.DCERPC_PFC_FLAG_LAST)
+
+ def test_no_auth_request_alter_pfc_OBJECT_UUID(self):
+ return self._test_no_auth_request_alter_pfc_flags(
+ req_pfc_flags=0 |
+ dcerpc.DCERPC_PFC_FLAG_OBJECT_UUID |
+ 0,
+ rep_pfc_flags=0 |
+ dcerpc.DCERPC_PFC_FLAG_FIRST |
+ dcerpc.DCERPC_PFC_FLAG_LAST)
+
+ def test_no_auth_request_alter_pfc_ff(self):
+ return self._test_no_auth_request_alter_pfc_flags(
+ req_pfc_flags=0 |
+ 0xff |
+ 0,
+ rep_pfc_flags=0 |
+ dcerpc.DCERPC_PFC_FLAG_FIRST |
+ dcerpc.DCERPC_PFC_FLAG_LAST |
+ dcerpc.DCERPC_PFC_FLAG_SUPPORT_HEADER_SIGN)
+
+ def test_no_auth_no_ctx(self):
+        # send a useless bind
+ req = self.generate_bind(call_id=0)
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_BIND_NAK, req.call_id,
+ auth_length=0)
+ self.assertEqual(rep.u.reject_reason,
+ dcerpc.DCERPC_BIND_NAK_REASON_NOT_SPECIFIED)
+ self.assertEqual(rep.u.num_versions, 1)
+ self.assertEqual(rep.u.versions[0].rpc_vers, req.rpc_vers)
+ self.assertEqual(rep.u.versions[0].rpc_vers_minor, req.rpc_vers_minor)
+ self.assertPadding(rep.u._pad, 3)
+
+ def test_invalid_auth_noctx(self):
+ req = self.generate_bind(call_id=0)
+ req.auth_length = dcerpc.DCERPC_AUTH_TRAILER_LENGTH
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_BIND_NAK, req.call_id,
+ auth_length=0)
+ self.assertEqual(rep.u.reject_reason,
+ dcerpc.DCERPC_BIND_NAK_REASON_PROTOCOL_VERSION_NOT_SUPPORTED)
+ self.assertEqual(rep.u.num_versions, 1)
+ self.assertEqual(rep.u.versions[0].rpc_vers, req.rpc_vers)
+ self.assertEqual(rep.u.versions[0].rpc_vers_minor, req.rpc_vers_minor)
+ self.assertPadding(rep.u._pad, 3)
+
+ def test_no_auth_valid_valid_request(self):
+ ndr32 = base.transfer_syntax_ndr()
+
+ tsf1_list = [ndr32]
+ ctx1 = dcerpc.ctx_list()
+ ctx1.context_id = 1
+ ctx1.num_transfer_syntaxes = len(tsf1_list)
+ ctx1.abstract_syntax = samba.dcerpc.mgmt.abstract_syntax()
+ ctx1.transfer_syntaxes = tsf1_list
+
+ req = self.generate_bind(call_id=0, ctx_list=[ctx1])
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_BIND_ACK, req.call_id,
+ auth_length=0)
+ self.assertEqual(rep.u.max_xmit_frag, req.u.max_xmit_frag)
+ self.assertEqual(rep.u.max_recv_frag, req.u.max_recv_frag)
+ self.assertNotEqual(rep.u.assoc_group_id, req.u.assoc_group_id)
+ self.assertEqual(rep.u.secondary_address_size, 4)
+ self.assertEqual(rep.u.secondary_address, "%d" % self.tcp_port)
+ self.assertPadding(rep.u._pad1, 2)
+ self.assertEqual(rep.u.num_results, 1)
+ self.assertEqual(rep.u.ctx_list[0].result,
+ dcerpc.DCERPC_BIND_ACK_RESULT_ACCEPTANCE)
+ self.assertEqual(rep.u.ctx_list[0].reason,
+ dcerpc.DCERPC_BIND_ACK_REASON_NOT_SPECIFIED)
+ self.assertNDRSyntaxEquals(rep.u.ctx_list[0].syntax, ndr32)
+ self.assertEqual(rep.u.auth_info, b'\0' * 0)
+
+ # Send a bind again
+ tsf2_list = [ndr32]
+ ctx2 = dcerpc.ctx_list()
+ ctx2.context_id = 2
+ ctx2.num_transfer_syntaxes = len(tsf2_list)
+ ctx2.abstract_syntax = samba.dcerpc.mgmt.abstract_syntax()
+ ctx2.transfer_syntaxes = tsf2_list
+
+ req = self.generate_bind(call_id=1, ctx_list=[ctx2])
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_BIND_NAK, req.call_id,
+ auth_length=0)
+ self.assertEqual(rep.u.reject_reason,
+ dcerpc.DCERPC_BIND_NAK_REASON_NOT_SPECIFIED)
+ self.assertEqual(rep.u.num_versions, 1)
+ self.assertEqual(rep.u.versions[0].rpc_vers, req.rpc_vers)
+ self.assertEqual(rep.u.versions[0].rpc_vers_minor, req.rpc_vers_minor)
+ self.assertPadding(rep.u._pad, 3)
+
+ # wait for a disconnect
+ rep = self.recv_pdu()
+ self.assertIsNone(rep)
+ self.assertNotConnected()
+
+ def test_no_auth_invalid_valid_request(self):
+        # send a useless bind
+ req = self.generate_bind(call_id=0)
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_BIND_NAK, req.call_id,
+ auth_length=0)
+ self.assertEqual(rep.u.reject_reason,
+ dcerpc.DCERPC_BIND_NAK_REASON_NOT_SPECIFIED)
+ self.assertEqual(rep.u.num_versions, 1)
+ self.assertEqual(rep.u.versions[0].rpc_vers, req.rpc_vers)
+ self.assertEqual(rep.u.versions[0].rpc_vers_minor, req.rpc_vers_minor)
+ self.assertPadding(rep.u._pad, 3)
+
+ # wait for a disconnect
+ rep = self.recv_pdu()
+ self.assertIsNone(rep)
+ self.assertNotConnected()
+
+ def test_alter_no_auth_no_ctx(self):
+ ndr32 = base.transfer_syntax_ndr()
+
+ tsf1_list = [ndr32]
+ ctx1 = dcerpc.ctx_list()
+ ctx1.context_id = 1
+ ctx1.num_transfer_syntaxes = len(tsf1_list)
+ ctx1.abstract_syntax = samba.dcerpc.mgmt.abstract_syntax()
+ ctx1.transfer_syntaxes = tsf1_list
+
+ req = self.generate_bind(call_id=0, ctx_list=[ctx1])
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_BIND_ACK, req.call_id,
+ auth_length=0)
+ self.assertEqual(rep.u.max_xmit_frag, req.u.max_xmit_frag)
+ self.assertEqual(rep.u.max_recv_frag, req.u.max_recv_frag)
+ self.assertNotEqual(rep.u.assoc_group_id, req.u.assoc_group_id)
+ self.assertEqual(rep.u.secondary_address_size, 4)
+ self.assertEqual(rep.u.secondary_address, "%d" % self.tcp_port)
+ self.assertPadding(rep.u._pad1, 2)
+ self.assertEqual(rep.u.num_results, 1)
+ self.assertEqual(rep.u.ctx_list[0].result,
+ dcerpc.DCERPC_BIND_ACK_RESULT_ACCEPTANCE)
+ self.assertEqual(rep.u.ctx_list[0].reason,
+ dcerpc.DCERPC_BIND_ACK_REASON_NOT_SPECIFIED)
+ self.assertNDRSyntaxEquals(rep.u.ctx_list[0].syntax, ndr32)
+ self.assertEqual(rep.u.auth_info, b'\0' * 0)
+
+        # Send an alter
+ req = self.generate_alter(call_id=1, ctx_list=[])
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_FAULT, req.call_id,
+ pfc_flags=req.pfc_flags |
+ dcerpc.DCERPC_PFC_FLAG_DID_NOT_EXECUTE,
+ auth_length=0)
+ self.assertNotEqual(rep.u.alloc_hint, 0)
+ self.assertEqual(rep.u.context_id, 0)
+ self.assertEqual(rep.u.cancel_count, 0)
+ self.assertEqual(rep.u.flags, 0)
+ self.assertEqual(rep.u.status, dcerpc.DCERPC_NCA_S_PROTO_ERROR)
+ self.assertEqual(rep.u.reserved, 0)
+ self.assertEqual(len(rep.u.error_and_verifier), 0)
+
+ # wait for a disconnect
+ rep = self.recv_pdu()
+ self.assertIsNone(rep)
+ self.assertNotConnected()
+
+ def test_no_auth_presentation_ctx_valid1(self):
+ ndr32 = base.transfer_syntax_ndr()
+
+ zero_syntax = misc.ndr_syntax_id()
+
+ tsf1_list = [zero_syntax, ndr32]
+ ctx1 = dcerpc.ctx_list()
+ ctx1.context_id = 1
+ ctx1.num_transfer_syntaxes = len(tsf1_list)
+ ctx1.abstract_syntax = samba.dcerpc.mgmt.abstract_syntax()
+ ctx1.transfer_syntaxes = tsf1_list
+
+ req = self.generate_bind(call_id=0, ctx_list=[ctx1])
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_BIND_ACK, req.call_id,
+ auth_length=0)
+ self.assertEqual(rep.u.max_xmit_frag, req.u.max_xmit_frag)
+ self.assertEqual(rep.u.max_recv_frag, req.u.max_recv_frag)
+ self.assertNotEqual(rep.u.assoc_group_id, req.u.assoc_group_id)
+ self.assertEqual(rep.u.secondary_address_size, 4)
+ self.assertEqual(rep.u.secondary_address, "%d" % self.tcp_port)
+ self.assertPadding(rep.u._pad1, 2)
+ self.assertEqual(rep.u.num_results, 1)
+ self.assertEqual(rep.u.ctx_list[0].result,
+ dcerpc.DCERPC_BIND_ACK_RESULT_ACCEPTANCE)
+ self.assertEqual(rep.u.ctx_list[0].reason,
+ dcerpc.DCERPC_BIND_ACK_REASON_NOT_SPECIFIED)
+ self.assertNDRSyntaxEquals(rep.u.ctx_list[0].syntax, ndr32)
+ self.assertEqual(rep.u.auth_info, b'\0' * 0)
+
+        # Send an alter
+ req = self.generate_alter(call_id=1, ctx_list=[ctx1])
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_ALTER_RESP, req.call_id,
+ auth_length=0)
+ self.assertEqual(rep.u.max_xmit_frag, req.u.max_xmit_frag)
+ self.assertEqual(rep.u.max_recv_frag, req.u.max_recv_frag)
+ self.assertNotEqual(rep.u.assoc_group_id, req.u.assoc_group_id)
+ self.assertEqual(rep.u.secondary_address_size, 0)
+ self.assertPadding(rep.u._pad1, 2)
+ self.assertEqual(rep.u.num_results, 1)
+ self.assertEqual(rep.u.ctx_list[0].result,
+ dcerpc.DCERPC_BIND_ACK_RESULT_ACCEPTANCE)
+ self.assertEqual(rep.u.ctx_list[0].reason,
+ dcerpc.DCERPC_BIND_ACK_REASON_NOT_SPECIFIED)
+ self.assertNDRSyntaxEquals(rep.u.ctx_list[0].syntax, ndr32)
+ self.assertEqual(rep.u.auth_info, b'\0' * 0)
+
+ req = self.generate_request(call_id=2,
+ context_id=ctx1.context_id,
+ opnum=0xffff,
+ stub=b"")
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_FAULT, req.call_id,
+ pfc_flags=req.pfc_flags |
+ dcerpc.DCERPC_PFC_FLAG_DID_NOT_EXECUTE,
+ auth_length=0)
+ self.assertNotEqual(rep.u.alloc_hint, 0)
+ self.assertEqual(rep.u.context_id, ctx1.context_id)
+ self.assertEqual(rep.u.cancel_count, 0)
+ self.assertEqual(rep.u.flags, 0)
+ self.assertEqual(rep.u.status, dcerpc.DCERPC_NCA_S_OP_RNG_ERROR)
+ self.assertEqual(rep.u.reserved, 0)
+ self.assertEqual(len(rep.u.error_and_verifier), 0)
+
+ def test_no_auth_presentation_ctx_invalid1(self):
+ ndr32 = base.transfer_syntax_ndr()
+
+ zero_syntax = misc.ndr_syntax_id()
+
+ tsf1_list = [ndr32]
+ ctx1 = dcerpc.ctx_list()
+ ctx1.context_id = 1
+ ctx1.num_transfer_syntaxes = len(tsf1_list)
+ ctx1.abstract_syntax = ndr32
+ ctx1.transfer_syntaxes = tsf1_list
+
+ req = self.generate_bind(call_id=0, ctx_list=[ctx1])
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_BIND_ACK, req.call_id,
+ auth_length=0)
+ self.assertEqual(rep.u.max_xmit_frag, req.u.max_xmit_frag)
+ self.assertEqual(rep.u.max_recv_frag, req.u.max_recv_frag)
+ self.assertNotEqual(rep.u.assoc_group_id, req.u.assoc_group_id)
+ self.assertEqual(rep.u.secondary_address_size, 4)
+ self.assertEqual(rep.u.secondary_address, "%d" % self.tcp_port)
+ self.assertPadding(rep.u._pad1, 2)
+ self.assertEqual(rep.u.num_results, 1)
+ self.assertEqual(rep.u.ctx_list[0].result,
+ dcerpc.DCERPC_BIND_ACK_RESULT_PROVIDER_REJECTION)
+ self.assertEqual(rep.u.ctx_list[0].reason,
+ dcerpc.DCERPC_BIND_ACK_REASON_ABSTRACT_SYNTAX_NOT_SUPPORTED)
+ self.assertNDRSyntaxEquals(rep.u.ctx_list[0].syntax, zero_syntax)
+ self.assertEqual(rep.u.auth_info, b'\0' * 0)
+
+        # Send an alter
+ req = self.generate_alter(call_id=1, ctx_list=[ctx1])
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_ALTER_RESP, req.call_id,
+ auth_length=0)
+ self.assertEqual(rep.u.max_xmit_frag, req.u.max_xmit_frag)
+ self.assertEqual(rep.u.max_recv_frag, req.u.max_recv_frag)
+ self.assertNotEqual(rep.u.assoc_group_id, req.u.assoc_group_id)
+ self.assertEqual(rep.u.secondary_address_size, 0)
+ self.assertPadding(rep.u._pad1, 2)
+ self.assertEqual(rep.u.num_results, 1)
+ self.assertEqual(rep.u.ctx_list[0].result,
+ dcerpc.DCERPC_BIND_ACK_RESULT_PROVIDER_REJECTION)
+ self.assertEqual(rep.u.ctx_list[0].reason,
+ dcerpc.DCERPC_BIND_ACK_REASON_ABSTRACT_SYNTAX_NOT_SUPPORTED)
+ self.assertNDRSyntaxEquals(rep.u.ctx_list[0].syntax, zero_syntax)
+ self.assertEqual(rep.u.auth_info, b'\0' * 0)
+
+ req = self.generate_request(call_id=2,
+ context_id=12345,
+ opnum=0,
+ stub=b"")
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_FAULT, req.call_id,
+ pfc_flags=req.pfc_flags |
+ dcerpc.DCERPC_PFC_FLAG_DID_NOT_EXECUTE,
+ auth_length=0)
+ self.assertNotEqual(rep.u.alloc_hint, 0)
+ self.assertEqual(rep.u.context_id, 0)
+ self.assertEqual(rep.u.cancel_count, 0)
+ self.assertEqual(rep.u.flags, 0)
+ self.assertEqual(rep.u.status, dcerpc.DCERPC_NCA_S_UNKNOWN_IF)
+ self.assertEqual(rep.u.reserved, 0)
+ self.assertEqual(len(rep.u.error_and_verifier), 0)
+
+        # Send an alter again to prove the connection is still alive
+ req = self.generate_alter(call_id=3, ctx_list=[ctx1])
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_ALTER_RESP, req.call_id,
+ auth_length=0)
+ self.assertEqual(rep.u.max_xmit_frag, req.u.max_xmit_frag)
+ self.assertEqual(rep.u.max_recv_frag, req.u.max_recv_frag)
+ self.assertNotEqual(rep.u.assoc_group_id, req.u.assoc_group_id)
+ self.assertEqual(rep.u.secondary_address_size, 0)
+ self.assertPadding(rep.u._pad1, 2)
+ self.assertEqual(rep.u.num_results, 1)
+ self.assertEqual(rep.u.ctx_list[0].result,
+ dcerpc.DCERPC_BIND_ACK_RESULT_PROVIDER_REJECTION)
+ self.assertEqual(rep.u.ctx_list[0].reason,
+ dcerpc.DCERPC_BIND_ACK_REASON_ABSTRACT_SYNTAX_NOT_SUPPORTED)
+ self.assertNDRSyntaxEquals(rep.u.ctx_list[0].syntax, zero_syntax)
+ self.assertEqual(rep.u.auth_info, b'\0' * 0)
+
+ def test_no_auth_presentation_ctx_invalid2(self):
+ ndr32 = base.transfer_syntax_ndr()
+
+ zero_syntax = misc.ndr_syntax_id()
+
+ tsf1a_list = []
+ ctx1a = dcerpc.ctx_list()
+ ctx1a.context_id = 1
+ ctx1a.num_transfer_syntaxes = len(tsf1a_list)
+ ctx1a.abstract_syntax = samba.dcerpc.mgmt.abstract_syntax()
+ ctx1a.transfer_syntaxes = tsf1a_list
+
+ req = self.generate_bind(call_id=0, ctx_list=[ctx1a])
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_BIND_NAK, req.call_id,
+ auth_length=0)
+ self.assertEqual(rep.u.reject_reason,
+ dcerpc.DCERPC_BIND_NAK_REASON_NOT_SPECIFIED)
+ self.assertEqual(rep.u.num_versions, 1)
+ self.assertEqual(rep.u.versions[0].rpc_vers, req.rpc_vers)
+ self.assertEqual(rep.u.versions[0].rpc_vers_minor, req.rpc_vers_minor)
+ self.assertPadding(rep.u._pad, 3)
+
+ # wait for a disconnect
+ rep = self.recv_pdu()
+ self.assertIsNone(rep)
+ self.assertNotConnected()
+
+ def test_no_auth_presentation_ctx_invalid3(self):
+ ndr32 = base.transfer_syntax_ndr()
+
+ zero_syntax = misc.ndr_syntax_id()
+
+ tsf1a_list = [zero_syntax, ndr32, ndr32, ndr32]
+ ctx1a = dcerpc.ctx_list()
+ ctx1a.context_id = 1
+ ctx1a.num_transfer_syntaxes = len(tsf1a_list)
+ ctx1a.abstract_syntax = samba.dcerpc.mgmt.abstract_syntax()
+ ctx1a.transfer_syntaxes = tsf1a_list
+
+ req = self.generate_bind(call_id=0, ctx_list=[ctx1a])
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_BIND_ACK, req.call_id,
+ auth_length=0)
+ self.assertEqual(rep.u.max_xmit_frag, req.u.max_xmit_frag)
+ self.assertEqual(rep.u.max_recv_frag, req.u.max_recv_frag)
+ self.assertNotEqual(rep.u.assoc_group_id, req.u.assoc_group_id)
+ self.assertEqual(rep.u.secondary_address_size, 4)
+ self.assertEqual(rep.u.secondary_address, "%d" % self.tcp_port)
+ self.assertPadding(rep.u._pad1, 2)
+ self.assertEqual(rep.u.num_results, 1)
+ self.assertEqual(rep.u.ctx_list[0].result,
+ dcerpc.DCERPC_BIND_ACK_RESULT_ACCEPTANCE)
+ self.assertEqual(rep.u.ctx_list[0].reason,
+ dcerpc.DCERPC_BIND_ACK_REASON_NOT_SPECIFIED)
+ self.assertNDRSyntaxEquals(rep.u.ctx_list[0].syntax, ndr32)
+ self.assertEqual(rep.u.auth_info, b'\0' * 0)
+
+ tsf1b_list = []
+ ctx1b = dcerpc.ctx_list()
+ ctx1b.context_id = 1
+ ctx1b.num_transfer_syntaxes = len(tsf1b_list)
+ ctx1b.abstract_syntax = samba.dcerpc.mgmt.abstract_syntax()
+ ctx1b.transfer_syntaxes = tsf1b_list
+
+        # Send an alter
+ req = self.generate_alter(call_id=1, ctx_list=[ctx1b])
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_FAULT, req.call_id,
+ pfc_flags=req.pfc_flags |
+ dcerpc.DCERPC_PFC_FLAG_DID_NOT_EXECUTE,
+ auth_length=0)
+ self.assertNotEqual(rep.u.alloc_hint, 0)
+ self.assertEqual(rep.u.context_id, 0)
+ self.assertEqual(rep.u.cancel_count, 0)
+ self.assertEqual(rep.u.flags, 0)
+ self.assertEqual(rep.u.status, dcerpc.DCERPC_NCA_S_PROTO_ERROR)
+ self.assertEqual(rep.u.reserved, 0)
+ self.assertEqual(len(rep.u.error_and_verifier), 0)
+
+ # wait for a disconnect
+ rep = self.recv_pdu()
+ self.assertIsNone(rep)
+ self.assertNotConnected()
+
+ def test_no_auth_presentation_ctx_invalid4(self):
+ ndr32 = base.transfer_syntax_ndr()
+ ndr64 = base.transfer_syntax_ndr64()
+
+ zero_syntax = misc.ndr_syntax_id()
+
+ tsf1a_list = [zero_syntax, ndr32, ndr32, ndr32]
+ ctx1a = dcerpc.ctx_list()
+ ctx1a.context_id = 1
+ ctx1a.num_transfer_syntaxes = len(tsf1a_list)
+ ctx1a.abstract_syntax = samba.dcerpc.mgmt.abstract_syntax()
+ ctx1a.transfer_syntaxes = tsf1a_list
+
+ req = self.generate_bind(call_id=0, ctx_list=[ctx1a])
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_BIND_ACK, req.call_id,
+ auth_length=0)
+ self.assertEqual(rep.u.max_xmit_frag, req.u.max_xmit_frag)
+ self.assertEqual(rep.u.max_recv_frag, req.u.max_recv_frag)
+ self.assertNotEqual(rep.u.assoc_group_id, req.u.assoc_group_id)
+ self.assertEqual(rep.u.secondary_address_size, 4)
+ self.assertEqual(rep.u.secondary_address, "%d" % self.tcp_port)
+ self.assertPadding(rep.u._pad1, 2)
+ self.assertEqual(rep.u.num_results, 1)
+ self.assertEqual(rep.u.ctx_list[0].result,
+ dcerpc.DCERPC_BIND_ACK_RESULT_ACCEPTANCE)
+ self.assertEqual(rep.u.ctx_list[0].reason,
+ dcerpc.DCERPC_BIND_ACK_REASON_NOT_SPECIFIED)
+ self.assertNDRSyntaxEquals(rep.u.ctx_list[0].syntax, ndr32)
+ self.assertEqual(rep.u.auth_info, b'\0' * 0)
+
+ # With a known but wrong syntax we get a protocol error
+ # see test_no_auth_presentation_ctx_valid2
+ tsf1b_list = [zero_syntax, samba.dcerpc.epmapper.abstract_syntax(), ndr64]
+ ctx1b = dcerpc.ctx_list()
+ ctx1b.context_id = 1
+ ctx1b.num_transfer_syntaxes = len(tsf1b_list)
+ ctx1b.abstract_syntax = samba.dcerpc.mgmt.abstract_syntax()
+ ctx1b.transfer_syntaxes = tsf1b_list
+
+        # Send an alter
+ req = self.generate_alter(call_id=1, ctx_list=[ctx1b])
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_FAULT, req.call_id,
+ pfc_flags=req.pfc_flags |
+ dcerpc.DCERPC_PFC_FLAG_DID_NOT_EXECUTE,
+ auth_length=0)
+ self.assertNotEqual(rep.u.alloc_hint, 0)
+ self.assertEqual(rep.u.context_id, 0)
+ self.assertEqual(rep.u.cancel_count, 0)
+ self.assertEqual(rep.u.flags, 0)
+ self.assertEqual(rep.u.status, dcerpc.DCERPC_NCA_S_PROTO_ERROR)
+ self.assertEqual(rep.u.reserved, 0)
+ self.assertEqual(len(rep.u.error_and_verifier), 0)
+
+ # wait for a disconnect
+ rep = self.recv_pdu()
+ self.assertIsNone(rep)
+ self.assertNotConnected()
+
+ def test_no_auth_presentation_ctx_valid2(self):
+ # Bind (no auth) with a valid presentation context (mgmt abstract
+ # syntax; NDR32 offered among the transfer syntaxes) and verify the
+ # server accepts NDR32.  Then re-offer the *same* context_id via an
+ # alter_context carrying only known-but-unsupported transfer syntaxes
+ # (epmapper abstract syntax used as a transfer syntax, NDR64) and
+ # verify the server answers with ALTER_RESP carrying a provider
+ # rejection instead of a protocol fault.  Finally confirm the context
+ # is still usable: a request with an out-of-range opnum draws an
+ # OP_RNG_ERROR fault (not a disconnect).
+ ndr32 = base.transfer_syntax_ndr()
+
+ zero_syntax = misc.ndr_syntax_id()
+
+ # Offer NDR32 several times (plus the zero syntax) - still accepted.
+ tsf1a_list = [zero_syntax, ndr32, ndr32, ndr32]
+ ctx1a = dcerpc.ctx_list()
+ ctx1a.context_id = 1
+ ctx1a.num_transfer_syntaxes = len(tsf1a_list)
+ ctx1a.abstract_syntax = samba.dcerpc.mgmt.abstract_syntax()
+ ctx1a.transfer_syntaxes = tsf1a_list
+
+ req = self.generate_bind(call_id=0, ctx_list=[ctx1a])
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_BIND_ACK, req.call_id,
+ auth_length=0)
+ self.assertEqual(rep.u.max_xmit_frag, req.u.max_xmit_frag)
+ self.assertEqual(rep.u.max_recv_frag, req.u.max_recv_frag)
+ self.assertNotEqual(rep.u.assoc_group_id, req.u.assoc_group_id)
+ self.assertEqual(rep.u.secondary_address_size, 4)
+ self.assertEqual(rep.u.secondary_address, "%d" % self.tcp_port)
+ self.assertPadding(rep.u._pad1, 2)
+ self.assertEqual(rep.u.num_results, 1)
+ self.assertEqual(rep.u.ctx_list[0].result,
+ dcerpc.DCERPC_BIND_ACK_RESULT_ACCEPTANCE)
+ self.assertEqual(rep.u.ctx_list[0].reason,
+ dcerpc.DCERPC_BIND_ACK_REASON_NOT_SPECIFIED)
+ self.assertNDRSyntaxEquals(rep.u.ctx_list[0].syntax, ndr32)
+ self.assertEqual(rep.u.auth_info, b'\0' * 0)
+
+ # With unknown but wrong syntaxes we get NO protocol error
+ # see test_no_auth_presentation_ctx_invalid4
+ tsf1b_list = [zero_syntax, samba.dcerpc.epmapper.abstract_syntax()]
+ ctx1b = dcerpc.ctx_list()
+ ctx1b.context_id = 1
+ ctx1b.num_transfer_syntaxes = len(tsf1b_list)
+ ctx1b.abstract_syntax = samba.dcerpc.mgmt.abstract_syntax()
+ ctx1b.transfer_syntaxes = tsf1b_list
+
+ # Send an alter
+ req = self.generate_alter(call_id=1, ctx_list=[ctx1b])
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_ALTER_RESP, req.call_id,
+ auth_length=0)
+ self.assertEqual(rep.u.max_xmit_frag, req.u.max_xmit_frag)
+ self.assertEqual(rep.u.max_recv_frag, req.u.max_recv_frag)
+ self.assertNotEqual(rep.u.assoc_group_id, req.u.assoc_group_id)
+ # alter_context responses carry no secondary address
+ self.assertEqual(rep.u.secondary_address_size, 0)
+ self.assertPadding(rep.u._pad1, 2)
+ self.assertEqual(rep.u.num_results, 1)
+ self.assertEqual(rep.u.ctx_list[0].result,
+ dcerpc.DCERPC_BIND_ACK_RESULT_PROVIDER_REJECTION)
+ self.assertEqual(rep.u.ctx_list[0].reason,
+ dcerpc.DCERPC_BIND_ACK_REASON_TRANSFER_SYNTAXES_NOT_SUPPORTED)
+ self.assertNDRSyntaxEquals(rep.u.ctx_list[0].syntax, zero_syntax)
+ self.assertEqual(rep.u.auth_info, b'\0' * 0)
+
+ # Invalid opnum on the (still valid) first context: expect an
+ # OP_RNG_ERROR fault with DID_NOT_EXECUTE, connection stays up.
+ req = self.generate_request(call_id=2,
+ context_id=ctx1a.context_id,
+ opnum=0xffff,
+ stub=b"")
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_FAULT, req.call_id,
+ pfc_flags=req.pfc_flags |
+ dcerpc.DCERPC_PFC_FLAG_DID_NOT_EXECUTE,
+ auth_length=0)
+ self.assertNotEqual(rep.u.alloc_hint, 0)
+ self.assertEqual(rep.u.context_id, ctx1a.context_id)
+ self.assertEqual(rep.u.cancel_count, 0)
+ self.assertEqual(rep.u.flags, 0)
+ self.assertEqual(rep.u.status, dcerpc.DCERPC_NCA_S_OP_RNG_ERROR)
+ self.assertEqual(rep.u.reserved, 0)
+ self.assertEqual(len(rep.u.error_and_verifier), 0)
+
+ def test_no_auth_presentation_ctx_no_ndr64(self):
+ # Exercise presentation-context negotiation without NDR64 support:
+ # a bind whose only context is rejected, followed by a series of
+ # alter_context calls adding NDR32-based contexts (ids 0..5).  After
+ # each negotiation a request is sent to prove which contexts are
+ # actually usable.  Also covers duplicate contexts in one alter and
+ # re-use of the same context_id with a different context definition.
+ ndr32 = base.transfer_syntax_ndr()
+ zero_syntax = misc.ndr_syntax_id()
+
+ # A context offering only the zero (null) syntax is rejected, but
+ # the bind itself still succeeds (BIND_ACK, not BIND_NAK).
+ tsfZ_list = [zero_syntax]
+ ctxZ = dcerpc.ctx_list()
+ ctxZ.context_id = 54321
+ ctxZ.num_transfer_syntaxes = len(tsfZ_list)
+ ctxZ.abstract_syntax = zero_syntax
+ ctxZ.transfer_syntaxes = tsfZ_list
+
+ req = self.generate_bind(call_id=0, ctx_list=[ctxZ])
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_BIND_ACK, req.call_id,
+ auth_length=0)
+ self.assertEqual(rep.u.max_xmit_frag, req.u.max_xmit_frag)
+ self.assertEqual(rep.u.max_recv_frag, req.u.max_recv_frag)
+ self.assertNotEqual(rep.u.assoc_group_id, req.u.assoc_group_id)
+ self.assertEqual(rep.u.secondary_address_size, 4)
+ self.assertEqual(rep.u.secondary_address, "%d" % self.tcp_port)
+ self.assertPadding(rep.u._pad1, 2)
+ self.assertEqual(rep.u.num_results, 1)
+ self.assertEqual(rep.u.ctx_list[0].result,
+ dcerpc.DCERPC_BIND_ACK_RESULT_PROVIDER_REJECTION)
+ self.assertEqual(rep.u.ctx_list[0].reason,
+ dcerpc.DCERPC_BIND_ACK_REASON_ABSTRACT_SYNTAX_NOT_SUPPORTED)
+ self.assertNDRSyntaxEquals(rep.u.ctx_list[0].syntax, zero_syntax)
+ self.assertEqual(rep.u.auth_info, b'\0' * 0)
+
+ # ctx0: mgmt abstract syntax over NDR32, added via alter - accepted.
+ tsf0_list = [ndr32]
+ ctx0 = dcerpc.ctx_list()
+ ctx0.context_id = 0
+ ctx0.num_transfer_syntaxes = len(tsf0_list)
+ ctx0.abstract_syntax = samba.dcerpc.mgmt.abstract_syntax()
+ ctx0.transfer_syntaxes = tsf0_list
+
+ req = self.generate_alter(call_id=0, ctx_list=[ctx0])
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_ALTER_RESP, req.call_id,
+ auth_length=0)
+ self.assertEqual(rep.u.max_xmit_frag, req.u.max_xmit_frag)
+ self.assertEqual(rep.u.max_recv_frag, req.u.max_recv_frag)
+ self.assertNotEqual(rep.u.assoc_group_id, req.u.assoc_group_id)
+ self.assertEqual(rep.u.secondary_address_size, 0)
+ self.assertPadding(rep.u._pad1, 2)
+ self.assertEqual(rep.u.num_results, 1)
+ self.assertEqual(rep.u.ctx_list[0].result,
+ dcerpc.DCERPC_BIND_ACK_RESULT_ACCEPTANCE)
+ self.assertEqual(rep.u.ctx_list[0].reason,
+ dcerpc.DCERPC_BIND_ACK_REASON_NOT_SPECIFIED)
+ self.assertNDRSyntaxEquals(rep.u.ctx_list[0].syntax, ndr32)
+ self.assertEqual(rep.u.auth_info, b'\0' * 0)
+
+ # A request on ctx0 succeeds.
+ req = self.generate_request(call_id=1,
+ context_id=ctx0.context_id,
+ opnum=0,
+ stub=b"")
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_RESPONSE, req.call_id,
+ auth_length=0)
+ self.assertNotEqual(rep.u.alloc_hint, 0)
+ self.assertEqual(rep.u.context_id, req.u.context_id)
+ self.assertEqual(rep.u.cancel_count, 0)
+ self.assertGreaterEqual(len(rep.u.stub_and_verifier), rep.u.alloc_hint)
+
+ # ctx1: zero syntax plus NDR32 - NDR32 still gets negotiated.
+ tsf1_list = [zero_syntax, ndr32]
+ ctx1 = dcerpc.ctx_list()
+ ctx1.context_id = 1
+ ctx1.num_transfer_syntaxes = len(tsf1_list)
+ ctx1.abstract_syntax = samba.dcerpc.mgmt.abstract_syntax()
+ ctx1.transfer_syntaxes = tsf1_list
+
+ req = self.generate_alter(call_id=1, ctx_list=[ctx1])
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_ALTER_RESP, req.call_id,
+ auth_length=0)
+ self.assertEqual(rep.u.max_xmit_frag, req.u.max_xmit_frag)
+ self.assertEqual(rep.u.max_recv_frag, req.u.max_recv_frag)
+ self.assertNotEqual(rep.u.assoc_group_id, req.u.assoc_group_id)
+ self.assertEqual(rep.u.secondary_address_size, 0)
+ self.assertPadding(rep.u._pad1, 2)
+ self.assertEqual(rep.u.num_results, 1)
+ self.assertEqual(rep.u.ctx_list[0].result,
+ dcerpc.DCERPC_BIND_ACK_RESULT_ACCEPTANCE)
+ self.assertEqual(rep.u.ctx_list[0].reason,
+ dcerpc.DCERPC_BIND_ACK_REASON_NOT_SPECIFIED)
+ self.assertNDRSyntaxEquals(rep.u.ctx_list[0].syntax, ndr32)
+ self.assertEqual(rep.u.auth_info, b'\0' * 0)
+
+ req = self.generate_request(call_id=1,
+ context_id=ctx1.context_id,
+ opnum=0,
+ stub=b"")
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_RESPONSE, req.call_id,
+ auth_length=0)
+ self.assertNotEqual(rep.u.alloc_hint, 0)
+ self.assertEqual(rep.u.context_id, req.u.context_id)
+ self.assertEqual(rep.u.cancel_count, 0)
+ self.assertGreaterEqual(len(rep.u.stub_and_verifier), rep.u.alloc_hint)
+
+ # ctx2: NDR32 offered twice in one context - still accepted.
+ tsf2_list = [ndr32, ndr32]
+ ctx2 = dcerpc.ctx_list()
+ ctx2.context_id = 2
+ ctx2.num_transfer_syntaxes = len(tsf2_list)
+ ctx2.abstract_syntax = samba.dcerpc.mgmt.abstract_syntax()
+ ctx2.transfer_syntaxes = tsf2_list
+
+ req = self.generate_alter(call_id=2, ctx_list=[ctx2])
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_ALTER_RESP, req.call_id,
+ auth_length=0)
+ self.assertEqual(rep.u.max_xmit_frag, req.u.max_xmit_frag)
+ self.assertEqual(rep.u.max_recv_frag, req.u.max_recv_frag)
+ self.assertNotEqual(rep.u.assoc_group_id, req.u.assoc_group_id)
+ self.assertEqual(rep.u.secondary_address_size, 0)
+ self.assertPadding(rep.u._pad1, 2)
+ self.assertEqual(rep.u.num_results, 1)
+ self.assertEqual(rep.u.ctx_list[0].result,
+ dcerpc.DCERPC_BIND_ACK_RESULT_ACCEPTANCE)
+ self.assertEqual(rep.u.ctx_list[0].reason,
+ dcerpc.DCERPC_BIND_ACK_REASON_NOT_SPECIFIED)
+ self.assertNDRSyntaxEquals(rep.u.ctx_list[0].syntax, ndr32)
+ self.assertEqual(rep.u.auth_info, b'\0' * 0)
+
+ req = self.generate_request(call_id=1,
+ context_id=ctx2.context_id,
+ opnum=0,
+ stub=b"")
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_RESPONSE, req.call_id,
+ auth_length=0)
+ self.assertNotEqual(rep.u.alloc_hint, 0)
+ self.assertEqual(rep.u.context_id, req.u.context_id)
+ self.assertEqual(rep.u.cancel_count, 0)
+ self.assertGreaterEqual(len(rep.u.stub_and_verifier), rep.u.alloc_hint)
+
+ # ctx3/ctx4: two contexts in one alter; only the first in the list
+ # is accepted, the second is rejected with
+ # TRANSFER_SYNTAXES_NOT_SUPPORTED.
+ tsf3_list = [ndr32]
+ ctx3 = dcerpc.ctx_list()
+ ctx3.context_id = 3
+ ctx3.num_transfer_syntaxes = len(tsf3_list)
+ ctx3.abstract_syntax = samba.dcerpc.mgmt.abstract_syntax()
+ ctx3.transfer_syntaxes = tsf3_list
+
+ tsf4_list = [ndr32]
+ ctx4 = dcerpc.ctx_list()
+ ctx4.context_id = 4
+ ctx4.num_transfer_syntaxes = len(tsf4_list)
+ ctx4.abstract_syntax = samba.dcerpc.mgmt.abstract_syntax()
+ ctx4.transfer_syntaxes = tsf4_list
+
+ req = self.generate_alter(call_id=34, ctx_list=[ctx3, ctx4])
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_ALTER_RESP, req.call_id,
+ auth_length=0)
+ self.assertEqual(rep.u.max_xmit_frag, req.u.max_xmit_frag)
+ self.assertEqual(rep.u.max_recv_frag, req.u.max_recv_frag)
+ self.assertNotEqual(rep.u.assoc_group_id, req.u.assoc_group_id)
+ self.assertEqual(rep.u.secondary_address_size, 0)
+ self.assertPadding(rep.u._pad1, 2)
+ self.assertEqual(rep.u.num_results, 2)
+ self.assertEqual(rep.u.ctx_list[0].result,
+ dcerpc.DCERPC_BIND_ACK_RESULT_ACCEPTANCE)
+ self.assertEqual(rep.u.ctx_list[0].reason,
+ dcerpc.DCERPC_BIND_ACK_REASON_NOT_SPECIFIED)
+ self.assertNDRSyntaxEquals(rep.u.ctx_list[0].syntax, ndr32)
+ self.assertEqual(rep.u.ctx_list[1].result,
+ dcerpc.DCERPC_BIND_ACK_RESULT_PROVIDER_REJECTION)
+ self.assertEqual(rep.u.ctx_list[1].reason,
+ dcerpc.DCERPC_BIND_ACK_REASON_TRANSFER_SYNTAXES_NOT_SUPPORTED)
+ self.assertNDRSyntaxEquals(rep.u.ctx_list[1].syntax, zero_syntax)
+ self.assertEqual(rep.u.auth_info, b'\0' * 0)
+
+ # ctx3 (the accepted one) is usable.
+ req = self.generate_request(call_id=1,
+ context_id=ctx3.context_id,
+ opnum=0,
+ stub=b"")
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_RESPONSE, req.call_id,
+ auth_length=0)
+ self.assertNotEqual(rep.u.alloc_hint, 0)
+ self.assertEqual(rep.u.context_id, req.u.context_id)
+ self.assertEqual(rep.u.cancel_count, 0)
+ self.assertGreaterEqual(len(rep.u.stub_and_verifier), rep.u.alloc_hint)
+
+ # Same pair in swapped order: again only the first entry (now ctx4)
+ # is accepted; afterwards BOTH ctx4 and ctx3 remain usable.
+ req = self.generate_alter(call_id=43, ctx_list=[ctx4, ctx3])
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_ALTER_RESP, req.call_id,
+ auth_length=0)
+ self.assertEqual(rep.u.max_xmit_frag, req.u.max_xmit_frag)
+ self.assertEqual(rep.u.max_recv_frag, req.u.max_recv_frag)
+ self.assertNotEqual(rep.u.assoc_group_id, req.u.assoc_group_id)
+ self.assertEqual(rep.u.secondary_address_size, 0)
+ self.assertPadding(rep.u._pad1, 2)
+ self.assertEqual(rep.u.num_results, 2)
+ self.assertEqual(rep.u.ctx_list[0].result,
+ dcerpc.DCERPC_BIND_ACK_RESULT_ACCEPTANCE)
+ self.assertEqual(rep.u.ctx_list[0].reason,
+ dcerpc.DCERPC_BIND_ACK_REASON_NOT_SPECIFIED)
+ self.assertNDRSyntaxEquals(rep.u.ctx_list[0].syntax, ndr32)
+ self.assertEqual(rep.u.ctx_list[1].result,
+ dcerpc.DCERPC_BIND_ACK_RESULT_PROVIDER_REJECTION)
+ self.assertEqual(rep.u.ctx_list[1].reason,
+ dcerpc.DCERPC_BIND_ACK_REASON_TRANSFER_SYNTAXES_NOT_SUPPORTED)
+ self.assertNDRSyntaxEquals(rep.u.ctx_list[1].syntax, zero_syntax)
+ self.assertEqual(rep.u.auth_info, b'\0' * 0)
+
+ req = self.generate_request(call_id=1,
+ context_id=ctx4.context_id,
+ opnum=0,
+ stub=b"")
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_RESPONSE, req.call_id,
+ auth_length=0)
+ self.assertNotEqual(rep.u.alloc_hint, 0)
+ self.assertEqual(rep.u.context_id, req.u.context_id)
+ self.assertEqual(rep.u.cancel_count, 0)
+ self.assertGreaterEqual(len(rep.u.stub_and_verifier), rep.u.alloc_hint)
+
+ req = self.generate_request(call_id=1,
+ context_id=ctx3.context_id,
+ opnum=0,
+ stub=b"")
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_RESPONSE, req.call_id,
+ auth_length=0)
+ self.assertNotEqual(rep.u.alloc_hint, 0)
+ self.assertEqual(rep.u.context_id, req.u.context_id)
+ self.assertEqual(rep.u.cancel_count, 0)
+ self.assertGreaterEqual(len(rep.u.stub_and_verifier), rep.u.alloc_hint)
+
+ # The same context twice in one alter: first accepted, duplicate
+ # rejected; ctx4 and ctx3 still work afterwards.
+ req = self.generate_alter(call_id=44, ctx_list=[ctx4, ctx4])
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_ALTER_RESP, req.call_id,
+ auth_length=0)
+ self.assertEqual(rep.u.max_xmit_frag, req.u.max_xmit_frag)
+ self.assertEqual(rep.u.max_recv_frag, req.u.max_recv_frag)
+ self.assertNotEqual(rep.u.assoc_group_id, req.u.assoc_group_id)
+ self.assertEqual(rep.u.secondary_address_size, 0)
+ self.assertPadding(rep.u._pad1, 2)
+ self.assertEqual(rep.u.num_results, 2)
+ self.assertEqual(rep.u.ctx_list[0].result,
+ dcerpc.DCERPC_BIND_ACK_RESULT_ACCEPTANCE)
+ self.assertEqual(rep.u.ctx_list[0].reason,
+ dcerpc.DCERPC_BIND_ACK_REASON_NOT_SPECIFIED)
+ self.assertNDRSyntaxEquals(rep.u.ctx_list[0].syntax, ndr32)
+ self.assertEqual(rep.u.ctx_list[1].result,
+ dcerpc.DCERPC_BIND_ACK_RESULT_PROVIDER_REJECTION)
+ self.assertEqual(rep.u.ctx_list[1].reason,
+ dcerpc.DCERPC_BIND_ACK_REASON_TRANSFER_SYNTAXES_NOT_SUPPORTED)
+ self.assertNDRSyntaxEquals(rep.u.ctx_list[1].syntax, zero_syntax)
+ self.assertEqual(rep.u.auth_info, b'\0' * 0)
+
+ req = self.generate_request(call_id=1,
+ context_id=ctx4.context_id,
+ opnum=0,
+ stub=b"")
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_RESPONSE, req.call_id,
+ auth_length=0)
+ self.assertNotEqual(rep.u.alloc_hint, 0)
+ self.assertEqual(rep.u.context_id, req.u.context_id)
+ self.assertEqual(rep.u.cancel_count, 0)
+ self.assertGreaterEqual(len(rep.u.stub_and_verifier), rep.u.alloc_hint)
+
+ req = self.generate_request(call_id=1,
+ context_id=ctx3.context_id,
+ opnum=0,
+ stub=b"")
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_RESPONSE, req.call_id,
+ auth_length=0)
+ self.assertNotEqual(rep.u.alloc_hint, 0)
+ self.assertEqual(rep.u.context_id, req.u.context_id)
+ self.assertEqual(rep.u.cancel_count, 0)
+ self.assertGreaterEqual(len(rep.u.stub_and_verifier), rep.u.alloc_hint)
+
+ # Two contexts sharing context_id 5 in a single alter: the first is
+ # accepted, the second rejected.
+ # NOTE(review): despite the "epm" name, ctx5epm below also uses the
+ # mgmt abstract syntax, making the two entries identical apart from
+ # the variable name - confirm against upstream whether epmapper was
+ # intended here.
+ tsf5mgmt_list = [ndr32]
+ ctx5mgmt = dcerpc.ctx_list()
+ ctx5mgmt.context_id = 5
+ ctx5mgmt.num_transfer_syntaxes = len(tsf5mgmt_list)
+ ctx5mgmt.abstract_syntax = samba.dcerpc.mgmt.abstract_syntax()
+ ctx5mgmt.transfer_syntaxes = tsf5mgmt_list
+
+ tsf5epm_list = [ndr32]
+ ctx5epm = dcerpc.ctx_list()
+ ctx5epm.context_id = 5
+ ctx5epm.num_transfer_syntaxes = len(tsf5epm_list)
+ ctx5epm.abstract_syntax = samba.dcerpc.mgmt.abstract_syntax()
+ ctx5epm.transfer_syntaxes = tsf5epm_list
+
+ req = self.generate_alter(call_id=55, ctx_list=[ctx5mgmt, ctx5epm])
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_ALTER_RESP, req.call_id,
+ auth_length=0)
+ self.assertEqual(rep.u.max_xmit_frag, req.u.max_xmit_frag)
+ self.assertEqual(rep.u.max_recv_frag, req.u.max_recv_frag)
+ self.assertNotEqual(rep.u.assoc_group_id, req.u.assoc_group_id)
+ self.assertEqual(rep.u.secondary_address_size, 0)
+ self.assertPadding(rep.u._pad1, 2)
+ self.assertEqual(rep.u.num_results, 2)
+ self.assertEqual(rep.u.ctx_list[0].result,
+ dcerpc.DCERPC_BIND_ACK_RESULT_ACCEPTANCE)
+ self.assertEqual(rep.u.ctx_list[0].reason,
+ dcerpc.DCERPC_BIND_ACK_REASON_NOT_SPECIFIED)
+ self.assertNDRSyntaxEquals(rep.u.ctx_list[0].syntax, ndr32)
+ self.assertEqual(rep.u.ctx_list[1].result,
+ dcerpc.DCERPC_BIND_ACK_RESULT_PROVIDER_REJECTION)
+ self.assertEqual(rep.u.ctx_list[1].reason,
+ dcerpc.DCERPC_BIND_ACK_REASON_TRANSFER_SYNTAXES_NOT_SUPPORTED)
+ self.assertNDRSyntaxEquals(rep.u.ctx_list[1].syntax, zero_syntax)
+ self.assertEqual(rep.u.auth_info, b'\0' * 0)
+
+ req = self.generate_request(call_id=1,
+ context_id=ctx5mgmt.context_id,
+ opnum=0,
+ stub=b"")
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_RESPONSE, req.call_id,
+ auth_length=0)
+ self.assertNotEqual(rep.u.alloc_hint, 0)
+ self.assertEqual(rep.u.context_id, req.u.context_id)
+ self.assertEqual(rep.u.cancel_count, 0)
+ self.assertGreaterEqual(len(rep.u.stub_and_verifier), rep.u.alloc_hint)
+
+ # Repeating the identical alter yields the same results and the
+ # context stays usable.
+ req = self.generate_alter(call_id=55, ctx_list=[ctx5mgmt, ctx5epm])
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_ALTER_RESP, req.call_id,
+ auth_length=0)
+ self.assertEqual(rep.u.max_xmit_frag, req.u.max_xmit_frag)
+ self.assertEqual(rep.u.max_recv_frag, req.u.max_recv_frag)
+ self.assertNotEqual(rep.u.assoc_group_id, req.u.assoc_group_id)
+ self.assertEqual(rep.u.secondary_address_size, 0)
+ self.assertPadding(rep.u._pad1, 2)
+ self.assertEqual(rep.u.num_results, 2)
+ self.assertEqual(rep.u.ctx_list[0].result,
+ dcerpc.DCERPC_BIND_ACK_RESULT_ACCEPTANCE)
+ self.assertEqual(rep.u.ctx_list[0].reason,
+ dcerpc.DCERPC_BIND_ACK_REASON_NOT_SPECIFIED)
+ self.assertNDRSyntaxEquals(rep.u.ctx_list[0].syntax, ndr32)
+ self.assertEqual(rep.u.ctx_list[1].result,
+ dcerpc.DCERPC_BIND_ACK_RESULT_PROVIDER_REJECTION)
+ self.assertEqual(rep.u.ctx_list[1].reason,
+ dcerpc.DCERPC_BIND_ACK_REASON_TRANSFER_SYNTAXES_NOT_SUPPORTED)
+ self.assertNDRSyntaxEquals(rep.u.ctx_list[1].syntax, zero_syntax)
+ self.assertEqual(rep.u.auth_info, b'\0' * 0)
+
+ req = self.generate_request(call_id=1,
+ context_id=ctx5mgmt.context_id,
+ opnum=0,
+ stub=b"")
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_RESPONSE, req.call_id,
+ auth_length=0)
+ self.assertNotEqual(rep.u.alloc_hint, 0)
+ self.assertEqual(rep.u.context_id, req.u.context_id)
+ self.assertEqual(rep.u.cancel_count, 0)
+ self.assertGreaterEqual(len(rep.u.stub_and_verifier), rep.u.alloc_hint)
+
+ def test_no_auth_bind_time_none_simple(self):
+ # A bind-time feature negotiation context with no feature bits set:
+ # the server answers NEGOTIATE_ACK and echoes the (empty) feature
+ # set in the reason field.
+ features = 0
+ btf = base.bind_time_features_syntax(features)
+
+ zero_syntax = misc.ndr_syntax_id()
+
+ tsf1_list = [btf]
+ ctx1 = dcerpc.ctx_list()
+ ctx1.context_id = 1
+ ctx1.num_transfer_syntaxes = len(tsf1_list)
+ ctx1.abstract_syntax = zero_syntax
+ ctx1.transfer_syntaxes = tsf1_list
+
+ req = self.generate_bind(call_id=0, ctx_list=[ctx1])
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_BIND_ACK, req.call_id,
+ auth_length=0)
+ self.assertEqual(rep.u.max_xmit_frag, req.u.max_xmit_frag)
+ self.assertEqual(rep.u.max_recv_frag, req.u.max_recv_frag)
+ self.assertNotEqual(rep.u.assoc_group_id, req.u.assoc_group_id)
+ self.assertEqual(rep.u.secondary_address_size, 4)
+ self.assertEqual(rep.u.secondary_address, "%d" % self.tcp_port)
+ self.assertPadding(rep.u._pad1, 2)
+ self.assertEqual(rep.u.num_results, 1)
+ self.assertEqual(rep.u.ctx_list[0].result,
+ dcerpc.DCERPC_BIND_ACK_RESULT_NEGOTIATE_ACK)
+ # the reason field carries the negotiated feature bits
+ self.assertEqual(rep.u.ctx_list[0].reason, features)
+ self.assertNDRSyntaxEquals(rep.u.ctx_list[0].syntax, zero_syntax)
+ self.assertEqual(rep.u.auth_info, b'\0' * 0)
+
+ def test_no_auth_bind_time_none_ignore_additional(self):
+ # Only the FIRST bind-time-features entry in the transfer-syntax
+ # list counts; a later entry with real feature bits is ignored, so
+ # the negotiated feature set is the empty one from btf1.
+ features1 = 0
+ btf1 = base.bind_time_features_syntax(features1)
+
+ features2 = dcerpc.DCERPC_BIND_TIME_KEEP_CONNECTION_ON_ORPHAN
+ features2 |= dcerpc.DCERPC_BIND_TIME_SECURITY_CONTEXT_MULTIPLEXING
+ btf2 = base.bind_time_features_syntax(features2)
+
+ zero_syntax = misc.ndr_syntax_id()
+ ndr64 = base.transfer_syntax_ndr64()
+
+ tsf1_list = [btf1, btf2, zero_syntax]
+ ctx1 = dcerpc.ctx_list()
+ ctx1.context_id = 1
+ ctx1.num_transfer_syntaxes = len(tsf1_list)
+ ctx1.abstract_syntax = ndr64
+ ctx1.transfer_syntaxes = tsf1_list
+
+ req = self.generate_bind(call_id=0, ctx_list=[ctx1])
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_BIND_ACK, req.call_id,
+ auth_length=0)
+ self.assertEqual(rep.u.max_xmit_frag, req.u.max_xmit_frag)
+ self.assertEqual(rep.u.max_recv_frag, req.u.max_recv_frag)
+ self.assertNotEqual(rep.u.assoc_group_id, req.u.assoc_group_id)
+ self.assertEqual(rep.u.secondary_address_size, 4)
+ self.assertEqual(rep.u.secondary_address, "%d" % self.tcp_port)
+ self.assertPadding(rep.u._pad1, 2)
+ self.assertEqual(rep.u.num_results, 1)
+ self.assertEqual(rep.u.ctx_list[0].result,
+ dcerpc.DCERPC_BIND_ACK_RESULT_NEGOTIATE_ACK)
+ # only features1 (from the first entry) is negotiated
+ self.assertEqual(rep.u.ctx_list[0].reason, features1)
+ self.assertNDRSyntaxEquals(rep.u.ctx_list[0].syntax, zero_syntax)
+ self.assertEqual(rep.u.auth_info, b'\0' * 0)
+
+ def test_no_auth_bind_time_only_first(self):
+ # Bind-time feature negotiation is only recognised when the features
+ # entry is the FIRST transfer syntax.  Here the list starts with the
+ # zero syntax, so no negotiation happens and the context is rejected
+ # (zero abstract syntax is not supported).
+ features1 = dcerpc.DCERPC_BIND_TIME_KEEP_CONNECTION_ON_ORPHAN
+ btf1 = base.bind_time_features_syntax(features1)
+
+ features2 = dcerpc.DCERPC_BIND_TIME_SECURITY_CONTEXT_MULTIPLEXING
+ btf2 = base.bind_time_features_syntax(features2)
+
+ zero_syntax = misc.ndr_syntax_id()
+
+ # zero_syntax first => the btf entries are not treated specially
+ tsf1_list = [zero_syntax, btf1, btf2, zero_syntax]
+ ctx1 = dcerpc.ctx_list()
+ ctx1.context_id = 1
+ ctx1.num_transfer_syntaxes = len(tsf1_list)
+ ctx1.abstract_syntax = zero_syntax
+ ctx1.transfer_syntaxes = tsf1_list
+
+ req = self.generate_bind(call_id=0, ctx_list=[ctx1])
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_BIND_ACK, req.call_id,
+ auth_length=0)
+ self.assertEqual(rep.u.max_xmit_frag, req.u.max_xmit_frag)
+ self.assertEqual(rep.u.max_recv_frag, req.u.max_recv_frag)
+ self.assertNotEqual(rep.u.assoc_group_id, req.u.assoc_group_id)
+ self.assertEqual(rep.u.secondary_address_size, 4)
+ self.assertEqual(rep.u.secondary_address, "%d" % self.tcp_port)
+ self.assertPadding(rep.u._pad1, 2)
+ self.assertEqual(rep.u.num_results, 1)
+ self.assertEqual(rep.u.ctx_list[0].result,
+ dcerpc.DCERPC_BIND_ACK_RESULT_PROVIDER_REJECTION)
+ self.assertEqual(rep.u.ctx_list[0].reason,
+ dcerpc.DCERPC_BIND_ACK_REASON_ABSTRACT_SYNTAX_NOT_SUPPORTED)
+ self.assertNDRSyntaxEquals(rep.u.ctx_list[0].syntax, zero_syntax)
+ self.assertEqual(rep.u.auth_info, b'\0' * 0)
+
+ def test_no_auth_bind_time_twice(self):
+ # Two separate bind-time-feature contexts in a single bind PDU is a
+ # protocol violation: the server answers with BIND_NAK and drops the
+ # connection.
+ features1 = dcerpc.DCERPC_BIND_TIME_KEEP_CONNECTION_ON_ORPHAN
+ btf1 = base.bind_time_features_syntax(features1)
+
+ features2 = dcerpc.DCERPC_BIND_TIME_SECURITY_CONTEXT_MULTIPLEXING
+ btf2 = base.bind_time_features_syntax(features2)
+
+ zero_syntax = misc.ndr_syntax_id()
+
+ tsf1_list = [btf1]
+ ctx1 = dcerpc.ctx_list()
+ ctx1.context_id = 1
+ ctx1.num_transfer_syntaxes = len(tsf1_list)
+ ctx1.abstract_syntax = zero_syntax
+ ctx1.transfer_syntaxes = tsf1_list
+
+ tsf2_list = [btf2]
+ ctx2 = dcerpc.ctx_list()
+ ctx2.context_id = 2
+ ctx2.num_transfer_syntaxes = len(tsf2_list)
+ ctx2.abstract_syntax = zero_syntax
+ ctx2.transfer_syntaxes = tsf2_list
+
+ req = self.generate_bind(call_id=0, ctx_list=[ctx1, ctx2])
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_BIND_NAK, req.call_id,
+ auth_length=0)
+ self.assertEqual(rep.u.reject_reason,
+ dcerpc.DCERPC_BIND_NAK_REASON_NOT_SPECIFIED)
+ self.assertEqual(rep.u.num_versions, 1)
+ self.assertEqual(rep.u.versions[0].rpc_vers, req.rpc_vers)
+ self.assertEqual(rep.u.versions[0].rpc_vers_minor, req.rpc_vers_minor)
+ self.assertPadding(rep.u._pad, 3)
+
+ # wait for a disconnect
+ rep = self.recv_pdu()
+ self.assertIsNone(rep)
+ self.assertNotConnected()
+
+ def test_no_auth_bind_time_keep_on_orphan_simple(self):
+ # Negotiate only the KEEP_CONNECTION_ON_ORPHAN bind-time feature;
+ # the server acks it and echoes the feature bit in the reason field.
+ features = dcerpc.DCERPC_BIND_TIME_KEEP_CONNECTION_ON_ORPHAN
+ btf = base.bind_time_features_syntax(features)
+
+ zero_syntax = misc.ndr_syntax_id()
+
+ tsf1_list = [btf]
+ ctx1 = dcerpc.ctx_list()
+ ctx1.context_id = 1
+ ctx1.num_transfer_syntaxes = len(tsf1_list)
+ ctx1.abstract_syntax = zero_syntax
+ ctx1.transfer_syntaxes = tsf1_list
+
+ req = self.generate_bind(call_id=0, ctx_list=[ctx1])
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_BIND_ACK, req.call_id,
+ auth_length=0)
+ self.assertEqual(rep.u.max_xmit_frag, req.u.max_xmit_frag)
+ self.assertEqual(rep.u.max_recv_frag, req.u.max_recv_frag)
+ self.assertNotEqual(rep.u.assoc_group_id, req.u.assoc_group_id)
+ self.assertEqual(rep.u.secondary_address_size, 4)
+ self.assertEqual(rep.u.secondary_address, "%d" % self.tcp_port)
+ self.assertPadding(rep.u._pad1, 2)
+ self.assertEqual(rep.u.num_results, 1)
+ self.assertEqual(rep.u.ctx_list[0].result,
+ dcerpc.DCERPC_BIND_ACK_RESULT_NEGOTIATE_ACK)
+ self.assertEqual(rep.u.ctx_list[0].reason, features)
+ self.assertNDRSyntaxEquals(rep.u.ctx_list[0].syntax, zero_syntax)
+ self.assertEqual(rep.u.auth_info, b'\0' * 0)
+
+ def test_no_auth_bind_time_keep_on_orphan_ignore_additional(self):
+ # A second bind-time-features entry after the first is ignored: only
+ # features1 (KEEP_CONNECTION_ON_ORPHAN) gets negotiated, not the
+ # SECURITY_CONTEXT_MULTIPLEXING bit from btf2.
+ features1 = dcerpc.DCERPC_BIND_TIME_KEEP_CONNECTION_ON_ORPHAN
+ btf1 = base.bind_time_features_syntax(features1)
+
+ features2 = dcerpc.DCERPC_BIND_TIME_SECURITY_CONTEXT_MULTIPLEXING
+ btf2 = base.bind_time_features_syntax(features2)
+
+ zero_syntax = misc.ndr_syntax_id()
+ ndr64 = base.transfer_syntax_ndr64()
+
+ tsf1_list = [btf1, btf2, zero_syntax]
+ ctx1 = dcerpc.ctx_list()
+ ctx1.context_id = 1
+ ctx1.num_transfer_syntaxes = len(tsf1_list)
+ ctx1.abstract_syntax = ndr64
+ ctx1.transfer_syntaxes = tsf1_list
+
+ req = self.generate_bind(call_id=0, ctx_list=[ctx1])
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_BIND_ACK, req.call_id,
+ auth_length=0)
+ self.assertEqual(rep.u.max_xmit_frag, req.u.max_xmit_frag)
+ self.assertEqual(rep.u.max_recv_frag, req.u.max_recv_frag)
+ self.assertNotEqual(rep.u.assoc_group_id, req.u.assoc_group_id)
+ self.assertEqual(rep.u.secondary_address_size, 4)
+ self.assertEqual(rep.u.secondary_address, "%d" % self.tcp_port)
+ self.assertPadding(rep.u._pad1, 2)
+ self.assertEqual(rep.u.num_results, 1)
+ self.assertEqual(rep.u.ctx_list[0].result,
+ dcerpc.DCERPC_BIND_ACK_RESULT_NEGOTIATE_ACK)
+ self.assertEqual(rep.u.ctx_list[0].reason, features1)
+ self.assertNDRSyntaxEquals(rep.u.ctx_list[0].syntax, zero_syntax)
+ self.assertEqual(rep.u.auth_info, b'\0' * 0)
+
+ def test_no_auth_bind_time_sec_ctx_ignore_additional(self):
+ # Mirror of the keep_on_orphan variant with the feature order
+ # swapped: the first entry (SECURITY_CONTEXT_MULTIPLEXING) wins and
+ # the later KEEP_CONNECTION_ON_ORPHAN entry is ignored.
+ features1 = dcerpc.DCERPC_BIND_TIME_SECURITY_CONTEXT_MULTIPLEXING
+ btf1 = base.bind_time_features_syntax(features1)
+
+ features2 = dcerpc.DCERPC_BIND_TIME_KEEP_CONNECTION_ON_ORPHAN
+ btf2 = base.bind_time_features_syntax(features2)
+
+ zero_syntax = misc.ndr_syntax_id()
+ ndr64 = base.transfer_syntax_ndr64()
+
+ tsf1_list = [btf1, btf2, zero_syntax]
+ ctx1 = dcerpc.ctx_list()
+ ctx1.context_id = 1
+ ctx1.num_transfer_syntaxes = len(tsf1_list)
+ ctx1.abstract_syntax = ndr64
+ ctx1.transfer_syntaxes = tsf1_list
+
+ req = self.generate_bind(call_id=0, ctx_list=[ctx1])
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_BIND_ACK, req.call_id,
+ auth_length=0)
+ self.assertEqual(rep.u.max_xmit_frag, req.u.max_xmit_frag)
+ self.assertEqual(rep.u.max_recv_frag, req.u.max_recv_frag)
+ self.assertNotEqual(rep.u.assoc_group_id, req.u.assoc_group_id)
+ self.assertEqual(rep.u.secondary_address_size, 4)
+ self.assertEqual(rep.u.secondary_address, "%d" % self.tcp_port)
+ self.assertPadding(rep.u._pad1, 2)
+ self.assertEqual(rep.u.num_results, 1)
+ self.assertEqual(rep.u.ctx_list[0].result,
+ dcerpc.DCERPC_BIND_ACK_RESULT_NEGOTIATE_ACK)
+ self.assertEqual(rep.u.ctx_list[0].reason, features1)
+ self.assertNDRSyntaxEquals(rep.u.ctx_list[0].syntax, zero_syntax)
+ self.assertEqual(rep.u.auth_info, b'\0' * 0)
+
+ def _test_auth_type_level_bind_nak(self, auth_type, auth_level, creds=None,
+ reason=dcerpc.DCERPC_BIND_NAK_REASON_INVALID_AUTH_TYPE):
+ # Helper: bind with the given auth_type/auth_level and expect the
+ # server to reject it with BIND_NAK (default reason
+ # INVALID_AUTH_TYPE) followed by a disconnect.
+ #
+ # If creds is given, a real gensec exchange produces the first auth
+ # token; otherwise a dummy b"none" blob is sent.
+ ndr32 = base.transfer_syntax_ndr()
+
+ tsf1_list = [ndr32]
+ ctx1 = dcerpc.ctx_list()
+ ctx1.context_id = 1
+ ctx1.num_transfer_syntaxes = len(tsf1_list)
+ ctx1.abstract_syntax = samba.dcerpc.mgmt.abstract_syntax()
+ ctx1.transfer_syntaxes = tsf1_list
+ ctx_list = [ctx1]
+
+ auth_context_id = 0
+
+ if creds is not None:
+ # We always start with DCERPC_AUTH_LEVEL_INTEGRITY
+ auth_context = self.get_auth_context_creds(creds,
+ auth_type=auth_type,
+ auth_level=auth_level,
+ auth_context_id=auth_context_id,
+ g_auth_level=dcerpc.DCERPC_AUTH_LEVEL_INTEGRITY)
+ from_server = b""
+ (finished, to_server) = auth_context["gensec"].update(from_server)
+ # the first leg of a gensec exchange can never be the last
+ self.assertFalse(finished)
+
+ auth_info = self.generate_auth(auth_type=auth_context["auth_type"],
+ auth_level=auth_context["auth_level"],
+ auth_context_id=auth_context["auth_context_id"],
+ auth_blob=to_server)
+ else:
+ # no credentials: send an arbitrary placeholder token
+ to_server = b"none"
+ auth_info = self.generate_auth(auth_type=auth_type,
+ auth_level=auth_level,
+ auth_context_id=auth_context_id,
+ auth_blob=to_server)
+
+ req = self.generate_bind(call_id=0,
+ ctx_list=ctx_list,
+ auth_info=auth_info)
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_BIND_NAK, req.call_id,
+ auth_length=0)
+ self.assertEqual(rep.u.reject_reason, reason)
+ self.assertEqual(rep.u.num_versions, 1)
+ self.assertEqual(rep.u.versions[0].rpc_vers, req.rpc_vers)
+ self.assertEqual(rep.u.versions[0].rpc_vers_minor, req.rpc_vers_minor)
+ self.assertPadding(rep.u._pad, 3)
+
+ # wait for a disconnect
+ rep = self.recv_pdu()
+ self.assertIsNone(rep)
+ self.assertNotConnected()
+
+ def _test_auth_none_level_bind(self, auth_level,
+ reason=dcerpc.DCERPC_BIND_NAK_REASON_INVALID_AUTH_TYPE):
+ # Bind with the "none" auth type at the given auth_level and expect
+ # a BIND_NAK with the given reason.
+ # NOTE(review): DCERPC_AUTH_LEVEL_NONE is passed here as the
+ # *auth_type* argument; the helper name suggests DCERPC_AUTH_TYPE_NONE
+ # was intended - confirm against upstream Samba before changing, as
+ # both values are rejected by the server either way.
+ return self._test_auth_type_level_bind_nak(auth_type=dcerpc.DCERPC_AUTH_LEVEL_NONE,
+ auth_level=auth_level, reason=reason)
+
+ def test_auth_none_none_bind(self):
+ # auth level NONE with the none auth type => NAK "not specified"
+ return self._test_auth_none_level_bind(dcerpc.DCERPC_AUTH_LEVEL_NONE,
+ reason=dcerpc.DCERPC_BIND_NAK_REASON_NOT_SPECIFIED)
+
+ def test_auth_none_connect_bind(self):
+ # auth level CONNECT with the none auth type => NAK (invalid type)
+ return self._test_auth_none_level_bind(dcerpc.DCERPC_AUTH_LEVEL_CONNECT)
+
+ def test_auth_none_call_bind(self):
+ # auth level CALL with the none auth type => NAK (invalid type)
+ return self._test_auth_none_level_bind(dcerpc.DCERPC_AUTH_LEVEL_CALL)
+
+ def test_auth_none_packet_bind(self):
+ # auth level PACKET with the none auth type => NAK (invalid type)
+ return self._test_auth_none_level_bind(dcerpc.DCERPC_AUTH_LEVEL_PACKET)
+
+ def test_auth_none_integrity_bind(self):
+ # auth level INTEGRITY with the none auth type => NAK (invalid type)
+ return self._test_auth_none_level_bind(dcerpc.DCERPC_AUTH_LEVEL_INTEGRITY)
+
+ def test_auth_none_privacy_bind(self):
+ # auth level PRIVACY with the none auth type => NAK (invalid type)
+ return self._test_auth_none_level_bind(dcerpc.DCERPC_AUTH_LEVEL_PRIVACY)
+
+ def test_auth_none_0_bind(self):
+ # out-of-range auth level 0 => NAK "not specified"
+ return self._test_auth_none_level_bind(0,
+ reason=dcerpc.DCERPC_BIND_NAK_REASON_NOT_SPECIFIED)
+
+ def test_auth_none_7_bind(self):
+ # out-of-range auth level 7 => NAK "not specified"
+ return self._test_auth_none_level_bind(7,
+ reason=dcerpc.DCERPC_BIND_NAK_REASON_NOT_SPECIFIED)
+
+ def test_auth_none_255_bind(self):
+ # out-of-range auth level 255 => NAK "not specified"
+ return self._test_auth_none_level_bind(255,
+ reason=dcerpc.DCERPC_BIND_NAK_REASON_NOT_SPECIFIED)
+
+ def _test_auth_none_level_request(self, auth_level):
+ # Helper: bind WITHOUT auth (accepted), issue a plain request
+ # (works), then issue a request that carries an AUTH_TYPE_NONE
+ # trailer at the given auth_level.  The server answers with an
+ # ACCESS_DENIED fault and disconnects.
+ ndr32 = base.transfer_syntax_ndr()
+
+ tsf1_list = [ndr32]
+ ctx1 = dcerpc.ctx_list()
+ ctx1.context_id = 1
+ ctx1.num_transfer_syntaxes = len(tsf1_list)
+ ctx1.abstract_syntax = samba.dcerpc.mgmt.abstract_syntax()
+ ctx1.transfer_syntaxes = tsf1_list
+ ctx_list = [ctx1]
+
+ auth_type = dcerpc.DCERPC_AUTH_TYPE_NONE
+ auth_context_id = 0
+
+ # bind without any auth_info - the context is accepted
+ req = self.generate_bind(call_id=0,
+ ctx_list=ctx_list)
+
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_BIND_ACK, req.call_id)
+ self.assertEqual(rep.u.max_xmit_frag, req.u.max_xmit_frag)
+ self.assertEqual(rep.u.max_recv_frag, req.u.max_recv_frag)
+ self.assertNotEqual(rep.u.assoc_group_id, req.u.assoc_group_id)
+ self.assertEqual(rep.u.secondary_address_size, 4)
+ self.assertEqual(rep.u.secondary_address, "%d" % self.tcp_port)
+ self.assertPadding(rep.u._pad1, 2)
+ self.assertEqual(rep.u.num_results, 1)
+ self.assertEqual(rep.u.ctx_list[0].result,
+ dcerpc.DCERPC_BIND_ACK_RESULT_ACCEPTANCE)
+ self.assertEqual(rep.u.ctx_list[0].reason,
+ dcerpc.DCERPC_BIND_ACK_REASON_NOT_SPECIFIED)
+ self.assertNDRSyntaxEquals(rep.u.ctx_list[0].syntax, ndr32)
+ self.assertEqual(len(rep.u.auth_info), 0)
+
+ # And now try a request without auth_info
+ req = self.generate_request(call_id=2,
+ context_id=ctx1.context_id,
+ opnum=0,
+ stub=b"")
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_RESPONSE, req.call_id,
+ auth_length=0)
+ self.assertNotEqual(rep.u.alloc_hint, 0)
+ self.assertEqual(rep.u.context_id, req.u.context_id)
+ self.assertEqual(rep.u.cancel_count, 0)
+ self.assertGreaterEqual(len(rep.u.stub_and_verifier), rep.u.alloc_hint)
+
+ # A request carrying an AUTH_TYPE_NONE auth trailer is refused.
+ auth_info = self.generate_auth(auth_type=auth_type,
+ auth_level=auth_level,
+ auth_context_id=auth_context_id,
+ auth_blob=b"none")
+
+ req = self.generate_request(call_id=3,
+ context_id=ctx1.context_id,
+ opnum=0,
+ stub=b"",
+ auth_info=auth_info)
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ # We get a fault back
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_FAULT, req.call_id,
+ auth_length=0)
+ self.assertNotEqual(rep.u.alloc_hint, 0)
+ self.assertEqual(rep.u.context_id, req.u.context_id)
+ self.assertEqual(rep.u.cancel_count, 0)
+ self.assertEqual(rep.u.flags, 0)
+ self.assertEqual(rep.u.status, dcerpc.DCERPC_FAULT_ACCESS_DENIED)
+ self.assertEqual(rep.u.reserved, 0)
+ self.assertEqual(len(rep.u.error_and_verifier), 0)
+
+ # wait for a disconnect
+ rep = self.recv_pdu()
+ self.assertIsNone(rep)
+ self.assertNotConnected()
+
+    # Request-path variants of the DCERPC_AUTH_TYPE_NONE rejection test:
+    # one wrapper per auth level a client could put into a request PDU.
+    def test_auth_none_none_request(self):
+        return self._test_auth_none_level_request(dcerpc.DCERPC_AUTH_LEVEL_NONE)
+
+    def test_auth_none_connect_request(self):
+        return self._test_auth_none_level_request(dcerpc.DCERPC_AUTH_LEVEL_CONNECT)
+
+    def test_auth_none_call_request(self):
+        return self._test_auth_none_level_request(dcerpc.DCERPC_AUTH_LEVEL_CALL)
+
+    def test_auth_none_packet_request(self):
+        return self._test_auth_none_level_request(dcerpc.DCERPC_AUTH_LEVEL_PACKET)
+
+ def test_ntlmssp_multi_auth_first1_lastSame2(self):
+ auth_type = dcerpc.DCERPC_AUTH_TYPE_NTLMSSP
+ expected_fault = dcerpc.DCERPC_FAULT_SEC_PKG_ERROR
+ auth_context_2nd = 2
+ expected_call_id = None
+ expected_context_id = None
+ not_executed = False
+ conc_mpx = False
+ forced_call_id = None
+ forced_context_id = None
+ forced_opnum = None
+ forced_auth_context_id = None
+ forced_auth_type = None
+ forced_auth_level = None
+ return self._test_generic_auth_first_last(auth_type,
+ expected_fault,
+ auth_context_2nd=auth_context_2nd,
+ expected_call_id=expected_call_id,
+ expected_context_id=expected_context_id,
+ not_executed=not_executed,
+ conc_mpx=conc_mpx,
+ forced_call_id=forced_call_id,
+ forced_context_id=forced_context_id,
+ forced_opnum=forced_opnum,
+ forced_auth_context_id=forced_auth_context_id,
+ forced_auth_type=forced_auth_type,
+ forced_auth_level=forced_auth_level)
+
+ def test_ntlmssp_multi_auth_first1_lastNext2(self):
+ auth_type = dcerpc.DCERPC_AUTH_TYPE_NTLMSSP
+ expected_fault = dcerpc.DCERPC_NCA_S_PROTO_ERROR
+ auth_context_2nd = 2
+ expected_call_id = None
+ expected_context_id = None
+ not_executed = False
+ conc_mpx = False
+ forced_call_id = 4
+ forced_context_id = None
+ forced_opnum = None
+ forced_auth_context_id = None
+ forced_auth_type = None
+ forced_auth_level = None
+ return self._test_generic_auth_first_last(auth_type,
+ expected_fault,
+ auth_context_2nd=auth_context_2nd,
+ expected_call_id=expected_call_id,
+ expected_context_id=expected_context_id,
+ not_executed=not_executed,
+ conc_mpx=conc_mpx,
+ forced_call_id=forced_call_id,
+ forced_context_id=forced_context_id,
+ forced_opnum=forced_opnum,
+ forced_auth_context_id=forced_auth_context_id,
+ forced_auth_type=forced_auth_type,
+ forced_auth_level=forced_auth_level)
+
+ def test_ntlmssp_multi_auth_first1_lastSame111(self):
+ auth_type = dcerpc.DCERPC_AUTH_TYPE_NTLMSSP
+ expected_fault = None
+ auth_context_2nd = 1
+ expected_call_id = None
+ expected_context_id = None
+ not_executed = False
+ conc_mpx = False
+ forced_call_id = None
+ forced_context_id = 111
+ forced_opnum = 111
+ forced_auth_context_id = 111
+ forced_auth_type = 111
+ forced_auth_level = 111
+ return self._test_generic_auth_first_last(auth_type,
+ expected_fault,
+ auth_context_2nd=auth_context_2nd,
+ expected_call_id=expected_call_id,
+ expected_context_id=expected_context_id,
+ not_executed=not_executed,
+ conc_mpx=conc_mpx,
+ forced_call_id=forced_call_id,
+ forced_context_id=forced_context_id,
+ forced_opnum=forced_opnum,
+ forced_auth_context_id=forced_auth_context_id,
+ forced_auth_type=forced_auth_type,
+ forced_auth_level=forced_auth_level)
+
+ def test_ntlmssp_multi_auth_first1_lastNext111(self):
+ auth_type = dcerpc.DCERPC_AUTH_TYPE_NTLMSSP
+ expected_fault = dcerpc.DCERPC_NCA_S_PROTO_ERROR
+ auth_context_2nd = 1
+ expected_call_id = None
+ expected_context_id = None
+ not_executed = False
+ conc_mpx = False
+ forced_call_id = 4
+ forced_context_id = 111
+ forced_opnum = 111
+ forced_auth_context_id = 111
+ forced_auth_type = 111
+ forced_auth_level = 111
+ return self._test_generic_auth_first_last(auth_type,
+ expected_fault,
+ auth_context_2nd=auth_context_2nd,
+ expected_call_id=expected_call_id,
+ expected_context_id=expected_context_id,
+ not_executed=not_executed,
+ conc_mpx=conc_mpx,
+ forced_call_id=forced_call_id,
+ forced_context_id=forced_context_id,
+ forced_opnum=forced_opnum,
+ forced_auth_context_id=forced_auth_context_id,
+ forced_auth_type=forced_auth_type,
+ forced_auth_level=forced_auth_level)
+
+ def test_ntlmssp_multi_auth_MPX_first1_lastNext111(self):
+ auth_type = dcerpc.DCERPC_AUTH_TYPE_NTLMSSP
+ expected_fault = dcerpc.DCERPC_NCA_S_PROTO_ERROR
+ auth_context_2nd = 1
+ expected_call_id = 4
+ expected_context_id = 0
+ not_executed = False
+ conc_mpx = True
+ forced_call_id = 4
+ forced_context_id = 111
+ forced_opnum = 111
+ forced_auth_context_id = 111
+ forced_auth_type = 111
+ forced_auth_level = 111
+ return self._test_generic_auth_first_last(auth_type,
+ expected_fault,
+ auth_context_2nd=auth_context_2nd,
+ expected_call_id=expected_call_id,
+ expected_context_id=expected_context_id,
+ not_executed=not_executed,
+ conc_mpx=conc_mpx,
+ forced_call_id=forced_call_id,
+ forced_context_id=forced_context_id,
+ forced_opnum=forced_opnum,
+ forced_auth_context_id=forced_auth_context_id,
+ forced_auth_type=forced_auth_type,
+ forced_auth_level=forced_auth_level)
+
+ def test_ntlmssp_multi_auth_first1_lastSameNone(self):
+ auth_type = dcerpc.DCERPC_AUTH_TYPE_NTLMSSP
+ expected_fault = dcerpc.DCERPC_NCA_S_PROTO_ERROR
+ auth_context_2nd = None
+ expected_call_id = None
+ expected_context_id = None
+ not_executed = False
+ conc_mpx = False
+ forced_call_id = None
+ forced_context_id = None
+ forced_opnum = None
+ forced_auth_context_id = None
+ forced_auth_type = None
+ forced_auth_level = None
+ return self._test_generic_auth_first_last(auth_type,
+ expected_fault,
+ auth_context_2nd=auth_context_2nd,
+ expected_call_id=expected_call_id,
+ expected_context_id=expected_context_id,
+ not_executed=not_executed,
+ conc_mpx=conc_mpx,
+ forced_call_id=forced_call_id,
+ forced_context_id=forced_context_id,
+ forced_opnum=forced_opnum,
+ forced_auth_context_id=forced_auth_context_id,
+ forced_auth_type=forced_auth_type,
+ forced_auth_level=forced_auth_level)
+
+ def test_ntlmssp_multi_auth_MPX_first1_lastSameNone(self):
+ auth_type = dcerpc.DCERPC_AUTH_TYPE_NTLMSSP
+ expected_fault = dcerpc.DCERPC_NCA_S_PROTO_ERROR
+ auth_context_2nd = None
+ expected_call_id = None
+ expected_context_id = None
+ not_executed = False
+ conc_mpx = True
+ forced_call_id = None
+ forced_context_id = None
+ forced_opnum = None
+ forced_auth_context_id = None
+ forced_auth_type = None
+ forced_auth_level = None
+ return self._test_generic_auth_first_last(auth_type,
+ expected_fault,
+ auth_context_2nd=auth_context_2nd,
+ expected_call_id=expected_call_id,
+ expected_context_id=expected_context_id,
+ not_executed=not_executed,
+ conc_mpx=conc_mpx,
+ forced_call_id=forced_call_id,
+ forced_context_id=forced_context_id,
+ forced_opnum=forced_opnum,
+ forced_auth_context_id=forced_auth_context_id,
+ forced_auth_type=forced_auth_type,
+ forced_auth_level=forced_auth_level)
+
+ def test_ntlmssp_multi_auth_first1_lastNextNone(self):
+ auth_type = dcerpc.DCERPC_AUTH_TYPE_NTLMSSP
+ expected_fault = dcerpc.DCERPC_NCA_S_PROTO_ERROR
+ auth_context_2nd = None
+ expected_call_id = None
+ expected_context_id = None
+ not_executed = False
+ conc_mpx = False
+ forced_call_id = 4
+ forced_context_id = None
+ forced_opnum = None
+ forced_auth_context_id = None
+ forced_auth_type = None
+ forced_auth_level = None
+ return self._test_generic_auth_first_last(auth_type,
+ expected_fault,
+ auth_context_2nd=auth_context_2nd,
+ expected_call_id=expected_call_id,
+ expected_context_id=expected_context_id,
+ not_executed=not_executed,
+ conc_mpx=conc_mpx,
+ forced_call_id=forced_call_id,
+ forced_context_id=forced_context_id,
+ forced_opnum=forced_opnum,
+ forced_auth_context_id=forced_auth_context_id,
+ forced_auth_type=forced_auth_type,
+ forced_auth_level=forced_auth_level)
+
+ def test_ntlmssp_multi_auth_MPX_first1_lastNextNone(self):
+ auth_type = dcerpc.DCERPC_AUTH_TYPE_NTLMSSP
+ expected_fault = dcerpc.DCERPC_NCA_S_PROTO_ERROR
+ auth_context_2nd = None
+ expected_call_id = 4
+ expected_context_id = 0
+ not_executed = False
+ conc_mpx = True
+ forced_call_id = 4
+ forced_context_id = None
+ forced_opnum = None
+ forced_auth_context_id = None
+ forced_auth_type = None
+ forced_auth_level = None
+ return self._test_generic_auth_first_last(auth_type,
+ expected_fault,
+ auth_context_2nd=auth_context_2nd,
+ expected_call_id=expected_call_id,
+ expected_context_id=expected_context_id,
+ not_executed=not_executed,
+ conc_mpx=conc_mpx,
+ forced_call_id=forced_call_id,
+ forced_context_id=forced_context_id,
+ forced_opnum=forced_opnum,
+ forced_auth_context_id=forced_auth_context_id,
+ forced_auth_type=forced_auth_type,
+ forced_auth_level=forced_auth_level)
+
+ def test_ntlmssp_multi_auth_first1_lastSameNone111(self):
+ auth_type = dcerpc.DCERPC_AUTH_TYPE_NTLMSSP
+ expected_fault = dcerpc.DCERPC_NCA_S_PROTO_ERROR
+ auth_context_2nd = None
+ expected_call_id = None
+ expected_context_id = None
+ not_executed = False
+ conc_mpx = False
+ forced_call_id = None
+ forced_context_id = 111
+ forced_opnum = 111
+ forced_auth_context_id = None
+ forced_auth_type = None
+ forced_auth_level = None
+ return self._test_generic_auth_first_last(auth_type,
+ expected_fault,
+ auth_context_2nd=auth_context_2nd,
+ expected_call_id=expected_call_id,
+ expected_context_id=expected_context_id,
+ not_executed=not_executed,
+ conc_mpx=conc_mpx,
+ forced_call_id=forced_call_id,
+ forced_context_id=forced_context_id,
+ forced_opnum=forced_opnum,
+ forced_auth_context_id=forced_auth_context_id,
+ forced_auth_type=forced_auth_type,
+ forced_auth_level=forced_auth_level)
+
+ def test_ntlmssp_multi_auth_MPX_first1_lastSameNone111(self):
+ auth_type = dcerpc.DCERPC_AUTH_TYPE_NTLMSSP
+ expected_fault = dcerpc.DCERPC_NCA_S_PROTO_ERROR
+ auth_context_2nd = None
+ expected_call_id = None
+ expected_context_id = None
+ not_executed = False
+ conc_mpx = True
+ forced_call_id = None
+ forced_context_id = 111
+ forced_opnum = 111
+ forced_auth_context_id = None
+ forced_auth_type = None
+ forced_auth_level = None
+ return self._test_generic_auth_first_last(auth_type,
+ expected_fault,
+ auth_context_2nd=auth_context_2nd,
+ expected_call_id=expected_call_id,
+ expected_context_id=expected_context_id,
+ not_executed=not_executed,
+ conc_mpx=conc_mpx,
+ forced_call_id=forced_call_id,
+ forced_context_id=forced_context_id,
+ forced_opnum=forced_opnum,
+ forced_auth_context_id=forced_auth_context_id,
+ forced_auth_type=forced_auth_type,
+ forced_auth_level=forced_auth_level)
+
+ def test_ntlmssp_multi_auth_first1_lastNextNone111(self):
+ auth_type = dcerpc.DCERPC_AUTH_TYPE_NTLMSSP
+ expected_fault = dcerpc.DCERPC_NCA_S_PROTO_ERROR
+ auth_context_2nd = None
+ expected_call_id = None
+ expected_context_id = None
+ not_executed = False
+ conc_mpx = False
+ forced_call_id = 4
+ forced_context_id = 111
+ forced_opnum = 111
+ forced_auth_context_id = None
+ forced_auth_type = None
+ forced_auth_level = None
+ return self._test_generic_auth_first_last(auth_type,
+ expected_fault,
+ auth_context_2nd=auth_context_2nd,
+ expected_call_id=expected_call_id,
+ expected_context_id=expected_context_id,
+ not_executed=not_executed,
+ conc_mpx=conc_mpx,
+ forced_call_id=forced_call_id,
+ forced_context_id=forced_context_id,
+ forced_opnum=forced_opnum,
+ forced_auth_context_id=forced_auth_context_id,
+ forced_auth_type=forced_auth_type,
+ forced_auth_level=forced_auth_level)
+
+ def test_ntlmssp_multi_auth_MPX_first1_lastNextNone111(self):
+ auth_type = dcerpc.DCERPC_AUTH_TYPE_NTLMSSP
+ expected_fault = dcerpc.DCERPC_NCA_S_PROTO_ERROR
+ auth_context_2nd = None
+ expected_call_id = 4
+ expected_context_id = 0
+ not_executed = False
+ conc_mpx = True
+ forced_call_id = 4
+ forced_context_id = 111
+ forced_opnum = 111
+ forced_auth_context_id = None
+ forced_auth_type = None
+ forced_auth_level = None
+ return self._test_generic_auth_first_last(auth_type,
+ expected_fault,
+ auth_context_2nd=auth_context_2nd,
+ expected_call_id=expected_call_id,
+ expected_context_id=expected_context_id,
+ not_executed=not_executed,
+ conc_mpx=conc_mpx,
+ forced_call_id=forced_call_id,
+ forced_context_id=forced_context_id,
+ forced_opnum=forced_opnum,
+ forced_auth_context_id=forced_auth_context_id,
+ forced_auth_type=forced_auth_type,
+ forced_auth_level=forced_auth_level)
+
+ def _test_generic_auth_first_2nd(self,
+ auth_type,
+ pfc_flags_2nd,
+ expected_fault,
+ auth_context_2nd=2,
+ skip_first=False,
+ expected_call_id=None,
+ expected_context_id=None,
+ conc_mpx=False,
+ not_executed=False,
+ forced_call_id=None,
+ forced_context_id=None,
+ forced_opnum=None,
+ forced_auth_context_id=None,
+ forced_auth_type=None,
+ forced_auth_level=None):
+ auth_type = dcerpc.DCERPC_AUTH_TYPE_NTLMSSP
+ auth_level1 = dcerpc.DCERPC_AUTH_LEVEL_INTEGRITY
+ auth_context_id1=1
+ auth_level2 = dcerpc.DCERPC_AUTH_LEVEL_PACKET
+ auth_context_id2=2
+
+ creds = self.get_user_creds()
+
+ abstract = samba.dcerpc.mgmt.abstract_syntax()
+ transfer = base.transfer_syntax_ndr()
+
+ tsf1_list = [transfer]
+ ctx = samba.dcerpc.dcerpc.ctx_list()
+ ctx.context_id = 1
+ ctx.num_transfer_syntaxes = len(tsf1_list)
+ ctx.abstract_syntax = abstract
+ ctx.transfer_syntaxes = tsf1_list
+
+ auth_context1 = self.get_auth_context_creds(creds=creds,
+ auth_type=auth_type,
+ auth_level=auth_level1,
+ auth_context_id=auth_context_id1,
+ hdr_signing=False)
+ auth_context2 = self.get_auth_context_creds(creds=creds,
+ auth_type=auth_type,
+ auth_level=auth_level2,
+ auth_context_id=auth_context_id2,
+ hdr_signing=False)
+
+ bind_pfc_flags = dcerpc.DCERPC_PFC_FLAG_FIRST | dcerpc.DCERPC_PFC_FLAG_LAST
+ if conc_mpx:
+ bind_pfc_flags |= dcerpc.DCERPC_PFC_FLAG_CONC_MPX
+
+ ack0 = self.do_generic_bind(call_id=0,
+ ctx=ctx,
+ pfc_flags=bind_pfc_flags)
+
+ ack1 = self.do_generic_bind(call_id=1,
+ ctx=ctx,
+ auth_context=auth_context1,
+ assoc_group_id = ack0.u.assoc_group_id,
+ start_with_alter=True)
+ if auth_context_2nd == 2:
+ ack2 = self.do_generic_bind(call_id=2,
+ ctx=ctx,
+ auth_context=auth_context2,
+ assoc_group_id = ack0.u.assoc_group_id,
+ start_with_alter=True)
+
+ ndr_print = self.do_ndr_print
+ hexdump = self.do_hexdump
+ inq_if_ids = samba.dcerpc.mgmt.inq_if_ids()
+ io = inq_if_ids
+ if ndr_print:
+ sys.stderr.write("in: %s" % samba.ndr.ndr_print_in(io))
+ stub_in = samba.ndr.ndr_pack_in(io)
+ stub_in += b'\xfe'*45 # add some padding in order to have some payload
+ if hexdump:
+ sys.stderr.write("stub_in: %d\n%s" % (len(stub_in), self.hexdump(stub_in)))
+
+ call_id = 3
+ context_id = ctx.context_id
+ opnum = io.opnum()
+
+ if not skip_first:
+ pfc_flags = samba.dcerpc.dcerpc.DCERPC_PFC_FLAG_FIRST
+ stub_in_tmp = stub_in[0:16]
+ req = self.generate_request_auth(call_id=call_id,
+ context_id=context_id,
+ pfc_flags=pfc_flags,
+ opnum=opnum,
+ alloc_hint=len(stub_in),
+ stub=stub_in_tmp,
+ auth_context=auth_context1)
+ self.send_pdu(req, ndr_print=ndr_print, hexdump=hexdump)
+ rep = self.recv_pdu(timeout=0.01)
+ self.assertIsNone(rep)
+ self.assertIsConnected()
+
+ # context_id, opnum and auth header values are completely ignored
+ if auth_context_2nd == 1:
+ auth_context_copy = auth_context1.copy()
+ elif auth_context_2nd == 2:
+ auth_context_copy = auth_context2.copy()
+ else:
+ auth_context_copy = None
+
+ expected_pfc_flags = dcerpc.DCERPC_PFC_FLAG_FIRST | dcerpc.DCERPC_PFC_FLAG_LAST
+ if expected_context_id is None:
+ expected_context_id = context_id
+ if expected_call_id is None:
+ expected_call_id = call_id
+ if not_executed:
+ expected_pfc_flags |= dcerpc.DCERPC_PFC_FLAG_DID_NOT_EXECUTE
+
+ if forced_call_id is not None:
+ call_id = forced_call_id
+ if forced_context_id is not None:
+ context_id = forced_context_id
+ if forced_opnum is not None:
+ opnum = forced_opnum
+ if forced_auth_context_id is not None:
+ auth_context_copy["auth_context_id"] = forced_auth_context_id
+ if forced_auth_type is not None:
+ auth_context_copy["auth_type"] = forced_auth_type
+ if forced_auth_level is not None:
+ auth_context_copy["auth_level"] = forced_auth_level
+
+ pfc_flags = samba.dcerpc.dcerpc.DCERPC_PFC_FLAG_FIRST
+ stub_in_tmp = stub_in[16:-1]
+ req = self.generate_request_auth(call_id=call_id,
+ context_id=context_id,
+ pfc_flags=pfc_flags_2nd,
+ opnum=opnum,
+ alloc_hint=len(stub_in_tmp),
+ stub=stub_in_tmp,
+ auth_context=auth_context_copy)
+ self.send_pdu(req, ndr_print=ndr_print, hexdump=hexdump)
+ if expected_fault is None:
+ self.do_single_request(call_id=3, ctx=ctx, io=io, send_req=False, auth_context=auth_context1)
+ return
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_FAULT, expected_call_id,
+ pfc_flags=expected_pfc_flags,
+ auth_length=0)
+ self.assertNotEqual(rep.u.alloc_hint, 0)
+ self.assertEqual(rep.u.context_id, expected_context_id)
+ self.assertEqual(rep.u.cancel_count, 0)
+ self.assertEqual(rep.u.flags, 0)
+ self.assertEqual(rep.u.status, expected_fault)
+ self.assertEqual(rep.u.reserved, 0)
+ self.assertEqual(len(rep.u.error_and_verifier), 0)
+
+ if not_executed:
+ # still alive
+ rep = self.recv_pdu(timeout=0.01)
+ self.assertIsNone(rep)
+ self.assertIsConnected()
+ return
+
+ # wait for a disconnect
+ rep = self.recv_pdu()
+ self.assertIsNone(rep)
+ self.assertNotConnected()
+
+ def _test_generic_auth_first_last(self,
+ auth_type,
+ expected_fault,
+ auth_context_2nd=2,
+ expected_call_id=None,
+ expected_context_id=None,
+ conc_mpx=False,
+ not_executed=False,
+ forced_call_id=None,
+ forced_context_id=None,
+ forced_opnum=None,
+ forced_auth_context_id=None,
+ forced_auth_type=None,
+ forced_auth_level=None):
+ pfc_flags_2nd = samba.dcerpc.dcerpc.DCERPC_PFC_FLAG_LAST
+ return self._test_generic_auth_first_2nd(auth_type,
+ pfc_flags_2nd,
+ expected_fault,
+ auth_context_2nd=auth_context_2nd,
+ expected_call_id=expected_call_id,
+ expected_context_id=expected_context_id,
+ not_executed=not_executed,
+ conc_mpx=conc_mpx,
+ forced_call_id=forced_call_id,
+ forced_context_id=forced_context_id,
+ forced_opnum=forced_opnum,
+ forced_auth_context_id=forced_auth_context_id,
+ forced_auth_type=forced_auth_type,
+ forced_auth_level=forced_auth_level)
+
+ def _test_generic_auth_first_first(self,
+ auth_type,
+ expected_fault,
+ auth_context_2nd=2,
+ expected_call_id=None,
+ expected_context_id=None,
+ conc_mpx=False,
+ not_executed=False,
+ forced_call_id=None,
+ forced_context_id=None,
+ forced_opnum=None,
+ forced_auth_context_id=None,
+ forced_auth_type=None,
+ forced_auth_level=None):
+ pfc_flags_2nd = samba.dcerpc.dcerpc.DCERPC_PFC_FLAG_FIRST
+ return self._test_generic_auth_first_2nd(auth_type,
+ pfc_flags_2nd,
+ expected_fault,
+ auth_context_2nd=auth_context_2nd,
+ expected_call_id=expected_call_id,
+ expected_context_id=expected_context_id,
+ not_executed=not_executed,
+ conc_mpx=conc_mpx,
+ forced_call_id=forced_call_id,
+ forced_context_id=forced_context_id,
+ forced_opnum=forced_opnum,
+ forced_auth_context_id=forced_auth_context_id,
+ forced_auth_type=forced_auth_type,
+ forced_auth_level=forced_auth_level)
+
+ def test_ntlmssp_multi_auth_first1_firstSame2(self):
+ auth_type = dcerpc.DCERPC_AUTH_TYPE_NTLMSSP
+ expected_fault = dcerpc.DCERPC_FAULT_SEC_PKG_ERROR
+ auth_context_2nd = 2
+ expected_call_id = None
+ expected_context_id = None
+ not_executed = False
+ conc_mpx = False
+ forced_call_id = None
+ forced_context_id = None
+ forced_opnum = None
+ forced_auth_context_id = None
+ forced_auth_type = None
+ forced_auth_level = None
+ return self._test_generic_auth_first_first(auth_type,
+ expected_fault,
+ auth_context_2nd=auth_context_2nd,
+ expected_call_id=expected_call_id,
+ expected_context_id=expected_context_id,
+ not_executed=not_executed,
+ conc_mpx=conc_mpx,
+ forced_call_id=forced_call_id,
+ forced_context_id=forced_context_id,
+ forced_opnum=forced_opnum,
+ forced_auth_context_id=forced_auth_context_id,
+ forced_auth_type=forced_auth_type,
+ forced_auth_level=forced_auth_level)
+
+ def test_ntlmssp_multi_auth_first1_firstNext2(self):
+ auth_type = dcerpc.DCERPC_AUTH_TYPE_NTLMSSP
+ expected_fault = dcerpc.DCERPC_NCA_S_PROTO_ERROR
+ auth_context_2nd = 2
+ expected_call_id = 3
+ expected_context_id = None
+ not_executed = False
+ conc_mpx = False
+ forced_call_id = 4
+ forced_context_id = None
+ forced_opnum = None
+ forced_auth_context_id = None
+ forced_auth_type = None
+ forced_auth_level = None
+ return self._test_generic_auth_first_first(auth_type,
+ expected_fault,
+ auth_context_2nd=auth_context_2nd,
+ expected_call_id=expected_call_id,
+ expected_context_id=expected_context_id,
+ not_executed=not_executed,
+ conc_mpx=conc_mpx,
+ forced_call_id=forced_call_id,
+ forced_context_id=forced_context_id,
+ forced_opnum=forced_opnum,
+ forced_auth_context_id=forced_auth_context_id,
+ forced_auth_type=forced_auth_type,
+ forced_auth_level=forced_auth_level)
+
+ def test_ntlmssp_multi_auth_first1_firstSame111(self):
+ auth_type = dcerpc.DCERPC_AUTH_TYPE_NTLMSSP
+ expected_fault = dcerpc.DCERPC_NCA_S_PROTO_ERROR
+ auth_context_2nd = 1
+ expected_call_id = None
+ expected_context_id = None
+ not_executed = False
+ conc_mpx = False
+ forced_call_id = None
+ forced_context_id = 111
+ forced_opnum = 111
+ forced_auth_context_id = 111
+ forced_auth_type = 111
+ forced_auth_level = 111
+ return self._test_generic_auth_first_first(auth_type,
+ expected_fault,
+ auth_context_2nd=auth_context_2nd,
+ expected_call_id=expected_call_id,
+ expected_context_id=expected_context_id,
+ not_executed=not_executed,
+ conc_mpx=conc_mpx,
+ forced_call_id=forced_call_id,
+ forced_context_id=forced_context_id,
+ forced_opnum=forced_opnum,
+ forced_auth_context_id=forced_auth_context_id,
+ forced_auth_type=forced_auth_type,
+ forced_auth_level=forced_auth_level)
+
+ def test_ntlmssp_multi_auth_MPX_first1_firstSame111(self):
+ auth_type = dcerpc.DCERPC_AUTH_TYPE_NTLMSSP
+ expected_fault = dcerpc.DCERPC_NCA_S_PROTO_ERROR
+ auth_context_2nd = 1
+ expected_call_id = None
+ expected_context_id = None
+ not_executed = False
+ conc_mpx = True
+ forced_call_id = None
+ forced_context_id = 111
+ forced_opnum = 111
+ forced_auth_context_id = 111
+ forced_auth_type = 111
+ forced_auth_level = 111
+ return self._test_generic_auth_first_first(auth_type,
+ expected_fault,
+ auth_context_2nd=auth_context_2nd,
+ expected_call_id=expected_call_id,
+ expected_context_id=expected_context_id,
+ not_executed=not_executed,
+ conc_mpx=conc_mpx,
+ forced_call_id=forced_call_id,
+ forced_context_id=forced_context_id,
+ forced_opnum=forced_opnum,
+ forced_auth_context_id=forced_auth_context_id,
+ forced_auth_type=forced_auth_type,
+ forced_auth_level=forced_auth_level)
+
+ def test_ntlmssp_multi_auth_first1_firstNext111(self):
+ auth_type = dcerpc.DCERPC_AUTH_TYPE_NTLMSSP
+ expected_fault = dcerpc.DCERPC_NCA_S_PROTO_ERROR
+ auth_context_2nd = 1
+ expected_call_id = 3
+ expected_context_id = None
+ not_executed = False
+ conc_mpx = False
+ forced_call_id = 4
+ forced_context_id = 111
+ forced_opnum = 111
+ forced_auth_context_id = 111
+ forced_auth_type = 111
+ forced_auth_level = 111
+ return self._test_generic_auth_first_first(auth_type,
+ expected_fault,
+ auth_context_2nd=auth_context_2nd,
+ expected_call_id=expected_call_id,
+ expected_context_id=expected_context_id,
+ not_executed=not_executed,
+ conc_mpx=conc_mpx,
+ forced_call_id=forced_call_id,
+ forced_context_id=forced_context_id,
+ forced_opnum=forced_opnum,
+ forced_auth_context_id=forced_auth_context_id,
+ forced_auth_type=forced_auth_type,
+ forced_auth_level=forced_auth_level)
+
+ def test_ntlmssp_multi_auth_MPX_first1_firstNext111(self):
+ auth_type = dcerpc.DCERPC_AUTH_TYPE_NTLMSSP
+ expected_fault = dcerpc.DCERPC_NCA_S_PROTO_ERROR
+ auth_context_2nd = 1
+ expected_call_id = 4
+ expected_context_id = 0
+ not_executed = False
+ conc_mpx = True
+ forced_call_id = 4
+ forced_context_id = 111
+ forced_opnum = 111
+ forced_auth_context_id = 111
+ forced_auth_type = 111
+ forced_auth_level = 111
+ return self._test_generic_auth_first_first(auth_type,
+ expected_fault,
+ auth_context_2nd=auth_context_2nd,
+ expected_call_id=expected_call_id,
+ expected_context_id=expected_context_id,
+ not_executed=not_executed,
+ conc_mpx=conc_mpx,
+ forced_call_id=forced_call_id,
+ forced_context_id=forced_context_id,
+ forced_opnum=forced_opnum,
+ forced_auth_context_id=forced_auth_context_id,
+ forced_auth_type=forced_auth_type,
+ forced_auth_level=forced_auth_level)
+
+ def test_ntlmssp_multi_auth_first1_firstSameNone(self):
+ auth_type = dcerpc.DCERPC_AUTH_TYPE_NTLMSSP
+ expected_fault = dcerpc.DCERPC_NCA_S_PROTO_ERROR
+ auth_context_2nd = None
+ expected_call_id = None
+ expected_context_id = None
+ not_executed = False
+ conc_mpx = False
+ forced_call_id = None
+ forced_context_id = None
+ forced_opnum = None
+ forced_auth_context_id = None
+ forced_auth_type = None
+ forced_auth_level = None
+ return self._test_generic_auth_first_first(auth_type,
+ expected_fault,
+ auth_context_2nd=auth_context_2nd,
+ expected_call_id=expected_call_id,
+ expected_context_id=expected_context_id,
+ not_executed=not_executed,
+ conc_mpx=conc_mpx,
+ forced_call_id=forced_call_id,
+ forced_context_id=forced_context_id,
+ forced_opnum=forced_opnum,
+ forced_auth_context_id=forced_auth_context_id,
+ forced_auth_type=forced_auth_type,
+ forced_auth_level=forced_auth_level)
+
+ def test_ntlmssp_multi_auth_MPX_first1_firstSameNone(self):
+ auth_type = dcerpc.DCERPC_AUTH_TYPE_NTLMSSP
+ expected_fault = dcerpc.DCERPC_NCA_S_PROTO_ERROR
+ auth_context_2nd = None
+ expected_call_id = None
+ expected_context_id = None
+ not_executed = False
+ conc_mpx = True
+ forced_call_id = None
+ forced_context_id = None
+ forced_opnum = None
+ forced_auth_context_id = None
+ forced_auth_type = None
+ forced_auth_level = None
+ return self._test_generic_auth_first_first(auth_type,
+ expected_fault,
+ auth_context_2nd=auth_context_2nd,
+ expected_call_id=expected_call_id,
+ expected_context_id=expected_context_id,
+ not_executed=not_executed,
+ conc_mpx=conc_mpx,
+ forced_call_id=forced_call_id,
+ forced_context_id=forced_context_id,
+ forced_opnum=forced_opnum,
+ forced_auth_context_id=forced_auth_context_id,
+ forced_auth_type=forced_auth_type,
+ forced_auth_level=forced_auth_level)
+
+ def test_ntlmssp_multi_auth_first1_firstNextNone(self):
+ auth_type = dcerpc.DCERPC_AUTH_TYPE_NTLMSSP
+ expected_fault = dcerpc.DCERPC_NCA_S_PROTO_ERROR
+ auth_context_2nd = None
+ expected_call_id = None
+ expected_context_id = None
+ not_executed = False
+ conc_mpx = False
+ forced_call_id = 4
+ forced_context_id = None
+ forced_opnum = None
+ forced_auth_context_id = None
+ forced_auth_type = None
+ forced_auth_level = None
+ return self._test_generic_auth_first_first(auth_type,
+ expected_fault,
+ auth_context_2nd=auth_context_2nd,
+ expected_call_id=expected_call_id,
+ expected_context_id=expected_context_id,
+ not_executed=not_executed,
+ conc_mpx=conc_mpx,
+ forced_call_id=forced_call_id,
+ forced_context_id=forced_context_id,
+ forced_opnum=forced_opnum,
+ forced_auth_context_id=forced_auth_context_id,
+ forced_auth_type=forced_auth_type,
+ forced_auth_level=forced_auth_level)
+
+ def test_ntlmssp_multi_auth_MPX_first1_firstNextNone(self):
+ auth_type = dcerpc.DCERPC_AUTH_TYPE_NTLMSSP
+ expected_fault = dcerpc.DCERPC_NCA_S_PROTO_ERROR
+ auth_context_2nd = None
+ expected_call_id = 4
+ expected_context_id = 0
+ not_executed = False
+ conc_mpx = True
+ forced_call_id = 4
+ forced_context_id = None
+ forced_opnum = None
+ forced_auth_context_id = None
+ forced_auth_type = None
+ forced_auth_level = None
+ return self._test_generic_auth_first_first(auth_type,
+ expected_fault,
+ auth_context_2nd=auth_context_2nd,
+ expected_call_id=expected_call_id,
+ expected_context_id=expected_context_id,
+ not_executed=not_executed,
+ conc_mpx=conc_mpx,
+ forced_call_id=forced_call_id,
+ forced_context_id=forced_context_id,
+ forced_opnum=forced_opnum,
+ forced_auth_context_id=forced_auth_context_id,
+ forced_auth_type=forced_auth_type,
+ forced_auth_level=forced_auth_level)
+
+ def test_ntlmssp_multi_auth_first1_firstSameNone111(self):
+ auth_type = dcerpc.DCERPC_AUTH_TYPE_NTLMSSP
+ expected_fault = dcerpc.DCERPC_NCA_S_PROTO_ERROR
+ auth_context_2nd = None
+ expected_call_id = None
+ expected_context_id = None
+ not_executed = False
+ conc_mpx = False
+ forced_call_id = None
+ forced_context_id = 111
+ forced_opnum = 111
+ forced_auth_context_id = None
+ forced_auth_type = None
+ forced_auth_level = None
+ return self._test_generic_auth_first_first(auth_type,
+ expected_fault,
+ auth_context_2nd=auth_context_2nd,
+ expected_call_id=expected_call_id,
+ expected_context_id=expected_context_id,
+ not_executed=not_executed,
+ conc_mpx=conc_mpx,
+ forced_call_id=forced_call_id,
+ forced_context_id=forced_context_id,
+ forced_opnum=forced_opnum,
+ forced_auth_context_id=forced_auth_context_id,
+ forced_auth_type=forced_auth_type,
+ forced_auth_level=forced_auth_level)
+
+ def test_ntlmssp_multi_auth_MPX_first1_firstSameNone111(self):
+ auth_type = dcerpc.DCERPC_AUTH_TYPE_NTLMSSP
+ expected_fault = dcerpc.DCERPC_NCA_S_PROTO_ERROR
+ auth_context_2nd = None
+ expected_call_id = None
+ expected_context_id = None
+ not_executed = False
+ conc_mpx = True
+ forced_call_id = None
+ forced_context_id = 111
+ forced_opnum = 111
+ forced_auth_context_id = None
+ forced_auth_type = None
+ forced_auth_level = None
+ return self._test_generic_auth_first_first(auth_type,
+ expected_fault,
+ auth_context_2nd=auth_context_2nd,
+ expected_call_id=expected_call_id,
+ expected_context_id=expected_context_id,
+ not_executed=not_executed,
+ conc_mpx=conc_mpx,
+ forced_call_id=forced_call_id,
+ forced_context_id=forced_context_id,
+ forced_opnum=forced_opnum,
+ forced_auth_context_id=forced_auth_context_id,
+ forced_auth_type=forced_auth_type,
+ forced_auth_level=forced_auth_level)
+
+    def test_ntlmssp_multi_auth_first1_firstNextNone111(self):
+        # Second NTLMSSP "first" auth leg sent with forced (bogus) call_id=4,
+        # context_id=111 and opnum=111; the server is expected to reply with
+        # a DCERPC_NCA_S_PROTO_ERROR fault.
+        auth_type = dcerpc.DCERPC_AUTH_TYPE_NTLMSSP
+        expected_fault = dcerpc.DCERPC_NCA_S_PROTO_ERROR
+        auth_context_2nd = None
+        expected_call_id = None
+        expected_context_id = None
+        not_executed = False
+        conc_mpx = False
+        forced_call_id = 4
+        forced_context_id = 111
+        forced_opnum = 111
+        forced_auth_context_id = None
+        forced_auth_type = None
+        forced_auth_level = None
+        return self._test_generic_auth_first_first(auth_type,
+                                                   expected_fault,
+                                                   auth_context_2nd=auth_context_2nd,
+                                                   expected_call_id=expected_call_id,
+                                                   expected_context_id=expected_context_id,
+                                                   not_executed=not_executed,
+                                                   conc_mpx=conc_mpx,
+                                                   forced_call_id=forced_call_id,
+                                                   forced_context_id=forced_context_id,
+                                                   forced_opnum=forced_opnum,
+                                                   forced_auth_context_id=forced_auth_context_id,
+                                                   forced_auth_type=forced_auth_type,
+                                                   forced_auth_level=forced_auth_level)
+
+    def test_ntlmssp_multi_auth_MPX_first1_firstNextNone111(self):
+        # Same as the non-MPX variant above, but with concurrent multiplexing
+        # (conc_mpx=True); the fault PDU is then expected to echo the forced
+        # call_id (4) with context_id 0.
+        auth_type = dcerpc.DCERPC_AUTH_TYPE_NTLMSSP
+        expected_fault = dcerpc.DCERPC_NCA_S_PROTO_ERROR
+        auth_context_2nd = None
+        expected_call_id = 4
+        expected_context_id = 0
+        not_executed = False
+        conc_mpx = True
+        forced_call_id = 4
+        forced_context_id = 111
+        forced_opnum = 111
+        forced_auth_context_id = None
+        forced_auth_type = None
+        forced_auth_level = None
+        return self._test_generic_auth_first_first(auth_type,
+                                                   expected_fault,
+                                                   auth_context_2nd=auth_context_2nd,
+                                                   expected_call_id=expected_call_id,
+                                                   expected_context_id=expected_context_id,
+                                                   not_executed=not_executed,
+                                                   conc_mpx=conc_mpx,
+                                                   forced_call_id=forced_call_id,
+                                                   forced_context_id=forced_context_id,
+                                                   forced_opnum=forced_opnum,
+                                                   forced_auth_context_id=forced_auth_context_id,
+                                                   forced_auth_type=forced_auth_type,
+                                                   forced_auth_level=forced_auth_level)
+
+    def _test_generic_auth_middle(self,
+                                  auth_type,
+                                  expected_fault,
+                                  expected_context_id=None,
+                                  not_executed=False,
+                                  conc_mpx=False,
+                                  forced_context_id=None,
+                                  forced_opnum=None,
+                                  forced_auth_context_id=None,
+                                  forced_auth_type=None,
+                                  forced_auth_level=None):
+        # Helper: drive _test_generic_auth_first_2nd so that only a "middle"
+        # auth fragment is sent (skip_first=True, pfc_flags_2nd=0, i.e.
+        # neither FIRST nor LAST set).  call_id handling is left to the
+        # callee (expected_call_id/forced_call_id both None).
+        auth_context_2nd = 1
+        skip_first = True
+        pfc_flags_2nd = 0
+        expected_call_id = None
+        forced_call_id = None
+        return self._test_generic_auth_first_2nd(auth_type,
+                                                 pfc_flags_2nd,
+                                                 expected_fault,
+                                                 auth_context_2nd=auth_context_2nd,
+                                                 skip_first=skip_first,
+                                                 expected_call_id=expected_call_id,
+                                                 expected_context_id=expected_context_id,
+                                                 not_executed=not_executed,
+                                                 conc_mpx=conc_mpx,
+                                                 forced_call_id=forced_call_id,
+                                                 forced_context_id=forced_context_id,
+                                                 forced_opnum=forced_opnum,
+                                                 forced_auth_context_id=forced_auth_context_id,
+                                                 forced_auth_type=forced_auth_type,
+                                                 forced_auth_level=forced_auth_level)
+
+    def test_ntlmssp_auth_middle_alone(self):
+        # A lone "middle" NTLMSSP auth fragment (no FIRST before it) must be
+        # rejected with a protocol-error fault on context 0.
+        auth_type = dcerpc.DCERPC_AUTH_TYPE_NTLMSSP
+        expected_fault = dcerpc.DCERPC_NCA_S_PROTO_ERROR
+        expected_context_id = 0
+        not_executed = False
+        conc_mpx = False
+        forced_context_id = None
+        forced_opnum = None
+        forced_auth_context_id = None
+        forced_auth_type = None
+        forced_auth_level = None
+        return self._test_generic_auth_middle(auth_type,
+                                              expected_fault,
+                                              expected_context_id=expected_context_id,
+                                              not_executed=not_executed,
+                                              conc_mpx=conc_mpx,
+                                              forced_context_id=forced_context_id,
+                                              forced_opnum=forced_opnum,
+                                              forced_auth_context_id=forced_auth_context_id,
+                                              forced_auth_type=forced_auth_type,
+                                              forced_auth_level=forced_auth_level)
+
+    def test_ntlmssp_auth_MPX_middle_alone(self):
+        # As above with concurrent multiplexing; expected_context_id is None
+        # here (the fault's context id is not checked by the helper).
+        auth_type = dcerpc.DCERPC_AUTH_TYPE_NTLMSSP
+        expected_fault = dcerpc.DCERPC_NCA_S_PROTO_ERROR
+        expected_context_id = None
+        not_executed = False
+        conc_mpx = True
+        forced_context_id = None
+        forced_opnum = None
+        forced_auth_context_id = None
+        forced_auth_type = None
+        forced_auth_level = None
+        return self._test_generic_auth_middle(auth_type,
+                                              expected_fault,
+                                              expected_context_id=expected_context_id,
+                                              not_executed=not_executed,
+                                              conc_mpx=conc_mpx,
+                                              forced_context_id=forced_context_id,
+                                              forced_opnum=forced_opnum,
+                                              forced_auth_context_id=forced_auth_context_id,
+                                              forced_auth_type=forced_auth_type,
+                                              forced_auth_level=forced_auth_level)
+
+    def test_ntlmssp_auth_middle_all_111(self):
+        # Middle auth fragment with every forcible field set to the bogus
+        # value 111 (context_id, opnum, auth_context_id, auth_type,
+        # auth_level): expect a protocol-error fault on context 0.
+        auth_type = dcerpc.DCERPC_AUTH_TYPE_NTLMSSP
+        expected_fault = dcerpc.DCERPC_NCA_S_PROTO_ERROR
+        expected_context_id = 0
+        not_executed = False
+        conc_mpx = False
+        forced_context_id = 111
+        forced_opnum = 111
+        forced_auth_context_id = 111
+        forced_auth_type = 111
+        forced_auth_level = 111
+        return self._test_generic_auth_middle(auth_type,
+                                              expected_fault,
+                                              expected_context_id=expected_context_id,
+                                              not_executed=not_executed,
+                                              conc_mpx=conc_mpx,
+                                              forced_context_id=forced_context_id,
+                                              forced_opnum=forced_opnum,
+                                              forced_auth_context_id=forced_auth_context_id,
+                                              forced_auth_type=forced_auth_type,
+                                              forced_auth_level=forced_auth_level)
+
+    def test_ntlmssp_auth_MPX_middle_all_111(self):
+        # With concurrent multiplexing the same PDU instead yields an
+        # UNKNOWN_IF fault with DID_NOT_EXECUTE set (not_executed=True).
+        auth_type = dcerpc.DCERPC_AUTH_TYPE_NTLMSSP
+        expected_fault = dcerpc.DCERPC_NCA_S_UNKNOWN_IF
+        expected_context_id = 0
+        not_executed = True
+        conc_mpx = True
+        forced_context_id = 111
+        forced_opnum = 111
+        forced_auth_context_id = 111
+        forced_auth_type = 111
+        forced_auth_level = 111
+        return self._test_generic_auth_middle(auth_type,
+                                              expected_fault,
+                                              expected_context_id=expected_context_id,
+                                              not_executed=not_executed,
+                                              conc_mpx=conc_mpx,
+                                              forced_context_id=forced_context_id,
+                                              forced_opnum=forced_opnum,
+                                              forced_auth_context_id=forced_auth_context_id,
+                                              forced_auth_type=forced_auth_type,
+                                              forced_auth_level=forced_auth_level)
+
+    def test_ntlmssp_auth_middle_auth_all_111(self):
+        # Middle auth fragment with valid context_id but bogus opnum and
+        # bogus auth trailer fields (auth_context_id/type/level = 111):
+        # expect a protocol-error fault.
+        auth_type = dcerpc.DCERPC_AUTH_TYPE_NTLMSSP
+        expected_fault = dcerpc.DCERPC_NCA_S_PROTO_ERROR
+        expected_context_id = 0
+        not_executed = False
+        conc_mpx = False
+        forced_context_id = None
+        forced_opnum = 111
+        forced_auth_context_id = 111
+        forced_auth_type = 111
+        forced_auth_level = 111
+        return self._test_generic_auth_middle(auth_type,
+                                              expected_fault,
+                                              expected_context_id=expected_context_id,
+                                              not_executed=not_executed,
+                                              conc_mpx=conc_mpx,
+                                              forced_context_id=forced_context_id,
+                                              forced_opnum=forced_opnum,
+                                              forced_auth_context_id=forced_auth_context_id,
+                                              forced_auth_type=forced_auth_type,
+                                              forced_auth_level=forced_auth_level)
+
+    def test_ntlmssp_auth_MPX_middle_auth_all_111(self):
+        # With concurrent multiplexing the same bogus auth trailer yields
+        # DCERPC_FAULT_ACCESS_DENIED instead of a protocol error.
+        auth_type = dcerpc.DCERPC_AUTH_TYPE_NTLMSSP
+        expected_fault = dcerpc.DCERPC_FAULT_ACCESS_DENIED
+        expected_context_id = None
+        not_executed = False
+        conc_mpx = True
+        forced_context_id = None
+        forced_opnum = 111
+        forced_auth_context_id = 111
+        forced_auth_type = 111
+        forced_auth_level = 111
+        return self._test_generic_auth_middle(auth_type,
+                                              expected_fault,
+                                              expected_context_id=expected_context_id,
+                                              not_executed=not_executed,
+                                              conc_mpx=conc_mpx,
+                                              forced_context_id=forced_context_id,
+                                              forced_opnum=forced_opnum,
+                                              forced_auth_context_id=forced_auth_context_id,
+                                              forced_auth_type=forced_auth_type,
+                                              forced_auth_level=forced_auth_level)
+
+    def test_ntlmssp_auth_middle_auth_context_111(self):
+        # Only auth_context_id is forced to the bogus value 111:
+        # expect a protocol-error fault on context 0.
+        auth_type = dcerpc.DCERPC_AUTH_TYPE_NTLMSSP
+        expected_fault = dcerpc.DCERPC_NCA_S_PROTO_ERROR
+        expected_context_id = 0
+        not_executed = False
+        conc_mpx = False
+        forced_context_id = None
+        forced_opnum = None
+        forced_auth_context_id = 111
+        forced_auth_type = None
+        forced_auth_level = None
+        return self._test_generic_auth_middle(auth_type,
+                                              expected_fault,
+                                              expected_context_id=expected_context_id,
+                                              not_executed=not_executed,
+                                              conc_mpx=conc_mpx,
+                                              forced_context_id=forced_context_id,
+                                              forced_opnum=forced_opnum,
+                                              forced_auth_context_id=forced_auth_context_id,
+                                              forced_auth_type=forced_auth_type,
+                                              forced_auth_level=forced_auth_level)
+
+    def test_ntlmssp_auth_MPX_middle_auth_context_111(self):
+        # With concurrent multiplexing the bogus auth_context_id yields
+        # DCERPC_FAULT_ACCESS_DENIED.
+        auth_type = dcerpc.DCERPC_AUTH_TYPE_NTLMSSP
+        expected_fault = dcerpc.DCERPC_FAULT_ACCESS_DENIED
+        expected_context_id = None
+        not_executed = False
+        conc_mpx = True
+        forced_context_id = None
+        forced_opnum = None
+        forced_auth_context_id = 111
+        forced_auth_type = None
+        forced_auth_level = None
+        return self._test_generic_auth_middle(auth_type,
+                                              expected_fault,
+                                              expected_context_id=expected_context_id,
+                                              not_executed=not_executed,
+                                              conc_mpx=conc_mpx,
+                                              forced_context_id=forced_context_id,
+                                              forced_opnum=forced_opnum,
+                                              forced_auth_context_id=forced_auth_context_id,
+                                              forced_auth_type=forced_auth_type,
+                                              forced_auth_level=forced_auth_level)
+
+    def test_ntlmssp_auth_middle_auth_type_111(self):
+        # Only auth_type is forced to the bogus value 111:
+        # expect a protocol-error fault on context 0.
+        auth_type = dcerpc.DCERPC_AUTH_TYPE_NTLMSSP
+        expected_fault = dcerpc.DCERPC_NCA_S_PROTO_ERROR
+        expected_context_id = 0
+        not_executed = False
+        conc_mpx = False
+        forced_context_id = None
+        forced_opnum = None
+        forced_auth_context_id = None
+        forced_auth_type = 111
+        forced_auth_level = None
+        return self._test_generic_auth_middle(auth_type,
+                                              expected_fault,
+                                              expected_context_id=expected_context_id,
+                                              not_executed=not_executed,
+                                              conc_mpx=conc_mpx,
+                                              forced_context_id=forced_context_id,
+                                              forced_opnum=forced_opnum,
+                                              forced_auth_context_id=forced_auth_context_id,
+                                              forced_auth_type=forced_auth_type,
+                                              forced_auth_level=forced_auth_level)
+
+    def test_ntlmssp_auth_MPX_middle_auth_type_111(self):
+        # With concurrent multiplexing the bogus auth_type yields
+        # DCERPC_FAULT_ACCESS_DENIED.
+        auth_type = dcerpc.DCERPC_AUTH_TYPE_NTLMSSP
+        expected_fault = dcerpc.DCERPC_FAULT_ACCESS_DENIED
+        expected_context_id = None
+        not_executed = False
+        conc_mpx = True
+        forced_context_id = None
+        forced_opnum = None
+        forced_auth_context_id = None
+        forced_auth_type = 111
+        forced_auth_level = None
+        return self._test_generic_auth_middle(auth_type,
+                                              expected_fault,
+                                              expected_context_id=expected_context_id,
+                                              not_executed=not_executed,
+                                              conc_mpx=conc_mpx,
+                                              forced_context_id=forced_context_id,
+                                              forced_opnum=forced_opnum,
+                                              forced_auth_context_id=forced_auth_context_id,
+                                              forced_auth_type=forced_auth_type,
+                                              forced_auth_level=forced_auth_level)
+
+    def test_ntlmssp_auth_middle_auth_level_111(self):
+        # Only auth_level is forced to the bogus value 111:
+        # expect a protocol-error fault on context 0.
+        auth_type = dcerpc.DCERPC_AUTH_TYPE_NTLMSSP
+        expected_fault = dcerpc.DCERPC_NCA_S_PROTO_ERROR
+        expected_context_id = 0
+        not_executed = False
+        conc_mpx = False
+        forced_context_id = None
+        forced_opnum = None
+        forced_auth_context_id = None
+        forced_auth_type = None
+        forced_auth_level = 111
+        return self._test_generic_auth_middle(auth_type,
+                                              expected_fault,
+                                              expected_context_id=expected_context_id,
+                                              not_executed=not_executed,
+                                              conc_mpx=conc_mpx,
+                                              forced_context_id=forced_context_id,
+                                              forced_opnum=forced_opnum,
+                                              forced_auth_context_id=forced_auth_context_id,
+                                              forced_auth_type=forced_auth_type,
+                                              forced_auth_level=forced_auth_level)
+
+    def test_ntlmssp_auth_MPX_middle_auth_level_111(self):
+        # With concurrent multiplexing the bogus auth_level yields
+        # DCERPC_FAULT_ACCESS_DENIED.
+        auth_type = dcerpc.DCERPC_AUTH_TYPE_NTLMSSP
+        expected_fault = dcerpc.DCERPC_FAULT_ACCESS_DENIED
+        expected_context_id = None
+        not_executed = False
+        conc_mpx = True
+        forced_context_id = None
+        forced_opnum = None
+        forced_auth_context_id = None
+        forced_auth_type = None
+        forced_auth_level = 111
+        return self._test_generic_auth_middle(auth_type,
+                                              expected_fault,
+                                              expected_context_id=expected_context_id,
+                                              not_executed=not_executed,
+                                              conc_mpx=conc_mpx,
+                                              forced_context_id=forced_context_id,
+                                              forced_opnum=forced_opnum,
+                                              forced_auth_context_id=forced_auth_context_id,
+                                              forced_auth_type=forced_auth_type,
+                                              forced_auth_level=forced_auth_level)
+
+    def _test_neg_xmit_check_values(self,
+                                    req_xmit=None,
+                                    req_recv=None,
+                                    rep_both=None,
+                                    alter_xmit=None,
+                                    alter_recv=None):
+        # Helper: verify max_xmit_frag/max_recv_frag negotiation.
+        # 1) BIND with req_xmit/req_recv; the BIND_ACK must report rep_both
+        #    for both directions.
+        # 2) ALTER_CONTEXT with different frag sizes and a mangled
+        #    assoc_group_id; both must be ignored by the server.
+        # 3) Requests sized exactly at, and one byte above, the negotiated
+        #    fragment size; the oversized one must fault with PROTO_ERROR
+        #    and the connection must be dropped.
+        ndr32 = base.transfer_syntax_ndr()
+
+        tsf1_list = [ndr32]
+        ctx1 = dcerpc.ctx_list()
+        ctx1.context_id = 1
+        ctx1.num_transfer_syntaxes = len(tsf1_list)
+        ctx1.abstract_syntax = samba.dcerpc.mgmt.abstract_syntax()
+        ctx1.transfer_syntaxes = tsf1_list
+
+        req = self.generate_bind(call_id=0,
+                                 max_xmit_frag=req_xmit,
+                                 max_recv_frag=req_recv,
+                                 ctx_list=[ctx1])
+        self.send_pdu(req)
+        rep = self.recv_pdu()
+        self.verify_pdu(rep, dcerpc.DCERPC_PKT_BIND_ACK, req.call_id,
+                        auth_length=0)
+        self.assertEqual(rep.u.max_xmit_frag, rep_both)
+        self.assertEqual(rep.u.max_recv_frag, rep_both)
+        self.assertNotEqual(rep.u.assoc_group_id, req.u.assoc_group_id)
+        self.assertEqual(rep.u.secondary_address_size, 4)
+        self.assertEqual(rep.u.secondary_address, "%d" % self.tcp_port)
+        self.assertPadding(rep.u._pad1, 2)
+        self.assertEqual(rep.u.num_results, 1)
+        self.assertEqual(rep.u.ctx_list[0].result,
+                         dcerpc.DCERPC_BIND_ACK_RESULT_ACCEPTANCE)
+        self.assertEqual(rep.u.ctx_list[0].reason,
+                         dcerpc.DCERPC_BIND_ACK_REASON_NOT_SPECIFIED)
+        self.assertNDRSyntaxEquals(rep.u.ctx_list[0].syntax, ndr32)
+        self.assertEqual(rep.u.auth_info, b'\0' * 0)
+
+        assoc_group_id = rep.u.assoc_group_id
+        if alter_xmit is None:
+            alter_xmit = rep_both - 8
+        if alter_recv is None:
+            alter_recv = rep_both - 8
+
+        # max_{xmit,recv}_frag and assoc_group_id are completely
+        # ignored in alter_context requests
+        req = self.generate_alter(call_id=1,
+                                  max_xmit_frag=alter_xmit,
+                                  max_recv_frag=alter_recv,
+                                  assoc_group_id=0xffffffff - rep.u.assoc_group_id,
+                                  ctx_list=[ctx1])
+        self.send_pdu(req)
+        rep = self.recv_pdu()
+        self.verify_pdu(rep, dcerpc.DCERPC_PKT_ALTER_RESP, req.call_id,
+                        auth_length=0)
+        self.assertEqual(rep.u.max_xmit_frag, rep_both)
+        self.assertEqual(rep.u.max_recv_frag, rep_both)
+        # NOTE(review): this compares rep.u.assoc_group_id with itself and
+        # can never fail; the saved local 'assoc_group_id' above is unused
+        # and was presumably meant to be the second operand — confirm.
+        self.assertEqual(rep.u.assoc_group_id, rep.u.assoc_group_id)
+        self.assertEqual(rep.u.secondary_address_size, 0)
+        self.assertPadding(rep.u._pad1, 2)
+        self.assertEqual(rep.u.num_results, 1)
+        self.assertEqual(rep.u.ctx_list[0].result,
+                         dcerpc.DCERPC_BIND_ACK_RESULT_ACCEPTANCE)
+        self.assertEqual(rep.u.ctx_list[0].reason,
+                         dcerpc.DCERPC_BIND_ACK_REASON_NOT_SPECIFIED)
+        self.assertNDRSyntaxEquals(rep.u.ctx_list[0].syntax, ndr32)
+        self.assertEqual(rep.u.auth_info, b'\0' * 0)
+
+        # A request that exactly fills the negotiated fragment size.
+        chunk_size = rep_both - dcerpc.DCERPC_REQUEST_LENGTH
+        req = self.generate_request(call_id=2,
+                                    context_id=ctx1.context_id,
+                                    opnum=0,
+                                    alloc_hint=0xffffffff,
+                                    stub=b"\00" * chunk_size)
+        self.send_pdu(req, ndr_print=True, hexdump=True)
+        rep = self.recv_pdu(ndr_print=True, hexdump=True)
+        self.verify_pdu(rep, dcerpc.DCERPC_PKT_RESPONSE, req.call_id,
+                        auth_length=0)
+        self.assertNotEqual(rep.u.alloc_hint, 0)
+        self.assertEqual(rep.u.context_id, req.u.context_id)
+        self.assertEqual(rep.u.cancel_count, 0)
+        self.assertGreaterEqual(len(rep.u.stub_and_verifier), rep.u.alloc_hint)
+
+        # A request sized against the 5840-byte maximum (call_id 2 is reused
+        # here; the previous call with the same id has already completed).
+        chunk_size = 5840 - dcerpc.DCERPC_REQUEST_LENGTH
+        req = self.generate_request(call_id=2,
+                                    context_id=ctx1.context_id,
+                                    opnum=0,
+                                    alloc_hint=0xffffffff,
+                                    stub=b"\00" * chunk_size)
+        self.send_pdu(req)
+        rep = self.recv_pdu()
+        self.verify_pdu(rep, dcerpc.DCERPC_PKT_RESPONSE, req.call_id,
+                        auth_length=0)
+        self.assertNotEqual(rep.u.alloc_hint, 0)
+        self.assertEqual(rep.u.context_id, req.u.context_id)
+        self.assertEqual(rep.u.cancel_count, 0)
+        self.assertGreaterEqual(len(rep.u.stub_and_verifier), rep.u.alloc_hint)
+
+        # One byte over the limit must be rejected.
+        chunk_size += 1
+        req = self.generate_request(call_id=3,
+                                    context_id=ctx1.context_id,
+                                    opnum=0,
+                                    alloc_hint=0xffffffff,
+                                    stub=b"\00" * chunk_size)
+        self.send_pdu(req)
+        rep = self.recv_pdu()
+        # We get a fault
+        self.verify_pdu(rep, dcerpc.DCERPC_PKT_FAULT, req.call_id,
+                        auth_length=0)
+        self.assertNotEqual(rep.u.alloc_hint, 0)
+        self.assertEqual(rep.u.context_id, 0)
+        self.assertEqual(rep.u.cancel_count, 0)
+        self.assertEqual(rep.u.flags, 0)
+        self.assertEqual(rep.u.status, dcerpc.DCERPC_NCA_S_PROTO_ERROR)
+        self.assertEqual(rep.u.reserved, 0)
+        self.assertEqual(len(rep.u.error_and_verifier), 0)
+
+        # wait for a disconnect
+        rep = self.recv_pdu()
+        self.assertIsNone(rep)
+        self.assertNotConnected()
+
+    # Fragment-size negotiation matrix: requested (xmit, recv) pairs and the
+    # value the server is expected to settle on for both directions.
+    # 0xffff/0xffff -> the 5840-byte maximum; anything <= 2048 (including 0)
+    # is clamped up to the 2048 minimum; 3199 is rounded down to 3192
+    # (presumably an 8-byte alignment — confirm against the server code).
+
+    def test_neg_xmit_ffff_ffff(self):
+        return self._test_neg_xmit_check_values(req_xmit=0xffff,
+                                                req_recv=0xffff,
+                                                rep_both=5840)
+
+    def test_neg_xmit_0_ffff(self):
+        return self._test_neg_xmit_check_values(req_xmit=0,
+                                                req_recv=0xffff,
+                                                rep_both=2048,
+                                                alter_xmit=0xffff,
+                                                alter_recv=0xffff)
+
+    def test_neg_xmit_ffff_0(self):
+        return self._test_neg_xmit_check_values(req_xmit=0xffff,
+                                                req_recv=0,
+                                                rep_both=2048)
+
+    def test_neg_xmit_0_0(self):
+        return self._test_neg_xmit_check_values(req_xmit=0,
+                                                req_recv=0,
+                                                rep_both=2048,
+                                                alter_xmit=0xffff,
+                                                alter_recv=0xffff)
+
+    def test_neg_xmit_3199_0(self):
+        return self._test_neg_xmit_check_values(req_xmit=3199,
+                                                req_recv=0,
+                                                rep_both=2048)
+
+    def test_neg_xmit_0_3199(self):
+        return self._test_neg_xmit_check_values(req_xmit=0,
+                                                req_recv=3199,
+                                                rep_both=2048)
+
+    def test_neg_xmit_3199_ffff(self):
+        return self._test_neg_xmit_check_values(req_xmit=3199,
+                                                req_recv=0xffff,
+                                                rep_both=3192)
+
+    def test_neg_xmit_ffff_3199(self):
+        return self._test_neg_xmit_check_values(req_xmit=0xffff,
+                                                req_recv=3199,
+                                                rep_both=3192)
+
+    def test_alloc_hint(self):
+        # Verify the server tolerates arbitrary alloc_hint values in request
+        # PDUs: 0xffffffff with an empty stub, 0xffffffff with a real stub,
+        # and an alloc_hint (1) smaller than the actual stub size.
+        ndr32 = base.transfer_syntax_ndr()
+
+        tsf1_list = [ndr32]
+        ctx = dcerpc.ctx_list()
+        ctx.context_id = 0
+        ctx.num_transfer_syntaxes = len(tsf1_list)
+        ctx.abstract_syntax = samba.dcerpc.mgmt.abstract_syntax()
+        ctx.transfer_syntaxes = tsf1_list
+
+        req = self.generate_bind(call_id=0,
+                                 ctx_list=[ctx])
+        self.send_pdu(req)
+        rep = self.recv_pdu()
+        self.verify_pdu(rep, dcerpc.DCERPC_PKT_BIND_ACK, req.call_id,
+                        auth_length=0)
+        self.assertEqual(rep.u.max_xmit_frag, req.u.max_xmit_frag)
+        self.assertEqual(rep.u.max_recv_frag, req.u.max_recv_frag)
+        self.assertNotEqual(rep.u.assoc_group_id, req.u.assoc_group_id)
+        self.assertEqual(rep.u.secondary_address_size, 4)
+        self.assertEqual(rep.u.secondary_address, "%d" % self.tcp_port)
+        self.assertPadding(rep.u._pad1, 2)
+        self.assertEqual(rep.u.num_results, 1)
+        self.assertEqual(rep.u.ctx_list[0].result,
+                         dcerpc.DCERPC_BIND_ACK_RESULT_ACCEPTANCE)
+        self.assertEqual(rep.u.ctx_list[0].reason,
+                         dcerpc.DCERPC_BIND_ACK_REASON_NOT_SPECIFIED)
+        self.assertNDRSyntaxEquals(rep.u.ctx_list[0].syntax, ndr32)
+        self.assertEqual(rep.u.auth_info, b'\0' * 0)
+
+        # And now try a request without auth_info
+        req = self.generate_request(call_id=2,
+                                    context_id=ctx.context_id,
+                                    opnum=0,
+                                    alloc_hint=0xffffffff,
+                                    stub=b"")
+        self.send_pdu(req)
+        rep = self.recv_pdu()
+        self.verify_pdu(rep, dcerpc.DCERPC_PKT_RESPONSE, req.call_id,
+                        auth_length=0)
+        self.assertNotEqual(rep.u.alloc_hint, 0)
+        self.assertEqual(rep.u.context_id, req.u.context_id)
+        self.assertEqual(rep.u.cancel_count, 0)
+        self.assertGreaterEqual(len(rep.u.stub_and_verifier), rep.u.alloc_hint)
+
+        # opnum 1 with an 8-byte stub and an oversized alloc_hint.
+        req = self.generate_request(call_id=3,
+                                    context_id=ctx.context_id,
+                                    opnum=1,
+                                    alloc_hint=0xffffffff,
+                                    stub=b"\04\00\00\00\00\00\00\00")
+        self.send_pdu(req)
+        rep = self.recv_pdu()
+        self.verify_pdu(rep, dcerpc.DCERPC_PKT_RESPONSE, req.call_id,
+                        auth_length=0)
+        self.assertNotEqual(rep.u.alloc_hint, 0)
+        self.assertEqual(rep.u.context_id, req.u.context_id)
+        self.assertEqual(rep.u.cancel_count, 0)
+        self.assertGreaterEqual(len(rep.u.stub_and_verifier), rep.u.alloc_hint)
+
+        # Same call with alloc_hint smaller than the stub; must still work.
+        req = self.generate_request(call_id=4,
+                                    context_id=ctx.context_id,
+                                    opnum=1,
+                                    alloc_hint=1,
+                                    stub=b"\04\00\00\00\00\00\00\00")
+        self.send_pdu(req)
+        rep = self.recv_pdu()
+        self.verify_pdu(rep, dcerpc.DCERPC_PKT_RESPONSE, req.call_id,
+                        auth_length=0)
+        self.assertNotEqual(rep.u.alloc_hint, 0)
+        self.assertEqual(rep.u.context_id, req.u.context_id)
+        self.assertEqual(rep.u.cancel_count, 0)
+        self.assertGreaterEqual(len(rep.u.stub_and_verifier), rep.u.alloc_hint)
+
+    def _get_netlogon_ctx(self):
+        # Helper: bind a netlogon presentation context (via the endpoint
+        # mapper) and hand-build an NDR stub for netr_ServerReqChallenge:
+        # [in] server name (unique ptr marker 0x00200000 + conformant/varying
+        # UTF-16 string), [in] computer name, [in] 8-byte client challenge.
+        # Returns (ctx, bind_ack, stub_bytes).
+        abstract = samba.dcerpc.netlogon.abstract_syntax()
+        ndr32 = base.transfer_syntax_ndr()
+
+        (ctx, ack) = self.prepare_presentation(abstract, ndr32, context_id=0,
+                                               epmap=True, return_ack=True)
+
+        server = '\\\\' + self.target_hostname
+        if isinstance(server, bytes):
+            server_utf16 = server.decode('utf-8').encode('utf-16-le')
+        else:
+            server_utf16 = server.encode('utf-16-le')
+        computer = 'UNKNOWNCOMPUTER'
+        # NOTE(review): this checks 'server' again; it presumably was meant
+        # to check 'computer'.  Both are always str here, so both bytes
+        # branches are dead Python-2 leftovers — confirm before removing.
+        if isinstance(server, bytes):
+            computer_utf16 = computer.decode('utf-8').encode('utf-16-le')
+        else:
+            computer_utf16 = computer.encode('utf-16-le')
+
+        real_stub = struct.pack('<IIII', 0x00200000,
+                                len(server) + 1, 0, len(server) + 1)
+        real_stub += server_utf16 + b'\x00\x00'
+        # NDR: pad the string to a 4-byte boundary.
+        mod_len = len(real_stub) % 4
+        if mod_len != 0:
+            real_stub += b'\x00' * (4 - mod_len)
+        real_stub += struct.pack('<III',
+                                 len(computer) + 1, 0, len(computer) + 1)
+        real_stub += computer_utf16 + b'\x00\x00'
+        real_stub += b'\x11\x22\x33\x44\x55\x66\x77\x88'
+
+        return (ctx, ack, real_stub)
+
+    def _test_fragmented_requests(self, remaining=None, alloc_hint=None,
+                                  fault_first=None, fault_last=None):
+        # Helper: send a netr_ServerReqChallenge request split into fragments
+        # of the negotiated max_recv_frag size, 'remaining' bytes in total.
+        # fault_first: expect a fault immediately after the first fragment.
+        # fault_last: expect a fault once more than 0x400000 bytes have been
+        # accumulated.  Otherwise expect a normal 12-byte response ending in
+        # a 4-byte zero status.
+        # (Callers must pass 'remaining' and 'alloc_hint'; the None defaults
+        # would raise TypeError in the comparisons below.)
+        (ctx, rep, real_stub) = self._get_netlogon_ctx()
+
+        chunk = rep.u.max_recv_frag - dcerpc.DCERPC_REQUEST_LENGTH
+
+        total = 0
+        first = True
+        while remaining > 0:
+            thistime = min(remaining, chunk)
+            remaining -= thistime
+            total += thistime
+
+            pfc_flags = 0
+            if first:
+                pfc_flags |= dcerpc.DCERPC_PFC_FLAG_FIRST
+                first = False
+                # First fragment carries the real stub, zero-padded.
+                stub = real_stub + b'\x00' * (thistime - len(real_stub))
+            else:
+                stub = b"\x00" * thistime
+
+            if remaining == 0:
+                pfc_flags |= dcerpc.DCERPC_PFC_FLAG_LAST
+
+            # And now try a request without auth_info
+            # netr_ServerReqChallenge()
+            req = self.generate_request(call_id=0x21234,
+                                        pfc_flags=pfc_flags,
+                                        context_id=ctx.context_id,
+                                        opnum=4,
+                                        alloc_hint=alloc_hint,
+                                        stub=stub)
+            # Decrease the advertised alloc_hint as data is consumed.
+            if alloc_hint >= thistime:
+                alloc_hint -= thistime
+            else:
+                alloc_hint = 0
+            self.send_pdu(req, hexdump=False)
+            if fault_first is not None:
+                rep = self.recv_pdu()
+                # We get a fault back
+                self.verify_pdu(rep, dcerpc.DCERPC_PKT_FAULT, req.call_id,
+                                auth_length=0)
+                self.assertNotEqual(rep.u.alloc_hint, 0)
+                self.assertEqual(rep.u.context_id, req.u.context_id)
+                self.assertEqual(rep.u.cancel_count, 0)
+                self.assertEqual(rep.u.flags, 0)
+                self.assertEqual(rep.u.status, fault_first)
+                self.assertEqual(rep.u.reserved, 0)
+                self.assertEqual(len(rep.u.error_and_verifier), 0)
+
+                # wait for a disconnect
+                rep = self.recv_pdu()
+                self.assertIsNone(rep)
+                self.assertNotConnected()
+                return
+            if remaining == 0:
+                break
+            # Mid-stream fault once the 4MiB total-size limit is exceeded.
+            if total >= 0x400000 and fault_last is not None:
+                rep = self.recv_pdu()
+                # We get a fault back
+                self.verify_pdu(rep, dcerpc.DCERPC_PKT_FAULT, req.call_id,
+                                auth_length=0)
+                self.assertNotEqual(rep.u.alloc_hint, 0)
+                self.assertEqual(rep.u.context_id, req.u.context_id)
+                self.assertEqual(rep.u.cancel_count, 0)
+                self.assertEqual(rep.u.flags, 0)
+                self.assertEqual(rep.u.status, fault_last)
+                self.assertEqual(rep.u.reserved, 0)
+                self.assertEqual(len(rep.u.error_and_verifier), 0)
+
+                # wait for a disconnect
+                rep = self.recv_pdu()
+                self.assertIsNone(rep)
+                self.assertNotConnected()
+                return
+            # No reply expected for intermediate fragments.
+            rep = self.recv_pdu(timeout=0.01)
+            self.assertIsNone(rep)
+            self.assertIsConnected()
+
+        # Fault after the LAST fragment if the 4MiB limit was exceeded.
+        if total >= 0x400000 and fault_last is not None:
+            rep = self.recv_pdu()
+            # We get a fault back
+            self.verify_pdu(rep, dcerpc.DCERPC_PKT_FAULT, req.call_id,
+                            auth_length=0)
+            self.assertNotEqual(rep.u.alloc_hint, 0)
+            self.assertEqual(rep.u.context_id, req.u.context_id)
+            self.assertEqual(rep.u.cancel_count, 0)
+            self.assertEqual(rep.u.flags, 0)
+            self.assertEqual(rep.u.status, fault_last)
+            self.assertEqual(rep.u.reserved, 0)
+            self.assertEqual(len(rep.u.error_and_verifier), 0)
+
+            # wait for a disconnect
+            rep = self.recv_pdu()
+            self.assertIsNone(rep)
+            self.assertNotConnected()
+            return
+        rep = self.recv_pdu()
+        self.verify_pdu(rep, dcerpc.DCERPC_PKT_RESPONSE, req.call_id,
+                        auth_length=0)
+        self.assertNotEqual(rep.u.alloc_hint, 0)
+        self.assertEqual(rep.u.context_id, req.u.context_id)
+        self.assertEqual(rep.u.cancel_count, 0)
+        self.assertGreaterEqual(len(rep.u.stub_and_verifier), rep.u.alloc_hint)
+
+        # netr_ServerReqChallenge response: 8-byte challenge + 4-byte status.
+        self.assertEqual(len(rep.u.stub_and_verifier), 12)
+        status = struct.unpack_from("<I", rep.u.stub_and_verifier, len(rep.u.stub_and_verifier) - 4)
+        self.assertEqual(status[0], 0)
+
+    # Fragmented-request scenarios: totals at/above the 4MiB (0x400000)
+    # limit with various alloc_hints.  04 faults immediately (alloc_hint too
+    # large); 05 faults once the accumulated data exceeds the limit.
+
+    def test_fragmented_requests01(self):
+        return self._test_fragmented_requests(remaining=0x400000,
+                                              alloc_hint=0x400000)
+
+    def test_fragmented_requests02(self):
+        return self._test_fragmented_requests(remaining=0x400000,
+                                              alloc_hint=0x100000)
+
+    def test_fragmented_requests03(self):
+        return self._test_fragmented_requests(remaining=0x400000,
+                                              alloc_hint=0)
+
+    def test_fragmented_requests04(self):
+        return self._test_fragmented_requests(remaining=0x400000,
+                                              alloc_hint=0x400001,
+                                              fault_first=dcerpc.DCERPC_FAULT_ACCESS_DENIED)
+
+    def test_fragmented_requests05(self):
+        return self._test_fragmented_requests(remaining=0x500001,
+                                              alloc_hint=0,
+                                              fault_last=dcerpc.DCERPC_FAULT_ACCESS_DENIED)
+
+    def _test_same_requests(self, pfc_flags, fault_1st=False, fault_2nd=False):
+        # Helper: send two request PDUs with the same call_id (2) and the
+        # given pfc_flags.  fault_1st/fault_2nd select whether the first or
+        # second PDU is expected to draw a PROTO_ERROR fault followed by a
+        # disconnect; otherwise the server stays silent and connected.
+        (ctx, rep, real_stub) = self._get_netlogon_ctx()
+
+        # netr_ServerReqChallenge with given flags
+        req = self.generate_request(call_id=2,
+                                    pfc_flags=pfc_flags,
+                                    context_id=ctx.context_id,
+                                    opnum=4,
+                                    stub=real_stub)
+        self.send_pdu(req)
+        if fault_1st:
+            rep = self.recv_pdu()
+            # We get a fault back
+            self.verify_pdu(rep, dcerpc.DCERPC_PKT_FAULT, req.call_id,
+                            auth_length=0)
+            self.assertNotEqual(rep.u.alloc_hint, 0)
+            self.assertEqual(rep.u.context_id, 0)
+            self.assertEqual(rep.u.cancel_count, 0)
+            self.assertEqual(rep.u.flags, 0)
+            self.assertEqual(rep.u.status, dcerpc.DCERPC_NCA_S_PROTO_ERROR)
+            self.assertEqual(rep.u.reserved, 0)
+            self.assertEqual(len(rep.u.error_and_verifier), 0)
+
+            # wait for a disconnect
+            rep = self.recv_pdu()
+            self.assertIsNone(rep)
+            self.assertNotConnected()
+            return
+        rep = self.recv_pdu(timeout=0.1)
+        self.assertIsNone(rep)
+        self.assertIsConnected()
+
+        # netr_ServerReqChallenge without DCERPC_PFC_FLAG_LAST
+        # with the same call_id
+        req = self.generate_request(call_id=2,
+                                    pfc_flags=pfc_flags,
+                                    context_id=ctx.context_id,
+                                    opnum=4,
+                                    stub=real_stub)
+        self.send_pdu(req)
+        if fault_2nd:
+            rep = self.recv_pdu()
+            # We get a fault back
+            self.verify_pdu(rep, dcerpc.DCERPC_PKT_FAULT, req.call_id,
+                            auth_length=0)
+            self.assertNotEqual(rep.u.alloc_hint, 0)
+            self.assertEqual(rep.u.context_id, req.u.context_id)
+            self.assertEqual(rep.u.cancel_count, 0)
+            self.assertEqual(rep.u.flags, 0)
+            self.assertEqual(rep.u.status, dcerpc.DCERPC_NCA_S_PROTO_ERROR)
+            self.assertEqual(rep.u.reserved, 0)
+            self.assertEqual(len(rep.u.error_and_verifier), 0)
+
+            # wait for a disconnect
+            rep = self.recv_pdu()
+            self.assertIsNone(rep)
+            self.assertNotConnected()
+            return
+
+        rep = self.recv_pdu(timeout=0.1)
+        self.assertIsNone(rep)
+        self.assertIsConnected()
+
+    # Duplicate-call_id scenarios: FIRST-only twice faults on the second
+    # PDU; a fragment with neither FIRST nor LAST, or LAST-only, faults
+    # immediately.  Extra flags on FIRST (MAYBE, DID_NOT_EXECUTE, CONC_MPX,
+    # undefined 0x08) still fault on the duplicate second PDU.
+
+    def test_first_only_requests(self):
+        return self._test_same_requests(pfc_flags=dcerpc.DCERPC_PFC_FLAG_FIRST,
+                                        fault_2nd=True)
+
+    def test_none_only_requests(self):
+        return self._test_same_requests(pfc_flags=0, fault_1st=True)
+
+    def test_last_only_requests(self):
+        return self._test_same_requests(pfc_flags=dcerpc.DCERPC_PFC_FLAG_LAST,
+                                        fault_1st=True)
+
+    def test_first_maybe_requests(self):
+        return self._test_same_requests(pfc_flags=dcerpc.DCERPC_PFC_FLAG_FIRST |
+                                        dcerpc.DCERPC_PFC_FLAG_MAYBE,
+                                        fault_2nd=True)
+
+    def test_first_didnot_requests(self):
+        return self._test_same_requests(pfc_flags=dcerpc.DCERPC_PFC_FLAG_FIRST |
+                                        dcerpc.DCERPC_PFC_FLAG_DID_NOT_EXECUTE,
+                                        fault_2nd=True)
+
+    def test_first_cmpx_requests(self):
+        return self._test_same_requests(pfc_flags=dcerpc.DCERPC_PFC_FLAG_FIRST |
+                                        dcerpc.DCERPC_PFC_FLAG_CONC_MPX,
+                                        fault_2nd=True)
+
+    def test_first_08_requests(self):
+        return self._test_same_requests(pfc_flags=dcerpc.DCERPC_PFC_FLAG_FIRST |
+                                        0x08,
+                                        fault_2nd=True)
+
+    def test_first_cancel_requests(self):
+        # PENDING_CANCEL on the FIRST fragment: the server faults with
+        # DCERPC_FAULT_NO_CALL_ACTIVE (DID_NOT_EXECUTE set) and disconnects.
+        (ctx, rep, real_stub) = self._get_netlogon_ctx()
+
+        # netr_ServerReqChallenge with given flags
+        req = self.generate_request(call_id=2,
+                                    pfc_flags=dcerpc.DCERPC_PFC_FLAG_FIRST |
+                                    dcerpc.DCERPC_PFC_FLAG_PENDING_CANCEL,
+                                    context_id=ctx.context_id,
+                                    opnum=4,
+                                    stub=real_stub)
+        self.send_pdu(req)
+        rep = self.recv_pdu()
+        # We get a fault back
+        self.verify_pdu(rep, dcerpc.DCERPC_PKT_FAULT, req.call_id,
+                        pfc_flags=dcerpc.DCERPC_PFC_FLAG_FIRST |
+                        dcerpc.DCERPC_PFC_FLAG_LAST |
+                        dcerpc.DCERPC_PFC_FLAG_DID_NOT_EXECUTE,
+                        auth_length=0)
+        self.assertNotEqual(rep.u.alloc_hint, 0)
+        self.assertEqual(rep.u.context_id, 0)
+        self.assertEqual(rep.u.cancel_count, 0)
+        self.assertEqual(rep.u.flags, 0)
+        self.assertEqual(rep.u.status, dcerpc.DCERPC_FAULT_NO_CALL_ACTIVE)
+        self.assertEqual(rep.u.reserved, 0)
+        self.assertEqual(len(rep.u.error_and_verifier), 0)
+
+        # wait for a disconnect
+        rep = self.recv_pdu()
+        self.assertIsNone(rep)
+        self.assertNotConnected()
+
+    def test_2nd_cancel_requests(self):
+        # PENDING_CANCEL on a middle fragment is tolerated: FIRST, then a
+        # fragment with only PENDING_CANCEL, then LAST — the call still
+        # completes with a normal 12-byte response and zero status.
+        (ctx, rep, real_stub) = self._get_netlogon_ctx()
+
+        # netr_ServerReqChallenge with given flags
+        req = self.generate_request(call_id=2,
+                                    pfc_flags=dcerpc.DCERPC_PFC_FLAG_FIRST,
+                                    context_id=ctx.context_id,
+                                    opnum=4,
+                                    stub=real_stub)
+        self.send_pdu(req)
+        rep = self.recv_pdu(timeout=0.1)
+        self.assertIsNone(rep)
+        self.assertIsConnected()
+
+        # netr_ServerReqChallenge with given flags
+        req = self.generate_request(call_id=2,
+                                    pfc_flags=dcerpc.DCERPC_PFC_FLAG_PENDING_CANCEL,
+                                    context_id=ctx.context_id,
+                                    opnum=4,
+                                    stub=real_stub)
+        self.send_pdu(req)
+        rep = self.recv_pdu(timeout=0.1)
+        self.assertIsNone(rep)
+        self.assertIsConnected()
+
+        # netr_ServerReqChallenge with given flags
+        req = self.generate_request(call_id=2,
+                                    pfc_flags=dcerpc.DCERPC_PFC_FLAG_LAST,
+                                    context_id=ctx.context_id,
+                                    opnum=4,
+                                    stub=real_stub)
+        self.send_pdu(req)
+        rep = self.recv_pdu()
+        self.verify_pdu(rep, dcerpc.DCERPC_PKT_RESPONSE, req.call_id,
+                        auth_length=0)
+        self.assertNotEqual(rep.u.alloc_hint, 0)
+        self.assertEqual(rep.u.context_id, req.u.context_id)
+        self.assertEqual(rep.u.cancel_count, 0)
+        self.assertGreaterEqual(len(rep.u.stub_and_verifier), rep.u.alloc_hint)
+
+        # 8-byte challenge + 4-byte status; status must be 0.
+        self.assertEqual(len(rep.u.stub_and_verifier), 12)
+        status = struct.unpack_from("<I", rep.u.stub_and_verifier, len(rep.u.stub_and_verifier) - 4)
+        self.assertEqual(status[0], 0)
+
+    def test_last_cancel_requests(self):
+        # PENDING_CANCEL on the LAST fragment is tolerated: stub split after
+        # the first 4 bytes, remainder sent with LAST|PENDING_CANCEL — the
+        # call still completes normally.
+        (ctx, rep, real_stub) = self._get_netlogon_ctx()
+
+        # netr_ServerReqChallenge with given flags
+        req = self.generate_request(call_id=2,
+                                    pfc_flags=dcerpc.DCERPC_PFC_FLAG_FIRST,
+                                    context_id=ctx.context_id,
+                                    opnum=4,
+                                    stub=real_stub[:4])
+        self.send_pdu(req)
+        rep = self.recv_pdu(timeout=0.1)
+        self.assertIsNone(rep)
+        self.assertIsConnected()
+
+        # netr_ServerReqChallenge with given flags
+        req = self.generate_request(call_id=2,
+                                    pfc_flags=dcerpc.DCERPC_PFC_FLAG_LAST |
+                                    dcerpc.DCERPC_PFC_FLAG_PENDING_CANCEL,
+                                    context_id=ctx.context_id,
+                                    opnum=4,
+                                    stub=real_stub[4:])
+        self.send_pdu(req)
+        rep = self.recv_pdu()
+        self.verify_pdu(rep, dcerpc.DCERPC_PKT_RESPONSE, req.call_id,
+                        auth_length=0)
+        self.assertNotEqual(rep.u.alloc_hint, 0)
+        self.assertEqual(rep.u.context_id, req.u.context_id)
+        self.assertEqual(rep.u.cancel_count, 0)
+        self.assertGreaterEqual(len(rep.u.stub_and_verifier), rep.u.alloc_hint)
+
+        # 8-byte challenge + 4-byte status; status must be 0.
+        self.assertEqual(len(rep.u.stub_and_verifier), 12)
+        status = struct.unpack_from("<I", rep.u.stub_and_verifier, len(rep.u.stub_and_verifier) - 4)
+        self.assertEqual(status[0], 0)
+
+    def test_mix_requests(self):
+        # Interleaving a new call (call_id 51) while call 50's fragments are
+        # still incomplete: the server faults the pending call (the fault
+        # carries call_id 50) with PROTO_ERROR.
+        (ctx, rep, real_stub) = self._get_netlogon_ctx()
+
+        # netr_ServerReqChallenge with given flags
+        req = self.generate_request(call_id=50,
+                                    pfc_flags=dcerpc.DCERPC_PFC_FLAG_FIRST,
+                                    context_id=ctx.context_id,
+                                    opnum=4,
+                                    stub=real_stub)
+        self.send_pdu(req)
+        rep = self.recv_pdu(timeout=0.1)
+        self.assertIsNone(rep)
+        self.assertIsConnected()
+
+        # netr_ServerReqChallenge with given flags
+        req = self.generate_request(call_id=51,
+                                    pfc_flags=dcerpc.DCERPC_PFC_FLAG_FIRST,
+                                    context_id=ctx.context_id,
+                                    opnum=4,
+                                    stub=real_stub)
+        self.send_pdu(req)
+        rep = self.recv_pdu()
+        # We get a fault back
+        self.verify_pdu(rep, dcerpc.DCERPC_PKT_FAULT, 50,
+                        pfc_flags=dcerpc.DCERPC_PFC_FLAG_FIRST |
+                        dcerpc.DCERPC_PFC_FLAG_LAST,
+                        auth_length=0)
+        self.assertNotEqual(rep.u.alloc_hint, 0)
+        self.assertEqual(rep.u.context_id, req.u.context_id)
+        self.assertEqual(rep.u.cancel_count, 0)
+        self.assertEqual(rep.u.flags, 0)
+        self.assertEqual(rep.u.status, dcerpc.DCERPC_NCA_S_PROTO_ERROR)
+        self.assertEqual(rep.u.reserved, 0)
+        self.assertEqual(len(rep.u.error_and_verifier), 0)
+
+    def test_co_cancel_no_request(self):
+        # A CO_CANCEL PDU with no matching outstanding request is silently
+        # ignored; a subsequent normal request still works.
+        ndr32 = base.transfer_syntax_ndr()
+        abstract = samba.dcerpc.mgmt.abstract_syntax()
+        ctx = self.prepare_presentation(abstract, ndr32, context_id=0xff)
+
+        req = self.generate_co_cancel(call_id=3)
+        self.send_pdu(req)
+        rep = self.recv_pdu(timeout=0.01)
+        self.assertIsNone(rep)
+        self.assertIsConnected()
+
+        # And now try a request
+        req = self.generate_request(call_id=1,
+                                    context_id=ctx.context_id,
+                                    opnum=0,
+                                    stub=b"")
+        self.send_pdu(req)
+        rep = self.recv_pdu()
+        self.verify_pdu(rep, dcerpc.DCERPC_PKT_RESPONSE, req.call_id,
+                        auth_length=0)
+        self.assertNotEqual(rep.u.alloc_hint, 0)
+        self.assertEqual(rep.u.context_id, req.u.context_id)
+        self.assertEqual(rep.u.cancel_count, 0)
+        self.assertGreaterEqual(len(rep.u.stub_and_verifier), rep.u.alloc_hint)
+
+    def test_co_cancel_request_after_first(self):
+        # A CO_CANCEL in the middle of a fragmented request (after FIRST,
+        # before LAST) is ignored: the call completes and the connection
+        # remains usable for a further request.
+        ndr32 = base.transfer_syntax_ndr()
+        abstract = samba.dcerpc.mgmt.abstract_syntax()
+        ctx = self.prepare_presentation(abstract, ndr32, context_id=0xff)
+
+        req = self.generate_request(call_id=1,
+                                    pfc_flags=dcerpc.DCERPC_PFC_FLAG_FIRST,
+                                    context_id=ctx.context_id,
+                                    opnum=0,
+                                    stub=b"")
+        self.send_pdu(req)
+        rep = self.recv_pdu(timeout=0.01)
+        self.assertIsNone(rep)
+        self.assertIsConnected()
+
+        req = self.generate_co_cancel(call_id=1)
+        self.send_pdu(req)
+        rep = self.recv_pdu(timeout=0.01)
+        self.assertIsNone(rep)
+        self.assertIsConnected()
+
+        req = self.generate_request(call_id=1,
+                                    pfc_flags=dcerpc.DCERPC_PFC_FLAG_LAST,
+                                    context_id=ctx.context_id,
+                                    opnum=0,
+                                    stub=b"")
+        self.send_pdu(req)
+        rep = self.recv_pdu()
+        self.verify_pdu(rep, dcerpc.DCERPC_PKT_RESPONSE, req.call_id,
+                        auth_length=0)
+        self.assertNotEqual(rep.u.alloc_hint, 0)
+        self.assertEqual(rep.u.context_id, req.u.context_id)
+        self.assertEqual(rep.u.cancel_count, 0)
+        self.assertGreaterEqual(len(rep.u.stub_and_verifier), rep.u.alloc_hint)
+
+        # And now try a request
+        req = self.generate_request(call_id=2,
+                                    context_id=ctx.context_id,
+                                    opnum=0,
+                                    stub=b"")
+        self.send_pdu(req)
+        rep = self.recv_pdu()
+        self.verify_pdu(rep, dcerpc.DCERPC_PKT_RESPONSE, req.call_id,
+                        auth_length=0)
+        self.assertNotEqual(rep.u.alloc_hint, 0)
+        self.assertEqual(rep.u.context_id, req.u.context_id)
+        self.assertEqual(rep.u.cancel_count, 0)
+        self.assertGreaterEqual(len(rep.u.stub_and_verifier), rep.u.alloc_hint)
+
+    def test_orphaned_no_request(self):
+        """An orphaned PDU with no pending call is ignored.
+
+        The connection must stay up and a subsequent request must be
+        answered normally.
+        """
+        ndr32 = base.transfer_syntax_ndr()
+        abstract = samba.dcerpc.mgmt.abstract_syntax()
+        ctx = self.prepare_presentation(abstract, ndr32)
+
+        # Orphaned for a call_id that was never used: no reply expected.
+        req = self.generate_orphaned(call_id=3)
+        self.send_pdu(req)
+        rep = self.recv_pdu(timeout=0.01)
+        self.assertIsNone(rep)
+        self.assertIsConnected()
+
+        # And now try a request
+        req = self.generate_request(call_id=1,
+                                    context_id=ctx.context_id,
+                                    opnum=0,
+                                    stub=b"")
+        self.send_pdu(req)
+        rep = self.recv_pdu()
+        self.verify_pdu(rep, dcerpc.DCERPC_PKT_RESPONSE, req.call_id,
+                        auth_length=0)
+        self.assertNotEqual(rep.u.alloc_hint, 0)
+        self.assertEqual(rep.u.context_id, req.u.context_id & 0xff)
+        self.assertEqual(rep.u.cancel_count, 0)
+        self.assertGreaterEqual(len(rep.u.stub_and_verifier), rep.u.alloc_hint)
+
+    def test_orphaned_request_after_first_last(self):
+        """An orphaned PDU between request fragments is ignored.
+
+        FIRST fragment of call 1, an orphaned PDU for the same call_id,
+        then the LAST fragment: the server still answers the reassembled
+        request, and a later call also works.
+        """
+        ndr32 = base.transfer_syntax_ndr()
+        abstract = samba.dcerpc.mgmt.abstract_syntax()
+        ctx = self.prepare_presentation(abstract, ndr32)
+
+        # First fragment only: no response is expected yet.
+        req = self.generate_request(call_id=1,
+                                    pfc_flags=dcerpc.DCERPC_PFC_FLAG_FIRST,
+                                    context_id=ctx.context_id,
+                                    opnum=0,
+                                    stub=b"")
+        self.send_pdu(req)
+        rep = self.recv_pdu(timeout=0.1)
+        self.assertIsNone(rep)
+        self.assertIsConnected()
+
+        # Orphaned for the in-flight call: silently ignored.
+        req = self.generate_orphaned(call_id=1)
+        self.send_pdu(req)
+        rep = self.recv_pdu(timeout=0.1)
+        self.assertIsNone(rep)
+        self.assertIsConnected()
+
+        # LAST fragment completes the request; a normal response follows.
+        req = self.generate_request(call_id=1,
+                                    pfc_flags=dcerpc.DCERPC_PFC_FLAG_LAST,
+                                    context_id=ctx.context_id,
+                                    opnum=0,
+                                    stub=b"")
+        self.send_pdu(req)
+        rep = self.recv_pdu()
+        self.verify_pdu(rep, dcerpc.DCERPC_PKT_RESPONSE, req.call_id,
+                        auth_length=0)
+        self.assertNotEqual(rep.u.alloc_hint, 0)
+        self.assertEqual(rep.u.context_id, req.u.context_id & 0xff)
+        self.assertEqual(rep.u.cancel_count, 0)
+        self.assertGreaterEqual(len(rep.u.stub_and_verifier), rep.u.alloc_hint)
+
+        # And now try a request
+        req = self.generate_request(call_id=2,
+                                    context_id=ctx.context_id,
+                                    opnum=0,
+                                    stub=b"")
+        self.send_pdu(req)
+        rep = self.recv_pdu()
+        self.verify_pdu(rep, dcerpc.DCERPC_PKT_RESPONSE, req.call_id,
+                        auth_length=0)
+        self.assertNotEqual(rep.u.alloc_hint, 0)
+        self.assertEqual(rep.u.context_id, req.u.context_id & 0xff)
+        self.assertEqual(rep.u.cancel_count, 0)
+        self.assertGreaterEqual(len(rep.u.stub_and_verifier), rep.u.alloc_hint)
+
+    def test_orphaned_request_after_first_mpx_last(self):
+        """Orphaned between fragments is ignored with CONC_MPX on.
+
+        Same as test_orphaned_request_after_first_last, but the bind
+        negotiates DCERPC_PFC_FLAG_CONC_MPX; the outcome is unchanged.
+        """
+        ndr32 = base.transfer_syntax_ndr()
+        abstract = samba.dcerpc.mgmt.abstract_syntax()
+
+        # Negotiate concurrent multiplexing on the association.
+        pfc_flags = samba.dcerpc.dcerpc.DCERPC_PFC_FLAG_FIRST
+        pfc_flags |= samba.dcerpc.dcerpc.DCERPC_PFC_FLAG_LAST
+        pfc_flags |= samba.dcerpc.dcerpc.DCERPC_PFC_FLAG_CONC_MPX
+        ctx = self.prepare_presentation(abstract, ndr32, pfc_flags=pfc_flags)
+
+        # First fragment only: no response is expected yet.
+        req = self.generate_request(call_id=1,
+                                    pfc_flags=dcerpc.DCERPC_PFC_FLAG_FIRST,
+                                    context_id=ctx.context_id,
+                                    opnum=0,
+                                    stub=b"")
+        self.send_pdu(req)
+        rep = self.recv_pdu(timeout=0.1)
+        self.assertIsNone(rep)
+        self.assertIsConnected()
+
+        # Orphaned for the in-flight call: silently ignored.
+        req = self.generate_orphaned(call_id=1)
+        self.send_pdu(req)
+        rep = self.recv_pdu(timeout=0.1)
+        self.assertIsNone(rep)
+        self.assertIsConnected()
+
+        # LAST fragment completes the request; a normal response follows.
+        req = self.generate_request(call_id=1,
+                                    pfc_flags=dcerpc.DCERPC_PFC_FLAG_LAST,
+                                    context_id=ctx.context_id,
+                                    opnum=0,
+                                    stub=b"")
+        self.send_pdu(req)
+        rep = self.recv_pdu()
+        self.verify_pdu(rep, dcerpc.DCERPC_PKT_RESPONSE, req.call_id,
+                        auth_length=0)
+        self.assertNotEqual(rep.u.alloc_hint, 0)
+        self.assertEqual(rep.u.context_id, req.u.context_id & 0xff)
+        self.assertEqual(rep.u.cancel_count, 0)
+        self.assertGreaterEqual(len(rep.u.stub_and_verifier), rep.u.alloc_hint)
+
+        # And now try a request
+        req = self.generate_request(call_id=2,
+                                    context_id=ctx.context_id,
+                                    opnum=0,
+                                    stub=b"")
+        self.send_pdu(req)
+        rep = self.recv_pdu()
+        self.verify_pdu(rep, dcerpc.DCERPC_PKT_RESPONSE, req.call_id,
+                        auth_length=0)
+        self.assertNotEqual(rep.u.alloc_hint, 0)
+        self.assertEqual(rep.u.context_id, req.u.context_id & 0xff)
+        self.assertEqual(rep.u.cancel_count, 0)
+        self.assertGreaterEqual(len(rep.u.stub_and_verifier), rep.u.alloc_hint)
+
+    def test_orphaned_request_after_first_no_last(self):
+        """A second call while a fragmented call is open is a fault.
+
+        Without DCERPC_PFC_FLAG_CONC_MPX the orphaned PDU does not
+        abort call 1, so starting call 2 while call 1 is incomplete is
+        a protocol error: the fault carries req1's call_id and context
+        and the server then disconnects.
+        """
+        ndr32 = base.transfer_syntax_ndr()
+        abstract = samba.dcerpc.mgmt.abstract_syntax()
+        ctx = self.prepare_presentation(abstract, ndr32)
+
+        # First fragment only: no response is expected yet.
+        req1 = self.generate_request(call_id=1,
+                                     pfc_flags=dcerpc.DCERPC_PFC_FLAG_FIRST,
+                                     context_id=ctx.context_id,
+                                     opnum=0,
+                                     stub=b"")
+        self.send_pdu(req1)
+        rep = self.recv_pdu(timeout=0.1)
+        self.assertIsNone(rep)
+        self.assertIsConnected()
+
+        # Orphaned for the in-flight call: silently ignored.
+        req = self.generate_orphaned(call_id=1)
+        self.send_pdu(req)
+        rep = self.recv_pdu(timeout=0.1)
+        self.assertIsNone(rep)
+        self.assertIsConnected()
+
+        # And now try a new request
+        req2 = self.generate_request(call_id=2,
+                                     context_id=ctx.context_id,
+                                     opnum=0,
+                                     stub=b"")
+        self.send_pdu(req2)
+        rep = self.recv_pdu()
+        # The fault references the still-pending first call (req1).
+        self.verify_pdu(rep, dcerpc.DCERPC_PKT_FAULT, req1.call_id,
+                        auth_length=0)
+        self.assertNotEqual(rep.u.alloc_hint, 0)
+        self.assertEqual(rep.u.context_id, req1.u.context_id)
+        self.assertEqual(rep.u.cancel_count, 0)
+        self.assertEqual(rep.u.flags, 0)
+        self.assertEqual(rep.u.status, dcerpc.DCERPC_NCA_S_PROTO_ERROR)
+        self.assertEqual(rep.u.reserved, 0)
+        self.assertEqual(len(rep.u.error_and_verifier), 0)
+
+        # wait for a disconnect
+        rep = self.recv_pdu()
+        self.assertIsNone(rep)
+        self.assertNotConnected()
+
+    def test_orphaned_request_after_first_mpx_no_last(self):
+        """With CONC_MPX, a bad second call faults on its own call_id.
+
+        With DCERPC_PFC_FLAG_CONC_MPX negotiated, call 2 is processed
+        concurrently; it targets ctx.context_id - 1 (presumably an
+        unknown presentation context -- verify against the server
+        implementation), which yields DCERPC_NCA_S_PROTO_ERROR on
+        req2's call_id followed by a disconnect.
+        """
+        ndr32 = base.transfer_syntax_ndr()
+        abstract = samba.dcerpc.mgmt.abstract_syntax()
+
+        # Negotiate concurrent multiplexing on the association.
+        pfc_flags = samba.dcerpc.dcerpc.DCERPC_PFC_FLAG_FIRST
+        pfc_flags |= samba.dcerpc.dcerpc.DCERPC_PFC_FLAG_LAST
+        pfc_flags |= samba.dcerpc.dcerpc.DCERPC_PFC_FLAG_CONC_MPX
+        ctx = self.prepare_presentation(abstract, ndr32,
+                                        pfc_flags=pfc_flags)
+
+        # First fragment only: no response is expected yet.
+        req1 = self.generate_request(call_id=1,
+                                     pfc_flags=dcerpc.DCERPC_PFC_FLAG_FIRST,
+                                     context_id=ctx.context_id,
+                                     opnum=0,
+                                     stub=b"")
+        self.send_pdu(req1)
+        rep = self.recv_pdu(timeout=0.1)
+        self.assertIsNone(rep)
+        self.assertIsConnected()
+
+        # Orphaned for the in-flight call: silently ignored.
+        req = self.generate_orphaned(call_id=1)
+        self.send_pdu(req)
+        rep = self.recv_pdu(timeout=0.1)
+        self.assertIsNone(rep)
+        self.assertIsConnected()
+
+        # And now try a new request
+        req2 = self.generate_request(call_id=2,
+                                     context_id=ctx.context_id - 1,
+                                     opnum=0,
+                                     stub=b"")
+        self.send_pdu(req2)
+        rep = self.recv_pdu()
+        self.verify_pdu(rep, dcerpc.DCERPC_PKT_FAULT, req2.call_id,
+                        auth_length=0)
+        self.assertNotEqual(rep.u.alloc_hint, 0)
+        self.assertEqual(rep.u.context_id, 0)
+        self.assertEqual(rep.u.cancel_count, 0)
+        self.assertEqual(rep.u.flags, 0)
+        self.assertEqual(rep.u.status, dcerpc.DCERPC_NCA_S_PROTO_ERROR)
+        self.assertEqual(rep.u.reserved, 0)
+        self.assertEqual(len(rep.u.error_and_verifier), 0)
+
+        # wait for a disconnect
+        rep = self.recv_pdu()
+        self.assertIsNone(rep)
+        self.assertNotConnected()
+
+ def _test_spnego_connect_upgrade_request(self, upgrade_auth_level):
+ ndr32 = base.transfer_syntax_ndr()
+
+ tsf1_list = [ndr32]
+ ctx1 = dcerpc.ctx_list()
+ ctx1.context_id = 1
+ ctx1.num_transfer_syntaxes = len(tsf1_list)
+ ctx1.abstract_syntax = samba.dcerpc.mgmt.abstract_syntax()
+ ctx1.transfer_syntaxes = tsf1_list
+ ctx_list = [ctx1]
+
+ c = self.get_anon_creds()
+ g = gensec.Security.start_client(self.settings)
+ g.set_credentials(c)
+ g.want_feature(gensec.FEATURE_DCE_STYLE)
+ auth_type = dcerpc.DCERPC_AUTH_TYPE_SPNEGO
+ auth_level = dcerpc.DCERPC_AUTH_LEVEL_CONNECT
+ auth_context_id = 2
+ g.start_mech_by_authtype(auth_type, auth_level)
+ from_server = b""
+ (finished, to_server) = g.update(from_server)
+ self.assertFalse(finished)
+
+ auth_info = self.generate_auth(auth_type=auth_type,
+ auth_level=auth_level,
+ auth_context_id=auth_context_id,
+ auth_blob=to_server)
+
+ req = self.generate_bind(call_id=0,
+ ctx_list=ctx_list,
+ auth_info=auth_info)
+
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_BIND_ACK, req.call_id)
+ self.assertEqual(rep.u.max_xmit_frag, req.u.max_xmit_frag)
+ self.assertEqual(rep.u.max_recv_frag, req.u.max_recv_frag)
+ self.assertNotEqual(rep.u.assoc_group_id, req.u.assoc_group_id)
+ self.assertEqual(rep.u.secondary_address_size, 4)
+ self.assertEqual(rep.u.secondary_address, "%d" % self.tcp_port)
+ self.assertPadding(rep.u._pad1, 2)
+ self.assertEqual(rep.u.num_results, 1)
+ self.assertEqual(rep.u.ctx_list[0].result,
+ dcerpc.DCERPC_BIND_ACK_RESULT_ACCEPTANCE)
+ self.assertEqual(rep.u.ctx_list[0].reason,
+ dcerpc.DCERPC_BIND_ACK_REASON_NOT_SPECIFIED)
+ self.assertNDRSyntaxEquals(rep.u.ctx_list[0].syntax, ndr32)
+ self.assertNotEqual(len(rep.u.auth_info), 0)
+ a = self.parse_auth(rep.u.auth_info)
+
+ from_server = a.credentials
+ (finished, to_server) = g.update(from_server)
+ self.assertFalse(finished)
+
+ auth_info = self.generate_auth(auth_type=auth_type,
+ auth_level=auth_level,
+ auth_context_id=auth_context_id,
+ auth_blob=to_server)
+
+ req = self.generate_alter(call_id=0,
+ ctx_list=ctx_list,
+ assoc_group_id=rep.u.assoc_group_id,
+ auth_info=auth_info)
+
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_ALTER_RESP, req.call_id)
+ self.assertEqual(rep.u.max_xmit_frag, req.u.max_xmit_frag)
+ self.assertEqual(rep.u.max_recv_frag, req.u.max_recv_frag)
+ self.assertEqual(rep.u.assoc_group_id, req.u.assoc_group_id)
+ self.assertEqual(rep.u.secondary_address_size, 0)
+ self.assertPadding(rep.u._pad1, 2)
+ self.assertEqual(rep.u.num_results, 1)
+ self.assertEqual(rep.u.ctx_list[0].result,
+ dcerpc.DCERPC_BIND_ACK_RESULT_ACCEPTANCE)
+ self.assertEqual(rep.u.ctx_list[0].reason,
+ dcerpc.DCERPC_BIND_ACK_REASON_NOT_SPECIFIED)
+ self.assertNDRSyntaxEquals(rep.u.ctx_list[0].syntax, ndr32)
+ self.assertNotEqual(len(rep.u.auth_info), 0)
+ a = self.parse_auth(rep.u.auth_info)
+
+ from_server = a.credentials
+ (finished, to_server) = g.update(from_server)
+ self.assertTrue(finished)
+
+ # And now try a request without auth_info
+ req = self.generate_request(call_id=2,
+ context_id=ctx1.context_id,
+ opnum=0,
+ stub=b"")
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_RESPONSE, req.call_id,
+ auth_length=0)
+ self.assertNotEqual(rep.u.alloc_hint, 0)
+ self.assertEqual(rep.u.context_id, req.u.context_id & 0xff)
+ self.assertEqual(rep.u.cancel_count, 0)
+ self.assertGreaterEqual(len(rep.u.stub_and_verifier), rep.u.alloc_hint)
+
+ # Now a request with auth_info DCERPC_AUTH_LEVEL_CONNECT
+ auth_info = self.generate_auth(auth_type=auth_type,
+ auth_level=auth_level,
+ auth_context_id=auth_context_id,
+ auth_blob=b"\x01" +b"\x00" *15)
+ req = self.generate_request(call_id=3,
+ context_id=ctx1.context_id,
+ opnum=0,
+ stub=b"",
+ auth_info=auth_info)
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ # We don't get an auth_info back
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_RESPONSE, req.call_id,
+ auth_length=0)
+ self.assertNotEqual(rep.u.alloc_hint, 0)
+ self.assertEqual(rep.u.context_id, req.u.context_id & 0xff)
+ self.assertEqual(rep.u.cancel_count, 0)
+ self.assertGreaterEqual(len(rep.u.stub_and_verifier), rep.u.alloc_hint)
+
+ # Now a request with auth_info upgrade_auth_level
+ auth_info = self.generate_auth(auth_type=auth_type,
+ auth_level=upgrade_auth_level,
+ auth_context_id=auth_context_id,
+ auth_blob=b"\x01" + b"\x00" * 15)
+ req = self.generate_request(call_id=4,
+ context_id=ctx1.context_id,
+ opnum=0,
+ stub=b"",
+ auth_info=auth_info)
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ # We get a fault back
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_FAULT, req.call_id,
+ auth_length=0)
+ self.assertNotEqual(rep.u.alloc_hint, 0)
+ self.assertEqual(rep.u.context_id, req.u.context_id)
+ self.assertEqual(rep.u.cancel_count, 0)
+ self.assertEqual(rep.u.flags, 0)
+ self.assertEqual(rep.u.status, dcerpc.DCERPC_FAULT_ACCESS_DENIED)
+ self.assertEqual(rep.u.reserved, 0)
+ self.assertEqual(len(rep.u.error_and_verifier), 0)
+
+ # wait for a disconnect
+ rep = self.recv_pdu()
+ self.assertIsNone(rep)
+ self.assertNotConnected()
+
+ def test_spnego_connect_packet_upgrade(self):
+ return self._test_spnego_connect_upgrade_request(
+ dcerpc.DCERPC_AUTH_LEVEL_PACKET)
+
+ def test_spnego_connect_integrity_upgrade(self):
+ return self._test_spnego_connect_upgrade_request(
+ dcerpc.DCERPC_AUTH_LEVEL_INTEGRITY)
+
+    def _test_spnego_connect_downgrade_request(self, initial_auth_level):
+        """Bind with SPNEGO at *initial_auth_level*, then try to downgrade.
+
+        After the three-leg SPNEGO handshake completes at
+        *initial_auth_level*, a request whose auth trailer claims
+        DCERPC_AUTH_LEVEL_CONNECT instead must be rejected with
+        DCERPC_FAULT_ACCESS_DENIED and the connection dropped.
+        """
+        ndr32 = base.transfer_syntax_ndr()
+
+        tsf1_list = [ndr32]
+        ctx1 = dcerpc.ctx_list()
+        ctx1.context_id = 1
+        ctx1.num_transfer_syntaxes = len(tsf1_list)
+        ctx1.abstract_syntax = samba.dcerpc.mgmt.abstract_syntax()
+        ctx1.transfer_syntaxes = tsf1_list
+        ctx_list = [ctx1]
+
+        # Anonymous SPNEGO client at the caller-chosen auth level.
+        c = self.get_anon_creds()
+        g = gensec.Security.start_client(self.settings)
+        g.set_credentials(c)
+        g.want_feature(gensec.FEATURE_DCE_STYLE)
+        auth_type = dcerpc.DCERPC_AUTH_TYPE_SPNEGO
+        auth_level = initial_auth_level
+        auth_context_id = 2
+        g.start_mech_by_authtype(auth_type, auth_level)
+        from_server = b""
+        (finished, to_server) = g.update(from_server)
+        self.assertFalse(finished)
+
+        auth_info = self.generate_auth(auth_type=auth_type,
+                                       auth_level=auth_level,
+                                       auth_context_id=auth_context_id,
+                                       auth_blob=to_server)
+
+        req = self.generate_bind(call_id=0,
+                                 ctx_list=ctx_list,
+                                 auth_info=auth_info)
+
+        self.send_pdu(req)
+        rep = self.recv_pdu()
+        self.verify_pdu(rep, dcerpc.DCERPC_PKT_BIND_ACK, req.call_id)
+        self.assertEqual(rep.u.max_xmit_frag, req.u.max_xmit_frag)
+        self.assertEqual(rep.u.max_recv_frag, req.u.max_recv_frag)
+        self.assertNotEqual(rep.u.assoc_group_id, req.u.assoc_group_id)
+        self.assertEqual(rep.u.secondary_address_size, 4)
+        self.assertEqual(rep.u.secondary_address, "%d" % self.tcp_port)
+        self.assertPadding(rep.u._pad1, 2)
+        self.assertEqual(rep.u.num_results, 1)
+        self.assertEqual(rep.u.ctx_list[0].result,
+                         dcerpc.DCERPC_BIND_ACK_RESULT_ACCEPTANCE)
+        self.assertEqual(rep.u.ctx_list[0].reason,
+                         dcerpc.DCERPC_BIND_ACK_REASON_NOT_SPECIFIED)
+        self.assertNDRSyntaxEquals(rep.u.ctx_list[0].syntax, ndr32)
+        self.assertNotEqual(len(rep.u.auth_info), 0)
+        a = self.parse_auth(rep.u.auth_info)
+
+        # Second SPNEGO leg, carried on an alter_context exchange.
+        from_server = a.credentials
+        (finished, to_server) = g.update(from_server)
+        self.assertFalse(finished)
+
+        auth_info = self.generate_auth(auth_type=auth_type,
+                                       auth_level=auth_level,
+                                       auth_context_id=auth_context_id,
+                                       auth_blob=to_server)
+
+        req = self.generate_alter(call_id=0,
+                                  ctx_list=ctx_list,
+                                  assoc_group_id=rep.u.assoc_group_id,
+                                  auth_info=auth_info)
+
+        self.send_pdu(req)
+        rep = self.recv_pdu()
+        self.verify_pdu(rep, dcerpc.DCERPC_PKT_ALTER_RESP, req.call_id)
+        self.assertEqual(rep.u.max_xmit_frag, req.u.max_xmit_frag)
+        self.assertEqual(rep.u.max_recv_frag, req.u.max_recv_frag)
+        self.assertEqual(rep.u.assoc_group_id, req.u.assoc_group_id)
+        self.assertEqual(rep.u.secondary_address_size, 0)
+        self.assertPadding(rep.u._pad1, 2)
+        self.assertEqual(rep.u.num_results, 1)
+        self.assertEqual(rep.u.ctx_list[0].result,
+                         dcerpc.DCERPC_BIND_ACK_RESULT_ACCEPTANCE)
+        self.assertEqual(rep.u.ctx_list[0].reason,
+                         dcerpc.DCERPC_BIND_ACK_REASON_NOT_SPECIFIED)
+        self.assertNDRSyntaxEquals(rep.u.ctx_list[0].syntax, ndr32)
+        self.assertNotEqual(len(rep.u.auth_info), 0)
+        a = self.parse_auth(rep.u.auth_info)
+
+        # Final leg: the client side of the handshake completes here.
+        from_server = a.credentials
+        (finished, to_server) = g.update(from_server)
+        self.assertTrue(finished)
+
+        # Now a request with auth_info DCERPC_AUTH_LEVEL_CONNECT
+        auth_info = self.generate_auth(auth_type=auth_type,
+                                       auth_level=dcerpc.DCERPC_AUTH_LEVEL_CONNECT,
+                                       auth_context_id=auth_context_id,
+                                       auth_blob=b"\x01" + b"\x00" * 15)
+        req = self.generate_request(call_id=3,
+                                    context_id=ctx1.context_id,
+                                    opnum=0,
+                                    stub=b"",
+                                    auth_info=auth_info)
+        self.send_pdu(req)
+        rep = self.recv_pdu()
+        # We get a fault back
+        self.verify_pdu(rep, dcerpc.DCERPC_PKT_FAULT, req.call_id,
+                        auth_length=0)
+        self.assertNotEqual(rep.u.alloc_hint, 0)
+        self.assertEqual(rep.u.context_id, req.u.context_id)
+        self.assertEqual(rep.u.cancel_count, 0)
+        self.assertEqual(rep.u.flags, 0)
+        self.assertEqual(rep.u.status, dcerpc.DCERPC_FAULT_ACCESS_DENIED)
+        self.assertEqual(rep.u.reserved, 0)
+        self.assertEqual(len(rep.u.error_and_verifier), 0)
+
+        # wait for a disconnect
+        rep = self.recv_pdu()
+        self.assertIsNone(rep)
+        self.assertNotConnected()
+
+ def test_spnego_packet_downgrade_connect(self):
+ return self._test_spnego_connect_downgrade_request(
+ dcerpc.DCERPC_AUTH_LEVEL_PACKET)
+
+ def test_spnego_integrity_downgrade_connect(self):
+ return self._test_spnego_connect_upgrade_request(
+ dcerpc.DCERPC_AUTH_LEVEL_INTEGRITY)
+
+ def test_spnego_unfinished_request(self):
+ ndr32 = base.transfer_syntax_ndr()
+
+ tsf1_list = [ndr32]
+ ctx1 = dcerpc.ctx_list()
+ ctx1.context_id = 1
+ ctx1.num_transfer_syntaxes = len(tsf1_list)
+ ctx1.abstract_syntax = samba.dcerpc.mgmt.abstract_syntax()
+ ctx1.transfer_syntaxes = tsf1_list
+ ctx_list = [ctx1]
+
+ c = self.get_anon_creds()
+ g = gensec.Security.start_client(self.settings)
+ g.set_credentials(c)
+ g.want_feature(gensec.FEATURE_DCE_STYLE)
+ auth_type = dcerpc.DCERPC_AUTH_TYPE_SPNEGO
+ auth_level = dcerpc.DCERPC_AUTH_LEVEL_CONNECT
+ auth_context_id = 2
+ g.start_mech_by_authtype(auth_type, auth_level)
+ from_server = b""
+ (finished, to_server) = g.update(from_server)
+ self.assertFalse(finished)
+
+ auth_info = self.generate_auth(auth_type=auth_type,
+ auth_level=auth_level,
+ auth_context_id=auth_context_id,
+ auth_blob=to_server)
+
+ req = self.generate_bind(call_id=0,
+ ctx_list=ctx_list,
+ auth_info=auth_info)
+
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_BIND_ACK, req.call_id)
+ self.assertEqual(rep.u.max_xmit_frag, req.u.max_xmit_frag)
+ self.assertEqual(rep.u.max_recv_frag, req.u.max_recv_frag)
+ self.assertNotEqual(rep.u.assoc_group_id, req.u.assoc_group_id)
+ assoc_group_id = rep.u.assoc_group_id
+ self.assertEqual(rep.u.secondary_address_size, 4)
+ self.assertEqual(rep.u.secondary_address, "%d" % self.tcp_port)
+ self.assertPadding(rep.u._pad1, 2)
+ self.assertEqual(rep.u.num_results, 1)
+ self.assertEqual(rep.u.ctx_list[0].result,
+ dcerpc.DCERPC_BIND_ACK_RESULT_ACCEPTANCE)
+ self.assertEqual(rep.u.ctx_list[0].reason,
+ dcerpc.DCERPC_BIND_ACK_REASON_NOT_SPECIFIED)
+ self.assertNDRSyntaxEquals(rep.u.ctx_list[0].syntax, ndr32)
+ self.assertNotEqual(len(rep.u.auth_info), 0)
+ a = self.parse_auth(rep.u.auth_info)
+
+ from_server = a.credentials
+ (finished, to_server) = g.update(from_server)
+ self.assertFalse(finished)
+
+ # Now a request with auth_info DCERPC_AUTH_LEVEL_CONNECT
+ auth_info = self.generate_auth(auth_type=auth_type,
+ auth_level=auth_level,
+ auth_context_id=auth_context_id,
+ auth_blob=b"\x01" + b"\x00" * 15)
+ req = self.generate_request(call_id=1,
+ context_id=ctx1.context_id,
+ opnum=0,
+ stub=b"",
+ auth_info=auth_info)
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ # We get a fault
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_FAULT, req.call_id,
+ pfc_flags=req.pfc_flags |
+ dcerpc.DCERPC_PFC_FLAG_DID_NOT_EXECUTE,
+ auth_length=0)
+ self.assertNotEqual(rep.u.alloc_hint, 0)
+ self.assertEqual(rep.u.context_id, 0)
+ self.assertEqual(rep.u.cancel_count, 0)
+ self.assertEqual(rep.u.flags, 0)
+ self.assertEqual(rep.u.status, dcerpc.DCERPC_NCA_S_PROTO_ERROR)
+ self.assertEqual(rep.u.reserved, 0)
+ self.assertEqual(len(rep.u.error_and_verifier), 0)
+
+ # wait for a disconnect
+ rep = self.recv_pdu()
+ self.assertIsNone(rep)
+ self.assertNotConnected()
+
+    def test_spnego_auth3(self):
+        """Completing a SPNEGO bind with auth3 leaves auth unusable.
+
+        The second SPNEGO leg is sent as an auth3 PDU (which gets no
+        reply, so the handshake cannot continue -- the client's update
+        still reported finished == False).  A subsequent request is then
+        refused with DCERPC_FAULT_ACCESS_DENIED and the connection is
+        dropped.
+        """
+        ndr32 = base.transfer_syntax_ndr()
+
+        tsf1_list = [ndr32]
+        ctx1 = dcerpc.ctx_list()
+        ctx1.context_id = 1
+        ctx1.num_transfer_syntaxes = len(tsf1_list)
+        ctx1.abstract_syntax = samba.dcerpc.mgmt.abstract_syntax()
+        ctx1.transfer_syntaxes = tsf1_list
+        ctx_list = [ctx1]
+
+        # Anonymous SPNEGO client at connect level.
+        c = self.get_anon_creds()
+        g = gensec.Security.start_client(self.settings)
+        g.set_credentials(c)
+        g.want_feature(gensec.FEATURE_DCE_STYLE)
+        auth_type = dcerpc.DCERPC_AUTH_TYPE_SPNEGO
+        auth_level = dcerpc.DCERPC_AUTH_LEVEL_CONNECT
+        auth_context_id = 2
+        g.start_mech_by_authtype(auth_type, auth_level)
+        from_server = b""
+        (finished, to_server) = g.update(from_server)
+        self.assertFalse(finished)
+
+        auth_info = self.generate_auth(auth_type=auth_type,
+                                       auth_level=auth_level,
+                                       auth_context_id=auth_context_id,
+                                       auth_blob=to_server)
+        req = self.generate_bind(call_id=0,
+                                 ctx_list=ctx_list,
+                                 auth_info=auth_info)
+        self.send_pdu(req)
+        rep = self.recv_pdu()
+        self.verify_pdu(rep, dcerpc.DCERPC_PKT_BIND_ACK, req.call_id)
+        self.assertEqual(rep.u.max_xmit_frag, req.u.max_xmit_frag)
+        self.assertEqual(rep.u.max_recv_frag, req.u.max_recv_frag)
+        self.assertNotEqual(rep.u.assoc_group_id, req.u.assoc_group_id)
+        self.assertEqual(rep.u.secondary_address_size, 4)
+        self.assertEqual(rep.u.secondary_address, "%d" % self.tcp_port)
+        self.assertPadding(rep.u._pad1, 2)
+        self.assertEqual(rep.u.num_results, 1)
+        self.assertEqual(rep.u.ctx_list[0].result,
+                         dcerpc.DCERPC_BIND_ACK_RESULT_ACCEPTANCE)
+        self.assertEqual(rep.u.ctx_list[0].reason,
+                         dcerpc.DCERPC_BIND_ACK_REASON_NOT_SPECIFIED)
+        self.assertNDRSyntaxEquals(rep.u.ctx_list[0].syntax, ndr32)
+        self.assertNotEqual(len(rep.u.auth_info), 0)
+        a = self.parse_auth(rep.u.auth_info)
+
+        from_server = a.credentials
+        (finished, to_server) = g.update(from_server)
+        self.assertFalse(finished)
+
+        # Send the next SPNEGO token via auth3; no reply is expected.
+        auth_info = self.generate_auth(auth_type=auth_type,
+                                       auth_level=auth_level,
+                                       auth_context_id=auth_context_id,
+                                       auth_blob=to_server)
+        req = self.generate_auth3(call_id=0,
+                                  auth_info=auth_info)
+        self.send_pdu(req)
+        rep = self.recv_pdu(timeout=0.01)
+        self.assertIsNone(rep)
+        self.assertIsConnected()
+
+        # And now try a request without auth_info
+        req = self.generate_request(call_id=2,
+                                    context_id=ctx1.context_id,
+                                    opnum=0,
+                                    stub=b"")
+        self.send_pdu(req)
+        rep = self.recv_pdu()
+        # We get a fault back
+        self.verify_pdu(rep, dcerpc.DCERPC_PKT_FAULT, req.call_id,
+                        auth_length=0)
+        self.assertNotEqual(rep.u.alloc_hint, 0)
+        self.assertEqual(rep.u.context_id, req.u.context_id)
+        self.assertEqual(rep.u.cancel_count, 0)
+        self.assertEqual(rep.u.flags, 0)
+        self.assertEqual(rep.u.status, dcerpc.DCERPC_FAULT_ACCESS_DENIED)
+        self.assertEqual(rep.u.reserved, 0)
+        self.assertEqual(len(rep.u.error_and_verifier), 0)
+
+        # wait for a disconnect
+        rep = self.recv_pdu()
+        self.assertIsNone(rep)
+        self.assertNotConnected()
+
+ def test_spnego_connect_reauth_alter(self):
+ ndr32 = base.transfer_syntax_ndr()
+ ndr64 = base.transfer_syntax_ndr64()
+
+ tsf1_list = [ndr32]
+ ctx1 = dcerpc.ctx_list()
+ ctx1.context_id = 1
+ ctx1.num_transfer_syntaxes = len(tsf1_list)
+ ctx1.abstract_syntax = samba.dcerpc.mgmt.abstract_syntax()
+ ctx1.transfer_syntaxes = tsf1_list
+ ctx_list = [ctx1]
+
+ c = self.get_anon_creds()
+ g = gensec.Security.start_client(self.settings)
+ g.set_credentials(c)
+ g.want_feature(gensec.FEATURE_DCE_STYLE)
+ auth_type = dcerpc.DCERPC_AUTH_TYPE_SPNEGO
+ auth_level = dcerpc.DCERPC_AUTH_LEVEL_CONNECT
+ auth_context_id = 2
+ g.start_mech_by_authtype(auth_type, auth_level)
+ from_server = b""
+ (finished, to_server) = g.update(from_server)
+ self.assertFalse(finished)
+
+ auth_info = self.generate_auth(auth_type=auth_type,
+ auth_level=auth_level,
+ auth_context_id=auth_context_id,
+ auth_blob=to_server)
+
+ req = self.generate_bind(call_id=0,
+ ctx_list=ctx_list,
+ auth_info=auth_info)
+
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_BIND_ACK, req.call_id)
+ self.assertEqual(rep.u.max_xmit_frag, req.u.max_xmit_frag)
+ self.assertEqual(rep.u.max_recv_frag, req.u.max_recv_frag)
+ self.assertNotEqual(rep.u.assoc_group_id, req.u.assoc_group_id)
+ self.assertEqual(rep.u.secondary_address_size, 4)
+ self.assertEqual(rep.u.secondary_address, "%d" % self.tcp_port)
+ self.assertPadding(rep.u._pad1, 2)
+ self.assertEqual(rep.u.num_results, 1)
+ self.assertEqual(rep.u.ctx_list[0].result,
+ dcerpc.DCERPC_BIND_ACK_RESULT_ACCEPTANCE)
+ self.assertEqual(rep.u.ctx_list[0].reason,
+ dcerpc.DCERPC_BIND_ACK_REASON_NOT_SPECIFIED)
+ self.assertNDRSyntaxEquals(rep.u.ctx_list[0].syntax, ndr32)
+ self.assertNotEqual(len(rep.u.auth_info), 0)
+ a = self.parse_auth(rep.u.auth_info)
+
+ from_server = a.credentials
+ (finished, to_server) = g.update(from_server)
+ self.assertFalse(finished)
+
+ auth_info = self.generate_auth(auth_type=auth_type,
+ auth_level=auth_level,
+ auth_context_id=auth_context_id,
+ auth_blob=to_server)
+ req = self.generate_alter(call_id=0,
+ ctx_list=[ctx1],
+ assoc_group_id=rep.u.assoc_group_id,
+ auth_info=auth_info)
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_ALTER_RESP, req.call_id)
+ self.assertEqual(rep.u.max_xmit_frag, req.u.max_xmit_frag)
+ self.assertEqual(rep.u.max_recv_frag, req.u.max_recv_frag)
+ self.assertEqual(rep.u.assoc_group_id, req.u.assoc_group_id)
+ self.assertEqual(rep.u.secondary_address_size, 0)
+ self.assertPadding(rep.u._pad1, 2)
+ self.assertEqual(rep.u.num_results, 1)
+ self.assertEqual(rep.u.ctx_list[0].result,
+ dcerpc.DCERPC_BIND_ACK_RESULT_ACCEPTANCE)
+ self.assertEqual(rep.u.ctx_list[0].reason,
+ dcerpc.DCERPC_BIND_ACK_REASON_NOT_SPECIFIED)
+ self.assertNDRSyntaxEquals(rep.u.ctx_list[0].syntax, ndr32)
+ self.assertNotEqual(len(rep.u.auth_info), 0)
+ a = self.parse_auth(rep.u.auth_info)
+
+ from_server = a.credentials
+ (finished, to_server) = g.update(from_server)
+ self.assertTrue(finished)
+
+ # And now try a request without auth_info
+ req = self.generate_request(call_id=2,
+ context_id=ctx1.context_id,
+ opnum=0,
+ stub=b"")
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_RESPONSE, req.call_id,
+ auth_length=0)
+ self.assertNotEqual(rep.u.alloc_hint, 0)
+ self.assertEqual(rep.u.context_id, req.u.context_id & 0xff)
+ self.assertEqual(rep.u.cancel_count, 0)
+ self.assertGreaterEqual(len(rep.u.stub_and_verifier), rep.u.alloc_hint)
+
+ # Now a request with auth_info DCERPC_AUTH_LEVEL_CONNECT
+ auth_info = self.generate_auth(auth_type=auth_type,
+ auth_level=auth_level,
+ auth_context_id=auth_context_id,
+ auth_blob=b"\x01" + b"\x00" * 15)
+ req = self.generate_request(call_id=3,
+ context_id=ctx1.context_id,
+ opnum=0,
+ stub=b"",
+ auth_info=auth_info)
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ # We don't get an auth_info back
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_RESPONSE, req.call_id,
+ auth_length=0)
+ self.assertNotEqual(rep.u.alloc_hint, 0)
+ self.assertEqual(rep.u.context_id, req.u.context_id)
+ self.assertEqual(rep.u.cancel_count, 0)
+ self.assertGreaterEqual(len(rep.u.stub_and_verifier), rep.u.alloc_hint)
+
+ # Now a reauth
+
+ g = gensec.Security.start_client(self.settings)
+ g.set_credentials(c)
+ g.want_feature(gensec.FEATURE_DCE_STYLE)
+ auth_type = dcerpc.DCERPC_AUTH_TYPE_SPNEGO
+ auth_level = dcerpc.DCERPC_AUTH_LEVEL_CONNECT
+ auth_context_id = 2
+ g.start_mech_by_authtype(auth_type, auth_level)
+ from_server = b""
+ (finished, to_server) = g.update(from_server)
+ self.assertFalse(finished)
+
+ auth_info = self.generate_auth(auth_type=auth_type,
+ auth_level=auth_level,
+ auth_context_id=auth_context_id,
+ auth_blob=to_server)
+ req = self.generate_alter(call_id=0,
+ ctx_list=ctx_list,
+ auth_info=auth_info)
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ # We get a fault
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_FAULT, req.call_id,
+ pfc_flags=req.pfc_flags |
+ dcerpc.DCERPC_PFC_FLAG_DID_NOT_EXECUTE,
+ auth_length=0)
+ self.assertNotEqual(rep.u.alloc_hint, 0)
+ self.assertEqual(rep.u.context_id, 0)
+ self.assertEqual(rep.u.cancel_count, 0)
+ self.assertEqual(rep.u.flags, 0)
+ self.assertEqual(rep.u.status, dcerpc.DCERPC_FAULT_ACCESS_DENIED)
+ self.assertEqual(rep.u.reserved, 0)
+ self.assertEqual(len(rep.u.error_and_verifier), 0)
+
+ # wait for a disconnect
+ rep = self.recv_pdu()
+ self.assertIsNone(rep)
+ self.assertNotConnected()
+
+ def test_spnego_connect_reauth_auth3(self):
+ ndr32 = base.transfer_syntax_ndr()
+ ndr64 = base.transfer_syntax_ndr64()
+
+ tsf1_list = [ndr32]
+ ctx1 = dcerpc.ctx_list()
+ ctx1.context_id = 1
+ ctx1.num_transfer_syntaxes = len(tsf1_list)
+ ctx1.abstract_syntax = samba.dcerpc.mgmt.abstract_syntax()
+ ctx1.transfer_syntaxes = tsf1_list
+ ctx_list = [ctx1]
+
+ c = self.get_anon_creds()
+ g = gensec.Security.start_client(self.settings)
+ g.set_credentials(c)
+ g.want_feature(gensec.FEATURE_DCE_STYLE)
+ auth_type = dcerpc.DCERPC_AUTH_TYPE_SPNEGO
+ auth_level = dcerpc.DCERPC_AUTH_LEVEL_CONNECT
+ auth_context_id = 2
+ g.start_mech_by_authtype(auth_type, auth_level)
+ from_server = b""
+ (finished, to_server) = g.update(from_server)
+ self.assertFalse(finished)
+
+ auth_info = self.generate_auth(auth_type=auth_type,
+ auth_level=auth_level,
+ auth_context_id=auth_context_id,
+ auth_blob=to_server)
+
+ req = self.generate_bind(call_id=0,
+ ctx_list=ctx_list,
+ auth_info=auth_info)
+
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_BIND_ACK, req.call_id)
+ self.assertEqual(rep.u.max_xmit_frag, req.u.max_xmit_frag)
+ self.assertEqual(rep.u.max_recv_frag, req.u.max_recv_frag)
+ self.assertNotEqual(rep.u.assoc_group_id, req.u.assoc_group_id)
+ self.assertEqual(rep.u.secondary_address_size, 4)
+ self.assertEqual(rep.u.secondary_address, "%d" % self.tcp_port)
+ self.assertPadding(rep.u._pad1, 2)
+ self.assertEqual(rep.u.num_results, 1)
+ self.assertEqual(rep.u.ctx_list[0].result,
+ dcerpc.DCERPC_BIND_ACK_RESULT_ACCEPTANCE)
+ self.assertEqual(rep.u.ctx_list[0].reason,
+ dcerpc.DCERPC_BIND_ACK_REASON_NOT_SPECIFIED)
+ self.assertNDRSyntaxEquals(rep.u.ctx_list[0].syntax, ndr32)
+ self.assertNotEqual(len(rep.u.auth_info), 0)
+ a = self.parse_auth(rep.u.auth_info)
+
+ from_server = a.credentials
+ (finished, to_server) = g.update(from_server)
+ self.assertFalse(finished)
+
+ auth_info = self.generate_auth(auth_type=auth_type,
+ auth_level=auth_level,
+ auth_context_id=auth_context_id,
+ auth_blob=to_server)
+ req = self.generate_alter(call_id=0,
+ ctx_list=[ctx1],
+ assoc_group_id=rep.u.assoc_group_id,
+ auth_info=auth_info)
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_ALTER_RESP, req.call_id)
+ self.assertEqual(rep.u.max_xmit_frag, req.u.max_xmit_frag)
+ self.assertEqual(rep.u.max_recv_frag, req.u.max_recv_frag)
+ self.assertEqual(rep.u.assoc_group_id, req.u.assoc_group_id)
+ self.assertEqual(rep.u.secondary_address_size, 0)
+ self.assertPadding(rep.u._pad1, 2)
+ self.assertEqual(rep.u.num_results, 1)
+ self.assertEqual(rep.u.ctx_list[0].result,
+ dcerpc.DCERPC_BIND_ACK_RESULT_ACCEPTANCE)
+ self.assertEqual(rep.u.ctx_list[0].reason,
+ dcerpc.DCERPC_BIND_ACK_REASON_NOT_SPECIFIED)
+ self.assertNDRSyntaxEquals(rep.u.ctx_list[0].syntax, ndr32)
+ self.assertNotEqual(len(rep.u.auth_info), 0)
+ a = self.parse_auth(rep.u.auth_info)
+
+ from_server = a.credentials
+ (finished, to_server) = g.update(from_server)
+ self.assertTrue(finished)
+
+ # And now try a request without auth_info
+ req = self.generate_request(call_id=2,
+ context_id=ctx1.context_id,
+ opnum=0,
+ stub=b"")
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_RESPONSE, req.call_id,
+ auth_length=0)
+ self.assertNotEqual(rep.u.alloc_hint, 0)
+ self.assertEqual(rep.u.context_id, req.u.context_id)
+ self.assertEqual(rep.u.cancel_count, 0)
+ self.assertGreaterEqual(len(rep.u.stub_and_verifier), rep.u.alloc_hint)
+
+ # Now a request with auth_info DCERPC_AUTH_LEVEL_CONNECT
+ auth_info = self.generate_auth(auth_type=auth_type,
+ auth_level=auth_level,
+ auth_context_id=auth_context_id,
+ auth_blob=b"\x01" + b"\x00" * 15)
+ req = self.generate_request(call_id=3,
+ context_id=ctx1.context_id,
+ opnum=0,
+ stub=b"",
+ auth_info=auth_info)
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ # We don't get an auth_info back
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_RESPONSE, req.call_id,
+ auth_length=0)
+ self.assertNotEqual(rep.u.alloc_hint, 0)
+ self.assertEqual(rep.u.context_id, req.u.context_id)
+ self.assertEqual(rep.u.cancel_count, 0)
+ self.assertGreaterEqual(len(rep.u.stub_and_verifier), rep.u.alloc_hint)
+
+ # Now a reauth
+
+ g = gensec.Security.start_client(self.settings)
+ g.set_credentials(c)
+ g.want_feature(gensec.FEATURE_DCE_STYLE)
+ auth_type = dcerpc.DCERPC_AUTH_TYPE_SPNEGO
+ auth_level = dcerpc.DCERPC_AUTH_LEVEL_CONNECT
+ auth_context_id = 2
+ g.start_mech_by_authtype(auth_type, auth_level)
+ from_server = b""
+ (finished, to_server) = g.update(from_server)
+ self.assertFalse(finished)
+
+ auth_info = self.generate_auth(auth_type=auth_type,
+ auth_level=auth_level,
+ auth_context_id=auth_context_id,
+ auth_blob=to_server)
+ req = self.generate_auth3(call_id=0,
+ auth_info=auth_info)
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ # We get a fault
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_FAULT, req.call_id,
+ pfc_flags=req.pfc_flags |
+ dcerpc.DCERPC_PFC_FLAG_DID_NOT_EXECUTE,
+ auth_length=0)
+ self.assertNotEqual(rep.u.alloc_hint, 0)
+ self.assertEqual(rep.u.context_id, 0)
+ self.assertEqual(rep.u.cancel_count, 0)
+ self.assertEqual(rep.u.flags, 0)
+ self.assertEqual(rep.u.status, dcerpc.DCERPC_NCA_S_PROTO_ERROR)
+ self.assertEqual(rep.u.reserved, 0)
+ self.assertEqual(len(rep.u.error_and_verifier), 0)
+
+ # wait for a disconnect
+ rep = self.recv_pdu()
+ self.assertIsNone(rep)
+ self.assertNotConnected()
+
+ def test_spnego_change_auth_level(self):
+ ndr32 = base.transfer_syntax_ndr()
+
+ tsf1_list = [ndr32]
+ ctx1 = dcerpc.ctx_list()
+ ctx1.context_id = 1
+ ctx1.num_transfer_syntaxes = len(tsf1_list)
+ ctx1.abstract_syntax = samba.dcerpc.mgmt.abstract_syntax()
+ ctx1.transfer_syntaxes = tsf1_list
+
+ c = self.get_anon_creds()
+ g = gensec.Security.start_client(self.settings)
+ g.set_credentials(c)
+ g.want_feature(gensec.FEATURE_DCE_STYLE)
+ auth_type = dcerpc.DCERPC_AUTH_TYPE_SPNEGO
+ auth_level = dcerpc.DCERPC_AUTH_LEVEL_INTEGRITY
+ auth_context_id = 2
+ g.start_mech_by_authtype(auth_type, auth_level)
+ from_server = b""
+ (finished, to_server) = g.update(from_server)
+ self.assertFalse(finished)
+
+ auth_info = self.generate_auth(auth_type=auth_type,
+ auth_level=auth_level,
+ auth_context_id=auth_context_id,
+ auth_blob=to_server)
+ req = self.generate_bind(call_id=0,
+ ctx_list=[ctx1],
+ auth_info=auth_info)
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_BIND_ACK, req.call_id)
+ self.assertEqual(rep.u.max_xmit_frag, req.u.max_xmit_frag)
+ self.assertEqual(rep.u.max_recv_frag, req.u.max_recv_frag)
+ self.assertNotEqual(rep.u.assoc_group_id, req.u.assoc_group_id)
+ self.assertEqual(rep.u.secondary_address_size, 4)
+ self.assertEqual(rep.u.secondary_address, "%d" % self.tcp_port)
+ self.assertPadding(rep.u._pad1, 2)
+ self.assertEqual(rep.u.num_results, 1)
+ self.assertEqual(rep.u.ctx_list[0].result,
+ dcerpc.DCERPC_BIND_ACK_RESULT_ACCEPTANCE)
+ self.assertEqual(rep.u.ctx_list[0].reason,
+ dcerpc.DCERPC_BIND_ACK_REASON_NOT_SPECIFIED)
+ self.assertNDRSyntaxEquals(rep.u.ctx_list[0].syntax, ndr32)
+ self.assertNotEqual(len(rep.u.auth_info), 0)
+ a = self.parse_auth(rep.u.auth_info)
+
+ from_server = a.credentials
+ (finished, to_server) = g.update(from_server)
+ self.assertFalse(finished)
+
+ auth_info = self.generate_auth(auth_type=auth_type,
+ auth_level=dcerpc.DCERPC_AUTH_LEVEL_PRIVACY,
+ auth_context_id=auth_context_id,
+ auth_blob=to_server)
+ req = self.generate_alter(call_id=0,
+ ctx_list=[ctx1],
+ assoc_group_id=rep.u.assoc_group_id,
+ auth_info=auth_info)
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_FAULT, req.call_id,
+ pfc_flags=req.pfc_flags |
+ dcerpc.DCERPC_PFC_FLAG_DID_NOT_EXECUTE,
+ auth_length=0)
+ self.assertNotEqual(rep.u.alloc_hint, 0)
+ self.assertEqual(rep.u.context_id, 0)
+ self.assertEqual(rep.u.cancel_count, 0)
+ self.assertEqual(rep.u.flags, 0)
+ self.assertEqual(rep.u.status, dcerpc.DCERPC_FAULT_ACCESS_DENIED)
+ self.assertEqual(rep.u.reserved, 0)
+ self.assertEqual(len(rep.u.error_and_verifier), 0)
+
+ # wait for a disconnect
+ rep = self.recv_pdu()
+ self.assertIsNone(rep)
+ self.assertNotConnected()
+
    def test_spnego_change_abstract(self):
        """Verify that changing the abstract syntax on alter_context fails.

        After a successful SPNEGO bind for the mgmt abstract syntax, the
        alter_context re-uses context_id 1 but names the epmapper abstract
        syntax instead.  The server must fault with
        DCERPC_NCA_S_PROTO_ERROR and disconnect.
        """
        ndr32 = base.transfer_syntax_ndr()

        tsf1_list = [ndr32]
        ctx1 = dcerpc.ctx_list()
        ctx1.context_id = 1
        ctx1.num_transfer_syntaxes = len(tsf1_list)
        ctx1.abstract_syntax = samba.dcerpc.mgmt.abstract_syntax()
        ctx1.transfer_syntaxes = tsf1_list

        # Same context_id, but a different abstract syntax (epmapper).
        ctx1b = dcerpc.ctx_list()
        ctx1b.context_id = 1
        ctx1b.num_transfer_syntaxes = len(tsf1_list)
        ctx1b.abstract_syntax = samba.dcerpc.epmapper.abstract_syntax()
        ctx1b.transfer_syntaxes = tsf1_list

        c = self.get_anon_creds()
        g = gensec.Security.start_client(self.settings)
        g.set_credentials(c)
        g.want_feature(gensec.FEATURE_DCE_STYLE)
        auth_type = dcerpc.DCERPC_AUTH_TYPE_SPNEGO
        auth_level = dcerpc.DCERPC_AUTH_LEVEL_INTEGRITY
        auth_context_id = 2
        g.start_mech_by_authtype(auth_type, auth_level)
        from_server = b""
        (finished, to_server) = g.update(from_server)
        self.assertFalse(finished)

        auth_info = self.generate_auth(auth_type=auth_type,
                                       auth_level=auth_level,
                                       auth_context_id=auth_context_id,
                                       auth_blob=to_server)
        req = self.generate_bind(call_id=0,
                                 ctx_list=[ctx1],
                                 auth_info=auth_info)
        self.send_pdu(req)
        rep = self.recv_pdu()
        self.verify_pdu(rep, dcerpc.DCERPC_PKT_BIND_ACK, req.call_id)
        self.assertEqual(rep.u.max_xmit_frag, req.u.max_xmit_frag)
        self.assertEqual(rep.u.max_recv_frag, req.u.max_recv_frag)
        self.assertNotEqual(rep.u.assoc_group_id, req.u.assoc_group_id)
        self.assertEqual(rep.u.secondary_address_size, 4)
        self.assertEqual(rep.u.secondary_address, "%d" % self.tcp_port)
        self.assertPadding(rep.u._pad1, 2)
        self.assertEqual(rep.u.num_results, 1)
        self.assertEqual(rep.u.ctx_list[0].result,
                         dcerpc.DCERPC_BIND_ACK_RESULT_ACCEPTANCE)
        self.assertEqual(rep.u.ctx_list[0].reason,
                         dcerpc.DCERPC_BIND_ACK_REASON_NOT_SPECIFIED)
        self.assertNDRSyntaxEquals(rep.u.ctx_list[0].syntax, ndr32)
        self.assertNotEqual(len(rep.u.auth_info), 0)
        a = self.parse_auth(rep.u.auth_info)

        from_server = a.credentials
        (finished, to_server) = g.update(from_server)
        self.assertFalse(finished)

        # NOTE(review): besides the ctx1b abstract-syntax change, this alter
        # also switches auth_level to PRIVACY; presumably the abstract-syntax
        # change is what triggers the PROTO_ERROR fault — confirm.
        auth_info = self.generate_auth(auth_type=auth_type,
                                       auth_level=dcerpc.DCERPC_AUTH_LEVEL_PRIVACY,
                                       auth_context_id=auth_context_id,
                                       auth_blob=to_server)
        req = self.generate_alter(call_id=0,
                                  ctx_list=[ctx1b],
                                  assoc_group_id=rep.u.assoc_group_id,
                                  auth_info=auth_info)
        self.send_pdu(req)
        rep = self.recv_pdu()
        self.verify_pdu(rep, dcerpc.DCERPC_PKT_FAULT, req.call_id,
                        pfc_flags=req.pfc_flags |
                        dcerpc.DCERPC_PFC_FLAG_DID_NOT_EXECUTE,
                        auth_length=0)
        self.assertNotEqual(rep.u.alloc_hint, 0)
        self.assertEqual(rep.u.context_id, 0)
        self.assertEqual(rep.u.cancel_count, 0)
        self.assertEqual(rep.u.flags, 0)
        self.assertEqual(rep.u.status, dcerpc.DCERPC_NCA_S_PROTO_ERROR)
        self.assertEqual(rep.u.reserved, 0)
        self.assertEqual(len(rep.u.error_and_verifier), 0)

        # wait for a disconnect
        rep = self.recv_pdu()
        self.assertIsNone(rep)
        self.assertNotConnected()
+
    def test_spnego_change_transfer(self):
        """Verify that changing the transfer syntax list on alter fails.

        The bind negotiates context_id 1 with NDR32 only; the alter_context
        presents the same context with [NDR32, NDR64] (and a different
        auth_level).  The server must fault with DCERPC_NCA_S_PROTO_ERROR
        and disconnect.
        """
        ndr32 = base.transfer_syntax_ndr()
        ndr64 = base.transfer_syntax_ndr64()

        tsf1_list = [ndr32]
        ctx1 = dcerpc.ctx_list()
        ctx1.context_id = 1
        ctx1.num_transfer_syntaxes = len(tsf1_list)
        ctx1.abstract_syntax = samba.dcerpc.mgmt.abstract_syntax()
        ctx1.transfer_syntaxes = tsf1_list

        # Same context_id and abstract syntax, but NDR64 added to the
        # transfer-syntax list.
        tsf1b_list = [ndr32, ndr64]
        ctx1b = dcerpc.ctx_list()
        ctx1b.context_id = 1
        ctx1b.num_transfer_syntaxes = len(tsf1b_list)
        ctx1b.abstract_syntax = samba.dcerpc.mgmt.abstract_syntax()
        ctx1b.transfer_syntaxes = tsf1b_list

        c = self.get_anon_creds()
        g = gensec.Security.start_client(self.settings)
        g.set_credentials(c)
        g.want_feature(gensec.FEATURE_DCE_STYLE)
        auth_type = dcerpc.DCERPC_AUTH_TYPE_SPNEGO
        auth_level = dcerpc.DCERPC_AUTH_LEVEL_INTEGRITY
        auth_context_id = 2
        g.start_mech_by_authtype(auth_type, auth_level)
        from_server = b""
        (finished, to_server) = g.update(from_server)
        self.assertFalse(finished)

        auth_info = self.generate_auth(auth_type=auth_type,
                                       auth_level=auth_level,
                                       auth_context_id=auth_context_id,
                                       auth_blob=to_server)
        req = self.generate_bind(call_id=0,
                                 ctx_list=[ctx1],
                                 auth_info=auth_info)
        self.send_pdu(req)
        rep = self.recv_pdu()
        self.verify_pdu(rep, dcerpc.DCERPC_PKT_BIND_ACK, req.call_id)
        self.assertEqual(rep.u.max_xmit_frag, req.u.max_xmit_frag)
        self.assertEqual(rep.u.max_recv_frag, req.u.max_recv_frag)
        self.assertNotEqual(rep.u.assoc_group_id, req.u.assoc_group_id)
        self.assertEqual(rep.u.secondary_address_size, 4)
        self.assertEqual(rep.u.secondary_address, "%d" % self.tcp_port)
        self.assertPadding(rep.u._pad1, 2)
        self.assertEqual(rep.u.num_results, 1)
        self.assertEqual(rep.u.ctx_list[0].result,
                         dcerpc.DCERPC_BIND_ACK_RESULT_ACCEPTANCE)
        self.assertEqual(rep.u.ctx_list[0].reason,
                         dcerpc.DCERPC_BIND_ACK_REASON_NOT_SPECIFIED)
        self.assertNDRSyntaxEquals(rep.u.ctx_list[0].syntax, ndr32)
        self.assertNotEqual(len(rep.u.auth_info), 0)
        a = self.parse_auth(rep.u.auth_info)

        from_server = a.credentials
        (finished, to_server) = g.update(from_server)
        self.assertFalse(finished)

        # We change ctx_list and auth_level
        auth_info = self.generate_auth(auth_type=auth_type,
                                       auth_level=dcerpc.DCERPC_AUTH_LEVEL_PRIVACY,
                                       auth_context_id=auth_context_id,
                                       auth_blob=to_server)
        req = self.generate_alter(call_id=0,
                                  ctx_list=[ctx1b],
                                  assoc_group_id=rep.u.assoc_group_id,
                                  auth_info=auth_info)
        self.send_pdu(req)
        rep = self.recv_pdu()
        self.verify_pdu(rep, dcerpc.DCERPC_PKT_FAULT, req.call_id,
                        pfc_flags=req.pfc_flags |
                        dcerpc.DCERPC_PFC_FLAG_DID_NOT_EXECUTE,
                        auth_length=0)
        self.assertNotEqual(rep.u.alloc_hint, 0)
        self.assertEqual(rep.u.context_id, 0)
        self.assertEqual(rep.u.cancel_count, 0)
        self.assertEqual(rep.u.flags, 0)
        self.assertEqual(rep.u.status, dcerpc.DCERPC_NCA_S_PROTO_ERROR)
        self.assertEqual(rep.u.reserved, 0)
        self.assertEqual(len(rep.u.error_and_verifier), 0)

        # wait for a disconnect
        rep = self.recv_pdu()
        self.assertIsNone(rep)
        self.assertNotConnected()
+
+ def test_spnego_change_auth_type1(self):
+ ndr32 = base.transfer_syntax_ndr()
+ ndr64 = base.transfer_syntax_ndr64()
+
+ tsf1_list = [ndr32]
+ ctx1 = dcerpc.ctx_list()
+ ctx1.context_id = 1
+ ctx1.num_transfer_syntaxes = len(tsf1_list)
+ ctx1.abstract_syntax = samba.dcerpc.mgmt.abstract_syntax()
+ ctx1.transfer_syntaxes = tsf1_list
+
+ c = self.get_anon_creds()
+ g = gensec.Security.start_client(self.settings)
+ g.set_credentials(c)
+ g.want_feature(gensec.FEATURE_DCE_STYLE)
+ auth_type = dcerpc.DCERPC_AUTH_TYPE_SPNEGO
+ auth_level = dcerpc.DCERPC_AUTH_LEVEL_INTEGRITY
+ auth_context_id = 2
+ g.start_mech_by_authtype(auth_type, auth_level)
+ from_server = b""
+ (finished, to_server) = g.update(from_server)
+ self.assertFalse(finished)
+
+ auth_info = self.generate_auth(auth_type=auth_type,
+ auth_level=auth_level,
+ auth_context_id=auth_context_id,
+ auth_blob=to_server)
+ req = self.generate_bind(call_id=0,
+ ctx_list=[ctx1],
+ auth_info=auth_info)
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_BIND_ACK, req.call_id)
+ self.assertEqual(rep.u.max_xmit_frag, req.u.max_xmit_frag)
+ self.assertEqual(rep.u.max_recv_frag, req.u.max_recv_frag)
+ self.assertNotEqual(rep.u.assoc_group_id, req.u.assoc_group_id)
+ self.assertEqual(rep.u.secondary_address_size, 4)
+ self.assertEqual(rep.u.secondary_address, "%d" % self.tcp_port)
+ self.assertPadding(rep.u._pad1, 2)
+ self.assertEqual(rep.u.num_results, 1)
+ self.assertEqual(rep.u.ctx_list[0].result,
+ dcerpc.DCERPC_BIND_ACK_RESULT_ACCEPTANCE)
+ self.assertEqual(rep.u.ctx_list[0].reason,
+ dcerpc.DCERPC_BIND_ACK_REASON_NOT_SPECIFIED)
+ self.assertNDRSyntaxEquals(rep.u.ctx_list[0].syntax, ndr32)
+ self.assertNotEqual(len(rep.u.auth_info), 0)
+ a = self.parse_auth(rep.u.auth_info)
+
+ from_server = a.credentials
+ (finished, to_server) = g.update(from_server)
+ self.assertFalse(finished)
+
+ # We change ctx_list and auth_level
+ auth_info = self.generate_auth(auth_type=dcerpc.DCERPC_AUTH_TYPE_KRB5,
+ auth_level=auth_level,
+ auth_context_id=auth_context_id,
+ auth_blob=to_server)
+ req = self.generate_alter(call_id=0,
+ ctx_list=[ctx1],
+ assoc_group_id=rep.u.assoc_group_id,
+ auth_info=auth_info)
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_FAULT, req.call_id,
+ pfc_flags=req.pfc_flags |
+ dcerpc.DCERPC_PFC_FLAG_DID_NOT_EXECUTE,
+ auth_length=0)
+ self.assertNotEqual(rep.u.alloc_hint, 0)
+ self.assertEqual(rep.u.context_id, 0)
+ self.assertEqual(rep.u.cancel_count, 0)
+ self.assertEqual(rep.u.flags, 0)
+ self.assertEqual(rep.u.status, dcerpc.DCERPC_FAULT_SEC_PKG_ERROR)
+ self.assertEqual(rep.u.reserved, 0)
+ self.assertEqual(len(rep.u.error_and_verifier), 0)
+
+ # wait for a disconnect
+ rep = self.recv_pdu()
+ self.assertIsNone(rep)
+ self.assertNotConnected()
+
    def test_spnego_change_auth_type2(self):
        """Verify that changing auth_type together with ctx_list fails.

        As test_spnego_change_auth_type1, but the alter_context also
        replaces the presentation context list with ctx1b (NDR64 added).
        The server must fault with DCERPC_NCA_S_PROTO_ERROR and disconnect.
        """
        ndr32 = base.transfer_syntax_ndr()
        ndr64 = base.transfer_syntax_ndr64()

        tsf1_list = [ndr32]
        ctx1 = dcerpc.ctx_list()
        ctx1.context_id = 1
        ctx1.num_transfer_syntaxes = len(tsf1_list)
        ctx1.abstract_syntax = samba.dcerpc.mgmt.abstract_syntax()
        ctx1.transfer_syntaxes = tsf1_list

        # Same context_id, but NDR64 added to the transfer-syntax list.
        tsf1b_list = [ndr32, ndr64]
        ctx1b = dcerpc.ctx_list()
        ctx1b.context_id = 1
        ctx1b.num_transfer_syntaxes = len(tsf1b_list)
        ctx1b.abstract_syntax = samba.dcerpc.mgmt.abstract_syntax()
        ctx1b.transfer_syntaxes = tsf1b_list

        c = self.get_anon_creds()
        g = gensec.Security.start_client(self.settings)
        g.set_credentials(c)
        g.want_feature(gensec.FEATURE_DCE_STYLE)
        auth_type = dcerpc.DCERPC_AUTH_TYPE_SPNEGO
        auth_level = dcerpc.DCERPC_AUTH_LEVEL_INTEGRITY
        auth_context_id = 2
        g.start_mech_by_authtype(auth_type, auth_level)
        from_server = b""
        (finished, to_server) = g.update(from_server)
        self.assertFalse(finished)

        auth_info = self.generate_auth(auth_type=auth_type,
                                       auth_level=auth_level,
                                       auth_context_id=auth_context_id,
                                       auth_blob=to_server)
        req = self.generate_bind(call_id=0,
                                 ctx_list=[ctx1],
                                 auth_info=auth_info)
        self.send_pdu(req)
        rep = self.recv_pdu()
        self.verify_pdu(rep, dcerpc.DCERPC_PKT_BIND_ACK, req.call_id)
        self.assertEqual(rep.u.max_xmit_frag, req.u.max_xmit_frag)
        self.assertEqual(rep.u.max_recv_frag, req.u.max_recv_frag)
        self.assertNotEqual(rep.u.assoc_group_id, req.u.assoc_group_id)
        self.assertEqual(rep.u.secondary_address_size, 4)
        self.assertEqual(rep.u.secondary_address, "%d" % self.tcp_port)
        self.assertPadding(rep.u._pad1, 2)
        self.assertEqual(rep.u.num_results, 1)
        self.assertEqual(rep.u.ctx_list[0].result,
                         dcerpc.DCERPC_BIND_ACK_RESULT_ACCEPTANCE)
        self.assertEqual(rep.u.ctx_list[0].reason,
                         dcerpc.DCERPC_BIND_ACK_REASON_NOT_SPECIFIED)
        self.assertNDRSyntaxEquals(rep.u.ctx_list[0].syntax, ndr32)
        self.assertNotEqual(len(rep.u.auth_info), 0)
        a = self.parse_auth(rep.u.auth_info)

        from_server = a.credentials
        (finished, to_server) = g.update(from_server)
        self.assertFalse(finished)

        # We change both the ctx_list (ctx1b adds NDR64) and the auth_type
        # (KRB5 instead of SPNEGO); auth_level stays the same.
        auth_info = self.generate_auth(auth_type=dcerpc.DCERPC_AUTH_TYPE_KRB5,
                                       auth_level=auth_level,
                                       auth_context_id=auth_context_id,
                                       auth_blob=to_server)
        req = self.generate_alter(call_id=0,
                                  ctx_list=[ctx1b],
                                  assoc_group_id=rep.u.assoc_group_id,
                                  auth_info=auth_info)
        self.send_pdu(req)
        rep = self.recv_pdu()
        self.verify_pdu(rep, dcerpc.DCERPC_PKT_FAULT, req.call_id,
                        pfc_flags=req.pfc_flags |
                        dcerpc.DCERPC_PFC_FLAG_DID_NOT_EXECUTE,
                        auth_length=0)
        self.assertNotEqual(rep.u.alloc_hint, 0)
        self.assertEqual(rep.u.context_id, 0)
        self.assertEqual(rep.u.cancel_count, 0)
        self.assertEqual(rep.u.flags, 0)
        self.assertEqual(rep.u.status, dcerpc.DCERPC_NCA_S_PROTO_ERROR)
        self.assertEqual(rep.u.reserved, 0)
        self.assertEqual(len(rep.u.error_and_verifier), 0)

        # wait for a disconnect
        rep = self.recv_pdu()
        self.assertIsNone(rep)
        self.assertNotConnected()
+
    def test_spnego_change_auth_type3(self):
        """Verify that dropping to AUTH_TYPE_NONE on alter_context fails.

        As test_spnego_change_auth_type2, but the alter_context trailer
        claims DCERPC_AUTH_TYPE_NONE instead of KRB5.  The server must
        fault with DCERPC_FAULT_ACCESS_DENIED and disconnect.
        """
        ndr32 = base.transfer_syntax_ndr()
        ndr64 = base.transfer_syntax_ndr64()

        tsf1_list = [ndr32]
        ctx1 = dcerpc.ctx_list()
        ctx1.context_id = 1
        ctx1.num_transfer_syntaxes = len(tsf1_list)
        ctx1.abstract_syntax = samba.dcerpc.mgmt.abstract_syntax()
        ctx1.transfer_syntaxes = tsf1_list

        # Same context_id, but NDR64 added to the transfer-syntax list.
        tsf1b_list = [ndr32, ndr64]
        ctx1b = dcerpc.ctx_list()
        ctx1b.context_id = 1
        ctx1b.num_transfer_syntaxes = len(tsf1b_list)
        ctx1b.abstract_syntax = samba.dcerpc.mgmt.abstract_syntax()
        ctx1b.transfer_syntaxes = tsf1b_list

        c = self.get_anon_creds()
        g = gensec.Security.start_client(self.settings)
        g.set_credentials(c)
        g.want_feature(gensec.FEATURE_DCE_STYLE)
        auth_type = dcerpc.DCERPC_AUTH_TYPE_SPNEGO
        auth_level = dcerpc.DCERPC_AUTH_LEVEL_INTEGRITY
        auth_context_id = 2
        g.start_mech_by_authtype(auth_type, auth_level)
        from_server = b""
        (finished, to_server) = g.update(from_server)
        self.assertFalse(finished)

        auth_info = self.generate_auth(auth_type=auth_type,
                                       auth_level=auth_level,
                                       auth_context_id=auth_context_id,
                                       auth_blob=to_server)
        req = self.generate_bind(call_id=0,
                                 ctx_list=[ctx1],
                                 auth_info=auth_info)
        self.send_pdu(req)
        rep = self.recv_pdu()
        self.verify_pdu(rep, dcerpc.DCERPC_PKT_BIND_ACK, req.call_id)
        self.assertEqual(rep.u.max_xmit_frag, req.u.max_xmit_frag)
        self.assertEqual(rep.u.max_recv_frag, req.u.max_recv_frag)
        self.assertNotEqual(rep.u.assoc_group_id, req.u.assoc_group_id)
        self.assertEqual(rep.u.secondary_address_size, 4)
        self.assertEqual(rep.u.secondary_address, "%d" % self.tcp_port)
        self.assertPadding(rep.u._pad1, 2)
        self.assertEqual(rep.u.num_results, 1)
        self.assertEqual(rep.u.ctx_list[0].result,
                         dcerpc.DCERPC_BIND_ACK_RESULT_ACCEPTANCE)
        self.assertEqual(rep.u.ctx_list[0].reason,
                         dcerpc.DCERPC_BIND_ACK_REASON_NOT_SPECIFIED)
        self.assertNDRSyntaxEquals(rep.u.ctx_list[0].syntax, ndr32)
        self.assertNotEqual(len(rep.u.auth_info), 0)
        a = self.parse_auth(rep.u.auth_info)

        from_server = a.credentials
        (finished, to_server) = g.update(from_server)
        self.assertFalse(finished)

        # We change both the ctx_list (ctx1b adds NDR64) and the auth_type
        # (NONE instead of SPNEGO); auth_level stays the same.
        auth_info = self.generate_auth(auth_type=dcerpc.DCERPC_AUTH_TYPE_NONE,
                                       auth_level=auth_level,
                                       auth_context_id=auth_context_id,
                                       auth_blob=to_server)
        req = self.generate_alter(call_id=0,
                                  ctx_list=[ctx1b],
                                  assoc_group_id=rep.u.assoc_group_id,
                                  auth_info=auth_info)
        self.send_pdu(req)
        rep = self.recv_pdu()
        self.verify_pdu(rep, dcerpc.DCERPC_PKT_FAULT, req.call_id,
                        pfc_flags=req.pfc_flags |
                        dcerpc.DCERPC_PFC_FLAG_DID_NOT_EXECUTE,
                        auth_length=0)
        self.assertNotEqual(rep.u.alloc_hint, 0)
        self.assertEqual(rep.u.context_id, 0)
        self.assertEqual(rep.u.cancel_count, 0)
        self.assertEqual(rep.u.flags, 0)
        self.assertEqual(rep.u.status, dcerpc.DCERPC_FAULT_ACCESS_DENIED)
        self.assertEqual(rep.u.reserved, 0)
        self.assertEqual(len(rep.u.error_and_verifier), 0)

        # wait for a disconnect
        rep = self.recv_pdu()
        self.assertIsNone(rep)
        self.assertNotConnected()
+
    def test_spnego_auth_pad_ok(self):
        """SPNEGO bind/alter with an explicitly correct auth_pad_length.

        Each handshake PDU is first packed to compute the padding the
        marshalled form actually carries, then re-issued with that
        auth_pad_length set explicitly in the auth trailer.  Both legs must
        be accepted, after which plain requests (with and without a
        redundant AUTH_LEVEL_CONNECT auth trailer) succeed.
        """
        ndr32 = base.transfer_syntax_ndr()

        tsf1_list = [ndr32]
        ctx1 = dcerpc.ctx_list()
        ctx1.context_id = 1
        ctx1.num_transfer_syntaxes = len(tsf1_list)
        ctx1.abstract_syntax = samba.dcerpc.mgmt.abstract_syntax()
        ctx1.transfer_syntaxes = tsf1_list
        ctx_list = [ctx1]

        c = self.get_anon_creds()
        g = gensec.Security.start_client(self.settings)
        g.set_credentials(c)
        g.want_feature(gensec.FEATURE_DCE_STYLE)
        auth_type = dcerpc.DCERPC_AUTH_TYPE_SPNEGO
        auth_level = dcerpc.DCERPC_AUTH_LEVEL_CONNECT
        auth_context_id = 2
        g.start_mech_by_authtype(auth_type, auth_level)
        from_server = b""
        (finished, to_server) = g.update(from_server)
        self.assertFalse(finished)

        auth_info = self.generate_auth(auth_type=auth_type,
                                       auth_level=auth_level,
                                       auth_context_id=auth_context_id,
                                       auth_blob=to_server)

        req = self.generate_bind(call_id=0,
                                 ctx_list=ctx_list,
                                 auth_info=auth_info)
        req_pdu = samba.ndr.ndr_pack(req)

        # Derive the padding from the packed PDU: total length minus the
        # fixed header/trailer sizes and the auth blob itself.
        auth_pad_ok = len(req_pdu)
        auth_pad_ok -= dcerpc.DCERPC_REQUEST_LENGTH
        auth_pad_ok -= dcerpc.DCERPC_AUTH_TRAILER_LENGTH
        auth_pad_ok -= len(to_server)

        auth_info = self.generate_auth(auth_type=auth_type,
                                       auth_level=auth_level,
                                       auth_context_id=auth_context_id,
                                       auth_pad_length=auth_pad_ok,
                                       auth_blob=to_server)

        req = self.generate_bind(call_id=0,
                                 ctx_list=ctx_list,
                                 auth_info=auth_info)
        self.send_pdu(req)
        rep = self.recv_pdu()
        self.verify_pdu(rep, dcerpc.DCERPC_PKT_BIND_ACK, req.call_id)
        self.assertEqual(rep.u.max_xmit_frag, req.u.max_xmit_frag)
        self.assertEqual(rep.u.max_recv_frag, req.u.max_recv_frag)
        self.assertNotEqual(rep.u.assoc_group_id, req.u.assoc_group_id)
        self.assertEqual(rep.u.secondary_address_size, 4)
        self.assertEqual(rep.u.secondary_address, "%d" % self.tcp_port)
        self.assertPadding(rep.u._pad1, 2)
        self.assertEqual(rep.u.num_results, 1)
        self.assertEqual(rep.u.ctx_list[0].result,
                         dcerpc.DCERPC_BIND_ACK_RESULT_ACCEPTANCE)
        self.assertEqual(rep.u.ctx_list[0].reason,
                         dcerpc.DCERPC_BIND_ACK_REASON_NOT_SPECIFIED)
        self.assertNDRSyntaxEquals(rep.u.ctx_list[0].syntax, ndr32)
        self.assertNotEqual(len(rep.u.auth_info), 0)
        a = self.parse_auth(rep.u.auth_info)

        from_server = a.credentials
        (finished, to_server) = g.update(from_server)
        self.assertFalse(finished)

        auth_info = self.generate_auth(auth_type=auth_type,
                                       auth_level=auth_level,
                                       auth_context_id=auth_context_id,
                                       auth_blob=to_server)
        req = self.generate_alter(call_id=0,
                                  ctx_list=ctx_list,
                                  assoc_group_id=rep.u.assoc_group_id,
                                  auth_info=auth_info)
        req_pdu = samba.ndr.ndr_pack(req)

        # Same padding computation for the alter_context leg.
        auth_pad_ok = len(req_pdu)
        auth_pad_ok -= dcerpc.DCERPC_REQUEST_LENGTH
        auth_pad_ok -= dcerpc.DCERPC_AUTH_TRAILER_LENGTH
        auth_pad_ok -= len(to_server)
        auth_info = self.generate_auth(auth_type=auth_type,
                                       auth_level=auth_level,
                                       auth_context_id=auth_context_id,
                                       auth_pad_length=auth_pad_ok,
                                       auth_blob=to_server)
        req = self.generate_alter(call_id=0,
                                  ctx_list=ctx_list,
                                  assoc_group_id=rep.u.assoc_group_id,
                                  auth_info=auth_info)
        self.send_pdu(req)
        rep = self.recv_pdu()
        self.verify_pdu(rep, dcerpc.DCERPC_PKT_ALTER_RESP, req.call_id)
        self.assertEqual(rep.u.max_xmit_frag, req.u.max_xmit_frag)
        self.assertEqual(rep.u.max_recv_frag, req.u.max_recv_frag)
        self.assertEqual(rep.u.assoc_group_id, req.u.assoc_group_id)
        self.assertEqual(rep.u.secondary_address_size, 0)
        self.assertPadding(rep.u._pad1, 2)
        self.assertEqual(rep.u.num_results, 1)
        self.assertEqual(rep.u.ctx_list[0].result,
                         dcerpc.DCERPC_BIND_ACK_RESULT_ACCEPTANCE)
        self.assertEqual(rep.u.ctx_list[0].reason,
                         dcerpc.DCERPC_BIND_ACK_REASON_NOT_SPECIFIED)
        self.assertNDRSyntaxEquals(rep.u.ctx_list[0].syntax, ndr32)
        self.assertNotEqual(len(rep.u.auth_info), 0)
        a = self.parse_auth(rep.u.auth_info)

        from_server = a.credentials
        (finished, to_server) = g.update(from_server)
        self.assertTrue(finished)

        # And now try a request without auth_info
        req = self.generate_request(call_id=2,
                                    context_id=ctx1.context_id,
                                    opnum=0,
                                    stub=b"")
        self.send_pdu(req)
        rep = self.recv_pdu()
        self.verify_pdu(rep, dcerpc.DCERPC_PKT_RESPONSE, req.call_id,
                        auth_length=0)
        self.assertNotEqual(rep.u.alloc_hint, 0)
        self.assertEqual(rep.u.context_id, req.u.context_id)
        self.assertEqual(rep.u.cancel_count, 0)
        self.assertGreaterEqual(len(rep.u.stub_and_verifier), rep.u.alloc_hint)

        # Now a request with auth_info DCERPC_AUTH_LEVEL_CONNECT
        auth_info = self.generate_auth(auth_type=auth_type,
                                       auth_level=auth_level,
                                       auth_context_id=auth_context_id,
                                       auth_blob=b"\x01" + b"\x00" * 15)
        req = self.generate_request(call_id=3,
                                    context_id=ctx1.context_id,
                                    opnum=0,
                                    stub=b"",
                                    auth_info=auth_info)
        self.send_pdu(req)
        rep = self.recv_pdu()
        # We don't get an auth_info back
        self.verify_pdu(rep, dcerpc.DCERPC_PKT_RESPONSE, req.call_id,
                        auth_length=0)
        self.assertNotEqual(rep.u.alloc_hint, 0)
        self.assertEqual(rep.u.context_id, req.u.context_id)
        self.assertEqual(rep.u.cancel_count, 0)
        self.assertGreaterEqual(len(rep.u.stub_and_verifier), rep.u.alloc_hint)

        self._disconnect("disconnect")
        self.assertNotConnected()
+
+ def test_spnego_auth_pad_fail_bind(self):
+ ndr32 = base.transfer_syntax_ndr()
+
+ tsf1_list = [ndr32]
+ ctx1 = dcerpc.ctx_list()
+ ctx1.context_id = 1
+ ctx1.num_transfer_syntaxes = len(tsf1_list)
+ ctx1.abstract_syntax = samba.dcerpc.mgmt.abstract_syntax()
+ ctx1.transfer_syntaxes = tsf1_list
+ ctx_list = [ctx1]
+
+ c = self.get_anon_creds()
+ g = gensec.Security.start_client(self.settings)
+ g.set_credentials(c)
+ g.want_feature(gensec.FEATURE_DCE_STYLE)
+ auth_type = dcerpc.DCERPC_AUTH_TYPE_SPNEGO
+ auth_level = dcerpc.DCERPC_AUTH_LEVEL_CONNECT
+ auth_context_id = 2
+ g.start_mech_by_authtype(auth_type, auth_level)
+ from_server = b""
+ (finished, to_server) = g.update(from_server)
+ self.assertFalse(finished)
+
+ auth_info = self.generate_auth(auth_type=auth_type,
+ auth_level=auth_level,
+ auth_context_id=auth_context_id,
+ auth_blob=to_server)
+
+ req = self.generate_bind(call_id=0,
+ ctx_list=ctx_list,
+ auth_info=auth_info)
+ req_pdu = samba.ndr.ndr_pack(req)
+
+ auth_pad_ok = len(req_pdu)
+ auth_pad_ok -= dcerpc.DCERPC_REQUEST_LENGTH
+ auth_pad_ok -= dcerpc.DCERPC_AUTH_TRAILER_LENGTH
+ auth_pad_ok -= len(to_server)
+ auth_pad_bad = auth_pad_ok + 1
+ auth_info = self.generate_auth(auth_type=auth_type,
+ auth_level=auth_level,
+ auth_context_id=auth_context_id,
+ auth_pad_length=auth_pad_bad,
+ auth_blob=to_server)
+
+ req = self.generate_bind(call_id=0,
+ ctx_list=ctx_list,
+ auth_info=auth_info)
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_BIND_NAK, req.call_id,
+ auth_length=0)
+ self.assertEqual(rep.u.reject_reason,
+ dcerpc.DCERPC_BIND_NAK_REASON_PROTOCOL_VERSION_NOT_SUPPORTED)
+ self.assertEqual(rep.u.num_versions, 1)
+ self.assertEqual(rep.u.versions[0].rpc_vers, req.rpc_vers)
+ self.assertEqual(rep.u.versions[0].rpc_vers_minor, req.rpc_vers_minor)
+ self.assertEqual(len(rep.u._pad), 3)
+ self.assertEqual(rep.u._pad, b'\0' * 3)
+
+ # wait for a disconnect
+ rep = self.recv_pdu()
+ self.assertIsNone(rep)
+ self.assertNotConnected()
+
    def test_spnego_auth_pad_fail_alter(self):
        """An alter_context with an over-stated auth_pad_length is rejected.

        The bind succeeds with the correct explicit auth_pad_length; the
        following alter_context then claims one byte of padding too many.
        The server must fault with DCERPC_NCA_S_PROTO_ERROR and disconnect.
        """
        ndr32 = base.transfer_syntax_ndr()

        tsf1_list = [ndr32]
        ctx1 = dcerpc.ctx_list()
        ctx1.context_id = 1
        ctx1.num_transfer_syntaxes = len(tsf1_list)
        ctx1.abstract_syntax = samba.dcerpc.mgmt.abstract_syntax()
        ctx1.transfer_syntaxes = tsf1_list
        ctx_list = [ctx1]

        c = self.get_anon_creds()
        g = gensec.Security.start_client(self.settings)
        g.set_credentials(c)
        g.want_feature(gensec.FEATURE_DCE_STYLE)
        auth_type = dcerpc.DCERPC_AUTH_TYPE_SPNEGO
        auth_level = dcerpc.DCERPC_AUTH_LEVEL_CONNECT
        auth_context_id = 2
        g.start_mech_by_authtype(auth_type, auth_level)
        from_server = b""
        (finished, to_server) = g.update(from_server)
        self.assertFalse(finished)

        auth_info = self.generate_auth(auth_type=auth_type,
                                       auth_level=auth_level,
                                       auth_context_id=auth_context_id,
                                       auth_blob=to_server)

        req = self.generate_bind(call_id=0,
                                 ctx_list=ctx_list,
                                 auth_info=auth_info)
        req_pdu = samba.ndr.ndr_pack(req)

        # Correct padding for the bind, derived from the packed PDU.
        auth_pad_ok = len(req_pdu)
        auth_pad_ok -= dcerpc.DCERPC_REQUEST_LENGTH
        auth_pad_ok -= dcerpc.DCERPC_AUTH_TRAILER_LENGTH
        auth_pad_ok -= len(to_server)

        auth_info = self.generate_auth(auth_type=auth_type,
                                       auth_level=auth_level,
                                       auth_context_id=auth_context_id,
                                       auth_pad_length=auth_pad_ok,
                                       auth_blob=to_server)

        req = self.generate_bind(call_id=0,
                                 ctx_list=ctx_list,
                                 auth_info=auth_info)
        self.send_pdu(req)
        rep = self.recv_pdu()
        self.verify_pdu(rep, dcerpc.DCERPC_PKT_BIND_ACK, req.call_id)
        self.assertEqual(rep.u.max_xmit_frag, req.u.max_xmit_frag)
        self.assertEqual(rep.u.max_recv_frag, req.u.max_recv_frag)
        self.assertNotEqual(rep.u.assoc_group_id, req.u.assoc_group_id)
        self.assertEqual(rep.u.secondary_address_size, 4)
        self.assertEqual(rep.u.secondary_address, "%d" % self.tcp_port)
        self.assertPadding(rep.u._pad1, 2)
        self.assertEqual(rep.u.num_results, 1)
        self.assertEqual(rep.u.ctx_list[0].result,
                         dcerpc.DCERPC_BIND_ACK_RESULT_ACCEPTANCE)
        self.assertEqual(rep.u.ctx_list[0].reason,
                         dcerpc.DCERPC_BIND_ACK_REASON_NOT_SPECIFIED)
        self.assertNDRSyntaxEquals(rep.u.ctx_list[0].syntax, ndr32)
        self.assertNotEqual(len(rep.u.auth_info), 0)
        a = self.parse_auth(rep.u.auth_info)

        from_server = a.credentials
        (finished, to_server) = g.update(from_server)
        self.assertFalse(finished)

        auth_info = self.generate_auth(auth_type=auth_type,
                                       auth_level=auth_level,
                                       auth_context_id=auth_context_id,
                                       auth_blob=to_server)
        req = self.generate_alter(call_id=0,
                                  ctx_list=ctx_list,
                                  assoc_group_id=rep.u.assoc_group_id,
                                  auth_info=auth_info)
        req_pdu = samba.ndr.ndr_pack(req)

        # Correct padding for the alter, then deliberately one byte too many.
        auth_pad_ok = len(req_pdu)
        auth_pad_ok -= dcerpc.DCERPC_REQUEST_LENGTH
        auth_pad_ok -= dcerpc.DCERPC_AUTH_TRAILER_LENGTH
        auth_pad_ok -= len(to_server)
        auth_pad_bad = auth_pad_ok + 1
        auth_info = self.generate_auth(auth_type=auth_type,
                                       auth_level=auth_level,
                                       auth_context_id=auth_context_id,
                                       auth_pad_length=auth_pad_bad,
                                       auth_blob=to_server)
        req = self.generate_alter(call_id=0,
                                  ctx_list=ctx_list,
                                  assoc_group_id=rep.u.assoc_group_id,
                                  auth_info=auth_info)
        self.send_pdu(req)
        rep = self.recv_pdu()
        self.verify_pdu(rep, dcerpc.DCERPC_PKT_FAULT, req.call_id,
                        pfc_flags=req.pfc_flags |
                        dcerpc.DCERPC_PFC_FLAG_DID_NOT_EXECUTE,
                        auth_length=0)
        self.assertNotEqual(rep.u.alloc_hint, 0)
        self.assertEqual(rep.u.context_id, 0)
        self.assertEqual(rep.u.cancel_count, 0)
        self.assertEqual(rep.u.flags, 0)
        self.assertEqual(rep.u.status, dcerpc.DCERPC_NCA_S_PROTO_ERROR)
        self.assertEqual(rep.u.reserved, 0)
        self.assertEqual(len(rep.u.error_and_verifier), 0)

        # wait for a disconnect
        rep = self.recv_pdu()
        self.assertIsNone(rep)
        self.assertNotConnected()
+
+ def test_ntlmssp_auth_pad_ok(self):
+ # Bind with NTLMSSP at DCERPC_AUTH_LEVEL_CONNECT, declaring an
+ # auth_pad_length computed from the packed bind PDU; the server must
+ # accept the bind, finish the handshake via AUTH3 (pad length 0) and
+ # then answer requests both without and with an auth trailer.
+ ndr32 = base.transfer_syntax_ndr()
+
+ tsf1_list = [ndr32]
+ ctx1 = dcerpc.ctx_list()
+ ctx1.context_id = 1
+ ctx1.num_transfer_syntaxes = len(tsf1_list)
+ ctx1.abstract_syntax = samba.dcerpc.mgmt.abstract_syntax()
+ ctx1.transfer_syntaxes = tsf1_list
+ ctx_list = [ctx1]
+
+ # Anonymous NTLMSSP client-side gensec context.
+ c = self.get_anon_creds()
+ g = gensec.Security.start_client(self.settings)
+ g.set_credentials(c)
+ g.want_feature(gensec.FEATURE_DCE_STYLE)
+ auth_type = dcerpc.DCERPC_AUTH_TYPE_NTLMSSP
+ auth_level = dcerpc.DCERPC_AUTH_LEVEL_CONNECT
+ auth_context_id = 2
+ g.start_mech_by_authtype(auth_type, auth_level)
+ from_server = b""
+ (finished, to_server) = g.update(from_server)
+ self.assertFalse(finished)
+
+ auth_info = self.generate_auth(auth_type=auth_type,
+ auth_level=auth_level,
+ auth_context_id=auth_context_id,
+ auth_blob=to_server)
+
+ req = self.generate_bind(call_id=0,
+ ctx_list=ctx_list,
+ auth_info=auth_info)
+ req_pdu = samba.ndr.ndr_pack(req)
+
+ # Derive the largest pad value the server still accepts from the
+ # packed PDU size.
+ # NOTE(review): DCERPC_REQUEST_LENGTH is used although this is a
+ # bind PDU — presumably this mirrors the server-side check; confirm.
+ auth_pad_ok = len(req_pdu)
+ auth_pad_ok -= dcerpc.DCERPC_REQUEST_LENGTH
+ auth_pad_ok -= dcerpc.DCERPC_AUTH_TRAILER_LENGTH
+ auth_pad_ok -= len(to_server)
+
+ auth_info = self.generate_auth(auth_type=auth_type,
+ auth_level=auth_level,
+ auth_context_id=auth_context_id,
+ auth_pad_length=auth_pad_ok,
+ auth_blob=to_server)
+
+ req = self.generate_bind(call_id=0,
+ ctx_list=ctx_list,
+ auth_info=auth_info)
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ # The declared padding is accepted: the bind succeeds.
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_BIND_ACK, req.call_id)
+ self.assertEqual(rep.u.max_xmit_frag, req.u.max_xmit_frag)
+ self.assertEqual(rep.u.max_recv_frag, req.u.max_recv_frag)
+ self.assertNotEqual(rep.u.assoc_group_id, req.u.assoc_group_id)
+ self.assertEqual(rep.u.secondary_address_size, 4)
+ self.assertEqual(rep.u.secondary_address, "%d" % self.tcp_port)
+ self.assertPadding(rep.u._pad1, 2)
+ self.assertEqual(rep.u.num_results, 1)
+ self.assertEqual(rep.u.ctx_list[0].result,
+ dcerpc.DCERPC_BIND_ACK_RESULT_ACCEPTANCE)
+ self.assertEqual(rep.u.ctx_list[0].reason,
+ dcerpc.DCERPC_BIND_ACK_REASON_NOT_SPECIFIED)
+ self.assertNDRSyntaxEquals(rep.u.ctx_list[0].syntax, ndr32)
+ self.assertNotEqual(len(rep.u.auth_info), 0)
+ a = self.parse_auth(rep.u.auth_info)
+
+ # Feed the server's challenge back into gensec and finish via AUTH3.
+ from_server = a.credentials
+ (finished, to_server) = g.update(from_server)
+ self.assertTrue(finished)
+
+ auth_pad_ok = 0
+ auth_info = self.generate_auth(auth_type=auth_type,
+ auth_level=auth_level,
+ auth_context_id=auth_context_id,
+ auth_pad_length=auth_pad_ok,
+ auth_blob=to_server)
+ req = self.generate_auth3(call_id=0,
+ auth_info=auth_info)
+ self.send_pdu(req)
+ # AUTH3 gets no reply on success; the connection must stay up.
+ rep = self.recv_pdu(timeout=0.01)
+ self.assertIsNone(rep)
+ self.assertIsConnected()
+
+ # And now try a request without auth_info
+ req = self.generate_request(call_id=2,
+ context_id=ctx1.context_id,
+ opnum=0,
+ stub=b"")
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_RESPONSE, req.call_id,
+ auth_length=0)
+ self.assertNotEqual(rep.u.alloc_hint, 0)
+ self.assertEqual(rep.u.context_id, req.u.context_id)
+ self.assertEqual(rep.u.cancel_count, 0)
+ self.assertGreaterEqual(len(rep.u.stub_and_verifier), rep.u.alloc_hint)
+
+ # Now a request with auth_info DCERPC_AUTH_LEVEL_CONNECT
+ auth_info = self.generate_auth(auth_type=auth_type,
+ auth_level=auth_level,
+ auth_context_id=auth_context_id,
+ auth_blob=b"\x01" + b"\x00" * 15)
+ req = self.generate_request(call_id=3,
+ context_id=ctx1.context_id,
+ opnum=0,
+ stub=b"",
+ auth_info=auth_info)
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ # We don't get an auth_info back
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_RESPONSE, req.call_id,
+ auth_length=0)
+ self.assertNotEqual(rep.u.alloc_hint, 0)
+ self.assertEqual(rep.u.context_id, req.u.context_id)
+ self.assertEqual(rep.u.cancel_count, 0)
+ self.assertGreaterEqual(len(rep.u.stub_and_verifier), rep.u.alloc_hint)
+
+ self._disconnect("disconnect")
+ self.assertNotConnected()
+
+ def test_ntlmssp_auth_pad_fail_auth3(self):
+ # Same NTLMSSP handshake as test_ntlmssp_auth_pad_ok, but the final
+ # AUTH3 declares a bogus auth_pad_length of 1; the server must answer
+ # with a DID_NOT_EXECUTE fault (NCA_S_FAULT_REMOTE_NO_MEMORY) and
+ # then drop the connection.
+ ndr32 = base.transfer_syntax_ndr()
+
+ tsf1_list = [ndr32]
+ ctx1 = dcerpc.ctx_list()
+ ctx1.context_id = 1
+ ctx1.num_transfer_syntaxes = len(tsf1_list)
+ ctx1.abstract_syntax = samba.dcerpc.mgmt.abstract_syntax()
+ ctx1.transfer_syntaxes = tsf1_list
+ ctx_list = [ctx1]
+
+ c = self.get_anon_creds()
+ g = gensec.Security.start_client(self.settings)
+ g.set_credentials(c)
+ g.want_feature(gensec.FEATURE_DCE_STYLE)
+ auth_type = dcerpc.DCERPC_AUTH_TYPE_NTLMSSP
+ auth_level = dcerpc.DCERPC_AUTH_LEVEL_CONNECT
+ auth_context_id = 2
+ g.start_mech_by_authtype(auth_type, auth_level)
+ from_server = b""
+ (finished, to_server) = g.update(from_server)
+ self.assertFalse(finished)
+
+ auth_info = self.generate_auth(auth_type=auth_type,
+ auth_level=auth_level,
+ auth_context_id=auth_context_id,
+ auth_blob=to_server)
+
+ req = self.generate_bind(call_id=0,
+ ctx_list=ctx_list,
+ auth_info=auth_info)
+ req_pdu = samba.ndr.ndr_pack(req)
+
+ # Valid pad length for the bind itself (see test_ntlmssp_auth_pad_ok).
+ auth_pad_ok = len(req_pdu)
+ auth_pad_ok -= dcerpc.DCERPC_REQUEST_LENGTH
+ auth_pad_ok -= dcerpc.DCERPC_AUTH_TRAILER_LENGTH
+ auth_pad_ok -= len(to_server)
+
+ auth_info = self.generate_auth(auth_type=auth_type,
+ auth_level=auth_level,
+ auth_context_id=auth_context_id,
+ auth_pad_length=auth_pad_ok,
+ auth_blob=to_server)
+
+ req = self.generate_bind(call_id=0,
+ ctx_list=ctx_list,
+ auth_info=auth_info)
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_BIND_ACK, req.call_id)
+ self.assertEqual(rep.u.max_xmit_frag, req.u.max_xmit_frag)
+ self.assertEqual(rep.u.max_recv_frag, req.u.max_recv_frag)
+ self.assertNotEqual(rep.u.assoc_group_id, req.u.assoc_group_id)
+ self.assertEqual(rep.u.secondary_address_size, 4)
+ self.assertEqual(rep.u.secondary_address, "%d" % self.tcp_port)
+ self.assertPadding(rep.u._pad1, 2)
+ self.assertEqual(rep.u.num_results, 1)
+ self.assertEqual(rep.u.ctx_list[0].result,
+ dcerpc.DCERPC_BIND_ACK_RESULT_ACCEPTANCE)
+ self.assertEqual(rep.u.ctx_list[0].reason,
+ dcerpc.DCERPC_BIND_ACK_REASON_NOT_SPECIFIED)
+ self.assertNDRSyntaxEquals(rep.u.ctx_list[0].syntax, ndr32)
+ self.assertNotEqual(len(rep.u.auth_info), 0)
+ a = self.parse_auth(rep.u.auth_info)
+
+ from_server = a.credentials
+ (finished, to_server) = g.update(from_server)
+ self.assertTrue(finished)
+
+ # Deliberately wrong: AUTH3 claims 1 byte of padding.
+ auth_pad_bad = 1
+ auth_info = self.generate_auth(auth_type=auth_type,
+ auth_level=auth_level,
+ auth_context_id=auth_context_id,
+ auth_pad_length=auth_pad_bad,
+ auth_blob=to_server)
+ req = self.generate_auth3(call_id=0,
+ auth_info=auth_info)
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_FAULT, req.call_id,
+ pfc_flags=req.pfc_flags |
+ dcerpc.DCERPC_PFC_FLAG_DID_NOT_EXECUTE,
+ auth_length=0)
+ self.assertNotEqual(rep.u.alloc_hint, 0)
+ self.assertEqual(rep.u.context_id, 0)
+ self.assertEqual(rep.u.cancel_count, 0)
+ self.assertEqual(rep.u.flags, 0)
+ self.assertEqual(rep.u.status, dcerpc.DCERPC_NCA_S_FAULT_REMOTE_NO_MEMORY)
+ self.assertEqual(rep.u.reserved, 0)
+ self.assertEqual(len(rep.u.error_and_verifier), 0)
+
+ # wait for a disconnect
+ rep = self.recv_pdu()
+ self.assertIsNone(rep)
+ self.assertNotConnected()
+
+ def _test_auth_bind_auth_level(self, auth_type, auth_level, auth_context_id, ctx,
+ g_auth_level=dcerpc.DCERPC_AUTH_LEVEL_INTEGRITY,
+ hdr_signing=False,
+ alter_fault=None):
+ # Helper: build an authenticated context for the test user and run a
+ # generic bind with it. auth_level goes on the wire while
+ # g_auth_level configures the local gensec context, so the two can
+ # deliberately disagree. Returns the auth_context dict on success,
+ # or None if either creds setup or the bind did not complete
+ # (e.g. the expected alter_fault fired).
+ creds = self.get_user_creds()
+ auth_context = self.get_auth_context_creds(creds=creds,
+ auth_type=auth_type,
+ auth_level=auth_level,
+ auth_context_id=auth_context_id,
+ g_auth_level=g_auth_level,
+ hdr_signing=hdr_signing)
+ if auth_context is None:
+ return None
+ ack = self.do_generic_bind(ctx=ctx,
+ auth_context=auth_context,
+ alter_fault=alter_fault)
+ if ack is None:
+ return None
+ return auth_context
+
+ def _test_spnego_level_bind_nak(self, auth_level,
+ reason=dcerpc.DCERPC_BIND_NAK_REASON_INVALID_CHECKSUM):
+ # Helper: a SPNEGO bind at the given auth_level must be rejected
+ # with a BIND_NAK carrying the given reason.
+ c = self.get_user_creds()
+ return self._test_auth_type_level_bind_nak(auth_type=dcerpc.DCERPC_AUTH_TYPE_SPNEGO,
+ auth_level=auth_level, creds=c, reason=reason)
+
+ def _test_spnego_level_bind(self, auth_level,
+ g_auth_level=dcerpc.DCERPC_AUTH_LEVEL_INTEGRITY,
+ alter_fault=None,
+ request_fault=None,
+ response_fault_flags=0):
+ # Helper: SPNEGO bind at auth_level (gensec configured for
+ # g_auth_level). If request_fault is None the helper stops after the
+ # bind; otherwise it sends a (possibly signed) request with an
+ # unknown opnum (0xffff) and expects a fault with that status.
+ # Unless DID_NOT_EXECUTE is expected in response_fault_flags, the
+ # server is also expected to disconnect afterwards.
+ ndr32 = base.transfer_syntax_ndr()
+
+ tsf1_list = [ndr32]
+ ctx1 = dcerpc.ctx_list()
+ ctx1.context_id = 0x1001
+ ctx1.num_transfer_syntaxes = len(tsf1_list)
+ ctx1.abstract_syntax = samba.dcerpc.mgmt.abstract_syntax()
+ ctx1.transfer_syntaxes = tsf1_list
+
+ auth_type = dcerpc.DCERPC_AUTH_TYPE_SPNEGO
+ auth_context_id = 2
+
+ auth_context = self._test_auth_bind_auth_level(auth_type=auth_type,
+ auth_level=auth_level,
+ auth_context_id=auth_context_id,
+ ctx=ctx1,
+ g_auth_level=g_auth_level,
+ alter_fault=alter_fault)
+ if request_fault is None:
+ return
+
+ self.assertIsNotNone(auth_context)
+ g = auth_context["gensec"]
+ self.assertIsNotNone(g)
+
+ # Pad the stub to DCERPC_AUTH_PAD_ALIGNMENT as a real client would.
+ stub_bin = b'\x00' * 17
+ mod_len = len(stub_bin) % dcerpc.DCERPC_AUTH_PAD_ALIGNMENT
+ auth_pad_length = 0
+ if mod_len > 0:
+ auth_pad_length = dcerpc.DCERPC_AUTH_PAD_ALIGNMENT - mod_len
+ stub_bin += b'\x00' * auth_pad_length
+
+ # Below INTEGRITY there is no real signature; use a fixed-size dummy.
+ if g_auth_level >= dcerpc.DCERPC_AUTH_LEVEL_INTEGRITY:
+ sig_size = g.sig_size(len(stub_bin))
+ else:
+ sig_size = 16
+ zero_sig = b"\x00" * sig_size
+
+ # First pack with a zeroed signature to learn the exact offsets,
+ # then sign the packed bytes and rebuild the request with the real
+ # signature.
+ auth_info = self.generate_auth(auth_type=auth_type,
+ auth_level=auth_level,
+ auth_pad_length=auth_pad_length,
+ auth_context_id=auth_context_id,
+ auth_blob=zero_sig)
+ req = self.generate_request(call_id=4,
+ context_id=ctx1.context_id,
+ opnum=0xffff,
+ stub=stub_bin,
+ auth_info=auth_info)
+ if g_auth_level >= dcerpc.DCERPC_AUTH_LEVEL_INTEGRITY:
+ req_blob = samba.ndr.ndr_pack(req)
+ ofs_stub = dcerpc.DCERPC_REQUEST_LENGTH
+ ofs_sig = len(req_blob) - req.auth_length
+ ofs_trailer = ofs_sig - dcerpc.DCERPC_AUTH_TRAILER_LENGTH
+ req_data = req_blob[ofs_stub:ofs_trailer]
+ req_whole = req_blob[0:ofs_sig]
+ sig = g.sign_packet(req_data, req_whole)
+ auth_info = self.generate_auth(auth_type=auth_type,
+ auth_level=auth_level,
+ auth_pad_length=auth_pad_length,
+ auth_context_id=auth_context_id,
+ auth_blob=sig)
+ req = self.generate_request(call_id=4,
+ context_id=ctx1.context_id,
+ opnum=0xffff,
+ stub=stub_bin,
+ auth_info=auth_info)
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_FAULT, req.call_id,
+ pfc_flags=req.pfc_flags | response_fault_flags,
+ auth_length=0)
+ self.assertNotEqual(rep.u.alloc_hint, 0)
+ self.assertEqual(rep.u.context_id, ctx1.context_id)
+ self.assertEqual(rep.u.cancel_count, 0)
+ self.assertEqual(rep.u.flags, 0)
+ self.assertEqual(rep.u.status, request_fault)
+ self.assertEqual(rep.u.reserved, 0)
+ self.assertEqual(len(rep.u.error_and_verifier), 0)
+
+ if response_fault_flags & dcerpc.DCERPC_PFC_FLAG_DID_NOT_EXECUTE:
+ return
+
+ # wait for a disconnect
+ rep = self.recv_pdu()
+ self.assertIsNone(rep)
+ self.assertNotConnected()
+
+ def test_spnego_none_bind(self):
+ # AUTH_LEVEL_NONE with an auth trailer is rejected with a BIND_NAK.
+ return self._test_spnego_level_bind_nak(dcerpc.DCERPC_AUTH_LEVEL_NONE,
+ reason=dcerpc.DCERPC_BIND_NAK_REASON_NOT_SPECIFIED)
+
+ def test_spnego_call_bind(self):
+ # AUTH_LEVEL_CALL is rejected with INVALID_CHECKSUM.
+ return self._test_spnego_level_bind_nak(dcerpc.DCERPC_AUTH_LEVEL_CALL,
+ reason=dcerpc.DCERPC_BIND_NAK_REASON_INVALID_CHECKSUM)
+
+ def test_spnego_0_bind(self):
+ # Out-of-range auth_level 0 is rejected with a BIND_NAK.
+ return self._test_spnego_level_bind_nak(0,
+ reason=dcerpc.DCERPC_BIND_NAK_REASON_NOT_SPECIFIED)
+
+ def test_spnego_7_bind(self):
+ # Out-of-range auth_level 7 is rejected with a BIND_NAK.
+ return self._test_spnego_level_bind_nak(7,
+ reason=dcerpc.DCERPC_BIND_NAK_REASON_NOT_SPECIFIED)
+
+ def test_spnego_255_bind(self):
+ # Out-of-range auth_level 255 is rejected with a BIND_NAK.
+ return self._test_spnego_level_bind_nak(255,
+ reason=dcerpc.DCERPC_BIND_NAK_REASON_NOT_SPECIFIED)
+
+ def test_spnego_connect_bind_none(self):
+ # CONNECT on the wire, gensec at CONNECT: bind succeeds, no request phase.
+ return self._test_spnego_level_bind(auth_level=dcerpc.DCERPC_AUTH_LEVEL_CONNECT,
+ g_auth_level=dcerpc.DCERPC_AUTH_LEVEL_CONNECT)
+
+ def test_spnego_connect_bind_sign(self):
+ # CONNECT on the wire, gensec negotiates signing: bind still succeeds.
+ return self._test_spnego_level_bind(auth_level=dcerpc.DCERPC_AUTH_LEVEL_CONNECT,
+ g_auth_level=dcerpc.DCERPC_AUTH_LEVEL_INTEGRITY)
+
+ def test_spnego_connect_bind_seal(self):
+ # CONNECT on the wire, gensec negotiates sealing: bind still succeeds.
+ return self._test_spnego_level_bind(auth_level=dcerpc.DCERPC_AUTH_LEVEL_CONNECT,
+ g_auth_level=dcerpc.DCERPC_AUTH_LEVEL_PRIVACY)
+
+ def test_spnego_packet_bind_none(self):
+ # DCERPC_AUTH_LEVEL_PACKET is handled as alias of
+ # DCERPC_AUTH_LEVEL_INTEGRITY
+ # gensec only did CONNECT, so the signed request fails with
+ # SEC_PKG_ERROR.
+ return self._test_spnego_level_bind(auth_level=dcerpc.DCERPC_AUTH_LEVEL_PACKET,
+ g_auth_level=dcerpc.DCERPC_AUTH_LEVEL_CONNECT,
+ request_fault=dcerpc.DCERPC_FAULT_SEC_PKG_ERROR)
+
+ def test_spnego_packet_bind_sign(self):
+ # DCERPC_AUTH_LEVEL_PACKET is handled as alias of
+ # DCERPC_AUTH_LEVEL_INTEGRITY
+ # Signature verifies; only the unknown opnum faults (OP_RNG_ERROR).
+ return self._test_spnego_level_bind(auth_level=dcerpc.DCERPC_AUTH_LEVEL_PACKET,
+ g_auth_level=dcerpc.DCERPC_AUTH_LEVEL_INTEGRITY,
+ request_fault=dcerpc.DCERPC_NCA_S_OP_RNG_ERROR,
+ response_fault_flags=dcerpc.DCERPC_PFC_FLAG_DID_NOT_EXECUTE)
+
+ def test_spnego_packet_bind_seal(self):
+ # DCERPC_AUTH_LEVEL_PACKET is handled as alias of
+ # DCERPC_AUTH_LEVEL_INTEGRITY
+ # Sealing-capable context still signs fine; unknown opnum faults.
+ return self._test_spnego_level_bind(auth_level=dcerpc.DCERPC_AUTH_LEVEL_PACKET,
+ g_auth_level=dcerpc.DCERPC_AUTH_LEVEL_PRIVACY,
+ request_fault=dcerpc.DCERPC_NCA_S_OP_RNG_ERROR,
+ response_fault_flags=dcerpc.DCERPC_PFC_FLAG_DID_NOT_EXECUTE)
+
+ def test_spnego_integrity_bind_none(self):
+ # INTEGRITY on the wire but gensec only did CONNECT: SEC_PKG_ERROR.
+ return self._test_spnego_level_bind(auth_level=dcerpc.DCERPC_AUTH_LEVEL_INTEGRITY,
+ g_auth_level=dcerpc.DCERPC_AUTH_LEVEL_CONNECT,
+ request_fault=dcerpc.DCERPC_FAULT_SEC_PKG_ERROR)
+
+ def test_spnego_integrity_bind_sign(self):
+ # INTEGRITY with matching gensec level: only the unknown opnum faults.
+ return self._test_spnego_level_bind(auth_level=dcerpc.DCERPC_AUTH_LEVEL_INTEGRITY,
+ g_auth_level=dcerpc.DCERPC_AUTH_LEVEL_INTEGRITY,
+ request_fault=dcerpc.DCERPC_NCA_S_OP_RNG_ERROR,
+ response_fault_flags=dcerpc.DCERPC_PFC_FLAG_DID_NOT_EXECUTE)
+
+ def test_spnego_integrity_bind_seal(self):
+ # INTEGRITY on the wire, privacy-capable gensec: unknown opnum faults.
+ return self._test_spnego_level_bind(auth_level=dcerpc.DCERPC_AUTH_LEVEL_INTEGRITY,
+ g_auth_level=dcerpc.DCERPC_AUTH_LEVEL_PRIVACY,
+ request_fault=dcerpc.DCERPC_NCA_S_OP_RNG_ERROR,
+ response_fault_flags=dcerpc.DCERPC_PFC_FLAG_DID_NOT_EXECUTE)
+
+ def test_spnego_privacy_bind_none(self):
+ # This fails...
+ # PRIVACY on the wire but gensec only at CONNECT: the alter context
+ # itself faults with SEC_PKG_ERROR.
+ return self._test_spnego_level_bind(auth_level=dcerpc.DCERPC_AUTH_LEVEL_PRIVACY,
+ g_auth_level=dcerpc.DCERPC_AUTH_LEVEL_CONNECT,
+ alter_fault=dcerpc.DCERPC_FAULT_SEC_PKG_ERROR)
+
+ def test_spnego_privacy_bind_sign(self):
+ # This fails...
+ # PRIVACY on the wire but gensec only at INTEGRITY: alter context
+ # faults with SEC_PKG_ERROR.
+ return self._test_spnego_level_bind(auth_level=dcerpc.DCERPC_AUTH_LEVEL_PRIVACY,
+ g_auth_level=dcerpc.DCERPC_AUTH_LEVEL_INTEGRITY,
+ alter_fault=dcerpc.DCERPC_FAULT_SEC_PKG_ERROR)
+
+ def test_spnego_privacy_bind_seal(self):
+ # PRIVACY with matching gensec level: bind succeeds.
+ return self._test_spnego_level_bind(auth_level=dcerpc.DCERPC_AUTH_LEVEL_PRIVACY,
+ g_auth_level=dcerpc.DCERPC_AUTH_LEVEL_PRIVACY)
+
+ def _test_auth_signing_auth_level_request(self, auth_type, auth_level, hdr_sign=False):
+ # Helper: bind with signing (optionally header signing) and run a
+ # sequence of signed mgmt requests:
+ # 1. opnum 0 with an empty stub -> signed response, verified
+ # 2. unknown opnum 0xffff -> OP_RNG_ERROR fault, DID_NOT_EXECUTE
+ # 3. opnum 1 with an 8-byte stub -> signed response, pad length 4
+ # 4. opnum 3 with an 8-byte stub -> signed response, pad length 12
+ # Each request is packed twice: first with a zeroed signature to
+ # determine offsets, then rebuilt with the real gensec signature.
+ ndr32 = base.transfer_syntax_ndr()
+
+ tsf1_list = [ndr32]
+ ctx1 = dcerpc.ctx_list()
+ ctx1.context_id = 0x1001
+ ctx1.num_transfer_syntaxes = len(tsf1_list)
+ ctx1.abstract_syntax = samba.dcerpc.mgmt.abstract_syntax()
+ ctx1.transfer_syntaxes = tsf1_list
+ ctx_list = [ctx1]
+
+ auth_context_id = 2
+
+ auth_context = self._test_auth_bind_auth_level(auth_type=auth_type,
+ auth_level=auth_level,
+ auth_context_id=auth_context_id,
+ hdr_signing=hdr_sign,
+ ctx=ctx1)
+ self.assertIsNotNone(auth_context)
+ g = auth_context["gensec"]
+ self.assertIsNotNone(g)
+
+ # --- Request 1: opnum 0, empty stub ---
+ stub_bin = b'\x00' * 0
+ mod_len = len(stub_bin) % dcerpc.DCERPC_AUTH_PAD_ALIGNMENT
+ auth_pad_length = 0
+ if mod_len > 0:
+ auth_pad_length = dcerpc.DCERPC_AUTH_PAD_ALIGNMENT - mod_len
+ stub_bin += b'\x00' * auth_pad_length
+
+ sig_size = g.sig_size(len(stub_bin))
+ zero_sig = b"\x00" * sig_size
+
+ auth_info = self.generate_auth(auth_type=auth_type,
+ auth_level=auth_level,
+ auth_pad_length=auth_pad_length,
+ auth_context_id=auth_context_id,
+ auth_blob=zero_sig)
+ req = self.generate_request(call_id=3,
+ context_id=ctx1.context_id,
+ opnum=0,
+ stub=stub_bin,
+ auth_info=auth_info)
+ req_blob = samba.ndr.ndr_pack(req)
+ ofs_stub = dcerpc.DCERPC_REQUEST_LENGTH
+ ofs_sig = len(req_blob) - req.auth_length
+ ofs_trailer = ofs_sig - dcerpc.DCERPC_AUTH_TRAILER_LENGTH
+ req_data = req_blob[ofs_stub:ofs_trailer]
+ req_whole = req_blob[0:ofs_sig]
+ sig = g.sign_packet(req_data, req_whole)
+ auth_info = self.generate_auth(auth_type=auth_type,
+ auth_level=auth_level,
+ auth_pad_length=auth_pad_length,
+ auth_context_id=auth_context_id,
+ auth_blob=sig)
+ req = self.generate_request(call_id=3,
+ context_id=ctx1.context_id,
+ opnum=0,
+ stub=stub_bin,
+ auth_info=auth_info)
+ self.send_pdu(req)
+ (rep, rep_blob) = self.recv_pdu_raw()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_RESPONSE, req.call_id,
+ auth_length=sig_size)
+ self.assertNotEqual(rep.u.alloc_hint, 0)
+ self.assertEqual(rep.u.context_id, req.u.context_id & 0xff)
+ self.assertEqual(rep.u.cancel_count, 0)
+ self.assertGreaterEqual(len(rep.u.stub_and_verifier), rep.u.alloc_hint)
+ self.assertEqual(rep.auth_length, sig_size)
+
+ # Split the raw response into stub, trailer and signature and verify
+ # the server's signature over it.
+ ofs_stub = dcerpc.DCERPC_REQUEST_LENGTH
+ ofs_sig = rep.frag_length - rep.auth_length
+ ofs_trailer = ofs_sig - dcerpc.DCERPC_AUTH_TRAILER_LENGTH
+ rep_data = rep_blob[ofs_stub:ofs_trailer]
+ rep_whole = rep_blob[0:ofs_sig]
+ rep_sig = rep_blob[ofs_sig:]
+ rep_auth_info_blob = rep_blob[ofs_trailer:]
+
+ rep_auth_info = self.parse_auth(rep_auth_info_blob)
+ self.assertEqual(rep_auth_info.auth_type, auth_type)
+ self.assertEqual(rep_auth_info.auth_level, auth_level)
+ # mgmt_inq_if_ids() returns no fixed size results
+ #self.assertEqual(rep_auth_info.auth_pad_length, 0)
+ self.assertEqual(rep_auth_info.auth_reserved, 0)
+ self.assertEqual(rep_auth_info.auth_context_id, auth_context_id)
+ self.assertEqual(rep_auth_info.credentials, rep_sig)
+
+ g.check_packet(rep_data, rep_whole, rep_sig)
+
+ # --- Request 2: unknown opnum 0xffff, 17-byte stub -> fault ---
+ stub_bin = b'\x00' * 17
+ mod_len = len(stub_bin) % dcerpc.DCERPC_AUTH_PAD_ALIGNMENT
+ auth_pad_length = 0
+ if mod_len > 0:
+ auth_pad_length = dcerpc.DCERPC_AUTH_PAD_ALIGNMENT - mod_len
+ stub_bin += b'\x00' * auth_pad_length
+
+ sig_size = g.sig_size(len(stub_bin))
+ zero_sig = b"\x00" * sig_size
+
+ auth_info = self.generate_auth(auth_type=auth_type,
+ auth_level=auth_level,
+ auth_pad_length=auth_pad_length,
+ auth_context_id=auth_context_id,
+ auth_blob=zero_sig)
+ req = self.generate_request(call_id=4,
+ context_id=ctx1.context_id,
+ opnum=0xffff,
+ stub=stub_bin,
+ auth_info=auth_info)
+ req_blob = samba.ndr.ndr_pack(req)
+ ofs_stub = dcerpc.DCERPC_REQUEST_LENGTH
+ ofs_sig = len(req_blob) - req.auth_length
+ ofs_trailer = ofs_sig - dcerpc.DCERPC_AUTH_TRAILER_LENGTH
+ req_data = req_blob[ofs_stub:ofs_trailer]
+ req_whole = req_blob[0:ofs_sig]
+ sig = g.sign_packet(req_data, req_whole)
+ auth_info = self.generate_auth(auth_type=auth_type,
+ auth_level=auth_level,
+ auth_pad_length=auth_pad_length,
+ auth_context_id=auth_context_id,
+ auth_blob=sig)
+ req = self.generate_request(call_id=4,
+ context_id=ctx1.context_id,
+ opnum=0xffff,
+ stub=stub_bin,
+ auth_info=auth_info)
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_FAULT, req.call_id,
+ pfc_flags=req.pfc_flags |
+ dcerpc.DCERPC_PFC_FLAG_DID_NOT_EXECUTE,
+ auth_length=0)
+ self.assertNotEqual(rep.u.alloc_hint, 0)
+ self.assertEqual(rep.u.context_id, ctx1.context_id)
+ self.assertEqual(rep.u.cancel_count, 0)
+ self.assertEqual(rep.u.flags, 0)
+ self.assertEqual(rep.u.status, dcerpc.DCERPC_NCA_S_OP_RNG_ERROR)
+ self.assertEqual(rep.u.reserved, 0)
+ self.assertEqual(len(rep.u.error_and_verifier), 0)
+
+ # --- Request 3: opnum 1, 8-byte stub ---
+ stub_bin = b'\x00' * 8
+ mod_len = len(stub_bin) % dcerpc.DCERPC_AUTH_PAD_ALIGNMENT
+ auth_pad_length = 0
+ if mod_len > 0:
+ auth_pad_length = dcerpc.DCERPC_AUTH_PAD_ALIGNMENT - mod_len
+ stub_bin += b'\x00' * auth_pad_length
+
+ sig_size = g.sig_size(len(stub_bin))
+ zero_sig = b"\x00" * sig_size
+
+ auth_info = self.generate_auth(auth_type=auth_type,
+ auth_level=auth_level,
+ auth_pad_length=auth_pad_length,
+ auth_context_id=auth_context_id,
+ auth_blob=zero_sig)
+ req = self.generate_request(call_id=5,
+ context_id=ctx1.context_id,
+ opnum=1,
+ stub=stub_bin,
+ auth_info=auth_info)
+ req_blob = samba.ndr.ndr_pack(req)
+ ofs_stub = dcerpc.DCERPC_REQUEST_LENGTH
+ ofs_sig = len(req_blob) - req.auth_length
+ ofs_trailer = ofs_sig - dcerpc.DCERPC_AUTH_TRAILER_LENGTH
+ req_data = req_blob[ofs_stub:ofs_trailer]
+ req_whole = req_blob[0:ofs_sig]
+ sig = g.sign_packet(req_data, req_whole)
+ auth_info = self.generate_auth(auth_type=auth_type,
+ auth_level=auth_level,
+ auth_pad_length=auth_pad_length,
+ auth_context_id=auth_context_id,
+ auth_blob=sig)
+ req = self.generate_request(call_id=5,
+ context_id=ctx1.context_id,
+ opnum=1,
+ stub=stub_bin,
+ auth_info=auth_info)
+ self.send_pdu(req)
+ (rep, rep_blob) = self.recv_pdu_raw()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_RESPONSE, req.call_id,
+ auth_length=sig_size)
+ self.assertNotEqual(rep.u.alloc_hint, 0)
+ self.assertEqual(rep.u.context_id, req.u.context_id & 0xff)
+ self.assertEqual(rep.u.cancel_count, 0)
+ self.assertGreaterEqual(len(rep.u.stub_and_verifier), rep.u.alloc_hint)
+ self.assertEqual(rep.auth_length, sig_size)
+
+ ofs_stub = dcerpc.DCERPC_REQUEST_LENGTH
+ ofs_sig = rep.frag_length - rep.auth_length
+ ofs_trailer = ofs_sig - dcerpc.DCERPC_AUTH_TRAILER_LENGTH
+ rep_data = rep_blob[ofs_stub:ofs_trailer]
+ rep_whole = rep_blob[0:ofs_sig]
+ rep_sig = rep_blob[ofs_sig:]
+ rep_auth_info_blob = rep_blob[ofs_trailer:]
+
+ rep_auth_info = self.parse_auth(rep_auth_info_blob)
+ self.assertEqual(rep_auth_info.auth_type, auth_type)
+ self.assertEqual(rep_auth_info.auth_level, auth_level)
+ self.assertEqual(rep_auth_info.auth_pad_length, 4)
+ self.assertEqual(rep_auth_info.auth_reserved, 0)
+ self.assertEqual(rep_auth_info.auth_context_id, auth_context_id)
+ self.assertEqual(rep_auth_info.credentials, rep_sig)
+
+ g.check_packet(rep_data, rep_whole, rep_sig)
+
+ # --- Request 4: opnum 3, 8-byte stub ---
+ stub_bin = b'\x00' * 8
+ mod_len = len(stub_bin) % dcerpc.DCERPC_AUTH_PAD_ALIGNMENT
+ auth_pad_length = 0
+ if mod_len > 0:
+ auth_pad_length = dcerpc.DCERPC_AUTH_PAD_ALIGNMENT - mod_len
+ stub_bin += b'\x00' * auth_pad_length
+
+ sig_size = g.sig_size(len(stub_bin))
+ zero_sig = b"\x00" * sig_size
+
+ auth_info = self.generate_auth(auth_type=auth_type,
+ auth_level=auth_level,
+ auth_pad_length=auth_pad_length,
+ auth_context_id=auth_context_id,
+ auth_blob=zero_sig)
+ req = self.generate_request(call_id=6,
+ context_id=ctx1.context_id,
+ opnum=3,
+ stub=stub_bin,
+ auth_info=auth_info)
+ req_blob = samba.ndr.ndr_pack(req)
+ ofs_stub = dcerpc.DCERPC_REQUEST_LENGTH
+ ofs_sig = len(req_blob) - req.auth_length
+ ofs_trailer = ofs_sig - dcerpc.DCERPC_AUTH_TRAILER_LENGTH
+ req_data = req_blob[ofs_stub:ofs_trailer]
+ req_whole = req_blob[0:ofs_sig]
+ sig = g.sign_packet(req_data, req_whole)
+ auth_info = self.generate_auth(auth_type=auth_type,
+ auth_level=auth_level,
+ auth_pad_length=auth_pad_length,
+ auth_context_id=auth_context_id,
+ auth_blob=sig)
+ req = self.generate_request(call_id=6,
+ context_id=ctx1.context_id,
+ opnum=3,
+ stub=stub_bin,
+ auth_info=auth_info)
+ self.send_pdu(req)
+ (rep, rep_blob) = self.recv_pdu_raw()
+ self.verify_pdu(rep, dcerpc.DCERPC_PKT_RESPONSE, req.call_id,
+ auth_length=sig_size)
+ self.assertNotEqual(rep.u.alloc_hint, 0)
+ self.assertEqual(rep.u.context_id, req.u.context_id & 0xff)
+ self.assertEqual(rep.u.cancel_count, 0)
+ self.assertGreaterEqual(len(rep.u.stub_and_verifier), rep.u.alloc_hint)
+ self.assertEqual(rep.auth_length, sig_size)
+
+ ofs_stub = dcerpc.DCERPC_REQUEST_LENGTH
+ ofs_sig = rep.frag_length - rep.auth_length
+ ofs_trailer = ofs_sig - dcerpc.DCERPC_AUTH_TRAILER_LENGTH
+ rep_data = rep_blob[ofs_stub:ofs_trailer]
+ rep_whole = rep_blob[0:ofs_sig]
+ rep_sig = rep_blob[ofs_sig:]
+ rep_auth_info_blob = rep_blob[ofs_trailer:]
+
+ rep_auth_info = self.parse_auth(rep_auth_info_blob)
+ self.assertEqual(rep_auth_info.auth_type, auth_type)
+ self.assertEqual(rep_auth_info.auth_level, auth_level)
+ self.assertEqual(rep_auth_info.auth_pad_length, 12)
+ self.assertEqual(rep_auth_info.auth_reserved, 0)
+ self.assertEqual(rep_auth_info.auth_context_id, auth_context_id)
+ self.assertEqual(rep_auth_info.credentials, rep_sig)
+
+ g.check_packet(rep_data, rep_whole, rep_sig)
+
+ def test_spnego_signing_packet(self):
+ # DCERPC_AUTH_LEVEL_PACKET is handled as alias of
+ # DCERPC_AUTH_LEVEL_INTEGRITY
+ # SPNEGO signed requests at PACKET level.
+ return self._test_auth_signing_auth_level_request(dcerpc.DCERPC_AUTH_TYPE_SPNEGO,
+ dcerpc.DCERPC_AUTH_LEVEL_PACKET)
+
+ def test_spnego_hdr_signing_packet(self):
+ # DCERPC_AUTH_LEVEL_PACKET is handled as alias of
+ # DCERPC_AUTH_LEVEL_INTEGRITY
+ # Same as above, but with header signing negotiated.
+ return self._test_auth_signing_auth_level_request(dcerpc.DCERPC_AUTH_TYPE_SPNEGO,
+ dcerpc.DCERPC_AUTH_LEVEL_PACKET,
+ hdr_sign=True)
+
+ def test_spnego_signing_integrity(self):
+ # SPNEGO signed requests at INTEGRITY level.
+ return self._test_auth_signing_auth_level_request(dcerpc.DCERPC_AUTH_TYPE_SPNEGO,
+ dcerpc.DCERPC_AUTH_LEVEL_INTEGRITY)
+
+ def test_spnego_hdr_signing_integrity(self):
+ # SPNEGO signed requests at INTEGRITY level with header signing.
+ return self._test_auth_signing_auth_level_request(dcerpc.DCERPC_AUTH_TYPE_SPNEGO,
+ dcerpc.DCERPC_AUTH_LEVEL_INTEGRITY,
+ hdr_sign=True)
+
+ def test_ntlm_signing_packet(self):
+ # DCERPC_AUTH_LEVEL_PACKET is handled as alias of
+ # DCERPC_AUTH_LEVEL_INTEGRITY
+ # NTLMSSP signed requests at PACKET level.
+ return self._test_auth_signing_auth_level_request(dcerpc.DCERPC_AUTH_TYPE_NTLMSSP,
+ dcerpc.DCERPC_AUTH_LEVEL_PACKET)
+
+ def test_ntlm_hdr_signing_packet(self):
+ # DCERPC_AUTH_LEVEL_PACKET is handled as alias of
+ # DCERPC_AUTH_LEVEL_INTEGRITY
+ # NTLMSSP at PACKET level with header signing.
+ return self._test_auth_signing_auth_level_request(dcerpc.DCERPC_AUTH_TYPE_NTLMSSP,
+ dcerpc.DCERPC_AUTH_LEVEL_PACKET,
+ hdr_sign=True)
+
+ def test_ntlm_signing_integrity(self):
+ # NTLMSSP signed requests at INTEGRITY level.
+ return self._test_auth_signing_auth_level_request(dcerpc.DCERPC_AUTH_TYPE_NTLMSSP,
+ dcerpc.DCERPC_AUTH_LEVEL_INTEGRITY)
+
+ def test_ntlm_hdr_signing_integrity(self):
+ # NTLMSSP at INTEGRITY level with header signing.
+ return self._test_auth_signing_auth_level_request(dcerpc.DCERPC_AUTH_TYPE_NTLMSSP,
+ dcerpc.DCERPC_AUTH_LEVEL_INTEGRITY,
+ hdr_sign=True)
+
+ def test_krb5_signing_packet(self):
+ # DCERPC_AUTH_LEVEL_PACKET is handled as alias of
+ # DCERPC_AUTH_LEVEL_INTEGRITY
+ # Kerberos signed requests at PACKET level.
+ return self._test_auth_signing_auth_level_request(dcerpc.DCERPC_AUTH_TYPE_KRB5,
+ dcerpc.DCERPC_AUTH_LEVEL_PACKET)
+
+ def test_krb5_hdr_signing_packet(self):
+ # DCERPC_AUTH_LEVEL_PACKET is handled as alias of
+ # DCERPC_AUTH_LEVEL_INTEGRITY
+ # Kerberos at PACKET level with header signing.
+ return self._test_auth_signing_auth_level_request(dcerpc.DCERPC_AUTH_TYPE_KRB5,
+ dcerpc.DCERPC_AUTH_LEVEL_PACKET,
+ hdr_sign=True)
+
+ def test_krb5_signing_integrity(self):
+ # Kerberos signed requests at INTEGRITY level.
+ return self._test_auth_signing_auth_level_request(dcerpc.DCERPC_AUTH_TYPE_KRB5,
+ dcerpc.DCERPC_AUTH_LEVEL_INTEGRITY)
+
+ def test_krb5_hdr_signing_integrity(self):
+ # Kerberos at INTEGRITY level with header signing.
+ return self._test_auth_signing_auth_level_request(dcerpc.DCERPC_AUTH_TYPE_KRB5,
+ dcerpc.DCERPC_AUTH_LEVEL_INTEGRITY,
+ hdr_sign=True)
+
+ def test_assoc_group_fail1(self):
+ # Binding with a made-up assoc_group_id (1) must be refused with a
+ # BIND_NAK (reason NOT_SPECIFIED).
+ abstract = samba.dcerpc.mgmt.abstract_syntax()
+ transfer = base.transfer_syntax_ndr()
+
+ tsf1_list = [transfer]
+ ctx = samba.dcerpc.dcerpc.ctx_list()
+ ctx.context_id = 1
+ ctx.num_transfer_syntaxes = len(tsf1_list)
+ ctx.abstract_syntax = abstract
+ ctx.transfer_syntaxes = tsf1_list
+
+ ack = self.do_generic_bind(ctx=ctx, assoc_group_id=1,
+ nak_reason=dcerpc.DCERPC_BIND_NAK_REASON_NOT_SPECIFIED)
+ return
+
+ def test_assoc_group_fail2(self):
+ # An assoc_group_id from a closed connection must not be reusable:
+ # bind, disconnect, reconnect, then bind with the stale id and
+ # expect a BIND_NAK.
+ abstract = samba.dcerpc.mgmt.abstract_syntax()
+ transfer = base.transfer_syntax_ndr()
+
+ tsf1_list = [transfer]
+ ctx = samba.dcerpc.dcerpc.ctx_list()
+ ctx.context_id = 1
+ ctx.num_transfer_syntaxes = len(tsf1_list)
+ ctx.abstract_syntax = abstract
+ ctx.transfer_syntaxes = tsf1_list
+
+ ack = self.do_generic_bind(ctx=ctx)
+
+ self._disconnect("test_assoc_group_fail2")
+ self.assertNotConnected()
+ # Give the server a moment to tear the association down.
+ time.sleep(0.5)
+ self.connect()
+
+ ack2 = self.do_generic_bind(ctx=ctx, assoc_group_id=ack.u.assoc_group_id,
+ nak_reason=dcerpc.DCERPC_BIND_NAK_REASON_NOT_SPECIFIED)
+ return
+
+ def test_assoc_group_diff1(self):
+ # Two independent binds on two connections must get different
+ # assoc_group_ids.
+ abstract = samba.dcerpc.mgmt.abstract_syntax()
+ transfer = base.transfer_syntax_ndr()
+
+ (ctx1, ack1) = self.prepare_presentation(abstract, transfer,
+ context_id=1, return_ack=True)
+
+ conn2 = self.second_connection()
+ (ctx2, ack2) = conn2.prepare_presentation(abstract, transfer,
+ context_id=2, return_ack=True)
+ self.assertNotEqual(ack2.u.assoc_group_id, ack1.u.assoc_group_id)
+
+ conn2._disconnect("End of Test")
+ return
+
+ def test_assoc_group_ok1(self):
+ # A second connection may join the first connection's assoc group;
+ # presentation contexts stay per-connection: using ctx1 on conn2
+ # faults with NCA_S_UNKNOWN_IF, and both connections keep working
+ # afterwards.
+ abstract = samba.dcerpc.mgmt.abstract_syntax()
+ transfer = base.transfer_syntax_ndr()
+
+ (ctx1, ack1) = self.prepare_presentation(abstract, transfer,
+ context_id=1, return_ack=True)
+
+ conn2 = self.second_connection()
+ (ctx2, ack2) = conn2.prepare_presentation(abstract, transfer,
+ assoc_group_id=ack1.u.assoc_group_id,
+ context_id=2, return_ack=True)
+
+ inq_if_ids = samba.dcerpc.mgmt.inq_if_ids()
+ self.do_single_request(call_id=1, ctx=ctx1, io=inq_if_ids)
+ conn2.do_single_request(call_id=1, ctx=ctx2, io=inq_if_ids)
+
+ # ctx1 was negotiated on the first connection only.
+ conn2.do_single_request(call_id=1, ctx=ctx1, io=inq_if_ids,
+ fault_pfc_flags=(
+ samba.dcerpc.dcerpc.DCERPC_PFC_FLAG_FIRST |
+ samba.dcerpc.dcerpc.DCERPC_PFC_FLAG_LAST |
+ samba.dcerpc.dcerpc.DCERPC_PFC_FLAG_DID_NOT_EXECUTE),
+ fault_status=dcerpc.DCERPC_NCA_S_UNKNOWN_IF,
+ fault_context_id=0)
+
+ self.do_single_request(call_id=1, ctx=ctx1, io=inq_if_ids)
+ conn2.do_single_request(call_id=1, ctx=ctx2, io=inq_if_ids)
+ conn2._disconnect("End of Test")
+ return
+
+ def test_assoc_group_ok2(self):
+ # Same as test_assoc_group_ok1, but over the SMB named-pipe
+ # transport (\pipe\lsarpc) instead of TCP.
+ abstract = samba.dcerpc.mgmt.abstract_syntax()
+ transfer = base.transfer_syntax_ndr()
+
+ self.reconnect_smb_pipe(primary_address='\\pipe\\lsarpc',
+ secondary_address='\\pipe\\lsass',
+ transport_creds=self.get_user_creds())
+ (ctx1, ack1) = self.prepare_presentation(abstract, transfer,
+ context_id=1, return_ack=True)
+
+ conn2 = self.second_connection()
+ (ctx2, ack2) = conn2.prepare_presentation(abstract, transfer,
+ assoc_group_id=ack1.u.assoc_group_id,
+ context_id=2, return_ack=True)
+
+ inq_if_ids = samba.dcerpc.mgmt.inq_if_ids()
+ self.do_single_request(call_id=1, ctx=ctx1, io=inq_if_ids)
+ conn2.do_single_request(call_id=1, ctx=ctx2, io=inq_if_ids)
+
+ # Presentation contexts do not cross connections.
+ conn2.do_single_request(call_id=1, ctx=ctx1, io=inq_if_ids,
+ fault_pfc_flags=(
+ samba.dcerpc.dcerpc.DCERPC_PFC_FLAG_FIRST |
+ samba.dcerpc.dcerpc.DCERPC_PFC_FLAG_LAST |
+ samba.dcerpc.dcerpc.DCERPC_PFC_FLAG_DID_NOT_EXECUTE),
+ fault_status=dcerpc.DCERPC_NCA_S_UNKNOWN_IF,
+ fault_context_id=0)
+
+ self.do_single_request(call_id=1, ctx=ctx1, io=inq_if_ids)
+ conn2.do_single_request(call_id=1, ctx=ctx2, io=inq_if_ids)
+ conn2._disconnect("End of Test")
+ return
+
+ def test_assoc_group_fail3(self):
+ # Assoc groups are per transport: a TCP assoc_group_id presented on
+ # an SMB pipe connection gets a BIND_NAK and a disconnect, while the
+ # same id on another TCP connection is accepted.
+ abstract = samba.dcerpc.mgmt.abstract_syntax()
+ transfer = base.transfer_syntax_ndr()
+
+ (ctx1, ack1) = self.prepare_presentation(abstract, transfer,
+ context_id=1, return_ack=True)
+
+ # assoc groups are per transport
+ connF = self.second_connection(primary_address="\\pipe\\lsarpc",
+ secondary_address="\\pipe\\lsass",
+ transport_creds=self.get_user_creds())
+ tsfF_list = [transfer]
+ ctxF = samba.dcerpc.dcerpc.ctx_list()
+ ctxF.context_id = 0xF
+ ctxF.num_transfer_syntaxes = len(tsfF_list)
+ ctxF.abstract_syntax = abstract
+ ctxF.transfer_syntaxes = tsfF_list
+ ack = connF.do_generic_bind(ctx=ctxF, assoc_group_id=ack1.u.assoc_group_id,
+ nak_reason=dcerpc.DCERPC_BIND_NAK_REASON_NOT_SPECIFIED)
+ # wait for a disconnect
+ rep = connF.recv_pdu()
+ self.assertIsNone(rep)
+ connF.assertNotConnected()
+
+ # The same assoc_group_id on a second TCP connection works.
+ conn2 = self.second_connection()
+ (ctx2, ack2) = conn2.prepare_presentation(abstract, transfer,
+ assoc_group_id=ack1.u.assoc_group_id,
+ context_id=2, return_ack=True)
+
+ inq_if_ids = samba.dcerpc.mgmt.inq_if_ids()
+ self.do_single_request(call_id=1, ctx=ctx1, io=inq_if_ids)
+ conn2.do_single_request(call_id=1, ctx=ctx2, io=inq_if_ids)
+
+ # Presentation contexts still do not cross connections.
+ conn2.do_single_request(call_id=1, ctx=ctx1, io=inq_if_ids,
+ fault_pfc_flags=(
+ samba.dcerpc.dcerpc.DCERPC_PFC_FLAG_FIRST |
+ samba.dcerpc.dcerpc.DCERPC_PFC_FLAG_LAST |
+ samba.dcerpc.dcerpc.DCERPC_PFC_FLAG_DID_NOT_EXECUTE),
+ fault_status=dcerpc.DCERPC_NCA_S_UNKNOWN_IF,
+ fault_context_id=0)
+
+ self.do_single_request(call_id=1, ctx=ctx1, io=inq_if_ids)
+ conn2.do_single_request(call_id=1, ctx=ctx2, io=inq_if_ids)
+ conn2._disconnect("End of Test")
+ return
+
+ def _test_krb5_hdr_sign_delayed1(self, do_upgrade):
+ # Helper: establish a Kerberos-signed connection WITHOUT header
+ # signing, then send an unauthenticated alter context with
+ # SUPPORT_HEADER_SIGN, which turns header signing on for the whole
+ # connection. If do_upgrade is True the local gensec context is
+ # upgraded to match and further requests succeed; otherwise the next
+ # request faults with SEC_PKG_ERROR and the server disconnects.
+ auth_type = dcerpc.DCERPC_AUTH_TYPE_KRB5
+ auth_level = dcerpc.DCERPC_AUTH_LEVEL_INTEGRITY
+ auth_context_id = 1
+
+ creds = self.get_user_creds()
+
+ abstract = samba.dcerpc.mgmt.abstract_syntax()
+ transfer = base.transfer_syntax_ndr()
+
+ tsf1_list = [transfer]
+ ctx = samba.dcerpc.dcerpc.ctx_list()
+ ctx.context_id = 1
+ ctx.num_transfer_syntaxes = len(tsf1_list)
+ ctx.abstract_syntax = abstract
+ ctx.transfer_syntaxes = tsf1_list
+
+ auth_context = self.get_auth_context_creds(creds=creds,
+ auth_type=auth_type,
+ auth_level=auth_level,
+ auth_context_id=auth_context_id,
+ hdr_signing=False)
+
+ ack = self.do_generic_bind(call_id=1,
+ ctx=ctx,
+ auth_context=auth_context)
+
+ inq_if_ids = samba.dcerpc.mgmt.inq_if_ids()
+ self.do_single_request(call_id=2, ctx=ctx, io=inq_if_ids,
+ auth_context=auth_context)
+
+ #
+ # This is just an alter context without authentication
+ # But it can turn on header signing for the whole connection
+ #
+ ack2 = self.do_generic_bind(call_id=3, ctx=ctx,
+ pfc_flags=dcerpc.DCERPC_PFC_FLAG_FIRST |
+ dcerpc.DCERPC_PFC_FLAG_LAST |
+ dcerpc.DCERPC_PFC_FLAG_SUPPORT_HEADER_SIGN,
+ assoc_group_id = ack.u.assoc_group_id,
+ start_with_alter=True)
+
+ self.assertFalse(auth_context['hdr_signing'])
+ if do_upgrade:
+ # Bring the client-side gensec in line with the connection.
+ auth_context['hdr_signing'] = True
+ auth_context["gensec"].want_feature(gensec.FEATURE_SIGN_PKT_HEADER)
+ fault_status=None
+ else:
+ fault_status=dcerpc.DCERPC_FAULT_SEC_PKG_ERROR
+
+ self.do_single_request(call_id=4, ctx=ctx, io=inq_if_ids,
+ auth_context=auth_context,
+ fault_status=fault_status)
+
+ if fault_status is not None:
+ # wait for a disconnect
+ rep = self.recv_pdu()
+ self.assertIsNone(rep)
+ self.assertNotConnected()
+ return
+
+ self.do_single_request(call_id=5, ctx=ctx, io=inq_if_ids,
+ auth_context=auth_context)
+ return
+
+ def test_krb5_hdr_sign_delayed1_ok1(self):
+ return self._test_krb5_hdr_sign_delayed1(do_upgrade=True)
+
+ def test_krb5_hdr_sign_delayed1_fail1(self):
+ return self._test_krb5_hdr_sign_delayed1(do_upgrade=False)
+
+    def _test_krb5_hdr_sign_delayed2(self, do_upgrade):
+        """Header signing negotiated on the 2nd leg of the bind itself.
+
+        Like _test_krb5_hdr_sign_delayed1, but PFC_SUPPORT_HEADER_SIGN is
+        carried on the second (alter context) PDU of the authenticated
+        bind, which activates header signing.  do_upgrade selects whether
+        the client follows suit (requests succeed) or not
+        (DCERPC_FAULT_SEC_PKG_ERROR and a disconnect).
+        """
+        auth_type = dcerpc.DCERPC_AUTH_TYPE_KRB5
+        auth_level = dcerpc.DCERPC_AUTH_LEVEL_INTEGRITY
+        auth_context_id = 1
+
+        creds = self.get_user_creds()
+
+        abstract = samba.dcerpc.mgmt.abstract_syntax()
+        transfer = base.transfer_syntax_ndr()
+
+        tsf1_list = [transfer]
+        ctx = samba.dcerpc.dcerpc.ctx_list()
+        ctx.context_id = 1
+        ctx.num_transfer_syntaxes = len(tsf1_list)
+        ctx.abstract_syntax = abstract
+        ctx.transfer_syntaxes = tsf1_list
+
+        auth_context = self.get_auth_context_creds(creds=creds,
+                                                   auth_type=auth_type,
+                                                   auth_level=auth_level,
+                                                   auth_context_id=auth_context_id,
+                                                   hdr_signing=False)
+
+        #
+        # SUPPORT_HEADER_SIGN on alter context activates header signing
+        #
+        ack = self.do_generic_bind(call_id=1,
+                                   ctx=ctx,
+                                   auth_context=auth_context,
+                                   pfc_flags_2nd=dcerpc.DCERPC_PFC_FLAG_FIRST |
+                                   dcerpc.DCERPC_PFC_FLAG_LAST |
+                                   dcerpc.DCERPC_PFC_FLAG_SUPPORT_HEADER_SIGN)
+
+        self.assertFalse(auth_context['hdr_signing'])
+        if do_upgrade:
+            # Mirror the connection-wide upgrade in our gensec context.
+            auth_context['hdr_signing'] = True
+            auth_context["gensec"].want_feature(gensec.FEATURE_SIGN_PKT_HEADER)
+            fault_status=None
+        else:
+            fault_status=dcerpc.DCERPC_FAULT_SEC_PKG_ERROR
+
+        inq_if_ids = samba.dcerpc.mgmt.inq_if_ids()
+        self.do_single_request(call_id=4, ctx=ctx, io=inq_if_ids,
+                               auth_context=auth_context,
+                               fault_status=fault_status)
+
+        if fault_status is not None:
+            # wait for a disconnect
+            rep = self.recv_pdu()
+            self.assertIsNone(rep)
+            self.assertNotConnected()
+            return
+
+        self.do_single_request(call_id=5, ctx=ctx, io=inq_if_ids,
+                               auth_context=auth_context)
+        return
+
+ def test_krb5_hdr_sign_delayed2_ok1(self):
+ return self._test_krb5_hdr_sign_delayed2(do_upgrade=True)
+
+ def test_krb5_hdr_sign_delayed2_fail1(self):
+ return self._test_krb5_hdr_sign_delayed2(do_upgrade=False)
+
+    def test_krb5_hdr_sign_delayed3_fail1(self):
+        """Header signing cannot be activated via the auth3 PDU.
+
+        PFC_SUPPORT_HEADER_SIGN on the auth3 leg of the bind does not turn
+        on header signing, so a client that upgrades unilaterally gets
+        DCERPC_FAULT_SEC_PKG_ERROR on its next request and the connection
+        is dropped.
+        """
+        auth_type = dcerpc.DCERPC_AUTH_TYPE_KRB5
+        auth_level = dcerpc.DCERPC_AUTH_LEVEL_INTEGRITY
+        auth_context_id = 1
+
+        creds = self.get_user_creds()
+
+        abstract = samba.dcerpc.mgmt.abstract_syntax()
+        transfer = base.transfer_syntax_ndr()
+
+        tsf1_list = [transfer]
+        ctx = samba.dcerpc.dcerpc.ctx_list()
+        ctx.context_id = 1
+        ctx.num_transfer_syntaxes = len(tsf1_list)
+        ctx.abstract_syntax = abstract
+        ctx.transfer_syntaxes = tsf1_list
+
+        auth_context = self.get_auth_context_creds(creds=creds,
+                                                   auth_type=auth_type,
+                                                   auth_level=auth_level,
+                                                   auth_context_id=auth_context_id,
+                                                   hdr_signing=False)
+
+        #
+        # SUPPORT_HEADER_SIGN on auth3 doesn't activate header signing
+        #
+        ack = self.do_generic_bind(call_id=1,
+                                   ctx=ctx,
+                                   auth_context=auth_context,
+                                   pfc_flags_2nd=dcerpc.DCERPC_PFC_FLAG_FIRST |
+                                   dcerpc.DCERPC_PFC_FLAG_LAST |
+                                   dcerpc.DCERPC_PFC_FLAG_SUPPORT_HEADER_SIGN,
+                                   use_auth3=True)
+
+        inq_if_ids = samba.dcerpc.mgmt.inq_if_ids()
+        self.do_single_request(call_id=2, ctx=ctx, io=inq_if_ids,
+                               auth_context=auth_context)
+
+        self.assertFalse(auth_context['hdr_signing'])
+        # Upgrade only on the client side; the server didn't accept it.
+        auth_context['hdr_signing'] = True
+        auth_context["gensec"].want_feature(gensec.FEATURE_SIGN_PKT_HEADER)
+        fault_status=dcerpc.DCERPC_FAULT_SEC_PKG_ERROR
+
+        self.do_single_request(call_id=4, ctx=ctx, io=inq_if_ids,
+                               auth_context=auth_context,
+                               fault_status=fault_status)
+
+        # wait for a disconnect
+        rep = self.recv_pdu()
+        self.assertIsNone(rep)
+        self.assertNotConnected()
+        return
+
+    def _test_lsa_multi_auth_connect1(self, smb_creds,
+                                      account_name0, authority_name0):
+        """Default-context selection with explicit AUTH_LEVEL_CONNECT binds.
+
+        Over an SMB-transported lsarpc pipe (authenticated as smb_creds,
+        whose identity is account_name0/authority_name0) bind an anonymous
+        and a user NTLMSSP context, both AUTH_LEVEL_CONNECT, and use
+        lsa.GetUserName to observe which identity serves requests that
+        specify no auth context:  with a single explicit CONNECT context
+        that one is the default; once a second exists, the transport
+        identity becomes the default again.
+        """
+        creds1 = self.get_anon_creds()
+        account_name1 = "ANONYMOUS LOGON"
+        authority_name1 = "NT AUTHORITY"
+        auth_type1 = dcerpc.DCERPC_AUTH_TYPE_NTLMSSP
+        auth_level1 = dcerpc.DCERPC_AUTH_LEVEL_CONNECT
+        auth_context_id1 = 1
+
+        creds2 = self.get_user_creds()
+        account_name2 = creds2.get_username()
+        authority_name2 = creds2.get_domain()
+        auth_type2 = dcerpc.DCERPC_AUTH_TYPE_NTLMSSP
+        auth_level2 = dcerpc.DCERPC_AUTH_LEVEL_CONNECT
+        auth_context_id2 = 2
+
+        abstract = samba.dcerpc.lsa.abstract_syntax()
+        transfer = base.transfer_syntax_ndr()
+
+        self.reconnect_smb_pipe(primary_address='\\pipe\\lsarpc',
+                                secondary_address='\\pipe\\lsass',
+                                transport_creds=smb_creds)
+        self.assertIsConnected()
+
+        tsf1_list = [transfer]
+        ctx1 = samba.dcerpc.dcerpc.ctx_list()
+        ctx1.context_id = 1
+        ctx1.num_transfer_syntaxes = len(tsf1_list)
+        ctx1.abstract_syntax = abstract
+        ctx1.transfer_syntaxes = tsf1_list
+
+        auth_context1 = self.get_auth_context_creds(creds=creds1,
+                                                    auth_type=auth_type1,
+                                                    auth_level=auth_level1,
+                                                    auth_context_id=auth_context_id1,
+                                                    hdr_signing=False)
+        auth_context2 = self.get_auth_context_creds(creds=creds2,
+                                                    auth_type=auth_type2,
+                                                    auth_level=auth_level2,
+                                                    auth_context_id=auth_context_id2,
+                                                    hdr_signing=False)
+
+        get_user_name = samba.dcerpc.lsa.GetUserName()
+        get_user_name.in_system_name = self.target_hostname
+        get_user_name.in_account_name = None
+        get_user_name.in_authority_name = base.ndr_pointer(None)
+
+        ack1 = self.do_generic_bind(call_id=0,
+                                    ctx=ctx1,
+                                    auth_context=auth_context1)
+
+        #
+        # With just one explicit auth context and that
+        # uses AUTH_LEVEL_CONNECT context.
+        #
+        # We always get that by default instead of the one default one
+        # inherited from the transport
+        #
+        self.do_single_request(call_id=1, ctx=ctx1, io=get_user_name)
+        self.assertEqual(get_user_name.result[0], NT_STATUS_SUCCESS)
+        self.assertEqualsStrLower(get_user_name.out_account_name, account_name1)
+        self.assertEqualsStrLower(get_user_name.out_authority_name.value, authority_name1)
+
+        self.do_single_request(call_id=2, ctx=ctx1, io=get_user_name,
+                               auth_context=auth_context1)
+        self.assertEqual(get_user_name.result[0], NT_STATUS_SUCCESS)
+        self.assertEqualsStrLower(get_user_name.out_account_name, account_name1)
+        self.assertEqualsStrLower(get_user_name.out_authority_name.value, authority_name1)
+
+        ack2 = self.do_generic_bind(call_id=3,
+                                    ctx=ctx1,
+                                    auth_context=auth_context2,
+                                    assoc_group_id = ack1.u.assoc_group_id,
+                                    start_with_alter=True)
+
+        #
+        # Now we have two explicit auth contexts
+        #
+        # If we don't specify one of them we get the default one
+        # inherited from the transport
+        #
+        self.do_single_request(call_id=4, ctx=ctx1, io=get_user_name)
+        self.assertEqual(get_user_name.result[0], NT_STATUS_SUCCESS)
+        self.assertEqualsStrLower(get_user_name.out_account_name, account_name0)
+        self.assertEqualsStrLower(get_user_name.out_authority_name.value, authority_name0)
+
+        self.do_single_request(call_id=5, ctx=ctx1, io=get_user_name,
+                               auth_context=auth_context1)
+        self.assertEqual(get_user_name.result[0], NT_STATUS_SUCCESS)
+        self.assertEqualsStrLower(get_user_name.out_account_name, account_name1)
+        self.assertEqualsStrLower(get_user_name.out_authority_name.value, authority_name1)
+
+        self.do_single_request(call_id=6, ctx=ctx1, io=get_user_name,
+                               auth_context=auth_context2)
+        self.assertEqual(get_user_name.result[0], NT_STATUS_SUCCESS)
+        self.assertEqualsStrLower(get_user_name.out_account_name, account_name2)
+        self.assertEqualsStrLower(get_user_name.out_authority_name.value, authority_name2)
+
+        self.do_single_request(call_id=7, ctx=ctx1, io=get_user_name)
+        self.assertEqual(get_user_name.result[0], NT_STATUS_SUCCESS)
+        self.assertEqualsStrLower(get_user_name.out_account_name, account_name0)
+        self.assertEqualsStrLower(get_user_name.out_authority_name.value, authority_name0)
+
+        return
+
+ def test_lsa_multi_auth_connect1u(self):
+ smb_auth_creds = self.get_user_creds()
+ account_name0 = smb_auth_creds.get_username()
+ authority_name0 = smb_auth_creds.get_domain()
+ return self._test_lsa_multi_auth_connect1(smb_auth_creds,
+ account_name0,
+ authority_name0)
+
+ def test_lsa_multi_auth_connect1a(self):
+ smb_auth_creds = self.get_anon_creds()
+ account_name0 = "ANONYMOUS LOGON"
+ authority_name0 = "NT AUTHORITY"
+ return self._test_lsa_multi_auth_connect1(smb_auth_creds,
+ account_name0,
+ authority_name0)
+
+    def _test_lsa_multi_auth_connect2(self, smb_creds,
+                                      account_name0, authority_name0):
+        """Like _test_lsa_multi_auth_connect1, but the first bind carries
+        no authentication, so requests initially run as the transport
+        identity (account_name0/authority_name0).  The anonymous and user
+        NTLMSSP AUTH_LEVEL_CONNECT contexts are then added with alter
+        context PDUs, and lsa.GetUserName is used to observe the default
+        identity after each step.
+        """
+        creds1 = self.get_anon_creds()
+        account_name1 = "ANONYMOUS LOGON"
+        authority_name1 = "NT AUTHORITY"
+        auth_type1 = dcerpc.DCERPC_AUTH_TYPE_NTLMSSP
+        auth_level1 = dcerpc.DCERPC_AUTH_LEVEL_CONNECT
+        auth_context_id1 = 1
+
+        creds2 = self.get_user_creds()
+        account_name2 = creds2.get_username()
+        authority_name2 = creds2.get_domain()
+        auth_type2 = dcerpc.DCERPC_AUTH_TYPE_NTLMSSP
+        auth_level2 = dcerpc.DCERPC_AUTH_LEVEL_CONNECT
+        auth_context_id2 = 2
+
+        abstract = samba.dcerpc.lsa.abstract_syntax()
+        transfer = base.transfer_syntax_ndr()
+
+        self.reconnect_smb_pipe(primary_address='\\pipe\\lsarpc',
+                                secondary_address='\\pipe\\lsass',
+                                transport_creds=smb_creds)
+        self.assertIsConnected()
+
+        tsf1_list = [transfer]
+        ctx1 = samba.dcerpc.dcerpc.ctx_list()
+        ctx1.context_id = 1
+        ctx1.num_transfer_syntaxes = len(tsf1_list)
+        ctx1.abstract_syntax = abstract
+        ctx1.transfer_syntaxes = tsf1_list
+
+        auth_context1 = self.get_auth_context_creds(creds=creds1,
+                                                    auth_type=auth_type1,
+                                                    auth_level=auth_level1,
+                                                    auth_context_id=auth_context_id1,
+                                                    hdr_signing=False)
+        auth_context2 = self.get_auth_context_creds(creds=creds2,
+                                                    auth_type=auth_type2,
+                                                    auth_level=auth_level2,
+                                                    auth_context_id=auth_context_id2,
+                                                    hdr_signing=False)
+
+        get_user_name = samba.dcerpc.lsa.GetUserName()
+        get_user_name.in_system_name = self.target_hostname
+        get_user_name.in_account_name = None
+        get_user_name.in_authority_name = base.ndr_pointer(None)
+
+        ack0 = self.do_generic_bind(call_id=0, ctx=ctx1)
+
+        #
+        # We use the default auth context
+        # inherited from the transport
+        #
+        self.do_single_request(call_id=1, ctx=ctx1, io=get_user_name)
+        self.assertEqual(get_user_name.result[0], NT_STATUS_SUCCESS)
+        self.assertEqualsStrLower(get_user_name.out_account_name, account_name0)
+        self.assertEqualsStrLower(get_user_name.out_authority_name.value, authority_name0)
+
+        ack1 = self.do_generic_bind(call_id=2,
+                                    ctx=ctx1,
+                                    auth_context=auth_context1,
+                                    assoc_group_id = ack0.u.assoc_group_id,
+                                    start_with_alter=True)
+
+        #
+        # With just one explicit auth context and that
+        # uses AUTH_LEVEL_CONNECT context.
+        #
+        # We always get that by default instead of the one default one
+        # inherited from the transport
+        #
+        self.do_single_request(call_id=3, ctx=ctx1, io=get_user_name)
+        self.assertEqual(get_user_name.result[0], NT_STATUS_SUCCESS)
+        self.assertEqualsStrLower(get_user_name.out_account_name, account_name1)
+        self.assertEqualsStrLower(get_user_name.out_authority_name.value, authority_name1)
+
+        self.do_single_request(call_id=4, ctx=ctx1, io=get_user_name,
+                               auth_context=auth_context1)
+        self.assertEqual(get_user_name.result[0], NT_STATUS_SUCCESS)
+        self.assertEqualsStrLower(get_user_name.out_account_name, account_name1)
+        self.assertEqualsStrLower(get_user_name.out_authority_name.value, authority_name1)
+
+        ack2 = self.do_generic_bind(call_id=5,
+                                    ctx=ctx1,
+                                    auth_context=auth_context2,
+                                    assoc_group_id = ack0.u.assoc_group_id,
+                                    start_with_alter=True)
+
+        #
+        # Now we have two explicit auth contexts
+        #
+        # If we don't specify one of them we get the default one
+        # inherited from the transport (again)
+        #
+        self.do_single_request(call_id=6, ctx=ctx1, io=get_user_name)
+        self.assertEqual(get_user_name.result[0], NT_STATUS_SUCCESS)
+        self.assertEqualsStrLower(get_user_name.out_account_name, account_name0)
+        self.assertEqualsStrLower(get_user_name.out_authority_name.value, authority_name0)
+
+        self.do_single_request(call_id=7, ctx=ctx1, io=get_user_name,
+                               auth_context=auth_context1)
+        self.assertEqual(get_user_name.result[0], NT_STATUS_SUCCESS)
+        self.assertEqualsStrLower(get_user_name.out_account_name, account_name1)
+        self.assertEqualsStrLower(get_user_name.out_authority_name.value, authority_name1)
+
+        self.do_single_request(call_id=8, ctx=ctx1, io=get_user_name,
+                               auth_context=auth_context2)
+        self.assertEqual(get_user_name.result[0], NT_STATUS_SUCCESS)
+        self.assertEqualsStrLower(get_user_name.out_account_name, account_name2)
+        self.assertEqualsStrLower(get_user_name.out_authority_name.value, authority_name2)
+
+        self.do_single_request(call_id=9, ctx=ctx1, io=get_user_name)
+        self.assertEqual(get_user_name.result[0], NT_STATUS_SUCCESS)
+        self.assertEqualsStrLower(get_user_name.out_account_name, account_name0)
+        self.assertEqualsStrLower(get_user_name.out_authority_name.value, authority_name0)
+
+        return
+
+ def test_lsa_multi_auth_connect2u(self):
+ smb_auth_creds = self.get_user_creds()
+ account_name0 = smb_auth_creds.get_username()
+ authority_name0 = smb_auth_creds.get_domain()
+ return self._test_lsa_multi_auth_connect2(smb_auth_creds,
+ account_name0,
+ authority_name0)
+
+ def test_lsa_multi_auth_connect2a(self):
+ smb_auth_creds = self.get_anon_creds()
+ account_name0 = "ANONYMOUS LOGON"
+ authority_name0 = "NT AUTHORITY"
+ return self._test_lsa_multi_auth_connect2(smb_auth_creds,
+ account_name0,
+ authority_name0)
+
+    def _test_lsa_multi_auth_connect3(self, smb_creds,
+                                      account_name0, authority_name0):
+        """Like _test_lsa_multi_auth_connect2, but additionally checks that
+        the "single CONNECT context is the implicit default" mode ends as
+        soon as that context is used explicitly, and that adding another
+        auth context afterwards does not re-enable the mode.
+        """
+        creds1 = self.get_anon_creds()
+        account_name1 = "ANONYMOUS LOGON"
+        authority_name1 = "NT AUTHORITY"
+        auth_type1 = dcerpc.DCERPC_AUTH_TYPE_NTLMSSP
+        auth_level1 = dcerpc.DCERPC_AUTH_LEVEL_CONNECT
+        auth_context_id1 = 1
+
+        creds2 = self.get_user_creds()
+        account_name2 = creds2.get_username()
+        authority_name2 = creds2.get_domain()
+        auth_type2 = dcerpc.DCERPC_AUTH_TYPE_NTLMSSP
+        auth_level2 = dcerpc.DCERPC_AUTH_LEVEL_CONNECT
+        auth_context_id2 = 2
+
+        abstract = samba.dcerpc.lsa.abstract_syntax()
+        transfer = base.transfer_syntax_ndr()
+
+        self.reconnect_smb_pipe(primary_address='\\pipe\\lsarpc',
+                                secondary_address='\\pipe\\lsass',
+                                transport_creds=smb_creds)
+        self.assertIsConnected()
+
+        tsf1_list = [transfer]
+        ctx1 = samba.dcerpc.dcerpc.ctx_list()
+        ctx1.context_id = 1
+        ctx1.num_transfer_syntaxes = len(tsf1_list)
+        ctx1.abstract_syntax = abstract
+        ctx1.transfer_syntaxes = tsf1_list
+
+        auth_context1 = self.get_auth_context_creds(creds=creds1,
+                                                    auth_type=auth_type1,
+                                                    auth_level=auth_level1,
+                                                    auth_context_id=auth_context_id1,
+                                                    hdr_signing=False)
+        auth_context2 = self.get_auth_context_creds(creds=creds2,
+                                                    auth_type=auth_type2,
+                                                    auth_level=auth_level2,
+                                                    auth_context_id=auth_context_id2,
+                                                    hdr_signing=False)
+
+        get_user_name = samba.dcerpc.lsa.GetUserName()
+        get_user_name.in_system_name = self.target_hostname
+        get_user_name.in_account_name = None
+        get_user_name.in_authority_name = base.ndr_pointer(None)
+
+        ack0 = self.do_generic_bind(call_id=0, ctx=ctx1)
+
+        #
+        # We use the default auth context
+        # inherited from the transport
+        #
+        self.do_single_request(call_id=1, ctx=ctx1, io=get_user_name)
+        self.assertEqual(get_user_name.result[0], NT_STATUS_SUCCESS)
+        self.assertEqualsStrLower(get_user_name.out_account_name, account_name0)
+        self.assertEqualsStrLower(get_user_name.out_authority_name.value, authority_name0)
+
+        ack1 = self.do_generic_bind(call_id=2,
+                                    ctx=ctx1,
+                                    auth_context=auth_context1,
+                                    assoc_group_id = ack0.u.assoc_group_id,
+                                    start_with_alter=True)
+
+        #
+        # With just one explicit auth context and that
+        # uses AUTH_LEVEL_CONNECT context.
+        #
+        # We always get that by default instead of the one default one
+        # inherited from the transport
+        #
+        # Until an explicit usage resets that mode
+        #
+        self.do_single_request(call_id=3, ctx=ctx1, io=get_user_name)
+        self.assertEqual(get_user_name.result[0], NT_STATUS_SUCCESS)
+        self.assertEqualsStrLower(get_user_name.out_account_name, account_name1)
+        self.assertEqualsStrLower(get_user_name.out_authority_name.value, authority_name1)
+
+        self.do_single_request(call_id=4, ctx=ctx1, io=get_user_name)
+        self.assertEqual(get_user_name.result[0], NT_STATUS_SUCCESS)
+        self.assertEqualsStrLower(get_user_name.out_account_name, account_name1)
+        self.assertEqualsStrLower(get_user_name.out_authority_name.value, authority_name1)
+
+        self.do_single_request(call_id=5, ctx=ctx1, io=get_user_name,
+                               auth_context=auth_context1)
+        self.assertEqual(get_user_name.result[0], NT_STATUS_SUCCESS)
+        self.assertEqualsStrLower(get_user_name.out_account_name, account_name1)
+        self.assertEqualsStrLower(get_user_name.out_authority_name.value, authority_name1)
+
+        self.do_single_request(call_id=6, ctx=ctx1, io=get_user_name)
+        self.assertEqual(get_user_name.result[0], NT_STATUS_SUCCESS)
+        self.assertEqualsStrLower(get_user_name.out_account_name, account_name0)
+        self.assertEqualsStrLower(get_user_name.out_authority_name.value, authority_name0)
+
+        ack2 = self.do_generic_bind(call_id=7,
+                                    ctx=ctx1,
+                                    auth_context=auth_context2,
+                                    assoc_group_id = ack0.u.assoc_group_id,
+                                    start_with_alter=True)
+        #
+        # A new auth context won't change that mode again.
+        #
+        self.do_single_request(call_id=8, ctx=ctx1, io=get_user_name)
+        self.assertEqual(get_user_name.result[0], NT_STATUS_SUCCESS)
+        self.assertEqualsStrLower(get_user_name.out_account_name, account_name0)
+        self.assertEqualsStrLower(get_user_name.out_authority_name.value, authority_name0)
+
+        self.do_single_request(call_id=9, ctx=ctx1, io=get_user_name,
+                               auth_context=auth_context1)
+        self.assertEqual(get_user_name.result[0], NT_STATUS_SUCCESS)
+        self.assertEqualsStrLower(get_user_name.out_account_name, account_name1)
+        self.assertEqualsStrLower(get_user_name.out_authority_name.value, authority_name1)
+
+        self.do_single_request(call_id=10, ctx=ctx1, io=get_user_name,
+                               auth_context=auth_context2)
+        self.assertEqual(get_user_name.result[0], NT_STATUS_SUCCESS)
+        self.assertEqualsStrLower(get_user_name.out_account_name, account_name2)
+        self.assertEqualsStrLower(get_user_name.out_authority_name.value, authority_name2)
+
+        self.do_single_request(call_id=11, ctx=ctx1, io=get_user_name)
+        self.assertEqual(get_user_name.result[0], NT_STATUS_SUCCESS)
+        self.assertEqualsStrLower(get_user_name.out_account_name, account_name0)
+        self.assertEqualsStrLower(get_user_name.out_authority_name.value, authority_name0)
+
+        return
+
+ def test_lsa_multi_auth_connect3u(self):
+ smb_auth_creds = self.get_user_creds()
+ account_name0 = smb_auth_creds.get_username()
+ authority_name0 = smb_auth_creds.get_domain()
+ return self._test_lsa_multi_auth_connect3(smb_auth_creds,
+ account_name0,
+ authority_name0)
+
+ def test_lsa_multi_auth_connect3a(self):
+ smb_auth_creds = self.get_anon_creds()
+ account_name0 = "ANONYMOUS LOGON"
+ authority_name0 = "NT AUTHORITY"
+ return self._test_lsa_multi_auth_connect3(smb_auth_creds,
+ account_name0,
+ authority_name0)
+
+    def _test_lsa_multi_auth_connect4(self, smb_creds,
+                                      account_name0, authority_name0):
+        """Like _test_lsa_multi_auth_connect3, with four alternating
+        anonymous/user NTLMSSP AUTH_LEVEL_CONNECT contexts:  each newly
+        added CONNECT context becomes the implicit default, and only an
+        explicit use of any context switches the default back to the
+        transport identity (account_name0/authority_name0).
+        """
+        creds1 = self.get_anon_creds()
+        account_name1 = "ANONYMOUS LOGON"
+        authority_name1 = "NT AUTHORITY"
+        auth_type1 = dcerpc.DCERPC_AUTH_TYPE_NTLMSSP
+        auth_level1 = dcerpc.DCERPC_AUTH_LEVEL_CONNECT
+        auth_context_id1 = 1
+
+        creds2 = self.get_user_creds()
+        account_name2 = creds2.get_username()
+        authority_name2 = creds2.get_domain()
+        auth_type2 = dcerpc.DCERPC_AUTH_TYPE_NTLMSSP
+        auth_level2 = dcerpc.DCERPC_AUTH_LEVEL_CONNECT
+        auth_context_id2 = 2
+
+        creds3 = self.get_anon_creds()
+        account_name3 = "ANONYMOUS LOGON"
+        authority_name3 = "NT AUTHORITY"
+        auth_type3 = dcerpc.DCERPC_AUTH_TYPE_NTLMSSP
+        auth_level3 = dcerpc.DCERPC_AUTH_LEVEL_CONNECT
+        auth_context_id3 = 3
+
+        creds4 = self.get_user_creds()
+        account_name4 = creds4.get_username()
+        authority_name4 = creds4.get_domain()
+        auth_type4 = dcerpc.DCERPC_AUTH_TYPE_NTLMSSP
+        auth_level4 = dcerpc.DCERPC_AUTH_LEVEL_CONNECT
+        auth_context_id4 = 4
+
+        abstract = samba.dcerpc.lsa.abstract_syntax()
+        transfer = base.transfer_syntax_ndr()
+
+        self.reconnect_smb_pipe(primary_address='\\pipe\\lsarpc',
+                                secondary_address='\\pipe\\lsass',
+                                transport_creds=smb_creds)
+        self.assertIsConnected()
+
+        tsf1_list = [transfer]
+        ctx1 = samba.dcerpc.dcerpc.ctx_list()
+        ctx1.context_id = 1
+        ctx1.num_transfer_syntaxes = len(tsf1_list)
+        ctx1.abstract_syntax = abstract
+        ctx1.transfer_syntaxes = tsf1_list
+
+        auth_context1 = self.get_auth_context_creds(creds=creds1,
+                                                    auth_type=auth_type1,
+                                                    auth_level=auth_level1,
+                                                    auth_context_id=auth_context_id1,
+                                                    hdr_signing=False)
+        auth_context2 = self.get_auth_context_creds(creds=creds2,
+                                                    auth_type=auth_type2,
+                                                    auth_level=auth_level2,
+                                                    auth_context_id=auth_context_id2,
+                                                    hdr_signing=False)
+        auth_context3 = self.get_auth_context_creds(creds=creds3,
+                                                    auth_type=auth_type3,
+                                                    auth_level=auth_level3,
+                                                    auth_context_id=auth_context_id3,
+                                                    hdr_signing=False)
+        auth_context4 = self.get_auth_context_creds(creds=creds4,
+                                                    auth_type=auth_type4,
+                                                    auth_level=auth_level4,
+                                                    auth_context_id=auth_context_id4,
+                                                    hdr_signing=False)
+
+        get_user_name = samba.dcerpc.lsa.GetUserName()
+        get_user_name.in_system_name = self.target_hostname
+        get_user_name.in_account_name = None
+        get_user_name.in_authority_name = base.ndr_pointer(None)
+
+        ack0 = self.do_generic_bind(call_id=0, ctx=ctx1)
+
+        #
+        # We use the default auth context
+        # inherited from the transport
+        #
+        self.do_single_request(call_id=1, ctx=ctx1, io=get_user_name)
+        self.assertEqual(get_user_name.result[0], NT_STATUS_SUCCESS)
+        self.assertEqualsStrLower(get_user_name.out_account_name, account_name0)
+        self.assertEqualsStrLower(get_user_name.out_authority_name.value, authority_name0)
+
+        ack1 = self.do_generic_bind(call_id=2,
+                                    ctx=ctx1,
+                                    auth_context=auth_context1,
+                                    assoc_group_id = ack0.u.assoc_group_id,
+                                    start_with_alter=True)
+
+        #
+        # With just one explicit auth context and that
+        # uses AUTH_LEVEL_CONNECT context.
+        #
+        # We always get that by default instead of the one default one
+        # inherited from the transport
+        #
+        # Until a new explicit context resets the mode
+        #
+        self.do_single_request(call_id=3, ctx=ctx1, io=get_user_name)
+        self.assertEqual(get_user_name.result[0], NT_STATUS_SUCCESS)
+        self.assertEqualsStrLower(get_user_name.out_account_name, account_name1)
+        self.assertEqualsStrLower(get_user_name.out_authority_name.value, authority_name1)
+
+        self.do_single_request(call_id=4, ctx=ctx1, io=get_user_name)
+        self.assertEqual(get_user_name.result[0], NT_STATUS_SUCCESS)
+        self.assertEqualsStrLower(get_user_name.out_account_name, account_name1)
+        self.assertEqualsStrLower(get_user_name.out_authority_name.value, authority_name1)
+
+        ack2 = self.do_generic_bind(call_id=5,
+                                    ctx=ctx1,
+                                    auth_context=auth_context2,
+                                    assoc_group_id = ack0.u.assoc_group_id,
+                                    start_with_alter=True)
+
+        #
+        # A new auth context with LEVEL_CONNECT resets the default.
+        #
+        self.do_single_request(call_id=6, ctx=ctx1, io=get_user_name)
+        self.assertEqual(get_user_name.result[0], NT_STATUS_SUCCESS)
+        self.assertEqualsStrLower(get_user_name.out_account_name, account_name2)
+        self.assertEqualsStrLower(get_user_name.out_authority_name.value, authority_name2)
+
+        self.do_single_request(call_id=7, ctx=ctx1, io=get_user_name)
+        self.assertEqual(get_user_name.result[0], NT_STATUS_SUCCESS)
+        self.assertEqualsStrLower(get_user_name.out_account_name, account_name2)
+        self.assertEqualsStrLower(get_user_name.out_authority_name.value, authority_name2)
+
+        ack3 = self.do_generic_bind(call_id=8,
+                                    ctx=ctx1,
+                                    auth_context=auth_context3,
+                                    assoc_group_id = ack0.u.assoc_group_id,
+                                    start_with_alter=True)
+
+        #
+        # A new auth context with LEVEL_CONNECT resets the default.
+        #
+        self.do_single_request(call_id=9, ctx=ctx1, io=get_user_name)
+        self.assertEqual(get_user_name.result[0], NT_STATUS_SUCCESS)
+        self.assertEqualsStrLower(get_user_name.out_account_name, account_name3)
+        self.assertEqualsStrLower(get_user_name.out_authority_name.value, authority_name3)
+
+        self.do_single_request(call_id=10, ctx=ctx1, io=get_user_name)
+        self.assertEqual(get_user_name.result[0], NT_STATUS_SUCCESS)
+        self.assertEqualsStrLower(get_user_name.out_account_name, account_name3)
+        self.assertEqualsStrLower(get_user_name.out_authority_name.value, authority_name3)
+
+        ack4 = self.do_generic_bind(call_id=11,
+                                    ctx=ctx1,
+                                    auth_context=auth_context4,
+                                    assoc_group_id = ack0.u.assoc_group_id,
+                                    start_with_alter=True)
+
+        #
+        # A new auth context with LEVEL_CONNECT resets the default.
+        #
+        self.do_single_request(call_id=12, ctx=ctx1, io=get_user_name)
+        self.assertEqual(get_user_name.result[0], NT_STATUS_SUCCESS)
+        self.assertEqualsStrLower(get_user_name.out_account_name, account_name4)
+        self.assertEqualsStrLower(get_user_name.out_authority_name.value, authority_name4)
+
+        self.do_single_request(call_id=13, ctx=ctx1, io=get_user_name)
+        self.assertEqual(get_user_name.result[0], NT_STATUS_SUCCESS)
+        self.assertEqualsStrLower(get_user_name.out_account_name, account_name4)
+        self.assertEqualsStrLower(get_user_name.out_authority_name.value, authority_name4)
+
+        #
+        # Only the explicit usage of any context reset that mode
+        #
+        self.do_single_request(call_id=14, ctx=ctx1, io=get_user_name,
+                               auth_context=auth_context1)
+        self.assertEqual(get_user_name.result[0], NT_STATUS_SUCCESS)
+        self.assertEqualsStrLower(get_user_name.out_account_name, account_name1)
+        self.assertEqualsStrLower(get_user_name.out_authority_name.value, authority_name1)
+
+        self.do_single_request(call_id=15, ctx=ctx1, io=get_user_name)
+        self.assertEqual(get_user_name.result[0], NT_STATUS_SUCCESS)
+        self.assertEqualsStrLower(get_user_name.out_account_name, account_name0)
+        self.assertEqualsStrLower(get_user_name.out_authority_name.value, authority_name0)
+
+        self.do_single_request(call_id=16, ctx=ctx1, io=get_user_name,
+                               auth_context=auth_context1)
+        self.assertEqual(get_user_name.result[0], NT_STATUS_SUCCESS)
+        self.assertEqualsStrLower(get_user_name.out_account_name, account_name1)
+        self.assertEqualsStrLower(get_user_name.out_authority_name.value, authority_name1)
+
+        self.do_single_request(call_id=17, ctx=ctx1, io=get_user_name,
+                               auth_context=auth_context2)
+        self.assertEqual(get_user_name.result[0], NT_STATUS_SUCCESS)
+        self.assertEqualsStrLower(get_user_name.out_account_name, account_name2)
+        self.assertEqualsStrLower(get_user_name.out_authority_name.value, authority_name2)
+
+        self.do_single_request(call_id=18, ctx=ctx1, io=get_user_name,
+                               auth_context=auth_context3)
+        self.assertEqual(get_user_name.result[0], NT_STATUS_SUCCESS)
+        self.assertEqualsStrLower(get_user_name.out_account_name, account_name3)
+        self.assertEqualsStrLower(get_user_name.out_authority_name.value, authority_name3)
+
+        self.do_single_request(call_id=19, ctx=ctx1, io=get_user_name,
+                               auth_context=auth_context4)
+        self.assertEqual(get_user_name.result[0], NT_STATUS_SUCCESS)
+        self.assertEqualsStrLower(get_user_name.out_account_name, account_name4)
+        self.assertEqualsStrLower(get_user_name.out_authority_name.value, authority_name4)
+
+        self.do_single_request(call_id=20, ctx=ctx1, io=get_user_name)
+        self.assertEqual(get_user_name.result[0], NT_STATUS_SUCCESS)
+        self.assertEqualsStrLower(get_user_name.out_account_name, account_name0)
+        self.assertEqualsStrLower(get_user_name.out_authority_name.value, authority_name0)
+
+        return
+
+ def test_lsa_multi_auth_connect4u(self):
+ smb_auth_creds = self.get_user_creds()
+ account_name0 = smb_auth_creds.get_username()
+ authority_name0 = smb_auth_creds.get_domain()
+ return self._test_lsa_multi_auth_connect4(smb_auth_creds,
+ account_name0,
+ authority_name0)
+
+ def test_lsa_multi_auth_connect4a(self):
+ smb_auth_creds = self.get_anon_creds()
+ account_name0 = "ANONYMOUS LOGON"
+ authority_name0 = "NT AUTHORITY"
+ return self._test_lsa_multi_auth_connect4(smb_auth_creds,
+ account_name0,
+ authority_name0)
+
+ def _test_lsa_multi_auth_sign_connect1(self, smb_creds,
+ account_name0, authority_name0):
+
+ creds1 = self.get_user_creds()
+ account_name1 = creds1.get_username()
+ authority_name1 = creds1.get_domain()
+ auth_type1 = dcerpc.DCERPC_AUTH_TYPE_NTLMSSP
+ auth_level1 = dcerpc.DCERPC_AUTH_LEVEL_INTEGRITY
+ auth_context_id1 = 1
+
+ creds2 = self.get_user_creds()
+ account_name2 = creds2.get_username()
+ authority_name2 = creds2.get_domain()
+ auth_type2 = dcerpc.DCERPC_AUTH_TYPE_NTLMSSP
+ auth_level2 = dcerpc.DCERPC_AUTH_LEVEL_INTEGRITY
+ auth_context_id2 = 2
+
+ creds3 = self.get_anon_creds()
+ account_name3 = "ANONYMOUS LOGON"
+ authority_name3 = "NT AUTHORITY"
+ auth_type3 = dcerpc.DCERPC_AUTH_TYPE_NTLMSSP
+ auth_level3 = dcerpc.DCERPC_AUTH_LEVEL_CONNECT
+ auth_context_id3 = 3
+
+ abstract = samba.dcerpc.lsa.abstract_syntax()
+ transfer = base.transfer_syntax_ndr()
+
+ self.reconnect_smb_pipe(primary_address='\\pipe\\lsarpc',
+ secondary_address='\\pipe\\lsass',
+ transport_creds=smb_creds)
+ self.assertIsConnected()
+
+ tsf1_list = [transfer]
+ ctx1 = samba.dcerpc.dcerpc.ctx_list()
+ ctx1.context_id = 1
+ ctx1.num_transfer_syntaxes = len(tsf1_list)
+ ctx1.abstract_syntax = abstract
+ ctx1.transfer_syntaxes = tsf1_list
+
+ auth_context1 = self.get_auth_context_creds(creds=creds1,
+ auth_type=auth_type1,
+ auth_level=auth_level1,
+ auth_context_id=auth_context_id1,
+ hdr_signing=False)
+ auth_context2 = self.get_auth_context_creds(creds=creds2,
+ auth_type=auth_type2,
+ auth_level=auth_level2,
+ auth_context_id=auth_context_id2,
+ hdr_signing=False)
+ auth_context3 = self.get_auth_context_creds(creds=creds3,
+ auth_type=auth_type3,
+ auth_level=auth_level3,
+ auth_context_id=auth_context_id3,
+ hdr_signing=False)
+
+ get_user_name = samba.dcerpc.lsa.GetUserName()
+ get_user_name.in_system_name = self.target_hostname
+ get_user_name.in_account_name = None
+ get_user_name.in_authority_name = base.ndr_pointer(None)
+
+ ack1 = self.do_generic_bind(call_id=0,
+ ctx=ctx1,
+ auth_context=auth_context1)
+
+ #
+ # With just one explicit auth context and that
+ # *not* uses AUTH_LEVEL_CONNECT context.
+ #
+ # We don't get the by default (auth_context1)
+ #
+ self.do_single_request(call_id=1, ctx=ctx1, io=get_user_name)
+ self.assertEqual(get_user_name.result[0], NT_STATUS_SUCCESS)
+ self.assertEqualsStrLower(get_user_name.out_account_name, account_name0)
+ self.assertEqualsStrLower(get_user_name.out_authority_name.value, authority_name0)
+
+ self.do_single_request(call_id=2, ctx=ctx1, io=get_user_name,
+ auth_context=auth_context1)
+ self.assertEqual(get_user_name.result[0], NT_STATUS_SUCCESS)
+ self.assertEqualsStrLower(get_user_name.out_account_name, account_name1)
+ self.assertEqualsStrLower(get_user_name.out_authority_name.value, authority_name1)
+
+ self.do_single_request(call_id=3, ctx=ctx1, io=get_user_name)
+ self.assertEqual(get_user_name.result[0], NT_STATUS_SUCCESS)
+ self.assertEqualsStrLower(get_user_name.out_account_name, account_name0)
+ self.assertEqualsStrLower(get_user_name.out_authority_name.value, authority_name0)
+
+ ack2 = self.do_generic_bind(call_id=4,
+ ctx=ctx1,
+ auth_context=auth_context2,
+ assoc_group_id = ack1.u.assoc_group_id,
+ start_with_alter=True)
+
+ #
+ # With just two explicit auth context and
+ # *none* uses AUTH_LEVEL_CONNECT context.
+ #
+ # We don't get auth_context1 or auth_context2 by default
+ #
+ self.do_single_request(call_id=5, ctx=ctx1, io=get_user_name)
+ self.assertEqual(get_user_name.result[0], NT_STATUS_SUCCESS)
+ self.assertEqualsStrLower(get_user_name.out_account_name, account_name0)
+ self.assertEqualsStrLower(get_user_name.out_authority_name.value, authority_name0)
+
+ self.do_single_request(call_id=6, ctx=ctx1, io=get_user_name,
+ auth_context=auth_context1)
+ self.assertEqual(get_user_name.result[0], NT_STATUS_SUCCESS)
+ self.assertEqualsStrLower(get_user_name.out_account_name, account_name1)
+ self.assertEqualsStrLower(get_user_name.out_authority_name.value, authority_name1)
+
+ self.do_single_request(call_id=7, ctx=ctx1, io=get_user_name,
+ auth_context=auth_context2)
+ self.assertEqual(get_user_name.result[0], NT_STATUS_SUCCESS)
+ self.assertEqualsStrLower(get_user_name.out_account_name, account_name2)
+ self.assertEqualsStrLower(get_user_name.out_authority_name.value, authority_name2)
+
+ self.do_single_request(call_id=8, ctx=ctx1, io=get_user_name)
+ self.assertEqual(get_user_name.result[0], NT_STATUS_SUCCESS)
+ self.assertEqualsStrLower(get_user_name.out_account_name, account_name0)
+ self.assertEqualsStrLower(get_user_name.out_authority_name.value, authority_name0)
+
+ ack3 = self.do_generic_bind(call_id=9,
+ ctx=ctx1,
+ auth_context=auth_context3,
+ assoc_group_id = ack1.u.assoc_group_id,
+ start_with_alter=True)
+
+ #
+        # Now we have three explicit auth contexts,
+ # but just one with AUTH_LEVEL_CONNECT
+ #
+ # If we don't specify one of them we get
+ # that one auth_level_connect context.
+ #
+        # Until an explicit usage of any auth context resets that mode.
+ #
+ self.do_single_request(call_id=10, ctx=ctx1, io=get_user_name)
+ self.assertEqual(get_user_name.result[0], NT_STATUS_SUCCESS)
+ self.assertEqualsStrLower(get_user_name.out_account_name, account_name3)
+ self.assertEqualsStrLower(get_user_name.out_authority_name.value, authority_name3)
+
+ self.do_single_request(call_id=11, ctx=ctx1, io=get_user_name)
+ self.assertEqual(get_user_name.result[0], NT_STATUS_SUCCESS)
+ self.assertEqualsStrLower(get_user_name.out_account_name, account_name3)
+ self.assertEqualsStrLower(get_user_name.out_authority_name.value, authority_name3)
+
+ self.do_single_request(call_id=12, ctx=ctx1, io=get_user_name,
+ auth_context=auth_context1)
+ self.assertEqual(get_user_name.result[0], NT_STATUS_SUCCESS)
+ self.assertEqualsStrLower(get_user_name.out_account_name, account_name1)
+ self.assertEqualsStrLower(get_user_name.out_authority_name.value, authority_name1)
+
+ self.do_single_request(call_id=13, ctx=ctx1, io=get_user_name)
+ self.assertEqual(get_user_name.result[0], NT_STATUS_SUCCESS)
+ self.assertEqualsStrLower(get_user_name.out_account_name, account_name0)
+ self.assertEqualsStrLower(get_user_name.out_authority_name.value, authority_name0)
+
+ return
+
+ def test_lsa_multi_auth_sign_connect1u(self):
+ smb_auth_creds = self.get_user_creds()
+ account_name0 = smb_auth_creds.get_username()
+ authority_name0 = smb_auth_creds.get_domain()
+ return self._test_lsa_multi_auth_sign_connect1(smb_auth_creds,
+ account_name0,
+ authority_name0)
+ def test_lsa_multi_auth_sign_connect1a(self):
+ smb_auth_creds = self.get_anon_creds()
+ account_name0 = "ANONYMOUS LOGON"
+ authority_name0 = "NT AUTHORITY"
+ return self._test_lsa_multi_auth_sign_connect1(smb_auth_creds,
+ account_name0,
+ authority_name0)
+
+ def test_spnego_multiple_auth_hdr_signing(self):
+ auth_type = dcerpc.DCERPC_AUTH_TYPE_SPNEGO
+ auth_level1 = dcerpc.DCERPC_AUTH_LEVEL_INTEGRITY
+ auth_context_id1=1
+ auth_level2 = dcerpc.DCERPC_AUTH_LEVEL_PACKET
+ auth_context_id2=2
+
+ creds = self.get_user_creds()
+
+ abstract = samba.dcerpc.mgmt.abstract_syntax()
+ transfer = base.transfer_syntax_ndr()
+
+ tsf1_list = [transfer]
+ ctx = samba.dcerpc.dcerpc.ctx_list()
+ ctx.context_id = 1
+ ctx.num_transfer_syntaxes = len(tsf1_list)
+ ctx.abstract_syntax = abstract
+ ctx.transfer_syntaxes = tsf1_list
+
+ auth_context1 = self.get_auth_context_creds(creds=creds,
+ auth_type=auth_type,
+ auth_level=auth_level1,
+ auth_context_id=auth_context_id1,
+ hdr_signing=False)
+ auth_context2 = self.get_auth_context_creds(creds=creds,
+ auth_type=auth_type,
+ auth_level=auth_level2,
+ auth_context_id=auth_context_id2,
+ hdr_signing=False)
+
+ ack0 = self.do_generic_bind(call_id=1, ctx=ctx)
+
+ ack1 = self.do_generic_bind(call_id=2,
+ ctx=ctx,
+ auth_context=auth_context1,
+ assoc_group_id = ack0.u.assoc_group_id,
+ start_with_alter=True)
+ ack2 = self.do_generic_bind(call_id=3,
+ ctx=ctx,
+ auth_context=auth_context2,
+ assoc_group_id = ack0.u.assoc_group_id,
+ start_with_alter=True)
+
+ inq_if_ids = samba.dcerpc.mgmt.inq_if_ids()
+ self.do_single_request(call_id=4, ctx=ctx, io=inq_if_ids)
+ self.do_single_request(call_id=5, ctx=ctx, io=inq_if_ids,
+ auth_context=auth_context1)
+ self.do_single_request(call_id=6, ctx=ctx, io=inq_if_ids,
+ auth_context=auth_context2)
+
+ ack3 = self.do_generic_bind(call_id=7, ctx=ctx,
+ pfc_flags=dcerpc.DCERPC_PFC_FLAG_FIRST |
+ dcerpc.DCERPC_PFC_FLAG_LAST |
+ dcerpc.DCERPC_PFC_FLAG_SUPPORT_HEADER_SIGN,
+ assoc_group_id = ack0.u.assoc_group_id,
+ start_with_alter=True)
+
+ self.assertFalse(auth_context1['hdr_signing'])
+ auth_context1['hdr_signing'] = True
+ auth_context1["gensec"].want_feature(gensec.FEATURE_SIGN_PKT_HEADER)
+
+ self.do_single_request(call_id=8, ctx=ctx, io=inq_if_ids)
+ self.do_single_request(call_id=9, ctx=ctx, io=inq_if_ids,
+ auth_context=auth_context1)
+ self.do_single_request(call_id=10, ctx=ctx, io=inq_if_ids,
+ auth_context=auth_context2,
+ fault_status=dcerpc.DCERPC_FAULT_SEC_PKG_ERROR)
+
+ # wait for a disconnect
+ rep = self.recv_pdu()
+ self.assertIsNone(rep)
+ self.assertNotConnected()
+
+ def test_multiple_auth_limit(self):
+ creds = self.get_user_creds()
+
+ abstract = samba.dcerpc.mgmt.abstract_syntax()
+ transfer = base.transfer_syntax_ndr()
+
+ tsf1_list = [transfer]
+ ctx = samba.dcerpc.dcerpc.ctx_list()
+ ctx.context_id = 1
+ ctx.num_transfer_syntaxes = len(tsf1_list)
+ ctx.abstract_syntax = abstract
+ ctx.transfer_syntaxes = tsf1_list
+
+ ack0 = self.do_generic_bind(call_id=0, ctx=ctx)
+
+ is_server_listening = samba.dcerpc.mgmt.is_server_listening()
+
+ max_num_auth_str = samba.tests.env_get_var_value('MAX_NUM_AUTH', allow_missing=True)
+ if max_num_auth_str is not None:
+ max_num_auth = int(max_num_auth_str)
+ else:
+ max_num_auth = 2049
+
+ for i in range(1, max_num_auth+2):
+ auth_type = dcerpc.DCERPC_AUTH_TYPE_SPNEGO
+ auth_level = dcerpc.DCERPC_AUTH_LEVEL_INTEGRITY
+ auth_context_id = i
+
+ auth_context = self.get_auth_context_creds(creds=creds,
+ auth_type=auth_type,
+ auth_level=auth_level,
+ auth_context_id=auth_context_id,
+ hdr_signing=False)
+
+ alter_fault = None
+ if i > max_num_auth:
+ alter_fault = dcerpc.DCERPC_NCA_S_PROTO_ERROR
+
+ ack = self.do_generic_bind(call_id=auth_context_id,
+ ctx=ctx,
+ auth_context=auth_context,
+ assoc_group_id = ack0.u.assoc_group_id,
+ alter_fault=alter_fault,
+ start_with_alter=True,
+ )
+ if alter_fault is not None:
+ break
+
+
+ self.do_single_request(call_id=auth_context_id,
+ ctx=ctx, io=is_server_listening,
+ auth_context=auth_context)
+
+ # wait for a disconnect
+ rep = self.recv_pdu()
+ self.assertIsNone(rep)
+ self.assertNotConnected()
+ return
+
+
+if __name__ == "__main__":
+ global_ndr_print = True
+ global_hexdump = True
+ import unittest
+ unittest.main()
diff --git a/python/samba/tests/dcerpc/raw_testcase.py b/python/samba/tests/dcerpc/raw_testcase.py
new file mode 100644
index 0000000..743fa0f
--- /dev/null
+++ b/python/samba/tests/dcerpc/raw_testcase.py
@@ -0,0 +1,1177 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007-2010
+# Copyright (C) Stefan Metzmacher 2014,2015
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import sys
+import socket
+import samba.dcerpc.dcerpc as dcerpc
+import samba.dcerpc.base
+import samba.dcerpc.epmapper
+import samba.dcerpc.security as security
+import samba.tests
+from samba import gensec
+from samba.credentials import Credentials
+from samba.tests import TestCase
+from samba.ndr import ndr_pack, ndr_unpack, ndr_unpack_out
+from samba.ntstatus import (
+ NT_STATUS_CONNECTION_DISCONNECTED,
+ NT_STATUS_PIPE_DISCONNECTED,
+ NT_STATUS_IO_TIMEOUT
+)
+from samba import NTSTATUSError
+from samba.samba3 import param as s3param
+from samba.samba3 import libsmb_samba_internal as libsmb
+from samba.credentials import SMB_SIGNING_REQUIRED
+
+class smb_pipe_socket(object):
+
+ def __init__(self, target_hostname, pipename, creds, impersonation_level, lp):
+ lp3 = s3param.get_context()
+ lp3.load(lp.configfile)
+ saved_signing_state = creds.get_smb_ipc_signing()
+ creds.set_smb_ipc_signing(SMB_SIGNING_REQUIRED)
+ self.smbconn = libsmb.Conn(target_hostname, 'IPC$', lp3,
+ creds=creds, ipc=True)
+ creds.set_smb_ipc_signing(saved_signing_state)
+ self.smbfid = self.smbconn.create(pipename,
+ DesiredAccess=0x12019f,
+ ShareAccess=0x7,
+ CreateDisposition=1,
+ CreateOptions=0x400040,
+ ImpersonationLevel=impersonation_level)
+ return
+
+ def close(self):
+ self.smbconn.close(self.smbfid)
+ del self.smbconn
+
+ def settimeout(self, timeo):
+        # The socket module we simulate here
+ # specifies the timeo as seconds as float.
+ msecs = int(timeo * 1000)
+ assert msecs >= 0
+ self.smbconn.settimeout(msecs)
+ return
+
+ def send(self, buf, flags=0):
+ return self.smbconn.write(self.smbfid, buffer=buf, offset=0, mode=8)
+
+ def recv(self, len, flags=0):
+ try:
+ return self.smbconn.read(self.smbfid, offset=0, size=len)
+ except NTSTATUSError as e:
+ if e.args[0] == NT_STATUS_CONNECTION_DISCONNECTED:
+ return b'\0' * 0
+ if e.args[0] == NT_STATUS_PIPE_DISCONNECTED:
+ return b'\0' * 0
+ if e.args[0] == NT_STATUS_IO_TIMEOUT:
+ raise socket.timeout(str(e))
+ raise e
+
+class RawDCERPCTest(TestCase):
+ """A raw DCE/RPC Test case."""
+
+ def _disconnect(self, reason):
+ if self.s is None:
+ return
+ self.s.close()
+ self.s = None
+ if self.do_hexdump:
+ sys.stderr.write("disconnect[%s]\n" % reason)
+
+ def _connect_tcp(self):
+ tcp_port = int(self.primary_address)
+ try:
+ self.a = socket.getaddrinfo(self.host, tcp_port, socket.AF_UNSPEC,
+ socket.SOCK_STREAM, socket.SOL_TCP,
+ 0)
+ self.s = socket.socket(self.a[0][0], self.a[0][1], self.a[0][2])
+ self.s.settimeout(10)
+ self.s.connect(self.a[0][4])
+ except socket.error as e:
+ self.s.close()
+ raise
+ except IOError as e:
+ self.s.close()
+ raise
+ except Exception as e:
+ raise
+ finally:
+ pass
+ self.max_xmit_frag = 5840
+ self.max_recv_frag = 5840
+ if self.secondary_address is None:
+ self.secondary_address = self.primary_address
+ # compat for older tests
+ self.tcp_port = tcp_port
+
+ def _connect_smb(self):
+ a = self.primary_address.split('\\')
+ self.assertEqual(len(a), 3)
+ self.assertEqual(a[0], "")
+ self.assertEqual(a[1], "pipe")
+ pipename = a[2]
+ self.s = smb_pipe_socket(self.target_hostname,
+ pipename,
+ self.transport_creds,
+ self.transport_impersonation,
+ self.lp_ctx)
+ self.max_xmit_frag = 4280
+ self.max_recv_frag = 4280
+ if self.secondary_address is None:
+ self.secondary_address = self.primary_address
+
+ def connect(self):
+ self.assertNotConnected()
+ if self.primary_address.startswith("\\pipe\\"):
+ self._connect_smb()
+ else:
+ self._connect_tcp()
+ if self.secondary_address is None:
+ self.secondary_address = self.primary_address
+ return
+
+ def setUp(self):
+ super().setUp()
+ self.do_ndr_print = False
+ self.do_hexdump = False
+
+ self.ignore_random_pad = samba.tests.env_get_var_value('IGNORE_RANDOM_PAD',
+ allow_missing=True)
+ self.host = samba.tests.env_get_var_value('SERVER')
+ self.target_hostname = samba.tests.env_get_var_value('TARGET_HOSTNAME', allow_missing=True)
+ if self.target_hostname is None:
+ self.target_hostname = self.host
+ self.primary_address = "135"
+ self.secondary_address = None
+ self.transport_creds = self.get_anon_creds()
+ self.transport_impersonation = 0x2
+
+ self.settings = {}
+ self.settings["lp_ctx"] = self.lp_ctx = samba.tests.env_loadparm()
+ self.settings["target_hostname"] = self.target_hostname
+
+ self.s = None
+ self.connect()
+
+ def tearDown(self):
+ self._disconnect("tearDown")
+ super().tearDown()
+
+ def noop(self):
+ return
+
+ def reconnect_smb_pipe(self, primary_address, secondary_address=None,
+ transport_creds=None, transport_impersonation=None):
+ self._disconnect("reconnect_smb_pipe")
+ self.assertIsNotNone(primary_address)
+ self.primary_address = primary_address
+ if secondary_address is not None:
+ self.secondary_address = secondary_address
+ else:
+ self.secondary_address = None
+
+ if transport_creds is not None:
+ self.transport_creds = transport_creds
+
+ if transport_impersonation is not None:
+ self.transport_impersonation = transport_impersonation
+
+ self.connect()
+ return
+
+ def second_connection(self, primary_address=None, secondary_address=None,
+ transport_creds=None, transport_impersonation=None):
+ c = RawDCERPCTest(methodName='noop')
+ c.do_ndr_print = self.do_ndr_print
+ c.do_hexdump = self.do_hexdump
+ c.ignore_random_pad = self.ignore_random_pad
+
+ c.host = self.host
+ c.target_hostname = self.target_hostname
+ if primary_address is not None:
+ c.primary_address = primary_address
+ if secondary_address is not None:
+ c.secondary_address = secondary_address
+ else:
+ c.secondary_address = None
+ else:
+ self.assertIsNone(secondary_address)
+ c.primary_address = self.primary_address
+ c.secondary_address = self.secondary_address
+
+ if transport_creds is not None:
+ c.transport_creds = transport_creds
+ else:
+ c.transport_creds = self.transport_creds
+
+ if transport_impersonation is not None:
+ c.transport_impersonation = transport_impersonation
+ else:
+ c.transport_impersonation = self.transport_impersonation
+
+ c.lp_ctx = self.lp_ctx
+ c.settings = self.settings
+
+ c.s = None
+ c.connect()
+ return c
+
+ def get_user_creds(self):
+ c = Credentials()
+ c.guess()
+ domain = samba.tests.env_get_var_value('DOMAIN')
+ realm = samba.tests.env_get_var_value('REALM')
+ username = samba.tests.env_get_var_value('USERNAME')
+ password = samba.tests.env_get_var_value('PASSWORD')
+ c.set_domain(domain)
+ c.set_realm(realm)
+ c.set_username(username)
+ c.set_password(password)
+ return c
+
+ def get_anon_creds(self):
+ c = Credentials()
+ c.set_anonymous()
+ return c
+
+ def get_auth_context_creds(self, creds, auth_type, auth_level,
+ auth_context_id,
+ g_auth_level=None,
+ hdr_signing=False):
+
+ if g_auth_level is None:
+ g_auth_level = auth_level
+
+ g = gensec.Security.start_client(self.settings)
+ g.set_credentials(creds)
+ g.want_feature(gensec.FEATURE_DCE_STYLE)
+ g.start_mech_by_authtype(auth_type, g_auth_level)
+
+ if auth_type == dcerpc.DCERPC_AUTH_TYPE_KRB5:
+ expect_3legs = True
+ elif auth_type == dcerpc.DCERPC_AUTH_TYPE_NTLMSSP:
+ expect_3legs = True
+ else:
+ expect_3legs = False
+
+ auth_context = {}
+ auth_context["auth_type"] = auth_type
+ auth_context["auth_level"] = auth_level
+ auth_context["auth_context_id"] = auth_context_id
+ auth_context["g_auth_level"] = g_auth_level
+ auth_context["gensec"] = g
+ auth_context["hdr_signing"] = hdr_signing
+ auth_context["expect_3legs"] = expect_3legs
+
+ return auth_context
+
+ def do_generic_bind(self, ctx, auth_context=None,
+ pfc_flags=samba.dcerpc.dcerpc.DCERPC_PFC_FLAG_FIRST |
+ samba.dcerpc.dcerpc.DCERPC_PFC_FLAG_LAST,
+ assoc_group_id=0, call_id=0,
+ nak_reason=None, alter_fault=None,
+ start_with_alter=False,
+ pfc_flags_2nd=samba.dcerpc.dcerpc.DCERPC_PFC_FLAG_FIRST |
+ samba.dcerpc.dcerpc.DCERPC_PFC_FLAG_LAST,
+ use_auth3=False):
+ ctx_list = [ctx]
+
+ if auth_context is not None:
+ if auth_context['hdr_signing']:
+ pfc_flags |= dcerpc.DCERPC_PFC_FLAG_SUPPORT_HEADER_SIGN
+
+ expect_3legs = auth_context["expect_3legs"]
+
+ from_server = b""
+ (finished, to_server) = auth_context["gensec"].update(from_server)
+ self.assertFalse(finished)
+
+ auth_info = self.generate_auth(auth_type=auth_context["auth_type"],
+ auth_level=auth_context["auth_level"],
+ auth_context_id=auth_context["auth_context_id"],
+ auth_blob=to_server)
+ else:
+ auth_info = b""
+
+ if start_with_alter:
+ req = self.generate_alter(call_id=call_id,
+ pfc_flags=pfc_flags,
+ ctx_list=ctx_list,
+ assoc_group_id=0xffffffff - assoc_group_id,
+ auth_info=auth_info)
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ if alter_fault is not None:
+ self.verify_pdu(rep, samba.dcerpc.dcerpc.DCERPC_PKT_FAULT, req.call_id,
+ pfc_flags=req.pfc_flags |
+ samba.dcerpc.dcerpc.DCERPC_PFC_FLAG_DID_NOT_EXECUTE,
+ auth_length=0)
+ self.assertNotEqual(rep.u.alloc_hint, 0)
+ self.assertEqual(rep.u.context_id, 0)
+ self.assertEqual(rep.u.cancel_count, 0)
+ self.assertEqual(rep.u.flags, 0)
+ self.assertEqual(rep.u.status, alter_fault)
+ self.assertEqual(rep.u.reserved, 0)
+ self.assertEqual(len(rep.u.error_and_verifier), 0)
+ return None
+ self.verify_pdu(rep, samba.dcerpc.dcerpc.DCERPC_PKT_ALTER_RESP, req.call_id,
+ pfc_flags=req.pfc_flags)
+ self.assertEqual(rep.u.max_xmit_frag, req.u.max_xmit_frag)
+ self.assertEqual(rep.u.max_recv_frag, req.u.max_recv_frag)
+ self.assertEqual(rep.u.assoc_group_id, assoc_group_id)
+ self.assertEqual(rep.u.secondary_address_size, 0)
+ self.assertEqual(rep.u.secondary_address, '')
+ self.assertPadding(rep.u._pad1, 2)
+ else:
+ req = self.generate_bind(call_id=call_id,
+ pfc_flags=pfc_flags,
+ ctx_list=ctx_list,
+ assoc_group_id=assoc_group_id,
+ auth_info=auth_info)
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ if nak_reason is not None:
+ self.verify_pdu(rep, samba.dcerpc.dcerpc.DCERPC_PKT_BIND_NAK, req.call_id,
+ auth_length=0)
+ self.assertEqual(rep.u.reject_reason, nak_reason)
+ self.assertEqual(rep.u.num_versions, 1)
+ self.assertEqual(rep.u.versions[0].rpc_vers, req.rpc_vers)
+ self.assertEqual(rep.u.versions[0].rpc_vers_minor, req.rpc_vers_minor)
+ self.assertPadding(rep.u._pad, 3)
+ return
+ self.verify_pdu(rep, samba.dcerpc.dcerpc.DCERPC_PKT_BIND_ACK, req.call_id,
+ pfc_flags=pfc_flags)
+ self.assertEqual(rep.u.max_xmit_frag, req.u.max_xmit_frag)
+ self.assertEqual(rep.u.max_recv_frag, req.u.max_recv_frag)
+ if assoc_group_id != 0:
+ self.assertEqual(rep.u.assoc_group_id, assoc_group_id)
+ else:
+ self.assertNotEqual(rep.u.assoc_group_id, 0)
+ assoc_group_id = rep.u.assoc_group_id
+ sda_str = self.secondary_address
+ sda_len = len(sda_str) + 1
+ mod_len = (2 + sda_len) % 4
+ if mod_len != 0:
+ sda_pad = 4 - mod_len
+ else:
+ sda_pad = 0
+ self.assertEqual(rep.u.secondary_address_size, sda_len)
+ self.assertEqual(rep.u.secondary_address, sda_str)
+ self.assertPadding(rep.u._pad1, sda_pad)
+
+ self.assertEqual(rep.u.num_results, 1)
+ self.assertEqual(rep.u.ctx_list[0].result,
+ samba.dcerpc.dcerpc.DCERPC_BIND_ACK_RESULT_ACCEPTANCE)
+ self.assertEqual(rep.u.ctx_list[0].reason,
+ samba.dcerpc.dcerpc.DCERPC_BIND_ACK_REASON_NOT_SPECIFIED)
+ self.assertNDRSyntaxEquals(rep.u.ctx_list[0].syntax, ctx.transfer_syntaxes[0])
+ ack = rep
+ if auth_context is None:
+ self.assertEqual(rep.auth_length, 0)
+ self.assertEqual(len(rep.u.auth_info), 0)
+ return ack
+ self.assertNotEqual(rep.auth_length, 0)
+ self.assertGreater(len(rep.u.auth_info), samba.dcerpc.dcerpc.DCERPC_AUTH_TRAILER_LENGTH)
+ self.assertEqual(rep.auth_length, len(rep.u.auth_info) - samba.dcerpc.dcerpc.DCERPC_AUTH_TRAILER_LENGTH)
+
+ a = self.parse_auth(rep.u.auth_info, auth_context=auth_context)
+
+ from_server = a.credentials
+ (finished, to_server) = auth_context["gensec"].update(from_server)
+ if expect_3legs:
+ self.assertTrue(finished)
+ if auth_context['hdr_signing']:
+ auth_context["gensec"].want_feature(gensec.FEATURE_SIGN_PKT_HEADER)
+ else:
+ self.assertFalse(use_auth3)
+ self.assertFalse(finished)
+
+ auth_info = self.generate_auth(auth_type=auth_context["auth_type"],
+ auth_level=auth_context["auth_level"],
+ auth_context_id=auth_context["auth_context_id"],
+ auth_blob=to_server)
+ if use_auth3:
+ req = self.generate_auth3(call_id=call_id,
+ pfc_flags=pfc_flags_2nd,
+ auth_info=auth_info)
+ self.send_pdu(req)
+ rep = self.recv_pdu(timeout=0.01)
+ self.assertIsNone(rep)
+ self.assertIsConnected()
+ return ack
+ req = self.generate_alter(call_id=call_id,
+ ctx_list=ctx_list,
+ pfc_flags=pfc_flags_2nd,
+ assoc_group_id=0xffffffff - assoc_group_id,
+ auth_info=auth_info)
+ self.send_pdu(req)
+ rep = self.recv_pdu()
+ if alter_fault is not None:
+ self.verify_pdu(rep, samba.dcerpc.dcerpc.DCERPC_PKT_FAULT, req.call_id,
+ pfc_flags=req.pfc_flags |
+ samba.dcerpc.dcerpc.DCERPC_PFC_FLAG_DID_NOT_EXECUTE,
+ auth_length=0)
+ self.assertNotEqual(rep.u.alloc_hint, 0)
+ self.assertEqual(rep.u.context_id, 0)
+ self.assertEqual(rep.u.cancel_count, 0)
+ self.assertEqual(rep.u.flags, 0)
+ self.assertEqual(rep.u.status, alter_fault)
+ self.assertEqual(rep.u.reserved, 0)
+ self.assertEqual(len(rep.u.error_and_verifier), 0)
+ return None
+ self.verify_pdu(rep, samba.dcerpc.dcerpc.DCERPC_PKT_ALTER_RESP, req.call_id,
+ pfc_flags=req.pfc_flags)
+ self.assertEqual(rep.u.max_xmit_frag, req.u.max_xmit_frag)
+ self.assertEqual(rep.u.max_recv_frag, req.u.max_recv_frag)
+ self.assertEqual(rep.u.assoc_group_id, assoc_group_id)
+ self.assertEqual(rep.u.secondary_address_size, 0)
+ self.assertEqual(rep.u.secondary_address, '')
+ self.assertPadding(rep.u._pad1, 2)
+ self.assertEqual(rep.u.num_results, 1)
+ self.assertEqual(rep.u.ctx_list[0].result,
+ samba.dcerpc.dcerpc.DCERPC_BIND_ACK_RESULT_ACCEPTANCE)
+ self.assertEqual(rep.u.ctx_list[0].reason,
+ samba.dcerpc.dcerpc.DCERPC_BIND_ACK_REASON_NOT_SPECIFIED)
+ self.assertNDRSyntaxEquals(rep.u.ctx_list[0].syntax, ctx.transfer_syntaxes[0])
+ if finished:
+ self.assertEqual(rep.auth_length, 0)
+ else:
+ self.assertNotEqual(rep.auth_length, 0)
+ self.assertGreaterEqual(len(rep.u.auth_info), samba.dcerpc.dcerpc.DCERPC_AUTH_TRAILER_LENGTH)
+ self.assertEqual(rep.auth_length, len(rep.u.auth_info) - samba.dcerpc.dcerpc.DCERPC_AUTH_TRAILER_LENGTH)
+
+ a = self.parse_auth(rep.u.auth_info, auth_context=auth_context)
+
+ if finished:
+ return ack
+
+ from_server = a.credentials
+ (finished, to_server) = auth_context["gensec"].update(from_server)
+ self.assertTrue(finished)
+ if auth_context['hdr_signing']:
+ auth_context["gensec"].want_feature(gensec.FEATURE_SIGN_PKT_HEADER)
+
+ return ack
+
+ def prepare_presentation(self, abstract, transfer, object=None,
+ context_id=0xffff, epmap=False, auth_context=None,
+ pfc_flags=samba.dcerpc.dcerpc.DCERPC_PFC_FLAG_FIRST |
+ samba.dcerpc.dcerpc.DCERPC_PFC_FLAG_LAST,
+ assoc_group_id=0,
+ return_ack=False):
+ if epmap:
+ self.epmap_reconnect(abstract, transfer=transfer, object=object)
+
+ tsf1_list = [transfer]
+ ctx = samba.dcerpc.dcerpc.ctx_list()
+ ctx.context_id = context_id
+ ctx.num_transfer_syntaxes = len(tsf1_list)
+ ctx.abstract_syntax = abstract
+ ctx.transfer_syntaxes = tsf1_list
+
+ ack = self.do_generic_bind(ctx=ctx,
+ auth_context=auth_context,
+ pfc_flags=pfc_flags,
+ assoc_group_id=assoc_group_id)
+ if ack is None:
+ ctx = None
+
+ if return_ack:
+ return (ctx, ack)
+ return ctx
+
+ def do_single_request(self, call_id, ctx, io,
+ auth_context=None,
+ object=None,
+ bigendian=False, ndr64=False,
+ allow_remaining=False,
+ send_req=True,
+ recv_rep=True,
+ fault_pfc_flags=(
+ samba.dcerpc.dcerpc.DCERPC_PFC_FLAG_FIRST |
+ samba.dcerpc.dcerpc.DCERPC_PFC_FLAG_LAST),
+ fault_status=None,
+ fault_context_id=None,
+ timeout=None,
+ ndr_print=None,
+ hexdump=None):
+
+ if fault_context_id is None:
+ fault_context_id = ctx.context_id
+
+ if ndr_print is None:
+ ndr_print = self.do_ndr_print
+ if hexdump is None:
+ hexdump = self.do_hexdump
+
+ if send_req:
+ if ndr_print:
+ sys.stderr.write("in: %s" % samba.ndr.ndr_print_in(io))
+ stub_in = samba.ndr.ndr_pack_in(io, bigendian=bigendian, ndr64=ndr64)
+ if hexdump:
+ sys.stderr.write("stub_in: %d\n%s" % (len(stub_in), self.hexdump(stub_in)))
+
+ pfc_flags = samba.dcerpc.dcerpc.DCERPC_PFC_FLAG_FIRST
+ pfc_flags |= samba.dcerpc.dcerpc.DCERPC_PFC_FLAG_LAST
+ if object is not None:
+ pfc_flags |= samba.dcerpc.dcerpc.DCERPC_PFC_FLAG_OBJECT_UUID
+
+ req = self.generate_request_auth(call_id=call_id,
+ context_id=ctx.context_id,
+ pfc_flags=pfc_flags,
+ object=object,
+ opnum=io.opnum(),
+ stub=stub_in,
+ auth_context=auth_context)
+ self.send_pdu(req, ndr_print=ndr_print, hexdump=hexdump)
+ if recv_rep:
+ (rep, rep_blob) = self.recv_pdu_raw(timeout=timeout,
+ ndr_print=ndr_print,
+ hexdump=hexdump)
+ if fault_status:
+ self.verify_pdu(rep, samba.dcerpc.dcerpc.DCERPC_PKT_FAULT, call_id,
+ pfc_flags=fault_pfc_flags, auth_length=0)
+ self.assertNotEqual(rep.u.alloc_hint, 0)
+ self.assertEqual(rep.u.context_id, fault_context_id)
+ self.assertEqual(rep.u.cancel_count, 0)
+ self.assertEqual(rep.u.flags, 0)
+ self.assertEqual(rep.u.status, fault_status)
+ self.assertEqual(rep.u.reserved, 0)
+ self.assertEqual(len(rep.u.error_and_verifier), 0)
+ return
+
+ expected_auth_length = 0
+ if auth_context is not None and \
+ auth_context["auth_level"] >= dcerpc.DCERPC_AUTH_LEVEL_PACKET:
+ if send_req:
+ expected_auth_length = req.auth_length
+ else:
+ expected_auth_length = rep.auth_length
+
+
+ self.verify_pdu(rep, samba.dcerpc.dcerpc.DCERPC_PKT_RESPONSE, call_id,
+ auth_length=expected_auth_length)
+ self.assertNotEqual(rep.u.alloc_hint, 0)
+ self.assertEqual(rep.u.context_id, ctx.context_id & 0xff)
+ self.assertEqual(rep.u.cancel_count, 0)
+ self.assertGreaterEqual(len(rep.u.stub_and_verifier), rep.u.alloc_hint)
+ stub_out = self.check_response_auth(rep, rep_blob, auth_context)
+ self.assertEqual(len(stub_out), rep.u.alloc_hint)
+
+ if hexdump:
+ sys.stderr.write("stub_out: %d\n%s" % (len(stub_out), self.hexdump(stub_out)))
+ ndr_unpack_out(io, stub_out, bigendian=bigendian, ndr64=ndr64,
+ allow_remaining=allow_remaining)
+ if ndr_print:
+ sys.stderr.write("out: %s" % samba.ndr.ndr_print_out(io))
+
+ def epmap_reconnect(self, abstract, transfer=None, object=None):
+ ndr32 = samba.dcerpc.base.transfer_syntax_ndr()
+
+ if transfer is None:
+ transfer = ndr32
+
+ if object is None:
+ object = samba.dcerpc.misc.GUID()
+
+ ctx = self.prepare_presentation(samba.dcerpc.epmapper.abstract_syntax(),
+ transfer, context_id=0)
+
+ data1 = ndr_pack(abstract)
+ lhs1 = samba.dcerpc.epmapper.epm_lhs()
+ lhs1.protocol = samba.dcerpc.epmapper.EPM_PROTOCOL_UUID
+ lhs1.lhs_data = data1[:18]
+ rhs1 = samba.dcerpc.epmapper.epm_rhs_uuid()
+ rhs1.unknown = data1[18:]
+ floor1 = samba.dcerpc.epmapper.epm_floor()
+ floor1.lhs = lhs1
+ floor1.rhs = rhs1
+ data2 = ndr_pack(transfer)
+ lhs2 = samba.dcerpc.epmapper.epm_lhs()
+ lhs2.protocol = samba.dcerpc.epmapper.EPM_PROTOCOL_UUID
+ lhs2.lhs_data = data2[:18]
+ rhs2 = samba.dcerpc.epmapper.epm_rhs_uuid()
+ rhs2.unknown = data1[18:]
+ floor2 = samba.dcerpc.epmapper.epm_floor()
+ floor2.lhs = lhs2
+ floor2.rhs = rhs2
+ lhs3 = samba.dcerpc.epmapper.epm_lhs()
+ lhs3.protocol = samba.dcerpc.epmapper.EPM_PROTOCOL_NCACN
+ lhs3.lhs_data = b""
+ floor3 = samba.dcerpc.epmapper.epm_floor()
+ floor3.lhs = lhs3
+ floor3.rhs.minor_version = 0
+ lhs4 = samba.dcerpc.epmapper.epm_lhs()
+ lhs4.protocol = samba.dcerpc.epmapper.EPM_PROTOCOL_TCP
+ lhs4.lhs_data = b""
+ floor4 = samba.dcerpc.epmapper.epm_floor()
+ floor4.lhs = lhs4
+ floor4.rhs.port = int(self.primary_address)
+ lhs5 = samba.dcerpc.epmapper.epm_lhs()
+ lhs5.protocol = samba.dcerpc.epmapper.EPM_PROTOCOL_IP
+ lhs5.lhs_data = b""
+ floor5 = samba.dcerpc.epmapper.epm_floor()
+ floor5.lhs = lhs5
+ floor5.rhs.ipaddr = "0.0.0.0"
+
+ floors = [floor1, floor2, floor3, floor4, floor5]
+ req_tower = samba.dcerpc.epmapper.epm_tower()
+ req_tower.num_floors = len(floors)
+ req_tower.floors = floors
+ req_twr = samba.dcerpc.epmapper.epm_twr_t()
+ req_twr.tower = req_tower
+
+ epm_map = samba.dcerpc.epmapper.epm_Map()
+ epm_map.in_object = object
+ epm_map.in_map_tower = req_twr
+ epm_map.in_entry_handle = samba.dcerpc.misc.policy_handle()
+ epm_map.in_max_towers = 4
+
+ self.do_single_request(call_id=2, ctx=ctx, io=epm_map)
+
+ self.assertGreaterEqual(epm_map.out_num_towers, 1)
+ rep_twr = epm_map.out_towers[0].twr
+ self.assertIsNotNone(rep_twr)
+ self.assertEqual(rep_twr.tower_length, 75)
+ self.assertEqual(rep_twr.tower.num_floors, 5)
+ self.assertEqual(len(rep_twr.tower.floors), 5)
+ self.assertEqual(rep_twr.tower.floors[3].lhs.protocol,
+ samba.dcerpc.epmapper.EPM_PROTOCOL_TCP)
+ self.assertEqual(rep_twr.tower.floors[3].lhs.protocol,
+ samba.dcerpc.epmapper.EPM_PROTOCOL_TCP)
+
+ # reconnect to the given port
+ self._disconnect("epmap_reconnect")
+ self.primary_address = "%d" % rep_twr.tower.floors[3].rhs.port
+ self.secondary_address = None
+ self.connect()
+
+ def send_pdu(self, req, ndr_print=None, hexdump=None):
+ if ndr_print is None:
+ ndr_print = self.do_ndr_print
+ if hexdump is None:
+ hexdump = self.do_hexdump
+ try:
+ req_pdu = ndr_pack(req)
+ if ndr_print:
+ sys.stderr.write("send_pdu: %s" % samba.ndr.ndr_print(req))
+ if hexdump:
+ sys.stderr.write("send_pdu: %d\n%s" % (len(req_pdu), self.hexdump(req_pdu)))
+ while True:
+ sent = self.s.send(req_pdu, 0)
+ if sent == len(req_pdu):
+ break
+ req_pdu = req_pdu[sent:]
+ except socket.error as e:
+ self._disconnect("send_pdu: %s" % e)
+ raise
+ except IOError as e:
+ self._disconnect("send_pdu: %s" % e)
+ raise
+ except NTSTATUSError as e:
+ self._disconnect("send_pdu: %s" % e)
+ raise
+ finally:
+ pass
+
+ def recv_raw(self, hexdump=None, timeout=None):
+ rep_pdu = None
+ if hexdump is None:
+ hexdump = self.do_hexdump
+ try:
+ if timeout is not None:
+ self.s.settimeout(timeout)
+ rep_pdu = self.s.recv(0xffff, 0)
+ self.s.settimeout(10)
+ if len(rep_pdu) == 0:
+ self._disconnect("recv_raw: EOF")
+ return None
+ if hexdump:
+ sys.stderr.write("recv_raw: %d\n%s" % (len(rep_pdu), self.hexdump(rep_pdu)))
+ except socket.timeout as e:
+ self.s.settimeout(10)
+ sys.stderr.write("recv_raw: TIMEOUT\n")
+ pass
+ except socket.error as e:
+ self._disconnect("recv_raw: %s" % e)
+ raise
+ except IOError as e:
+ self._disconnect("recv_raw: %s" % e)
+ raise
+ finally:
+ pass
+ return rep_pdu
+
+ def recv_pdu_raw(self, ndr_print=None, hexdump=None, timeout=None):
+ rep_pdu = None
+ rep = None
+ if ndr_print is None:
+ ndr_print = self.do_ndr_print
+ if hexdump is None:
+ hexdump = self.do_hexdump
+ try:
+ rep_pdu = self.recv_raw(hexdump=hexdump, timeout=timeout)
+ if rep_pdu is None:
+ return (None, None)
+ rep = ndr_unpack(samba.dcerpc.dcerpc.ncacn_packet, rep_pdu, allow_remaining=True)
+ if ndr_print:
+ sys.stderr.write("recv_pdu: %s" % samba.ndr.ndr_print(rep))
+ self.assertEqual(rep.frag_length, len(rep_pdu))
+ finally:
+ pass
+ return (rep, rep_pdu)
+
+ def recv_pdu(self, ndr_print=None, hexdump=None, timeout=None):
+ (rep, rep_pdu) = self.recv_pdu_raw(ndr_print=ndr_print,
+ hexdump=hexdump,
+ timeout=timeout)
+ return rep
+
+ def generate_auth(self,
+ auth_type=None,
+ auth_level=None,
+ auth_pad_length=0,
+ auth_context_id=None,
+ auth_blob=None,
+ ndr_print=None, hexdump=None):
+ if ndr_print is None:
+ ndr_print = self.do_ndr_print
+ if hexdump is None:
+ hexdump = self.do_hexdump
+
+ if auth_type is not None:
+ a = samba.dcerpc.dcerpc.auth()
+ a.auth_type = auth_type
+ a.auth_level = auth_level
+ a.auth_pad_length = auth_pad_length
+ a.auth_context_id = auth_context_id
+ a.credentials = auth_blob
+
+ ai = ndr_pack(a)
+ if ndr_print:
+ sys.stderr.write("generate_auth: %s" % samba.ndr.ndr_print(a))
+ if hexdump:
+ sys.stderr.write("generate_auth: %d\n%s" % (len(ai), self.hexdump(ai)))
+ else:
+ ai = b""
+
+ return ai
+
+ def parse_auth(self, auth_info, ndr_print=None, hexdump=None,
+ auth_context=None, stub_len=0):
+ if ndr_print is None:
+ ndr_print = self.do_ndr_print
+ if hexdump is None:
+ hexdump = self.do_hexdump
+
+ if (len(auth_info) <= samba.dcerpc.dcerpc.DCERPC_AUTH_TRAILER_LENGTH):
+ return None
+
+ if hexdump:
+ sys.stderr.write("parse_auth: %d\n%s" % (len(auth_info), self.hexdump(auth_info)))
+ a = ndr_unpack(samba.dcerpc.dcerpc.auth, auth_info, allow_remaining=True)
+ if ndr_print:
+ sys.stderr.write("parse_auth: %s" % samba.ndr.ndr_print(a))
+
+ if auth_context is not None:
+ self.assertEqual(a.auth_type, auth_context["auth_type"])
+ self.assertEqual(a.auth_level, auth_context["auth_level"])
+ self.assertEqual(a.auth_reserved, 0)
+ self.assertEqual(a.auth_context_id, auth_context["auth_context_id"])
+
+ self.assertLessEqual(a.auth_pad_length, dcerpc.DCERPC_AUTH_PAD_ALIGNMENT)
+ self.assertLessEqual(a.auth_pad_length, stub_len)
+
+ return a
+
    def check_response_auth(self, rep, rep_blob, auth_context=None,
                            auth_pad_length=None):
        """Validate the auth trailer of a response PDU.

        rep is the parsed ncacn_packet, rep_blob the raw PDU bytes.
        Returns the stub data with any auth padding stripped.
        """

        if auth_context is None:
            self.assertEqual(rep.auth_length, 0)
            return rep.u.stub_and_verifier

        if auth_context["auth_level"] == dcerpc.DCERPC_AUTH_LEVEL_CONNECT:
            # CONNECT level carries no per-PDU verifier
            self.assertEqual(rep.auth_length, 0)
            return rep.u.stub_and_verifier

        self.assertGreater(rep.auth_length, 0)

        # Carve the raw PDU into stub / auth trailer / signature.
        ofs_stub = dcerpc.DCERPC_REQUEST_LENGTH
        ofs_sig = rep.frag_length - rep.auth_length
        ofs_trailer = ofs_sig - dcerpc.DCERPC_AUTH_TRAILER_LENGTH
        rep_data = rep_blob[ofs_stub:ofs_trailer]
        rep_whole = rep_blob[0:ofs_sig]
        rep_sig = rep_blob[ofs_sig:]
        rep_auth_info_blob = rep_blob[ofs_trailer:]

        rep_auth_info = self.parse_auth(rep_auth_info_blob,
                                        auth_context=auth_context,
                                        stub_len=len(rep_data))
        if auth_pad_length is not None:
            self.assertEqual(rep_auth_info.auth_pad_length, auth_pad_length)
        # The trailing bytes of the PDU must equal the credentials field.
        self.assertEqual(rep_auth_info.credentials, rep_sig)

        if auth_context["auth_level"] >= dcerpc.DCERPC_AUTH_LEVEL_PRIVACY:
            # TODO: not yet supported here
            self.assertTrue(False)
        elif auth_context["auth_level"] >= dcerpc.DCERPC_AUTH_LEVEL_PACKET:
            auth_context["gensec"].check_packet(rep_data, rep_whole, rep_sig)

        # Strip the auth padding from the end of the stub.
        stub_out = rep_data[0:len(rep_data)-rep_auth_info.auth_pad_length]

        return stub_out
+
+ def generate_pdu(self, ptype, call_id, payload,
+ rpc_vers=5,
+ rpc_vers_minor=0,
+ pfc_flags=(samba.dcerpc.dcerpc.DCERPC_PFC_FLAG_FIRST |
+ samba.dcerpc.dcerpc.DCERPC_PFC_FLAG_LAST),
+ drep=None,
+ ndr_print=None, hexdump=None):
+
+ if drep is None:
+ drep = [samba.dcerpc.dcerpc.DCERPC_DREP_LE, 0, 0, 0]
+ if getattr(payload, 'auth_info', None):
+ ai = payload.auth_info
+ else:
+ ai = b""
+
+ p = samba.dcerpc.dcerpc.ncacn_packet()
+ p.rpc_vers = rpc_vers
+ p.rpc_vers_minor = rpc_vers_minor
+ p.ptype = ptype
+ p.pfc_flags = pfc_flags
+ p.drep = drep
+ p.frag_length = 0
+ if len(ai) > samba.dcerpc.dcerpc.DCERPC_AUTH_TRAILER_LENGTH:
+ p.auth_length = len(ai) - samba.dcerpc.dcerpc.DCERPC_AUTH_TRAILER_LENGTH
+ else:
+ p.auth_length = 0
+ p.call_id = call_id
+ p.u = payload
+
+ pdu = ndr_pack(p)
+ p.frag_length = len(pdu)
+
+ return p
+
    def generate_request_auth(self, call_id,
                              pfc_flags=(dcerpc.DCERPC_PFC_FLAG_FIRST |
                                         dcerpc.DCERPC_PFC_FLAG_LAST),
                              alloc_hint=None,
                              context_id=None,
                              opnum=None,
                              object=None,
                              stub=None,
                              auth_context=None,
                              ndr_print=None, hexdump=None):
        """Build a request PDU and, when auth_context is given, sign it.

        The stub is padded to DCERPC_AUTH_PAD_ALIGNMENT, a zeroed
        signature placeholder is appended via the auth trailer, and the
        placeholder is then replaced with the real signature computed
        over the marshalled packet.
        """

        if stub is None:
            stub = b""

        sig_size = 0
        if auth_context is not None:
            # Pad the stub up to the auth alignment boundary.
            mod_len = len(stub) % dcerpc.DCERPC_AUTH_PAD_ALIGNMENT
            auth_pad_length = 0
            if mod_len > 0:
                auth_pad_length = dcerpc.DCERPC_AUTH_PAD_ALIGNMENT - mod_len
            stub += b'\x00' * auth_pad_length

            if auth_context["g_auth_level"] >= samba.dcerpc.dcerpc.DCERPC_AUTH_LEVEL_PACKET:
                sig_size = auth_context["gensec"].sig_size(len(stub))
            else:
                # fixed-size verifier used below the PACKET level
                sig_size = 16

            zero_sig = b"\x00" * sig_size
            auth_info = self.generate_auth(auth_type=auth_context["auth_type"],
                                           auth_level=auth_context["auth_level"],
                                           auth_pad_length=auth_pad_length,
                                           auth_context_id=auth_context["auth_context_id"],
                                           auth_blob=zero_sig)
        else:
            auth_info = b""

        req = self.generate_request(call_id=call_id,
                                    pfc_flags=pfc_flags,
                                    alloc_hint=alloc_hint,
                                    context_id=context_id,
                                    opnum=opnum,
                                    object=object,
                                    stub=stub,
                                    auth_info=auth_info,
                                    ndr_print=ndr_print,
                                    hexdump=hexdump)
        if auth_context is None:
            return req

        # Marshal once (with the zeroed placeholder) and locate the
        # stub and signature regions inside the raw packet.
        req_blob = samba.ndr.ndr_pack(req)
        ofs_stub = dcerpc.DCERPC_REQUEST_LENGTH
        ofs_sig = len(req_blob) - req.auth_length
        ofs_trailer = ofs_sig - dcerpc.DCERPC_AUTH_TRAILER_LENGTH
        req_data = req_blob[ofs_stub:ofs_trailer]
        req_whole = req_blob[0:ofs_sig]

        if auth_context["g_auth_level"] >= dcerpc.DCERPC_AUTH_LEVEL_PRIVACY:
            # TODO: not yet supported here
            self.assertTrue(False)
        elif auth_context["g_auth_level"] >= dcerpc.DCERPC_AUTH_LEVEL_PACKET:
            req_sig = auth_context["gensec"].sign_packet(req_data, req_whole)
        elif auth_context["g_auth_level"] >= dcerpc.DCERPC_AUTH_LEVEL_CONNECT:
            # CONNECT level: fixed 16-byte NTLMSSP verifier
            self.assertEqual(auth_context["auth_type"],
                             dcerpc.DCERPC_AUTH_TYPE_NTLMSSP)
            req_sig = b"\x01" +b"\x00" *15
        else:
            return req
        self.assertEqual(len(req_sig), req.auth_length)
        self.assertEqual(len(req_sig), sig_size)

        # Splice the real signature over the zeroed placeholder.
        stub_sig_ofs = len(req.u.stub_and_verifier) - sig_size
        stub = req.u.stub_and_verifier[0:stub_sig_ofs] + req_sig
        req.u.stub_and_verifier = stub

        return req
+
+ def verify_pdu(self, p, ptype, call_id,
+ rpc_vers=5,
+ rpc_vers_minor=0,
+ pfc_flags=(samba.dcerpc.dcerpc.DCERPC_PFC_FLAG_FIRST |
+ samba.dcerpc.dcerpc.DCERPC_PFC_FLAG_LAST),
+ drep=None,
+ auth_length=None):
+
+ if drep is None:
+ drep = [samba.dcerpc.dcerpc.DCERPC_DREP_LE, 0, 0, 0]
+ self.assertIsNotNone(p, "No valid pdu")
+
+ if getattr(p.u, 'auth_info', None):
+ ai = p.u.auth_info
+ else:
+ ai = b""
+
+ self.assertEqual(p.rpc_vers, rpc_vers)
+ self.assertEqual(p.rpc_vers_minor, rpc_vers_minor)
+ self.assertEqual(p.ptype, ptype)
+ self.assertEqual(p.pfc_flags, pfc_flags)
+ self.assertEqual(p.drep, drep)
+ self.assertGreaterEqual(p.frag_length,
+ samba.dcerpc.dcerpc.DCERPC_NCACN_PAYLOAD_OFFSET)
+ if len(ai) > samba.dcerpc.dcerpc.DCERPC_AUTH_TRAILER_LENGTH:
+ self.assertEqual(p.auth_length,
+ len(ai) - samba.dcerpc.dcerpc.DCERPC_AUTH_TRAILER_LENGTH)
+ elif auth_length is not None:
+ self.assertEqual(p.auth_length, auth_length)
+ else:
+ self.assertEqual(p.auth_length, 0)
+ self.assertEqual(p.call_id, call_id)
+
+ return
+
+ def generate_bind(self, call_id,
+ pfc_flags=(samba.dcerpc.dcerpc.DCERPC_PFC_FLAG_FIRST |
+ samba.dcerpc.dcerpc.DCERPC_PFC_FLAG_LAST),
+ max_xmit_frag=None,
+ max_recv_frag=None,
+ assoc_group_id=0,
+ ctx_list=None,
+ auth_info=b"",
+ ndr_print=None, hexdump=None):
+
+ if ctx_list is None:
+ ctx_list = []
+ if max_xmit_frag is None:
+ max_xmit_frag=self.max_xmit_frag
+ if max_recv_frag is None:
+ max_recv_frag=self.max_recv_frag
+
+ b = samba.dcerpc.dcerpc.bind()
+ b.max_xmit_frag = max_xmit_frag
+ b.max_recv_frag = max_recv_frag
+ b.assoc_group_id = assoc_group_id
+ b.num_contexts = len(ctx_list)
+ b.ctx_list = ctx_list
+ b.auth_info = auth_info
+
+ p = self.generate_pdu(ptype=samba.dcerpc.dcerpc.DCERPC_PKT_BIND,
+ pfc_flags=pfc_flags,
+ call_id=call_id,
+ payload=b,
+ ndr_print=ndr_print, hexdump=hexdump)
+
+ return p
+
+ def generate_alter(self, call_id,
+ pfc_flags=(samba.dcerpc.dcerpc.DCERPC_PFC_FLAG_FIRST |
+ samba.dcerpc.dcerpc.DCERPC_PFC_FLAG_LAST),
+ max_xmit_frag=None,
+ max_recv_frag=None,
+ assoc_group_id=0,
+ ctx_list=None,
+ auth_info=b"",
+ ndr_print=None, hexdump=None):
+
+ if ctx_list is None:
+ ctx_list = []
+ if max_xmit_frag is None:
+ max_xmit_frag=self.max_xmit_frag
+ if max_recv_frag is None:
+ max_recv_frag=self.max_recv_frag
+
+ a = samba.dcerpc.dcerpc.bind()
+ a.max_xmit_frag = max_xmit_frag
+ a.max_recv_frag = max_recv_frag
+ a.assoc_group_id = assoc_group_id
+ a.num_contexts = len(ctx_list)
+ a.ctx_list = ctx_list
+ a.auth_info = auth_info
+
+ p = self.generate_pdu(ptype=samba.dcerpc.dcerpc.DCERPC_PKT_ALTER,
+ pfc_flags=pfc_flags,
+ call_id=call_id,
+ payload=a,
+ ndr_print=ndr_print, hexdump=hexdump)
+
+ return p
+
+ def generate_auth3(self, call_id,
+ pfc_flags=(samba.dcerpc.dcerpc.DCERPC_PFC_FLAG_FIRST |
+ samba.dcerpc.dcerpc.DCERPC_PFC_FLAG_LAST),
+ auth_info=b"",
+ ndr_print=None, hexdump=None):
+
+ a = samba.dcerpc.dcerpc.auth3()
+ a.auth_info = auth_info
+
+ p = self.generate_pdu(ptype=samba.dcerpc.dcerpc.DCERPC_PKT_AUTH3,
+ pfc_flags=pfc_flags,
+ call_id=call_id,
+ payload=a,
+ ndr_print=ndr_print, hexdump=hexdump)
+
+ return p
+
+ def generate_request(self, call_id,
+ pfc_flags=(samba.dcerpc.dcerpc.DCERPC_PFC_FLAG_FIRST |
+ samba.dcerpc.dcerpc.DCERPC_PFC_FLAG_LAST),
+ alloc_hint=None,
+ context_id=None,
+ opnum=None,
+ object=None,
+ stub=None,
+ auth_info=b"",
+ ndr_print=None, hexdump=None):
+
+ if alloc_hint is None:
+ alloc_hint = len(stub)
+
+ r = samba.dcerpc.dcerpc.request()
+ r.alloc_hint = alloc_hint
+ r.context_id = context_id
+ r.opnum = opnum
+ if object is not None:
+ r.object = object
+ r.stub_and_verifier = stub + auth_info
+
+ p = self.generate_pdu(ptype=samba.dcerpc.dcerpc.DCERPC_PKT_REQUEST,
+ pfc_flags=pfc_flags,
+ call_id=call_id,
+ payload=r,
+ ndr_print=ndr_print, hexdump=hexdump)
+
+ if len(auth_info) > samba.dcerpc.dcerpc.DCERPC_AUTH_TRAILER_LENGTH:
+ p.auth_length = len(auth_info) - samba.dcerpc.dcerpc.DCERPC_AUTH_TRAILER_LENGTH
+
+ return p
+
+ def generate_co_cancel(self, call_id,
+ pfc_flags=(samba.dcerpc.dcerpc.DCERPC_PFC_FLAG_FIRST |
+ samba.dcerpc.dcerpc.DCERPC_PFC_FLAG_LAST),
+ auth_info=b"",
+ ndr_print=None, hexdump=None):
+
+ c = samba.dcerpc.dcerpc.co_cancel()
+ c.auth_info = auth_info
+
+ p = self.generate_pdu(ptype=samba.dcerpc.dcerpc.DCERPC_PKT_CO_CANCEL,
+ pfc_flags=pfc_flags,
+ call_id=call_id,
+ payload=c,
+ ndr_print=ndr_print, hexdump=hexdump)
+
+ return p
+
+ def generate_orphaned(self, call_id,
+ pfc_flags=(samba.dcerpc.dcerpc.DCERPC_PFC_FLAG_FIRST |
+ samba.dcerpc.dcerpc.DCERPC_PFC_FLAG_LAST),
+ auth_info=b"",
+ ndr_print=None, hexdump=None):
+
+ o = samba.dcerpc.dcerpc.orphaned()
+ o.auth_info = auth_info
+
+ p = self.generate_pdu(ptype=samba.dcerpc.dcerpc.DCERPC_PKT_ORPHANED,
+ pfc_flags=pfc_flags,
+ call_id=call_id,
+ payload=o,
+ ndr_print=ndr_print, hexdump=hexdump)
+
+ return p
+
+ def generate_shutdown(self, call_id,
+ pfc_flags=(samba.dcerpc.dcerpc.DCERPC_PFC_FLAG_FIRST |
+ samba.dcerpc.dcerpc.DCERPC_PFC_FLAG_LAST),
+ ndr_print=None, hexdump=None):
+
+ s = samba.dcerpc.dcerpc.shutdown()
+
+ p = self.generate_pdu(ptype=samba.dcerpc.dcerpc.DCERPC_PKT_SHUTDOWN,
+ pfc_flags=pfc_flags,
+ call_id=call_id,
+ payload=s,
+ ndr_print=ndr_print, hexdump=hexdump)
+
+ return p
+
    def assertIsConnected(self):
        # self.s holds the transport socket; None means no open connection.
        self.assertIsNotNone(self.s, msg="Not connected")
        return
+
    def assertNotConnected(self):
        # self.s holds the transport socket; it must be None here.
        self.assertIsNone(self.s, msg="Is connected")
        return
+
+ def assertNDRSyntaxEquals(self, s1, s2):
+ self.assertEqual(s1.uuid, s2.uuid)
+ self.assertEqual(s1.if_version, s2.if_version)
+ return
+
+ def assertPadding(self, pad, length):
+ self.assertEqual(len(pad), length)
+ #
+ # sometimes windows sends random bytes
+ #
+ # we have IGNORE_RANDOM_PAD=1 to
+ # disable the check
+ #
+ if self.ignore_random_pad:
+ return
+ zero_pad = b'\0' * length
+ self.assertEqual(pad, zero_pad)
+
    def assertEqualsStrLower(self, s1, s2):
        # Compare the str() forms of both values case-insensitively.
        self.assertEqual(str(s1).lower(), str(s2).lower())
diff --git a/python/samba/tests/dcerpc/registry.py b/python/samba/tests/dcerpc/registry.py
new file mode 100644
index 0000000..be9e484
--- /dev/null
+++ b/python/samba/tests/dcerpc/registry.py
@@ -0,0 +1,51 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2008
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for samba.dcerpc.registry."""
+
+from samba.dcerpc import winreg
+from samba.tests import RpcInterfaceTestCase
+
+
class WinregTests(RpcInterfaceTestCase):
    """Smoke tests for the winreg RPC interface over ncalrpc."""

    def setUp(self):
        super().setUp()
        self.conn = winreg.winreg("ncalrpc:", self.get_loadparm(),
                                  self.get_credentials())

    def get_hklm(self):
        """Open HKLM with query/enumerate access and return the handle."""
        access = winreg.KEY_QUERY_VALUE | winreg.KEY_ENUMERATE_SUB_KEYS
        return self.conn.OpenHKLM(None, access)

    def test_hklm(self):
        handle = self.get_hklm()
        self.conn.CloseKey(handle)

    def test_getversion(self):
        handle = self.get_hklm()
        version = self.conn.GetVersion(handle)
        self.assertEqual(int, version.__class__)
        self.conn.CloseKey(handle)

    def test_getkeyinfo(self):
        handle = self.get_hklm()
        x = self.conn.QueryInfoKey(handle, winreg.String())
        self.assertEqual(9, len(x))  # should return a 9-tuple
        self.conn.CloseKey(handle)
diff --git a/python/samba/tests/dcerpc/rpc_talloc.py b/python/samba/tests/dcerpc/rpc_talloc.py
new file mode 100644
index 0000000..4ad86a6
--- /dev/null
+++ b/python/samba/tests/dcerpc/rpc_talloc.py
@@ -0,0 +1,86 @@
+# test generated python code from pidl
+# Copyright (C) Andrew Tridgell August 2010
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+#
+# to run this test, use one of these:
+#
+# python -m unittest samba.tests.dcerpc.rpc_talloc
+#
+# or if you have trial installed (from twisted), use
+#
+# trial samba.tests.dcerpc.rpc_talloc
+
+"""Tests for the talloc handling in the generated Python DCE/RPC bindings."""
+
+import sys
+
+sys.path.insert(0, "bin/python")
+
+import samba
+import samba.tests
+from samba.dcerpc import drsuapi
+import talloc
+
+talloc.enable_null_tracking()
+
+
class TallocTests(samba.tests.TestCase):
    """test talloc behaviour of pidl generated python code"""

    def check_blocks(self, object, num_expected):
        """check that the number of allocated blocks is correct"""
        nblocks = talloc.total_blocks(object)
        if object is None:
            # whole-process count: subtract the blocks that already
            # existed before the test started (set in test_run)
            nblocks -= self.initial_blocks
        self.assertEqual(nblocks, num_expected)

    def get_rodc_partial_attribute_set(self):
        """get a list of attributes for RODC replication"""
        partial_attribute_set = drsuapi.DsPartialAttributeSet()

        # we expect one block for the object
        self.check_blocks(partial_attribute_set, 1)

        attids = [1, 2, 3]
        partial_attribute_set.version = 1
        partial_attribute_set.attids = attids
        partial_attribute_set.num_attids = len(attids)

        # we expect one block for the object plus one for the
        # attids array it now references
        self.check_blocks(partial_attribute_set, 2)

        return partial_attribute_set

    def pas_test(self):
        pas = self.get_rodc_partial_attribute_set()
        self.check_blocks(pas, 2)
        req8 = drsuapi.DsGetNCChangesRequest8()
        self.check_blocks(req8, 1)

        # We expect the pas and req8, plus one block for each python object
        self.check_blocks(None, 5)
        req8.partial_attribute_set = pas
        # attids[1] was set to 2 in get_rodc_partial_attribute_set()
        if req8.partial_attribute_set.attids[1] != 2:
            # the message previously said attids[2], which did not match
            # the index actually being checked
            raise Exception("Wrong value in attids[1]")
        # we now get an additional reference
        self.check_blocks(None, 6)

    def test_run(self):
        self.initial_blocks = talloc.total_blocks(None)
        self.check_blocks(None, 0)
        self.pas_test()
        self.check_blocks(None, 0)
diff --git a/python/samba/tests/dcerpc/rpcecho.py b/python/samba/tests/dcerpc/rpcecho.py
new file mode 100644
index 0000000..949e4e2
--- /dev/null
+++ b/python/samba/tests/dcerpc/rpcecho.py
@@ -0,0 +1,71 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2008
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for samba.dcerpc.rpcecho."""
+
+from samba.dcerpc import echo
+from samba.ndr import ndr_pack, ndr_unpack
+from samba.tests import RpcInterfaceTestCase, TestCase
+
+
class RpcEchoTests(RpcInterfaceTestCase):
    """Exercise the rpcecho test interface over ncalrpc."""

    def setUp(self):
        super().setUp()
        self.conn = echo.rpcecho("ncalrpc:", self.get_loadparm())

    def test_two_contexts(self):
        self.conn2 = echo.rpcecho("ncalrpc:", self.get_loadparm(),
                                  basis_connection=self.conn)
        self.assertEqual(3, self.conn2.AddOne(2))

    def test_abstract_syntax(self):
        self.assertEqual(("60a15ec5-4de8-11d7-a637-005056a20182", 1),
                         self.conn.abstract_syntax)

    def test_addone(self):
        self.assertEqual(2, self.conn.AddOne(1))

    def test_echodata(self):
        self.assertEqual([1, 2, 3], self.conn.EchoData([1, 2, 3]))

    def test_call(self):
        self.assertEqual(u"foobar", self.conn.TestCall(u"foobar"))

    def test_surrounding(self):
        s_in = echo.Surrounding()
        s_in.x = 4
        s_in.surrounding = [1, 2, 3, 4]
        reply = self.conn.TestSurrounding(s_in)
        # expected: 8 zero elements in the returned array
        self.assertEqual([0] * 8, reply.surrounding)

    def test_manual_request(self):
        self.assertEqual(b"\x01\x00\x00\x00",
                         self.conn.request(0, chr(0) * 4))

    def test_server_name(self):
        self.assertEqual(None, self.conn.server_name)
+
+
class NdrEchoTests(TestCase):
    """NDR marshalling round-trip tests for the echo.info1 type."""

    def test_info1_push(self):
        info = echo.info1()
        info.v = 42
        self.assertEqual(b"\x2a", ndr_pack(info))

    def test_info1_pull(self):
        info = ndr_unpack(echo.info1, b"\x42")
        self.assertEqual(info.v, 0x42)
diff --git a/python/samba/tests/dcerpc/sam.py b/python/samba/tests/dcerpc/sam.py
new file mode 100644
index 0000000..9029cce
--- /dev/null
+++ b/python/samba/tests/dcerpc/sam.py
@@ -0,0 +1,783 @@
+# -*- coding: utf-8 -*-
+#
+# Unix SMB/CIFS implementation.
+# Copyright © Jelmer Vernooij <jelmer@samba.org> 2008
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for samba.dcerpc.sam."""
+
+from samba.dcerpc import samr, security, lsa
+from samba.dcerpc.samr import DomainGeneralInformation
+from samba.tests import RpcInterfaceTestCase
+from samba.tests import env_loadparm, delete_force
+
+from samba.credentials import Credentials
+from samba.auth import system_session
+from samba.samdb import SamDB
+from samba.dsdb import (
+ ATYPE_NORMAL_ACCOUNT,
+ ATYPE_WORKSTATION_TRUST,
+ GTYPE_SECURITY_UNIVERSAL_GROUP,
+ GTYPE_SECURITY_GLOBAL_GROUP)
+from samba import generate_random_password
+from samba.ndr import ndr_unpack
+import os
+
+
+# FIXME: Pidl should be doing this for us
def toArray(handle, array, num_entries):
    """Flatten the first num_entries of a SAMR entry array into a list
    of (idx, name) tuples. The handle argument is unused."""
    result = []
    for entry in array.entries[:num_entries]:
        result.append((entry.idx, entry.name))
    return result
+
+
+# Extract the rid from an ldb message, assumes that the message has a
+# objectSID attribute
+#
def rid(msg):
    # msg["objectSID"][0] is the NDR-packed SID blob of the account;
    # dom_sid.split() presumably yields (domain sid, rid) — we keep
    # only the rid component.
    sid = ndr_unpack(security.dom_sid, msg["objectSID"][0])
    (_, rid) = sid.split()
    return rid
+
+
+# Calculate the request size for EnumDomainUsers and EnumDomainGroups calls
+# to hold the specified number of entries.
+# We use the w2k3 element size value of 54, code under test
+# rounds this up i.e. (1+(max_size/SAMR_ENUM_USERS_MULTIPLIER))
+#
def calc_max_size(num_entries):
    """Return the max_size request value that yields num_entries results.

    The server uses the w2k3 per-entry multiplier of 54 and rounds up,
    i.e. 1 + (max_size / 54), so (num_entries - 1) * 54 maps back to
    exactly num_entries.
    """
    SAMR_ENUM_USERS_MULTIPLIER = 54
    return (num_entries - 1) * SAMR_ENUM_USERS_MULTIPLIER
+
+
+class SamrTests(RpcInterfaceTestCase):
+
    def setUp(self):
        super().setUp()
        # ncalrpc connection to the local SAMR server, plus a direct
        # sam.ldb connection and an open SAMR domain handle for the tests.
        self.conn = samr.samr("ncalrpc:", self.get_loadparm())
        self.open_samdb()
        self.open_domain_handle()
+
+ #
+ # Open the samba database
+ #
    def open_samdb(self):
        # Connect to the local sam.ldb with a system session.
        # DOMAIN must be set in the environment (provided by the
        # selftest harness).
        self.lp = env_loadparm()
        self.domain = os.environ["DOMAIN"]
        self.creds = Credentials()
        self.creds.guess(self.lp)
        self.session = system_session()
        self.samdb = SamDB(
            session_info=self.session, credentials=self.creds, lp=self.lp)
+
+ #
+ # Open a SAMR Domain handle
    def open_domain_handle(self):
        # Connect2 returns the server handle, LookupDomain resolves the
        # domain name to its SID, and OpenDomain yields the domain
        # handle the tests operate on.
        self.handle = self.conn.Connect2(
            None, security.SEC_FLAG_MAXIMUM_ALLOWED)

        self.domain_sid = self.conn.LookupDomain(
            self.handle, lsa.String(self.domain))

        self.domain_handle = self.conn.OpenDomain(
            self.handle, security.SEC_FLAG_MAXIMUM_ALLOWED, self.domain_sid)
+
+ # Filter a list of records, removing those that are not part of the
+ # current domain.
+ #
+ def filter_domain(self, unfiltered):
+ def sid(msg):
+ sid = ndr_unpack(security.dom_sid, msg["objectSID"][0])
+ (x, _) = sid.split()
+ return x
+
+ dom_sid = security.dom_sid(self.samdb.get_domain_sid())
+ return [x for x in unfiltered if sid(x) == dom_sid]
+
    def test_connect5(self):
        # Connect5 negotiates the connect level; it returns
        # (out_level, out_info, handle) — success is the whole test here.
        (level, info, handle) =\
            self.conn.Connect5(None, 0, 1, samr.ConnectInfo1())
+
+ def test_connect2(self):
+ handle = self.conn.Connect2(None, security.SEC_FLAG_MAXIMUM_ALLOWED)
+ self.assertTrue(handle is not None)
+
    def test_EnumDomains(self):
        handle = self.conn.Connect2(None, security.SEC_FLAG_MAXIMUM_ALLOWED)
        # The 3-tuple returned by EnumDomains is unpacked straight into
        # toArray(), flattening the entries into (idx, name) tuples;
        # only successful completion is being tested.
        toArray(*self.conn.EnumDomains(handle, 0, 4294967295))
        self.conn.Close(handle)
+
+ # Create groups based on the id list supplied, the id is used to
+ # form a unique name and description.
+ #
+ # returns a list of the created dn's, which can be passed to delete_dns
+ # to clean up after the test has run.
+ def create_groups(self, ids):
+ dns = []
+ for i in ids:
+ name = "SAMR_GRP%d" % i
+ dn = "cn=%s,cn=Users,%s" % (name, self.samdb.domain_dn())
+ delete_force(self.samdb, dn)
+
+ self.samdb.newgroup(name)
+ dns.append(dn)
+ return dns
+
+ # Create user accounts based on the id list supplied, the id is used to
+ # form a unique name and description.
+ #
+ # returns a list of the created dn's, which can be passed to delete_dns
+ # to clean up after the test has run.
+ def create_users(self, ids):
+ dns = []
+ for i in ids:
+ name = "SAMR_USER%d" % i
+ dn = "cn=%s,CN=USERS,%s" % (name, self.samdb.domain_dn())
+ delete_force(self.samdb, dn)
+
+ # We only need the user to exist, we don't need a password
+ self.samdb.newuser(
+ name,
+ password=None,
+ setpassword=False,
+ description="Description for " + name,
+ givenname="given%dname" % i,
+ surname="surname%d" % i)
+ dns.append(dn)
+ return dns
+
+ # Create computer accounts based on the id list supplied, the id is used to
+ # form a unique name and description.
+ #
+ # returns a list of the created dn's, which can be passed to delete_dns
+ # to clean up after the test has run.
+ def create_computers(self, ids):
+ dns = []
+ for i in ids:
+ name = "SAMR_CMP%d" % i
+ dn = "cn=%s,cn=COMPUTERS,%s" % (name, self.samdb.domain_dn())
+ delete_force(self.samdb, dn)
+
+ self.samdb.newcomputer(name, description="Description of " + name)
+ dns.append(dn)
+ return dns
+
+ # Delete the specified dn's.
+ #
+ # Used to clean up entries created by individual tests.
+ #
    def delete_dns(self, dns):
        # Best-effort cleanup of test-created entries; delete_force
        # presumably tolerates already-missing DNs.
        for dn in dns:
            delete_force(self.samdb, dn)
+
+ # Common tests for QueryDisplayInfo
+ #
    def _test_QueryDisplayInfo(
            self, level, check_results, select, attributes, add_elements):
        """Common driver for the QueryDisplayInfo tests.

        level         -- QueryDisplayInfo information level under test
        check_results -- callback(expected, actual) comparing ldb records
                         against the returned display entries
        select        -- LDAP filter used to compute the expected records
        attributes    -- attributes fetched for the expected records
        add_elements  -- callback(ids) creating extra accounts, used to
                         probe the server-side result cache
        """
        #
        # Get the expected results by querying the samdb database directly.
        # We do this rather than use a list of expected results as this runs
        # with other tests so we do not have a known fixed list of elements
        expected = self.samdb.search(expression=select, attrs=attributes)
        self.assertTrue(len(expected) > 0)

        #
        # Perform QueryDisplayInfo with max results greater than the expected
        # number of results.
        (ts, rs, actual) = self.conn.QueryDisplayInfo(
            self.domain_handle, level, 0, 1024, 4294967295)

        self.assertEqual(len(expected), ts)
        self.assertEqual(len(expected), rs)
        check_results(expected, actual.entries)

        #
        # Perform QueryDisplayInfo with max results set to the number of
        # results returned from the first query, should return the same results
        (ts1, rs1, actual1) = self.conn.QueryDisplayInfo(
            self.domain_handle, level, 0, rs, 4294967295)
        self.assertEqual(ts, ts1)
        self.assertEqual(rs, rs1)
        check_results(expected, actual1.entries)

        #
        # Perform QueryDisplayInfo and get the last two results.
        # Note: We are assuming there are at least three entries
        self.assertTrue(ts > 2)
        (ts2, rs2, actual2) = self.conn.QueryDisplayInfo(
            self.domain_handle, level, (ts - 2), 2, 4294967295)
        self.assertEqual(ts, ts2)
        self.assertEqual(2, rs2)
        check_results(list(expected)[-2:], actual2.entries)

        #
        # Perform QueryDisplayInfo and get the first two results.
        # Note: We are assuming there are at least three entries
        self.assertTrue(ts > 2)
        (ts2, rs2, actual2) = self.conn.QueryDisplayInfo(
            self.domain_handle, level, 0, 2, 4294967295)
        self.assertEqual(ts, ts2)
        self.assertEqual(2, rs2)
        check_results(list(expected)[:2], actual2.entries)

        #
        # Perform QueryDisplayInfo and get two results in the middle of the
        # list i.e. not the first or the last entry.
        # Note: We are assuming there are at least four entries
        self.assertTrue(ts > 3)
        (ts2, rs2, actual2) = self.conn.QueryDisplayInfo(
            self.domain_handle, level, 1, 2, 4294967295)
        self.assertEqual(ts, ts2)
        self.assertEqual(2, rs2)
        # NOTE(review): only one expected entry is passed here although two
        # results were requested and rs2 == 2 is asserted; zip() inside the
        # check_results callbacks truncates to the shorter sequence, so the
        # second returned entry is never compared. Possibly [1:3] was
        # intended — TODO confirm.
        check_results(list(expected)[1:2], actual2.entries)

        #
        # To check that cached values are being returned rather than the
        # results being re-read from disk we add elements, and request all
        # but the first result.
        #
        dns = add_elements([1000, 1002, 1003, 1004])

        #
        # Perform QueryDisplayInfo and get all but the first result.
        # We should be using the cached results so the entries we just added
        # should not be present
        (ts3, rs3, actual3) = self.conn.QueryDisplayInfo(
            self.domain_handle, level, 1, 1024, 4294967295)
        self.assertEqual(ts, ts3)
        self.assertEqual(len(expected) - 1, rs3)
        check_results(list(expected)[1:], actual3.entries)

        #
        # Perform QueryDisplayInfo and get all the results.
        # As the start index is zero we should reread the data from disk and
        # the added entries should be there
        new = self.samdb.search(expression=select, attrs=attributes)
        (ts4, rs4, actual4) = self.conn.QueryDisplayInfo(
            self.domain_handle, level, 0, 1024, 4294967295)
        self.assertEqual(len(expected) + len(dns), ts4)
        self.assertEqual(len(expected) + len(dns), rs4)
        check_results(new, actual4.entries)

        # Delete the added DN's and query all but the first entry.
        # This should ensure the cached results are used and that the
        # missing entry code is triggered.
        self.delete_dns(dns)
        (ts5, rs5, actual5) = self.conn.QueryDisplayInfo(
            self.domain_handle, level, 1, 1024, 4294967295)
        self.assertEqual(len(expected) + len(dns), ts5)
        # The deleted results will be filtered from the result set so should
        # be missing from the returned results.
        # Note: depending on the GUID order, the first result in the cache may
        #       be a deleted entry, in which case the results will contain all
        #       the expected elements, otherwise the first expected result will
        #       be missing.
        if rs5 == len(expected):
            check_results(expected, actual5.entries)
        elif rs5 == (len(expected) - 1):
            check_results(list(expected)[1:], actual5.entries)
        else:
            self.fail("Incorrect number of entries {0}".format(rs5))

        #
        # Perform QueryDisplayInfo specifying an index past the end of the
        # available data.
        # Should return no data.
        (ts6, rs6, actual6) = self.conn.QueryDisplayInfo(
            self.domain_handle, level, ts5, 1, 4294967295)
        self.assertEqual(ts5, ts6)
        self.assertEqual(0, rs6)

        self.conn.Close(self.handle)
+
+ # Test for QueryDisplayInfo, Level 1
+ # Returns the sAMAccountName, displayName and description for all
+ # the user accounts.
+ #
    def test_QueryDisplayInfo_level_1(self):
        """Level 1: sAMAccountName, displayName and description of users."""
        def check_results(expected, actual):
            # Assume the QueryDisplayInfo and ldb.search return their results
            # in the same order
            for (e, a) in zip(expected, actual):
                self.assertTrue(isinstance(a, samr.DispEntryGeneral))
                self.assertEqual(str(e["sAMAccountName"]),
                                 str(a.account_name))

                # The displayName and description are optional.
                # In the expected results they will be missing, in
                # samr.DispEntryGeneral the corresponding attribute will have a
                # length of zero.
                #
                if a.full_name.length == 0:
                    self.assertFalse("displayName" in e)
                else:
                    self.assertEqual(str(e["displayName"]), str(a.full_name))

                if a.description.length == 0:
                    self.assertFalse("description" in e)
                else:
                    self.assertEqual(str(e["description"]),
                                     str(a.description))
        # Create four user accounts
        # to ensure that we have the minimum needed for the tests.
        dns = self.create_users([1, 2, 3, 4])

        select = "(&(objectclass=user)(sAMAccountType={0}))".format(
            ATYPE_NORMAL_ACCOUNT)
        attributes = ["sAMAccountName", "displayName", "description"]
        self._test_QueryDisplayInfo(
            1, check_results, select, attributes, self.create_users)

        self.delete_dns(dns)
+
+ # Test for QueryDisplayInfo, Level 2
+ # Returns the sAMAccountName and description for all
+ # the computer accounts.
+ #
    def test_QueryDisplayInfo_level_2(self):
        """Level 2: sAMAccountName and description of computer accounts."""
        def check_results(expected, actual):
            # Assume the QueryDisplayInfo and ldb.search return their results
            # in the same order
            for (e, a) in zip(expected, actual):
                self.assertTrue(isinstance(a, samr.DispEntryFull))
                self.assertEqual(str(e["sAMAccountName"]),
                                 str(a.account_name))

                # The description is optional.
                # In the expected results they will be missing, in
                # samr.DispEntryGeneral the corresponding attribute will have a
                # length of zero.
                #
                if a.description.length == 0:
                    self.assertFalse("description" in e)
                else:
                    self.assertEqual(str(e["description"]),
                                     str(a.description))

        # Create four computer accounts
        # to ensure that we have the minimum needed for the tests.
        dns = self.create_computers([1, 2, 3, 4])

        select = "(&(objectclass=user)(sAMAccountType={0}))".format(
            ATYPE_WORKSTATION_TRUST)
        attributes = ["sAMAccountName", "description"]
        self._test_QueryDisplayInfo(
            2, check_results, select, attributes, self.create_computers)

        self.delete_dns(dns)
+
+ # Test for QueryDisplayInfo, Level 3
+ # Returns the sAMAccountName and description for all
+ # the groups.
+ #
    def test_QueryDisplayInfo_level_3(self):
        """Level 3: sAMAccountName and description of groups."""
        def check_results(expected, actual):
            # Assume the QueryDisplayInfo and ldb.search return their results
            # in the same order
            for (e, a) in zip(expected, actual):
                self.assertTrue(isinstance(a, samr.DispEntryFullGroup))
                self.assertEqual(str(e["sAMAccountName"]),
                                 str(a.account_name))

                # The description is optional.
                # In the expected results they will be missing, in
                # samr.DispEntryGeneral the corresponding attribute will have a
                # length of zero.
                #
                if a.description.length == 0:
                    self.assertFalse("description" in e)
                else:
                    self.assertEqual(str(e["description"]),
                                     str(a.description))

        # Create four groups
        # to ensure that we have the minimum needed for the tests.
        dns = self.create_groups([1, 2, 3, 4])

        select = "(&(|(groupType=%d)(groupType=%d))(objectClass=group))" % (
            GTYPE_SECURITY_UNIVERSAL_GROUP,
            GTYPE_SECURITY_GLOBAL_GROUP)
        attributes = ["sAMAccountName", "description"]
        self._test_QueryDisplayInfo(
            3, check_results, select, attributes, self.create_groups)

        self.delete_dns(dns)
+
+ # Test for QueryDisplayInfo, Level 4
+ # Returns the sAMAccountName (as an ASCII string)
+ # for all the user accounts.
+ #
    def test_QueryDisplayInfo_level_4(self):
        """Level 4: sAMAccountName as an ASCII string, for user accounts."""
        def check_results(expected, actual):
            # Assume the QueryDisplayInfo and ldb.search return their results
            # in the same order
            for (e, a) in zip(expected, actual):
                self.assertTrue(isinstance(a, samr.DispEntryAscii))
                self.assertTrue(
                    isinstance(a.account_name, lsa.AsciiStringLarge))
                self.assertEqual(
                    str(e["sAMAccountName"]), str(a.account_name.string))

        # Create four user accounts
        # to ensure that we have the minimum needed for the tests.
        dns = self.create_users([1, 2, 3, 4])

        select = "(&(objectclass=user)(sAMAccountType={0}))".format(
            ATYPE_NORMAL_ACCOUNT)
        attributes = ["sAMAccountName", "displayName", "description"]
        self._test_QueryDisplayInfo(
            4, check_results, select, attributes, self.create_users)

        self.delete_dns(dns)
+
+    # Test for QueryDisplayInfo, Level 5
+    # Returns the sAMAccountName (as an ASCII string)
+    # for all the groups.
+    #
+    def test_QueryDisplayInfo_level_5(self):
+        def check_results(expected, actual):
+            # Assume the QueryDisplayInfo and ldb.search return their results
+            # in the same order
+            for (e, a) in zip(expected, actual):
+                # Level 5 entries carry only an ASCII group name.
+                self.assertTrue(isinstance(a, samr.DispEntryAscii))
+                self.assertTrue(
+                    isinstance(a.account_name, lsa.AsciiStringLarge))
+                self.assertEqual(
+                    str(e["sAMAccountName"]), str(a.account_name.string))
+
+        # Create four groups
+        # to ensure that we have the minimum needed for the tests.
+        dns = self.create_groups([1, 2, 3, 4])
+
+        # Match both universal and global security groups so the LDB
+        # result set lines up with what the SAMR call returns.
+        select = "(&(|(groupType=%d)(groupType=%d))(objectClass=group))" % (
+            GTYPE_SECURITY_UNIVERSAL_GROUP,
+            GTYPE_SECURITY_GLOBAL_GROUP)
+        attributes = ["sAMAccountName", "description"]
+        self._test_QueryDisplayInfo(
+            5, check_results, select, attributes, self.create_groups)
+
+        self.delete_dns(dns)
+
+ def test_EnumDomainGroups(self):
+ def check_results(expected, actual):
+ for (e, a) in zip(expected, actual):
+ self.assertTrue(isinstance(a, samr.SamEntry))
+ self.assertEqual(
+ str(e["sAMAccountName"]), str(a.name.string))
+
+ # Create four groups
+ # to ensure that we have the minimum needed for the tests.
+ dns = self.create_groups([1, 2, 3, 4])
+
+ #
+ # Get the expected results by querying the samdb database directly.
+ # We do this rather than use a list of expected results as this runs
+ # with other tests so we do not have a known fixed list of elements
+ select = "(&(|(groupType=%d)(groupType=%d))(objectClass=group))" % (
+ GTYPE_SECURITY_UNIVERSAL_GROUP,
+ GTYPE_SECURITY_GLOBAL_GROUP)
+ attributes = ["sAMAccountName", "objectSID"]
+ unfiltered = self.samdb.search(expression=select, attrs=attributes)
+ filtered = self.filter_domain(unfiltered)
+ self.assertTrue(len(filtered) > 4)
+
+ # Sort the expected results by rid
+ expected = sorted(list(filtered), key=rid)
+
+ #
+ # Perform EnumDomainGroups with max size greater than the expected
+ # number of results. Allow for an extra 10 entries
+ #
+ max_size = calc_max_size(len(expected) + 10)
+ (resume_handle, actual, num_entries) = self.conn.EnumDomainGroups(
+ self.domain_handle, 0, max_size)
+ self.assertEqual(len(expected), num_entries)
+ check_results(expected, actual.entries)
+
+ #
+ # Perform EnumDomainGroups with size set to so that it contains
+ # 4 entries.
+ #
+ max_size = calc_max_size(4)
+ (resume_handle, actual, num_entries) = self.conn.EnumDomainGroups(
+ self.domain_handle, 0, max_size)
+ self.assertEqual(4, num_entries)
+ check_results(expected[:4], actual.entries)
+
+ #
+ # Try calling with resume_handle greater than number of entries
+ # Should return no results and a resume handle of 0
+ max_size = calc_max_size(1)
+ rh = len(expected)
+ self.conn.Close(self.handle)
+ (resume_handle, a, num_entries) = self.conn.EnumDomainGroups(
+ self.domain_handle, rh, max_size)
+
+ self.assertEqual(0, num_entries)
+ self.assertEqual(0, resume_handle)
+
+ #
+ # Enumerate through the domain groups one element at a time.
+ #
+ max_size = calc_max_size(1)
+ actual = []
+ (resume_handle, a, num_entries) = self.conn.EnumDomainGroups(
+ self.domain_handle, 0, max_size)
+ while resume_handle:
+ self.assertEqual(1, num_entries)
+ actual.append(a.entries[0])
+ (resume_handle, a, num_entries) = self.conn.EnumDomainGroups(
+ self.domain_handle, resume_handle, max_size)
+ if num_entries:
+ actual.append(a.entries[0])
+
+ #
+ # Check that the cached results are being returned.
+ # Obtain a new resume_handle and insert new entries into the
+ # into the DB
+ #
+ actual = []
+ max_size = calc_max_size(1)
+ (resume_handle, a, num_entries) = self.conn.EnumDomainGroups(
+ self.domain_handle, 0, max_size)
+ extra_dns = self.create_groups([1000, 1002, 1003, 1004])
+ while resume_handle:
+ self.assertEqual(1, num_entries)
+ actual.append(a.entries[0])
+ (resume_handle, a, num_entries) = self.conn.EnumDomainGroups(
+ self.domain_handle, resume_handle, max_size)
+ if num_entries:
+ actual.append(a.entries[0])
+
+ self.assertEqual(len(expected), len(actual))
+ check_results(expected, actual)
+
+ #
+ # Perform EnumDomainGroups, we should read the newly added domains
+ #
+ max_size = calc_max_size(len(expected) + len(extra_dns) + 10)
+ (resume_handle, actual, num_entries) = self.conn.EnumDomainGroups(
+ self.domain_handle, 0, max_size)
+ self.assertEqual(len(expected) + len(extra_dns), num_entries)
+
+ #
+ # Get a new expected result set by querying the database directly
+ unfiltered01 = self.samdb.search(expression=select, attrs=attributes)
+ filtered01 = self.filter_domain(unfiltered01)
+ self.assertTrue(len(filtered01) > len(expected))
+
+ # Sort the expected results by rid
+ expected01 = sorted(list(filtered01), key=rid)
+
+ #
+ # Now check that we read the new entries.
+ #
+ check_results(expected01, actual.entries)
+
+ #
+ # Check that deleted results are handled correctly.
+ # Obtain a new resume_handle and delete entries from the DB.
+ #
+ actual = []
+ max_size = calc_max_size(1)
+ (resume_handle, a, num_entries) = self.conn.EnumDomainGroups(
+ self.domain_handle, 0, max_size)
+ self.delete_dns(extra_dns)
+ while resume_handle and num_entries:
+ self.assertEqual(1, num_entries)
+ actual.append(a.entries[0])
+ (resume_handle, a, num_entries) = self.conn.EnumDomainGroups(
+ self.domain_handle, resume_handle, max_size)
+ if num_entries:
+ actual.append(a.entries[0])
+
+ self.assertEqual(len(expected), len(actual))
+ check_results(expected, actual)
+
+ self.delete_dns(dns)
+
+ def test_EnumDomainUsers(self):
+ def check_results(expected, actual):
+ for (e, a) in zip(expected, actual):
+ self.assertTrue(isinstance(a, samr.SamEntry))
+ self.assertEqual(
+ str(e["sAMAccountName"]), str(a.name.string))
+
+ # Create four users
+ # to ensure that we have the minimum needed for the tests.
+ dns = self.create_users([1, 2, 3, 4])
+
+ #
+ # Get the expected results by querying the samdb database directly.
+ # We do this rather than use a list of expected results as this runs
+ # with other tests so we do not have a known fixed list of elements
+ select = "(objectClass=user)"
+ attributes = ["sAMAccountName", "objectSID", "userAccountConrol"]
+ unfiltered = self.samdb.search(expression=select, attrs=attributes)
+ filtered = self.filter_domain(unfiltered)
+ self.assertTrue(len(filtered) > 4)
+
+ # Sort the expected results by rid
+ expected = sorted(list(filtered), key=rid)
+
+ #
+ # Perform EnumDomainUsers with max_size greater than required for the
+ # expected number of results. We should get all the results.
+ #
+ max_size = calc_max_size(len(expected) + 10)
+ (resume_handle, actual, num_entries) = self.conn.EnumDomainUsers(
+ self.domain_handle, 0, 0, max_size)
+ self.assertEqual(len(expected), num_entries)
+ check_results(expected, actual.entries)
+
+ #
+ # Perform EnumDomainUsers with size set to so that it contains
+ # 4 entries.
+ max_size = calc_max_size(4)
+ (resume_handle, actual, num_entries) = self.conn.EnumDomainUsers(
+ self.domain_handle, 0, 0, max_size)
+ self.assertEqual(4, num_entries)
+ check_results(expected[:4], actual.entries)
+
+ #
+ # Try calling with resume_handle greater than number of entries
+ # Should return no results and a resume handle of 0
+ rh = len(expected)
+ max_size = calc_max_size(1)
+ self.conn.Close(self.handle)
+ (resume_handle, a, num_entries) = self.conn.EnumDomainUsers(
+ self.domain_handle, rh, 0, max_size)
+
+ self.assertEqual(0, num_entries)
+ self.assertEqual(0, resume_handle)
+
+ #
+ # Enumerate through the domain users one element at a time.
+ # We should get all the results.
+ #
+ actual = []
+ max_size = calc_max_size(1)
+ (resume_handle, a, num_entries) = self.conn.EnumDomainUsers(
+ self.domain_handle, 0, 0, max_size)
+ while resume_handle:
+ self.assertEqual(1, num_entries)
+ actual.append(a.entries[0])
+ (resume_handle, a, num_entries) = self.conn.EnumDomainUsers(
+ self.domain_handle, resume_handle, 0, max_size)
+ if num_entries:
+ actual.append(a.entries[0])
+
+ self.assertEqual(len(expected), len(actual))
+ check_results(expected, actual)
+
+ #
+ # Check that the cached results are being returned.
+ # Obtain a new resume_handle and insert new entries into the
+ # into the DB. As the entries were added after the results were cached
+ # they should not show up in the returned results.
+ #
+ actual = []
+ max_size = calc_max_size(1)
+ (resume_handle, a, num_entries) = self.conn.EnumDomainUsers(
+ self.domain_handle, 0, 0, max_size)
+ extra_dns = self.create_users([1000, 1002, 1003, 1004])
+ while resume_handle:
+ self.assertEqual(1, num_entries)
+ actual.append(a.entries[0])
+ (resume_handle, a, num_entries) = self.conn.EnumDomainUsers(
+ self.domain_handle, resume_handle, 0, max_size)
+ if num_entries:
+ actual.append(a.entries[0])
+
+ self.assertEqual(len(expected), len(actual))
+ check_results(expected, actual)
+
+ #
+ # Perform EnumDomainUsers, we should read the newly added groups
+ # As resume_handle is zero, the results will be read from disk.
+ #
+ max_size = calc_max_size(len(expected) + len(extra_dns) + 10)
+ (resume_handle, actual, num_entries) = self.conn.EnumDomainUsers(
+ self.domain_handle, 0, 0, max_size)
+ self.assertEqual(len(expected) + len(extra_dns), num_entries)
+
+ #
+ # Get a new expected result set by querying the database directly
+ unfiltered01 = self.samdb.search(expression=select, attrs=attributes)
+ filtered01 = self.filter_domain(unfiltered01)
+ self.assertTrue(len(filtered01) > len(expected))
+
+ # Sort the expected results by rid
+ expected01 = sorted(list(filtered01), key=rid)
+
+ #
+ # Now check that we read the new entries.
+ #
+ self.assertEqual(len(expected01), num_entries)
+ check_results(expected01, actual.entries)
+
+ self.delete_dns(dns + extra_dns)
+
+    def test_DomGeneralInformation_num_users(self):
+        info = self.conn.QueryDomainInfo(
+            self.domain_handle, DomainGeneralInformation)
+        #
+        # Enumerate through all the domain users and compare the number
+        # returned against QueryDomainInfo they should be the same
+        # max_size is sized for one entry per call so every user is
+        # counted individually.
+        max_size = calc_max_size(1)
+        (resume_handle, a, num_entries) = self.conn.EnumDomainUsers(
+            self.domain_handle, 0, 0, max_size)
+        count = num_entries
+        while resume_handle:
+            self.assertEqual(1, num_entries)
+            (resume_handle, a, num_entries) = self.conn.EnumDomainUsers(
+                self.domain_handle, resume_handle, 0, max_size)
+            count += num_entries
+
+        self.assertEqual(count, info.num_users)
+
+    def test_DomGeneralInformation_num_groups(self):
+        info = self.conn.QueryDomainInfo(
+            self.domain_handle, DomainGeneralInformation)
+        #
+        # Enumerate through all the domain groups and compare the number
+        # returned against QueryDomainInfo they should be the same
+        # max_size is sized for one entry per call so every group is
+        # counted individually.
+        max_size = calc_max_size(1)
+        (resume_handle, a, num_entries) = self.conn.EnumDomainGroups(
+            self.domain_handle, 0, max_size)
+        count = num_entries
+        while resume_handle:
+            self.assertEqual(1, num_entries)
+            (resume_handle, a, num_entries) = self.conn.EnumDomainGroups(
+                self.domain_handle, resume_handle, max_size)
+            count += num_entries
+
+        self.assertEqual(count, info.num_groups)
+
+    def test_DomGeneralInformation_num_aliases(self):
+        info = self.conn.QueryDomainInfo(
+            self.domain_handle, DomainGeneralInformation)
+        #
+        # Enumerate through all the domain aliases and compare the number
+        # returned against QueryDomainInfo they should be the same
+        # max_size is sized for one entry per call so every alias is
+        # counted individually.
+        max_size = calc_max_size(1)
+        (resume_handle, a, num_entries) = self.conn.EnumDomainAliases(
+            self.domain_handle, 0, max_size)
+        count = num_entries
+        while resume_handle:
+            self.assertEqual(1, num_entries)
+            (resume_handle, a, num_entries) = self.conn.EnumDomainAliases(
+                self.domain_handle, resume_handle, max_size)
+            count += num_entries
+
+        self.assertEqual(count, info.num_aliases)
diff --git a/python/samba/tests/dcerpc/samr_change_password.py b/python/samba/tests/dcerpc/samr_change_password.py
new file mode 100644
index 0000000..f872bba
--- /dev/null
+++ b/python/samba/tests/dcerpc/samr_change_password.py
@@ -0,0 +1,187 @@
+# Unix SMB/CIFS implementation.
+#
+# Copyright © 2020 Andreas Schneider <asn@samba.org>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for samba.dcerpc.samr.password"""
+
+import ctypes
+import samba.tests
+
+from samba import crypto, generate_random_password, generate_random_bytes, ntstatus
+from samba.auth import system_session
+from samba.credentials import Credentials
+from samba.credentials import SMB_ENCRYPTION_REQUIRED
+from samba.dcerpc import samr, security, lsa
+from samba.samdb import SamDB
+from samba.tests import RpcInterfaceTestCase
+
+
+class SamrPasswordTests(RpcInterfaceTestCase):
+ def setUp(self):
+ super().setUp()
+ self.open_samdb()
+
+ self.create_user_account(10000)
+
+ self.remote_server = samba.tests.env_get_var_value('SERVER')
+ self.remote_domain = samba.tests.env_get_var_value('DOMAIN')
+ self.remote_user = samba.tests.env_get_var_value('USERNAME')
+ self.remote_password = samba.tests.env_get_var_value('PASSWORD')
+ self.remote_binding_string = "ncacn_np:%s[krb5]" % (self.remote_server)
+
+ self.remote_creds = Credentials()
+ self.remote_creds.guess(self.lp)
+ self.remote_creds.set_username(self.remote_user)
+ self.remote_creds.set_password(self.remote_password)
+
+ def tearDown(self):
+ super().tearDown()
+
+ samr.Close(self.user_handle)
+ samr.Close(self.domain_handle)
+ samr.Close(self.handle)
+
+ samba.tests.delete_force(self.samdb, self.user_dn)
+
+ #
+ # Open the samba database
+ #
+ def open_samdb(self):
+ self.lp = samba.tests.env_loadparm()
+
+ self.local_creds = Credentials()
+ self.local_creds.guess(self.lp)
+ self.session = system_session()
+ self.samdb = SamDB(session_info=self.session,
+ credentials=self.local_creds,
+ lp=self.lp)
+
+ #
+ # Open a SAMR Domain handle
+ #
+ def open_domain_handle(self):
+ self.handle = self.conn.Connect2(None,
+ security.SEC_FLAG_MAXIMUM_ALLOWED)
+
+ self.domain_sid = self.conn.LookupDomain(self.handle,
+ lsa.String(self.remote_domain))
+
+ self.domain_handle = self.conn.OpenDomain(self.handle,
+ security.SEC_FLAG_MAXIMUM_ALLOWED,
+ self.domain_sid)
+
+ def open_user_handle(self):
+ name = lsa.String(self.user_name)
+
+ rids = self.conn.LookupNames(self.domain_handle, [name])
+
+ self.user_handle = self.conn.OpenUser(self.domain_handle,
+ security.SEC_FLAG_MAXIMUM_ALLOWED,
+ rids[0].ids[0])
+ #
+ # Create a test user account
+ #
+ def create_user_account(self, user_id):
+ self.user_name = ("SAMR_USER_%d" % user_id)
+ self.user_pass = generate_random_password(32, 32)
+ self.user_dn = "cn=%s,cn=users,%s" % (self.user_name, self.samdb.domain_dn())
+
+ samba.tests.delete_force(self.samdb, self.user_dn)
+
+ self.samdb.newuser(self.user_name,
+ self.user_pass,
+ description="Password for " + self.user_name + " is " + self.user_pass,
+ givenname=self.user_name,
+ surname=self.user_name)
+
+
+ def init_samr_CryptPassword(self, password, session_key):
+
+ def encode_pw_buffer(password):
+ data = bytearray([0] * 516)
+
+ p = samba.string_to_byte_array(password.encode('utf-16-le'))
+ plen = len(p)
+
+ b = generate_random_bytes(512 - plen)
+
+ i = 512 - plen
+ data[0:i] = b
+ data[i:i+plen] = p
+ data[512:516] = plen.to_bytes(4, byteorder='little')
+
+ return bytes(data)
+
+ # This is a test, so always allow to encrypt using RC4
+ try:
+ crypto.set_relax_mode()
+ encrypted_blob = samba.arcfour_encrypt(session_key, encode_pw_buffer(password))
+ finally:
+ crypto.set_strict_mode()
+
+ out_blob = samr.CryptPassword()
+ out_blob.data = list(encrypted_blob)
+
+ return out_blob
+
+
+ def test_setUserInfo2_Password(self, password='P@ssw0rd'):
+ self.conn = samr.samr(self.remote_binding_string,
+ self.get_loadparm(),
+ self.remote_creds)
+ self.open_domain_handle()
+ self.open_user_handle()
+
+ password='P@ssw0rd'
+
+ level = 24
+ info = samr.UserInfo24()
+
+ info.password_expired = 0
+ info.password = self.init_samr_CryptPassword(password, self.conn.session_key)
+
+ # If the server is in FIPS mode, it should reject the password change!
+ try:
+ self.conn.SetUserInfo2(self.user_handle, level, info)
+ except samba.NTSTATUSError as e:
+ code = ctypes.c_uint32(e.args[0]).value
+ print(code)
+ if ((code == ntstatus.NT_STATUS_ACCESS_DENIED) and
+ (self.lp.weak_crypto == 'disallowed')):
+ pass
+ else:
+ raise
+
+
+ def test_setUserInfo2_Password_Encrypted(self, password='P@ssw0rd'):
+ self.remote_creds.set_smb_encryption(SMB_ENCRYPTION_REQUIRED)
+
+ self.conn = samr.samr(self.remote_binding_string,
+ self.get_loadparm(),
+ self.remote_creds)
+ self.open_domain_handle()
+ self.open_user_handle()
+
+ password='P@ssw0rd'
+
+ level = 24
+ info = samr.UserInfo24()
+
+ info.password_expired = 0
+ info.password = self.init_samr_CryptPassword(password, self.conn.session_key)
+
+ self.conn.SetUserInfo2(self.user_handle, level, info)
diff --git a/python/samba/tests/dcerpc/srvsvc.py b/python/samba/tests/dcerpc/srvsvc.py
new file mode 100644
index 0000000..4baaec3
--- /dev/null
+++ b/python/samba/tests/dcerpc/srvsvc.py
@@ -0,0 +1,68 @@
+# -*- coding: utf-8 -*-
+#
+# Unix SMB/CIFS implementation.
+# Copyright © Dhananjay Sathe <dhanajaysathe@gmail.com> 2011
+# Copyright © Jelmer Vernooij <jelmer@samba.org> 2011
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for samba.dcerpc.srvsvc."""
+
+from samba.dcerpc import srvsvc
+from samba.tests import RpcInterfaceTestCase
+
+
+class SrvsvcTests(RpcInterfaceTestCase):
+
+ def setUp(self):
+ super().setUp()
+ self.conn = srvsvc.srvsvc("ncalrpc:", self.get_loadparm())
+ self.server_unc = "\\\\."
+
+ def getDummyShareObject(self):
+ share = srvsvc.NetShareInfo2()
+
+ share.name = u'test'
+ share.comment = u'test share'
+ share.type = srvsvc.STYPE_DISKTREE
+ share.current_users = 0x00000000
+ share.max_users = -1
+ share.password = None
+ share.path = u'C:\\tmp' # some random path
+ share.permissions = 123434566
+ return share
+
+ def test_NetShareAdd(self):
+ self.skipTest("Dangerous test")
+ share = self.getDummyShareObject()
+ self.conn.NetShareAdd(self.server_unc, 2, share, None)
+
+ def test_NetShareSetInfo(self):
+ self.skipTest("Dangerous test")
+ share = self.getDummyShareObject()
+ parm_error = 0x00000000
+ self.conn.NetShareAdd(self.server_unc, 502, share, parm_error)
+ name = share.name
+ share.comment = "now successfully modified "
+ parm_error = self.pipe.NetShareSetInfo(self.server_unc, name,
+ 502, share, parm_error)
+
+ def test_NetShareDel(self):
+ self.skipTest("Dangerous test")
+ share = self.getDummyShareObject()
+ parm_error = 0x00000000
+ self.expectFailure("NetShareAdd doesn't work properly from Python",
+ self.conn.NetShareAdd, self.server_unc, 502, share, parm_error)
+ self.conn.NetShareDel(self.server_unc, share.name, 0)
diff --git a/python/samba/tests/dcerpc/string_tests.py b/python/samba/tests/dcerpc/string_tests.py
new file mode 100644
index 0000000..a3426bb
--- /dev/null
+++ b/python/samba/tests/dcerpc/string_tests.py
@@ -0,0 +1,132 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2016
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for string and unicode handling in PIDL generated bindings
+samba.dcerpc.*"""
+
+from samba.dcerpc import drsblobs
+import samba.tests
+from samba.ndr import ndr_unpack, ndr_pack
+import talloc
+import gc
+
+
+class TestException(Exception):
+ pass
+
+
+class StringTests(samba.tests.TestCase):
+    """String-member handling in PIDL-generated bindings, with a talloc
+    block-count leak check wrapped around every test."""
+
+    def setUp(self):
+        super().setUp()
+        # Record the talloc block count so tearDown can detect leaks.
+        talloc.enable_null_tracking()
+        self.startup_blocks = talloc.total_blocks()
+
+    def tearDown(self):
+        super().tearDown()
+        # Collect first so objects freed by Python GC release their talloc
+        # memory before the block count is compared.
+        gc.collect()
+        if talloc.total_blocks() != self.startup_blocks:
+            talloc.report_full()
+            self.fail("it appears we are leaking memory")
+
+    def test_string_from_python(self):
+        # Assign, overwrite with empty, and clear (None) string members.
+        info = drsblobs.repsFromTo2OtherInfo()
+        info.dns_name1 = "hello.example.com"
+        info.dns_name2 = "goodbye.example.com"
+        gc.collect()
+        self.assertIsNotNone(info)
+        self.assertEqual(info.dns_name1, "hello.example.com")
+        self.assertEqual(info.dns_name2, "goodbye.example.com")
+
+        info.dns_name1 = ""
+        info.dns_name2 = "goodbye.example.com"
+
+        self.assertEqual(info.dns_name1, "")
+        self.assertEqual(info.dns_name2, "goodbye.example.com")
+
+        info.dns_name2 = None
+
+        self.assertEqual(info.dns_name1, "")
+        self.assertIsNone(info.dns_name2)
+
+    def test_string_with_exception(self):
+        # The assignments must not leak even when an exception unwinds.
+        try:
+            self.test_string_from_python()
+            raise TestException()
+        except TestException:
+            pass
+
+    def test_string_from_python_function(self):
+        # Strings set inside a function must survive the function's return.
+        def get_info():
+            info = drsblobs.repsFromTo2OtherInfo()
+            info.dns_name1 = "1.example.com"
+            info.dns_name2 = "2.example.com"
+            return info
+
+        info = get_info()
+        gc.collect()
+        self.assertIsNotNone(info)
+        self.assertEqual(info.dns_name1, "1.example.com")
+        self.assertEqual(info.dns_name2, "2.example.com")
+
+    def test_string_modify_in_place(self):
+        # Augmented assignment rebinds the attribute to a new Python str.
+        info = drsblobs.repsFromTo2OtherInfo()
+        info.dns_name1 = "1.example.com"
+        info.dns_name2 = "%s.example.com"
+        gc.collect()
+        self.assertIsNotNone(info)
+        self.assertEqual(info.dns_name1, "1.example.com")
+        self.assertEqual(info.dns_name2, "%s.example.com")
+        info.dns_name1 += ".co.nz"
+        info.dns_name2 %= 2
+        self.assertEqual(info.dns_name1, "1.example.com.co.nz")
+        self.assertEqual(info.dns_name2, "2.example.com")
+        del info
+
+    def test_string_delete(self):
+        # Attribute deletion is expected to be refused (AttributeError);
+        # setting None is the supported way to clear a member.
+        gc.collect()
+        info = drsblobs.repsFromTo2OtherInfo()
+        info.dns_name1 = "1.example.com"
+        info.dns_name2 = "2.example.com"
+        info.dns_name1 = None
+        try:
+            del info.dns_name2
+        except AttributeError:
+            pass
+
+        self.assertIsNotNone(info)
+        self.assertIsNone(info.dns_name1)
+        self.assertIsNotNone(info.dns_name2)
+
+
+class StringTestsWithoutLeakCheck(samba.tests.TestCase):
+    """We know that the ndr unpacking test leaves stuff in the
+    autofree_context, and we don't want to worry about that. So for
+    this test we don't make memory leak assertions."""
+
+    def test_string_from_ndr(self):
+        # Round-trip string members through ndr_pack/ndr_unpack.
+        info = drsblobs.repsFromTo2OtherInfo()
+        info.dns_name1 = "1.example.com"
+        info.dns_name2 = "2.example.com"
+        packed = ndr_pack(info)
+        gc.collect()
+
+        info_unpacked = ndr_unpack(drsblobs.repsFromTo2OtherInfo, packed)
+
+        self.assertIsNotNone(info_unpacked)
+        self.assertEqual(info_unpacked.dns_name1, "1.example.com")
+        self.assertEqual(info_unpacked.dns_name2, "2.example.com")
diff --git a/python/samba/tests/dcerpc/testrpc.py b/python/samba/tests/dcerpc/testrpc.py
new file mode 100644
index 0000000..789ea9a
--- /dev/null
+++ b/python/samba/tests/dcerpc/testrpc.py
@@ -0,0 +1,143 @@
+# test generated python code from pidl
+# Copyright (C) Andrew Tridgell August 2010
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+import sys
+
+sys.path.insert(0, "bin/python")
+
+import samba
+import samba.tests
+from samba.dcerpc import drsuapi
+import talloc
+
+talloc.enable_null_tracking()
+
+
+class RpcTests(object):
+ """test type behaviour of pidl generated python RPC code"""
+
+ def check_blocks(self, object, num_expected):
+ """check that the number of allocated blocks is correct"""
+ nblocks = talloc.total_blocks(object)
+ if object is None:
+ nblocks -= self.initial_blocks
+ leaked_blocks = (nblocks - num_expected)
+ if leaked_blocks != 0:
+ print("Leaked %d blocks" % leaked_blocks)
+
+ def check_type(self, interface, typename, type):
+ print("Checking type %s" % typename)
+ v = type()
+ for n in dir(v):
+ if n[0] == '_':
+ continue
+ try:
+ value = getattr(v, n)
+ except TypeError as errstr:
+ if str(errstr) == "unknown union level":
+ print("ERROR: Unknown union level in %s.%s" % (typename, n))
+ self.errcount += 1
+ continue
+ print(str(errstr)[1:21])
+ if str(errstr)[0:21] == "Can not convert C Type":
+ print("ERROR: Unknown C type for %s.%s" % (typename, n))
+ self.errcount += 1
+ continue
+ else:
+ print("ERROR: Failed to instantiate %s.%s" % (typename, n))
+ self.errcount += 1
+ continue
+ except Exception:
+ print("ERROR: Failed to instantiate %s.%s" % (typename, n))
+ self.errcount += 1
+ continue
+
+ # now try setting the value back
+ try:
+ print("Setting %s.%s" % (typename, n))
+ setattr(v, n, value)
+ except Exception as e:
+ if isinstance(e, AttributeError) and str(e).endswith("is read-only"):
+ # readonly, ignore
+ continue
+ else:
+ print("ERROR: Failed to set %s.%s: %r: %s" % (typename, n, e.__class__, e))
+ self.errcount += 1
+ continue
+
+ # and try a comparison
+ try:
+ if value != getattr(v, n):
+ print("ERROR: Comparison failed for %s.%s: %r != %r" % (typename, n, value, getattr(v, n)))
+ continue
+ except Exception as e:
+ print("ERROR: compare exception for %s.%s: %r: %s" % (typename, n, e.__class__, e))
+ continue
+
+ def check_interface(self, interface, iname):
+ errcount = self.errcount
+ for n in dir(interface):
+ if n[0] == '_' or n == iname:
+ # skip the special ones
+ continue
+ value = getattr(interface, n)
+ if isinstance(value, str):
+ # print "%s=\"%s\"" % (n, value)
+ pass
+ elif isinstance(value, int):
+ # print "%s=%d" % (n, value)
+ pass
+ elif isinstance(value, type):
+ try:
+ initial_blocks = talloc.total_blocks(None)
+ self.check_type(interface, n, value)
+ self.check_blocks(None, initial_blocks)
+ except Exception as e:
+ print("ERROR: Failed to check_type %s.%s: %r: %s" % (iname, n, e.__class__, e))
+ self.errcount += 1
+ elif callable(value):
+ pass # Method
+ else:
+ print("UNKNOWN: %s=%s" % (n, value))
+ if self.errcount - errcount != 0:
+ print("Found %d errors in %s" % (self.errcount - errcount, iname))
+
+ def check_all_interfaces(self):
+ for iname in dir(samba.dcerpc):
+ if iname[0] == '_':
+ continue
+ if iname == 'ClientConnection' or iname == 'base':
+ continue
+ print("Checking interface %s" % iname)
+ iface = getattr(samba.dcerpc, iname)
+ initial_blocks = talloc.total_blocks(None)
+ self.check_interface(iface, iname)
+ self.check_blocks(None, initial_blocks)
+
+ def run(self):
+ self.initial_blocks = talloc.total_blocks(None)
+ self.errcount = 0
+ self.check_all_interfaces()
+ return self.errcount
+
+
+# Run the type checks across all pidl-generated interfaces and use the
+# error count as the process exit status (0 == success).
+tests = RpcTests()
+errcount = tests.run()
+if errcount == 0:
+    sys.exit(0)
+else:
+    print("%d failures" % errcount)
+    sys.exit(1)
diff --git a/python/samba/tests/dcerpc/unix.py b/python/samba/tests/dcerpc/unix.py
new file mode 100644
index 0000000..b7fa1f3
--- /dev/null
+++ b/python/samba/tests/dcerpc/unix.py
@@ -0,0 +1,43 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2008
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for samba.dcerpc.unixinfo."""
+
+
+from samba.dcerpc import unixinfo
+from samba.tests import RpcInterfaceTestCase
+
+class UnixinfoTests(RpcInterfaceTestCase):
+    """Smoke tests for the unixinfo pipe over ncalrpc."""
+
+    def setUp(self):
+        super().setUp()
+        self.conn = unixinfo.unixinfo("ncalrpc:", self.get_loadparm())
+
+    def test_getpwuid_int(self):
+        # GetPWUid accepts a sequence of uids and returns one info per uid.
+        infos = self.conn.GetPWUid(range(512))
+        self.assertEqual(512, len(infos))
+        # NOTE(review): assumes uid 0 maps to /bin/false in the test
+        # environment -- confirm against the environment setup.
+        self.assertEqual("/bin/false", infos[0].shell)
+        self.assertTrue(isinstance(infos[0].homedir, str))
+
+    def test_gidtosid(self):
+        self.conn.GidToSid(1000)
+
+    def test_uidtosid(self):
+        self.conn.UidToSid(1000)
+
+    def test_uidtosid_fail(self):
+        # Passing a string where an integer uid is expected must raise.
+        self.assertRaises(TypeError, self.conn.UidToSid, "100")
diff --git a/python/samba/tests/dckeytab.py b/python/samba/tests/dckeytab.py
new file mode 100644
index 0000000..28ab186
--- /dev/null
+++ b/python/samba/tests/dckeytab.py
@@ -0,0 +1,64 @@
+# Tests for source4/libnet/py_net_dckeytab.c
+#
+# Copyright (C) David Mulder <dmulder@suse.com> 2018
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import os
+import sys
+import string
+from samba.net import Net
+from samba import enable_net_export_keytab
+
+from samba import tests
+from samba.param import LoadParm
+
+
+enable_net_export_keytab()
+
+
+def open_bytes(filename):
+    """Open *filename* for reading raw keytab content.
+
+    On Python 3 the file is opened in text mode with undecodable bytes
+    ignored, so iteration yields str (which the caller compares against
+    string.printable); on Python 2 it is opened in binary mode.
+    """
+    if sys.version_info[0] == 3:
+        return open(filename, errors='ignore')
+    else:
+        return open(filename, 'rb')
+
+
+class DCKeytabTests(tests.TestCase):
+    """Test exporting the DC's keytab via Net.export_keytab()."""
+
+    def setUp(self):
+        super().setUp()
+        self.lp = LoadParm()
+        self.lp.load_default()
+        self.creds = self.insta_creds(template=self.get_credentials())
+        # Write the keytab into the private dir; tearDown removes it.
+        self.ktfile = os.path.join(self.lp.get('private dir'), 'test.keytab')
+        self.principal = self.creds.get_principal()
+
+    def tearDown(self):
+        super().tearDown()
+        os.remove(self.ktfile)
+
+    def test_export_keytab(self):
+        net = Net(None, self.lp)
+        net.export_keytab(keytab=self.ktfile, principal=self.principal)
+        assert os.path.exists(self.ktfile), 'keytab was not created'
+        with open_bytes(self.ktfile) as bytes_kt:
+            result = ''
+            # Keep only printable characters so the binary keytab can be
+            # searched for the principal's name and realm.
+            for c in bytes_kt.read():
+                if c in string.printable:
+                    result += c
+            principal_parts = self.principal.split('@')
+            assert principal_parts[0] in result and \
+                principal_parts[1] in result, \
+                'Principal not found in generated keytab'
diff --git a/python/samba/tests/dns.py b/python/samba/tests/dns.py
new file mode 100644
index 0000000..a331e26
--- /dev/null
+++ b/python/samba/tests/dns.py
@@ -0,0 +1,2247 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Kai Blin <kai@samba.org> 2011
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from samba import dsdb
+from samba.ndr import ndr_unpack, ndr_pack
+from samba.samdb import SamDB
+from samba.auth import system_session
+import ldb
+from ldb import ERR_OPERATIONS_ERROR
+import os
+import sys
+import struct
+import socket
+import samba.ndr as ndr
+from samba import credentials
+from samba.dcerpc import dns, dnsp, dnsserver
+from samba.dnsserver import TXTRecord
+from samba.dnsserver import record_from_string, dns_record_match
+from samba.tests.subunitrun import SubunitOptions, TestProgram
+from samba import werror, WERRORError
+from samba.tests.dns_base import DNSTest
+import samba.getopt as options
+import optparse
+
+
+# Module-level command-line handling: this test file is driven by
+# subunitrun and takes "<server name> <server ip>" as positional arguments.
+parser = optparse.OptionParser("dns.py <server name> <server ip> [options]")
+sambaopts = options.SambaOptions(parser)
+parser.add_option_group(sambaopts)
+
+# This timeout only has relevance when testing against Windows
+# Format errors tend to return patchy responses, so a timeout is needed.
+parser.add_option("--timeout", type="int", dest="timeout",
+                  help="Specify timeout for DNS requests")
+
+# use command line creds if available
+credopts = options.CredentialsOptions(parser)
+parser.add_option_group(credopts)
+subunitopts = SubunitOptions(parser)
+parser.add_option_group(subunitopts)
+
+opts, args = parser.parse_args()
+
+lp = sambaopts.get_loadparm()
+creds = credopts.get_credentials(lp)
+
+timeout = opts.timeout
+
+# Both positional arguments are mandatory.
+if len(args) < 2:
+    parser.print_usage()
+    sys.exit(1)
+
+server_name = args[0]
+server_ip = args[1]
+# Request non-forwardable Kerberos tickets for the test credentials.
+creds.set_krb_forwardable(credentials.NO_KRB_FORWARDABLE)
+
+
+class TestSimpleQueries(DNSTest):
+    """Basic DNS queries (A, SOA, MX, ANY) against the server under test."""
+
+    def setUp(self):
+        super().setUp()
+        # Copy the module-level connection parameters onto the instance.
+        # NOTE(review): 'server' in this global statement has no module-level
+        # binding (the module defines server_name) — harmless, but presumably
+        # a typo repeated in every setUp in this file.
+        global server, server_ip, lp, creds, timeout
+        self.server = server_name
+        self.server_ip = server_ip
+        self.lp = lp
+        self.creds = creds
+        self.timeout = timeout
+
+    def test_one_a_query(self):
+        "create a query packet containing one query record"
+        p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
+        questions = []
+
+        name = "%s.%s" % (self.server, self.get_dns_domain())
+        q = self.make_name_question(name, dns.DNS_QTYPE_A, dns.DNS_QCLASS_IN)
+        print("asking for ", q.name)
+        questions.append(q)
+
+        self.finish_name_packet(p, questions)
+        (response, response_packet) =\
+            self.dns_transaction_udp(p, host=server_ip)
+        self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
+        self.assert_dns_opcode_equals(response, dns.DNS_OPCODE_QUERY)
+        self.assertEqual(response.ancount, 1)
+        self.assertEqual(response.answers[0].rdata,
+                         self.server_ip)
+
+    def test_one_SOA_query(self):
+        "create a query packet containing one query record for the SOA"
+        p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
+        questions = []
+
+        name = "%s" % (self.get_dns_domain())
+        q = self.make_name_question(name, dns.DNS_QTYPE_SOA, dns.DNS_QCLASS_IN)
+        print("asking for ", q.name)
+        questions.append(q)
+
+        self.finish_name_packet(p, questions)
+        (response, response_packet) =\
+            self.dns_transaction_udp(p, host=server_ip)
+        self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
+        self.assert_dns_opcode_equals(response, dns.DNS_OPCODE_QUERY)
+        self.assertEqual(response.ancount, 1)
+        # The SOA mname should be this DC; compare case-insensitively.
+        self.assertEqual(
+            response.answers[0].rdata.mname.upper(),
+            ("%s.%s" % (self.server, self.get_dns_domain())).upper())
+
+    def test_one_a_query_tcp(self):
+        "create a query packet containing one query record via TCP"
+        p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
+        questions = []
+
+        name = "%s.%s" % (self.server, self.get_dns_domain())
+        q = self.make_name_question(name, dns.DNS_QTYPE_A, dns.DNS_QCLASS_IN)
+        print("asking for ", q.name)
+        questions.append(q)
+
+        self.finish_name_packet(p, questions)
+        (response, response_packet) =\
+            self.dns_transaction_tcp(p, host=server_ip)
+        self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
+        self.assert_dns_opcode_equals(response, dns.DNS_OPCODE_QUERY)
+        self.assertEqual(response.ancount, 1)
+        self.assertEqual(response.answers[0].rdata,
+                         self.server_ip)
+
+    def test_one_mx_query(self):
+        "create a query packet causing an empty RCODE_OK answer"
+        p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
+        questions = []
+
+        # An existing name with no MX record: OK with an empty answer.
+        name = "%s.%s" % (self.server, self.get_dns_domain())
+        q = self.make_name_question(name, dns.DNS_QTYPE_MX, dns.DNS_QCLASS_IN)
+        print("asking for ", q.name)
+        questions.append(q)
+
+        self.finish_name_packet(p, questions)
+        (response, response_packet) =\
+            self.dns_transaction_udp(p, host=server_ip)
+        self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
+        self.assert_dns_opcode_equals(response, dns.DNS_OPCODE_QUERY)
+        self.assertEqual(response.ancount, 0)
+
+        # A non-existing name: NXDOMAIN.
+        p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
+        questions = []
+
+        name = "invalid-%s.%s" % (self.server, self.get_dns_domain())
+        q = self.make_name_question(name, dns.DNS_QTYPE_MX, dns.DNS_QCLASS_IN)
+        print("asking for ", q.name)
+        questions.append(q)
+
+        self.finish_name_packet(p, questions)
+        (response, response_packet) =\
+            self.dns_transaction_udp(p, host=server_ip)
+        self.assert_dns_rcode_equals(response, dns.DNS_RCODE_NXDOMAIN)
+        self.assert_dns_opcode_equals(response, dns.DNS_OPCODE_QUERY)
+        self.assertEqual(response.ancount, 0)
+
+    def test_two_queries(self):
+        "create a query packet containing two query records"
+        p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
+        questions = []
+
+        name = "%s.%s" % (self.server, self.get_dns_domain())
+        q = self.make_name_question(name, dns.DNS_QTYPE_A, dns.DNS_QCLASS_IN)
+        questions.append(q)
+
+        name = "%s.%s" % ('bogusname', self.get_dns_domain())
+        q = self.make_name_question(name, dns.DNS_QTYPE_A, dns.DNS_QCLASS_IN)
+        questions.append(q)
+
+        self.finish_name_packet(p, questions)
+        try:
+            (response, response_packet) =\
+                self.dns_transaction_udp(p, host=server_ip)
+            self.assert_dns_rcode_equals(response, dns.DNS_RCODE_FORMERR)
+        except socket.timeout:
+            # Windows chooses not to respond to incorrectly formatted queries.
+            # Although this appears to be non-deterministic even for the same
+            # request twice, it also appears to be based on a how poorly the
+            # request is formatted.
+            pass
+
+    def test_qtype_all_query(self):
+        "create a QTYPE_ALL query"
+        p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
+        questions = []
+
+        name = "%s.%s" % (self.server, self.get_dns_domain())
+        q = self.make_name_question(name, dns.DNS_QTYPE_ALL, dns.DNS_QCLASS_IN)
+        print("asking for ", q.name)
+        questions.append(q)
+
+        self.finish_name_packet(p, questions)
+        (response, response_packet) =\
+            self.dns_transaction_udp(p, host=server_ip)
+
+        # Expect an extra AAAA answer when the test environment exposes an
+        # IPv6 address for the DC.
+        num_answers = 1
+        dc_ipv6 = os.getenv('SERVER_IPV6')
+        if dc_ipv6 is not None:
+            num_answers += 1
+
+        self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
+        self.assert_dns_opcode_equals(response, dns.DNS_OPCODE_QUERY)
+        self.assertEqual(response.ancount, num_answers)
+        self.assertEqual(response.answers[0].rdata,
+                         self.server_ip)
+        if dc_ipv6 is not None:
+            self.assertEqual(response.answers[1].rdata, dc_ipv6)
+
+    def test_qclass_none_query(self):
+        "create a QCLASS_NONE query"
+        p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
+        questions = []
+
+        name = "%s.%s" % (self.server, self.get_dns_domain())
+        q = self.make_name_question(
+            name,
+            dns.DNS_QTYPE_ALL,
+            dns.DNS_QCLASS_NONE)
+        questions.append(q)
+
+        self.finish_name_packet(p, questions)
+        try:
+            (response, response_packet) =\
+                self.dns_transaction_udp(p, host=server_ip)
+            self.assert_dns_rcode_equals(response, dns.DNS_RCODE_NOTIMP)
+        except socket.timeout:
+            # Windows chooses not to respond to incorrectly formatted queries.
+            # Although this appears to be non-deterministic even for the same
+            # request twice, it also appears to be based on a how poorly the
+            # request is formatted.
+            pass
+
+    def test_soa_hostname_query(self):
+        "create a SOA query for a hostname"
+        p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
+        questions = []
+
+        name = "%s.%s" % (self.server, self.get_dns_domain())
+        q = self.make_name_question(name, dns.DNS_QTYPE_SOA, dns.DNS_QCLASS_IN)
+        questions.append(q)
+
+        self.finish_name_packet(p, questions)
+        (response, response_packet) =\
+            self.dns_transaction_udp(p, host=server_ip)
+        self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
+        self.assert_dns_opcode_equals(response, dns.DNS_OPCODE_QUERY)
+        # We don't get SOA records for single hosts
+        self.assertEqual(response.ancount, 0)
+        # But we do respond with an authority section
+        self.assertEqual(response.nscount, 1)
+
+    def test_soa_unknown_hostname_query(self):
+        "create a SOA query for an unknown hostname"
+        p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
+        questions = []
+
+        name = "foobar.%s" % (self.get_dns_domain())
+        q = self.make_name_question(name, dns.DNS_QTYPE_SOA, dns.DNS_QCLASS_IN)
+        questions.append(q)
+
+        self.finish_name_packet(p, questions)
+        (response, response_packet) =\
+            self.dns_transaction_udp(p, host=server_ip)
+        self.assert_dns_rcode_equals(response, dns.DNS_RCODE_NXDOMAIN)
+        self.assert_dns_opcode_equals(response, dns.DNS_OPCODE_QUERY)
+        # We don't get SOA records for single hosts
+        self.assertEqual(response.ancount, 0)
+        # But we do respond with an authority section
+        self.assertEqual(response.nscount, 1)
+
+    def test_soa_domain_query(self):
+        "create a SOA query for a domain"
+        p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
+        questions = []
+
+        name = self.get_dns_domain()
+        q = self.make_name_question(name, dns.DNS_QTYPE_SOA, dns.DNS_QCLASS_IN)
+        questions.append(q)
+
+        self.finish_name_packet(p, questions)
+        (response, response_packet) =\
+            self.dns_transaction_udp(p, host=server_ip)
+        self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
+        self.assert_dns_opcode_equals(response, dns.DNS_OPCODE_QUERY)
+        self.assertEqual(response.ancount, 1)
+        self.assertEqual(response.answers[0].rdata.minimum, 3600)
+
+
+class TestDNSUpdates(DNSTest):
+    """DNS dynamic update (RFC 2136 style) behaviour: prerequisites,
+    add/delete/re-add of TXT records, and MX record updates."""
+
+    def setUp(self):
+        super().setUp()
+        # Copy the module-level connection parameters onto the instance.
+        global server, server_ip, lp, creds, timeout
+        self.server = server_name
+        self.server_ip = server_ip
+        self.lp = lp
+        self.creds = creds
+        self.timeout = timeout
+
+    def test_two_updates(self):
+        "create two update requests"
+        # Two records in the zone section is malformed: expect FORMERR.
+        p = self.make_name_packet(dns.DNS_OPCODE_UPDATE)
+        updates = []
+
+        name = "%s.%s" % (self.server, self.get_dns_domain())
+        u = self.make_name_question(name, dns.DNS_QTYPE_A, dns.DNS_QCLASS_IN)
+        updates.append(u)
+
+        name = self.get_dns_domain()
+        u = self.make_name_question(name, dns.DNS_QTYPE_A, dns.DNS_QCLASS_IN)
+        updates.append(u)
+
+        self.finish_name_packet(p, updates)
+        try:
+            (response, response_packet) =\
+                self.dns_transaction_udp(p, host=server_ip)
+            self.assert_dns_rcode_equals(response, dns.DNS_RCODE_FORMERR)
+        except socket.timeout:
+            # Windows chooses not to respond to incorrectly formatted queries.
+            # Although this appears to be non-deterministic even for the same
+            # request twice, it also appears to be based on a how poorly the
+            # request is formatted.
+            pass
+
+    def test_update_wrong_qclass(self):
+        "create update with DNS_QCLASS_NONE"
+        p = self.make_name_packet(dns.DNS_OPCODE_UPDATE)
+        updates = []
+
+        name = self.get_dns_domain()
+        u = self.make_name_question(name, dns.DNS_QTYPE_A, dns.DNS_QCLASS_NONE)
+        updates.append(u)
+
+        self.finish_name_packet(p, updates)
+        (response, response_packet) =\
+            self.dns_transaction_udp(p, host=server_ip)
+        self.assert_dns_rcode_equals(response, dns.DNS_RCODE_NOTIMP)
+
+    def test_update_prereq_with_non_null_ttl(self):
+        "test update with a non-null TTL"
+        p = self.make_name_packet(dns.DNS_OPCODE_UPDATE)
+        updates = []
+
+        name = self.get_dns_domain()
+
+        u = self.make_name_question(name, dns.DNS_QTYPE_SOA, dns.DNS_QCLASS_IN)
+        updates.append(u)
+        self.finish_name_packet(p, updates)
+
+        # Prerequisite records must carry TTL 0; a non-zero TTL is malformed.
+        prereqs = []
+        r = dns.res_rec()
+        r.name = "%s.%s" % (self.server, self.get_dns_domain())
+        r.rr_type = dns.DNS_QTYPE_TXT
+        r.rr_class = dns.DNS_QCLASS_NONE
+        r.ttl = 1
+        r.length = 0
+        prereqs.append(r)
+
+        p.ancount = len(prereqs)
+        p.answers = prereqs
+
+        try:
+            (response, response_packet) =\
+                self.dns_transaction_udp(p, host=server_ip)
+            self.assert_dns_rcode_equals(response, dns.DNS_RCODE_FORMERR)
+        except socket.timeout:
+            # Windows chooses not to respond to incorrectly formatted queries.
+            # Although this appears to be non-deterministic even for the same
+            # request twice, it also appears to be based on a how poorly the
+            # request is formatted.
+            pass
+
+    def test_update_prereq_with_non_null_length(self):
+        "test update with a non-null length"
+        p = self.make_name_packet(dns.DNS_OPCODE_UPDATE)
+        updates = []
+
+        name = self.get_dns_domain()
+
+        u = self.make_name_question(name, dns.DNS_QTYPE_SOA, dns.DNS_QCLASS_IN)
+        updates.append(u)
+        self.finish_name_packet(p, updates)
+
+        # A "RRset exists" prerequisite with a non-zero rdata length for a
+        # record that is not present: expect NXRRSET.
+        prereqs = []
+        r = dns.res_rec()
+        r.name = "%s.%s" % (self.server, self.get_dns_domain())
+        r.rr_type = dns.DNS_QTYPE_TXT
+        r.rr_class = dns.DNS_QCLASS_ANY
+        r.ttl = 0
+        r.length = 1
+        prereqs.append(r)
+
+        p.ancount = len(prereqs)
+        p.answers = prereqs
+
+        (response, response_packet) =\
+            self.dns_transaction_udp(p, host=server_ip)
+        self.assert_dns_rcode_equals(response, dns.DNS_RCODE_NXRRSET)
+
+    def test_update_prereq_nonexisting_name(self):
+        "test update with a nonexisting name"
+        p = self.make_name_packet(dns.DNS_OPCODE_UPDATE)
+        updates = []
+
+        name = self.get_dns_domain()
+
+        u = self.make_name_question(name, dns.DNS_QTYPE_SOA, dns.DNS_QCLASS_IN)
+        updates.append(u)
+        self.finish_name_packet(p, updates)
+
+        prereqs = []
+        r = dns.res_rec()
+        r.name = "idontexist.%s" % self.get_dns_domain()
+        r.rr_type = dns.DNS_QTYPE_TXT
+        r.rr_class = dns.DNS_QCLASS_ANY
+        r.ttl = 0
+        r.length = 0
+        prereqs.append(r)
+
+        p.ancount = len(prereqs)
+        p.answers = prereqs
+
+        (response, response_packet) =\
+            self.dns_transaction_udp(p, host=server_ip)
+        self.assert_dns_rcode_equals(response, dns.DNS_RCODE_NXRRSET)
+
+    def test_update_add_txt_record(self):
+        "test adding records works"
+        prefix, txt = 'textrec', ['"This is a test"']
+        p = self.make_txt_update(prefix, txt)
+        (response, response_packet) =\
+            self.dns_transaction_udp(p, host=server_ip)
+        self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
+        # Verify the record can actually be queried back.
+        self.check_query_txt(prefix, txt)
+
+    def test_delete_record(self):
+        "Test if deleting records works"
+
+        NAME = "deleterec.%s" % self.get_dns_domain()
+
+        # First, create a record to make sure we have a record to delete.
+        p = self.make_name_packet(dns.DNS_OPCODE_UPDATE)
+        updates = []
+
+        name = self.get_dns_domain()
+
+        u = self.make_name_question(name, dns.DNS_QTYPE_SOA, dns.DNS_QCLASS_IN)
+        updates.append(u)
+        self.finish_name_packet(p, updates)
+
+        updates = []
+        r = dns.res_rec()
+        r.name = NAME
+        r.rr_type = dns.DNS_QTYPE_TXT
+        r.rr_class = dns.DNS_QCLASS_IN
+        r.ttl = 900
+        r.length = 0xffff
+        rdata = self.make_txt_record(['"This is a test"'])
+        r.rdata = rdata
+        updates.append(r)
+        p.nscount = len(updates)
+        p.nsrecs = updates
+
+        (response, response_packet) =\
+            self.dns_transaction_udp(p, host=server_ip)
+        self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
+
+        # Now check the record is around
+        p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
+        questions = []
+        q = self.make_name_question(NAME, dns.DNS_QTYPE_TXT, dns.DNS_QCLASS_IN)
+        questions.append(q)
+
+        self.finish_name_packet(p, questions)
+        (response, response_packet) =\
+            self.dns_transaction_udp(p, host=server_ip)
+        self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
+
+        # Now delete the record (class NONE + TTL 0 means "delete this RR").
+        p = self.make_name_packet(dns.DNS_OPCODE_UPDATE)
+        updates = []
+
+        name = self.get_dns_domain()
+
+        u = self.make_name_question(name, dns.DNS_QTYPE_SOA, dns.DNS_QCLASS_IN)
+        updates.append(u)
+        self.finish_name_packet(p, updates)
+
+        updates = []
+        r = dns.res_rec()
+        r.name = NAME
+        r.rr_type = dns.DNS_QTYPE_TXT
+        r.rr_class = dns.DNS_QCLASS_NONE
+        r.ttl = 0
+        r.length = 0xffff
+        rdata = self.make_txt_record(['"This is a test"'])
+        r.rdata = rdata
+        updates.append(r)
+        p.nscount = len(updates)
+        p.nsrecs = updates
+
+        (response, response_packet) =\
+            self.dns_transaction_udp(p, host=server_ip)
+        self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
+
+        # And finally check it's gone
+        p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
+        questions = []
+
+        q = self.make_name_question(NAME, dns.DNS_QTYPE_TXT, dns.DNS_QCLASS_IN)
+        questions.append(q)
+
+        self.finish_name_packet(p, questions)
+        (response, response_packet) =\
+            self.dns_transaction_udp(p, host=server_ip)
+        self.assert_dns_rcode_equals(response, dns.DNS_RCODE_NXDOMAIN)
+
+    def test_readd_record(self):
+        "Test if adding, deleting and then re-adding a records works"
+
+        NAME = "readdrec.%s" % self.get_dns_domain()
+
+        # Create the record
+        p = self.make_name_packet(dns.DNS_OPCODE_UPDATE)
+        updates = []
+
+        name = self.get_dns_domain()
+
+        u = self.make_name_question(name, dns.DNS_QTYPE_SOA, dns.DNS_QCLASS_IN)
+        updates.append(u)
+        self.finish_name_packet(p, updates)
+
+        updates = []
+        r = dns.res_rec()
+        r.name = NAME
+        r.rr_type = dns.DNS_QTYPE_TXT
+        r.rr_class = dns.DNS_QCLASS_IN
+        r.ttl = 900
+        r.length = 0xffff
+        rdata = self.make_txt_record(['"This is a test"'])
+        r.rdata = rdata
+        updates.append(r)
+        p.nscount = len(updates)
+        p.nsrecs = updates
+
+        (response, response_packet) =\
+            self.dns_transaction_udp(p, host=server_ip)
+        self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
+
+        # Now check the record is around
+        p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
+        questions = []
+        q = self.make_name_question(NAME, dns.DNS_QTYPE_TXT, dns.DNS_QCLASS_IN)
+        questions.append(q)
+
+        self.finish_name_packet(p, questions)
+        (response, response_packet) =\
+            self.dns_transaction_udp(p, host=server_ip)
+        self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
+
+        # Now delete the record
+        p = self.make_name_packet(dns.DNS_OPCODE_UPDATE)
+        updates = []
+
+        name = self.get_dns_domain()
+
+        u = self.make_name_question(name, dns.DNS_QTYPE_SOA, dns.DNS_QCLASS_IN)
+        updates.append(u)
+        self.finish_name_packet(p, updates)
+
+        updates = []
+        r = dns.res_rec()
+        r.name = NAME
+        r.rr_type = dns.DNS_QTYPE_TXT
+        r.rr_class = dns.DNS_QCLASS_NONE
+        r.ttl = 0
+        r.length = 0xffff
+        rdata = self.make_txt_record(['"This is a test"'])
+        r.rdata = rdata
+        updates.append(r)
+        p.nscount = len(updates)
+        p.nsrecs = updates
+
+        (response, response_packet) =\
+            self.dns_transaction_udp(p, host=server_ip)
+        self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
+
+        # check it's gone
+        p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
+        questions = []
+
+        q = self.make_name_question(NAME, dns.DNS_QTYPE_TXT, dns.DNS_QCLASS_IN)
+        questions.append(q)
+
+        self.finish_name_packet(p, questions)
+        (response, response_packet) =\
+            self.dns_transaction_udp(p, host=server_ip)
+        self.assert_dns_rcode_equals(response, dns.DNS_RCODE_NXDOMAIN)
+
+        # recreate the record
+        p = self.make_name_packet(dns.DNS_OPCODE_UPDATE)
+        updates = []
+
+        name = self.get_dns_domain()
+
+        u = self.make_name_question(name, dns.DNS_QTYPE_SOA, dns.DNS_QCLASS_IN)
+        updates.append(u)
+        self.finish_name_packet(p, updates)
+
+        updates = []
+        r = dns.res_rec()
+        r.name = NAME
+        r.rr_type = dns.DNS_QTYPE_TXT
+        r.rr_class = dns.DNS_QCLASS_IN
+        r.ttl = 900
+        r.length = 0xffff
+        rdata = self.make_txt_record(['"This is a test"'])
+        r.rdata = rdata
+        updates.append(r)
+        p.nscount = len(updates)
+        p.nsrecs = updates
+
+        (response, response_packet) =\
+            self.dns_transaction_udp(p, host=server_ip)
+        self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
+
+        # Now check the record is around
+        p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
+        questions = []
+        q = self.make_name_question(NAME, dns.DNS_QTYPE_TXT, dns.DNS_QCLASS_IN)
+        questions.append(q)
+
+        self.finish_name_packet(p, questions)
+        (response, response_packet) =\
+            self.dns_transaction_udp(p, host=server_ip)
+        self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
+
+    def test_update_add_mx_record(self):
+        "test adding MX records works"
+        p = self.make_name_packet(dns.DNS_OPCODE_UPDATE)
+        updates = []
+
+        name = self.get_dns_domain()
+
+        u = self.make_name_question(name, dns.DNS_QTYPE_SOA, dns.DNS_QCLASS_IN)
+        updates.append(u)
+        self.finish_name_packet(p, updates)
+
+        updates = []
+        r = dns.res_rec()
+        r.name = "%s" % self.get_dns_domain()
+        r.rr_type = dns.DNS_QTYPE_MX
+        r.rr_class = dns.DNS_QCLASS_IN
+        r.ttl = 900
+        r.length = 0xffff
+        rdata = dns.mx_record()
+        rdata.preference = 10
+        rdata.exchange = 'mail.%s' % self.get_dns_domain()
+        r.rdata = rdata
+        updates.append(r)
+        p.nscount = len(updates)
+        p.nsrecs = updates
+
+        (response, response_packet) =\
+            self.dns_transaction_udp(p, host=server_ip)
+        self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
+
+        # Query the MX record back and check preference and exchange survive.
+        p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
+        questions = []
+
+        name = "%s" % self.get_dns_domain()
+        q = self.make_name_question(name, dns.DNS_QTYPE_MX, dns.DNS_QCLASS_IN)
+        questions.append(q)
+
+        self.finish_name_packet(p, questions)
+        (response, response_packet) =\
+            self.dns_transaction_udp(p, host=server_ip)
+        self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
+        self.assertEqual(response.ancount, 1)
+        ans = response.answers[0]
+        self.assertEqual(ans.rr_type, dns.DNS_QTYPE_MX)
+        self.assertEqual(ans.rdata.preference, 10)
+        self.assertEqual(ans.rdata.exchange, 'mail.%s' % self.get_dns_domain())
+
+
+class TestComplexQueries(DNSTest):
+    """CNAME chains, CNAME loops and per-name record-count limits."""
+
+    def make_dns_update(self, key, value, qtype):
+        # Helper: send a single-record dynamic update adding *value* under
+        # *key* with record type *qtype*, and assert the server accepts it.
+        p = self.make_name_packet(dns.DNS_OPCODE_UPDATE)
+
+        name = self.get_dns_domain()
+        u = self.make_name_question(name, dns.DNS_QTYPE_SOA, dns.DNS_QCLASS_IN)
+        self.finish_name_packet(p, [u])
+
+        r = dns.res_rec()
+        r.name = key
+        r.rr_type = qtype
+        r.rr_class = dns.DNS_QCLASS_IN
+        r.ttl = 900
+        r.length = 0xffff
+        r.rdata = value
+        p.nscount = 1
+        p.nsrecs = [r]
+        (response, response_packet) =\
+            self.dns_transaction_udp(p, host=server_ip)
+        self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
+
+    def setUp(self):
+        super().setUp()
+
+        # Copy the module-level connection parameters onto the instance.
+        global server, server_ip, lp, creds, timeout
+        self.server = server_name
+        self.server_ip = server_ip
+        self.lp = lp
+        self.creds = creds
+        self.timeout = timeout
+
+    def test_one_a_query(self):
+        "create a query packet containing one query record"
+
+        try:
+
+            # Create the record
+            name = "cname_test.%s" % self.get_dns_domain()
+            rdata = "%s.%s" % (self.server, self.get_dns_domain())
+            self.make_dns_update(name, rdata, dns.DNS_QTYPE_CNAME)
+
+            p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
+            questions = []
+
+            # Check the record: an A query for the CNAME should return the
+            # CNAME itself plus the A record of its target.
+            name = "cname_test.%s" % self.get_dns_domain()
+            q = self.make_name_question(name,
+                                        dns.DNS_QTYPE_A,
+                                        dns.DNS_QCLASS_IN)
+            print("asking for ", q.name)
+            questions.append(q)
+
+            self.finish_name_packet(p, questions)
+            (response, response_packet) =\
+                self.dns_transaction_udp(p, host=self.server_ip)
+            self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
+            self.assert_dns_opcode_equals(response, dns.DNS_OPCODE_QUERY)
+            self.assertEqual(response.ancount, 2)
+            self.assertEqual(response.answers[0].rr_type, dns.DNS_QTYPE_CNAME)
+            self.assertEqual(response.answers[0].rdata, "%s.%s" %
+                             (self.server, self.get_dns_domain()))
+            self.assertEqual(response.answers[1].rr_type, dns.DNS_QTYPE_A)
+            self.assertEqual(response.answers[1].rdata,
+                             self.server_ip)
+
+        finally:
+            # Delete the record so the test leaves the zone clean.
+            p = self.make_name_packet(dns.DNS_OPCODE_UPDATE)
+            updates = []
+
+            name = self.get_dns_domain()
+
+            u = self.make_name_question(name,
+                                        dns.DNS_QTYPE_SOA,
+                                        dns.DNS_QCLASS_IN)
+            updates.append(u)
+            self.finish_name_packet(p, updates)
+
+            updates = []
+            r = dns.res_rec()
+            r.name = "cname_test.%s" % self.get_dns_domain()
+            r.rr_type = dns.DNS_QTYPE_CNAME
+            r.rr_class = dns.DNS_QCLASS_NONE
+            r.ttl = 0
+            r.length = 0xffff
+            r.rdata = "%s.%s" % (self.server, self.get_dns_domain())
+            updates.append(r)
+            p.nscount = len(updates)
+            p.nsrecs = updates
+
+            (response, response_packet) =\
+                self.dns_transaction_udp(p, host=self.server_ip)
+            self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
+
+    def test_cname_two_chain(self):
+        # name1 -> name2 -> name0 -> A record; all three links must be
+        # returned for a single A query on name1.
+        name0 = "cnamechain0.%s" % self.get_dns_domain()
+        name1 = "cnamechain1.%s" % self.get_dns_domain()
+        name2 = "cnamechain2.%s" % self.get_dns_domain()
+        self.make_dns_update(name1, name2, dns.DNS_QTYPE_CNAME)
+        self.make_dns_update(name2, name0, dns.DNS_QTYPE_CNAME)
+        self.make_dns_update(name0, server_ip, dns.DNS_QTYPE_A)
+
+        p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
+        questions = []
+        q = self.make_name_question(name1, dns.DNS_QTYPE_A,
+                                    dns.DNS_QCLASS_IN)
+        questions.append(q)
+
+        self.finish_name_packet(p, questions)
+        (response, response_packet) =\
+            self.dns_transaction_udp(p, host=server_ip)
+        self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
+        self.assert_dns_opcode_equals(response, dns.DNS_OPCODE_QUERY)
+        self.assertEqual(response.ancount, 3)
+
+        self.assertEqual(response.answers[0].rr_type, dns.DNS_QTYPE_CNAME)
+        self.assertEqual(response.answers[0].name, name1)
+        self.assertEqual(response.answers[0].rdata, name2)
+
+        self.assertEqual(response.answers[1].rr_type, dns.DNS_QTYPE_CNAME)
+        self.assertEqual(response.answers[1].name, name2)
+        self.assertEqual(response.answers[1].rdata, name0)
+
+        self.assertEqual(response.answers[2].rr_type, dns.DNS_QTYPE_A)
+        self.assertEqual(response.answers[2].rdata,
+                         self.server_ip)
+
+    def test_invalid_empty_cname(self):
+        name0 = "cnamedotprefix0.%s" % self.get_dns_domain()
+        # make_dns_update asserts RCODE_OK, so a rejected update surfaces
+        # as an AssertionError — which is what we expect here.
+        try:
+            self.make_dns_update(name0, "", dns.DNS_QTYPE_CNAME)
+        except AssertionError:
+            pass
+        else:
+            self.fail("Successfully added empty CNAME, which is invalid.")
+
+    def test_cname_two_chain_not_matching_qtype(self):
+        name0 = "cnamechain0.%s" % self.get_dns_domain()
+        name1 = "cnamechain1.%s" % self.get_dns_domain()
+        name2 = "cnamechain2.%s" % self.get_dns_domain()
+        self.make_dns_update(name1, name2, dns.DNS_QTYPE_CNAME)
+        self.make_dns_update(name2, name0, dns.DNS_QTYPE_CNAME)
+        self.make_dns_update(name0, server_ip, dns.DNS_QTYPE_A)
+
+        p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
+        questions = []
+        q = self.make_name_question(name1, dns.DNS_QTYPE_TXT,
+                                    dns.DNS_QCLASS_IN)
+        questions.append(q)
+
+        self.finish_name_packet(p, questions)
+        (response, response_packet) =\
+            self.dns_transaction_udp(p, host=server_ip)
+        self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
+        self.assert_dns_opcode_equals(response, dns.DNS_OPCODE_QUERY)
+
+        # CNAME should return all intermediate results!
+        # Only the A records exists, not the TXT.
+        self.assertEqual(response.ancount, 2)
+
+        self.assertEqual(response.answers[0].rr_type, dns.DNS_QTYPE_CNAME)
+        self.assertEqual(response.answers[0].name, name1)
+        self.assertEqual(response.answers[0].rdata, name2)
+
+        self.assertEqual(response.answers[1].rr_type, dns.DNS_QTYPE_CNAME)
+        self.assertEqual(response.answers[1].name, name2)
+        self.assertEqual(response.answers[1].rdata, name0)
+
+    def test_cname_loop(self):
+        # Build a three-record CNAME cycle and check the server stops
+        # following it after a bounded number of steps.
+        cname1 = "cnamelooptestrec." + self.get_dns_domain()
+        cname2 = "cnamelooptestrec2." + self.get_dns_domain()
+        cname3 = "cnamelooptestrec3." + self.get_dns_domain()
+        self.make_dns_update(cname1, cname2, dnsp.DNS_TYPE_CNAME)
+        self.make_dns_update(cname2, cname3, dnsp.DNS_TYPE_CNAME)
+        self.make_dns_update(cname3, cname1, dnsp.DNS_TYPE_CNAME)
+
+        p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
+        questions = []
+
+        q = self.make_name_question(cname1,
+                                    dns.DNS_QTYPE_A,
+                                    dns.DNS_QCLASS_IN)
+        questions.append(q)
+        self.finish_name_packet(p, questions)
+
+        (response, response_packet) =\
+            self.dns_transaction_udp(p, host=self.server_ip)
+
+        max_recursion_depth = 20
+        self.assertEqual(len(response.answers), max_recursion_depth)
+
+    # Make sure cname limit doesn't count other records. This is a generic
+    # test called in tests below
+    def max_rec_test(self, rtype, rec_gen):
+        name = "limittestrec{0}.{1}".format(rtype, self.get_dns_domain())
+        limit = 20
+        num_recs_to_enter = limit + 5
+
+        # Enter more records than the CNAME-follow limit; all of them must
+        # still be returned for a direct query of the matching type.
+        for i in range(1, num_recs_to_enter+1):
+            ip = rec_gen(i)
+            self.make_dns_update(name, ip, rtype)
+
+        p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
+        questions = []
+
+        q = self.make_name_question(name,
+                                    rtype,
+                                    dns.DNS_QCLASS_IN)
+        questions.append(q)
+        self.finish_name_packet(p, questions)
+
+        (response, response_packet) =\
+            self.dns_transaction_udp(p, host=self.server_ip)
+
+        self.assertEqual(len(response.answers), num_recs_to_enter)
+
+    def test_record_limit_A(self):
+        def ip4_gen(i):
+            return "127.0.0." + str(i)
+        self.max_rec_test(rtype=dns.DNS_QTYPE_A, rec_gen=ip4_gen)
+
+    def test_record_limit_AAAA(self):
+        def ip6_gen(i):
+            return "AAAA:0:0:0:0:0:0:" + str(i)
+        self.max_rec_test(rtype=dns.DNS_QTYPE_AAAA, rec_gen=ip6_gen)
+
+    def test_record_limit_SRV(self):
+        def srv_gen(i):
+            rec = dns.srv_record()
+            rec.priority = 1
+            rec.weight = 1
+            rec.port = 92
+            rec.target = "srvtestrec" + str(i)
+            return rec
+        self.max_rec_test(rtype=dns.DNS_QTYPE_SRV, rec_gen=srv_gen)
+
+    # Same as test_record_limit_A but with a preceding CNAME follow
+    def test_cname_limit(self):
+        cname1 = "cnamelimittestrec." + self.get_dns_domain()
+        cname2 = "cnamelimittestrec2." + self.get_dns_domain()
+        cname3 = "cnamelimittestrec3." + self.get_dns_domain()
+        ip_prefix = '127.0.0.'
+        limit = 20
+        num_recs_to_enter = limit + 5
+
+        self.make_dns_update(cname1, cname2, dnsp.DNS_TYPE_CNAME)
+        self.make_dns_update(cname2, cname3, dnsp.DNS_TYPE_CNAME)
+        # The two CNAME hops count towards the total, so enter two fewer
+        # A records than the overall target.
+        num_arecs_to_enter = num_recs_to_enter - 2
+        for i in range(1, num_arecs_to_enter+1):
+            ip = ip_prefix + str(i)
+            self.make_dns_update(cname3, ip, dns.DNS_QTYPE_A)
+
+        p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
+        questions = []
+
+        q = self.make_name_question(cname1,
+                                    dns.DNS_QTYPE_A,
+                                    dns.DNS_QCLASS_IN)
+        questions.append(q)
+        self.finish_name_packet(p, questions)
+
+        (response, response_packet) =\
+            self.dns_transaction_udp(p, host=self.server_ip)
+
+        self.assertEqual(len(response.answers), num_recs_to_enter)
+
+    # ANY query on cname record shouldn't follow the link
+    def test_cname_any_query(self):
+        cname1 = "cnameanytestrec." + self.get_dns_domain()
+        cname2 = "cnameanytestrec2." + self.get_dns_domain()
+        cname3 = "cnameanytestrec3." + self.get_dns_domain()
+
+        self.make_dns_update(cname1, cname2, dnsp.DNS_TYPE_CNAME)
+        self.make_dns_update(cname2, cname3, dnsp.DNS_TYPE_CNAME)
+
+        p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
+        questions = []
+
+        q = self.make_name_question(cname1,
+                                    dns.DNS_QTYPE_ALL,
+                                    dns.DNS_QCLASS_IN)
+        questions.append(q)
+        self.finish_name_packet(p, questions)
+
+        (response, response_packet) =\
+            self.dns_transaction_udp(p, host=self.server_ip)
+
+        self.assertEqual(len(response.answers), 1)
+        self.assertEqual(response.answers[0].name, cname1)
+        self.assertEqual(response.answers[0].rdata, cname2)
+
+
+class TestInvalidQueries(DNSTest):
+    """Malformed traffic: zero-length datagrams and reply-flagged packets."""
+
+    def setUp(self):
+        super().setUp()
+        # Copy the module-level connection parameters onto the instance.
+        global server, server_ip, lp, creds, timeout
+        self.server = server_name
+        self.server_ip = server_ip
+        self.lp = lp
+        self.creds = creds
+        self.timeout = timeout
+
+    def test_one_a_query(self):
+        """send 0 bytes follows by create a query packet
+           containing one query record"""
+
+        # First fire an empty UDP datagram at port 53; the server must
+        # survive it and still answer the normal query that follows.
+        s = None
+        try:
+            s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
+            s.connect((self.server_ip, 53))
+            s.send(b"", 0)
+        finally:
+            if s is not None:
+                s.close()
+
+        p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
+        questions = []
+
+        name = "%s.%s" % (self.server, self.get_dns_domain())
+        q = self.make_name_question(name, dns.DNS_QTYPE_A, dns.DNS_QCLASS_IN)
+        print("asking for ", q.name)
+        questions.append(q)
+
+        self.finish_name_packet(p, questions)
+        (response, response_packet) =\
+            self.dns_transaction_udp(p, host=self.server_ip)
+        self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
+        self.assert_dns_opcode_equals(response, dns.DNS_OPCODE_QUERY)
+        self.assertEqual(response.ancount, 1)
+        self.assertEqual(response.answers[0].rdata,
+                         self.server_ip)
+
+    def test_one_a_reply(self):
+        "send a reply instead of a query"
+        global timeout
+
+        p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
+        questions = []
+
+        name = "%s.%s" % ('fakefakefake', self.get_dns_domain())
+        q = self.make_name_question(name, dns.DNS_QTYPE_A, dns.DNS_QCLASS_IN)
+        print("asking for ", q.name)
+        questions.append(q)
+
+        self.finish_name_packet(p, questions)
+        # Mark the packet as a reply: the server should drop it silently.
+        p.operation |= dns.DNS_FLAG_REPLY
+        s = None
+        try:
+            send_packet = ndr.ndr_pack(p)
+            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
+            s.settimeout(timeout)
+            host = self.server_ip
+            s.connect((host, 53))
+            # DNS-over-TCP frames are prefixed with a 2-byte big-endian length.
+            tcp_packet = struct.pack('!H', len(send_packet))
+            tcp_packet += send_packet
+            s.send(tcp_packet, 0)
+            recv_packet = s.recv(0xffff + 2, 0)
+            self.assertEqual(0, len(recv_packet))
+        except socket.timeout:
+            # Windows chooses not to respond to incorrectly formatted queries.
+            # Although this appears to be non-deterministic even for the same
+            # request twice, it also appears to be based on a how poorly the
+            # request is formatted.
+            pass
+        finally:
+            if s is not None:
+                s.close()
+
+
+class TestZones(DNSTest):
    def setUp(self):
        """Bind module-level fixtures, open a signed dnsserver RPC
        connection and an LDAP SamDB connection, and compute the DN of
        the test zone ("test.lan")."""
        super().setUp()
        global server, server_ip, lp, creds, timeout
        self.server = server_name
        self.server_ip = server_ip
        self.lp = lp
        self.creds = creds
        self.timeout = timeout

        self.zone = "test.lan"
        self.rpc_conn = dnsserver.dnsserver("ncacn_ip_tcp:%s[sign]" %
                                            (self.server_ip),
                                            self.lp, self.creds)

        self.samdb = SamDB(url="ldap://" + self.server_ip,
                           lp=self.get_loadparm(),
                           session_info=system_session(),
                           credentials=self.creds)
        # DN of the zone object in the DomainDNSZones partition.
        self.zone_dn = "DC=" + self.zone +\
                       ",CN=MicrosoftDNS,DC=DomainDNSZones," +\
                       str(self.samdb.get_default_basedn())
+
    def tearDown(self):
        super().tearDown()

        # Best-effort cleanup: tests that never created the zone (or that
        # already deleted it) hit ZONE_DOES_NOT_EXIST, which is fine.
        try:
            self.delete_zone(self.zone)
        except RuntimeError as e:
            (num, string) = e.args
            if num != werror.WERR_DNS_ERROR_ZONE_DOES_NOT_EXIST:
                raise
+
+ def make_zone_obj(self, zone, aging_enabled=False):
+ zone_create = dnsserver.DNS_RPC_ZONE_CREATE_INFO_LONGHORN()
+ zone_create.pszZoneName = zone
+ zone_create.dwZoneType = dnsp.DNS_ZONE_TYPE_PRIMARY
+ zone_create.fAging = int(aging_enabled)
+ zone_create.dwDpFlags = dnsserver.DNS_DP_DOMAIN_DEFAULT
+ zone_create.fDsIntegrated = 1
+ zone_create.fLoadExisting = 1
+ zone_create.fAllowUpdate = dnsp.DNS_ZONE_UPDATE_UNSECURE
+ return zone_create
+
    def create_zone(self, zone, aging_enabled=False):
        """Create a DS-integrated primary zone via the ZoneCreate RPC,
        failing the test on any WERROR."""
        zone_create = self.make_zone_obj(zone, aging_enabled)
        try:
            client_version = dnsserver.DNS_CLIENT_VERSION_LONGHORN
            self.rpc_conn.DnssrvOperation2(client_version,
                                           0,
                                           self.server_ip,
                                           None,
                                           0,
                                           'ZoneCreate',
                                           dnsserver.DNSSRV_TYPEID_ZONE_CREATE,
                                           zone_create)
        except WERRORError as e:
            self.fail(e)
+
    def set_params(self, **kwargs):
        """Set zone DWORD properties on self.zone, one ResetDwordProperty
        RPC per keyword argument (e.g. Aging=1, RefreshInterval=10)."""
        for key, val in kwargs.items():
            name_param = dnsserver.DNS_RPC_NAME_AND_PARAM()
            name_param.dwParam = val
            name_param.pszNodeName = key

            client_version = dnsserver.DNS_CLIENT_VERSION_LONGHORN
            nap_type = dnsserver.DNSSRV_TYPEID_NAME_AND_PARAM
            try:
                self.rpc_conn.DnssrvOperation2(client_version,
                                               0,
                                               self.server,
                                               self.zone,
                                               0,
                                               'ResetDwordProperty',
                                               nap_type,
                                               name_param)
            except WERRORError as e:
                self.fail(str(e))
+
+ def ldap_modify_dnsrecs(self, name, func):
+ dn = 'DC={0},{1}'.format(name, self.zone_dn)
+ dns_recs = self.ldap_get_dns_records(name)
+ for rec in dns_recs:
+ func(rec)
+ update_dict = {'dn': dn, 'dnsRecord': [ndr_pack(r) for r in dns_recs]}
+ self.samdb.modify(ldb.Message.from_dict(self.samdb,
+ update_dict,
+ ldb.FLAG_MOD_REPLACE))
+
+ def dns_update_record(self, prefix, txt):
+ p = self.make_txt_update(prefix, txt, self.zone)
+ (code, response) = self.dns_transaction_udp(p, host=self.server_ip)
+ self.assert_dns_rcode_equals(code, dns.DNS_RCODE_OK)
+ recs = self.ldap_get_dns_records(prefix)
+ recs = [r for r in recs if r.data.str == txt]
+ self.assertEqual(len(recs), 1)
+ return recs[0]
+
+ def dns_tombstone(self, prefix, txt, zone):
+ name = prefix + "." + zone
+
+ to = dnsp.DnssrvRpcRecord()
+ to.dwTimeStamp = 1000
+ to.wType = dnsp.DNS_TYPE_TOMBSTONE
+
+ self.samdb.dns_replace(name, [to])
+
+ def ldap_get_records(self, name):
+ # The use of SCOPE_SUBTREE here avoids raising an exception in the
+ # 0 results case for a test below.
+
+ expr = "(&(objectClass=dnsNode)(name={0}))".format(name)
+ return self.samdb.search(base=self.zone_dn, scope=ldb.SCOPE_SUBTREE,
+ expression=expr, attrs=["*"])
+
+ def ldap_get_dns_records(self, name):
+ records = self.ldap_get_records(name)
+ return [ndr_unpack(dnsp.DnssrvRpcRecord, r)
+ for r in records[0].get('dnsRecord')]
+
    def ldap_get_zone_settings(self):
        """Read the zone object's dNSProperty attribute and return it as a
        dict keyed by lower-cased property name (e.g. 'aging_state')."""
        records = self.samdb.search(base=self.zone_dn, scope=ldb.SCOPE_BASE,
                                    expression="(&(objectClass=dnsZone)" +
                                    "(name={0}))".format(self.zone),
                                    attrs=["dNSProperty"])
        self.assertEqual(len(records), 1)
        props = [ndr_unpack(dnsp.DnsProperty, r)
                 for r in records[0].get('dNSProperty')]

        # We have no choice but to repeat these here.
        zone_prop_ids = {0x00: "EMPTY",
                         0x01: "TYPE",
                         0x02: "ALLOW_UPDATE",
                         0x08: "SECURE_TIME",
                         0x10: "NOREFRESH_INTERVAL",
                         0x11: "SCAVENGING_SERVERS",
                         0x12: "AGING_ENABLED_TIME",
                         0x20: "REFRESH_INTERVAL",
                         0x40: "AGING_STATE",
                         0x80: "DELETED_FROM_HOSTNAME",
                         0x81: "MASTER_SERVERS",
                         0x82: "AUTO_NS_SERVERS",
                         0x83: "DCPROMO_CONVERT",
                         0x90: "SCAVENGING_SERVERS_DA",
                         0x91: "MASTER_SERVERS_DA",
                         0x92: "NS_SERVERS_DA",
                         0x100: "NODE_DBFLAGS"}
        return {zone_prop_ids[p.id].lower(): p.data for p in props}
+
+ def set_aging(self, enable=False):
+ self.create_zone(self.zone, aging_enabled=enable)
+ self.set_params(NoRefreshInterval=1,
+ RefreshInterval=1,
+ Aging=int(bool(enable)),
+ AllowUpdate=dnsp.DNS_ZONE_UPDATE_UNSECURE)
+
+ def test_set_aging(self, enable=True, name='agingtest', txt=None):
+ if txt is None:
+ txt = ['test txt']
+ self.set_aging(enable=True)
+ settings = self.ldap_get_zone_settings()
+ self.assertTrue(settings['aging_state'] is not None)
+ self.assertTrue(settings['aging_state'])
+
+ rec = self.dns_update_record('agingtest', ['test txt'])
+ self.assertNotEqual(rec.dwTimeStamp, 0)
+
    def test_set_aging_disabled(self):
        """With aging disabled the zone's AGING_STATE is present but
        false; dynamic updates still receive a timestamp."""
        self.set_aging(enable=False)
        settings = self.ldap_get_zone_settings()
        self.assertTrue(settings['aging_state'] is not None)
        self.assertFalse(settings['aging_state'])

        # Timestamps are recorded regardless of the aging state.
        rec = self.dns_update_record('agingtest', ['test txt'])
        self.assertNotEqual(rec.dwTimeStamp, 0)
+
    def test_aging_update(self, enable=True):
        """Re-sending a dynamic update refreshes dwTimeStamp only while
        aging is enabled; with aging off the artificially lowered
        timestamp is kept."""
        name, txt = 'agingtest', ['test txt']
        # The zone is always created with aging on; the disabled variant
        # switches it off again after the first update.
        self.set_aging(enable=True)
        before_mod = self.dns_update_record(name, txt)
        if not enable:
            self.set_params(Aging=0)
        dec = 2

        # Artificially age the stored record by `dec` via LDAP.
        def mod_ts(rec):
            self.assertTrue(rec.dwTimeStamp > 0)
            rec.dwTimeStamp -= dec
        self.ldap_modify_dnsrecs(name, mod_ts)
        after_mod = self.ldap_get_dns_records(name)
        self.assertEqual(len(after_mod), 1)
        after_mod = after_mod[0]
        self.assertEqual(after_mod.dwTimeStamp,
                         before_mod.dwTimeStamp - dec)
        after_update = self.dns_update_record(name, txt)
        # Aging on: timestamp restored; aging off: lowered value kept.
        after_should_equal = before_mod if enable else after_mod
        self.assertEqual(after_should_equal.dwTimeStamp,
                         after_update.dwTimeStamp)
+
+ def test_aging_update_disabled(self):
+ self.test_aging_update(enable=False)
+
    def test_aging_refresh(self):
        """Dynamic updates inside the no-refresh window leave dwTimeStamp
        unchanged; once the record ages into the refresh window an update
        restores the original timestamp."""
        name, txt = 'agingtest', ['test txt']
        self.create_zone(self.zone, aging_enabled=True)
        interval = 10
        self.set_params(NoRefreshInterval=interval,
                        RefreshInterval=interval,
                        Aging=1,
                        AllowUpdate=dnsp.DNS_ZONE_UPDATE_UNSECURE)
        before_mod = self.dns_update_record(name, txt)

        # Age the record half-way into the no-refresh window.
        def mod_ts(rec):
            self.assertTrue(rec.dwTimeStamp > 0)
            rec.dwTimeStamp -= interval // 2
        self.ldap_modify_dnsrecs(name, mod_ts)
        update_during_norefresh = self.dns_update_record(name, txt)

        # Age the record into the refresh window.
        def mod_ts(rec):
            self.assertTrue(rec.dwTimeStamp > 0)
            rec.dwTimeStamp -= interval + interval // 2
        self.ldap_modify_dnsrecs(name, mod_ts)
        update_during_refresh = self.dns_update_record(name, txt)
        self.assertEqual(update_during_norefresh.dwTimeStamp,
                         before_mod.dwTimeStamp - interval / 2)
        self.assertEqual(update_during_refresh.dwTimeStamp,
                         before_mod.dwTimeStamp)
+
    def test_rpc_add_no_timestamp(self):
        """Records added via the dnsserver RPC are static: dwTimeStamp
        stays 0 even with aging enabled."""
        name, txt = 'agingtest', ['test txt']
        self.set_aging(enable=True)
        rec_buf = dnsserver.DNS_RPC_RECORD_BUF()
        rec_buf.rec = TXTRecord(txt)
        self.rpc_conn.DnssrvUpdateRecord2(
            dnsserver.DNS_CLIENT_VERSION_LONGHORN,
            0,
            self.server_ip,
            self.zone,
            name,
            rec_buf,
            None)
        recs = self.ldap_get_dns_records(name)
        self.assertEqual(len(recs), 1)
        self.assertEqual(recs[0].dwTimeStamp, 0)
+
    def test_static_record_dynamic_update(self):
        """Once a node holds a static (RPC-added) record, a later dynamic
        update on the same node is also stored without a timestamp."""
        name, txt = 'agingtest', ['test txt']
        txt2 = ['test txt2']
        self.set_aging(enable=True)
        rec_buf = dnsserver.DNS_RPC_RECORD_BUF()
        rec_buf.rec = TXTRecord(txt)
        self.rpc_conn.DnssrvUpdateRecord2(
            dnsserver.DNS_CLIENT_VERSION_LONGHORN,
            0,
            self.server_ip,
            self.zone,
            name,
            rec_buf,
            None)

        rec2 = self.dns_update_record(name, txt2)
        self.assertEqual(rec2.dwTimeStamp, 0)
+
    def test_dynamic_record_static_update(self):
        """Adding a static record to a node makes subsequent dynamic
        updates static too, while the pre-existing dynamic record keeps
        its non-zero timestamp."""
        name, txt = 'agingtest', ['test txt']
        txt2 = ['test txt2']
        txt3 = ['test txt3']
        self.set_aging(enable=True)

        # Dynamic first, then static via RPC, then dynamic again.
        self.dns_update_record(name, txt)

        rec_buf = dnsserver.DNS_RPC_RECORD_BUF()
        rec_buf.rec = TXTRecord(txt2)
        self.rpc_conn.DnssrvUpdateRecord2(
            dnsserver.DNS_CLIENT_VERSION_LONGHORN,
            0,
            self.server_ip,
            self.zone,
            name,
            rec_buf,
            None)

        self.dns_update_record(name, txt3)

        recs = self.ldap_get_dns_records(name)
        # Put in dict because ldap recs might be out of order
        recs = {str(r.data.str): r for r in recs}
        self.assertNotEqual(recs[str(txt)].dwTimeStamp, 0)
        self.assertEqual(recs[str(txt2)].dwTimeStamp, 0)
        self.assertEqual(recs[str(txt3)].dwTimeStamp, 0)
+
    def test_dns_tombstone_custom_match_rule(self):
        """The dnsRecord custom match rule (OID 1.3.6.1.4.1.7165.4.5.3)
        should select exactly the nodes whose records were aged below the
        given cutoff, skipping static and tombstoned nodes."""
        lp = self.get_loadparm()
        self.samdb = SamDB(url=lp.samdb_url(), lp=lp,
                           session_info=system_session(),
                           credentials=self.creds)

        name, txt = 'agingtest', ['test txt']
        name2, txt2 = 'agingtest2', ['test txt2']
        name3, txt3 = 'agingtest3', ['test txt3']
        name4, txt4 = 'agingtest4', ['test txt4']
        name5, txt5 = 'agingtest5', ['test txt5']

        self.create_zone(self.zone, aging_enabled=True)
        interval = 10
        self.set_params(NoRefreshInterval=interval,
                        RefreshInterval=interval,
                        Aging=1,
                        AllowUpdate=dnsp.DNS_ZONE_UPDATE_UNSECURE)

        self.dns_update_record(name, txt)

        self.dns_update_record(name2, txt)
        self.dns_update_record(name2, txt2)

        self.dns_update_record(name3, txt)
        self.dns_update_record(name3, txt2)
        last_update = self.dns_update_record(name3, txt3)

        # Modify txt1 of the first 2 names
        def mod_ts(rec):
            if rec.data.str == txt:
                rec.dwTimeStamp -= 2
        self.ldap_modify_dnsrecs(name, mod_ts)
        self.ldap_modify_dnsrecs(name2, mod_ts)

        # create a static dns record.
        rec_buf = dnsserver.DNS_RPC_RECORD_BUF()
        rec_buf.rec = TXTRecord(txt4)
        self.rpc_conn.DnssrvUpdateRecord2(
            dnsserver.DNS_CLIENT_VERSION_LONGHORN,
            0,
            self.server_ip,
            self.zone,
            name4,
            rec_buf,
            None)

        # Create a tombstoned record.
        self.dns_update_record(name5, txt5)
        self.dns_tombstone(name5, txt5, self.zone)

        self.ldap_get_dns_records(name3)
        # Match everything older than (last_update - 1).
        expr = "(dnsRecord:1.3.6.1.4.1.7165.4.5.3:={0})"
        expr = expr.format(int(last_update.dwTimeStamp) - 1)
        try:
            res = self.samdb.search(base=self.zone_dn, scope=ldb.SCOPE_SUBTREE,
                                    expression=expr, attrs=["*"])
        except ldb.LdbError as e:
            self.fail(str(e))
        updated_names = {str(r.get('name')) for r in res}
        self.assertEqual(updated_names, set([name, name2]))
+
    def test_dns_tombstone_custom_match_rule_no_records(self):
        """The tombstone-time match rule on a zone with no records must
        return an empty result, not an error."""
        lp = self.get_loadparm()
        self.samdb = SamDB(url=lp.samdb_url(), lp=lp,
                           session_info=system_session(),
                           credentials=self.creds)

        self.create_zone(self.zone, aging_enabled=True)
        interval = 10
        self.set_params(NoRefreshInterval=interval,
                        RefreshInterval=interval,
                        Aging=1,
                        AllowUpdate=dnsp.DNS_ZONE_UPDATE_UNSECURE)

        expr = "(dnsRecord:1.3.6.1.4.1.7165.4.5.3:={0})"
        expr = expr.format(1)

        try:
            res = self.samdb.search(base=self.zone_dn, scope=ldb.SCOPE_SUBTREE,
                                    expression=expr, attrs=["*"])
        except ldb.LdbError as e:
            self.fail(str(e))
        self.assertEqual(0, len(res))
+
+ def test_dns_tombstone_custom_match_rule_fail(self):
+ self.create_zone(self.zone, aging_enabled=True)
+ samdb = SamDB(url=lp.samdb_url(),
+ lp=lp,
+ session_info=system_session(),
+ credentials=self.creds)
+
+ # Property name in not dnsRecord
+ expr = "(dnsProperty:1.3.6.1.4.1.7165.4.5.3:=1)"
+ res = samdb.search(base=self.zone_dn, scope=ldb.SCOPE_SUBTREE,
+ expression=expr, attrs=["*"])
+ self.assertEqual(len(res), 0)
+
+ # No value for tombstone time
+ try:
+ expr = "(dnsRecord:1.3.6.1.4.1.7165.4.5.3:=)"
+ res = samdb.search(base=self.zone_dn, scope=ldb.SCOPE_SUBTREE,
+ expression=expr, attrs=["*"])
+ self.assertEqual(len(res), 0)
+ self.fail("Exception: ldb.ldbError not generated")
+ except ldb.LdbError as e:
+ (num, msg) = e.args
+ self.assertEqual(num, ERR_OPERATIONS_ERROR)
+
+ # Tombstone time = -
+ try:
+ expr = "(dnsRecord:1.3.6.1.4.1.7165.4.5.3:=-)"
+ res = samdb.search(base=self.zone_dn, scope=ldb.SCOPE_SUBTREE,
+ expression=expr, attrs=["*"])
+ self.assertEqual(len(res), 0)
+ self.fail("Exception: ldb.ldbError not generated")
+ except ldb.LdbError as e:
+ (num, _) = e.args
+ self.assertEqual(num, ERR_OPERATIONS_ERROR)
+
+ # Tombstone time longer than 64 characters
+ try:
+ expr = "(dnsRecord:1.3.6.1.4.1.7165.4.5.3:={0})"
+ expr = expr.format("1" * 65)
+ res = samdb.search(base=self.zone_dn, scope=ldb.SCOPE_SUBTREE,
+ expression=expr, attrs=["*"])
+ self.assertEqual(len(res), 0)
+ self.fail("Exception: ldb.ldbError not generated")
+ except ldb.LdbError as e:
+ (num, _) = e.args
+ self.assertEqual(num, ERR_OPERATIONS_ERROR)
+
+ # Non numeric Tombstone time
+ try:
+ expr = "(dnsRecord:1.3.6.1.4.1.7165.4.5.3:=expired)"
+ res = samdb.search(base=self.zone_dn, scope=ldb.SCOPE_SUBTREE,
+ expression=expr, attrs=["*"])
+ self.assertEqual(len(res), 0)
+ self.fail("Exception: ldb.ldbError not generated")
+ except ldb.LdbError as e:
+ (num, _) = e.args
+ self.assertEqual(num, ERR_OPERATIONS_ERROR)
+
+ # Non system session
+ try:
+ db = SamDB(url="ldap://" + self.server_ip,
+ lp=self.get_loadparm(),
+ credentials=self.creds)
+
+ expr = "(dnsRecord:1.3.6.1.4.1.7165.4.5.3:=2)"
+ res = db.search(base=self.zone_dn, scope=ldb.SCOPE_SUBTREE,
+ expression=expr, attrs=["*"])
+ self.assertEqual(len(res), 0)
+ self.fail("Exception: ldb.ldbError not generated")
+ except ldb.LdbError as e:
+ (num, _) = e.args
+ self.assertEqual(num, ERR_OPERATIONS_ERROR)
+
    def test_basic_scavenging(self):
        """End-to-end scavenging: aged dynamic records are tombstoned,
        fresh/static ones survive, and sufficiently old tombstones are
        removed by _dns_delete_tombstones."""
        lp = self.get_loadparm()
        self.samdb = SamDB(url=lp.samdb_url(), lp=lp,
                           session_info=system_session(),
                           credentials=self.creds)

        self.create_zone(self.zone, aging_enabled=True)
        interval = 1
        self.set_params(NoRefreshInterval=interval,
                        RefreshInterval=interval,
                        Aging=1,
                        AllowUpdate=dnsp.DNS_ZONE_UPDATE_UNSECURE)
        name, txt = 'agingtest', ['test txt']
        name2, txt2 = 'agingtest2', ['test txt2']
        name3, txt3 = 'agingtest3', ['test txt3']
        name4, txt4 = 'agingtest4', ['test txt4']
        name5, txt5 = 'agingtest5', ['test txt5']
        self.dns_update_record(name, txt)
        self.dns_update_record(name2, txt)
        self.dns_update_record(name2, txt2)
        self.dns_update_record(name3, txt)
        self.dns_update_record(name3, txt2)

        # Create a tombstoned record.
        self.dns_update_record(name4, txt4)
        self.dns_tombstone(name4, txt4, self.zone)
        records = self.ldap_get_records(name4)
        self.assertIn("dNSTombstoned", records[0])
        self.assertEqual(records[0]["dNSTombstoned"][0], b"TRUE")

        # Create an un-tombstoned record, with dnsTombstoned: FALSE
        self.dns_update_record(name5, txt5)
        self.dns_tombstone(name5, txt5, self.zone)
        self.dns_update_record(name5, txt5)
        records = self.ldap_get_records(name5)
        self.assertIn("dNSTombstoned", records[0])
        self.assertEqual(records[0]["dNSTombstoned"][0], b"FALSE")

        last_add = self.dns_update_record(name3, txt3)

        # Age only the txt records of name/name2/name3...
        def mod_ts(rec):
            self.assertTrue(rec.dwTimeStamp > 0)
            if rec.data.str == txt:
                rec.dwTimeStamp -= interval * 5

        # ...and every record of name5.
        def mod_ts_all(rec):
            rec.dwTimeStamp -= interval * 5
        self.ldap_modify_dnsrecs(name, mod_ts)
        self.ldap_modify_dnsrecs(name2, mod_ts)
        self.ldap_modify_dnsrecs(name3, mod_ts)
        self.ldap_modify_dnsrecs(name5, mod_ts_all)
        self.assertTrue(callable(getattr(dsdb, '_scavenge_dns_records', None)))
        dsdb._scavenge_dns_records(self.samdb)

        # name: only record aged out -> node tombstoned.
        recs = self.ldap_get_dns_records(name)
        self.assertEqual(len(recs), 1)
        self.assertEqual(recs[0].wType, dnsp.DNS_TYPE_TOMBSTONE)
        records = self.ldap_get_records(name)
        self.assertIn("dNSTombstoned", records[0])
        self.assertEqual(records[0]["dNSTombstoned"][0], b"TRUE")

        # name2: aged record scavenged, fresh txt2 remains.
        recs = self.ldap_get_dns_records(name2)
        self.assertEqual(len(recs), 1)
        self.assertEqual(recs[0].wType, dnsp.DNS_TYPE_TXT)
        self.assertEqual(recs[0].data.str, txt2)

        # name3: two fresh records remain.
        recs = self.ldap_get_dns_records(name3)
        self.assertEqual(len(recs), 2)
        txts = {str(r.data.str) for r in recs}
        self.assertEqual(txts, {str(txt2), str(txt3)})
        self.assertEqual(recs[0].wType, dnsp.DNS_TYPE_TXT)
        self.assertEqual(recs[1].wType, dnsp.DNS_TYPE_TXT)

        # name4: was already tombstoned, stays tombstoned.
        recs = self.ldap_get_dns_records(name4)
        self.assertEqual(len(recs), 1)
        self.assertEqual(recs[0].wType, dnsp.DNS_TYPE_TOMBSTONE)
        records = self.ldap_get_records(name4)
        self.assertIn("dNSTombstoned", records[0])
        self.assertEqual(records[0]["dNSTombstoned"][0], b"TRUE")

        # name5: re-aged record is scavenged into a tombstone.
        recs = self.ldap_get_dns_records(name5)
        self.assertEqual(len(recs), 1)
        self.assertEqual(recs[0].wType, dnsp.DNS_TYPE_TOMBSTONE)
        records = self.ldap_get_records(name5)
        self.assertIn("dNSTombstoned", records[0])
        self.assertEqual(records[0]["dNSTombstoned"][0], b"TRUE")

        # Tombstones are deleted only once old enough (boundary check
        # either side of the cutoff).
        for make_it_work in [False, True]:
            inc = -1 if make_it_work else 1

            def mod_ts(rec):
                rec.data = (last_add.dwTimeStamp - 24 * 14) + inc
            self.ldap_modify_dnsrecs(name, mod_ts)
            dsdb._dns_delete_tombstones(self.samdb)
            recs = self.ldap_get_records(name)
            if make_it_work:
                self.assertEqual(len(recs), 0)
            else:
                self.assertEqual(len(recs), 1)
+
    def test_fully_qualified_zone(self):
        """Zone names with and without a trailing dot refer to the same
        zone: creating the other form fails with ALREADY_EXISTS, and
        EnumZones lists only the unqualified names."""

        def create_zone_expect_exists(zone):
            # Helper: creating *zone* must fail with ZONE_ALREADY_EXISTS.
            try:
                zone_create = self.make_zone_obj(zone)
                client_version = dnsserver.DNS_CLIENT_VERSION_LONGHORN
                zc_type = dnsserver.DNSSRV_TYPEID_ZONE_CREATE
                self.rpc_conn.DnssrvOperation2(client_version,
                                               0,
                                               self.server_ip,
                                               None,
                                               0,
                                               'ZoneCreate',
                                               zc_type,
                                               zone_create)
            except WERRORError as e:
                enum, _ = e.args
                if enum != werror.WERR_DNS_ERROR_ZONE_ALREADY_EXISTS:
                    self.fail(e)
                return
            self.fail("Zone {} should already exist".format(zone))

        # Create unqualified, then check creating qualified fails.
        self.create_zone(self.zone)
        create_zone_expect_exists(self.zone + '.')

        # Same again, but the other way around.
        self.create_zone(self.zone + '2.')
        create_zone_expect_exists(self.zone + '2')

        client_version = dnsserver.DNS_CLIENT_VERSION_LONGHORN
        request_filter = dnsserver.DNS_ZONE_REQUEST_PRIMARY
        tid = dnsserver.DNSSRV_TYPEID_DWORD
        typeid, res = self.rpc_conn.DnssrvComplexOperation2(client_version,
                                                            0,
                                                            self.server_ip,
                                                            None,
                                                            'EnumZones',
                                                            tid,
                                                            request_filter)

        self.delete_zone(self.zone)
        self.delete_zone(self.zone + '2')

        # Two zones should've been created, neither of them fully qualified.
        zones_we_just_made = []
        zones = [str(z.pszZoneName) for z in res.ZoneArray]
        for zone in zones:
            if zone.startswith(self.zone):
                zones_we_just_made.append(zone)
        self.assertEqual(len(zones_we_just_made), 2)
        self.assertEqual(set(zones_we_just_made), {self.zone + '2', self.zone})
+
+ def delete_zone(self, zone):
+ self.rpc_conn.DnssrvOperation2(dnsserver.DNS_CLIENT_VERSION_LONGHORN,
+ 0,
+ self.server_ip,
+ zone,
+ 0,
+ 'DeleteZoneFromDs',
+ dnsserver.DNSSRV_TYPEID_NULL,
+ None)
+
    def test_soa_query(self):
        """SOA queries follow the zone's lifecycle: NXDOMAIN before
        creation, one SOA answer while it exists, NXDOMAIN after
        deletion."""
        zone = "test.lan"
        p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
        questions = []

        q = self.make_name_question(zone, dns.DNS_QTYPE_SOA, dns.DNS_QCLASS_IN)
        questions.append(q)
        self.finish_name_packet(p, questions)

        (response, response_packet) =\
            self.dns_transaction_udp(p, host=server_ip)
        # Windows returns OK while BIND logically seems to return NXDOMAIN
        self.assert_dns_rcode_equals(response, dns.DNS_RCODE_NXDOMAIN)
        self.assert_dns_opcode_equals(response, dns.DNS_OPCODE_QUERY)
        self.assertEqual(response.ancount, 0)

        self.create_zone(zone)
        (response, response_packet) =\
            self.dns_transaction_udp(p, host=server_ip)
        self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
        self.assert_dns_opcode_equals(response, dns.DNS_OPCODE_QUERY)
        self.assertEqual(response.ancount, 1)
        self.assertEqual(response.answers[0].rr_type, dns.DNS_QTYPE_SOA)

        self.delete_zone(zone)
        (response, response_packet) =\
            self.dns_transaction_udp(p, host=server_ip)
        self.assert_dns_rcode_equals(response, dns.DNS_RCODE_NXDOMAIN)
        self.assert_dns_opcode_equals(response, dns.DNS_OPCODE_QUERY)
        self.assertEqual(response.ancount, 0)
+
    def set_dnsProperty_zero_length(self, dnsproperty_id):
        """Rewrite the zone's dNSProperty attribute to contain only the
        values with id *dnsproperty_id* plus one extra blob built from
        DnsProperty_short (presumably serializing with zero-length data —
        the malformed shape under test; confirm against the IDL)."""
        records = self.samdb.search(base=self.zone_dn, scope=ldb.SCOPE_BASE,
                                    expression="(&(objectClass=dnsZone)" +
                                    "(name={0}))".format(self.zone),
                                    attrs=["dNSProperty"])
        self.assertEqual(len(records), 1)
        props = [ndr_unpack(dnsp.DnsProperty, r)
                 for r in records[0].get('dNSProperty')]
        # NOTE: properties with other ids are deliberately dropped here.
        new_props = [ndr.ndr_pack(p) for p in props if p.id == dnsproperty_id]

        zero_length_p = dnsp.DnsProperty_short()
        zero_length_p.id = dnsproperty_id
        zero_length_p.namelength = 1
        zero_length_p.name = 1
        new_props += [ndr.ndr_pack(zero_length_p)]

        dn = records[0].dn
        update_dict = {'dn': dn, 'dnsProperty': new_props}
        self.samdb.modify(ldb.Message.from_dict(self.samdb,
                                                update_dict,
                                                ldb.FLAG_MOD_REPLACE))
+
+ def test_update_while_dnsProperty_zero_length(self):
+ self.create_zone(self.zone)
+ self.set_dnsProperty_zero_length(dnsp.DSPROPERTY_ZONE_ALLOW_UPDATE)
+ rec = self.dns_update_record('dnspropertytest', ['test txt'])
+ self.assertNotEqual(rec.dwTimeStamp, 0)
+
    def test_enum_zones_while_dnsProperty_zero_length(self):
        """EnumZones must not fail when a zone carries a zero-length
        dnsProperty value."""
        self.create_zone(self.zone)
        self.set_dnsProperty_zero_length(dnsp.DSPROPERTY_ZONE_ALLOW_UPDATE)
        client_version = dnsserver.DNS_CLIENT_VERSION_LONGHORN
        request_filter = dnsserver.DNS_ZONE_REQUEST_PRIMARY
        tid = dnsserver.DNSSRV_TYPEID_DWORD
        typeid, res = self.rpc_conn.DnssrvComplexOperation2(client_version,
                                                            0,
                                                            self.server_ip,
                                                            None,
                                                            'EnumZones',
                                                            tid,
                                                            request_filter)
+
+ def test_rpc_zone_update_while_dnsProperty_zero_length(self):
+ self.create_zone(self.zone)
+ self.set_dnsProperty_zero_length(dnsp.DSPROPERTY_ZONE_ALLOW_UPDATE)
+ self.set_params(AllowUpdate=dnsp.DNS_ZONE_UPDATE_SECURE)
+
+ def test_rpc_zone_update_while_other_dnsProperty_zero_length(self):
+ self.create_zone(self.zone)
+ self.set_dnsProperty_zero_length(dnsp.DSPROPERTY_ZONE_MASTER_SERVERS_DA)
+ self.set_params(AllowUpdate=dnsp.DNS_ZONE_UPDATE_SECURE)
+
+class TestRPCRoundtrip(DNSTest):
    def setUp(self):
        """Bind module-level fixtures and open a signed dnsserver RPC
        connection."""
        super().setUp()
        global server, server_ip, lp, creds
        self.server = server_name
        self.server_ip = server_ip
        self.lp = lp
        self.creds = creds
        self.rpc_conn = dnsserver.dnsserver("ncacn_ip_tcp:%s[sign]" %
                                            (self.server_ip),
                                            self.lp,
                                            self.creds)
+
+ def rpc_update(self, fqn=None, data=None, wType=None, delete=False):
+ fqn = fqn or ("rpctestrec." + self.get_dns_domain())
+
+ rec = record_from_string(wType, data)
+ add_rec_buf = dnsserver.DNS_RPC_RECORD_BUF()
+ add_rec_buf.rec = rec
+
+ add_arg = add_rec_buf
+ del_arg = None
+ if delete:
+ add_arg = None
+ del_arg = add_rec_buf
+
+ self.rpc_conn.DnssrvUpdateRecord2(
+ dnsserver.DNS_CLIENT_VERSION_LONGHORN,
+ 0,
+ self.server_ip,
+ self.get_dns_domain(),
+ fqn,
+ add_arg,
+ del_arg)
+
+ def test_rpc_self_referencing_cname(self):
+ cname = "cnametest2_unqual_rec_loop"
+ cname_fqn = "%s.%s" % (cname, self.get_dns_domain())
+
+ try:
+ self.rpc_update(fqn=cname, data=cname_fqn,
+ wType=dnsp.DNS_TYPE_CNAME, delete=True)
+ except WERRORError as e:
+ if e.args[0] != werror.WERR_DNS_ERROR_RECORD_DOES_NOT_EXIST:
+ self.fail("RPC DNS gaven wrong error on pre-test cleanup "
+ "for self referencing CNAME: %s" % e.args[0])
+
+ try:
+ self.rpc_update(fqn=cname, wType=dnsp.DNS_TYPE_CNAME, data=cname_fqn)
+ except WERRORError as e:
+ if e.args[0] != werror.WERR_DNS_ERROR_CNAME_LOOP:
+ self.fail("RPC DNS gaven wrong error on insertion of "
+ "self referencing CNAME: %s" % e.args[0])
+ return
+
+ self.fail("RPC DNS allowed insertion of self referencing CNAME")
+
    def test_update_add_txt_rpc_to_dns(self):
        """A TXT record added over RPC (with escaped quotes) must be
        visible to a DNS query with the unescaped strings."""
        prefix, txt = 'rpctextrec', ['"This is a test"']

        name = "%s.%s" % (prefix, self.get_dns_domain())

        rec = record_from_string(dnsp.DNS_TYPE_TXT, '"\\"This is a test\\""')
        add_rec_buf = dnsserver.DNS_RPC_RECORD_BUF()
        add_rec_buf.rec = rec
        try:
            self.rpc_conn.DnssrvUpdateRecord2(
                dnsserver.DNS_CLIENT_VERSION_LONGHORN,
                0,
                self.server_ip,
                self.get_dns_domain(),
                name,
                add_rec_buf,
                None)

        except WERRORError as e:
            self.fail(str(e))

        try:
            self.check_query_txt(prefix, txt)
        finally:
            # Delete the record again so the zone is left clean.
            self.rpc_conn.DnssrvUpdateRecord2(
                dnsserver.DNS_CLIENT_VERSION_LONGHORN,
                0,
                self.server_ip,
                self.get_dns_domain(),
                name,
                None,
                add_rec_buf)
+
    def test_update_add_null_padded_txt_record(self):
        """TXT records containing empty strings round-trip through DNS
        update/query and are visible via the RPC record listing."""
        prefix, txt = 'pad1textrec', ['"This is a test"', '', '']
        p = self.make_txt_update(prefix, txt)
        (response, response_packet) =\
            self.dns_transaction_udp(p, host=server_ip)
        self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
        self.check_query_txt(prefix, txt)
        # The RPC view shows the empty strings as "".
        self.assertIsNotNone(
            dns_record_match(self.rpc_conn,
                             self.server_ip,
                             self.get_dns_domain(),
                             "%s.%s" % (prefix, self.get_dns_domain()),
                             dnsp.DNS_TYPE_TXT,
                             '"\\"This is a test\\"" "" ""'))

        # Empty strings in the middle of the list.
        prefix, txt = 'pad2textrec', ['"This is a test"', '', '', 'more text']
        p = self.make_txt_update(prefix, txt)
        (response, response_packet) =\
            self.dns_transaction_udp(p, host=server_ip)
        self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
        self.check_query_txt(prefix, txt)
        self.assertIsNotNone(
            dns_record_match(
                self.rpc_conn,
                self.server_ip,
                self.get_dns_domain(),
                "%s.%s" % (prefix, self.get_dns_domain()),
                dnsp.DNS_TYPE_TXT,
                '"\\"This is a test\\"" "" "" "more text"'))

        # Empty strings at the front of the list.
        prefix, txt = 'pad3textrec', ['', '', '"This is a test"']
        p = self.make_txt_update(prefix, txt)
        (response, response_packet) =\
            self.dns_transaction_udp(p, host=server_ip)
        self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
        self.check_query_txt(prefix, txt)
        self.assertIsNotNone(
            dns_record_match(
                self.rpc_conn,
                self.server_ip,
                self.get_dns_domain(),
                "%s.%s" % (prefix, self.get_dns_domain()),
                dnsp.DNS_TYPE_TXT,
                '"" "" "\\"This is a test\\""'))
+
    def test_update_add_padding_rpc_to_dns(self):
        """TXT records containing empty strings, added over RPC, must be
        visible to DNS queries (trailing, middle and leading cases)."""
        prefix, txt = 'pad1textrec', ['"This is a test"', '', '']
        prefix = 'rpc' + prefix
        name = "%s.%s" % (prefix, self.get_dns_domain())

        rec = record_from_string(dnsp.DNS_TYPE_TXT,
                                 '"\\"This is a test\\"" "" ""')
        add_rec_buf = dnsserver.DNS_RPC_RECORD_BUF()
        add_rec_buf.rec = rec
        try:
            self.rpc_conn.DnssrvUpdateRecord2(
                dnsserver.DNS_CLIENT_VERSION_LONGHORN,
                0,
                self.server_ip,
                self.get_dns_domain(),
                name,
                add_rec_buf,
                None)

        except WERRORError as e:
            self.fail(str(e))

        try:
            self.check_query_txt(prefix, txt)
        finally:
            # Always delete the record so the zone is left clean.
            self.rpc_conn.DnssrvUpdateRecord2(
                dnsserver.DNS_CLIENT_VERSION_LONGHORN,
                0,
                self.server_ip,
                self.get_dns_domain(),
                name,
                None,
                add_rec_buf)

        # Empty strings in the middle of the list.
        prefix, txt = 'pad2textrec', ['"This is a test"', '', '', 'more text']
        prefix = 'rpc' + prefix
        name = "%s.%s" % (prefix, self.get_dns_domain())

        rec = record_from_string(dnsp.DNS_TYPE_TXT,
                                 '"\\"This is a test\\"" "" "" "more text"')
        add_rec_buf = dnsserver.DNS_RPC_RECORD_BUF()
        add_rec_buf.rec = rec
        try:
            self.rpc_conn.DnssrvUpdateRecord2(
                dnsserver.DNS_CLIENT_VERSION_LONGHORN,
                0,
                self.server_ip,
                self.get_dns_domain(),
                name,
                add_rec_buf,
                None)

        except WERRORError as e:
            self.fail(str(e))

        try:
            self.check_query_txt(prefix, txt)
        finally:
            self.rpc_conn.DnssrvUpdateRecord2(
                dnsserver.DNS_CLIENT_VERSION_LONGHORN,
                0,
                self.server_ip,
                self.get_dns_domain(),
                name,
                None,
                add_rec_buf)

        # Empty strings at the front of the list.
        prefix, txt = 'pad3textrec', ['', '', '"This is a test"']
        prefix = 'rpc' + prefix
        name = "%s.%s" % (prefix, self.get_dns_domain())

        rec = record_from_string(dnsp.DNS_TYPE_TXT,
                                 '"" "" "\\"This is a test\\""')
        add_rec_buf = dnsserver.DNS_RPC_RECORD_BUF()
        add_rec_buf.rec = rec
        try:
            self.rpc_conn.DnssrvUpdateRecord2(
                dnsserver.DNS_CLIENT_VERSION_LONGHORN,
                0,
                self.server_ip,
                self.get_dns_domain(),
                name,
                add_rec_buf,
                None)
        except WERRORError as e:
            self.fail(str(e))

        try:
            self.check_query_txt(prefix, txt)
        finally:
            self.rpc_conn.DnssrvUpdateRecord2(
                dnsserver.DNS_CLIENT_VERSION_LONGHORN,
                0,
                self.server_ip,
                self.get_dns_domain(),
                name,
                None,
                add_rec_buf)
+
    # Test is incomplete due to strlen against txt records
    def test_update_add_null_char_txt_record(self):
        """TXT strings containing a NUL byte are truncated at the NUL:
        the query and the RPC listing both show only 'NULL'."""
        prefix, txt = 'nulltextrec', ['NULL\x00BYTE']
        p = self.make_txt_update(prefix, txt)
        (response, response_packet) =\
            self.dns_transaction_udp(p, host=server_ip)
        self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
        self.check_query_txt(prefix, ['NULL'])
        self.assertIsNotNone(dns_record_match(self.rpc_conn, self.server_ip,
                                              self.get_dns_domain(),
                                              "%s.%s" % (prefix, self.get_dns_domain()),
                                              dnsp.DNS_TYPE_TXT, '"NULL"'))

        # Same with two strings in one record.
        prefix, txt = 'nulltextrec2', ['NULL\x00BYTE', 'NULL\x00BYTE']
        p = self.make_txt_update(prefix, txt)
        (response, response_packet) =\
            self.dns_transaction_udp(p, host=server_ip)
        self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
        self.check_query_txt(prefix, ['NULL', 'NULL'])
        self.assertIsNotNone(dns_record_match(self.rpc_conn, self.server_ip,
                                              self.get_dns_domain(),
                                              "%s.%s" % (prefix, self.get_dns_domain()),
                                              dnsp.DNS_TYPE_TXT, '"NULL" "NULL"'))
+
    def test_update_add_null_char_rpc_to_dns(self):
        """A TXT record with an embedded NUL added over RPC is truncated
        at the NUL when queried over DNS."""
        prefix = 'rpcnulltextrec'
        name = "%s.%s" % (prefix, self.get_dns_domain())

        rec = record_from_string(dnsp.DNS_TYPE_TXT, '"NULL\x00BYTE"')
        add_rec_buf = dnsserver.DNS_RPC_RECORD_BUF()
        add_rec_buf.rec = rec
        try:
            self.rpc_conn.DnssrvUpdateRecord2(
                dnsserver.DNS_CLIENT_VERSION_LONGHORN,
                0,
                self.server_ip,
                self.get_dns_domain(),
                name,
                add_rec_buf,
                None)

        except WERRORError as e:
            self.fail(str(e))

        try:
            self.check_query_txt(prefix, ['NULL'])
        finally:
            # Always delete the record so the zone is left clean.
            self.rpc_conn.DnssrvUpdateRecord2(
                dnsserver.DNS_CLIENT_VERSION_LONGHORN,
                0,
                self.server_ip,
                self.get_dns_domain(),
                name,
                None,
                add_rec_buf)
+
    def test_update_add_hex_char_txt_record(self):
        """A TXT string containing a high (0xFF) byte round-trips intact
        through DNS update, query and the RPC listing."""
        prefix, txt = 'hextextrec', ['HIGH\xFFBYTE']
        p = self.make_txt_update(prefix, txt)
        (response, response_packet) =\
            self.dns_transaction_udp(p, host=server_ip)
        self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
        self.check_query_txt(prefix, txt)
        self.assertIsNotNone(dns_record_match(self.rpc_conn, self.server_ip,
                                              self.get_dns_domain(),
                                              "%s.%s" % (prefix, self.get_dns_domain()),
                                              dnsp.DNS_TYPE_TXT, '"HIGH\xFFBYTE"'))
+
    def test_update_add_hex_rpc_to_dns(self):
        """A TXT string with a high (0xFF) byte added over RPC must be
        returned unchanged by a DNS query."""
        prefix, txt = 'hextextrec', ['HIGH\xFFBYTE']
        prefix = 'rpc' + prefix
        name = "%s.%s" % (prefix, self.get_dns_domain())

        rec = record_from_string(dnsp.DNS_TYPE_TXT, '"HIGH\xFFBYTE"')
        add_rec_buf = dnsserver.DNS_RPC_RECORD_BUF()
        add_rec_buf.rec = rec
        try:
            self.rpc_conn.DnssrvUpdateRecord2(
                dnsserver.DNS_CLIENT_VERSION_LONGHORN,
                0,
                self.server_ip,
                self.get_dns_domain(),
                name,
                add_rec_buf,
                None)

        except WERRORError as e:
            self.fail(str(e))

        try:
            self.check_query_txt(prefix, txt)
        finally:
            # Always delete the record so the zone is left clean.
            self.rpc_conn.DnssrvUpdateRecord2(
                dnsserver.DNS_CLIENT_VERSION_LONGHORN,
                0,
                self.server_ip,
                self.get_dns_domain(),
                name,
                None,
                add_rec_buf)
+
+    def test_update_add_slash_txt_record(self):
+        "test adding a TXT record containing a backslash works"
+        prefix, txt = 'slashtextrec', ['Th\\=is=is a test']
+        p = self.make_txt_update(prefix, txt)
+        (response, response_packet) =\
+            self.dns_transaction_udp(p, host=server_ip)
+        self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
+        self.check_query_txt(prefix, txt)
+        # the stored form escapes the backslash (hence \\\\ in this literal)
+        self.assertIsNotNone(dns_record_match(self.rpc_conn, self.server_ip,
+                             self.get_dns_domain(),
+                             "%s.%s" % (prefix, self.get_dns_domain()),
+                             dnsp.DNS_TYPE_TXT, '"Th\\\\=is=is a test"'))
+
+    # This test fails against Windows as it eliminates slashes in RPC
+    # One typical use for a slash is in records like 'var=value' to
+    # escape '=' characters.
+    def test_update_add_slash_rpc_to_dns(self):
+        """Add a TXT record containing a backslash over RPC and check a
+        DNS query sees the backslash preserved."""
+        prefix, txt = 'slashtextrec', ['Th\\=is=is a test']
+        prefix = 'rpc' + prefix
+        name = "%s.%s" % (prefix, self.get_dns_domain())
+
+        rec = record_from_string(dnsp.DNS_TYPE_TXT, '"Th\\\\=is=is a test"')
+        add_rec_buf = dnsserver.DNS_RPC_RECORD_BUF()
+        add_rec_buf.rec = rec
+        try:
+            self.rpc_conn.DnssrvUpdateRecord2(
+                dnsserver.DNS_CLIENT_VERSION_LONGHORN,
+                0,
+                self.server_ip,
+                self.get_dns_domain(),
+                name,
+                add_rec_buf,
+                None)
+
+        except WERRORError as e:
+            self.fail(str(e))
+
+        try:
+            self.check_query_txt(prefix, txt)
+
+        finally:
+            # delete the record again, whether or not the check passed
+            self.rpc_conn.DnssrvUpdateRecord2(
+                dnsserver.DNS_CLIENT_VERSION_LONGHORN,
+                0,
+                self.server_ip,
+                self.get_dns_domain(),
+                name,
+                None,
+                add_rec_buf)
+
+    def test_update_add_two_txt_records(self):
+        "test adding two txt records works"
+        prefix, txt = 'textrec2', ['"This is a test"',
+                                   '"and this is a test, too"']
+        p = self.make_txt_update(prefix, txt)
+        (response, response_packet) =\
+            self.dns_transaction_udp(p, host=server_ip)
+        self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
+        self.check_query_txt(prefix, txt)
+        # the stored form escapes the embedded double quotes
+        self.assertIsNotNone(dns_record_match(self.rpc_conn, self.server_ip,
+                             self.get_dns_domain(),
+                             "%s.%s" % (prefix, self.get_dns_domain()),
+                             dnsp.DNS_TYPE_TXT, '"\\"This is a test\\""' +
+                             ' "\\"and this is a test, too\\""'))
+
+    def test_update_add_two_rpc_to_dns(self):
+        """Add a two-string TXT record over RPC and check a DNS query
+        sees both strings with the quotes intact."""
+        prefix, txt = 'textrec2', ['"This is a test"',
+                                   '"and this is a test, too"']
+        prefix = 'rpc' + prefix
+        name = "%s.%s" % (prefix, self.get_dns_domain())
+
+        rec = record_from_string(dnsp.DNS_TYPE_TXT,
+                                 '"\\"This is a test\\""' +
+                                 ' "\\"and this is a test, too\\""')
+        add_rec_buf = dnsserver.DNS_RPC_RECORD_BUF()
+        add_rec_buf.rec = rec
+        try:
+            self.rpc_conn.DnssrvUpdateRecord2(
+                dnsserver.DNS_CLIENT_VERSION_LONGHORN,
+                0,
+                self.server_ip,
+                self.get_dns_domain(),
+                name,
+                add_rec_buf,
+                None)
+
+        except WERRORError as e:
+            self.fail(str(e))
+
+        try:
+            self.check_query_txt(prefix, txt)
+        finally:
+            # delete the record again, whether or not the check passed
+            self.rpc_conn.DnssrvUpdateRecord2(
+                dnsserver.DNS_CLIENT_VERSION_LONGHORN,
+                0,
+                self.server_ip,
+                self.get_dns_domain(),
+                name,
+                None,
+                add_rec_buf)
+
+    def test_update_add_empty_txt_records(self):
+        "test adding an empty txt record works"
+        prefix, txt = 'emptytextrec', []
+        p = self.make_txt_update(prefix, txt)
+        (response, response_packet) =\
+            self.dns_transaction_udp(p, host=server_ip)
+        self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
+        self.check_query_txt(prefix, txt)
+        # an empty record matches the empty data string
+        self.assertIsNotNone(dns_record_match(self.rpc_conn, self.server_ip,
+                             self.get_dns_domain(),
+                             "%s.%s" % (prefix, self.get_dns_domain()),
+                             dnsp.DNS_TYPE_TXT, ''))
+
+    def test_update_add_empty_rpc_to_dns(self):
+        """Add an empty TXT record over RPC and check a DNS query sees
+        an empty record."""
+        prefix, txt = 'rpcemptytextrec', []
+
+        name = "%s.%s" % (prefix, self.get_dns_domain())
+
+        rec = record_from_string(dnsp.DNS_TYPE_TXT, '')
+        add_rec_buf = dnsserver.DNS_RPC_RECORD_BUF()
+        add_rec_buf.rec = rec
+        try:
+            self.rpc_conn.DnssrvUpdateRecord2(
+                dnsserver.DNS_CLIENT_VERSION_LONGHORN,
+                0,
+                self.server_ip,
+                self.get_dns_domain(),
+                name,
+                add_rec_buf,
+                None)
+        except WERRORError as e:
+            self.fail(str(e))
+
+        try:
+            self.check_query_txt(prefix, txt)
+        finally:
+            # delete the record again, whether or not the check passed
+            self.rpc_conn.DnssrvUpdateRecord2(
+                dnsserver.DNS_CLIENT_VERSION_LONGHORN,
+                0,
+                self.server_ip,
+                self.get_dns_domain(),
+                name,
+                None,
+                add_rec_buf)
+
+
+# run the tests, reporting through subunit
+TestProgram(module=__name__, opts=subunitopts)
diff --git a/python/samba/tests/dns_aging.py b/python/samba/tests/dns_aging.py
new file mode 100644
index 0000000..35d0e5c
--- /dev/null
+++ b/python/samba/tests/dns_aging.py
@@ -0,0 +1,2777 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Kai Blin <kai@samba.org> 2011
+# Copyright (C) Catalyst.NET 2021
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import sys
+from samba import dsdb
+from samba import dsdb_dns
+from samba.ndr import ndr_unpack, ndr_pack
+from samba.samdb import SamDB
+from samba.auth import system_session
+import ldb
+from samba import credentials
+from samba.dcerpc import dns, dnsp, dnsserver
+from samba.dnsserver import TXTRecord, ARecord
+from samba.dnsserver import ipv6_normalise
+from samba.tests.subunitrun import SubunitOptions, TestProgram
+from samba import werror, WERRORError
+from samba.tests.dns_base import DNSTest
+import samba.getopt as options
+import optparse
+import time
+from samba.colour import c_RED, c_GREEN, c_DARK_YELLOW
+
+parser = optparse.OptionParser(
+    "dns_aging.py <server name> <server ip> [options]")
+sambaopts = options.SambaOptions(parser)
+parser.add_option_group(sambaopts)
+
+
+# use command line creds if available
+credopts = options.CredentialsOptions(parser)
+parser.add_option_group(credopts)
+subunitopts = SubunitOptions(parser)
+parser.add_option_group(subunitopts)
+
+opts, args = parser.parse_args()
+if len(args) < 2:
+    parser.print_usage()
+    sys.exit(1)
+
+LP = sambaopts.get_loadparm()
+CREDS = credopts.get_credentials(LP)
+SERVER_NAME = args[0]
+SERVER_IP = args[1]
+CREDS.set_krb_forwardable(credentials.NO_KRB_FORWARDABLE)
+
+DOMAIN = CREDS.get_realm().lower()
+
+# DNS dwTimeStamp values count hours since 1601; the Unix epoch (1970)
+# is roughly 24 * 365.25 * 369 hours after that.
+# These are ballpark extremes for the timestamp.
+DNS_TIMESTAMP_1970 = 3234654
+DNS_TIMESTAMP_2101 = 4383000
+DNS_TIMESTAMP_1981 = 3333333 # a middling timestamp
+
+# addresses used as record data in the tests; the *_2 variants give a
+# second distinct value of each family
+IPv4_ADDR = "127.0.0.33"
+IPv6_ADDR = "::1"
+IPv4_ADDR_2 = "127.0.0.66"
+IPv6_ADDR_2 = "1::1"
+
+def get_samdb():
+    """Connect to the server's SamDB over LDAP, as the test user."""
+    return SamDB(url=f"ldap://{SERVER_IP}",
+                 lp=LP,
+                 session_info=system_session(),
+                 credentials=CREDS)
+
+
+def get_file_samdb():
+    """Open the sam database directly via its local file URL."""
+    # For Samba only direct file access, needed for the tombstoning functions.
+    # (For Windows, we instruct it to tombstone over RPC).
+    return SamDB(url=LP.samdb_url(),
+                 lp=LP,
+                 session_info=system_session(),
+                 credentials=CREDS)
+
+
+def get_rpc():
+    """Make a signed dnsserver RPC connection to the server."""
+    return dnsserver.dnsserver(f"ncacn_ip_tcp:{SERVER_IP}[sign]", LP, CREDS)
+
+
+def create_zone(name, rpc=None, aging=True):
+    """Create a DS-integrated primary zone over RPC.
+
+    :param name: the zone name
+    :param rpc: an existing dnsserver connection (one is made if None)
+    :param aging: whether aging is enabled on the new zone
+    """
+    if rpc is None:
+        rpc = get_rpc()
+    z = dnsserver.DNS_RPC_ZONE_CREATE_INFO_LONGHORN()
+    z.pszZoneName = name
+    z.dwZoneType = dnsp.DNS_ZONE_TYPE_PRIMARY
+    z.fAging = int(bool(aging))
+    z.dwDpFlags = dnsserver.DNS_DP_DOMAIN_DEFAULT
+    z.fDsIntegrated = 1
+    z.fLoadExisting = 1
+    # unsecure updates, so the tests don't need secure-update machinery
+    z.fAllowUpdate = dnsp.DNS_ZONE_UPDATE_UNSECURE
+    rpc.DnssrvOperation2(dnsserver.DNS_CLIENT_VERSION_LONGHORN,
+                         0,
+                         SERVER_IP,
+                         None,
+                         0,
+                         'ZoneCreate',
+                         dnsserver.DNSSRV_TYPEID_ZONE_CREATE,
+                         z)
+
+
+def delete_zone(name, rpc=None):
+    """Delete the named zone from the directory over RPC."""
+    if rpc is None:
+        rpc = get_rpc()
+    rpc.DnssrvOperation2(dnsserver.DNS_CLIENT_VERSION_LONGHORN,
+                         0,
+                         SERVER_IP,
+                         name,
+                         0,
+                         'DeleteZoneFromDs',
+                         dnsserver.DNSSRV_TYPEID_NULL,
+                         None)
+
+
+def txt_s_list(txt):
+    """Construct a txt record string list, which is a fiddly matter.
+
+    A bare string is treated as a single-element list.
+    """
+    if isinstance(txt, str):
+        txt = [txt]
+    s_list = dnsp.string_list()
+    s_list.count = len(txt)
+    s_list.str = txt
+    return s_list
+
+
+def make_txt_record(txt):
+    """Wrap a string (or list of strings) in a dns.txt_record."""
+    r = dns.txt_record()
+    r.txt = txt_s_list(txt)
+    return r
+
+
+def copy_rec(rec):
+    """Make a field-by-field copy of a DNS_RPC_RECORD."""
+    copy = dnsserver.DNS_RPC_RECORD()
+    copy.wType = rec.wType
+    copy.dwFlags = rec.dwFlags
+    copy.dwSerial = rec.dwSerial
+    copy.dwTtlSeconds = rec.dwTtlSeconds
+    copy.data = rec.data
+    copy.dwTimeStamp = rec.dwTimeStamp
+    return copy
+
+
+def guess_wtype(data):
+    """Guess a record type from the shape of the data, returning a
+    (data, wtype) tuple: a list becomes a TXT record, a string with a
+    colon is taken as AAAA, anything else as A."""
+    if isinstance(data, list):
+        data = make_txt_record(data)
+        return (data, dnsp.DNS_TYPE_TXT)
+    if ":" in data:
+        return (data, dnsp.DNS_TYPE_AAAA)
+    return (data, dnsp.DNS_TYPE_A)
+
+
+class TestDNSAging(DNSTest):
+ """Probe DNS aging and scavenging, using LDAP and RPC to set and test
+ the timestamps behind DNS's back."""
+ server = SERVER_NAME
+ server_ip = SERVER_IP
+ creds = CREDS
+
+    def setUp(self):
+        """Make RPC and LDAP connections and a per-test zone."""
+        super().setUp()
+        self.rpc_conn = get_rpc()
+        self.samdb = get_samdb()
+
+        # We always have a zone of our own named after the test function.
+        self.zone = self.id().rsplit('.', 1)[1]
+        # cleanup is registered before creation, so a half-made zone is
+        # still removed
+        self.addCleanup(delete_zone, self.zone, self.rpc_conn)
+        try:
+            create_zone(self.zone, self.rpc_conn)
+        except WERRORError as e:
+            if e.args[0] != werror.WERR_DNS_ERROR_ZONE_ALREADY_EXISTS:
+                raise
+            print(f"zone {self.zone} already exists")
+
+        # Though we set this in create_zone(), that doesn't work on
+        # Windows, so we repeat again here.
+        self.set_zone_int_params(AllowUpdate=dnsp.DNS_ZONE_UPDATE_UNSECURE)
+
+        self.zone_dn = (f"DC={self.zone},CN=MicrosoftDNS,DC=DomainDNSZones,"
+                        f"{self.samdb.get_default_basedn()}")
+
+    def set_zone_int_params(self, zone=None, **kwargs):
+        """Keyword arguments set parameters on the zone. e.g.:
+
+        self.set_zone_int_params(Aging=1,
+                                 RefreshInterval=222)
+
+        See [MS-DNSP] 3.1.1.2.1 "DNS Zone Integer Properties" for names.
+
+        Defaults to self.zone when no zone is given.
+        """
+        if zone is None:
+            zone = self.zone
+        # one ResetDwordProperty RPC call per keyword
+        for key, val in kwargs.items():
+            name_param = dnsserver.DNS_RPC_NAME_AND_PARAM()
+            name_param.dwParam = val
+            name_param.pszNodeName = key
+            try:
+                self.rpc_conn.DnssrvOperation2(
+                    dnsserver.DNS_CLIENT_VERSION_LONGHORN,
+                    0,
+                    SERVER_IP,
+                    zone,
+                    0,
+                    'ResetDwordProperty',
+                    dnsserver.DNSSRV_TYPEID_NAME_AND_PARAM,
+                    name_param)
+            except WERRORError as e:
+                # turn an RPC error into a test failure
+                self.fail(str(e))
+
+    def rpc_replace(self, name, old=None, new=None):
+        """Replace a DNS_RPC_RECORD or DNS_RPC_RECORD_BUF.
+
+        With old=None this adds; with new=None it deletes. An RPC error
+        fails the test.
+        """
+        # wrap our recs, if necessary
+        if isinstance(new, dnsserver.DNS_RPC_RECORD):
+            rec = new
+            new = dnsserver.DNS_RPC_RECORD_BUF()
+            new.rec = rec
+
+        if isinstance(old, dnsserver.DNS_RPC_RECORD):
+            rec = old
+            old = dnsserver.DNS_RPC_RECORD_BUF()
+            old.rec = rec
+
+        try:
+            self.rpc_conn.DnssrvUpdateRecord2(
+                dnsserver.DNS_CLIENT_VERSION_LONGHORN,
+                0,
+                SERVER_IP,
+                self.zone,
+                name,
+                new,
+                old)
+        except WERRORError as e:
+            self.fail(f"could not replace record ({e})")
+
+    def get_unique_txt_record(self, name, txt):
+        """Get the TXT record on Name with value txt, asserting that there is
+        only one.
+
+        Returns None when no such record exists.
+        """
+        if isinstance(txt, str):
+            txt = [txt]
+        recs = self.ldap_get_records(name)
+
+        match = None
+        for r in recs:
+            if r.wType != dnsp.DNS_TYPE_TXT:
+                continue
+            txt2 = [x for x in r.data.str]
+            if txt2 == txt:
+                # a second match would mean a duplicate record
+                self.assertIsNone(match)
+                match = r
+        return match
+
+    def get_unique_ip_record(self, name, addr, wtype=None):
+        """Get an A or AAAA record on name with the matching data.
+
+        Asserts there is at most one match; returns None if there is none.
+        """
+        if wtype is None:
+            addr, wtype = guess_wtype(addr)
+
+        recs = self.ldap_get_records(name)
+
+        # We need to use the internal dns_record_match because not all
+        # forms always match on strings (e.g. IPv6)
+        rec = dnsp.DnssrvRpcRecord()
+        rec.wType = wtype
+        rec.data = addr
+
+        match = None
+        for r in recs:
+            if dsdb_dns.records_match(r, rec):
+                # a second match would mean a duplicate record
+                self.assertIsNone(match)
+                match = r
+        return match
+
+    def dns_query(self, name, qtype=dns.DNS_QTYPE_ALL):
+        """make a query, which might help Windows notice LDAP changes"""
+        p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
+        # name is relative to the test zone
+        fullname = "%s.%s" % (name, self.zone)
+        q = self.make_name_question(fullname, qtype, dns.DNS_QCLASS_IN)
+        self.finish_name_packet(p, [q])
+        r, rp = self.dns_transaction_udp(p, host=SERVER_IP)
+
+        return r
+
+    def dns_update_non_text(self, name,
+                            data,
+                            wtype=None,
+                            qclass=dns.DNS_QCLASS_IN):
+        """Send a DNS UPDATE for a non-TXT record and assert it succeeds.
+
+        With qclass IN this adds/replaces (ttl 123); the delete helpers
+        below use qclass NONE/ANY with a zero ttl.
+        """
+        if wtype is None:
+            data, wtype = guess_wtype(data)
+
+        if qclass == dns.DNS_QCLASS_IN:
+            ttl = 123
+        else:
+            ttl = 0
+
+        fullname = "%s.%s" % (name, self.zone)
+        p = self.make_name_packet(dns.DNS_OPCODE_UPDATE)
+        u = self.make_name_question(self.zone,
+                                    dns.DNS_QTYPE_SOA,
+                                    dns.DNS_QCLASS_IN)
+        self.finish_name_packet(p, [u])
+
+        r = dns.res_rec()
+        r.name = fullname
+        r.rr_type = wtype
+        r.rr_class = qclass
+        r.ttl = ttl
+        if data is not None:
+            r.length = 0xffff
+            r.rdata = data
+        else:
+            # no rdata, e.g. for a whole-type delete
+            r.length = 0
+
+        p.nscount = 1
+        p.nsrecs = [r]
+
+        (code, response) = self.dns_transaction_udp(p, host=SERVER_IP)
+        self.assert_dns_rcode_equals(code, dns.DNS_RCODE_OK)
+        return response
+
+    def dns_delete(self, name, data, wtype=None):
+        """Delete one specific record via DNS update (qclass NONE)."""
+        return self.dns_update_non_text(name,
+                                        data,
+                                        wtype,
+                                        qclass=dns.DNS_QCLASS_NONE)
+
+    def dns_delete_type(self, name, wtype):
+        """Delete all records of a type via DNS update (qclass ANY)."""
+        return self.dns_update_non_text(name,
+                                        None,
+                                        wtype,
+                                        qclass=dns.DNS_QCLASS_ANY)
+
+    def dns_update_record(self, name, txt, ttl=900):
+        """Add/replace a TXT record via DNS update and return the stored
+        record (as found over LDAP)."""
+        if isinstance(txt, str):
+            txt = [txt]
+        p = self.make_txt_update(name, txt, self.zone, ttl=ttl)
+        (code, response) = self.dns_transaction_udp(p, host=SERVER_IP)
+        if code.operation & dns.DNS_RCODE == dns.DNS_RCODE_REFUSED:
+            # sometimes you might forget this
+            print("\n\ngot DNS_RCODE_REFUSED\n")
+            print("Are you running this in the fl2003 environment?\n")
+            print("try `SELFTEST_TESTENV='fl2003dc:local' make testenv`\n\n")
+
+        self.assert_dns_rcode_equals(code, dns.DNS_RCODE_OK)
+        return self.get_unique_txt_record(name, txt)
+
+    def rpc_update_record(self, name, txt, **kwargs):
+        """Add the record that self.dns_update_record() would add, via the
+        dnsserver RPC pipe.
+
+        As with DNS update, if the record already exists, we replace it.
+        Extra keyword arguments are set as attributes on the new record.
+        """
+        if isinstance(txt, str):
+            txt = [txt]
+
+        old = TXTRecord(txt)
+        rec = TXTRecord(txt)
+        for k, v in kwargs.items():
+            setattr(rec, k, v)
+
+        try:
+            self.rpc_replace(name, old, rec)
+        except AssertionError as e:
+            # rpc_replace() has caught and wrapped the WERRORError inside
+            if 'WERR_DNS_ERROR_RECORD_DOES_NOT_EXIST' not in str(e):
+                raise
+            # the record didn't exist yet: plain add instead of replace
+            self.rpc_replace(name, None, rec)
+
+        return self.get_unique_txt_record(name, txt)
+
+    def rpc_delete_txt(self, name, txt):
+        """Delete the matching TXT record via the dnsserver RPC pipe."""
+        if isinstance(txt, str):
+            txt = [txt]
+        old = TXTRecord(txt)
+        self.rpc_replace(name, old, None)
+
+    def get_one_node(self, name):
+        """Fetch the dnsNode LDAP object for name, or None if there is
+        none. More than one node is a test failure."""
+        self.assertIsInstance(name, str)
+        expr = f"(&(objectClass=dnsNode)(name={name}))"
+        nodes = self.samdb.search(base=self.zone_dn,
+                                  scope=ldb.SCOPE_SUBTREE,
+                                  expression=expr,
+                                  attrs=["dnsRecord", "dNSTombstoned", "name"])
+
+        if len(nodes) > 1:
+            self.fail(
+                f"expected 0 or 1 dnsNodes for {name}, found {len(nodes)}")
+
+        if len(nodes) == 0:
+            return None
+        return nodes[0]
+
+    def ldap_get_records(self, name):
+        """Return the node's dnsRecord values unpacked into
+        DnssrvRpcRecord objects ([] when the node doesn't exist)."""
+        node = self.get_one_node(name)
+        if node is None:
+            return []
+
+        records = node.get('dnsRecord')
+        return [ndr_unpack(dnsp.DnssrvRpcRecord, r) for r in records]
+
+    def ldap_get_non_tombstoned_records(self, name):
+        """Like ldap_get_records(), but with tombstone records filtered
+        out."""
+        all_records = self.ldap_get_records(name)
+        records = []
+        for r in all_records:
+            if r.wType != dnsp.DNS_TYPE_TOMBSTONE:
+                records.append(r)
+        return records
+
+ def assert_tombstoned(self, name, tombstoned=True, timestamp=None):
+ # If run with tombstoned=False, assert it isn't tombstoned
+ # (and has no traces of tombstone). Otherwise assert it has
+ # all the necessary bits.
+ #
+ # with timestamp=<non-zero number of hours>, we assert that
+ # the nttime timestamp is about that time.
+ #
+ # with timestamp=None, we assert it is within a century or so.
+ #
+ # with timestamp=False (or 0), we don't assert on it.
+
+ node = self.get_one_node(name)
+ if node is None:
+ self.fail(f"no node named {name}")
+
+ dnsts = node.get("dNSTombstoned")
+ if dnsts is None:
+ is_tombstoned = False
+ else:
+ self.assertEqual(len(dnsts), 1)
+ if dnsts[0] == b'TRUE':
+ is_tombstoned = True
+ else:
+ is_tombstoned = False
+
+ if tombstoned != is_tombstoned:
+ if is_tombstoned:
+ self.fail(f"{name} is tombstoned")
+ else:
+ self.fail(f"{name} is not tombstoned")
+
+ recs = self.ldap_get_records(name)
+ if is_tombstoned:
+ self.assertEqual(len(recs), 1)
+ self.assertEqual(recs[0].wType, dnsp.DNS_TYPE_TOMBSTONE)
+ if timestamp is None:
+ self.assert_nttime_in_hour_range(recs[0].data)
+ elif timestamp:
+ self.assert_nttime_in_hour_range(recs[0].data,
+ timestamp - 3,
+ timestamp + 3)
+
+ else:
+ for r in recs:
+ self.assertNotEqual(recs[0].wType, dnsp.DNS_TYPE_TOMBSTONE)
+
+    def ldap_replace_records(self, name, records):
+        """Set the node's dnsRecord attribute to exactly these records,
+        creating the node if necessary."""
+        # We use raw ldap to avoid the "helpfulness" of dsdb_dns.replace()
+
+        dn = f'DC={name},{self.zone_dn}'
+
+        msg = ldb.Message.from_dict(self.samdb,
+                                    {'dn': dn,
+                                     'dnsRecord': [ndr_pack(r) for r in records]
+                                     },
+                                    ldb.FLAG_MOD_REPLACE)
+
+        try:
+            self.samdb.modify(msg)
+        except ldb.LdbError as e:
+            if 'LDAP_NO_SUCH_OBJECT' not in e.args[1]:
+                raise
+            # We need to do an add
+            msg["objectClass"] = ["top", "dnsNode"]
+            msg["dnsRecord"].set_flags(ldb.FLAG_MOD_ADD)
+            self.samdb.add(msg)
+
+    def ldap_update_core(self, name, wtype, data, **kwargs):
+        """This one is not TXT specific.
+
+        Builds a record with default rank/ttl/serial/timestamp (overridable
+        via keyword arguments), replaces any matching record on the node or
+        appends it, and returns the record that was written.
+        """
+        records = self.ldap_get_records(name)
+
+        # default values
+        rec = dnsp.DnssrvRpcRecord()
+        rec.wType = wtype
+        rec.rank = dnsp.DNS_RANK_ZONE
+        rec.dwTtlSeconds = 900
+        rec.dwSerial = 110
+        rec.dwTimeStamp = 0
+        rec.data = data
+
+        # override defaults, as required
+        for k, v in kwargs.items():
+            setattr(rec, k, v)
+
+        for i, r in enumerate(records[:]):
+            if dsdb_dns.records_match(r, rec):
+                records[i] = rec
+                break
+        else:  # record not found
+            records.append(rec)
+
+        self.ldap_replace_records(name, records)
+        return rec
+
+    def ldap_update_record(self, name, txt, **kwargs):
+        """Add the record that self.dns_update_record() would add, via ldap,
+        thus allowing us to set additional dnsRecord features like
+        dwTimestamp.
+
+        Returns the record as re-read from LDAP, after asserting it
+        round-tripped intact.
+        """
+        rec = self.ldap_update_core(name,
+                                    dnsp.DNS_TYPE_TXT,
+                                    txt_s_list(txt),
+                                    **kwargs)
+
+        recs = self.ldap_get_records(name)
+        match = None
+        for r in recs:
+            if r.wType != rec.wType:
+                continue
+            if r.data.str == rec.data.str:
+                self.assertIsNone(match, f"duplicate records for {name}")
+                match = r
+        # rank is stored as a single byte, hence the & 255
+        self.assertEqual(match.rank, rec.rank & 255)
+        self.assertEqual(match.dwTtlSeconds, rec.dwTtlSeconds)
+        self.assert_timestamps_equal(match.dwTimeStamp, rec.dwTimeStamp)
+        return match
+
+    def ldap_delete_record(self, name, data, wtype=dnsp.DNS_TYPE_TXT):
+        """Remove the matching record from the node over LDAP; fail the
+        test if it isn't there."""
+        rec = dnsp.DnssrvRpcRecord()
+        if wtype == dnsp.DNS_TYPE_TXT:
+            data = txt_s_list(data)
+
+        rec.wType = wtype
+        rec.data = data
+        records = self.ldap_get_records(name)
+        for i, r in enumerate(records[:]):
+            if dsdb_dns.records_match(r, rec):
+                del records[i]
+                break
+        else:
+            self.fail(f"record {data} not found")
+
+        self.ldap_replace_records(name, records)
+
+    def add_ip_record(self, name, addr, wtype=None, **kwargs):
+        """Add an A/AAAA record over LDAP (type guessed from addr if not
+        given) and return it as re-read from LDAP."""
+        if wtype is None:
+            addr, wtype = guess_wtype(addr)
+        rec = self.ldap_update_core(name,
+                                    wtype,
+                                    addr,
+                                    **kwargs)
+
+        recs = self.ldap_get_records(name)
+        match = None
+        for r in recs:
+            if dsdb_dns.records_match(r, rec):
+                self.assertIsNone(match, f"duplicate records for {name}")
+                match = r
+        # rank is stored as a single byte, hence the & 255
+        self.assertEqual(match.rank, rec.rank & 255)
+        self.assertEqual(match.dwTtlSeconds, rec.dwTtlSeconds)
+        self.assert_timestamps_equal(match.dwTimeStamp, rec.dwTimeStamp)
+        return match
+
+    def ldap_modify_timestamps(self, name, delta):
+        """Shift every record's dwTimeStamp on the node by delta hours."""
+        records = self.ldap_get_records(name)
+        for rec in records:
+            rec.dwTimeStamp += delta
+        self.ldap_replace_records(name, records)
+
+    def get_rpc_records(self, name, dns_type=None):
+        """Enumerate the node's records via the dnsserver RPC pipe,
+        defaulting to all types; returns a flat list."""
+        if dns_type is None:
+            dns_type = dnsp.DNS_TYPE_ALL
+        select_flags = dnsserver.DNS_RPC_VIEW_AUTHORITY_DATA
+        buflen, res = self.rpc_conn.DnssrvEnumRecords2(
+            dnsserver.DNS_CLIENT_VERSION_LONGHORN,
+            0,
+            SERVER_IP,
+            self.zone,
+            name,
+            None,
+            dns_type,
+            select_flags,
+            None,
+            None)
+        recs = []
+        if not res or res.count == 0:
+            return []
+        # flatten the per-node record buffers into one list
+        for rec in res.rec:
+            recs.extend(rec.records)
+        return recs
+
+    def dns_tombstone(self, name,
+                      epoch_hours=DNS_TIMESTAMP_1981,
+                      epoch_nttime=None):
+        """Directly tombstone the node over LDAP: replace its records
+        with a single tombstone record and set dnsTombstoned=TRUE,
+        creating the node if necessary."""
+        dn = f'DC={name},{self.zone_dn}'
+        r = dnsp.DnssrvRpcRecord()
+        r.wType = dnsp.DNS_TYPE_TOMBSTONE
+        # r.dwTimeStamp is a 32 bit value in hours, and r.data is an
+        # NTTIME (100 nanosecond intervals), both in the 1601 epoch. A
+        # tombstone will have both, but expiration calculations use
+        # the r.data NTTIME EntombedTime timestamp (see [MS-DNSP]).
+        r.dwTimeStamp = epoch_hours
+        if epoch_nttime is None:
+            # convert hours to NTTIME (100ns units)
+            r.data = epoch_hours * 3600 * 10 * 1000 * 1000
+        else:
+            r.data = epoch_nttime
+
+        msg = ldb.Message.from_dict(self.samdb,
+                                    {'dn': dn,
+                                     'dnsRecord': [ndr_pack(r)],
+                                     'dnsTombstoned': 'TRUE'
+                                     },
+                                    ldb.FLAG_MOD_REPLACE)
+        try:
+            self.samdb.modify(msg)
+        except ldb.LdbError as e:
+            if 'LDAP_NO_SUCH_OBJECT' not in e.args[1]:
+                raise
+            # We need to do an add
+            msg["objectClass"] = ["top", "dnsNode"]
+            self.samdb.add(msg)
+
+    def set_aging(self, enable=False):
+        """Turn the zone's Aging property on or off."""
+        self.set_zone_int_params(Aging=int(bool(enable)))
+
+    def assert_timestamp_in_ballpark(self, rec):
+        """Assert the record's dwTimeStamp is between ~1970 and ~2101."""
+        self.assertGreater(rec.dwTimeStamp, DNS_TIMESTAMP_1970)
+        self.assertLess(rec.dwTimeStamp, DNS_TIMESTAMP_2101)
+
+    def assert_nttime_in_hour_range(self, t,
+                                    hour_min=DNS_TIMESTAMP_1970,
+                                    hour_max=DNS_TIMESTAMP_2101):
+        """Assert an NTTIME value falls in an hours-since-1601 range."""
+        # NTTIME is in 100ns units; convert to hours
+        t //= int(3600 * 1e7)
+        self.assertGreater(t, hour_min)
+        self.assertLess(t, hour_max)
+
+    def assert_soon_after(self, timestamp, reference):
+        """Assert that a timestamp is the same or very slightly higher than a
+        reference timestamp.
+
+        Typically we expect the timestamps to be identical, unless an
+        hour has clicked over since the reference was taken. However
+        we allow one more hour in case it happens during a daylight
+        savings transition or something.
+
+        Either argument may be a record, in which case its dwTimeStamp
+        is used.
+        """
+        if hasattr(timestamp, 'dwTimeStamp'):
+            timestamp = timestamp.dwTimeStamp
+        if hasattr(reference, 'dwTimeStamp'):
+            reference = reference.dwTimeStamp
+
+        # acceptable window is 0 <= diff <= 2 hours
+        diff = timestamp - reference
+        days = abs(diff / 24.0)
+
+        if diff < 0:
+            msg = f"timestamp is {days} days ({abs(diff)} hours) before reference"
+        elif diff > 2:
+            msg = f"timestamp is {days} days ({diff} hours) after reference"
+        else:
+            return
+        raise AssertionError(msg)
+
+    def assert_timestamps_equal(self, ts1, ts2):
+        """Just like assertEqual(), but tells us the difference, not the
+        absolute values. e.g:
+
+        self.assertEqual(a, b)
+        AssertionError: 3685491 != 3685371
+
+        self.assert_timestamps_equal(a, b)
+        AssertionError: -120 (first is 5.0 days earlier than second)
+
+        Also, we turn a record into a timestamp if we need
+        """
+        if hasattr(ts1, 'dwTimeStamp'):
+            ts1 = ts1.dwTimeStamp
+        if hasattr(ts2, 'dwTimeStamp'):
+            ts2 = ts2.dwTimeStamp
+
+        if ts1 == ts2:
+            return
+
+        diff = ts1 - ts2
+        days = abs(diff / 24.0)
+        if ts1 == 0 or ts2 == 0:
+            # when comparing to zero we don't want the number of days.
+            msg = f"timestamp {ts1} != {ts2}"
+        elif diff > 0:
+            msg = f"{ts1} is {days} days ({diff} hours) after {ts2}"
+        else:
+            msg = f"{ts1} is {days} days ({abs(diff)} hours) before {ts2}"
+
+        raise AssertionError(msg)
+
+    def test_update_timestamps_aging_off_then_on(self):
+        """DNS updates should leave timestamps alone while aging is off,
+        and refresh them once aging is turned on."""
+        # we will add a record with aging off
+        # it will have the current timestamp
+        self.set_aging(False)
+        name = 'timestamp-now'
+        name2 = 'timestamp-eightdays'
+
+        rec = self.dns_update_record(name, [name])
+        start_time = rec.dwTimeStamp
+        self.assert_timestamp_in_ballpark(rec)
+        # alter the timestamp -8 days using RPC
+        # with aging turned off, we expect no change
+        # when aging is on, we expect change
+        eight_days_ago = start_time - 8 * 24
+        rec = self.ldap_update_record(name2, [name2],
+                                      dwTimeStamp=eight_days_ago)
+
+        self.assert_timestamps_equal(rec.dwTimeStamp, eight_days_ago)
+
+        # if aging was on, this would change
+        rec = self.dns_update_record(name2, [name2])
+        self.assert_timestamps_equal(rec.dwTimeStamp, eight_days_ago)
+
+        self.set_aging(True)
+        rec = self.dns_update_record(name2, [name2])
+        self.assertGreaterEqual(rec.dwTimeStamp, start_time)
+
+    def test_rpc_update_timestamps(self):
+        """RPC updates should zero the record timestamp (making the
+        record static), regardless of the aging setting."""
+        # RPC always sets timestamps to zero on Windows.
+        self.set_aging(False)
+        name = 'timestamp-now'
+
+        rec = self.dns_update_record(name, [name])
+        start_time = rec.dwTimeStamp
+        self.assert_timestamp_in_ballpark(rec)
+        # attempt to alter the timestamp to something close by.
+        eight_days_ago = start_time - 8 * 24
+        rec = self.rpc_update_record(name, [name],
+                                     dwTimeStamp=eight_days_ago)
+        self.assertEqual(rec.dwTimeStamp, 0)
+
+        # try again, with aging on
+        self.set_aging(True)
+        rec = self.rpc_update_record(name, [name],
+                                     dwTimeStamp=eight_days_ago)
+        self.assertEqual(rec.dwTimeStamp, 0)
+
+        # now that the record is static, a dns update won't change it
+        rec = self.dns_update_record(name, [name])
+        self.assertEqual(rec.dwTimeStamp, 0)
+
+        # but another record on the same node will behave normally
+        # i.e. the node is not static, the record is.
+        name2 = 'timestamp-eightdays'
+        rec = self.dns_update_record(name2, [name2])
+        self.assert_soon_after(rec.dwTimeStamp,
+                               start_time)
+
+    def get_txt_timestamps(self, name, *txts):
+        """Return the dwTimeStamp of each named TXT value on the node,
+        in the order the values were asked for."""
+        records = self.ldap_get_records(name)
+
+        ret = []
+        for t in txts:
+            for r in records:
+                t2 = [x for x in r.data.str]
+                if t == t2:
+                    ret.append(r.dwTimeStamp)
+        return ret
+
+    def test_update_aging_disabled_2(self):
+        """Probe how DNS updates affect the timestamps of sibling
+        records on the same node while aging is disabled."""
+        # With aging disabled, Windows updates the timestamps of all
+        # records when one is updated.
+        name = 'test'
+        txt1 = ['test txt']
+        txt2 = ['test', 'txt2']
+        txt3 = ['test', 'txt3']
+
+        self.set_aging(False)
+
+        current_time = self.dns_update_record(name, txt1).dwTimeStamp
+
+        six_days_ago = current_time - 6 * 24
+        eight_days_ago = current_time - 8 * 24
+        fifteen_days_ago = current_time - 15 * 24
+        hundred_days_ago = current_time - 100 * 24
+        thousand_days_ago = current_time - 1000 * 24
+
+        for timestamp in (current_time,
+                          six_days_ago,
+                          eight_days_ago,
+                          fifteen_days_ago,
+                          hundred_days_ago,
+                          thousand_days_ago):
+            # wind back
+            self.ldap_update_record(name, txt1, dwTimeStamp=timestamp)
+            self.assertEqual(self.get_txt_timestamps(name, txt1), [timestamp])
+
+            # no change here
+            update_timestamp = self.dns_update_record(name, txt1).dwTimeStamp
+            self.assert_timestamps_equal(update_timestamp, timestamp)
+
+        # adding a fresh record
+        for timestamp in (current_time,
+                          six_days_ago,
+                          eight_days_ago,
+                          fifteen_days_ago,
+                          hundred_days_ago,
+                          thousand_days_ago,
+                          100000,
+                          100):
+            # wind back
+            timestamp1 = self.ldap_update_record(
+                name,
+                txt1,
+                dwTimeStamp=timestamp).dwTimeStamp
+            self.assert_timestamps_equal(timestamp1, timestamp)
+
+            self.dns_update_record(name, txt2)
+            timestamps = self.get_txt_timestamps(name, txt1, txt2)
+            self.assertEqual(timestamps, [timestamp, current_time])
+
+            self.ldap_delete_record(name, txt2)
+            timestamps = self.get_txt_timestamps(name, txt1)
+            self.assertEqual(timestamps, [timestamp])
+
+        # add record 2.
+        timestamp2 = self.dns_update_record(name, txt2).dwTimeStamp
+        self.assert_soon_after(timestamp2, current_time)
+
+        for timestamp in (current_time,
+                          six_days_ago,
+                          eight_days_ago,
+                          fifteen_days_ago,
+                          hundred_days_ago,
+                          thousand_days_ago,
+                          100000,
+                          100):
+            # wind back
+            self.ldap_update_record(name, txt1, dwTimeStamp=timestamp)
+            timestamp1 = self.get_unique_txt_record(name, txt1).dwTimeStamp
+            self.assert_timestamps_equal(timestamp1, timestamp)
+
+            timestamp2 = self.dns_update_record(name, txt2).dwTimeStamp
+            # txt1 timestamp is now current time
+            timestamps = self.get_txt_timestamps(name, txt1, txt2)
+            self.assertEqual(timestamps, [timestamp, current_time])
+
+        # with 3 records, no change
+        for timestamp in (current_time,
+                          six_days_ago,
+                          eight_days_ago,
+                          fifteen_days_ago,
+                          hundred_days_ago,
+                          thousand_days_ago,
+                          100000,
+                          10):
+            # wind back
+            self.ldap_update_record(name, txt1, dwTimeStamp=timestamp)
+            self.ldap_update_record(name, txt2, dwTimeStamp=timestamp)
+            self.ldap_update_record(name, txt3, dwTimeStamp=(timestamp + 30))
+            timestamp3 = self.get_unique_txt_record(name, txt3).dwTimeStamp
+            self.assert_timestamps_equal(timestamp3, timestamp + 30)
+
+            # NOTE(review): the .dwTimeStamp result here is unused
+            self.dns_update_record(name, txt2).dwTimeStamp
+            timestamps = self.get_txt_timestamps(name, txt1, txt2, txt3)
+            self.assertEqual(timestamps, [timestamp,
+                                          timestamp,
+                                          timestamp + 30])
+
+        # with 3 records, one of which is static
+        # first we set the updatee's timestamp to a recognisable number
+        self.ldap_update_record(name, txt2, dwTimeStamp=999999)
+        for timestamp in (current_time,
+                          six_days_ago,
+                          eight_days_ago,
+                          fifteen_days_ago,
+                          hundred_days_ago,
+                          thousand_days_ago,
+                          100000,
+                          10):
+            # wind back
+            self.ldap_update_record(name, txt1, dwTimeStamp=0)
+            self.ldap_update_record(name, txt3, dwTimeStamp=(timestamp - 9))
+            timestamp3 = self.get_unique_txt_record(name, txt3).dwTimeStamp
+            self.assert_timestamps_equal(timestamp3, timestamp - 9)
+
+            self.dns_update_record(name, txt2)
+            timestamps = self.get_txt_timestamps(name, txt1, txt2, txt3)
+            self.assertEqual(timestamps, [0,
+                                          999999,
+                                          timestamp - 9])
+
+        # with 3 records, updating one which is static
+        timestamp3 = self.dns_update_record(name, txt3).dwTimeStamp
+        for timestamp in (current_time,
+                          six_days_ago,
+                          eight_days_ago,
+                          fifteen_days_ago,
+                          hundred_days_ago,
+                          thousand_days_ago,
+                          100000,
+                          10):
+            # wind back
+            self.ldap_update_record(name, txt1, dwTimeStamp=0)
+            self.ldap_update_record(name, txt2, dwTimeStamp=0)
+            self.ldap_update_record(name, txt3, dwTimeStamp=(timestamp + 30))
+            timestamp3 = self.get_unique_txt_record(name, txt3).dwTimeStamp
+            self.assert_timestamps_equal(timestamp3, timestamp + 30)
+
+            # NOTE(review): the .dwTimeStamp result here is unused
+            self.dns_update_record(name, txt2).dwTimeStamp
+            timestamps = self.get_txt_timestamps(name, txt1, txt2, txt3)
+            self.assertEqual(timestamps, [0,
+                                          0,
+                                          timestamp + 30])
+
+        # with 3 records, after the static nodes have been replaced
+        self.ldap_update_record(name, txt1, dwTimeStamp=777777)
+        self.ldap_update_record(name, txt2, dwTimeStamp=888888)
+        timestamp3 = self.dns_update_record(name, txt3).dwTimeStamp
+        for timestamp in (current_time,
+                          six_days_ago,
+                          eight_days_ago,
+                          fifteen_days_ago,
+                          hundred_days_ago,
+                          thousand_days_ago,
+                          100000,
+                          10):
+            # wind back
+            self.ldap_update_record(name, txt3, dwTimeStamp=(timestamp))
+            timestamp3 = self.get_unique_txt_record(name, txt3).dwTimeStamp
+            self.assert_timestamps_equal(timestamp3, timestamp)
+
+            self.dns_update_record(name, txt2)
+            timestamps = self.get_txt_timestamps(name, txt1, txt2, txt3)
+            self.assertEqual(timestamps, [777777,
+                                          888888,
+                                          timestamp])
+
    def _test_update_aging_disabled_n_days_ago(self, n_days):
        """Helper: behaviour of DNS updates with aging DISABLED on a node
        whose record timestamp has been rewound ``n_days`` via LDAP.

        Exercises: update of the rewound record, addition of siblings,
        delete-and-re-add, and finally a static (timestamp 0) record.
        The expectations branch on whether ``n_days`` is within the
        7-day no-refresh window.

        :param n_days: how many days to rewind the first record
        """
        name = 'test'
        txt1 = ['1']
        txt2 = ['2']

        self.set_aging(False)
        current_time = self.dns_update_record(name, txt1).dwTimeStamp

        # rewind timestamp using ldap
        self.ldap_modify_timestamps(name, n_days * -24)
        n_days_ago = self.get_unique_txt_record(name, txt1).dwTimeStamp
        self.assertGreater(current_time, n_days_ago)

        # no change when updating this record
        update_timestamp = self.dns_update_record(name, txt1).dwTimeStamp
        self.assert_timestamps_equal(update_timestamp, n_days_ago)

        # add another record, which should have the current timestamp
        timestamp2 = self.dns_update_record(name, txt2).dwTimeStamp
        self.assert_soon_after(timestamp2, current_time)

        # get the original record timestamp. NOW it matches current_time
        timestamp1 = self.get_unique_txt_record(name, txt1).dwTimeStamp
        self.assert_timestamps_equal(timestamp1, timestamp2)

        # let's repeat that, this time with txt2 existing
        self.ldap_update_record(name, txt1, dwTimeStamp=n_days_ago)

        timestamp1 = self.dns_update_record(name, txt1).dwTimeStamp
        self.assert_timestamps_equal(timestamp1, n_days_ago)

        # this update is not an add
        timestamp2 = self.dns_update_record(name, txt2).dwTimeStamp
        self.assert_soon_after(timestamp2, current_time)

        # now timestamp1 is not changed
        timestamp1 = self.get_unique_txt_record(name, txt1).dwTimeStamp
        self.assert_timestamps_equal(timestamp1, n_days_ago)

        # delete record2, try again
        self.ldap_delete_record(name, txt2)
        self.ldap_update_record(name, txt1, dwTimeStamp=n_days_ago)

        timestamp1 = self.dns_update_record(name, txt1).dwTimeStamp
        self.assert_timestamps_equal(timestamp1, n_days_ago)

        # here we are re-adding the deleted record
        timestamp2 = self.dns_update_record(name, txt2).dwTimeStamp
        self.assert_soon_after(timestamp2, current_time)

        timestamp1 = self.get_unique_txt_record(name, txt1).dwTimeStamp

        # It gets weird HERE.
        # note how the SIBLING of the deleted, re-added record differs
        # from the sibling of freshly added record, depending on the
        # time difference.
        if n_days <= 7:
            self.assert_timestamps_equal(timestamp1, n_days_ago)
        else:
            self.assert_timestamps_equal(timestamp1, timestamp2)

        # re-timestamp record2, try again
        self.ldap_update_record(name, txt2, dwTimeStamp=n_days_ago)
        self.ldap_update_record(name, txt1, dwTimeStamp=n_days_ago)

        timestamp1 = self.dns_update_record(name, txt1).dwTimeStamp
        self.assert_timestamps_equal(timestamp1, n_days_ago)

        # no change
        timestamp2 = self.dns_update_record(name, txt2).dwTimeStamp
        self.assert_timestamps_equal(timestamp2, n_days_ago)
        # also no change
        timestamp1 = self.get_unique_txt_record(name, txt1).dwTimeStamp
        self.assert_timestamps_equal(timestamp1, timestamp2)

        # let's introduce another record
        txt3 = ['3']
        self.ldap_update_record(name, txt2, dwTimeStamp=n_days_ago)
        self.ldap_update_record(name, txt1, dwTimeStamp=n_days_ago)

        timestamp3 = self.dns_update_record(name, txt3).dwTimeStamp
        self.assert_soon_after(timestamp3, current_time)

        timestamp1 = self.get_unique_txt_record(name, txt1).dwTimeStamp
        timestamp2 = self.get_unique_txt_record(name, txt2).dwTimeStamp

        if n_days <= 7:
            self.assert_timestamps_equal(timestamp1, n_days_ago)
        else:
            self.assert_timestamps_equal(timestamp1, timestamp3)

        self.assert_timestamps_equal(timestamp2, timestamp3)

        self.ldap_delete_record(name, txt3)
        timestamp3 = self.dns_update_record(name, txt3).dwTimeStamp
        self.assert_soon_after(timestamp3, current_time)
        timestamp1 = self.get_unique_txt_record(name, txt1).dwTimeStamp
        timestamp2 = self.get_unique_txt_record(name, txt2).dwTimeStamp

        if n_days <= 7:
            self.assert_timestamps_equal(timestamp1, n_days_ago)
        else:
            self.assert_timestamps_equal(timestamp1, timestamp3)

        self.assert_timestamps_equal(timestamp2, timestamp3)

        # txt4 is added below, via DNS, while txt1 is static
        txt4 = ['4']

        # and here we'll make txt1 static
        self.ldap_update_record(name, txt1, dwTimeStamp=0)
        self.ldap_update_record(name, txt2, dwTimeStamp=n_days_ago)
        self.ldap_update_record(name, txt3, dwTimeStamp=n_days_ago)
        timestamp1 = self.dns_update_record(name, txt1).dwTimeStamp
        timestamp2 = self.get_unique_txt_record(name, txt2).dwTimeStamp
        timestamp3 = self.get_unique_txt_record(name, txt3).dwTimeStamp
        timestamp4 = self.dns_update_record(name, txt4).dwTimeStamp

        self.assertEqual(timestamp1, 0)
        self.assert_timestamps_equal(timestamp2, n_days_ago)
        self.assert_timestamps_equal(timestamp3, n_days_ago)
        self.assert_soon_after(timestamp4, current_time)
+
+ def test_update_aging_disabled_in_no_refresh_window(self):
+ self._test_update_aging_disabled_n_days_ago(4)
+
+ def test_update_aging_disabled_on_no_refresh_boundary(self):
+ self._test_update_aging_disabled_n_days_ago(7)
+
+ def test_update_aging_disabled_in_refresh_window(self):
+ self._test_update_aging_disabled_n_days_ago(9)
+
+ def test_update_aging_disabled_beyond_refresh_window(self):
+ self._test_update_aging_disabled_n_days_ago(16)
+
+ def test_update_aging_disabled_in_eighteenth_century(self):
+ self._test_update_aging_disabled_n_days_ago(100000)
+
    def test_update_aging_disabled_static(self):
        """With aging disabled, check DNS updates against a node holding a
        static (timestamp 0) record, including delete/re-add of a sibling.
        """
        name = 'test'
        txt1 = ['1']
        txt2 = ['2']

        self.set_aging(False)

        current_time = self.dns_update_record(name, txt1).dwTimeStamp
        # make txt1 static via LDAP
        self.ldap_update_record(name, txt1, dwTimeStamp=0)

        # no change when updating this record
        timestamp1 = self.dns_update_record(name, txt1).dwTimeStamp
        self.assertEqual(timestamp1, 0)

        # add another record, which should have the current timestamp
        timestamp2 = self.dns_update_record(name, txt2).dwTimeStamp
        self.assert_soon_after(timestamp2, current_time)

        # the previously static txt1 now carries the current time too
        timestamp1 = self.get_unique_txt_record(name, txt1).dwTimeStamp
        self.assert_soon_after(timestamp1, current_time)

        # let's repeat that, this time with txt2 existing
        timestamp1 = self.dns_update_record(name, txt1).dwTimeStamp
        # NOTE(review): this asserts timestamp2 (unchanged since the check
        # above) rather than the timestamp1 just fetched -- possibly
        # timestamp1 was intended; confirm against a Windows DC.
        self.assert_soon_after(timestamp2, current_time)

        timestamp2 = self.dns_update_record(name, txt2).dwTimeStamp
        self.assert_soon_after(timestamp2, current_time)

        # delete record2, try again
        self.ldap_delete_record(name, txt2)
        self.ldap_update_record(name, txt1, dwTimeStamp=0)
        # no change when updating this record
        timestamp1 = self.dns_update_record(name, txt1).dwTimeStamp
        self.assertEqual(timestamp1, 0)

        # re-added txt2 inherits the node's staticness
        timestamp2 = self.dns_update_record(name, txt2).dwTimeStamp
        self.assertEqual(timestamp2, 0)

        timestamp1 = self.get_unique_txt_record(name, txt1).dwTimeStamp
        self.assertEqual(timestamp1, 0)
        # re-timestamp record2, try again
        self.ldap_update_record(name, txt2, dwTimeStamp=1)
        self.ldap_update_record(name, txt1, dwTimeStamp=0)
        # no change when updating this record
        timestamp2 = self.dns_update_record(name, txt2).dwTimeStamp
        self.assert_timestamps_equal(timestamp2, 1)
+
    def test_update_aging_disabled(self):
        # With aging disabled, Windows updates the timestamps of all
        # records when one is updated.
        name = 'test'
        txt1 = ['test txt']
        txt2 = ['test', 'txt2']
        txt3 = ['test', 'txt3']
        minus_6 = -6 * 24    # hours: six days back
        minus_8 = -8 * 24    # hours: eight days back

        self.set_aging(False)

        current_time = self.dns_update_record(name, txt1).dwTimeStamp

        # rewind timestamp using ldap
        self.ldap_modify_timestamps(name, minus_6)
        after_mod = self.get_unique_txt_record(name, txt1)
        six_days_ago = after_mod.dwTimeStamp
        self.assert_timestamps_equal(six_days_ago, current_time + minus_6)

        # no change
        update_timestamp = self.dns_update_record(name, txt1).dwTimeStamp
        self.assert_timestamps_equal(update_timestamp, six_days_ago)

        self.check_query_txt(name, txt1, zone=self.zone)

        # another record
        timestamp2 = self.dns_update_record(name, txt2).dwTimeStamp
        self.assert_soon_after(timestamp2, current_time)

        timestamp1 = self.get_unique_txt_record(name, txt1).dwTimeStamp
        # without aging, timestamp1 is changed!!
        self.assert_timestamps_equal(timestamp1, timestamp2)

        # Set both records back to 8 days ago.
        self.ldap_modify_timestamps(name, minus_8)

        eight_days_ago = self.get_unique_txt_record(name, txt1).dwTimeStamp
        self.assert_timestamps_equal(eight_days_ago, current_time + minus_8)

        update2 = self.dns_update_record(name, txt2)

        # Without aging on, an update should not change the timestamps.
        self.assert_timestamps_equal(update2.dwTimeStamp, eight_days_ago)
        timestamp1 = self.get_unique_txt_record(name, txt1).dwTimeStamp
        self.assert_timestamps_equal(timestamp1, eight_days_ago)

        # Add another txt record. The new record should have the now
        # timestamp, and drag the others up with it.
        timestamp3 = self.dns_update_record(name, txt3).dwTimeStamp
        self.assert_soon_after(timestamp3, current_time)
        timestamp1 = self.get_unique_txt_record(name, txt1).dwTimeStamp
        timestamp2 = self.get_unique_txt_record(name, txt2).dwTimeStamp
        self.assert_timestamps_equal(timestamp1, timestamp3)
        self.assert_timestamps_equal(timestamp2, timestamp3)

        hundred_days_ago = current_time - 100 * 24
        thousand_days_ago = current_time - 1000 * 24
        record = self.ldap_update_record(name, txt1,
                                         dwTimeStamp=hundred_days_ago)
        self.assert_timestamps_equal(record.dwTimeStamp, hundred_days_ago)
        record = self.ldap_update_record(name, txt2,
                                         dwTimeStamp=thousand_days_ago)
        self.assert_timestamps_equal(record.dwTimeStamp, thousand_days_ago)

        # update 3, will others change (because beyond RefreshInterval)? yes.
        # NOTE(review): despite "yes" above, the assertions below expect
        # timestamps 1 and 2 to be unchanged -- confirm which is meant.
        timestamp3 = self.dns_update_record(name, txt3).dwTimeStamp
        timestamp1 = self.get_unique_txt_record(name, txt1).dwTimeStamp
        timestamp2 = self.get_unique_txt_record(name, txt2).dwTimeStamp
        self.assert_soon_after(timestamp3, current_time)
        self.assert_timestamps_equal(timestamp1, hundred_days_ago)
        self.assert_timestamps_equal(timestamp2, thousand_days_ago)

        fifteen_days_ago = current_time - 15 * 24
        self.ldap_update_record(name, txt3, dwTimeStamp=fifteen_days_ago)

        timestamp2 = self.dns_update_record(name, txt2).dwTimeStamp
        timestamp1 = self.get_unique_txt_record(name, txt1).dwTimeStamp
        timestamp3 = self.get_unique_txt_record(name, txt3).dwTimeStamp
        # DNS update has no effect because all records are old
        self.assert_timestamps_equal(timestamp2, thousand_days_ago)
        self.assert_timestamps_equal(timestamp1, hundred_days_ago)
        self.assert_timestamps_equal(timestamp3, fifteen_days_ago)

        # Does update of old record affect timestamp of refreshable record? No.
        self.ldap_update_record(name, txt3, dwTimeStamp=eight_days_ago)
        timestamp2 = self.dns_update_record(name, txt2).dwTimeStamp
        timestamp1 = self.get_unique_txt_record(name, txt1).dwTimeStamp
        timestamp3 = self.get_unique_txt_record(name, txt3).dwTimeStamp
        # DNS update has no effect because all records are old
        self.assert_timestamps_equal(timestamp2, thousand_days_ago)
        self.assert_timestamps_equal(timestamp1, hundred_days_ago)
        self.assert_timestamps_equal(timestamp3, eight_days_ago)

        # RPC zeros timestamp, after which updates won't change it.
        # BUT it refreshes all others!
        self.rpc_update_record(name, txt2)

        # NOTE(review): the txt3 update result is assigned to timestamp2
        # and immediately overwritten two lines later, and the final
        # assertion re-checks the timestamp3 value fetched ABOVE (still
        # eight_days_ago) rather than a fresh read -- the first line was
        # probably meant to set timestamp3; confirm intent.
        timestamp2 = self.dns_update_record(name, txt3).dwTimeStamp
        timestamp1 = self.get_unique_txt_record(name, txt1).dwTimeStamp
        timestamp2 = self.get_unique_txt_record(name, txt2).dwTimeStamp
        self.assertEqual(timestamp2, 0)
        self.assert_soon_after(timestamp1, current_time)
        self.assert_timestamps_equal(timestamp3, eight_days_ago)
+
    def test_update_aging_enabled(self):
        """With aging enabled, DNS updates refresh a record's timestamp only
        once it is older than the no-refresh interval (7 days), and the
        effect on sibling records is probed step by step below.
        """
        name = 'test'
        txt1 = ['test txt']
        txt2 = ['test', 'txt2']
        txt3 = ['test', 'txt3']
        txt4 = ['4']

        self.set_aging(True)

        current_time = self.dns_update_record(name, txt2).dwTimeStamp

        # reference points, in hours relative to now
        six_days_ago = current_time - 6 * 24
        eight_days_ago = current_time - 8 * 24
        fifteen_days_ago = current_time - 15 * 24
        hundred_days_ago = current_time - 100 * 24

        self.ldap_update_record(name, txt1, dwTimeStamp=six_days_ago)

        # with or without aging, a delta of -6 days does not affect
        # timestamps, because dwNoRefreshInterval is 7 days.
        timestamp1 = self.dns_update_record(name, txt1).dwTimeStamp
        timestamp2 = self.get_unique_txt_record(name, txt2).dwTimeStamp

        self.assert_timestamps_equal(timestamp1, six_days_ago)
        self.assert_soon_after(timestamp2, current_time)

        self.ldap_update_record(name, txt3, dwTimeStamp=eight_days_ago)
        timestamp3 = self.get_unique_txt_record(name, txt3).dwTimeStamp
        self.assert_timestamps_equal(timestamp3, eight_days_ago)

        # update 1, what happens to 2 and 3? Nothing?
        timestamp1 = self.dns_update_record(name, txt1).dwTimeStamp
        timestamp2 = self.get_unique_txt_record(name, txt2).dwTimeStamp
        timestamp3 = self.get_unique_txt_record(name, txt3).dwTimeStamp
        self.assert_timestamps_equal(timestamp1, six_days_ago)
        self.assert_soon_after(timestamp2, current_time)
        self.assert_timestamps_equal(timestamp3, eight_days_ago)

        # now set 1 to 8 days, and we should see changes
        self.ldap_update_record(name, txt1, dwTimeStamp=eight_days_ago)

        # update 1, what happens to 2 and 3? Nothing?
        timestamp1 = self.dns_update_record(name, txt1).dwTimeStamp
        timestamp2 = self.get_unique_txt_record(name, txt2).dwTimeStamp
        timestamp3 = self.get_unique_txt_record(name, txt3).dwTimeStamp
        self.assert_soon_after(timestamp1, current_time)
        self.assert_soon_after(timestamp2, current_time)
        self.assert_timestamps_equal(timestamp3, eight_days_ago)

        # next few ones use these numbers
        self.ldap_update_record(name, txt1, dwTimeStamp=fifteen_days_ago)
        self.ldap_update_record(name, txt2, dwTimeStamp=six_days_ago)
        self.ldap_update_record(name, txt3, dwTimeStamp=eight_days_ago)

        # change even though 1 is outside the window
        timestamp1 = self.dns_update_record(name, txt1).dwTimeStamp
        timestamp2 = self.get_unique_txt_record(name, txt2).dwTimeStamp
        timestamp3 = self.get_unique_txt_record(name, txt3).dwTimeStamp
        self.assert_soon_after(timestamp1, current_time)
        self.assert_timestamps_equal(timestamp2, six_days_ago)
        self.assert_timestamps_equal(timestamp3, eight_days_ago)

        # reset 1
        self.ldap_update_record(name, txt1, dwTimeStamp=fifteen_days_ago)

        # no change, because 2 is outside the window
        # NOTE(review): 2 is at six days, i.e. *inside* the 7-day
        # no-refresh window -- the comment's "outside" looks inverted.
        timestamp2 = self.dns_update_record(name, txt2).dwTimeStamp
        timestamp1 = self.get_unique_txt_record(name, txt1).dwTimeStamp
        timestamp3 = self.get_unique_txt_record(name, txt3).dwTimeStamp
        self.assert_timestamps_equal(timestamp1, fifteen_days_ago)
        self.assert_timestamps_equal(timestamp2, six_days_ago)
        self.assert_timestamps_equal(timestamp3, eight_days_ago)

        # 3 changes, others do not
        timestamp3 = self.dns_update_record(name, txt3).dwTimeStamp
        timestamp1 = self.get_unique_txt_record(name, txt1).dwTimeStamp
        timestamp2 = self.get_unique_txt_record(name, txt2).dwTimeStamp
        self.assert_timestamps_equal(timestamp1, fifteen_days_ago)
        self.assert_timestamps_equal(timestamp2, six_days_ago)
        self.assert_soon_after(timestamp3, current_time)

        # reset 3 to 100 days
        self.ldap_update_record(name, txt3, dwTimeStamp=hundred_days_ago)

        # 3 changes, others do not
        timestamp3 = self.dns_update_record(name, txt3).dwTimeStamp
        timestamp1 = self.get_unique_txt_record(name, txt1).dwTimeStamp
        timestamp2 = self.get_unique_txt_record(name, txt2).dwTimeStamp
        self.assert_timestamps_equal(timestamp1, fifteen_days_ago)
        self.assert_timestamps_equal(timestamp2, six_days_ago)
        self.assert_soon_after(timestamp3, current_time)

        # reset 1 and 3 to 8 days. does update of 1 affect 3?
        self.ldap_update_record(name, txt1, dwTimeStamp=eight_days_ago)
        self.ldap_update_record(name, txt3, dwTimeStamp=eight_days_ago)

        # 1 changes, others do not
        timestamp1 = self.dns_update_record(name, txt1).dwTimeStamp
        timestamp2 = self.get_unique_txt_record(name, txt2).dwTimeStamp
        timestamp3 = self.get_unique_txt_record(name, txt3).dwTimeStamp
        self.assert_soon_after(timestamp1, current_time)
        self.assert_timestamps_equal(timestamp2, six_days_ago)
        self.assert_timestamps_equal(timestamp3, eight_days_ago)

        # Try an RPC update, zeroing 1 --> what happens to 3?
        timestamp1 = self.rpc_update_record(name, txt1).dwTimeStamp
        timestamp2 = self.get_unique_txt_record(name, txt2).dwTimeStamp
        timestamp3 = self.get_unique_txt_record(name, txt3).dwTimeStamp
        self.assertEqual(timestamp1, 0)
        self.assert_timestamps_equal(timestamp2, six_days_ago)
        self.assert_timestamps_equal(timestamp3, eight_days_ago)

        # with 2 and 3 at 8 days, does static record change things?
        self.ldap_update_record(name, txt2, dwTimeStamp=eight_days_ago)
        # 2 changes, but to zero!
        timestamp2 = self.dns_update_record(name, txt2).dwTimeStamp
        timestamp1 = self.get_unique_txt_record(name, txt1).dwTimeStamp
        timestamp3 = self.get_unique_txt_record(name, txt3).dwTimeStamp
        self.assert_timestamps_equal(timestamp1, 0)
        self.assert_timestamps_equal(timestamp2, 0)
        self.assert_timestamps_equal(timestamp3, eight_days_ago)

        self.ldap_update_record(name, txt2, dwTimeStamp=six_days_ago)
        self.ldap_update_record(name, txt1, dwTimeStamp=3000000)
        timestamp1 = self.get_unique_txt_record(name, txt1).dwTimeStamp
        self.assert_timestamps_equal(timestamp1, 3000000)

        # dns update remembers that node is static, even with no
        # static records.
        timestamp1 = self.dns_update_record(name, txt1).dwTimeStamp
        self.assertEqual(timestamp1, 0)

        # Add another txt record. The new record should have the now
        # timestamp, and the others should remain unchanged.
        # BUT somehow record 1 is static!?
        timestamp4 = self.dns_update_record(name, txt4).dwTimeStamp
        timestamp1 = self.get_unique_txt_record(name, txt1).dwTimeStamp
        timestamp2 = self.get_unique_txt_record(name, txt2).dwTimeStamp
        timestamp3 = self.get_unique_txt_record(name, txt3).dwTimeStamp
        self.assert_timestamps_equal(timestamp1, 0)
        self.assert_timestamps_equal(timestamp2, six_days_ago)
        self.assert_timestamps_equal(timestamp3, eight_days_ago)
        self.assert_timestamps_equal(timestamp4, 0)
+
    def _test_update_aging_enabled_n_days_ago(self, n_days):
        """Helper: behaviour of DNS updates with aging ENABLED on a node
        whose record timestamp has been rewound ``n_days`` via LDAP.

        Walks through refresh-on-update, sibling addition,
        delete-and-re-add, static (timestamp 0) records, and the node's
        "static stickiness". Expectations branch on whether ``n_days``
        is within the 7-day no-refresh window.

        :param n_days: how many days to rewind the first record
        """
        name = 'test'
        txt1 = ['1']
        txt2 = ['2']
        delta = n_days * -24    # hours to rewind

        self.set_aging(True)
        current_time = self.dns_update_record(name, txt1).dwTimeStamp

        # rewind timestamp using ldap
        self.ldap_modify_timestamps(name, delta)
        n_days_ago = self.get_unique_txt_record(name, txt1).dwTimeStamp
        self.assertGreater(current_time, n_days_ago)

        # update changes timestamp depending on time.
        timestamp1 = self.dns_update_record(name, txt1).dwTimeStamp
        if n_days <= 7:
            self.assert_timestamps_equal(timestamp1, n_days_ago)
        else:
            self.assert_soon_after(timestamp1, current_time)

        # add another record, which should have the current timestamp
        timestamp2 = self.dns_update_record(name, txt2).dwTimeStamp
        self.assert_soon_after(timestamp2, current_time)

        # first record should not have changed
        timestamp1_b = self.get_unique_txt_record(name, txt1).dwTimeStamp
        self.assert_timestamps_equal(timestamp1, timestamp1_b)

        # let's repeat that, this time with txt2 existing
        self.ldap_update_record(name, txt1, dwTimeStamp=n_days_ago)

        timestamp1 = self.dns_update_record(name, txt1).dwTimeStamp
        self.assert_timestamps_equal(timestamp1, timestamp1_b)

        # this update is not an add. record 2 is already up-to-date
        timestamp2 = self.dns_update_record(name, txt2).dwTimeStamp
        self.assert_soon_after(timestamp2, current_time)

        # now timestamp1 is not changed
        timestamp1 = self.get_unique_txt_record(name, txt1).dwTimeStamp
        self.assert_timestamps_equal(timestamp1, timestamp1_b)

        # delete record2, try again
        self.ldap_delete_record(name, txt2)
        self.ldap_update_record(name, txt1, dwTimeStamp=n_days_ago)

        timestamp1 = self.dns_update_record(name, txt1).dwTimeStamp
        if n_days <= 7:
            self.assert_timestamps_equal(timestamp1, n_days_ago)
        else:
            self.assert_soon_after(timestamp1, current_time)

        # here we are re-adding the deleted record
        timestamp2 = self.dns_update_record(name, txt2).dwTimeStamp
        self.assert_soon_after(timestamp2, current_time)

        timestamp1 = self.get_unique_txt_record(name, txt1).dwTimeStamp

        # It gets weird HERE.
        # note how the SIBLING of the deleted, re-added record differs
        # from the sibling of freshly added record, depending on the
        # time difference.
        if n_days <= 7:
            self.assert_timestamps_equal(timestamp1, n_days_ago)
        else:
            self.assert_timestamps_equal(timestamp1, timestamp2)

        # re-timestamp record2, try again
        self.ldap_update_record(name, txt2, dwTimeStamp=n_days_ago)
        self.ldap_update_record(name, txt1, dwTimeStamp=n_days_ago)

        # this should make no difference
        timestamp1_b = self.dns_update_record(name, txt1).dwTimeStamp
        self.assert_timestamps_equal(timestamp1, timestamp1_b)

        # no change
        timestamp2 = self.dns_update_record(name, txt2).dwTimeStamp
        self.assert_timestamps_equal(timestamp2, timestamp1)
        # also no change
        timestamp1 = self.get_unique_txt_record(name, txt1).dwTimeStamp
        self.assert_timestamps_equal(timestamp1, timestamp2)

        # let's introduce another record
        txt3 = ['3']
        self.ldap_update_record(name, txt2, dwTimeStamp=n_days_ago)
        self.ldap_update_record(name, txt1, dwTimeStamp=n_days_ago)

        timestamp3 = self.dns_update_record(name, txt3).dwTimeStamp
        self.assert_soon_after(timestamp3, current_time)

        timestamp1 = self.get_unique_txt_record(name, txt1).dwTimeStamp
        timestamp2 = self.get_unique_txt_record(name, txt2).dwTimeStamp

        self.assert_timestamps_equal(timestamp1, n_days_ago)
        self.assert_timestamps_equal(timestamp2, n_days_ago)

        self.ldap_delete_record(name, txt3)
        timestamp3 = self.dns_update_record(name, txt3).dwTimeStamp
        self.assert_soon_after(timestamp3, current_time)
        timestamp1 = self.get_unique_txt_record(name, txt1).dwTimeStamp
        timestamp2 = self.get_unique_txt_record(name, txt2).dwTimeStamp

        self.assert_timestamps_equal(timestamp1, n_days_ago)
        self.assert_timestamps_equal(timestamp2, n_days_ago)

        txt4 = ['4']

        # Because txt1 is static, txt4 is static
        self.ldap_update_record(name, txt1, dwTimeStamp=0)
        self.ldap_update_record(name, txt2, dwTimeStamp=n_days_ago)
        self.ldap_update_record(name, txt3, dwTimeStamp=n_days_ago)
        timestamp1 = self.dns_update_record(name, txt1).dwTimeStamp
        timestamp2 = self.get_unique_txt_record(name, txt2).dwTimeStamp
        timestamp3 = self.get_unique_txt_record(name, txt3).dwTimeStamp
        timestamp4 = self.dns_update_record(name, txt4).dwTimeStamp

        self.assert_timestamps_equal(timestamp1, 0)
        self.assert_timestamps_equal(timestamp2, n_days_ago)
        self.assert_timestamps_equal(timestamp3, n_days_ago)
        self.assert_timestamps_equal(timestamp4, 0)

        # halving the hour count makes the timestamp older still
        longer_ago = n_days_ago // 2

        # remove all static records.
        self.ldap_delete_record(name, txt4)
        self.ldap_update_record(name, txt1, dwTimeStamp=longer_ago)
        self.ldap_update_record(name, txt2, dwTimeStamp=n_days_ago)
        self.ldap_update_record(name, txt3, dwTimeStamp=n_days_ago)
        timestamp1 = self.get_unique_txt_record(name, txt1).dwTimeStamp
        self.assert_timestamps_equal(timestamp1, longer_ago)

        timestamp4 = self.dns_update_record(name, txt4).dwTimeStamp
        timestamp2 = self.get_unique_txt_record(name, txt2).dwTimeStamp
        timestamp3 = self.get_unique_txt_record(name, txt3).dwTimeStamp
        timestamp1 = self.get_unique_txt_record(name, txt1).dwTimeStamp

        # Here, although there is no record from which to get the zero
        # timestamp, record 4 does it anyway.
        self.assert_timestamps_equal(timestamp1, longer_ago)
        self.assert_timestamps_equal(timestamp2, n_days_ago)
        self.assert_timestamps_equal(timestamp3, n_days_ago)
        self.assert_timestamps_equal(timestamp4, 0)

        # and now record 1 wants to be static.
        self.ldap_update_record(name, txt4, dwTimeStamp=longer_ago)
        timestamp4 = self.get_unique_txt_record(name, txt4).dwTimeStamp
        self.assert_timestamps_equal(timestamp4, longer_ago)
        timestamp1 = self.dns_update_record(name, txt1).dwTimeStamp
        timestamp4 = self.get_unique_txt_record(name, txt4).dwTimeStamp
        self.assert_timestamps_equal(timestamp1, 0)
        self.assert_timestamps_equal(timestamp4, longer_ago)
+
+ def test_update_aging_enabled_in_no_refresh_window(self):
+ self._test_update_aging_enabled_n_days_ago(4)
+
+ def test_update_aging_enabled_on_no_refresh_boundary(self):
+ self._test_update_aging_enabled_n_days_ago(7)
+
+ def test_update_aging_enabled_in_refresh_window(self):
+ self._test_update_aging_enabled_n_days_ago(9)
+
+ def test_update_aging_enabled_beyond_refresh_window(self):
+ self._test_update_aging_enabled_n_days_ago(16)
+
+ def test_update_aging_enabled_in_eighteenth_century(self):
+ self._test_update_aging_enabled_n_days_ago(100000)
+
+ def test_update_static_stickiness(self):
+ name = 'test'
+ A = ['A']
+ B = ['B']
+ C = ['C']
+ D = ['D']
+
+ self.set_aging(False)
+ self.dns_update_record(name, A).dwTimeStamp
+ self.ldap_update_record(name, B, dwTimeStamp=0)
+ self.dns_update_record(name, B)
+ self.dns_update_record(name, C)
+ ctime = self.get_unique_txt_record(name, C).dwTimeStamp
+ self.assertEqual(ctime, 0)
+ btime = self.get_unique_txt_record(name, B).dwTimeStamp
+ self.assertEqual(btime, 0)
+
+ self.ldap_replace_records(name, [])
+
+ self.dns_update_record(name, D)
+ dtime = self.get_unique_txt_record(name, D).dwTimeStamp
+ self.assertEqual(dtime, 0)
+
    def _test_update_timestamp_weirdness(self, n_days, aging=True):
        """Helper: probe how a deleted-and-re-added record's sibling is
        timestamped, for a record rewound ``n_days``.

        :param n_days: how many days to rewind record A
        :param aging: whether zone aging is enabled for the run
        :return: tuple ``(n_days_ago, time_A, time_B)`` -- the rewound
            timestamp, and the final timestamps of records A and B,
            for the caller to assert on.
        """
        name = 'test'
        A = ['A']
        B = ['B']

        self.set_aging(aging)

        current_time = self.dns_update_record(name, A).dwTimeStamp

        # rewind timestamp using ldap
        self.ldap_modify_timestamps(name, n_days * -24)
        n_days_ago = self.get_unique_txt_record(name, A).dwTimeStamp
        time_A = self.dns_update_record(name, A).dwTimeStamp
        # that dns_update should have reset the timestamp ONLY if
        # aging is on and the old timestamp is > noRefresh period (7
        # days)
        if n_days > 7 and aging:
            self.assert_soon_after(time_A, current_time)
        else:
            self.assert_timestamps_equal(time_A, n_days_ago)

        # add another record, which should have the current timestamp
        time_B = self.dns_update_record(name, B).dwTimeStamp
        self.assert_soon_after(time_B, current_time)

        time_A = self.get_unique_txt_record(name, A).dwTimeStamp
        if aging and n_days <= 7:
            self.assert_timestamps_equal(time_A, n_days_ago)
        else:
            self.assert_soon_after(time_A, current_time)

        # delete B, try again
        self.ldap_delete_record(name, B)
        self.ldap_update_record(name, A, dwTimeStamp=n_days_ago)

        time_A = self.dns_update_record(name, A).dwTimeStamp

        # here we are re-adding the deleted record
        time_B = self.dns_update_record(name, B).dwTimeStamp
        self.assert_soon_after(time_B, current_time)

        time_A = self.get_unique_txt_record(name, A).dwTimeStamp
        return n_days_ago, time_A, time_B
+
+ def test_update_timestamp_weirdness_no_refresh_no_aging(self):
+ n_days_ago, time_A, time_B = \
+ self._test_update_timestamp_weirdness(5, False)
+ # the timestamp of the SIBLING of the deleted, re-added record
+ # differs from the sibling of freshly added record.
+ self.assert_timestamps_equal(time_A, n_days_ago)
+
+ def test_update_timestamp_weirdness_no_refresh_aging(self):
+ n_days_ago, time_A, time_B = \
+ self._test_update_timestamp_weirdness(5, True)
+ # the timestamp of the SIBLING of the deleted, re-added record
+ # differs from the sibling of freshly added record.
+ self.assert_timestamps_equal(time_A, n_days_ago)
+
+ def test_update_timestamp_weirdness_refresh_no_aging(self):
+ n_days_ago, time_A, time_B = \
+ self._test_update_timestamp_weirdness(9, False)
+ self.assert_timestamps_equal(time_A, time_B)
+
+ def test_update_timestamp_weirdness_refresh_aging(self):
+ n_days_ago, time_A, time_B = \
+ self._test_update_timestamp_weirdness(9, True)
+ self.assert_timestamps_equal(time_A, time_B)
+
+ def test_aging_refresh(self):
+ name, txt = 'agingtest', ['test txt']
+ no_refresh = 200
+ refresh = 160
+ self.set_zone_int_params(NoRefreshInterval=no_refresh,
+ RefreshInterval=refresh,
+ Aging=1)
+ before_mod = self.dns_update_record(name, txt)
+ start_time = before_mod.dwTimeStamp
+
+ # go back 86 hours, which is in the no-refresh time (but
+ # wouldn't be if we had stuck to the default of 168).
+ self.ldap_modify_timestamps(name, -170)
+ rec = self.dns_update_record(name, txt)
+ self.assert_timestamps_equal(rec.dwTimeStamp,
+ start_time - 170)
+
+ # back to -202 hours, into the refresh zone
+ # the update should reset the timestamp to now.
+ self.ldap_modify_timestamps(name, -32)
+ rec = self.dns_update_record(name, txt)
+ self.assert_soon_after(rec.dwTimeStamp, start_time)
+
+ # back to -362 hours, beyond the end of the refresh period.
+ # Actually nothing changes at this time -- we can still
+ # refresh, but the record is liable for scavenging.
+ self.ldap_modify_timestamps(name, -160)
+ rec = self.dns_update_record(name, txt)
+ self.assert_soon_after(rec.dwTimeStamp, start_time)
+
+ def test_add_no_timestamp(self):
+ # check zero timestamp is implicit
+ self.set_aging(True)
+ rec = self.ldap_update_record('ldap', 'test')
+ self.assertEqual(rec.dwTimeStamp, 0)
+ rec = self.rpc_update_record('rpc', 'test')
+ self.assertEqual(rec.dwTimeStamp, 0)
+
+ def test_add_zero_timestamp(self):
+ rec = self.ldap_update_record('ldap', 'test', dwTimeStamp=0)
+ self.assertEqual(rec.dwTimeStamp, 0)
+ rec = self.rpc_update_record('rpc', 'test', dwTimeStamp=0)
+ self.assertEqual(rec.dwTimeStamp, 0)
+
+ def test_add_update_timestamp(self):
+ # LDAP can change timestamp, RPC can't
+ rec = self.ldap_update_record('ldap', 'test', dwTimeStamp=123456)
+ self.assertEqual(rec.dwTimeStamp, 123456)
+ rec = self.rpc_update_record('rpc', 'test', dwTimeStamp=123456)
+ self.assertEqual(rec.dwTimeStamp, 0)
+ # second time is a different code path (add vs update)
+ rec = self.rpc_update_record('rpc', 'test', dwTimeStamp=123456)
+ self.assertEqual(rec.dwTimeStamp, 0)
+ # RPC update the one with timestamp, zeroing it.
+ rec = self.rpc_update_record('ldap', 'test', dwTimeStamp=123456)
+ self.assertEqual(rec.dwTimeStamp, 0)
+
+ def test_add_update_ttl(self):
+ # RPC *can* set dwTtlSeconds.
+ rec = self.ldap_update_record('ldap', 'test',
+ dwTtlSeconds=1234)
+ self.assertEqual(rec.dwTtlSeconds, 1234)
+ rec = self.rpc_update_record('rpc', 'test', dwTtlSeconds=1234)
+ self.assertEqual(rec.dwTtlSeconds, 1234)
+ # does update work like add?
+ rec = self.rpc_update_record('rpc', 'test', dwTtlSeconds=4321)
+ self.assertEqual(rec.dwTtlSeconds, 4321)
+ rec = self.rpc_update_record('ldap', 'test', dwTtlSeconds=5678)
+ self.assertEqual(rec.dwTtlSeconds, 5678)
+
+ def test_add_update_ttl_serial(self):
+ # when setting dwTtlSeconds, what happens to serial number?
+ rec = self.ldap_update_record('ldap', 'test',
+ dwTtlSeconds=1234,
+ dwSerial=123)
+ self.assertEqual(rec.dwTtlSeconds, 1234)
+ self.assertEqual(rec.dwSerial, 123)
+ rec = self.rpc_update_record('rpc', 'test', dwTtlSeconds=1234)
+ self.assertEqual(rec.dwTtlSeconds, 1234)
+ serial = rec.dwSerial
+ self.assertLess(serial, 4)
+ rec = self.rpc_update_record('rpc', 'test', dwTtlSeconds=4321)
+ self.assertEqual(rec.dwTtlSeconds, 4321)
+ self.assertEqual(rec.dwSerial, serial + 1)
+ rec = self.rpc_update_record('ldap', 'test', dwTtlSeconds=5678)
+ self.assertEqual(rec.dwTtlSeconds, 5678)
+ self.assertEqual(rec.dwSerial, 124)
+
+ def test_add_update_dwFlags(self):
+ # dwFlags splits into rank and flags.
+ # according to [MS-DNSP] 2.3.2.2, flags MUST be zero
+ rec = self.ldap_update_record('ldap', 'test', flags=22222, rank=222)
+ self.assertEqual(rec.flags, 22222)
+ self.assertEqual(rec.rank, 222)
+
+ rec = self.rpc_update_record('ldap', 'test', dwFlags=3333333)
+ # rank != 3333333 & 0xff == 213
+ self.assertEqual(rec.rank, 240) # RPC fixes rank
+ self.assertEqual(rec.flags, 0)
+
+ self.assertRaises(OverflowError,
+ self.ldap_update_record,
+ 'ldap', 'test', flags=777777777, rank=777)
+
+ # reset to no default (rank overflows)
+ rec = self.ldap_update_record('ldap', 'test', flags=7777, rank=777)
+ self.assertEqual(rec.flags, 7777)
+ self.assertEqual(rec.rank, 9)
+
+ # DNS update zeros flags, sets rank to 240 (RANK_ZONE)
+ rec = self.dns_update_record('ldap', 'test', ttl=999)
+ self.assertEqual(rec.flags, 0)
+ self.assertEqual(rec.rank, 240)
+
+ rec = self.rpc_update_record('ldap', 'test', dwFlags=321)
+ self.assertEqual(rec.flags, 0)
+ self.assertEqual(rec.rank, 240)
+
+ # RPC adding a new record: fixed rank, zero flags
+ rec = self.rpc_update_record('ldap', 'test 2', dwFlags=12345)
+ self.assertEqual(rec.rank, 240)
+ self.assertEqual(rec.flags, 0)
+
+    def test_add_update_dwReserved(self):
+        """dwReserved is honoured by LDAP writes but never set by RPC:
+        a record created via RPC gets 0, and an RPC update of an
+        LDAP-created record leaves its existing value untouched."""
+        # RPC does not change dwReserved.
+        rec = self.ldap_update_record('ldap', 'test', dwReserved=54321)
+        self.assertEqual(rec.dwReserved, 54321)
+        # new RPC-created record: the requested value is ignored
+        rec = self.rpc_update_record('rpc', 'test', dwReserved=54321)
+        self.assertEqual(rec.dwReserved, 0)
+        rec = self.rpc_update_record('rpc', 'test', dwReserved=54321)
+        self.assertEqual(rec.dwReserved, 0)
+        # RPC update of the LDAP record: the old 54321 survives
+        rec = self.rpc_update_record('ldap', 'test', dwReserved=12345)
+        self.assertEqual(rec.dwReserved, 54321)
+
+    def test_add_update_dwSerial(self):
+        """dwSerial is honoured by LDAP, ignored by RPC on new records,
+        and used as a zone high-water mark once a DNS update has seen it."""
+        # On Windows the RPC record ends up with serial 2, on Samba
+        # serial 3. Rather than knownfail this, we accept anything
+        # below 4 (for now).
+        rec = self.ldap_update_record('ldap', 'test', dwSerial=123)
+        self.assertEqual(rec.dwSerial, 123)
+        rec = self.rpc_update_record('rpc', 'test', dwSerial=123)
+        self.assertLess(rec.dwSerial, 4)
+        rec = self.rpc_update_record('rpc', 'test', dwSerial=123)
+        self.assertLess(rec.dwSerial, 4)
+        rec = self.dns_update_record('rpc', 'test')
+        self.assertLess(rec.dwSerial, 4)
+        rec = self.dns_update_record('dns-0', 'test')
+        self.assertLess(rec.dwSerial, 5)
+
+        # the LDAP-set serial is preserved across DNS and RPC updates
+        rec = self.dns_update_record('ldap', 'test')
+        self.assertEqual(rec.dwSerial, 123)
+        rec = self.rpc_update_record('ldap', 'test', dwSerial=123)
+        self.assertEqual(rec.dwSerial, 123)
+        rec = self.ldap_update_record('ldap', 'test', dwSerial=12)
+        self.assertEqual(rec.dwSerial, 12)
+        # when we dns-updated ldap/test, we alerted Windows to 123 as
+        # a high water mark for the zone. (even though we have since
+        # dropped the serial to 12, 123 is the base serial for new
+        # records).
+        rec = self.dns_update_record('dns', 'test')
+        self.assertEqual(rec.dwSerial, 124)
+        rec = self.dns_update_record('dns2', 'test')
+        self.assertEqual(rec.dwSerial, 125)
+        rec = self.rpc_update_record('rpc2', 'test')
+        self.assertEqual(rec.dwSerial, 126)
+        rec = self.dns_update_record('dns', 'test 2')
+        self.assertEqual(rec.dwSerial, 127)
+
+    def test_add_update_dwSerial_2(self):
+        """An RPC update must not replace an existing LDAP-set serial."""
+        # On Samba the RPC update resets the serial to a low number,
+        # while Windows leaves it high.
+        rec = self.ldap_update_record('ldap', 'test', dwSerial=123)
+        self.assertEqual(rec.dwSerial, 123)
+        # requested serial 321 is ignored; 123 must survive
+        rec = self.rpc_update_record('ldap', 'test', dwSerial=321)
+        self.assertEqual(rec.dwSerial, 123)
+        rec = self.dns_update_record('ldap', 'test')
+        self.assertEqual(rec.dwSerial, 123)
+
+    def test_rpc_update_disparate_types(self):
+        """Can we use update to replace a TXT with an A record?"""
+        # NOTE(review): the original docstring said "AAAA", but the code
+        # builds an ARecord; the docstring above follows the code.
+        name = 'x'
+        old = TXTRecord("x")
+        new = ARecord("127.0.0.111")
+        # first add the TXT record (no previous record to replace)
+        self.rpc_replace(name, None, old)
+        recs = self.ldap_get_records(name)
+        self.assertEqual(len(recs), 1)
+        self.assertEqual(recs[0].wType, old.wType)
+
+        # now replace the TXT with the A record of a different wType
+        self.rpc_replace(name, old, new)
+        recs = self.ldap_get_records(name)
+        self.assertEqual(len(recs), 1)
+        self.assertEqual(recs[0].wType, new.wType)
+
+    def test_add_update_many(self):
+        """Walk one record through LDAP, DNS and RPC updates, checking
+        every header field at each step; known-bad assertions are
+        deferred so the whole sequence runs before any failure."""
+        # Samba fails often in this set, but we want to see how it
+        # goes further down, so we print the problems and defer the
+        # failure.
+        failures = 0
+        total = 0
+
+        # _defer_wrap converts a hard assertion into a counted, printed
+        # soft failure; the totals are checked at the end of the test.
+        def _defer_wrap(f):
+            def _defer(*args):
+                nonlocal failures, total
+                total += 1
+                try:
+                    f(*args)
+                except self.failureException as e:
+                    from traceback import format_stack
+                    # print the caller's stack frame so the failing
+                    # assertion line is identifiable in the output
+                    print(f"{format_stack()[-2]} {e}\n")
+                    failures += 1
+            return _defer
+
+        defer_assertEqual = _defer_wrap(self.assertEqual)
+        defer_assert_timestamp_in_ballpark = \
+            _defer_wrap(self.assert_timestamp_in_ballpark)
+
+        self.set_aging(False)
+        rec = self.ldap_update_record('ldap', 'test',
+                                      version=11,
+                                      rank=22,
+                                      flags=33,
+                                      dwSerial=44,
+                                      dwTtlSeconds=55,
+                                      dwReserved=66,
+                                      dwTimeStamp=77)
+
+        self.assertEqual(rec.version, 5)  # disobeys request
+        self.assertEqual(rec.rank, 22)
+        self.assertEqual(rec.flags, 33)
+        self.assertEqual(rec.dwSerial, 44)
+        self.assertEqual(rec.dwTtlSeconds, 55)
+        self.assertEqual(rec.dwReserved, 66)
+        self.assertEqual(rec.dwTimeStamp, 77)
+        # DNS updates first
+        rec = self.dns_update_record('ldap', 'test', ttl=999)
+        self.assertEqual(rec.version, 5)
+        self.assertEqual(rec.rank, 240)      # rank gets fixed by DNS update
+        defer_assertEqual(rec.flags, 0)      # flags gets fixed
+        defer_assertEqual(rec.dwSerial, 45)  # serial increments
+        self.assertEqual(rec.dwTtlSeconds, 999)  # TTL set
+        defer_assertEqual(rec.dwReserved, 0)     # reserved fixed
+        defer_assert_timestamp_in_ballpark(rec)  # changed on Windows ?!
+
+        self.set_aging(True)
+        rec = self.dns_update_record('ldap', 'test', ttl=1111)
+        self.assertEqual(rec.version, 5)
+        self.assertEqual(rec.rank, 240)
+        defer_assertEqual(rec.flags, 0)
+        defer_assertEqual(rec.dwSerial, 46)
+        self.assertEqual(rec.dwTtlSeconds, 1111)  # TTL set
+        defer_assertEqual(rec.dwReserved, 0)
+        self.assert_timestamp_in_ballpark(rec)
+
+        # RPC update
+        rec = self.rpc_update_record('ldap', 'test',
+                                     version=111,
+                                     dwFlags=333,
+                                     dwSerial=444,
+                                     dwTtlSeconds=555,
+                                     dwReserved=666,
+                                     dwTimeStamp=777)
+
+        self.assertEqual(rec.version, 5)     # no change
+        self.assertEqual(rec.rank, 240)      # no change
+        defer_assertEqual(rec.flags, 0)      # no change
+        defer_assertEqual(rec.dwSerial, 47)  # Serial increments
+        self.assertEqual(rec.dwTtlSeconds, 555)  # TTL set
+        defer_assertEqual(rec.dwReserved, 0)     # no change
+        self.assertEqual(rec.dwTimeStamp, 0)     # timestamp zeroed
+
+        # RPC update, using default values
+        rec = self.rpc_update_record('ldap', 'test')
+        self.assertEqual(rec.version, 5)
+        self.assertEqual(rec.rank, 240)
+        defer_assertEqual(rec.flags, 0)
+        defer_assertEqual(rec.dwSerial, 48)  # serial increments
+        self.assertEqual(rec.dwTtlSeconds, 900)  # TTL changed
+        defer_assertEqual(rec.dwReserved, 0)
+        self.assertEqual(rec.dwTimeStamp, 0)
+
+        self.set_aging(False)
+        rec = self.dns_update_record('ldap', 'test', ttl=888)
+        self.assertEqual(rec.version, 5)
+        self.assertEqual(rec.rank, 240)
+        defer_assertEqual(rec.flags, 0)
+        defer_assertEqual(rec.dwSerial, 49)  # serial increments
+        self.assertEqual(rec.dwTtlSeconds, 888)  # TTL set
+        defer_assertEqual(rec.dwReserved, 0)
+        self.assertEqual(rec.dwTimeStamp, 0)  # timestamp stays zero
+
+        if failures:
+            self.fail(f"failed {failures}/{total} deferred assertions")
+
+    def test_static_record_dynamic_update(self):
+        """Add a static record, then a dynamic record.
+        The dynamic record should have a timestamp set."""
+        name = 'test'
+        txt = ['static txt']
+        txt2 = ['dynamic txt']
+        self.set_aging(True)
+        # dwTimeStamp=0 marks the first record as static
+        rec = self.ldap_update_record(name, txt, dwTimeStamp=0)
+        rec2 = self.dns_update_record(name, txt2)
+        self.assert_timestamp_in_ballpark(rec2)
+        ts2 = rec2.dwTimeStamp
+        # update the first record. It should stay static (timestamp 0)
+        rec = self.dns_update_record(name, txt)
+        self.assertEqual(rec.dwTimeStamp, 0)
+        # and rec2 should be unchanged.
+        self.assertEqual(rec2.dwTimeStamp, ts2)
+
+    def test_dynamic_record_static_update(self):
+        """Interleave DNS and LDAP updates on one name: the DNS-added
+        values get real timestamps, the LDAP-added value stays static
+        (timestamp 0)."""
+        name = 'agingtest'
+        txt1 = ['dns update before']
+        txt2 = ['ldap update']
+        txt3 = ['dns update after']
+        self.set_aging(True)
+
+        self.dns_update_record(name, txt1)
+        self.ldap_update_record(name, txt2)
+        self.dns_update_record(name, txt3)
+
+        recs = self.get_rpc_records(name)
+        # only the three known values are checked; any other record
+        # would pass through this loop unasserted
+        for r in recs:
+            d = [x.str for x in r.data.str]
+            if d == txt1:
+                self.assertNotEqual(r.dwTimeStamp, 0)
+            elif d == txt2:
+                self.assertEqual(r.dwTimeStamp, 0)
+            elif d == txt3:
+                self.assertNotEqual(r.dwTimeStamp, 0)
+
+    def test_tombstone_in_hours_and_nttime(self):
+        """Tombstone timestamps may be stored in hours or in NTTIME
+        units; _dns_delete_tombstones must treat both correctly."""
+        # Until now Samba has measured tombstone timestamps in hours,
+        # not ten-millionths of a second. After now, we want Samba to
+        # handle both.
+
+        # short names for: new/old x hours/nttime, plus the NTTIME
+        # variants with the low 32 bits forced to all-zero / all-one,
+        # and a value just over the 1601 epoch threshold.
+        nh, oh, nn, on, on0, onf, nn0, nnf, _1601 = 'abcdefgij'
+        now_hours = dsdb_dns.unix_to_dns_timestamp(int(time.time()))
+        old_hours = now_hours - 24 * 90
+        now_nttime = dsdb_dns.dns_timestamp_to_nt_time(now_hours)
+        old_nttime = dsdb_dns.dns_timestamp_to_nt_time(old_hours)
+        # calculations on hours might be based on the lower 32 bits,
+        # so we test with these forced to extremes (the maximum change
+        # is 429 seconds in NTTIME).
+        old_nttime0 = old_nttime & 0xffffffff00000000
+        old_nttimef = old_nttime | 0xffffffff
+        now_nttime0 = now_nttime & 0xffffffff00000000
+        now_nttimef = now_nttime | 0xffffffff
+        # hour-scale values are deliberately passed through the nttime
+        # field, to exercise the unit-detection heuristic
+        self.dns_tombstone(nh, epoch_nttime=now_hours)
+        self.dns_tombstone(oh, epoch_nttime=old_hours)
+        self.dns_tombstone(nn, epoch_nttime=now_nttime)
+        self.dns_tombstone(on, epoch_nttime=old_nttime)
+        self.dns_tombstone(nn0, epoch_nttime=now_nttime0)
+        self.dns_tombstone(nnf, epoch_nttime=now_nttimef)
+        self.dns_tombstone(on0, epoch_nttime=old_nttime0)
+        self.dns_tombstone(onf, epoch_nttime=old_nttimef)
+        # this is our (arbitrary) threshold that will make us think in
+        # NTTIME, not hours.
+        self.dns_tombstone(_1601, epoch_nttime=(10 * 1000 * 1000 + 1))
+
+        # this test needs direct file access to the sam, which only
+        # works against Samba (hence the Windows-detecting failure)
+        try:
+            file_samdb = get_file_samdb()
+        except ldb.LdbError as e:
+            raise AssertionError(
+                f"failing because '{e}': this is Windows?") from None
+        dsdb._dns_delete_tombstones(file_samdb)
+
+        # nh and nn should not be deleted
+        for name in nh, nn, nn0, nnf:
+            recs = self.ldap_get_records(name)
+            self.assertEqual(len(recs), 1)
+            self.assert_tombstoned(name, timestamp=False)
+
+        # oh and on should be GONE
+        for name in oh, on, on0, onf, _1601:
+            recs = self.ldap_get_records(name)
+            self.assertEqual(len(recs), 0)
+
+    def test_dns_query_for_tombstoned_results(self):
+        """A tombstoned record must not show up in DNS query answers."""
+        # This one fails on Windows, because the dns cache holds B
+        # after it has been tombstoned behind its back.
+        A = 'a'
+        B = 'b'
+        # A is tombstoned without ever being served via DNS
+        self.dns_tombstone(A)
+        self.assert_tombstoned(A)
+        r = self.dns_query(A, qtype=dns.DNS_QTYPE_TXT)
+        self.assertEqual(r.ancount, 0)
+
+        # B goes through a DNS update first, so the server's cache has
+        # seen it before the tombstone
+        self.dns_update_record(B, B)
+        self.dns_tombstone(B)
+        self.assert_tombstoned(B)
+        r = self.dns_query(B, qtype=dns.DNS_QTYPE_TXT)
+        self.assertEqual(r.ancount, 0)
+
+    def test_basic_scavenging(self):
+        """End-to-end scavenging: age a set of records, trigger the
+        clean-up (RPC on Windows, direct db calls on Samba), and check
+        which records survive, are tombstoned, or are gone."""
+        # NOTE: This one fails on Windows, because the RPC call to
+        # prompt scavenging is not immediate. On Samba, in the
+        # testenv, we don't have the RPC call but we can connect to
+        # the database directly.
+
+        # just to be sure we have the right limits.
+        self.set_zone_int_params(NoRefreshInterval=168,
+                                 RefreshInterval=168,
+                                 Aging=1)
+
+        ts1, ts2, ts3, ts4, ts5, ts6 = ('1', '2', '3', '4', '5', '6')
+        self.dns_update_record(ts1, ts1)
+        self.dns_update_record(ts2, ts2)
+        # ts2 is tombstoned and timestamped in 1981
+        self.dns_tombstone(ts2)
+        # ts3 is tombstoned and timestamped in the future
+        self.dns_tombstone(ts3, epoch_hours=(DNS_TIMESTAMP_2101 - 1))
+        # ts4 is tombstoned and timestamped in the past
+        self.dns_tombstone(ts4, epoch_hours=1111111)
+        # ts5 is tombstoned in the past and timestamped in the future
+        self.dns_tombstone(ts5, epoch_hours=5555555, epoch_nttime=int(1e10))
+
+        # ts2 and ts3 should now be tombstoned.
+        self.assert_tombstoned(ts2)
+        self.assert_tombstoned(ts3)
+
+        # let's un-tombstone ts2
+        # ending up with dnsTombstoned: FALSE in Samba
+        # and no dNSTombstoned in Windows.
+        self.dns_update_record(ts2, "ts2 untombstoned")
+        ts2_node = self.get_one_node(ts2)
+        ts2_tombstone = ts2_node.get("dNSTombstoned")
+        # only assert the value when the attribute exists (Samba case)
+        if ts2_tombstone is not None:
+            self.assertEqual(ts2_tombstone[0], b"FALSE")
+
+        self.assert_tombstoned(ts2, tombstoned=False)
+
+        r = self.dns_update_record(ts6, ts6)
+
+        # put some records into the death zone.
+        # (offsets are in hours: 14 days is the scavenging boundary)
+        self.ldap_modify_timestamps(ts1, -15 * 24)
+        self.ldap_modify_timestamps(ts2, -14 * 24 - 2)
+        self.ldap_modify_timestamps(ts6, -14 * 24 + 2)
+
+        # ts1 will be saved by this record
+        self.dns_update_record(ts1, "another record")
+
+        try:
+            # Tell the server to clean-up records.
+            # This is how it *should* work on Windows:
+            self.rpc_conn.DnssrvOperation2(
+                dnsserver.DNS_CLIENT_VERSION_LONGHORN,
+                0,
+                SERVER_IP,
+                None,
+                0,
+                "StartScavenging",
+                dnsserver.DNSSRV_TYPEID_NULL,
+                None)
+            # Samba won't get here (NOT_IMPLEMENTED error)
+            # wait for Windows to do its cleanup.
+            time.sleep(2)
+        except WERRORError as e:
+            if e.args[0] == werror.WERR_CALL_NOT_IMPLEMENTED:
+                # This is the Samba way, talking to the file directly,
+                # as if we were the server process. The direct
+                # connection is needed because the tombstoning search
+                # involves a magic system only filter.
+                file_samdb = get_file_samdb()
+                dsdb._scavenge_dns_records(file_samdb)
+                dsdb._dns_delete_tombstones(file_samdb)
+            else:
+                raise
+
+        # Now what we should have:
+        # ts1: alive: the old record is deleted, the new one not.
+        # ts2: tombstoned
+        # ts3: tombstoned
+        # ts4: deleted. gone.
+        # ts5: deleted. timestamp affects tombstoning, but not deletion.
+        # ts6: alive
+        #
+        # We order our assertions to make the windows test
+        # fail as late as possible (on ts4, ts5, ts2).
+        r = self.get_unique_txt_record(ts1, ["another record"])
+        self.assertIsNotNone(r)
+        r = self.get_unique_txt_record(ts6, [ts6])
+        self.assertIsNotNone(r)
+
+        self.assert_tombstoned(ts3)
+
+        n = self.get_one_node(ts4)
+        self.assertIsNone(n)
+        n = self.get_one_node(ts5)
+        self.assertIsNone(n)
+
+        self.assert_tombstoned(ts2)
+
+    def test_samba_scavenging(self):
+        """Samba-specific scavenging via direct database calls:
+        scavenge, tombstone, and delete-tombstones on demand."""
+        # We expect this one to fail on Windows, because scavenging
+        # and tombstoning cannot be performed on demand.
+
+        try:
+            file_samdb = get_file_samdb()
+        except ldb.LdbError as e:
+            raise AssertionError(
+                f"failing because '{e}': this is Windows?") from None
+
+        # let's try different limits.
+        self.set_zone_int_params(NoRefreshInterval=30,
+                                 RefreshInterval=20,
+                                 Aging=1)
+
+        now = dsdb_dns.unix_to_dns_timestamp(int(time.time()))
+
+        A, B, C, D = 'ABCD'
+        # A has current time
+        # B has safe, non-updateable time
+        # C has safe time
+        # D is scavengeable
+        # (offsets are in hours; the zone intervals above are in hours)
+        atime = self.dns_update_record(A, A).dwTimeStamp
+        btime = self.ldap_update_record(B, B, dwTimeStamp=now-20).dwTimeStamp
+        ctime = self.ldap_update_record(C, C, dwTimeStamp=now-40).dwTimeStamp
+        dtime = self.ldap_update_record(D, D, dwTimeStamp=now-60).dwTimeStamp
+        self.assert_soon_after(atime, now)
+        self.assert_timestamps_equal(btime, now-20)
+        self.assert_timestamps_equal(ctime, now-40)
+        self.assert_timestamps_equal(dtime, now-60)
+
+        dsdb._scavenge_dns_records(file_samdb)
+
+        # D should be gone (tombstoned)
+        r = self.get_unique_txt_record(D, D)
+        self.assertIsNone(r)
+        r = self.dns_query(D, qtype=dns.DNS_QTYPE_TXT)
+        self.assertEqual(r.ancount, 0)
+        recs = self.ldap_get_records(D)
+        self.assertEqual(len(recs), 1)
+        self.assert_tombstoned(D)
+
+        # others unchanged.
+        atime = self.get_unique_txt_record(A, A).dwTimeStamp
+        btime = self.get_unique_txt_record(B, B).dwTimeStamp
+        ctime = self.get_unique_txt_record(C, C).dwTimeStamp
+        self.assert_soon_after(atime, now)
+        self.assert_timestamps_equal(btime, now-20)
+        self.assert_timestamps_equal(ctime, now-40)
+
+        # B is inside the no-refresh window so its DNS update is a
+        # no-op; C is in the refresh window so it gets the current time
+        btime = self.dns_update_record(B, B).dwTimeStamp
+        ctime = self.dns_update_record(C, C).dwTimeStamp
+        self.assert_timestamps_equal(btime, now-40)
+        self.assert_soon_after(ctime, now)
+
+        # after this, D *should* still be a tombstone, because its
+        # tombstone timestamp is not very old.
+        dsdb._dns_delete_tombstones(file_samdb)
+        recs = self.ldap_get_records(D)
+        self.assertEqual(len(recs), 1)
+        self.assert_tombstoned(D)
+
+        # Let's delete C using rpc, and ensure it survives dns_delete_tombstones
+        self.rpc_delete_txt(C, C)
+        recs = self.ldap_get_records(C)
+        self.assertEqual(len(recs), 1)
+        self.assert_tombstoned(C)
+        dsdb._dns_delete_tombstones(file_samdb)
+        recs = self.ldap_get_records(C)
+        self.assertEqual(len(recs), 1)
+        self.assert_tombstoned(C)
+
+        # now let's wind A and B back to either side of the two week
+        # threshold. A should survive, B should not.
+        self.dns_tombstone(A, (now - 166))
+        self.dns_tombstone(B, (now - 170))
+        dsdb._dns_delete_tombstones(file_samdb)
+
+        recs = self.ldap_get_records(A)
+        self.assertEqual(len(recs), 1)
+        self.assert_tombstoned(A)
+
+        recs = self.ldap_get_records(B)
+        self.assertEqual(len(recs), 0)
+
+    def _test_A_and_AAAA_records(self, A, B, a_days, b_days, aging):
+        """Shared body for the A/AAAA sibling timestamp tests.
+
+        A, B are IP address strings (v4 or v6); a_days/b_days are the
+        initial ages of the two records in days; aging toggles zone
+        aging. Checks how touching, rewinding and re-adding one record
+        affects the other's timestamp."""
+        self.set_aging(aging)
+
+        name = 'aargh'
+        now = dsdb_dns.unix_to_dns_timestamp(int(time.time()))
+        # timestamps are in hours
+        a_initial = now - 24 * a_days
+        b_initial = now - 24 * b_days
+
+        self.dns_update_non_text(name, A)
+        self.ldap_modify_timestamps(name, a_days * -24)
+
+        rec_a = self.get_unique_ip_record(name, A)
+        rec_b = self.add_ip_record(name, B, dwTimeStamp=b_initial)
+
+        self.assert_timestamps_equal(rec_a, a_initial)
+        self.assert_timestamps_equal(rec_b, b_initial)
+
+        # touch the A record.
+        self.dns_update_non_text(name, A)
+
+        # check the A timestamp, depending on norefresh
+        rec_a = self.get_unique_ip_record(name, A)
+        if aging and a_days > 7:
+            time_a = now
+            self.assert_soon_after(rec_a, now)
+        elif a_days > 7:
+            # when we have NO aging and are in the refresh window, the
+            # timestamp now reads as a_initial, but will become now
+            # after we manipulate B for a bit.
+            time_a = now
+            self.assert_timestamps_equal(rec_a, a_initial)
+        else:
+            time_a = a_initial
+            self.assert_timestamps_equal(rec_a, a_initial)
+
+        # B timestamp should be unchanged?
+        rec_b = self.get_unique_ip_record(name, B)
+        self.assert_timestamps_equal(rec_b, b_initial)
+
+        # touch the B record.
+        self.dns_update_non_text(name, B)
+
+        # check the B timestamp
+        rec_b = self.get_unique_ip_record(name, B)
+        if not aging:
+            self.windows_variation(
+                self.assert_soon_after, rec_b, now,
+                msg="windows updates non-aging, samba does not")
+        else:
+            self.assert_soon_after(rec_b, now)
+
+        # rewind B
+        rec_b = self.add_ip_record(name, B, dwTimeStamp=b_initial)
+
+        # NOW rec A might have changed! with no aging, and out of refresh.
+        rec_a = self.get_unique_ip_record(name, A)
+        self.assert_timestamps_equal(rec_a, time_a)
+
+        self.dns_update_non_text(name, A)
+
+        # NOTE(review): the next line fetches record B into rec_a and
+        # the assertion re-checks the stale rec_b value; this looks
+        # like a copy-paste slip (probably meant to fetch/check A) —
+        # confirm against upstream before relying on it.
+        rec_a = self.get_unique_ip_record(name, B)
+        self.assert_timestamps_equal(rec_b, b_initial)
+
+        # now delete A
+        _, wtype = guess_wtype(A)
+        self.ldap_delete_record(name, A, wtype=wtype)
+
+        # re-add it
+        self.dns_update_non_text(name, A)
+
+        # a freshly re-added record gets the current timestamp, and B
+        # is untouched
+        rec_a = self.get_unique_ip_record(name, A)
+        self.assert_soon_after(rec_a, now)
+
+        rec_b = self.get_unique_ip_record(name, B)
+        self.assert_timestamps_equal(rec_b, b_initial)
+
+    # Parameter sweep over _test_A_and_AAAA_records: (addr A, addr B,
+    # A age in days, B age in days, aging on/off).
+    def test_A_5_days_AAAA_5_days_aging(self):
+        self._test_A_and_AAAA_records(IPv4_ADDR, IPv6_ADDR, 5, 5, aging=True)
+
+    def test_A_5_days_AAAA_5_days_no_aging(self):
+        self._test_A_and_AAAA_records(IPv4_ADDR, IPv6_ADDR, 5, 5, aging=False)
+
+    def test_A_5_days_AAAA_10_days_aging(self):
+        self._test_A_and_AAAA_records(IPv4_ADDR, IPv6_ADDR, 5, 10, aging=True)
+
+    def test_A_5_days_AAAA_10_days_no_aging(self):
+        self._test_A_and_AAAA_records(IPv4_ADDR, IPv6_ADDR, 5, 10, aging=False)
+
+    def test_A_10_days_AAAA_5_days_aging(self):
+        self._test_A_and_AAAA_records(IPv4_ADDR, IPv6_ADDR, 10, 5, aging=True)
+
+    def test_A_10_days_AAAA_5_days_no_aging(self):
+        self._test_A_and_AAAA_records(IPv4_ADDR, IPv6_ADDR, 10, 5, aging=False)
+
+    def test_A_10_days_AAAA_9_days_aging(self):
+        self._test_A_and_AAAA_records(IPv4_ADDR, IPv6_ADDR, 10, 9, aging=True)
+
+    def test_A_9_days_AAAA_10_days_no_aging(self):
+        self._test_A_and_AAAA_records(IPv4_ADDR, IPv6_ADDR, 9, 10, aging=False)
+
+    def test_A_20_days_AAAA_2_days_aging(self):
+        self._test_A_and_AAAA_records(IPv4_ADDR, IPv6_ADDR, 20, 2, aging=True)
+
+    def test_A_6_days_AAAA_40_days_no_aging(self):
+        self._test_A_and_AAAA_records(IPv4_ADDR, IPv6_ADDR, 6, 40, aging=False)
+
+    # same-type pairs: two A records, then two AAAA records
+    def test_A_5_days_A_5_days_aging(self):
+        self._test_A_and_AAAA_records(IPv4_ADDR, IPv4_ADDR_2, 5, 5, aging=True)
+
+    def test_A_5_days_A_10_days_no_aging(self):
+        self._test_A_and_AAAA_records(IPv4_ADDR, IPv4_ADDR_2, 5, 10, aging=False)
+
+    def test_AAAA_5_days_AAAA_6_days_aging(self):
+        self._test_A_and_AAAA_records(IPv6_ADDR, IPv6_ADDR_2, 5, 6, aging=True)
+
+    def test_AAAA_5_days_AAAA_6_days_no_aging(self):
+        self._test_A_and_AAAA_records(IPv6_ADDR, IPv6_ADDR_2, 5, 6, aging=False)
+
+    def _test_multi_records_delete(self, aging):
+        """Deleting one record type in a batch must not touch the
+        timestamps of the other types on the same name."""
+        # Batch deleting a type doesn't update other types timestamps.
+        self.set_aging(aging)
+
+        name = 'aargh'
+        now = dsdb_dns.unix_to_dns_timestamp(int(time.time()))
+
+        # ages are in hours
+        back_5_days = now - 5 * 24
+        back_10_days = now - 10 * 24
+        back_25_days = now - 25 * 24
+
+        # value -> intended timestamp, per record type
+        ip4s = {
+            '1.1.1.1': now,
+            '2.2.2.2': back_5_days,
+            '3.3.3.3': back_10_days,
+        }
+        ip6s = {
+            '::1': now,
+            '::2': back_5_days,
+            '::3': back_25_days,
+        }
+
+        txts = {
+            '1': now,
+            '2': back_5_days,
+            '3': back_25_days,
+        }
+
+        # For windows, if we don't DNS update something, it won't know
+        # there's anything.
+        self.dns_update_record(name, '3')
+
+        for k, v in ip4s.items():
+            r = self.add_ip_record(name, k, wtype=dns.DNS_QTYPE_A, dwTimeStamp=v)
+
+        for k, v in ip6s.items():
+            r = self.add_ip_record(name, k, wtype=dns.DNS_QTYPE_AAAA, dwTimeStamp=v)
+
+        for k, v in txts.items():
+            r = self.ldap_update_record(name, k, dwTimeStamp=v)
+
+        # batch-delete all the A records
+        self.dns_delete_type(name, dnsp.DNS_TYPE_A)
+
+        r = self.dns_query(name, dns.DNS_QTYPE_A)
+        self.assertEqual(r.ancount, 0)
+
+        r = self.dns_query(name, dns.DNS_QTYPE_TXT)
+        self.assertEqual(r.ancount, 3)
+        rset = set(x.rdata.txt.str[0] for x in r.answers)
+        self.assertEqual(rset, set(txts))
+
+        r = self.dns_query(name, dns.DNS_QTYPE_AAAA)
+        self.assertEqual(r.ancount, 3)
+        rset = set(ipv6_normalise(x.rdata) for x in r.answers)
+        self.assertEqual(rset, set(ip6s))
+
+        # the 6 survivors (3 AAAA + 3 TXT) keep their original
+        # timestamps; an unexpected type is a test failure
+        recs = self.ldap_get_records(name)
+        self.assertEqual(len(recs), 6)
+        for r in recs:
+            if r.wType == dns.DNS_QTYPE_AAAA:
+                k = ipv6_normalise(r.data)
+                expected = ip6s[k]
+            elif r.wType == dns.DNS_QTYPE_TXT:
+                k = r.data.str[0]
+                expected = txts[k]
+            else:
+                self.fail(f"unexpected wType {r.wType}")
+
+            self.assert_timestamps_equal(r.dwTimeStamp, expected)
+
+    # aging on/off variants of _test_multi_records_delete
+    def test_multi_records_delete_aging(self):
+        self._test_multi_records_delete(True)
+
+    def test_multi_records_delete_no_aging(self):
+        self._test_multi_records_delete(False)
+
+    def _test_dns_delete_times(self, n_days, aging=True):
+        """Delete TXT records one by one (via DNS and RPC) and check
+        the surviving siblings' timestamps are untouched.
+
+        n_days is the age given to the LDAP-backdated records; aging
+        toggles zone aging."""
+        # In these tests, Windows replaces the records with
+        # tombstones, while Samba just removes them. Both are
+        # reasonable approaches (there is no reanimation pathway for
+        # tombstones), but this means self.ldap_get_records() gets
+        # different numbers for each. So we use
+        # self.ldap_get_non_tombstoned_record().
+        name = 'test'
+        A = ['A']
+        B = ['B']
+        C = ['C']
+        D = ['D']
+        self.set_aging(aging)
+        now = dsdb_dns.unix_to_dns_timestamp(int(time.time()))
+        # clamp at 0 so huge n_days values mean "static"
+        n_days_ago = max(now - n_days * 24, 0)
+
+        self.dns_update_record(name, A)
+        self.ldap_update_record(name, A, dwTimeStamp=n_days_ago)
+        self.ldap_update_record(name, B, dwTimeStamp=n_days_ago)
+        self.ldap_update_record(name, C, dwTimeStamp=n_days_ago)
+        self.dns_update_record(name, D)
+        r = self.dns_query(name, dns.DNS_QTYPE_TXT)
+        rset = set(x.rdata.txt.str[0] for x in r.answers)
+        self.assertEqual(rset, set('ABCD'))
+
+        atime = self.get_unique_txt_record(name, A).dwTimeStamp
+        btime = self.get_unique_txt_record(name, B).dwTimeStamp
+        ctime = self.get_unique_txt_record(name, C).dwTimeStamp
+        dtime = self.get_unique_txt_record(name, D).dwTimeStamp
+        recs = self.ldap_get_records(name)
+        self.assertEqual(len(recs), 4)
+        r = self.dns_query(name, dns.DNS_QTYPE_TXT)
+        rset = set(x.rdata.txt.str[0] for x in r.answers)
+        self.assertEqual(rset, set('ABCD'))
+
+        self.assert_timestamps_equal(dtime, self.get_unique_txt_record(name, D))
+
+        # delete D over DNS; A, B, C timestamps must be unchanged
+        self.dns_delete(name, D)
+        self.assert_timestamps_equal(atime, self.get_unique_txt_record(name, A))
+        self.assert_timestamps_equal(btime, self.get_unique_txt_record(name, B))
+        self.assert_timestamps_equal(ctime, self.get_unique_txt_record(name, C))
+        recs = self.ldap_get_non_tombstoned_records(name)
+        self.assertEqual(len(recs), 3)
+        r = self.dns_query(name, dns.DNS_QTYPE_TXT)
+        rset = set(x.rdata.txt.str[0] for x in r.answers)
+        self.assertEqual(rset, set('ABC'))
+
+        # delete C over RPC; A and B timestamps must be unchanged
+        self.rpc_delete_txt(name, C)
+        self.assert_timestamps_equal(atime, self.get_unique_txt_record(name, A))
+        self.assert_timestamps_equal(btime, self.get_unique_txt_record(name, B))
+        recs = self.ldap_get_non_tombstoned_records(name)
+        self.assertEqual(len(recs), 2)
+        r = self.dns_query(name, dns.DNS_QTYPE_TXT)
+        rset = set(x.rdata.txt.str[0] for x in r.answers)
+        self.assertEqual(rset, set('AB'))
+
+        self.dns_delete(name, A)
+        self.assert_timestamps_equal(btime, self.get_unique_txt_record(name, B))
+        recs = self.ldap_get_records(name)
+        self.assertEqual(len(recs), 1)
+        r = self.dns_query(name, dns.DNS_QTYPE_TXT)
+        rset = set(x.rdata.txt.str[0] for x in r.answers)
+        self.assertEqual(rset, {'B'})
+
+        self.dns_delete(name, B)
+        recs = self.ldap_get_non_tombstoned_records(name)
+        # Windows leaves the node with zero records. Samba ends up
+        # with a tombstone.
+        self.assertEqual(len(recs), 0)
+        r = self.dns_query(name, dns.DNS_QTYPE_TXT)
+        rset = set(x.rdata.txt.str[0] for x in r.answers)
+        self.assertEqual(len(rset), 0)
+
+    # Parameter sweep over _test_dns_delete_times: (age in days, aging).
+    # 1e10 days evaluates to a clamped 0 timestamp, i.e. a static record.
+    def test_dns_delete_times_5_days_aging(self):
+        self._test_dns_delete_times(5, True)
+
+    def test_dns_delete_times_11_days_aging(self):
+        self._test_dns_delete_times(11, True)
+
+    def test_dns_delete_times_366_days_aging(self):
+        self._test_dns_delete_times(366, True)
+
+    def test_dns_delete_times_static_aging(self):
+        self._test_dns_delete_times(1e10, True)
+
+    def test_dns_delete_times_5_days_no_aging(self):
+        self._test_dns_delete_times(5, False)
+
+    def test_dns_delete_times_11_days_no_aging(self):
+        self._test_dns_delete_times(11, False)
+
+    def test_dns_delete_times_366_days_no_aging(self):
+        self._test_dns_delete_times(366, False)
+
+    def test_dns_delete_times_static_no_aging(self):
+        self._test_dns_delete_times(1e10, False)
+
+    def _test_dns_delete_simple(self, a_days, b_days, aging=True, touch=False):
+        """Delete record B and check the effect on sibling A's timestamp.
+
+        a_days/b_days set the records' ages; touch=True performs a DNS
+        update before LDAP backdates the timestamps, so the DNS server
+        has seen the records."""
+        # Here we show that with aging enabled, the timestamp of
+        # sibling records is *not* modified when a record is deleted.
+        #
+        # With aging disabled, it *is* modified, if the dns server has
+        # seen it updated before ldap set the time (that is, probably
+        # the dns server overwrites AD). This happens even if AD
+        # thinks the record is static.
+        name = 'test'
+        A = ['A']
+        B = ['B']
+        self.set_aging(aging)
+        now = dsdb_dns.unix_to_dns_timestamp(int(time.time()))
+        # clamp at 0 so huge day counts mean "static"
+        a_days_ago = max(now - a_days * 24, 0)
+        b_days_ago = max(now - b_days * 24, 0)
+
+        if touch:
+            self.dns_update_record(name, A)
+            self.dns_update_record(name, B)
+
+        self.ldap_update_record(name, A, dwTimeStamp=a_days_ago)
+        self.ldap_update_record(name, B, dwTimeStamp=b_days_ago)
+
+        atime = self.get_unique_txt_record(name, A).dwTimeStamp
+
+        self.dns_delete(name, B)
+        if not aging and touch:
+            # this resets the timestamp even if it is a static record.
+            self.assert_soon_after(self.get_unique_txt_record(name, A), now)
+        else:
+            self.assert_timestamps_equal(self.get_unique_txt_record(name, A), atime)
+
+    # Parameter sweep over _test_dns_delete_simple:
+    # (A age in days, B age in days, aging[, touch]).
+    # 1e9 days evaluates to a clamped 0 timestamp, i.e. static.
+    def test_dns_delete_simple_2_3_days_aging(self):
+        self._test_dns_delete_simple(2, 3, True)
+
+    def test_dns_delete_simple_2_3_days_no_aging(self):
+        self._test_dns_delete_simple(2, 3, False)
+
+    def test_dns_delete_simple_2_13_days_aging(self):
+        self._test_dns_delete_simple(2, 13, True)
+
+    def test_dns_delete_simple_2_13_days_no_aging(self):
+        self._test_dns_delete_simple(2, 13, False)
+
+    def test_dns_delete_simple_12_13_days_aging(self):
+        self._test_dns_delete_simple(12, 13, True)
+
+    def test_dns_delete_simple_12_13_days_no_aging(self):
+        self._test_dns_delete_simple(12, 13, False)
+
+    def test_dns_delete_simple_112_113_days_aging(self):
+        self._test_dns_delete_simple(112, 113, True)
+
+    def test_dns_delete_simple_112_113_days_no_aging(self):
+        self._test_dns_delete_simple(112, 113, False)
+
+    def test_dns_delete_simple_0_113_days_aging(self):
+        # 1e9 hours ago evaluates to 0, i.e static
+        self._test_dns_delete_simple(1e9, 113, True)
+
+    def test_dns_delete_simple_0_113_days_no_aging(self):
+        self._test_dns_delete_simple(1e9, 113, False)
+
+    def test_dns_delete_simple_0_0_days_aging(self):
+        self._test_dns_delete_simple(1e9, 1e9, True)
+
+    def test_dns_delete_simple_0_0_days_no_aging(self):
+        self._test_dns_delete_simple(1e9, 1e9, False)
+
+    def test_dns_delete_simple_10_0_days_aging(self):
+        self._test_dns_delete_simple(10, 1e9, True)
+
+    def test_dns_delete_simple_10_0_days_no_aging(self):
+        self._test_dns_delete_simple(10, 1e9, False)
+
+    # the *_touch variants pre-touch the records over DNS first
+    def test_dns_delete_simple_2_3_days_aging_touch(self):
+        self._test_dns_delete_simple(2, 3, True, True)
+
+    def test_dns_delete_simple_2_3_days_no_aging_touch(self):
+        self._test_dns_delete_simple(2, 3, False, True)
+
+    def test_dns_delete_simple_2_13_days_aging_touch(self):
+        self._test_dns_delete_simple(2, 13, True, True)
+
+    def test_dns_delete_simple_2_13_days_no_aging_touch(self):
+        self._test_dns_delete_simple(2, 13, False, True)
+
+    def test_dns_delete_simple_12_13_days_aging_touch(self):
+        self._test_dns_delete_simple(12, 13, True, True)
+
+    def test_dns_delete_simple_12_13_days_no_aging_touch(self):
+        self._test_dns_delete_simple(12, 13, False, True)
+
+    def test_dns_delete_simple_112_113_days_aging_touch(self):
+        self._test_dns_delete_simple(112, 113, True, True)
+
+    def test_dns_delete_simple_112_113_days_no_aging_touch(self):
+        self._test_dns_delete_simple(112, 113, False, True)
+
+    def test_dns_delete_simple_0_113_days_aging_touch(self):
+        # 1e9 hours ago evaluates to 0, i.e static
+        self._test_dns_delete_simple(1e9, 113, True, True)
+
+    def test_dns_delete_simple_0_113_days_no_aging_touch(self):
+        self._test_dns_delete_simple(1e9, 113, False, True)
+
+    def test_dns_delete_simple_0_0_days_aging_touch(self):
+        self._test_dns_delete_simple(1e9, 1e9, True, True)
+
+    def test_dns_delete_simple_0_0_days_no_aging_touch(self):
+        self._test_dns_delete_simple(1e9, 1e9, False, True)
+
+    def test_dns_delete_simple_10_0_days_aging_touch(self):
+        self._test_dns_delete_simple(10, 1e9, True, True)
+
+    def test_dns_delete_simple_10_0_days_no_aging_touch(self):
+        self._test_dns_delete_simple(10, 1e9, False, True)
+
+    def windows_variation(self, fn, *args, msg=None, **kwargs):
+        """Run an assertion that is expected to pass only on Windows.
+
+        If fn(*args, **kwargs) raises AssertionError (the Samba case),
+        that is reported as expected and swallowed; if it passes, a
+        "known Windows failure" note is printed. Never fails the test."""
+        try:
+            fn(*args, **kwargs)
+        except AssertionError as e:
+            print("Expected success on Windows only, failed as expected:\n" +
+                  c_GREEN(e))
+            return
+        print(c_RED("known Windows failure"))
+        if msg is not None:
+            print(c_DARK_YELLOW(msg))
+        print("Expected success on Windows:\n" +
+              c_GREEN(f"{fn.__name__} {args} {kwargs}"))
+
+    def _test_dns_add_sibling(self, a_days, refresh, aging=True, touch=False):
+        """Add record B and check the effect on sibling A's timestamp.
+
+        a_days ages record A; refresh sets the zone RefreshInterval in
+        hours; touch=True performs a DNS update of A before LDAP
+        backdates it, so the DNS server has seen the record."""
+        # Here we show that with aging enabled, the timestamp of
+        # sibling records *is* modified when a record is added.
+        #
+        # With aging disabled, it *is* modified, if the dns server has
+        # seen it updated before ldap set the time (that is, probably
+        # the dns server overwrites AD). This happens even if AD
+        # thinks the record is static.
+        name = 'test'
+        A = ['A']
+        B = ['B']
+        self.set_zone_int_params(RefreshInterval=int(refresh),
+                                 NoRefreshInterval=7,
+                                 Aging=int(aging))
+
+        now = dsdb_dns.unix_to_dns_timestamp(int(time.time()))
+        # clamp at 0 so huge day counts mean "static"
+        a_days_ago = max(now - a_days * 24, 0)
+
+        if touch:
+            self.dns_update_record(name, A)
+
+        self.ldap_update_record(name, A, dwTimeStamp=a_days_ago)
+
+        atime = self.get_unique_txt_record(name, A).dwTimeStamp
+
+        self.dns_update_record(name, B)
+        a_rec = self.get_unique_txt_record(name, A)
+        if not aging and touch:
+            # On Windows, this resets the timestamp even if it is a
+            # static record, though in that case it may be a
+            # transitory effect of the DNS cache. We will insist on
+            # the Samba behaviour of not changing (that is
+            # un-static-ing) a zero timestamp, because that is the
+            # sensible thing.
+            if a_days_ago == 0:
+                self.windows_variation(
+                    self.assert_soon_after, a_rec, now,
+                    msg="Windows resets static siblings (cache effect?)")
+                self.assert_timestamps_equal(a_rec, 0)
+            else:
+                self.assert_soon_after(a_rec, now)
+        else:
+            self.assert_timestamps_equal(a_rec, atime)
+
+        # the newly added B always gets the current timestamp
+        b_rec = self.get_unique_txt_record(name, B)
+        self.assert_soon_after(b_rec, now)
+
+    # Parameter sweep over _test_dns_add_sibling:
+    # (A age in days, refresh interval in hours, aging[, touch]).
+    # 1e9 days evaluates to a clamped 0 timestamp, i.e. static.
+    def test_dns_add_sibling_2_7_days_aging(self):
+        self._test_dns_add_sibling(2, 7, True)
+
+    def test_dns_add_sibling_2_7_days_no_aging(self):
+        self._test_dns_add_sibling(2, 7, False)
+
+    def test_dns_add_sibling_12_7_days_aging(self):
+        self._test_dns_add_sibling(12, 7, True)
+
+    def test_dns_add_sibling_12_7_days_no_aging(self):
+        self._test_dns_add_sibling(12, 7, False)
+
+    def test_dns_add_sibling_12_3_days_aging(self):
+        self._test_dns_add_sibling(12, 3, True)
+
+    def test_dns_add_sibling_12_3_days_no_aging(self):
+        self._test_dns_add_sibling(12, 3, False)
+
+    def test_dns_add_sibling_112_7_days_aging(self):
+        self._test_dns_add_sibling(112, 7, True)
+
+    def test_dns_add_sibling_112_7_days_no_aging(self):
+        self._test_dns_add_sibling(112, 7, False)
+
+    def test_dns_add_sibling_12_113_days_aging(self):
+        self._test_dns_add_sibling(12, 113, True)
+
+    def test_dns_add_sibling_12_113_days_no_aging(self):
+        self._test_dns_add_sibling(12, 113, False)
+
+    def test_dns_add_sibling_0_7_days_aging(self):
+        # 1e9 days ago evaluates to 0, i.e static
+        self._test_dns_add_sibling(1e9, 7, True)
+
+    def test_dns_add_sibling_0_7_days_no_aging(self):
+        self._test_dns_add_sibling(1e9, 7, False)
+
+    def test_dns_add_sibling_0_0_days_aging(self):
+        self._test_dns_add_sibling(1e9, 0, True)
+
+    def test_dns_add_sibling_0_0_days_no_aging(self):
+        self._test_dns_add_sibling(1e9, 0, False)
+
+    def test_dns_add_sibling_10_0_days_aging(self):
+        self._test_dns_add_sibling(10, 0, True)
+
+    def test_dns_add_sibling_10_0_days_no_aging(self):
+        self._test_dns_add_sibling(10, 0, False)
+
+    # the *_touch variants pre-touch record A over DNS first
+    def test_dns_add_sibling_2_7_days_aging_touch(self):
+        self._test_dns_add_sibling(2, 7, True, True)
+
+    def test_dns_add_sibling_2_7_days_no_aging_touch(self):
+        self._test_dns_add_sibling(2, 7, False, True)
+
+    def test_dns_add_sibling_12_7_days_aging_touch(self):
+        self._test_dns_add_sibling(12, 7, True, True)
+
+    def test_dns_add_sibling_12_7_days_no_aging_touch(self):
+        self._test_dns_add_sibling(12, 7, False, True)
+
+    def test_dns_add_sibling_12_3_days_aging_touch(self):
+        self._test_dns_add_sibling(12, 3, True, True)
+
+    def test_dns_add_sibling_12_3_days_no_aging_touch(self):
+        self._test_dns_add_sibling(12, 3, False, True)
+
+    def test_dns_add_sibling_112_7_days_aging_touch(self):
+        self._test_dns_add_sibling(112, 7, True, True)
+
+    def test_dns_add_sibling_112_7_days_no_aging_touch(self):
+        self._test_dns_add_sibling(112, 7, False, True)
+
+    def test_dns_add_sibling_12_113_days_aging_touch(self):
+        self._test_dns_add_sibling(12, 113, True, True)
+
+    def test_dns_add_sibling_12_113_days_no_aging_touch(self):
+        self._test_dns_add_sibling(12, 113, False, True)
+
+    def test_dns_add_sibling_0_7_days_aging_touch(self):
+        self._test_dns_add_sibling(1e9, 7, True, True)
+
+    def test_dns_add_sibling_0_7_days_no_aging_touch(self):
+        self._test_dns_add_sibling(1e9, 7, False, True)
+
+    def test_dns_add_sibling_0_0_days_aging_touch(self):
+        self._test_dns_add_sibling(1e9, 0, True, True)
+
+    def test_dns_add_sibling_0_0_days_no_aging_touch(self):
+        self._test_dns_add_sibling(1e9, 0, False, True)
+
+    def test_dns_add_sibling_10_0_days_aging_touch(self):
+        self._test_dns_add_sibling(10, 0, True, True)
+
+    def test_dns_add_sibling_10_0_days_no_aging_touch(self):
+        self._test_dns_add_sibling(10, 0, False, True)
+
+TestProgram(module=__name__, opts=subunitopts)
diff --git a/python/samba/tests/dns_base.py b/python/samba/tests/dns_base.py
new file mode 100644
index 0000000..d320a0e
--- /dev/null
+++ b/python/samba/tests/dns_base.py
@@ -0,0 +1,437 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Kai Blin <kai@samba.org> 2011
+# Copyright (C) Ralph Boehme <slow@samba.org> 2016
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from samba.tests import TestCaseInTempDir
+from samba.dcerpc import dns, dnsp
+from samba import gensec, tests
+from samba import credentials
+import struct
+import samba.ndr as ndr
+import random
+import socket
+import uuid
+import time
+
+
class DNSTest(TestCaseInTempDir):
    """Base class for DNS protocol tests.

    Provides helpers for building dns.name_packet structures and for
    performing DNS transactions over UDP and TCP.  Subclasses are
    expected to provide self.server / self.server_ip / self.creds
    before the transaction helpers are used.
    """

    def setUp(self):
        super().setUp()
        # Socket timeout (seconds) for the transaction helpers;
        # None means block indefinitely.
        self.timeout = None

    def errstr(self, errcode):
        "Return a readable error code"
        string_codes = [
            "OK",
            "FORMERR",
            "SERVFAIL",
            "NXDOMAIN",
            "NOTIMP",
            "REFUSED",
            "YXDOMAIN",
            "YXRRSET",
            "NXRRSET",
            "NOTAUTH",
            "NOTZONE",
            "0x0B",
            "0x0C",
            "0x0D",
            "0x0E",
            "0x0F",
            "BADSIG",
            "BADKEY"
        ]

        # Fix: codes beyond BADKEY used to raise IndexError inside an
        # assertion message; report unknown codes in hex instead.
        if 0 <= errcode < len(string_codes):
            return string_codes[errcode]
        return "0x%02X" % errcode

    def assert_rcode_equals(self, rcode, expected):
        "Helper function to check return code"
        self.assertEqual(rcode, expected, "Expected RCODE %s, got %s" %
                         (self.errstr(expected), self.errstr(rcode)))

    def assert_dns_rcode_equals(self, packet, rcode):
        "Helper function to check return code"
        p_errcode = packet.operation & dns.DNS_RCODE
        self.assertEqual(p_errcode, rcode, "Expected RCODE %s, got %s" %
                         (self.errstr(rcode), self.errstr(p_errcode)))

    def assert_dns_opcode_equals(self, packet, opcode):
        "Helper function to check opcode"
        p_opcode = packet.operation & dns.DNS_OPCODE
        self.assertEqual(p_opcode, opcode, "Expected OPCODE %s, got %s" %
                         (opcode, p_opcode))

    def make_name_packet(self, opcode, qid=None):
        "Helper creating a dns.name_packet"
        p = dns.name_packet()
        if qid is None:
            p.id = random.randint(0x0, 0xff00)
        else:
            # Fix: a caller-supplied qid used to be silently ignored,
            # leaving the packet with its default id.
            p.id = qid
        p.operation = opcode
        p.questions = []
        p.additional = []
        return p

    def finish_name_packet(self, packet, questions):
        "Helper to finalize a dns.name_packet"
        packet.qdcount = len(questions)
        packet.questions = questions

    def make_name_question(self, name, qtype, qclass):
        "Helper creating a dns.name_question"
        q = dns.name_question()
        q.name = name
        q.question_type = qtype
        q.question_class = qclass
        return q

    def make_txt_record(self, records):
        """Wrap a list of strings into a dns.txt_record rdata."""
        rdata_txt = dns.txt_record()
        s_list = dnsp.string_list()
        s_list.count = len(records)
        s_list.str = records
        rdata_txt.txt = s_list
        return rdata_txt

    def get_dns_domain(self):
        "Helper to get dns domain"
        return self.creds.get_realm().lower()

    def dns_transaction_udp(self, packet, host,
                            dump=False, timeout=None):
        """send a DNS query and read the reply

        Returns (unpacked_response, raw_reply_bytes).
        """
        s = None
        if timeout is None:
            timeout = self.timeout
        try:
            send_packet = ndr.ndr_pack(packet)
            if dump:
                print(self.hexdump(send_packet))
            s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
            s.settimeout(timeout)
            s.connect((host, 53))
            s.sendall(send_packet, 0)
            recv_packet = s.recv(2048, 0)
            if dump:
                print(self.hexdump(recv_packet))
            response = ndr.ndr_unpack(dns.name_packet, recv_packet)
            return (response, recv_packet)
        finally:
            if s is not None:
                s.close()

    def dns_transaction_tcp(self, packet, host,
                            dump=False, timeout=None):
        "send a DNS query and read the reply, also return the raw packet"
        s = None
        if timeout is None:
            timeout = self.timeout
        try:
            send_packet = ndr.ndr_pack(packet)
            if dump:
                print(self.hexdump(send_packet))
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
            s.settimeout(timeout)
            s.connect((host, 53))
            # TCP DNS messages are prefixed with a 2-byte big-endian length.
            tcp_packet = struct.pack('!H', len(send_packet))
            tcp_packet += send_packet
            s.sendall(tcp_packet)

            recv_packet = s.recv(0xffff + 2, 0)
            if dump:
                print(self.hexdump(recv_packet))
            response = ndr.ndr_unpack(dns.name_packet, recv_packet[2:])

        finally:
            if s is not None:
                s.close()

        # unpacking and packing again should produce same bytestream
        my_packet = ndr.ndr_pack(response)
        self.assertEqual(my_packet, recv_packet[2:])
        return (response, recv_packet[2:])

    def make_txt_update(self, prefix, txt_array, zone=None, ttl=900):
        """Build a dynamic-update packet adding a TXT record
        "<prefix>.<zone>" with the given strings."""
        p = self.make_name_packet(dns.DNS_OPCODE_UPDATE)
        updates = []

        name = zone or self.get_dns_domain()
        u = self.make_name_question(name, dns.DNS_QTYPE_SOA, dns.DNS_QCLASS_IN)
        updates.append(u)
        self.finish_name_packet(p, updates)

        updates = []
        r = dns.res_rec()
        r.name = "%s.%s" % (prefix, name)
        r.rr_type = dns.DNS_QTYPE_TXT
        r.rr_class = dns.DNS_QCLASS_IN
        r.ttl = ttl
        r.length = 0xffff
        rdata = self.make_txt_record(txt_array)
        r.rdata = rdata
        updates.append(r)
        p.nscount = len(updates)
        p.nsrecs = updates

        return p

    def check_query_txt(self, prefix, txt_array, zone=None):
        """Query "<prefix>.<zone>" for TXT and assert it matches txt_array."""
        name = "%s.%s" % (prefix, zone or self.get_dns_domain())
        p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
        questions = []

        q = self.make_name_question(name, dns.DNS_QTYPE_TXT, dns.DNS_QCLASS_IN)
        questions.append(q)

        self.finish_name_packet(p, questions)
        (response, response_packet) =\
            self.dns_transaction_udp(p, host=self.server_ip)
        self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
        self.assertEqual(response.ancount, 1)
        self.assertEqual(response.answers[0].rdata.txt.str, txt_array)
+
+
class DNSTKeyTest(DNSTest):
    """DNS tests that establish a GSS-TSIG signing context via TKEY
    (RFC 2930) and sign/verify packets with TSIG (RFC 2845)."""

    def setUp(self):
        super().setUp()
        self.settings = {}
        self.settings["lp_ctx"] = self.lp_ctx = tests.env_loadparm()
        # NOTE(review): self.server is read here but never assigned in
        # this class — presumably set by a subclass or test driver
        # before setUp runs; confirm.
        self.settings["target_hostname"] = self.server

        self.creds = credentials.Credentials()
        self.creds.guess(self.lp_ctx)
        self.creds.set_username(tests.env_get_var_value('USERNAME'))
        self.creds.set_password(tests.env_get_var_value('PASSWORD'))
        self.creds.set_kerberos_state(credentials.MUST_USE_KERBEROS)
        self.newrecname = "tkeytsig.%s" % self.get_dns_domain()

    def tkey_trans(self, creds=None):
        "Do a TKEY transaction and establish a gensec context"

        if creds is None:
            creds = self.creds

        # A random key name keeps repeated runs from colliding on the
        # server; it is reused later by sign_packet()/verify_packet().
        self.key_name = "%s.%s" % (uuid.uuid4(), self.get_dns_domain())

        p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
        q = self.make_name_question(self.key_name,
                                    dns.DNS_QTYPE_TKEY,
                                    dns.DNS_QCLASS_IN)
        questions = []
        questions.append(q)
        self.finish_name_packet(p, questions)

        # TKEY record carrying the first GSSAPI token (RFC 2930).
        r = dns.res_rec()
        r.name = self.key_name
        r.rr_type = dns.DNS_QTYPE_TKEY
        r.rr_class = dns.DNS_QCLASS_IN
        r.ttl = 0
        r.length = 0xffff
        rdata = dns.tkey_record()
        rdata.algorithm = "gss-tsig"
        rdata.inception = int(time.time())
        rdata.expiration = int(time.time()) + 60 * 60
        rdata.mode = dns.DNS_TKEY_MODE_GSSAPI
        rdata.error = 0
        rdata.other_size = 0

        # Start an SPNEGO client context able to sign packets.
        self.g = gensec.Security.start_client(self.settings)
        self.g.set_credentials(creds)
        self.g.set_target_service("dns")
        self.g.set_target_hostname(self.server)
        self.g.want_feature(gensec.FEATURE_SIGN)
        self.g.start_mech_by_name("spnego")

        finished = False
        client_to_server = b""

        (finished, server_to_client) = self.g.update(client_to_server)
        self.assertFalse(finished)

        # Convert the token to a list of ints (elements may be ints or
        # single-char strings) as expected by the NDR key_data field.
        data = [x if isinstance(x, int) else ord(x) for x in list(server_to_client)]
        rdata.key_data = data
        rdata.key_size = len(data)
        r.rdata = rdata

        additional = [r]
        p.arcount = 1
        p.additional = additional

        (response, response_packet) =\
            self.dns_transaction_tcp(p, self.server_ip)
        self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)

        # Feed the server's reply token back in; the security context
        # must now be fully established.
        tkey_record = response.answers[0].rdata
        server_to_client = bytes(tkey_record.key_data)
        (finished, client_to_server) = self.g.update(server_to_client)
        self.assertTrue(finished)

        self.verify_packet(response, response_packet)

    def verify_packet(self, response, response_packet, request_mac=b""):
        """Check the TSIG MAC on a signed response.

        Rebuilds the data the server MACed: the response without its
        TSIG record (and with ARCOUNT decremented) plus the TSIG
        "variables" pseudo-record, optionally prefixed by the request
        MAC (RFC 2845 section 3.4).
        """
        self.assertEqual(response.additional[0].rr_type, dns.DNS_QTYPE_TSIG)

        tsig_record = response.additional[0].rdata
        mac = bytes(tsig_record.mac)

        # Cut off tsig record from dns response packet for MAC verification
        # and reset additional record count.
        # +2: leading label-length byte and trailing root label of the
        # wire-format owner name; +10: fixed RR header after the name
        # (TYPE 2 + CLASS 2 + TTL 4 + RDLENGTH 2).
        key_name_len = len(self.key_name) + 2
        tsig_record_len = len(ndr.ndr_pack(tsig_record)) + key_name_len + 10

        # convert str/bytes to a list (of string char or int)
        # so it can be modified
        response_packet_list = [x if isinstance(x, int) else ord(x) for x in response_packet]
        del response_packet_list[-tsig_record_len:]
        # Byte 11 is the low byte of ARCOUNT; zero it now that the TSIG
        # record has been stripped.
        response_packet_list[11] = 0

        # convert modified list (of string char or int) to str/bytes
        response_packet_wo_tsig = bytes(response_packet_list)

        # TSIG "variables" pseudo-record included in the MAC but never
        # sent on the wire.
        fake_tsig = dns.fake_tsig_rec()
        fake_tsig.name = self.key_name
        fake_tsig.rr_class = dns.DNS_QCLASS_ANY
        fake_tsig.ttl = 0
        fake_tsig.time_prefix = tsig_record.time_prefix
        fake_tsig.time = tsig_record.time
        fake_tsig.algorithm_name = tsig_record.algorithm_name
        fake_tsig.fudge = tsig_record.fudge
        fake_tsig.error = 0
        fake_tsig.other_size = 0
        fake_tsig_packet = ndr.ndr_pack(fake_tsig)

        data = request_mac + response_packet_wo_tsig + fake_tsig_packet
        self.g.check_packet(data, data, mac)

    def sign_packet(self, packet, key_name):
        "Sign a packet, calculate a MAC and add TSIG record"
        packet_data = ndr.ndr_pack(packet)

        # TSIG "variables" pseudo-record, MACed together with the
        # packet but not transmitted (RFC 2845).
        fake_tsig = dns.fake_tsig_rec()
        fake_tsig.name = key_name
        fake_tsig.rr_class = dns.DNS_QCLASS_ANY
        fake_tsig.ttl = 0
        fake_tsig.time_prefix = 0
        fake_tsig.time = int(time.time())
        fake_tsig.algorithm_name = "gss-tsig"
        fake_tsig.fudge = 300
        fake_tsig.error = 0
        fake_tsig.other_size = 0
        fake_tsig_packet = ndr.ndr_pack(fake_tsig)

        data = packet_data + fake_tsig_packet
        mac = self.g.sign_packet(data, data)
        mac_list = [x if isinstance(x, int) else ord(x) for x in list(mac)]

        rdata = dns.tsig_record()
        rdata.algorithm_name = "gss-tsig"
        rdata.time_prefix = 0
        rdata.time = fake_tsig.time
        rdata.fudge = 300
        rdata.original_id = packet.id
        rdata.error = 0
        rdata.other_size = 0
        rdata.mac = mac_list
        rdata.mac_size = len(mac_list)

        r = dns.res_rec()
        r.name = key_name
        r.rr_type = dns.DNS_QTYPE_TSIG
        r.rr_class = dns.DNS_QCLASS_ANY
        r.ttl = 0
        r.length = 0xffff
        r.rdata = rdata

        additional = [r]
        packet.additional = additional
        packet.arcount = 1

        # Return the MAC so callers can pass it as request_mac to
        # verify_packet() for the matching response.
        return mac

    def bad_sign_packet(self, packet, key_name):
        """Attach a deliberately invalid TSIG MAC (the literal bytes of
        "badmac") to the packet so the server must reject it."""

        mac_list = [x if isinstance(x, int) else ord(x) for x in list("badmac")]

        rdata = dns.tsig_record()
        rdata.algorithm_name = "gss-tsig"
        rdata.time_prefix = 0
        rdata.time = int(time.time())
        rdata.fudge = 300
        rdata.original_id = packet.id
        rdata.error = 0
        rdata.other_size = 0
        rdata.mac = mac_list
        rdata.mac_size = len(mac_list)

        r = dns.res_rec()
        r.name = key_name
        r.rr_type = dns.DNS_QTYPE_TSIG
        r.rr_class = dns.DNS_QCLASS_ANY
        r.ttl = 0
        r.length = 0xffff
        r.rdata = rdata

        additional = [r]
        packet.additional = additional
        packet.arcount = 1

    def search_record(self, name):
        """Query for a TXT record and return only the response RCODE."""
        p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
        questions = []

        q = self.make_name_question(name, dns.DNS_QTYPE_TXT, dns.DNS_QCLASS_IN)
        questions.append(q)

        self.finish_name_packet(p, questions)
        (response, response_packet) =\
            self.dns_transaction_udp(p, self.server_ip)
        # RCODE is the low 4 bits of the operation field.
        return response.operation & 0x000F

    def make_update_request(self, delete=False):
        "Create a DNS update request"

        rr_class = dns.DNS_QCLASS_IN
        ttl = 900

        if delete:
            # Class NONE with TTL 0 marks the record for deletion in a
            # dynamic update (RFC 2136).
            rr_class = dns.DNS_QCLASS_NONE
            ttl = 0

        p = self.make_name_packet(dns.DNS_OPCODE_UPDATE)
        q = self.make_name_question(self.get_dns_domain(),
                                    dns.DNS_QTYPE_SOA,
                                    dns.DNS_QCLASS_IN)
        questions = []
        questions.append(q)
        self.finish_name_packet(p, questions)

        updates = []
        r = dns.res_rec()
        r.name = self.newrecname
        r.rr_type = dns.DNS_QTYPE_TXT
        r.rr_class = rr_class
        r.ttl = ttl
        r.length = 0xffff
        rdata = self.make_txt_record(['"This is a test"'])
        r.rdata = rdata
        updates.append(r)
        p.nscount = len(updates)
        p.nsrecs = updates

        return p
diff --git a/python/samba/tests/dns_forwarder.py b/python/samba/tests/dns_forwarder.py
new file mode 100644
index 0000000..86b553e
--- /dev/null
+++ b/python/samba/tests/dns_forwarder.py
@@ -0,0 +1,600 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Kai Blin <kai@samba.org> 2011
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import os
+import sys
+import random
+import socket
+import samba
+import time
+import errno
+import samba.ndr as ndr
+from samba import credentials
+from samba.tests import TestCase
+from samba.dcerpc import dns
+from samba.tests.subunitrun import SubunitOptions, TestProgram
+import samba.getopt as options
+import optparse
+import subprocess
+
# Port the second toy forwarder listens on; the first uses the standard
# DNS port 53.
DNS_PORT2 = 54

parser = optparse.OptionParser("dns_forwarder.py <server name> <server ip> (dns forwarder)+ [options]")
sambaopts = options.SambaOptions(parser)
parser.add_option_group(sambaopts)

# This timeout only has relevance when testing against Windows
# Format errors tend to return patchy responses, so a timeout is needed.
parser.add_option("--timeout", type="int", dest="timeout",
                  help="Specify timeout for DNS requests")

# use command line creds if available
credopts = options.CredentialsOptions(parser)
parser.add_option_group(credopts)
subunitopts = SubunitOptions(parser)
parser.add_option_group(subunitopts)

opts, args = parser.parse_args()

lp = sambaopts.get_loadparm()
creds = credopts.get_credentials(lp)

# None when --timeout is not given; sockets then block indefinitely.
timeout = opts.timeout

if len(args) < 3:
    parser.print_usage()
    sys.exit(1)

server_name = args[0]
server_ip = args[1]
# Every remaining argument is an address a toy forwarder will listen on.
dns_servers = args[2:]

creds.set_krb_forwardable(credentials.NO_KRB_FORWARDABLE)
+
+
class DNSTest(TestCase):
    """Shared helpers for the forwarder tests: DNS packet construction
    and a simple UDP transaction against the server under test."""

    # Map RCODE value -> symbolic DNS_RCODE_* constant name, for
    # readable assertion messages.
    errcodes = dict((v, k) for k, v in vars(dns).items() if k.startswith('DNS_RCODE_'))

    def assert_dns_rcode_equals(self, packet, rcode):
        "Helper function to check return code"
        p_errcode = packet.operation & dns.DNS_RCODE
        self.assertEqual(p_errcode, rcode, "Expected RCODE %s, got %s" %
                         (self.errcodes[rcode], self.errcodes[p_errcode]))

    def assert_dns_opcode_equals(self, packet, opcode):
        "Helper function to check opcode"
        p_opcode = packet.operation & dns.DNS_OPCODE
        self.assertEqual(p_opcode, opcode, "Expected OPCODE %s, got %s" %
                         (opcode, p_opcode))

    def make_name_packet(self, opcode, qid=None):
        "Helper creating a dns.name_packet"
        p = dns.name_packet()
        if qid is None:
            p.id = random.randint(0x0, 0xffff)
        else:
            # Fix: a caller-supplied qid used to be silently ignored,
            # leaving the packet with its default id.
            p.id = qid
        p.operation = opcode
        p.questions = []
        return p

    def finish_name_packet(self, packet, questions):
        "Helper to finalize a dns.name_packet"
        packet.qdcount = len(questions)
        packet.questions = questions

    def make_name_question(self, name, qtype, qclass):
        "Helper creating a dns.name_question"
        q = dns.name_question()
        q.name = name
        q.question_type = qtype
        q.question_class = qclass
        return q

    def get_dns_domain(self):
        "Helper to get dns domain"
        return self.creds.get_realm().lower()

    def dns_transaction_udp(self, packet, host=server_ip,
                            dump=False, timeout=timeout):
        """send a DNS query and read the reply

        NOTE: the host/timeout defaults are bound at import time from
        the module-level command-line values.
        """
        s = None
        try:
            send_packet = ndr.ndr_pack(packet)
            if dump:
                print(self.hexdump(send_packet))
            s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
            s.settimeout(timeout)
            s.connect((host, 53))
            s.send(send_packet, 0)
            recv_packet = s.recv(2048, 0)
            if dump:
                print(self.hexdump(recv_packet))
            return ndr.ndr_unpack(dns.name_packet, recv_packet)
        finally:
            if s is not None:
                s.close()

    def make_cname_update(self, key, value):
        """Add a CNAME record key -> value via dynamic update and assert
        the server accepted it."""
        p = self.make_name_packet(dns.DNS_OPCODE_UPDATE)

        name = self.get_dns_domain()
        u = self.make_name_question(name, dns.DNS_QTYPE_SOA, dns.DNS_QCLASS_IN)
        self.finish_name_packet(p, [u])

        r = dns.res_rec()
        r.name = key
        r.rr_type = dns.DNS_QTYPE_CNAME
        r.rr_class = dns.DNS_QCLASS_IN
        r.ttl = 900
        r.length = 0xffff
        rdata = value
        r.rdata = rdata
        p.nscount = 1
        p.nsrecs = [r]
        response = self.dns_transaction_udp(p)
        self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
+
+
def contact_real_server(host, port):
    """Return an IPv4 UDP socket connected to (host, port)."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.connect((host, port))
    return sock
+
+
class TestDnsForwarding(DNSTest):
    """Tests of the Samba internal DNS server's forwarder behaviour,
    using toy DNS servers (spawned as subprocesses) as the upstream
    forwarders."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Toy-server subprocesses to be reaped in tearDown().
        self.subprocesses = []

    def setUp(self):
        super().setUp()
        self.server = server_name
        self.server_ip = server_ip
        self.lp = lp
        self.creds = creds

    def start_toy_server(self, host, port, id):
        """Spawn the helper DNS server and return a UDP control socket
        connected to it once it is responsive."""
        python = sys.executable
        p = subprocess.Popen([python,
                              os.path.join(samba.source_tree_topdir(),
                                           'python/samba/tests/'
                                           'dns_forwarder_helpers/server.py'),
                              host, str(port), id])
        self.subprocesses.append(p)
        if (host.find(':') != -1):
            s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM, 0)
        else:
            s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
        # Poll for up to ~15s (300 * 50ms) until a datagram is accepted.
        for i in range(300):
            time.sleep(0.05)
            s.connect((host, port))
            try:
                s.send(b'timeout 0', 0)
            except socket.error as e:
                if e.errno in (errno.ECONNREFUSED, errno.EHOSTUNREACH):
                    continue

            if p.returncode is not None:
                self.fail("Toy server has managed to die already!")

            return s

    def tearDown(self):
        super().tearDown()
        # Reap every toy server started by this test.
        for p in self.subprocesses:
            p.kill()

    def test_comatose_forwarder(self):
        s = self.start_toy_server(dns_servers[0], 53, 'forwarder1')
        s.send(b"timeout 1000000", 0)

        # make DNS query
        name = "an-address-that-will-not-resolve"
        p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
        questions = []

        q = self.make_name_question(name, dns.DNS_QTYPE_TXT, dns.DNS_QCLASS_IN)
        questions.append(q)

        self.finish_name_packet(p, questions)
        send_packet = ndr.ndr_pack(p)

        s.send(send_packet, 0)
        s.settimeout(1)
        try:
            s.recv(0xffff + 2, 0)
            self.fail("DNS forwarder should have been inactive")
        except socket.timeout:
            # Expected forwarder to be dead
            pass

    def test_no_active_forwarder(self):
        ad = contact_real_server(server_ip, 53)

        name = "dsfsfds.dsfsdfs"
        p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
        questions = []

        q = self.make_name_question(name, dns.DNS_QTYPE_TXT, dns.DNS_QCLASS_IN)
        questions.append(q)

        self.finish_name_packet(p, questions)
        send_packet = ndr.ndr_pack(p)

        self.finish_name_packet(p, questions)
        p.operation |= dns.DNS_FLAG_RECURSION_DESIRED
        send_packet = ndr.ndr_pack(p)

        ad.send(send_packet, 0)
        ad.settimeout(timeout)
        try:
            data = ad.recv(0xffff + 2, 0)
            data = ndr.ndr_unpack(dns.name_packet, data)
            self.assert_dns_rcode_equals(data, dns.DNS_RCODE_SERVFAIL)
            self.assertEqual(data.ancount, 0)
        except socket.timeout:
            self.fail("DNS server is too slow (timeout %s)" % timeout)

    def test_no_flag_recursive_forwarder(self):
        ad = contact_real_server(server_ip, 53)

        name = "dsfsfds.dsfsdfs"
        p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
        questions = []

        q = self.make_name_question(name, dns.DNS_QTYPE_TXT, dns.DNS_QCLASS_IN)
        questions.append(q)

        self.finish_name_packet(p, questions)
        send_packet = ndr.ndr_pack(p)

        self.finish_name_packet(p, questions)
        # Leave off the recursive flag
        send_packet = ndr.ndr_pack(p)

        ad.send(send_packet, 0)
        ad.settimeout(timeout)
        try:
            data = ad.recv(0xffff + 2, 0)
            data = ndr.ndr_unpack(dns.name_packet, data)
            self.assert_dns_rcode_equals(data, dns.DNS_RCODE_NXDOMAIN)
            self.assertEqual(data.ancount, 0)
        except socket.timeout:
            self.fail("DNS server is too slow (timeout %s)" % timeout)

    def test_single_forwarder(self):
        s = self.start_toy_server(dns_servers[0], 53, 'forwarder1')
        ad = contact_real_server(server_ip, 53)
        name = "dsfsfds.dsfsdfs"
        p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
        questions = []

        q = self.make_name_question(name, dns.DNS_QTYPE_CNAME,
                                    dns.DNS_QCLASS_IN)
        questions.append(q)

        self.finish_name_packet(p, questions)
        p.operation |= dns.DNS_FLAG_RECURSION_DESIRED
        send_packet = ndr.ndr_pack(p)

        ad.send(send_packet, 0)
        ad.settimeout(timeout)
        try:
            data = ad.recv(0xffff + 2, 0)
            data = ndr.ndr_unpack(dns.name_packet, data)
            self.assert_dns_rcode_equals(data, dns.DNS_RCODE_OK)
            # The toy server answers with its own id as CNAME rdata.
            self.assertEqual('forwarder1', data.answers[0].rdata)
        except socket.timeout:
            self.fail("DNS server is too slow (timeout %s)" % timeout)

    def test_single_forwarder_not_actually_there(self):
        ad = contact_real_server(server_ip, 53)
        name = "dsfsfds.dsfsdfs"
        p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
        questions = []

        q = self.make_name_question(name, dns.DNS_QTYPE_CNAME,
                                    dns.DNS_QCLASS_IN)
        questions.append(q)

        self.finish_name_packet(p, questions)
        p.operation |= dns.DNS_FLAG_RECURSION_DESIRED
        send_packet = ndr.ndr_pack(p)

        ad.send(send_packet, 0)
        ad.settimeout(timeout)
        try:
            data = ad.recv(0xffff + 2, 0)
            data = ndr.ndr_unpack(dns.name_packet, data)
            self.assert_dns_rcode_equals(data, dns.DNS_RCODE_SERVFAIL)
        except socket.timeout:
            self.fail("DNS server is too slow (timeout %s)" % timeout)

    def test_single_forwarder_waiting_forever(self):
        s = self.start_toy_server(dns_servers[0], 53, 'forwarder1')
        s.send(b'timeout 10000', 0)
        ad = contact_real_server(server_ip, 53)
        name = "dsfsfds.dsfsdfs"
        p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
        questions = []

        q = self.make_name_question(name, dns.DNS_QTYPE_CNAME,
                                    dns.DNS_QCLASS_IN)
        questions.append(q)

        self.finish_name_packet(p, questions)
        p.operation |= dns.DNS_FLAG_RECURSION_DESIRED
        send_packet = ndr.ndr_pack(p)

        ad.send(send_packet, 0)
        ad.settimeout(timeout)
        try:
            data = ad.recv(0xffff + 2, 0)
            data = ndr.ndr_unpack(dns.name_packet, data)
            self.assert_dns_rcode_equals(data, dns.DNS_RCODE_SERVFAIL)
        except socket.timeout:
            self.fail("DNS server is too slow (timeout %s)" % timeout)

    def test_double_forwarder_first_frozen(self):
        if len(dns_servers) < 2:
            print("Ignoring test_double_forwarder_first_frozen")
            return
        s1 = self.start_toy_server(dns_servers[0], 53, 'forwarder1')
        s2 = self.start_toy_server(dns_servers[1], DNS_PORT2, 'forwarder2')
        s1.send(b'timeout 1000', 0)
        ad = contact_real_server(server_ip, 53)
        name = "dsfsfds.dsfsdfs"
        p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
        questions = []

        q = self.make_name_question(name, dns.DNS_QTYPE_CNAME,
                                    dns.DNS_QCLASS_IN)
        questions.append(q)

        self.finish_name_packet(p, questions)
        p.operation |= dns.DNS_FLAG_RECURSION_DESIRED
        send_packet = ndr.ndr_pack(p)

        ad.send(send_packet, 0)
        ad.settimeout(timeout)
        try:
            data = ad.recv(0xffff + 2, 0)
            data = ndr.ndr_unpack(dns.name_packet, data)
            self.assert_dns_rcode_equals(data, dns.DNS_RCODE_OK)
            self.assertEqual('forwarder2', data.answers[0].rdata)
        except socket.timeout:
            self.fail("DNS server is too slow (timeout %s)" % timeout)

    def test_double_forwarder_first_down(self):
        if len(dns_servers) < 2:
            print("Ignoring test_double_forwarder_first_down")
            return
        s2 = self.start_toy_server(dns_servers[1], DNS_PORT2, 'forwarder2')
        ad = contact_real_server(server_ip, 53)
        name = "dsfsfds.dsfsdfs"
        p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
        questions = []

        q = self.make_name_question(name, dns.DNS_QTYPE_CNAME,
                                    dns.DNS_QCLASS_IN)
        questions.append(q)

        self.finish_name_packet(p, questions)
        p.operation |= dns.DNS_FLAG_RECURSION_DESIRED
        send_packet = ndr.ndr_pack(p)

        ad.send(send_packet, 0)
        ad.settimeout(timeout)
        try:
            data = ad.recv(0xffff + 2, 0)
            data = ndr.ndr_unpack(dns.name_packet, data)
            self.assert_dns_rcode_equals(data, dns.DNS_RCODE_OK)
            self.assertEqual('forwarder2', data.answers[0].rdata)
        except socket.timeout:
            self.fail("DNS server is too slow (timeout %s)" % timeout)

    def test_double_forwarder_both_slow(self):
        if len(dns_servers) < 2:
            print("Ignoring test_double_forwarder_both_slow")
            return
        s1 = self.start_toy_server(dns_servers[0], 53, 'forwarder1')
        s2 = self.start_toy_server(dns_servers[1], DNS_PORT2, 'forwarder2')
        s1.send(b'timeout 1.5', 0)
        s2.send(b'timeout 1.5', 0)
        ad = contact_real_server(server_ip, 53)
        name = "dsfsfds.dsfsdfs"
        p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
        questions = []

        q = self.make_name_question(name, dns.DNS_QTYPE_CNAME,
                                    dns.DNS_QCLASS_IN)
        questions.append(q)

        self.finish_name_packet(p, questions)
        p.operation |= dns.DNS_FLAG_RECURSION_DESIRED
        send_packet = ndr.ndr_pack(p)

        ad.send(send_packet, 0)
        ad.settimeout(timeout)
        try:
            data = ad.recv(0xffff + 2, 0)
            data = ndr.ndr_unpack(dns.name_packet, data)
            self.assert_dns_rcode_equals(data, dns.DNS_RCODE_OK)
            self.assertEqual('forwarder1', data.answers[0].rdata)
        except socket.timeout:
            self.fail("DNS server is too slow (timeout %s)" % timeout)

    def test_cname(self):
        s1 = self.start_toy_server(dns_servers[0], 53, 'forwarder1')

        ad = contact_real_server(server_ip, 53)
        name = "resolve.cname"
        p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
        questions = []

        q = self.make_name_question(name, dns.DNS_QTYPE_CNAME,
                                    dns.DNS_QCLASS_IN)
        questions.append(q)

        self.finish_name_packet(p, questions)
        p.operation |= dns.DNS_FLAG_RECURSION_DESIRED
        send_packet = ndr.ndr_pack(p)

        ad.send(send_packet, 0)
        ad.settimeout(timeout)
        try:
            data = ad.recv(0xffff + 2, 0)
            data = ndr.ndr_unpack(dns.name_packet, data)
            self.assert_dns_rcode_equals(data, dns.DNS_RCODE_OK)
            self.assertEqual(len(data.answers), 1)
            self.assertEqual('forwarder1', data.answers[0].rdata)
        except socket.timeout:
            self.fail("DNS server is too slow (timeout %s)" % timeout)

    def test_double_cname(self):
        s1 = self.start_toy_server(dns_servers[0], 53, 'forwarder1')

        name = 'resolve.cname.%s' % self.get_dns_domain()
        self.make_cname_update(name, "dsfsfds.dsfsdfs")

        ad = contact_real_server(server_ip, 53)

        p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
        questions = []
        q = self.make_name_question(name, dns.DNS_QTYPE_A,
                                    dns.DNS_QCLASS_IN)
        questions.append(q)

        self.finish_name_packet(p, questions)
        p.operation |= dns.DNS_FLAG_RECURSION_DESIRED
        send_packet = ndr.ndr_pack(p)

        ad.send(send_packet, 0)
        ad.settimeout(timeout)
        try:
            data = ad.recv(0xffff + 2, 0)
            data = ndr.ndr_unpack(dns.name_packet, data)
            self.assert_dns_rcode_equals(data, dns.DNS_RCODE_OK)
            self.assertEqual('forwarder1', data.answers[1].rdata)
        except socket.timeout:
            self.fail("DNS server is too slow (timeout %s)" % timeout)

    def test_cname_forwarding_with_slow_server(self):
        if len(dns_servers) < 2:
            print("Ignoring test_cname_forwarding_with_slow_server")
            return
        s1 = self.start_toy_server(dns_servers[0], 53, 'forwarder1')
        s2 = self.start_toy_server(dns_servers[1], DNS_PORT2, 'forwarder2')
        s1.send(b'timeout 10000', 0)

        name = 'resolve.cname.%s' % self.get_dns_domain()
        self.make_cname_update(name, "dsfsfds.dsfsdfs")

        ad = contact_real_server(server_ip, 53)

        p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
        questions = []
        q = self.make_name_question(name, dns.DNS_QTYPE_A,
                                    dns.DNS_QCLASS_IN)
        questions.append(q)

        self.finish_name_packet(p, questions)
        p.operation |= dns.DNS_FLAG_RECURSION_DESIRED
        send_packet = ndr.ndr_pack(p)

        ad.send(send_packet, 0)
        ad.settimeout(timeout)
        try:
            data = ad.recv(0xffff + 2, 0)
            data = ndr.ndr_unpack(dns.name_packet, data)
            self.assert_dns_rcode_equals(data, dns.DNS_RCODE_OK)
            self.assertEqual('forwarder2', data.answers[-1].rdata)
        except socket.timeout:
            self.fail("DNS server is too slow (timeout %s)" % timeout)

    def test_cname_forwarding_with_server_down(self):
        if len(dns_servers) < 2:
            print("Ignoring test_cname_forwarding_with_server_down")
            return
        s2 = self.start_toy_server(dns_servers[1], DNS_PORT2, 'forwarder2')

        name1 = 'resolve1.cname.%s' % self.get_dns_domain()
        name2 = 'resolve2.cname.%s' % self.get_dns_domain()
        self.make_cname_update(name1, name2)
        self.make_cname_update(name2, "dsfsfds.dsfsdfs")

        ad = contact_real_server(server_ip, 53)

        p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
        questions = []
        q = self.make_name_question(name1, dns.DNS_QTYPE_A,
                                    dns.DNS_QCLASS_IN)
        questions.append(q)

        self.finish_name_packet(p, questions)
        p.operation |= dns.DNS_FLAG_RECURSION_DESIRED
        send_packet = ndr.ndr_pack(p)

        ad.send(send_packet, 0)
        ad.settimeout(timeout)
        try:
            data = ad.recv(0xffff + 2, 0)
            data = ndr.ndr_unpack(dns.name_packet, data)
            self.assert_dns_rcode_equals(data, dns.DNS_RCODE_OK)
            self.assertEqual('forwarder2', data.answers[-1].rdata)
        except socket.timeout:
            self.fail("DNS server is too slow (timeout %s)" % timeout)

    def test_cname_forwarding_with_lots_of_cnames(self):
        name3 = 'resolve3.cname.%s' % self.get_dns_domain()
        s1 = self.start_toy_server(dns_servers[0], 53, name3)

        name1 = 'resolve1.cname.%s' % self.get_dns_domain()
        name2 = 'resolve2.cname.%s' % self.get_dns_domain()
        self.make_cname_update(name1, name2)
        self.make_cname_update(name3, name1)
        self.make_cname_update(name2, "dsfsfds.dsfsdfs")

        ad = contact_real_server(server_ip, 53)

        p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
        questions = []
        q = self.make_name_question(name1, dns.DNS_QTYPE_A,
                                    dns.DNS_QCLASS_IN)
        questions.append(q)

        self.finish_name_packet(p, questions)
        p.operation |= dns.DNS_FLAG_RECURSION_DESIRED
        send_packet = ndr.ndr_pack(p)

        ad.send(send_packet, 0)
        ad.settimeout(timeout)
        try:
            data = ad.recv(0xffff + 2, 0)
            data = ndr.ndr_unpack(dns.name_packet, data)
            # This should cause a loop in Windows
            # (which is restricted by a 20 CNAME limit)
            #
            # The reason it doesn't here is because forwarded CNAME have no
            # additional processing in the internal DNS server.
            self.assert_dns_rcode_equals(data, dns.DNS_RCODE_OK)
            self.assertEqual(name3, data.answers[-1].rdata)
        except socket.timeout:
            self.fail("DNS server is too slow (timeout %s)" % timeout)
+
+
+TestProgram(module=__name__, opts=subunitopts)
diff --git a/python/samba/tests/dns_forwarder_helpers/server.py b/python/samba/tests/dns_forwarder_helpers/server.py
new file mode 100644
index 0000000..d947d2c
--- /dev/null
+++ b/python/samba/tests/dns_forwarder_helpers/server.py
@@ -0,0 +1,104 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Catalyst.Net Ltd 2016
+# Catalyst.Net's contribution was written by Douglas Bagnall
+# <douglas.bagnall@catalyst.net.nz> and Garming Sam <garming@catalyst.net.nz>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# Based on the EchoServer example from python docs
+
+import socketserver as SocketServer
+import socket
+import sys
+from threading import Timer
+from samba.dcerpc import dns
+import samba.ndr as ndr
+import re
+
VERBOSE = False


def debug(msg):
    """Print *msg* in cyan when VERBOSE is set, flushing around the write
    so the output interleaves sanely with other processes' output."""
    if not VERBOSE:
        return
    sys.stdout.flush()
    print("\033[00;36m%s\033[00m" % msg)
    sys.stdout.flush()
+
+
+timeout = 0
+
+
def answer_question(data, question):
    """Build a CNAME resource record pointing the queried name at the
    module-global SERVER_ID (set from the command line in main())."""
    rec = dns.res_rec()
    rec.name = question.name
    rec.rr_type = dns.DNS_QTYPE_CNAME
    rec.rr_class = dns.DNS_QCLASS_IN
    rec.ttl = 900
    rec.length = 0xffff
    rec.rdata = SERVER_ID
    return rec
+
+
class DnsHandler(SocketServer.BaseRequestHandler):
    """Answer every DNS question with a CNAME naming this server's id,
    optionally delaying replies by a client-configurable timeout."""

    def make_answer(self, data):
        """Unpack the wire-format query, attach the CNAME answer, and
        return the packed reply."""
        packet = ndr.ndr_unpack(dns.name_packet, data)

        debug('answering this question:')
        debug(packet.__ndr_print__())

        answer = answer_question(packet, packet.questions[0])
        if answer is not None:
            packet.answers = [answer]
            packet.ancount += 1
            debug('the answer was: ')
            debug(packet.__ndr_print__())

        packet.operation |= dns.DNS_FLAG_REPLY

        return ndr.ndr_pack(packet)

    def really_handle(self, data, socket):
        # Runs (possibly later, via a Timer) to actually send the reply.
        socket.sendto(self.make_answer(data), self.client_address)

    def handle(self):
        data, socket = self.request
        debug("%s: %s wrote:" % (SERVER_ID, self.client_address[0]))

        # A magic "timeout <seconds>" datagram is not answered; instead it
        # sets the delay applied to all subsequent replies.
        global timeout
        m = re.match(br'^timeout\s+([\d.]+)$', data.strip())
        if m is not None:
            timeout = float(m.group(1))
            debug("timing out at %s" % timeout)
            return

        Timer(timeout, self.really_handle, [data, socket]).start()
+
class TestUDPServer(SocketServer.UDPServer):
    """UDP server that chooses AF_INET6 or AF_INET from the bind address."""

    def __init__(self, server_address, RequestHandlerClass):
        # a colon can only occur in an IPv6 address
        if ':' in server_address[0]:
            self.address_family = socket.AF_INET6
        else:
            self.address_family = socket.AF_INET
        super().__init__(server_address, RequestHandlerClass)
+
def main():
    """Run the toy DNS server: server.py <host> <port> <server-id>.

    SERVER_ID is published as the CNAME target in every answer.
    """
    global SERVER_ID
    args = sys.argv[1:]
    host, port, SERVER_ID = args
    TestUDPServer((host, int(port)), DnsHandler).serve_forever()
+
+
+main()
diff --git a/python/samba/tests/dns_invalid.py b/python/samba/tests/dns_invalid.py
new file mode 100644
index 0000000..7415cef
--- /dev/null
+++ b/python/samba/tests/dns_invalid.py
@@ -0,0 +1,80 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Kai Blin <kai@samba.org> 2018
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import sys
+from samba import credentials
+from samba.dcerpc import dns
+from samba.tests.subunitrun import SubunitOptions, TestProgram
+from samba.tests.dns_base import DNSTest
+import samba.getopt as options
+import optparse
+
# Command-line handling: standard samba option groups plus a timeout.
parser = optparse.OptionParser("dns_invalid.py <server ip> [options]")
sambaopts = options.SambaOptions(parser)
parser.add_option_group(sambaopts)

# This timeout only has relevance when testing against Windows
# Format errors tend to return patchy responses, so a timeout is needed.
parser.add_option("--timeout", type="int", dest="timeout",
                  help="Specify timeout for DNS requests")

# use command line creds if available
credopts = options.CredentialsOptions(parser)
parser.add_option_group(credopts)
subunitopts = SubunitOptions(parser)
parser.add_option_group(subunitopts)

opts, args = parser.parse_args()

lp = sambaopts.get_loadparm()
creds = credopts.get_credentials(lp)

timeout = opts.timeout  # None unless --timeout was given

if len(args) < 1:
    parser.print_usage()
    sys.exit(1)

server_ip = args[0]  # positional argument: IP of the DNS server under test
creds.set_krb_forwardable(credentials.NO_KRB_FORWARDABLE)
+
+
class TestBrokenQueries(DNSTest):
    """Queries a DNS server should refuse rather than answer."""

    def setUp(self):
        super().setUp()
        global server, server_ip, lp, creds, timeout
        self.server_ip = server_ip
        self.lp = lp
        self.creds = creds
        self.timeout = timeout

    def test_invalid_chars_in_name(self):
        """Check the server refuses invalid characters in the query name"""
        packet = self.make_name_packet(dns.DNS_OPCODE_QUERY)

        bad_name = "\x10\x11\x05\xa8.%s" % self.get_dns_domain()
        question = self.make_name_question(bad_name, dns.DNS_QTYPE_A,
                                           dns.DNS_QCLASS_IN)
        print("asking for %s" % (question.name))

        self.finish_name_packet(packet, [question])
        (response, response_packet) = self.dns_transaction_udp(
            packet, host=server_ip)
        self.assert_dns_rcode_equals(response, dns.DNS_RCODE_NXDOMAIN)
+
+
+TestProgram(module=__name__, opts=subunitopts)
diff --git a/python/samba/tests/dns_packet.py b/python/samba/tests/dns_packet.py
new file mode 100644
index 0000000..61d5aab
--- /dev/null
+++ b/python/samba/tests/dns_packet.py
@@ -0,0 +1,230 @@
+# Tests of malformed DNS packets
+# Copyright (C) Catalyst.NET ltd
+#
+# written by Douglas Bagnall <douglas.bagnall@catalyst.net.nz>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""Sanity tests for DNS and NBT server parsing.
+
+We don't use a proper client library so we can make improper packets.
+"""
+
+import os
+import struct
+import socket
+import select
+from samba.dcerpc import dns, nbt
+
+from samba.tests import TestCase
+
+
+def _msg_id():
+ while True:
+ for i in range(1, 0xffff):
+ yield i
+
+
+SERVER = os.environ['SERVER_IP']
+SERVER_NAME = f"{os.environ['SERVER']}.{os.environ['REALM']}"
+TIMEOUT = 0.5
+
+
+def encode_netbios_bytes(chars):
+ """Even RFC 1002 uses distancing quotes when calling this "compression"."""
+ out = []
+ chars = (chars + b' ')[:16]
+ for c in chars:
+ out.append((c >> 4) + 65)
+ out.append((c & 15) + 65)
+ return bytes(out)
+
+
class TestDnsPacketBase(TestCase):
    # Class-level generator: each query built in the run gets a distinct
    # 16-bit message id.
    msg_id = _msg_id()

    def tearDown(self):
        # we need to ensure the DNS server is responsive before
        # continuing.
        for i in range(40):
            ok = self._known_good_query()
            if ok:
                return
        print(f"the server is STILL unresponsive after {40 * TIMEOUT} seconds")

    def decode_reply(self, data):
        # Unpack only the fixed 12-byte header; all we need back is the
        # rcode, i.e. the low 4 bits of the flags word.
        header = data[:12]
        id, flags, n_q, n_a, n_rec, n_exta = struct.unpack('!6H',
                                                           header)
        return {
            'rcode': flags & 0xf
        }

    def construct_query(self, names):
        """Create a query packet containing one query record.

        *names* is either a single string name in the usual dotted
        form, or a list of names. In the latter case, each name can
        be a dotted string or a list of byte components, which allows
        dots in components. Where I say list, I mean non-string
        iterable.

        Examples:

        # these 3 are all the same
        "example.com"
        ["example.com"]
        [[b"example", b"com"]]

        # this is three names in the same request
        ["example.com",
         [b"example", b"com", b"..!"],
         (b"first component", b" 2nd component")]
        """
        header = struct.pack('!6H',
                             next(self.msg_id),
                             0x0100,  # query, with recursion
                             len(names),  # number of queries
                             0x0000,  # no answers
                             0x0000,  # no records
                             0x0000,  # no extra records
                             )
        tail = struct.pack('!BHH',
                           0x00,  # root node
                           self.qtype,
                           0x0001,  # class IN-ternet
                           )
        encoded_bits = []
        for name in names:
            if isinstance(name, str):
                bits = name.encode('utf8').split(b'.')
            else:
                bits = name

            for b in bits:
                # length-prefixed label; deliberately no validity checks,
                # since malformed labels are exactly what we want to send
                encoded_bits.append(b'%c%s' % (len(b), b))
            encoded_bits.append(tail)

        return header + b''.join(encoded_bits)

    def _test_query(self, names=(), expected_rcode=None):
        # NOTE(review): expected_rcode is currently unused here — the only
        # check made is that the server stays responsive.
        if isinstance(names, str):
            names = [names]

        packet = self.construct_query(names)
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        s.sendto(packet, self.server)
        r, _, _ = select.select([s], [], [], TIMEOUT)
        s.close()
        # It is reasonable to not reply to these packets (Windows
        # doesn't), but it is not reasonable to render the server
        # unresponsive.
        if r != [s]:
            ok = self._known_good_query()
            self.assertTrue(ok, "the server is unresponsive")

    def _known_good_query(self):
        """Send a query that must get an answer; return True if it does."""
        # Port 53 is the DNS server: ask for the server's own name.
        # Otherwise (NBT, port 137) query a nonexistent netbios name and
        # accept the name-error rcode as proof of life.
        if self.server[1] == 53:
            name = SERVER_NAME
            expected_rcode = dns.DNS_RCODE_OK
        else:
            name = [encode_netbios_bytes(b'nxdomain'), b'nxdomain']
            expected_rcode = nbt.NBT_RCODE_NAM

        packet = self.construct_query([name])
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        s.sendto(packet, self.server)
        r, _, _ = select.select([s], [], [], TIMEOUT)
        if r != [s]:
            s.close()
            return False

        data, addr = s.recvfrom(4096)
        s.close()
        rcode = self.decode_reply(data)['rcode']
        return expected_rcode == rcode

    def _test_empty_packet(self):

        packet = b""
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        s.sendto(packet, self.server)
        s.close()

        # It is reasonable not to reply to an empty packet
        # but it is not reasonable to render the server
        # unresponsive.
        ok = self._known_good_query()
        self.assertTrue(ok, "the server is unresponsive")
+
+
class TestDnsPackets(TestDnsPacketBase):
    """Malformed-packet tests aimed at the DNS port."""
    server = (SERVER, 53)
    qtype = 1  # dns type A

    def _test_many_repeated_components(self, label, n, expected_rcode=None):
        # one name made of the same label repeated n times
        self._test_query([[label] * n],
                         expected_rcode=expected_rcode)

    def test_127_very_dotty_components(self):
        self._test_many_repeated_components(b'.' * 63, 127)

    def test_127_half_dotty_components(self):
        self._test_many_repeated_components(b'x.' * 31 + b'x', 127)

    def test_empty_packet(self):
        self._test_empty_packet()
+
+
class TestNbtPackets(TestDnsPacketBase):
    """Malformed-packet tests aimed at the NBT name service port."""
    server = (SERVER, 137)
    qtype = 0x20  # NBT_QTYPE_NETBIOS

    def _test_nbt_encode_query(self, names, *args, **kwargs):
        if isinstance(names, str):
            names = [names]

        # netbios-encode the first component of each name, leaving the
        # rest of the components untouched
        nbt_names = []
        for name in names:
            if isinstance(name, str):
                bits = name.encode('utf8').split(b'.')
            else:
                bits = name
            nbt_names.append([encode_netbios_bytes(bits[0]), *bits[1:]])

        self._test_query(nbt_names, *args, **kwargs)

    def _test_many_repeated_components(self, label, n, expected_rcode=None):
        components = [label] * n
        components[0] = encode_netbios_bytes(label)
        self._test_query([components],
                         expected_rcode=expected_rcode)

    def test_127_very_dotty_components(self):
        self._test_many_repeated_components(b'.' * 63, 127)

    def test_127_half_dotty_components(self):
        self._test_many_repeated_components(b'x.' * 31 + b'x', 127)

    def test_empty_packet(self):
        self._test_empty_packet()
diff --git a/python/samba/tests/dns_tkey.py b/python/samba/tests/dns_tkey.py
new file mode 100644
index 0000000..69af14d
--- /dev/null
+++ b/python/samba/tests/dns_tkey.py
@@ -0,0 +1,208 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Kai Blin <kai@samba.org> 2011
+# Copyright (C) Ralph Boehme <slow@samba.org> 2016
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import sys
+import optparse
+import samba.getopt as options
+from samba.dcerpc import dns
+from samba.tests.subunitrun import SubunitOptions, TestProgram
+from samba.tests.dns_base import DNSTKeyTest
+
# Command-line handling: standard samba option groups plus a timeout.
parser = optparse.OptionParser("dns_tkey.py <server name> <server ip> [options]")
sambaopts = options.SambaOptions(parser)
parser.add_option_group(sambaopts)

# This timeout only has relevance when testing against Windows
# Format errors tend to return patchy responses, so a timeout is needed.
parser.add_option("--timeout", type="int", dest="timeout",
                  help="Specify timeout for DNS requests")

# use command line creds if available
credopts = options.CredentialsOptions(parser)
parser.add_option_group(credopts)
subunitopts = SubunitOptions(parser)
parser.add_option_group(subunitopts)

opts, args = parser.parse_args()
timeout = opts.timeout  # None unless --timeout was given

if len(args) < 2:
    parser.print_usage()
    sys.exit(1)

# positional arguments: the DC's hostname and its IP address
server_name = args[0]
server_ip = args[1]
+
+
class TestDNSUpdates(DNSTKeyTest):
    # Dynamic DNS update tests exercising the TKEY/TSIG signing path.

    def setUp(self):
        # set server details before calling up, since the base setUp
        # uses them
        self.server = server_name
        self.server_ip = server_ip
        super().setUp()

    def test_tkey(self):
        "test DNS TKEY handshake"

        self.tkey_trans()

    def test_update_wo_tsig(self):
        "test DNS update without TSIG record"

        p = self.make_update_request()
        (response, response_p) = self.dns_transaction_udp(p, self.server_ip)
        self.assert_dns_rcode_equals(response, dns.DNS_RCODE_REFUSED)

        # the refused update must not have created the record
        rcode = self.search_record(self.newrecname)
        self.assert_rcode_equals(rcode, dns.DNS_RCODE_NXDOMAIN)

    def test_update_tsig_bad_keyname(self):
        "test DNS update with a TSIG record with a bad keyname"

        self.tkey_trans()

        p = self.make_update_request()
        self.sign_packet(p, "badkey")
        (response, response_p) = self.dns_transaction_udp(p, self.server_ip)
        self.assert_dns_rcode_equals(response, dns.DNS_RCODE_NOTAUTH)
        # the reply's TSIG record carries the specific failure code
        tsig_record = response.additional[0].rdata
        self.assertEqual(tsig_record.error, dns.DNS_RCODE_BADKEY)
        self.assertEqual(tsig_record.mac_size, 0)

        rcode = self.search_record(self.newrecname)
        self.assert_rcode_equals(rcode, dns.DNS_RCODE_NXDOMAIN)

    def test_update_tsig_bad_mac(self):
        "test DNS update with a TSIG record with a bad MAC"

        self.tkey_trans()

        p = self.make_update_request()
        self.bad_sign_packet(p, self.key_name)
        (response, response_p) = self.dns_transaction_udp(p, self.server_ip)
        self.assert_dns_rcode_equals(response, dns.DNS_RCODE_NOTAUTH)
        tsig_record = response.additional[0].rdata
        self.assertEqual(tsig_record.error, dns.DNS_RCODE_BADSIG)
        self.assertEqual(tsig_record.mac_size, 0)

        rcode = self.search_record(self.newrecname)
        self.assert_rcode_equals(rcode, dns.DNS_RCODE_NXDOMAIN)

    def test_update_tsig(self):
        "test DNS update with correct TSIG record"

        self.tkey_trans()

        p = self.make_update_request()
        mac = self.sign_packet(p, self.key_name)
        (response, response_p) = self.dns_transaction_udp(p, self.server_ip)
        self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
        self.verify_packet(response, response_p, mac)

        # Check the record is around
        rcode = self.search_record(self.newrecname)
        self.assert_rcode_equals(rcode, dns.DNS_RCODE_OK)

        # Now delete the record
        p = self.make_update_request(delete=True)
        mac = self.sign_packet(p, self.key_name)
        (response, response_p) = self.dns_transaction_udp(p, self.server_ip)
        self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
        self.verify_packet(response, response_p, mac)

        # check it's gone
        rcode = self.search_record(self.newrecname)
        self.assert_rcode_equals(rcode, dns.DNS_RCODE_NXDOMAIN)

    def test_update_tsig_windows(self):
        "test DNS update with correct TSIG record (follow Windows pattern)"

        newrecname = "win" + self.newrecname
        rr_class = dns.DNS_QCLASS_IN
        ttl = 1200

        p = self.make_name_packet(dns.DNS_OPCODE_UPDATE)
        q = self.make_name_question(self.get_dns_domain(),
                                    dns.DNS_QTYPE_SOA,
                                    dns.DNS_QCLASS_IN)
        questions = []
        questions.append(q)
        self.finish_name_packet(p, questions)

        # Windows-style update: delete any existing A/AAAA record
        # (class ANY, ttl/length 0), then add the new A record
        updates = []
        r = dns.res_rec()
        r.name = newrecname
        r.rr_type = dns.DNS_QTYPE_A
        r.rr_class = dns.DNS_QCLASS_ANY
        r.ttl = 0
        r.length = 0
        updates.append(r)
        r = dns.res_rec()
        r.name = newrecname
        r.rr_type = dns.DNS_QTYPE_AAAA
        r.rr_class = dns.DNS_QCLASS_ANY
        r.ttl = 0
        r.length = 0
        updates.append(r)
        r = dns.res_rec()
        r.name = newrecname
        r.rr_type = dns.DNS_QTYPE_A
        r.rr_class = rr_class
        r.ttl = ttl
        r.length = 0xffff
        r.rdata = "10.1.45.64"
        updates.append(r)
        p.nscount = len(updates)
        p.nsrecs = updates

        # prerequisite (class NONE): no CNAME of that name may exist
        prereqs = []
        r = dns.res_rec()
        r.name = newrecname
        r.rr_type = dns.DNS_QTYPE_CNAME
        r.rr_class = dns.DNS_QCLASS_NONE
        r.ttl = 0
        r.length = 0
        prereqs.append(r)
        p.ancount = len(prereqs)
        p.answers = prereqs

        # unsigned, the update must be refused
        (response, response_p) = self.dns_transaction_udp(p, self.server_ip)
        self.assert_dns_rcode_equals(response, dns.DNS_RCODE_REFUSED)

        # signed with a fresh TKEY, it must succeed
        self.tkey_trans()
        mac = self.sign_packet(p, self.key_name)
        (response, response_p) = self.dns_transaction_udp(p, self.server_ip)
        self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
        self.verify_packet(response, response_p, mac)

        # Check the record is around
        rcode = self.search_record(newrecname)
        self.assert_rcode_equals(rcode, dns.DNS_RCODE_OK)

        # Now delete the record
        p = self.make_update_request(delete=True)
        mac = self.sign_packet(p, self.key_name)
        (response, response_p) = self.dns_transaction_udp(p, self.server_ip)
        self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
        self.verify_packet(response, response_p, mac)

        # check it's gone
        # NOTE(review): this searches self.newrecname, not the
        # "win"-prefixed newrecname created above — confirm whether the
        # windows-pattern record is ever cleaned up
        rcode = self.search_record(self.newrecname)
        self.assert_rcode_equals(rcode, dns.DNS_RCODE_NXDOMAIN)
+
+
+TestProgram(module=__name__, opts=subunitopts)
diff --git a/python/samba/tests/dns_wildcard.py b/python/samba/tests/dns_wildcard.py
new file mode 100644
index 0000000..d65a537
--- /dev/null
+++ b/python/samba/tests/dns_wildcard.py
@@ -0,0 +1,336 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Andrew Bartlett 2007
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import sys
+from samba import credentials
+from samba.dcerpc import dns, dnsserver
+from samba.dnsserver import record_from_string
+from samba.tests.subunitrun import SubunitOptions, TestProgram
+from samba import werror, WERRORError
+from samba.tests.dns_base import DNSTest
+import samba.getopt as options
+import optparse
+
# Command-line handling: standard samba option groups plus a timeout.
parser = optparse.OptionParser(
    "dns_wildcard.py <server name> <server ip> [options]")
sambaopts = options.SambaOptions(parser)
parser.add_option_group(sambaopts)

# This timeout only has relevance when testing against Windows
# Format errors tend to return patchy responses, so a timeout is needed.
parser.add_option("--timeout", type="int", dest="timeout",
                  help="Specify timeout for DNS requests")

# To run against Windows
# python python/samba/tests/dns_wildcard.py computer_name ip
#        -U"Administrator%admin_password"
#        --realm=Domain_name
#        --timeout 10
#

# use command line creds if available
credopts = options.CredentialsOptions(parser)
parser.add_option_group(credopts)
subunitopts = SubunitOptions(parser)
parser.add_option_group(subunitopts)

opts, args = parser.parse_args()

lp = sambaopts.get_loadparm()
creds = credopts.get_credentials(lp)

timeout = opts.timeout  # None unless --timeout was given

if len(args) < 2:
    parser.print_usage()
    sys.exit(1)

# positional arguments: the DC's hostname and its IP address
server_name = args[0]
server_ip = args[1]
creds.set_krb_forwardable(credentials.NO_KRB_FORWARDABLE)

# Fixture records: a wildcard and an exact entry at each of two depths.
# The rdata IPs only need to be distinct so tests can tell which entry
# produced an answer.
WILDCARD_IP = "1.1.1.1"
WILDCARD = "*.wildcardtest"
EXACT_IP = "1.1.1.2"
EXACT = "exact.wildcardtest"
LEVEL2_WILDCARD_IP = "1.1.1.3"
LEVEL2_WILDCARD = "*.level2.wildcardtest"
LEVEL2_EXACT_IP = "1.1.1.4"
LEVEL2_EXACT = "exact.level2.wildcardtest"
+
+
class TestWildCardQueries(DNSTest):
    """Check that A queries resolve against wildcard (*) DNS entries,
    including wildcards nested two labels deep, and that an exact entry
    takes precedence over a covering wildcard.
    """

    def setUp(self):
        super().setUp()
        # module-level command-line state is only read here, so no
        # ``global`` declaration is needed
        self.server = server_name
        self.server_ip = server_ip
        self.lp = lp
        self.creds = creds
        self.timeout = timeout

        # (type, fully-qualified name, rdata) for every record the tests
        # rely on; created here and removed again in tearDown
        domain = self.get_dns_domain()
        self.dns_records = [(dns.DNS_QTYPE_A,
                             "%s.%s" % (WILDCARD, domain),
                             WILDCARD_IP),
                            (dns.DNS_QTYPE_A,
                             "%s.%s" % (EXACT, domain),
                             EXACT_IP),
                            (dns.DNS_QTYPE_A,
                             "%s.%s" % (LEVEL2_WILDCARD, domain),
                             LEVEL2_WILDCARD_IP),
                            (dns.DNS_QTYPE_A,
                             "%s.%s" % (LEVEL2_EXACT, domain),
                             LEVEL2_EXACT_IP)]

        c = self.dns_connect()
        for (typ, name, data) in self.dns_records:
            self.add_record(c, typ, name, data)

    def tearDown(self):
        # NOTE(review): super().tearDown() is not called here — confirm
        # whether DNSTest relies on it.
        c = self.dns_connect()
        for (typ, name, data) in self.dns_records:
            self.delete_record(c, typ, name, data)

    def dns_connect(self):
        """Open a signed DCE/RPC connection to the DNS server."""
        binding_str = "ncacn_ip_tcp:%s[sign]" % self.server_ip
        return dnsserver.dnsserver(binding_str, self.lp, self.creds)

    def delete_record(self, dns_conn, typ, name, data):
        """Delete one record over RPC, ignoring 'does not exist' errors
        so tearDown is idempotent."""
        rec = record_from_string(typ, data)
        del_rec_buf = dnsserver.DNS_RPC_RECORD_BUF()
        del_rec_buf.rec = rec

        try:
            dns_conn.DnssrvUpdateRecord2(dnsserver.DNS_CLIENT_VERSION_LONGHORN,
                                         0,
                                         self.server,
                                         self.get_dns_domain(),
                                         name,
                                         None,
                                         del_rec_buf)
        except WERRORError as e:
            # Ignore record does not exist errors
            if e.args[0] != werror.WERR_DNS_ERROR_NAME_DOES_NOT_EXIST:
                raise e

    def add_record(self, dns_conn, typ, name, data):
        """Add one record over RPC; errors propagate to the caller.

        (The original wrapped this call in ``except WERRORError as e:
        raise e``, which was a no-op and has been dropped.)
        """
        rec = record_from_string(typ, data)
        add_rec_buf = dnsserver.DNS_RPC_RECORD_BUF()
        add_rec_buf.rec = rec
        dns_conn.DnssrvUpdateRecord2(dnsserver.DNS_CLIENT_VERSION_LONGHORN,
                                     0,
                                     self.server,
                                     self.get_dns_domain(),
                                     name,
                                     add_rec_buf,
                                     None)

    def _check_a_query(self, name, expected_ip):
        """Send an A query for *name* and assert the reply is OK with
        exactly one A answer whose rdata is *expected_ip*."""
        p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
        q = self.make_name_question(name,
                                    dns.DNS_QTYPE_A,
                                    dns.DNS_QCLASS_IN)
        self.finish_name_packet(p, [q])
        (response, response_packet) =\
            self.dns_transaction_udp(p, host=self.server_ip)
        self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
        self.assert_dns_opcode_equals(response, dns.DNS_OPCODE_QUERY)
        self.assertEqual(response.ancount, 1)
        self.assertEqual(response.answers[0].rr_type, dns.DNS_QTYPE_A)
        self.assertEqual(response.answers[0].rdata, expected_ip)

    def test_one_a_query_match_wildcard(self):
        "Query an A record, should match the wildcard entry"
        self._check_a_query("miss.wildcardtest.%s" % self.get_dns_domain(),
                            WILDCARD_IP)

    def test_one_a_query_match_wildcard_2_labels(self):
        """ Query an A record, should match the wild card entry
            have two labels to the left of the wild card target.
        """
        self._check_a_query(
            "label2.label1.wildcardtest.%s" % self.get_dns_domain(),
            WILDCARD_IP)

    def test_one_a_query_wildcard_entry(self):
        "Query the wildcard entry"
        self._check_a_query("%s.%s" % (WILDCARD, self.get_dns_domain()),
                            WILDCARD_IP)

    def test_one_a_query_exact_match(self):
        """Query an entry that matches the wild card but has an exact match
        as well.
        """
        self._check_a_query("%s.%s" % (EXACT, self.get_dns_domain()),
                            EXACT_IP)

    def test_one_a_query_match_wildcard_l2(self):
        "Query an A record, should match the level 2 wildcard entry"
        self._check_a_query(
            "miss.level2.wildcardtest.%s" % self.get_dns_domain(),
            LEVEL2_WILDCARD_IP)

    def test_one_a_query_match_wildcard_l2_2_labels(self):
        """Query an A record, should match the level 2 wild card entry
        have two labels to the left of the wild card target
        """
        self._check_a_query(
            "label1.label2.level2.wildcardtest.%s" % self.get_dns_domain(),
            LEVEL2_WILDCARD_IP)

    def test_one_a_query_exact_match_l2(self):
        """Query an entry that matches the wild card but has an exact match
        as well.
        """
        self._check_a_query("%s.%s" % (LEVEL2_EXACT, self.get_dns_domain()),
                            LEVEL2_EXACT_IP)

    def test_one_a_query_wildcard_entry_l2(self):
        "Query the level 2 wildcard entry"
        self._check_a_query("%s.%s" % (LEVEL2_WILDCARD, self.get_dns_domain()),
                            LEVEL2_WILDCARD_IP)
+
+
+TestProgram(module=__name__, opts=subunitopts)
diff --git a/python/samba/tests/docs.py b/python/samba/tests/docs.py
new file mode 100644
index 0000000..df20b04
--- /dev/null
+++ b/python/samba/tests/docs.py
@@ -0,0 +1,511 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007-2012
+#
+# Tests for documentation.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for presence of documentation."""
+
+import samba
+import samba.tests
+
+import os
+import subprocess
+import xml.etree.ElementTree as ET
+import multiprocessing
+import concurrent.futures
+import tempfile
+
class TestCase(samba.tests.TestCaseInTempDir):
    """Base class for the documentation tests, providing a helper to
    report a set of failing parameters in a readable form."""

    def _format_message(self, parameters, message):
        """Return *message* followed by a sorted, indented listing of
        *parameters* (each converted to str)."""
        listing = sorted(str(entry) for entry in parameters)
        return message + '\n\n %s' % ('\n '.join(listing))
+
def get_max_worker_count():
    """Return how many testparm processes to run in parallel.

    Uses half the available CPUs, but never fewer than two, so the
    tests always get some parallelism even on small machines.
    """
    half_the_cpus = multiprocessing.cpu_count() // 2
    return max(2, half_the_cpus)
+
def check_or_set_smbconf_default(cmdline, topdir, param, default_param):
    """Run *cmdline* (a testparm invocation) and compare the value it
    reports for *param* against the documented default *default_param*.

    Returns None when they match, otherwise a (result, param,
    default_param) tuple describing the mismatch.
    """
    stdout_data, _ = subprocess.Popen(cmdline,
                                      stdout=subprocess.PIPE,
                                      stderr=subprocess.PIPE,
                                      cwd=topdir).communicate()
    result = stdout_data.decode().upper().strip()
    if result == default_param.upper():
        return None
    # empty output is considered equivalent to a documented default of ""
    if result == "" and default_param == '""':
        return None
    return result, param, default_param
+
def set_smbconf_arbitrary(cmdline, topdir, param, param_type, value_to_use):
    """Run *cmdline* (a testparm invocation that sets *param* via
    --option) and verify the reported value equals *value_to_use*.

    Returns None on success, or a (result, param, value_to_use) tuple
    describing the mismatch on failure.
    """
    stdout_data, _ = subprocess.Popen(cmdline,
                                      stdout=subprocess.PIPE,
                                      stderr=subprocess.PIPE,
                                      cwd=topdir).communicate()
    result = stdout_data.decode().upper().strip()
    if result != value_to_use.upper():
        # currently no way to distinguish command lists: list values may
        # come back space- rather than comma-separated
        if param_type == 'list':
            if ", ".join(result.split()) == value_to_use.upper():
                return None

        # currently no way to identify octal: compare integers
        # numerically in base 8 so "0123" and "123" are equivalent
        if param_type == 'integer':
            try:
                if int(value_to_use, 8) == int(stdout_data.strip(), 8):
                    return None
            except (TypeError, ValueError):
                # not valid octal text - fall through and report the
                # textual mismatch (was a bare except:, which also
                # swallowed KeyboardInterrupt/SystemExit)
                pass

        return result, param, value_to_use

    return None
+
def set_smbconf_arbitrary_opposite(cmdline, topdir, tempdir, section, param,
                                   param_type, opposite_value, value_to_use):
    """Check that a command-line --option=param=value_to_use overrides a
    conflicting *opposite_value* set in a config file.

    Writes a temporary config snippet containing the opposite value,
    runs *cmdline* against it (via -s), and scans the output for
    *param*.  Returns None on success, or a (param, value_to_use,
    value_found) tuple when the override did not take effect.
    """
    # write a minimal config that sets the parameter to the opposite
    # value, so the command-line option must win
    g = tempfile.NamedTemporaryFile(mode='w', dir=tempdir, delete=False)
    try:
        towrite = section + "\n"
        towrite += param + " = " + opposite_value
        g.write(towrite)
    finally:
        g.close()

    p = subprocess.Popen(cmdline + ["-s", g.name],
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE,
                         cwd=topdir).communicate()
    os.unlink(g.name)

    # testparm doesn't display a value if they are equivalent
    if (value_to_use.lower() != opposite_value.lower()):
        for line in p[0].decode().splitlines():
            # BUG FIX: skip non-matching lines instead of returning
            # early.  The previous "return None" bailed out on the very
            # first output line (usually a section header), so the
            # override was never actually verified.
            if not line.strip().startswith(param):
                continue

            value_found = line.split("=")[1].upper().strip()
            if value_found != value_to_use.upper():
                # currently no way to distinguish command lists
                if param_type == 'list':
                    if ", ".join(value_found.split()) == value_to_use.upper():
                        continue

                # currently no way to identify octal
                if param_type == 'integer':
                    try:
                        if int(value_to_use, 8) == int(value_found, 8):
                            continue
                    except (TypeError, ValueError):
                        pass

                return param, value_to_use, value_found

    return None
+
def get_documented_parameters(sourcedir):
    """Yield the names of all documented smb.conf parameters, plus their
    synonyms, from the generated parameters.all.xml in the build tree.

    Parameters marked removed="1" are skipped.  Raises Exception when
    the file is missing or unreadable.
    """
    path = os.path.join(sourcedir, "bin", "default", "docs-xml", "smbdotconf")
    fname = os.path.join(path, "parameters.all.xml")
    if not os.path.exists(fname):
        raise Exception("Unable to find parameters.all.xml")
    try:
        # use a context manager so the file is closed even if this
        # generator is never fully exhausted (the old code only closed
        # it after the final yield)
        with open(fname, 'r') as p:
            out = p.read()
    except IOError as e:
        raise Exception("Error opening parameters file") from e

    root = ET.fromstring(out)
    for parameter in root:
        name = parameter.attrib.get('name')
        if parameter.attrib.get('removed') == "1":
            continue
        yield name
        syn = parameter.findall('synonym')
        if syn is not None:
            for sy in syn:
                yield sy.text
+
+
def get_documented_tuples(sourcedir, omit_no_default=True):
    """Yield (name, default_text, context, param_type) for documented
    smb.conf parameters from the generated parameters.all.xml.

    With omit_no_default=True (the default), parameters without a
    documented default are skipped; otherwise they are yielded with an
    empty default string.  Raises Exception on a missing/unreadable
    file, or if a parameter documents more than one default.
    """
    path = os.path.join(sourcedir, "bin", "default", "docs-xml", "smbdotconf")
    fname = os.path.join(path, "parameters.all.xml")
    if not os.path.exists(fname):
        raise Exception("Unable to find parameters.all.xml")
    try:
        # context manager instead of a generator-lifetime open/close
        with open(fname, 'r') as p:
            out = p.read()
    except IOError as e:
        raise Exception("Error opening parameters file") from e

    root = ET.fromstring(out)
    for parameter in root:
        name = parameter.attrib.get("name")
        param_type = parameter.attrib.get("type")
        if parameter.attrib.get('removed') == "1":
            continue
        # collect the <value type="default"> children for this parameter
        defaults = [value for value in parameter.findall("value")
                    if value.attrib.get("type") == "default"]

        default_text = None
        if len(defaults) == 0:
            if omit_no_default:
                continue
        elif len(defaults) > 1:
            raise Exception("More than one default found for parameter %s" % name)
        else:
            default_text = defaults[0].text

        if default_text is None:
            default_text = ""
        context = parameter.attrib.get("context")
        yield name, default_text, context, param_type
+
+
class SmbDotConfTests(TestCase):
    """Checks that every documented smb.conf parameter matches reality:
    testparm reports the documented default, values can be set and
    overridden, and an empty config produces no spurious output."""

    # defines the cases where the defaults may differ from the documentation
    #
    # Please pass the default via waf rather than adding to this
    # list if at all possible.
    special_cases = set([
        'log level',
        'path',
        'panic action',
        'homedir map',
        'NIS homedir',
        'server string',
        'netbios name',
        'socket options',
        'ctdbd socket',
        'printing',
        'printcap name',
        'queueresume command',
        'queuepause command',
        'lpresume command',
        'lppause command',
        'lprm command',
        'lpq command',
        'print command',
        'template homedir',
        'max open files',
        'include system krb5 conf',
        'smbd max async dosmode',
    ])
+
+ def setUp(self):
+ super().setUp()
+ # create a minimal smb.conf file for testparm
+ self.smbconf = os.path.join(self.tempdir, "paramtestsmb.conf")
+ f = open(self.smbconf, 'w')
+ try:
+ f.write("""
+[test]
+ path = /
+""")
+ finally:
+ f.close()
+
+ self.blankconf = os.path.join(self.tempdir, "emptytestsmb.conf")
+ f = open(self.blankconf, 'w')
+ try:
+ f.write("")
+ finally:
+ f.close()
+
+ self.topdir = os.path.abspath(samba.source_tree_topdir())
+
+ try:
+ self.documented = set(get_documented_parameters(self.topdir))
+ except:
+ self.fail("Unable to load documented parameters")
+
+ try:
+ self.defaults = set(get_documented_tuples(self.topdir))
+ except:
+ self.fail("Unable to load parameters")
+
+ try:
+ self.defaults_all = set(get_documented_tuples(self.topdir, False))
+ except:
+ self.fail("Unable to load parameters")
+
    def tearDown(self):
        """Remove the generated smb.conf fixtures after each test."""
        super().tearDown()
        os.unlink(self.smbconf)
        os.unlink(self.blankconf)
+
    def test_default_s3(self):
        """Run the full parameter checks against the s3 testparm binary."""
        self._test_default(['bin/testparm'])
        self._set_defaults(['bin/testparm'])

        # registry shares appears to need sudo
        self._set_arbitrary(['bin/testparm'],
                            exceptions = ['client lanman auth',
                                          'client plaintext auth',
                                          'registry shares',
                                          'smb ports',
                                          'rpc server dynamic port range',
                                          'name resolve order',
                                          'clustering'])
        self._test_empty(['bin/testparm'])
+
    def test_default_s4(self):
        """Run the full parameter checks against samba-tool testparm."""
        self._test_default(['bin/samba-tool', 'testparm'])
        self._set_defaults(['bin/samba-tool', 'testparm'])
        self._set_arbitrary(['bin/samba-tool', 'testparm'],
                            exceptions=['smb ports',
                                        'rpc server dynamic port range',
                                        'name resolve order'])
        self._test_empty(['bin/samba-tool', 'testparm'])
+
    def _test_default(self, program):
        """Check that *program* (a testparm invocation) reports the
        documented default for every documented parameter, fanning the
        invocations out over a process pool."""

        # samba-tool is a python script; run it under the test's python
        if program[0] == 'bin/samba-tool' and os.getenv("PYTHON", None):
            program = [os.environ["PYTHON"]] + program

        failset = set()

        with concurrent.futures.ProcessPoolExecutor(max_workers=get_max_worker_count()) as executor:
            result_futures = []

            for tuples in self.defaults:
                param, default, context, param_type = tuples

                if param in self.special_cases:
                    continue
                # bad, bad parametric options - we don't have their default values
                if ':' in param:
                    continue
                section = None
                if context == "G":
                    section = "global"
                elif context == "S":
                    section = "test"
                else:
                    self.fail("%s has no valid context" % param)

                # testparm and samba-tool testparm take the config file
                # argument in different forms
                program_arg1 = ["--configfile=%s" % (self.smbconf)]
                if (program[0] == 'bin/testparm'):
                    program_arg1 = ["--suppress-prompt", self.smbconf]

                cmdline = program + program_arg1 + [
                    "--section-name",
                    section,
                    "--parameter-name",
                    param]

                future = executor.submit(check_or_set_smbconf_default, cmdline, self.topdir, param, default)
                result_futures.append(future)

            # a truthy result is a (got, param, expected) mismatch tuple
            for f in concurrent.futures.as_completed(result_futures):
                if f.result():
                    result, param, default_param = f.result()

                    doc_triple = "%s\n Expected: %s" % (param, default_param)
                    failset.add("%s\n Got: %s" % (doc_triple, result))

        if len(failset) > 0:
            self.fail(self._format_message(failset,
                                           "Parameters that do not have matching defaults:"))
+
+ def _set_defaults(self, program):
+
+ if program[0] == 'bin/samba-tool' and os.getenv("PYTHON", None):
+ program = [os.environ["PYTHON"]] + program
+
+ failset = set()
+
+ with concurrent.futures.ProcessPoolExecutor(max_workers=get_max_worker_count()) as executor:
+ result_futures = []
+
+ for tuples in self.defaults:
+ param, default, context, param_type = tuples
+
+ exceptions = set([
+ 'printing',
+ 'smbd max async dosmode',
+ ])
+
+ if param in exceptions:
+ continue
+
+ section = None
+ if context == "G":
+ section = "global"
+ elif context == "S":
+ section = "test"
+ else:
+ self.fail("%s has no valid context" % param)
+
+ program_arg1 = ["--configfile=%s" % (self.smbconf)]
+ if (program[0] == 'bin/testparm'):
+ program_arg1 = ["--suppress-prompt", self.smbconf]
+
+ cmdline = program + program_arg1 + [
+ "--section-name",
+ section,
+ "--parameter-name",
+ param,
+ "--option",
+ "%s = %s" % (param, default)]
+ future = executor.submit(check_or_set_smbconf_default, cmdline, self.topdir, param, default)
+ result_futures.append(future)
+
+ for f in concurrent.futures.as_completed(result_futures):
+ if f.result():
+ result, param, default_param = f.result()
+
+ doc_triple = "%s\n Expected: %s" % (param, default)
+ failset.add("%s\n Got: %s" % (doc_triple, result))
+
+ if len(failset) > 0:
+ self.fail(self._format_message(failset,
+ "Parameters that do not have matching defaults:"))
+
    def _set_arbitrary(self, program, exceptions=None):
        """Set each parameter to an arbitrary (non-default) value and
        verify that (a) testparm echoes it back and (b) a command-line
        --option overrides a conflicting config-file value.

        :param program: the testparm command to run, as an argv prefix
        :param exceptions: parameter names to skip entirely
        """

        # samba-tool is a python script; run it under the test's python
        if program[0] == 'bin/samba-tool' and os.getenv("PYTHON", None):
            program = [os.environ["PYTHON"]] + program

        # one representative value per parameter type...
        arbitrary = {'string': 'string', 'boolean': 'yes', 'integer': '5',
                     'boolean-rev': 'yes',
                     'cmdlist': 'a b c',
                     'bytes': '10',
                     'octal': '0123',
                     'ustring': 'ustring',
                     'enum': '', 'boolean-auto': '', 'char': 'a', 'list': 'a, b, c'}
        # ...and a conflicting value of the same type, for the override check
        opposite_arbitrary = {'string': 'string2', 'boolean': 'no', 'integer': '6',
                              'boolean-rev': 'no',
                              'cmdlist': 'd e f',
                              'bytes': '11',
                              'octal': '0567',
                              'ustring': 'ustring2',
                              'enum': '', 'boolean-auto': '', 'char': 'b', 'list': 'd, e, f'}

        failset = set()

        with concurrent.futures.ProcessPoolExecutor(max_workers=get_max_worker_count()) as executor:
            result_futures1 = []
            result_futures2 = []

            for tuples in self.defaults_all:
                param, default, context, param_type = tuples

                if param in ['printing', 'copy', 'include', 'log level']:
                    continue

                # currently no easy way to set an arbitrary value for these
                if param_type in ['enum', 'boolean-auto']:
                    continue

                if exceptions is not None:
                    if param in exceptions:
                        continue

                section = None
                if context == "G":
                    section = "global"
                elif context == "S":
                    section = "test"
                else:
                    self.fail("%s has no valid context" % param)

                value_to_use = arbitrary.get(param_type)
                if value_to_use is None:
                    self.fail("%s has an invalid type" % param)

                program_arg1 = ["--configfile=%s" % (self.smbconf)]
                if (program[0] == 'bin/testparm'):
                    program_arg1 = ["--suppress-prompt", self.smbconf]

                # check 1: set the value via --option and read it back
                cmdline = program + program_arg1 + [
                    "--section-name",
                    section,
                    "--parameter-name",
                    param,
                    "--option",
                    "%s = %s" % (param, value_to_use)]

                future = executor.submit(set_smbconf_arbitrary, cmdline, self.topdir, param, param_type, value_to_use)
                result_futures1.append(future)

                # check 2: --option must override the opposite value
                # written into a temporary config file (the worker adds
                # the -s <file> argument itself)
                opposite_value = opposite_arbitrary.get(param_type)

                cmdline = program + ["--suppress-prompt",
                                     "--option",
                                     "%s = %s" % (param, value_to_use)]

                future = executor.submit(set_smbconf_arbitrary_opposite, cmdline, self.topdir, self.tempdir,
                                         section, param, param_type, opposite_value, value_to_use)
                result_futures2.append(future)

            for f in concurrent.futures.as_completed(result_futures1):
                if f.result():
                    result, param, value_to_use = f.result()

                    doc_triple = "%s\n Expected: %s" % (param, value_to_use)
                    failset.add("%s\n Got: %s" % (doc_triple, result))

            for f in concurrent.futures.as_completed(result_futures2):
                if f.result():
                    param, value_to_use, value_found = f.result()

                    doc_triple = "%s\n Expected: %s" % (param, value_to_use)
                    failset.add("%s\n Got: %s" % (doc_triple, value_found))

        if len(failset) > 0:
            self.fail(self._format_message(failset,
                                           "Parameters that were unexpectedly not set:"))
+
    def _test_empty(self, program):
        """Check that testparm on an empty smb.conf emits only the
        [global] section header (i.e. no spurious defaults)."""

        if program[0] == 'bin/samba-tool' and os.getenv("PYTHON", None):
            program = [os.environ["PYTHON"]] + program

        program_arg1 = ["--configfile=%s" % (self.blankconf), "--suppress-prompt"]
        if (program[0] == 'bin/testparm'):
            program_arg1 = ["--suppress-prompt", self.blankconf]

        print(program + program_arg1)
        p = subprocess.Popen(program + program_arg1,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE,
                             cwd=self.topdir).communicate()
        output = ""

        # strip comment lines and the "idmap config *" default, which
        # testparm always prints
        for line in p[0].decode().splitlines():
            if line.strip().startswith('#'):
                continue
            if line.strip().startswith("idmap config *"):
                continue
            output += line.strip().lower() + '\n'

        if output.strip() != '[global]' and output.strip() != '[globals]':
            self.fail("Testparm returned unexpected output on an empty smb.conf.")
diff --git a/python/samba/tests/domain_backup.py b/python/samba/tests/domain_backup.py
new file mode 100644
index 0000000..c2ba2db
--- /dev/null
+++ b/python/samba/tests/domain_backup.py
@@ -0,0 +1,624 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Andrew Bartlett <abartlet@samba.org>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+from samba import provision, param
+import os
+import shutil
+from samba.tests import (env_loadparm, create_test_ou, BlackboxProcessError,
+ BlackboxTestCase, connect_samdb)
+import ldb
+from samba.samdb import SamDB
+from samba.auth import system_session
+from samba import Ldb, dn_from_dns_name
+from samba.netcmd.fsmo import get_fsmo_roleowner
+import re
+from samba import sites
+from samba.dsdb import _dsdb_load_udv_v2
+from samba import safe_tarfile as tarfile
+
+
def get_prim_dom(secrets_path, lp):
    """Return the kerberosSecret entries under CN=Primary Domains in the
    secrets database at *secrets_path*."""
    secrets_db = Ldb(secrets_path, session_info=system_session(), lp=lp)
    wanted_attrs = ['objectClass', 'samAccountName',
                    'secret', 'msDS-KeyVersionNumber']
    return secrets_db.search(base="CN=Primary Domains",
                             attrs=wanted_attrs,
                             scope=ldb.SCOPE_SUBTREE,
                             expression="(objectClass=kerberosSecret)")
+
+# The backup tests require that a completely clean LoadParm object gets used
+# for the restore. Otherwise the same global LP gets re-used, and the LP
+# settings can bleed from one test case to another.
+# To do this, these tests should use check_output(), which executes the command
+# in a separate process (as opposed to runcmd(), runsubcmd()).
+# So although this is a samba-tool test, we don't inherit from SambaToolCmdTest
+# so that we never inadvertently use .runcmd() by accident.
+class DomainBackupBase(BlackboxTestCase):
+
    def setUp(self):
        """Connect to the testenv DC being backed up and initialise the
        state the backup/restore helpers use."""
        super().setUp()

        server = os.environ["DC_SERVER"]
        self.user_auth = "-U%s%%%s" % (os.environ["DC_USERNAME"],
                                       os.environ["DC_PASSWORD"])

        # LDB connection to the original server being backed up
        self.ldb = connect_samdb("ldap://%s" % server)
        # name the restored DC will take (subclasses may override)
        self.new_server = "BACKUPSERV"
        self.server = server.upper()
        # subclasses fill in the samba-tool sub-command to run
        self.base_cmd = None
        # special attributes the backup adds to @SAMBA_DSDB
        self.backup_markers = ['sidForRestore', 'backupDate']
        self.restore_domain = os.environ["DOMAIN"]
        self.restore_realm = os.environ["REALM"]
        self.backend = None
+
+ def use_backend(self, backend):
+ """Explicitly set the DB backend that the backup should use"""
+ self.backend = backend
+ self.base_cmd += ["--backend-store=" + backend]
+
+ def get_expected_partitions(self, samdb):
+ basedn = str(samdb.get_default_basedn())
+ config_dn = "CN=Configuration,%s" % basedn
+ return [basedn, config_dn, "CN=Schema,%s" % config_dn,
+ "DC=DomainDnsZones,%s" % basedn,
+ "DC=ForestDnsZones,%s" % basedn]
+
    def assert_partitions_present(self, samdb):
        """Asserts all expected partitions are present in the backup samdb"""
        # the rootDSE advertises the naming contexts the DB holds
        res = samdb.search(base="", scope=ldb.SCOPE_BASE,
                           attrs=['namingContexts'])
        actual_ncs = [str(r) for r in res[0].get('namingContexts')]

        expected_ncs = self.get_expected_partitions(samdb)

        for nc in expected_ncs:
            self.assertTrue(nc in actual_ncs,
                            "%s not in %s" % (nc, str(actual_ncs)))
+
+ def assert_repl_uptodate_vector(self, samdb):
+ """Asserts an replUpToDateVector entry exists for the original DC"""
+ orig_invoc_id = self.ldb.get_invocation_id()
+ expected_ncs = self.get_expected_partitions(samdb)
+
+ # loop through the partitions and check the upToDateness vector
+ for nc in expected_ncs:
+ found = False
+ for cursor in _dsdb_load_udv_v2(samdb, nc):
+ if orig_invoc_id == str(cursor.source_dsa_invocation_id):
+ found = True
+ break
+ self.assertTrue(found, "Couldn't find UDTV for original DC")
+
+ def assert_dcs_present(self, samdb, expected_server, expected_count=None):
+ """Checks that the expected server is present in the restored DB"""
+ search_expr = "(&(objectClass=Server)(serverReference=*))"
+ res = samdb.search(samdb.get_config_basedn(),
+ scope=ldb.SCOPE_SUBTREE,
+ expression=search_expr)
+ server_found = False
+ for msg in res:
+ if expected_server in str(msg.dn):
+ server_found = True
+
+ self.assertTrue(server_found,
+ "Could not find %s server" % expected_server)
+
+ if expected_count:
+ self.assertTrue(len(res) == expected_count)
+
+ def restore_dir(self):
+ extract_dir = os.path.join(self.tempdir, 'tree')
+ if not os.path.exists(extract_dir):
+ os.mkdir(extract_dir)
+ self.addCleanup(shutil.rmtree, extract_dir)
+ return extract_dir
+
    def untar_backup(self, backup_file):
        """Untar the backup file's raw contents (i.e. not a proper restore)"""
        extract_dir = self.restore_dir()
        # note: 'tarfile' here is samba.safe_tarfile, imported at the
        # top of the file, which guards extraction against unsafe paths
        with tarfile.open(backup_file) as tf:
            tf.extractall(extract_dir)
+
+ def _test_backup_untar(self, primary_domain_secrets=0):
+ """Creates a backup, untars the raw files, and sanity-checks the DB"""
+ backup_file = self.create_backup()
+ self.untar_backup(backup_file)
+
+ private_dir = os.path.join(self.restore_dir(), "private")
+ samdb_path = os.path.join(private_dir, "sam.ldb")
+ lp = env_loadparm()
+ samdb = SamDB(url=samdb_path, session_info=system_session(), lp=lp)
+
+ # check that backup markers were added to the DB
+ res = samdb.search(base=ldb.Dn(samdb, "@SAMBA_DSDB"),
+ scope=ldb.SCOPE_BASE,
+ attrs=self.backup_markers)
+ self.assertEqual(len(res), 1)
+ for marker in self.backup_markers:
+ self.assertIsNotNone(res[0].get(marker),
+ "%s backup marker missing" % marker)
+
+ # check the secrets.ldb entry for the primary domain. (Online/clone
+ # backups shouldn't have this, as they never got it during the backup)
+ secrets_path = os.path.join(private_dir, "secrets.ldb")
+ res = get_prim_dom(secrets_path, lp)
+ self.assertEqual(len(res), primary_domain_secrets)
+
+ # sanity-check that all the partitions got backed up
+ self.assert_partitions_present(samdb)
+
+ def _test_backup_restore(self):
+ """Does a backup/restore, with specific checks of the resulting DB"""
+ backup_file = self.create_backup()
+ self.restore_backup(backup_file)
+ lp = self.check_restored_smbconf()
+ self.check_restored_database(lp)
+
    def _test_backup_restore_no_secrets(self):
        """Does a backup/restore with secrets excluded from the resulting DB"""

        # exclude secrets when we create the backup
        backup_file = self.create_backup(extra_args=["--no-secrets"])
        self.restore_backup(backup_file)
        lp = self.check_restored_smbconf()

        # assert that we don't find user secrets in the DB
        self.check_restored_database(lp, expect_secrets=False)
+
    def _test_backup_restore_into_site(self):
        """Does a backup and restores into a non-default site"""

        # create a new non-default site
        sitename = "Test-Site-For-Backups"
        sites.create_site(self.ldb, self.ldb.get_config_basedn(), sitename)
        self.addCleanup(sites.delete_site, self.ldb,
                        self.ldb.get_config_basedn(), sitename)

        # restore the backup DC into the site we just created
        backup_file = self.create_backup()
        self.restore_backup(backup_file, ["--site=" + sitename])

        lp = self.check_restored_smbconf()
        restored_ldb = self.check_restored_database(lp)

        # check the restored DC was added to the site we created, i.e. there's
        # an entry matching the new DC sitting underneath the site DN
        site_dn = "CN={0},CN=Sites,{1}".format(sitename,
                                               restored_ldb.get_config_basedn())
        match_server = "(&(objectClass=server)(cn={0}))".format(self.new_server)
        res = restored_ldb.search(site_dn, scope=ldb.SCOPE_SUBTREE,
                                  expression=match_server)
        self.assertTrue(len(res) == 1,
                        "Failed to find new DC under site")
+
    def create_smbconf(self, settings):
        """Creates a very basic smb.conf to pass to the restore tool

        :param settings: dict of smb.conf key/value pairs to include
        :return: path of the generated smb.conf (removed on cleanup)
        """

        # without the testenv config's settings, the NTACL backup_restore()
        # operation will fail (because we're not root). So first suck in all
        # testenv's settings, so we retain these in the new config. Note we
        # use a non-global LP so that these settings don't leak into other
        # places we use LoadParms
        testenv_conf = os.environ["SMB_CONF_PATH"]
        local_lp = param.LoadParm(filename_for_non_global_lp=testenv_conf)

        # add the new settings to the LP, then write the settings to file
        for key, val in settings.items():
            local_lp.set(key, val)

        new_smbconf = os.path.join(self.tempdir, "smb.conf")
        local_lp.dump(False, new_smbconf)

        self.addCleanup(os.remove, new_smbconf)
        return new_smbconf
+
    def _test_backup_restore_with_conf(self):
        """Checks smb.conf values passed to the restore are retained"""
        backup_file = self.create_backup()

        # create an smb.conf that we pass to the restore. The netbios/state
        # dir should get overridden by the restore, the other settings should
        # trickle through into the restored dir's smb.conf
        settings = {'state directory': '/var/run',
                    'netbios name': 'FOOBAR',
                    'workgroup': 'NOTMYDOMAIN',
                    'realm': 'NOT.MY.REALM'}
        assert_settings = {'drs: max link sync': '275',
                           'prefork children': '7'}
        settings.update(assert_settings)
        smbconf = self.create_smbconf(settings)

        self.restore_backup(backup_file, ["--configfile=" + smbconf])

        # this will check netbios name/state dir
        lp = self.check_restored_smbconf()
        self.check_restored_database(lp)

        # check the remaining settings are still intact
        for key, val in assert_settings.items():
            self.assertEqual(str(lp.get(key)), val,
                             "'%s' was '%s' in smb.conf" % (key, lp.get(key)))
+
    def check_restored_smbconf(self):
        """Sanity-check important smb.conf values are restored correctly

        :return: a LoadParm for the restored smb.conf
        """
        smbconf = os.path.join(self.restore_dir(), "etc", "smb.conf")
        bkp_lp = param.LoadParm(filename_for_non_global_lp=smbconf)
        self.assertEqual(bkp_lp.get('netbios name'), self.new_server)
        self.assertEqual(bkp_lp.get('workgroup'), self.restore_domain)
        self.assertEqual(bkp_lp.get('realm'), self.restore_realm.upper())

        # we restore with a fixed directory structure, so we can sanity-check
        # that the core filepaths settings are what we expect them to be
        private_dir = os.path.join(self.restore_dir(), "private")
        self.assertEqual(bkp_lp.get('private dir'), private_dir)
        state_dir = os.path.join(self.restore_dir(), "state")
        self.assertEqual(bkp_lp.get('state directory'), state_dir)
        return bkp_lp
+
    def check_restored_database(self, bkp_lp, expect_secrets=True):
        """Check the restored DB is well-formed: primary-domain secret,
        markers removed, replication state cleared, expected partitions,
        DCs, FSMO roles and user secrets.

        :return: a SamDB handle on the restored database
        """
        paths = provision.provision_paths_from_lp(bkp_lp, bkp_lp.get("realm"))

        # the restored secrets DB must hold the new DC's machine account
        bkp_pd = get_prim_dom(paths.secrets, bkp_lp)
        self.assertEqual(len(bkp_pd), 1)
        account = bkp_pd[0].get('samAccountName')
        self.assertIsNotNone(account)
        self.assertEqual(str(account[0]), self.new_server + '$')
        self.assertIsNotNone(bkp_pd[0].get('secret'))

        samdb = SamDB(url=paths.samdb, session_info=system_session(),
                      lp=bkp_lp, credentials=self.get_credentials())

        # check that the backup markers have been removed from the restored DB
        res = samdb.search(base=ldb.Dn(samdb, "@SAMBA_DSDB"),
                           scope=ldb.SCOPE_BASE,
                           attrs=self.backup_markers)
        self.assertEqual(len(res), 1)
        for marker in self.backup_markers:
            self.assertIsNone(res[0].get(marker),
                              "%s backup-marker left behind" % marker)

        # check that the repsFrom and repsTo values have been removed
        # from the restored DB
        res = samdb.search(base=samdb.get_default_basedn(),
                           scope=ldb.SCOPE_BASE,
                           attrs=['repsFrom', 'repsTo'])
        self.assertEqual(len(res), 1)
        self.assertIsNone(res[0].get('repsFrom'))
        self.assertIsNone(res[0].get('repsTo'))

        res = samdb.search(base=samdb.get_config_basedn(),
                           scope=ldb.SCOPE_BASE,
                           attrs=['repsFrom', 'repsTo'])
        self.assertEqual(len(res), 1)
        self.assertIsNone(res[0].get('repsFrom'))
        self.assertIsNone(res[0].get('repsTo'))

        # check the DB is using the backend we supplied
        if self.backend:
            res = samdb.search(base="@PARTITION", scope=ldb.SCOPE_BASE,
                               attrs=["backendStore"])
            backend = str(res[0].get("backendStore"))
            self.assertEqual(backend, self.backend)

        # check the restored DB has the expected partitions/DC/FSMO roles
        self.assert_partitions_present(samdb)
        self.assert_dcs_present(samdb, self.new_server, expected_count=1)
        self.assert_fsmo_roles(samdb, self.new_server, self.server)
        self.assert_secrets(samdb, expect_secrets=expect_secrets)

        # check we still have an uptodateness vector for the original DC
        self.assert_repl_uptodate_vector(samdb)
        return samdb
+
    def assert_user_secrets(self, samdb, username, expect_secrets):
        """Asserts that a user has/doesn't have secrets as expected"""
        basedn = str(samdb.get_default_basedn())
        user_dn = "CN=%s,CN=users,%s" % (username, basedn)

        if expect_secrets:
            self.assertIsNotNone(samdb.searchone("unicodePwd", user_dn))
        else:
            # the search should throw an exception because the secrets
            # attribute isn't actually there
            self.assertRaises(KeyError, samdb.searchone, "unicodePwd", user_dn)
+
+ def assert_secrets(self, samdb, expect_secrets):
+ """Check the user secrets in the restored DB match what's expected"""
+
+ # check secrets for the built-in testenv users match what's expected
+ test_users = ["alice", "bob", "jane"]
+ for user in test_users:
+ self.assert_user_secrets(samdb, user, expect_secrets)
+
+ def assert_fsmo_roles(self, samdb, server, exclude_server):
+ """Asserts the expected server is the FSMO role owner"""
+ domain_dn = samdb.domain_dn()
+ forest_dn = dn_from_dns_name(samdb.forest_dns_name())
+ fsmos = {'infrastructure': "CN=Infrastructure," + domain_dn,
+ 'naming': "CN=Partitions,%s" % samdb.get_config_basedn(),
+ 'schema': str(samdb.get_schema_basedn()),
+ 'rid': "CN=RID Manager$,CN=System," + domain_dn,
+ 'pdc': domain_dn,
+ 'domaindns':
+ "CN=Infrastructure,DC=DomainDnsZones," + domain_dn,
+ 'forestdns':
+ "CN=Infrastructure,DC=ForestDnsZones," + forest_dn}
+ for role, dn in fsmos.items():
+ owner = get_fsmo_roleowner(samdb, ldb.Dn(samdb, dn), role)
+ self.assertTrue("CN={0},".format(server) in owner.extended_str(),
+ "Expected %s to own FSMO role %s" % (server, role))
+ self.assertTrue("CN={0},".format(exclude_server)
+ not in owner.extended_str(),
+ "%s found as FSMO %s role owner" % (server, role))
+
+ def cleanup_tempdir(self):
+ for filename in os.listdir(self.tempdir):
+ filepath = os.path.join(self.tempdir, filename)
+ if os.path.isfile(filepath):
+ os.remove(filepath)
+ elif os.path.isdir(filepath):
+ shutil.rmtree(filepath)
+
    def run_cmd(self, args):
        """Executes a samba-tool backup/restore command

        :param args: samba-tool sub-command arguments, joined with
            spaces and run in a separate process via check_output()
        """

        cmd = " ".join(args)
        print("Executing: samba-tool %s" % cmd)
        try:
            # note: it's important we run the cmd in a separate process here
            out = self.check_output("samba-tool " + cmd)
        except BlackboxProcessError as e:
            # if the command failed, it may have left behind temporary files.
            # We're going to fail the test, but first cleanup any temp files so
            # that we skip the TestCaseInTempDir._remove_tempdir() assertions
            self.cleanup_tempdir()
            self.fail("Error calling samba-tool: %s" % e)
        print(out)
+
    def create_backup(self, extra_args=None):
        """Runs the backup cmd to produce a backup file for the testenv DC

        :return: path to the single samba-backup-*.tar.bz2 produced; the
            file is removed automatically when the test finishes
        """
        # Run the backup command and check we got one backup tar file
        args = self.base_cmd + ["--targetdir=" + self.tempdir]
        if extra_args:
            args += extra_args

        self.run_cmd(args)

        # find the filename of the backup-file generated
        tar_files = []
        for fn in os.listdir(self.tempdir):
            if (fn.startswith("samba-backup-") and fn.endswith(".tar.bz2")):
                tar_files.append(fn)

        self.assertTrue(len(tar_files) == 1,
                        "Domain backup created %u tar files" % len(tar_files))

        # clean up the backup file once the test finishes
        backup_file = os.path.join(self.tempdir, tar_files[0])
        self.addCleanup(os.remove, backup_file)
        return backup_file
+
    def restore_backup(self, backup_file, extra_args=None):
        """Restores the samba directory files from a given backup

        The files are restored into restore_dir(); afterwards the
        original DC is checked to make sure the restore didn't touch it.
        """
        # Run the restore command
        extract_dir = self.restore_dir()
        args = ["domain", "backup", "restore", "--backup-file=" + backup_file,
                "--targetdir=" + extract_dir,
                "--newservername=" + self.new_server]
        if extra_args:
            args += extra_args

        self.run_cmd(args)

        # sanity-check the restore doesn't modify the original DC by mistake
        self.assert_partitions_present(self.ldb)
        self.assert_dcs_present(self.ldb, self.server)
        self.assert_fsmo_roles(self.ldb, self.server, self.new_server)
+
+
class DomainBackupOnline(DomainBackupBase):
    """Runs the common backup/restore test cases using 'online' backups,
    alternating the tdb and mdb backend stores for coverage."""

    def setUp(self):
        super().setUp()
        self.base_cmd = ["domain", "backup", "online",
                         "--server=" + self.server, self.user_auth]

    # run the common test cases above using online backups
    def test_backup_untar(self):
        self._test_backup_untar()

    def test_backup_restore(self):
        self.use_backend("tdb")
        self._test_backup_restore()

    def test_backup_restore_with_conf(self):
        self.use_backend("mdb")
        self._test_backup_restore_with_conf()

    def test_backup_restore_no_secrets(self):
        self.use_backend("tdb")
        self._test_backup_restore_no_secrets()

    def test_backup_restore_into_site(self):
        self.use_backend("mdb")
        self._test_backup_restore_into_site()
+
+
+class DomainBackupRename(DomainBackupBase):
+
+ # run the above test cases using a rename backup
    def setUp(self):
        """Configure the base-class state for 'rename' backups, which
        restore under a new domain/realm/server name."""
        super().setUp()
        self.new_server = "RENAMESERV"
        self.restore_domain = "NEWDOMAIN"
        self.restore_realm = "rename.test.net"
        self.new_basedn = "DC=rename,DC=test,DC=net"
        self.base_cmd = ["domain", "backup", "rename", self.restore_domain,
                         self.restore_realm, "--server=" + self.server,
                         self.user_auth]
        # rename backups add an extra marker to the backup DB
        self.backup_markers += ['backupRename']
+
+ # run the common test case code for backup-renames
+ def test_backup_untar(self):
+ self._test_backup_untar()
+
+ def test_backup_restore(self):
+ self.use_backend("mdb")
+ self._test_backup_restore()
+
+ def test_backup_restore_with_conf(self):
+ self.use_backend("tdb")
+ self._test_backup_restore_with_conf()
+
+ def test_backup_restore_no_secrets(self):
+ self.use_backend("mdb")
+ self._test_backup_restore_no_secrets()
+
+ def test_backup_restore_into_site(self):
+ self.use_backend("tdb")
+ self._test_backup_restore_into_site()
+
+ def test_backup_invalid_args(self):
+ """Checks that rename commands with invalid args are rejected"""
+
+ # try a "rename" using the same realm as the DC currently has
+ rename_cmd = "samba-tool domain backup rename "
+ bad_cmd = "{cmd} {domain} {realm}".format(cmd=rename_cmd,
+ domain=self.restore_domain,
+ realm=os.environ["REALM"])
+ self.assertRaises(BlackboxProcessError, self.check_output, bad_cmd)
+
+ # try a "rename" using the same domain as the DC currently has
+ bad_cmd = "{cmd} {domain} {realm}".format(cmd=rename_cmd,
+ domain=os.environ["DOMAIN"],
+ realm=self.restore_realm)
+ self.assertRaises(BlackboxProcessError, self.check_output, bad_cmd)
+
+ def add_link(self, attr, source, target):
+ m = ldb.Message()
+ m.dn = ldb.Dn(self.ldb, source)
+ m[attr] = ldb.MessageElement(target, ldb.FLAG_MOD_ADD, attr)
+ self.ldb.modify(m)
+
+ def test_one_way_links(self):
+ """Sanity-check that a rename handles one-way links correctly"""
+
+ # Do some initial setup on the DC before back it up:
+ # create an OU to hold the test objects we'll create
+ test_ou = create_test_ou(self.ldb, "rename_test")
+ self.addCleanup(self.ldb.delete, test_ou, ["tree_delete:1"])
+
+ # create the source and target objects and link them together.
+ # We use addressBookRoots2 here because it's a one-way link
+ src_dn = "CN=link_src,%s" % test_ou
+ self.ldb.add({"dn": src_dn,
+ "objectclass": "msExchConfigurationContainer"})
+ target_dn = "OU=link_tgt,%s" % test_ou
+ self.ldb.add({"dn": target_dn, "objectclass": "organizationalunit"})
+ link_attr = "addressBookRoots2"
+ self.add_link(link_attr, src_dn, target_dn)
+
+ # add a second link target that's in a different partition
+ server_dn = ("CN=testrename,CN=Servers,CN=Default-First-Site-Name,"
+ "CN=Sites,%s" % str(self.ldb.get_config_basedn()))
+ self.ldb.add({"dn": server_dn, "objectclass": "server"})
+ self.addCleanup(self.ldb.delete, server_dn)
+ self.add_link(link_attr, src_dn, server_dn)
+
+ # do the backup/restore
+ backup_file = self.create_backup()
+ self.restore_backup(backup_file)
+ lp = self.check_restored_smbconf()
+ restored_ldb = self.check_restored_database(lp)
+
+ # work out what the new DNs should be
+ old_basedn = str(self.ldb.get_default_basedn())
+ new_target_dn = re.sub(old_basedn + '$', self.new_basedn, target_dn)
+ new_src_dn = re.sub(old_basedn + '$', self.new_basedn, src_dn)
+ new_server_dn = re.sub(old_basedn + '$', self.new_basedn, server_dn)
+
+ # check the links exist in the renamed DB with the correct DNs
+ res = restored_ldb.search(base=new_src_dn, scope=ldb.SCOPE_BASE,
+ attrs=[link_attr])
+ self.assertEqual(len(res), 1,
+ "Failed to find renamed link source object")
+ self.assertTrue(link_attr in res[0], "Missing link attribute")
+ link_values = [str(x) for x in res[0][link_attr]]
+ self.assertTrue(new_target_dn in link_values)
+ self.assertTrue(new_server_dn in link_values)
+
+ # extra checks we run on the restored DB in the rename case
+ def check_restored_database(self, bkp_lp, expect_secrets=True):
+ # run the common checks over the restored DB
+ common_test = super()
+ samdb = common_test.check_restored_database(bkp_lp, expect_secrets)
+
+ # check we have actually renamed the DNs
+ basedn = str(samdb.get_default_basedn())
+ self.assertEqual(basedn, self.new_basedn)
+
+ # check the partition and netBIOS name match the new domain
+ partitions_dn = samdb.get_partitions_dn()
+ nc_name = ldb.binary_encode(str(basedn))
+ res = samdb.search(base=partitions_dn, scope=ldb.SCOPE_ONELEVEL,
+ attrs=["nETBIOSName", "cn"],
+ expression='ncName=%s' % nc_name)
+ self.assertEqual(len(res), 1,
+ "Looking up partition's NetBIOS name failed")
+ self.assertEqual(str(res[0].get("nETBIOSName")), self.restore_domain)
+ self.assertEqual(str(res[0].get("cn")), self.restore_domain)
+
+ # check the DC has the correct dnsHostname
+ realm = self.restore_realm
+ dn = "CN=%s,OU=Domain Controllers,%s" % (self.new_server,
+ self.new_basedn)
+ res = samdb.search(base=dn, scope=ldb.SCOPE_BASE,
+ attrs=["dNSHostName"])
+ self.assertEqual(len(res), 1,
+ "Looking up new DC's dnsHostname failed")
+ expected_val = "%s.%s" % (self.new_server.lower(), realm)
+ self.assertEqual(str(res[0].get("dNSHostName")), expected_val)
+
+ # check the DNS zones for the new realm are present
+ dn = "DC=%s,CN=MicrosoftDNS,DC=DomainDnsZones,%s" % (realm, basedn)
+ res = samdb.search(base=dn, scope=ldb.SCOPE_BASE)
+ self.assertEqual(len(res), 1, "Lookup of new domain's DNS zone failed")
+
+ forestdn = samdb.get_root_basedn().get_linearized()
+ dn = "DC=_msdcs.%s,CN=MicrosoftDNS,DC=ForestDnsZones,%s" % (realm,
+ forestdn)
+ res = samdb.search(base=dn, scope=ldb.SCOPE_BASE)
+ self.assertEqual(len(res), 1, "Lookup of new domain's DNS zone failed")
+ return samdb
+
+
class DomainBackupOffline(DomainBackupBase):
    """Runs the common backup/restore test cases using *offline* backups.

    Offline backups operate directly on the local DB files, so no server
    or credentials are passed in the base command.
    """

    def setUp(self):
        super().setUp()
        self.base_cmd = ["domain", "backup", "offline"]

    def test_backup_untar(self):
        # NOTE(review): offline backups appear to include one primary
        # domain secret in the tar (hence primary_domain_secrets=1) -
        # confirm against _test_backup_untar() in the base class
        self._test_backup_untar(primary_domain_secrets=1)

    def test_backup_restore_with_conf(self):
        self._test_backup_restore_with_conf()

    def test_backup_restore(self):
        self._test_backup_restore()

    def test_backup_restore_into_site(self):
        self._test_backup_restore_into_site()
diff --git a/python/samba/tests/domain_backup_offline.py b/python/samba/tests/domain_backup_offline.py
new file mode 100644
index 0000000..3b2f252
--- /dev/null
+++ b/python/samba/tests/domain_backup_offline.py
@@ -0,0 +1,252 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Andrew Bartlett <abartlet@samba.org>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import os
+import shutil
+import tempfile
+from samba.tests import BlackboxTestCase, BlackboxProcessError
+from samba.netcmd import CommandError
+from samba.param import LoadParm
+from samba import safe_tarfile as tarfile
+
+
+# The backup tests require that a completely clean LoadParm object gets used
+# for the restore. Otherwise the same global LP gets re-used, and the LP
+# settings can bleed from one test case to another.
+# To do this, these tests should use check_output(), which executes the command
+# in a separate process (as opposed to runcmd(), runsubcmd()).
+# So although this is a samba-tool test, we don't inherit from SambaToolCmdTest
+# so that we never inadvertently use .runcmd() by accident.
class DomainBackupOfflineCmp(BlackboxTestCase):
    """Blackbox tests for 'samba-tool domain backup offline'.

    Each test provisions (or joins) a throwaway domain in a temp dir,
    takes an offline backup of it, then verifies the backup tar and/or
    compares a restored copy against the original with 'ldapcmp'.
    """

    def test_domain_backup_offline_nested_tdb(self):
        self.nested_testcase('tdb')

    def test_domain_backup_offline_nested_mdb(self):
        self.nested_testcase('mdb')

    def nested_testcase(self, backend):
        """Backs up a DC whose private dir was moved *inside* its state dir
        and checks no file ends up in the backup tar twice."""
        self.prov_dir = self.provision(backend)
        self.extract_dir = None

        src = os.path.join(self.prov_dir, "private")
        dst = os.path.join(self.prov_dir, "state", "private")

        # Move private directory inside state directory
        shutil.move(src, dst)

        smbconf = os.path.join(self.prov_dir, "etc", "smb.conf")

        # Update the conf file to point at the relocated private dir
        lp = LoadParm(filename_for_non_global_lp=smbconf)
        lp.set("private dir", dst)
        lp.dump(False, smbconf)

        backup_file = self.backup(self.prov_dir)

        # Ensure each file is only present once in the tar file
        # (use a context manager so the tar handle is always closed)
        with tarfile.open(backup_file) as tf:
            names = tf.getnames()
        self.assertEqual(len(names), len(set(names)))

    def test_domain_backup_offline_join_restore_tdb(self):
        self.join_restore_testcase('tdb')

    def test_domain_backup_offline_join_restore_mdb(self):
        self.join_restore_testcase('mdb')

    def join_restore_testcase(self, backend):
        """Backs up and restores a DC created via a domain join (rather
        than a fresh provision)."""
        self.prov_dir = self.join(backend)
        self.extract_dir = None

        # fail (with the subprocess output) rather than error if the
        # backup command itself falls over
        try:
            backup_file = self.backup(self.prov_dir)
        except BlackboxProcessError as e:
            self.fail(e)

        self.extract_dir = self.restore(backup_file)

    def test_domain_backup_offline_hard_link_tdb(self):
        self.hard_link_testcase('tdb')

    def test_domain_backup_offline_hard_link_mdb(self):
        self.hard_link_testcase('mdb')

    def hard_link_testcase(self, backend):
        """Checks how hard-linked files are handled by the backup."""
        self.prov_dir = self.provision(backend)
        self.extract_dir = None

        # Create hard links in the private and state directories
        os.link(os.path.join(self.prov_dir, "private", "krb5.conf"),
                os.path.join(self.prov_dir, "state", "krb5.conf"))

        backup_file = self.backup(self.prov_dir)

        # Extract the backup (closing the tar handle when done)
        self.extract_dir = tempfile.mkdtemp(dir=self.tempdir)
        with tarfile.open(backup_file) as tf:
            tf.extractall(self.extract_dir)

        # Ensure that the hard link in the private directory was backed up,
        # while the one in the state directory was not.
        self.assertTrue(os.path.exists(os.path.join(self.extract_dir,
                                                    "private", "krb5.conf")))
        self.assertFalse(os.path.exists(os.path.join(self.extract_dir,
                                                     "statedir", "krb5.conf")))

    def test_domain_backup_offline_untar_tdb(self):
        self.untar_testcase('tdb')

    def test_domain_backup_offline_untar_mdb(self):
        self.untar_testcase('mdb')

    def test_domain_backup_offline_restore_tdb(self):
        self.restore_testcase('tdb')

    def test_domain_backup_offline_restore_mdb(self):
        self.restore_testcase('mdb')

    def restore_testcase(self, backend):
        """Backs up, restores, and ldapcmp-compares the restored DB
        against the original."""
        self.prov_dir = self.provision(backend)
        self.extract_dir = None
        backup_file = self.backup(self.prov_dir)

        self.extract_dir = self.restore(backup_file)

        # attrs that are altered by the restore process
        ignore_attrs = ["servicePrincipalName", "lastLogonTimestamp",
                        "rIDAllocationPool", "rIDAvailablePool", "rIDUsedPool",
                        "localPolicyFlags", "operatingSystem", "displayName",
                        "dnsRecord", "dNSTombstoned",
                        "msDS-NC-Replica-Locations", "msDS-HasInstantiatedNCs",
                        "interSiteTopologyGenerator"]
        filter_arg = "--filter=" + ",".join(ignore_attrs)
        args = ["--two", filter_arg]
        self.ldapcmp(self.prov_dir, self.extract_dir, args)

    def untar_testcase(self, backend):
        """Backs up, untars (without a restore), and ldapcmp-compares the
        extracted DB against the original."""
        self.prov_dir = self.provision(backend)
        self.extract_dir = None
        backup_file = self.backup(self.prov_dir)

        self.extract_dir = tempfile.mkdtemp(dir=self.tempdir)
        with tarfile.open(backup_file) as tf:
            tf.extractall(self.extract_dir)

        self.ldapcmp(self.prov_dir, self.extract_dir)

    def ldapcmp(self, prov_dir, ex_dir, args=None):
        """Runs 'samba-tool ldapcmp' over each partition of the two DBs,
        failing the test on any difference."""
        if args is None:
            args = []
        sam_fn = os.path.join("private", "sam.ldb")
        url1 = "tdb://" + os.path.join(os.path.realpath(prov_dir), sam_fn)
        url2 = "tdb://" + os.path.join(os.path.realpath(ex_dir), sam_fn)

        # Compare the restored sam.ldb with the old one
        for partition in ["domain", "configuration", "schema",
                          "dnsdomain", "dnsforest"]:
            cmd = "samba-tool ldapcmp " + " ".join([url1, url2, partition] + args)
            self.check_output(cmd)

    def provision(self, backend):
        """Provisions a throwaway domain and returns its targetdir."""
        target = tempfile.mkdtemp(dir=self.tempdir)

        # Provision domain. Use fake ACLs and store xattrs in tdbs so that
        # NTACL backup will work inside the testenv.
        # host-name option must be given because if this test runs on a
        # system with a very long hostname, it will be shortened in certain
        # circumstances, causing the ldapcmp to fail.
        prov_cmd = "samba-tool domain provision " +\
            "--domain FOO --realm foo.example.com " +\
            "--targetdir {target} " +\
            "--backend-store {backend} " +\
            "--host-name OLDSERVER " +\
            "--option=\"vfs objects=dfs_samba4 acl_xattr fake_acls xattr_tdb\""
        prov_cmd = prov_cmd.format(target=target, backend=backend)
        self.check_output(prov_cmd)

        return target

    def join(self, backend):
        """Joins a new DC to the testenv domain, demotes it again on the
        server side, and returns the joined DC's targetdir."""
        target = tempfile.mkdtemp(dir=self.tempdir)

        new_dc_name = "offlinebackupdc"

        join_cmd = "samba-tool domain join {domain} DC " +\
            "--server {server} " +\
            "--realm {realm} " +\
            "--username {username}%{password} " +\
            "--targetdir {target} " +\
            "--backend-store {backend} " +\
            "--option='netbios name = {new_dc_name}' " +\
            "--option=\"vfs objects=dfs_samba4 acl_xattr fake_acls xattr_tdb\""
        join_cmd = join_cmd.format(server=os.environ["DC_SERVER"],
                                   domain=os.environ["DOMAIN"],
                                   realm=os.environ["REALM"],
                                   username=os.environ["USERNAME"],
                                   password=os.environ["PASSWORD"],
                                   target=target,
                                   backend=backend,
                                   new_dc_name=new_dc_name)
        self.check_output(join_cmd)

        # remove the joined DC from the testenv domain again so it doesn't
        # affect other tests
        demote_cmd = "samba-tool domain demote " +\
            "--server {server} " +\
            "--username {username}%{password} " +\
            "--remove-other-dead-server={new_dc_name}"

        demote_cmd = demote_cmd.format(server=os.environ["DC_SERVER"],
                                       username=os.environ["USERNAME"],
                                       password=os.environ["PASSWORD"],
                                       new_dc_name=new_dc_name)
        self.check_output(demote_cmd)

        return target

    def backup(self, prov_dir):
        """Runs an offline backup of prov_dir and returns the path of the
        single backup tar file it must produce.

        :raises CommandError: if zero or multiple tar files were created.
        """
        cmd = ("samba-tool domain backup offline --targetdir={prov_dir} "
               "--configfile={prov_dir}/etc/smb.conf").format(prov_dir=prov_dir)
        self.check_output(cmd)

        tar_files = [fn for fn in os.listdir(prov_dir)
                     if fn.startswith("samba-backup-") and
                     fn.endswith(".tar.bz2")]
        if len(tar_files) != 1:
            raise CommandError("expected domain backup to create one tar" +
                               " file but got {0}".format(len(tar_files)))

        backup_file = os.path.join(prov_dir, tar_files[0])
        return backup_file

    def restore(self, backup_file):
        """Restores from a backup file and returns the restore targetdir."""
        extract_dir = tempfile.mkdtemp(dir=self.tempdir)
        cmd = ("samba-tool domain backup restore --backup-file={f}"
               " --targetdir={d} "
               "--newservername=NEWSERVER").format(f=backup_file,
                                                   d=extract_dir)
        self.check_output(cmd)

        return extract_dir

    def tearDown(self):
        # Remove temporary directories. Guard with getattr so that a
        # failure in provision()/join() (before the attributes are set)
        # doesn't raise AttributeError here and mask the original error.
        prov_dir = getattr(self, "prov_dir", None)
        if prov_dir:
            shutil.rmtree(prov_dir)
        extract_dir = getattr(self, "extract_dir", None)
        if extract_dir:
            shutil.rmtree(extract_dir)
diff --git a/python/samba/tests/dsdb.py b/python/samba/tests/dsdb.py
new file mode 100644
index 0000000..4d5b620
--- /dev/null
+++ b/python/samba/tests/dsdb.py
@@ -0,0 +1,1223 @@
+# Unix SMB/CIFS implementation. Tests for dsdb
+# Copyright (C) Matthieu Patou <mat@matws.net> 2010
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for samba.dsdb."""
+
+from samba.credentials import Credentials
+from samba.samdb import SamDB
+from samba.auth import system_session
+from samba.tests import TestCase
+from samba.tests import delete_force
+from samba.ndr import ndr_unpack, ndr_pack
+from samba.dcerpc import drsblobs, security, misc
+from samba.param import LoadParm
+from samba import dsdb, functional_level
+from samba import werror
+import ldb
+import samba
+import uuid
+
+
class DsdbAccountTests(TestCase):
    """Tests for RID allocation and replPropertyMetaData handling.

    setUp creates a throwaway user account (cleaned up automatically) and
    locates the local DC's RID Set object; the RID-allocation tests then
    manipulate that RID Set inside a transaction that is always cancelled,
    so the real DB state is never modified.
    """

    def setUp(self):
        super().setUp()
        self.lp = samba.tests.env_loadparm()
        self.creds = Credentials()
        self.creds.guess(self.lp)
        self.session = system_session()
        self.samdb = SamDB(session_info=self.session,
                           credentials=self.creds,
                           lp=self.lp)

        # Create a test user
        user_name = "dsdb-user-" + str(uuid.uuid4().hex[0:6])
        user_pass = samba.generate_random_password(32, 32)
        user_description = "Test user for dsdb test"

        base_dn = self.samdb.domain_dn()

        self.account_dn = "CN=" + user_name + ",CN=Users," + base_dn
        self.samdb.newuser(username=user_name,
                           password=user_pass,
                           description=user_description)
        # Cleanup (teardown)
        self.addCleanup(delete_force, self.samdb, self.account_dn)

        # Get server reference DN
        res = self.samdb.search(base=ldb.Dn(self.samdb,
                                            self.samdb.get_serverName()),
                                scope=ldb.SCOPE_BASE,
                                attrs=["serverReference"])
        # Get server reference
        self.server_ref_dn = ldb.Dn(
            self.samdb, res[0]["serverReference"][0].decode("utf-8"))

        # Get RID Set DN
        res = self.samdb.search(base=self.server_ref_dn,
                                scope=ldb.SCOPE_BASE,
                                attrs=["rIDSetReferences"])
        rid_set_refs = res[0]
        self.assertIn("rIDSetReferences", rid_set_refs)
        rid_set_str = rid_set_refs["rIDSetReferences"][0].decode("utf-8")
        self.rid_set_dn = ldb.Dn(self.samdb, rid_set_str)

    def get_rid_set(self, rid_set_dn):
        """Returns the RID Set object with its allocation attributes."""
        res = self.samdb.search(base=rid_set_dn,
                                scope=ldb.SCOPE_BASE,
                                attrs=["rIDAllocationPool",
                                       "rIDPreviousAllocationPool",
                                       "rIDUsedPool",
                                       "rIDNextRID"])
        return res[0]

    def test_ridalloc_next_free_rid(self):
        """next_free_rid() is rIDNextRID+1 and has no side effects."""
        # Test RID allocation. We assume that RID
        # pools allocated to us are contiguous.
        self.samdb.transaction_start()
        try:
            orig_rid_set = self.get_rid_set(self.rid_set_dn)
            self.assertIn("rIDAllocationPool", orig_rid_set)
            self.assertIn("rIDPreviousAllocationPool", orig_rid_set)
            self.assertIn("rIDUsedPool", orig_rid_set)
            self.assertIn("rIDNextRID", orig_rid_set)

            # Get rIDNextRID value from RID set.
            next_rid = int(orig_rid_set["rIDNextRID"][0])

            # Check the result of next_free_rid().
            next_free_rid = self.samdb.next_free_rid()
            self.assertEqual(next_rid + 1, next_free_rid)

            # Check calling it twice in succession gives the same result.
            next_free_rid2 = self.samdb.next_free_rid()
            self.assertEqual(next_free_rid, next_free_rid2)

            # Ensure that the RID set attributes have not changed.
            rid_set2 = self.get_rid_set(self.rid_set_dn)
            self.assertEqual(orig_rid_set, rid_set2)
        finally:
            self.samdb.transaction_cancel()

    def test_ridalloc_no_ridnextrid(self):
        """With no rIDNextRID, allocation starts at the next pool's base."""
        self.samdb.transaction_start()
        try:
            # Delete the rIDNextRID attribute of the RID set,
            # and set up previous and next pools.
            prev_lo = 1000
            prev_hi = 1999
            next_lo = 3000
            next_hi = 3999
            msg = ldb.Message()
            msg.dn = self.rid_set_dn
            msg["rIDNextRID"] = ldb.MessageElement([],
                                                   ldb.FLAG_MOD_DELETE,
                                                   "rIDNextRID")
            msg["rIDPreviousAllocationPool"] = (
                ldb.MessageElement(str((prev_hi << 32) | prev_lo),
                                   ldb.FLAG_MOD_REPLACE,
                                   "rIDPreviousAllocationPool"))
            msg["rIDAllocationPool"] = (
                ldb.MessageElement(str((next_hi << 32) | next_lo),
                                   ldb.FLAG_MOD_REPLACE,
                                   "rIDAllocationPool"))
            self.samdb.modify(msg)

            # Ensure that next_free_rid() returns the start of the next pool.
            next_free_rid3 = self.samdb.next_free_rid()
            self.assertEqual(next_lo, next_free_rid3)

            # Check the result of allocate_rid() matches.
            rid = self.samdb.allocate_rid()
            self.assertEqual(next_free_rid3, rid)

            # Check that the result of next_free_rid() has now changed.
            next_free_rid4 = self.samdb.next_free_rid()
            self.assertEqual(rid + 1, next_free_rid4)

            # Check the range of available RIDs.
            free_lo, free_hi = self.samdb.free_rid_bounds()
            self.assertEqual(rid + 1, free_lo)
            self.assertEqual(next_hi, free_hi)
        finally:
            self.samdb.transaction_cancel()

    def test_ridalloc_no_free_rids(self):
        """An exhausted pool makes next_free_rid() fail, but a fresh pool
        can still be requested via allocate_rid()."""
        self.samdb.transaction_start()
        try:
            # Exhaust our current pool of RIDs.
            pool_lo = 2000
            pool_hi = 2999
            msg = ldb.Message()
            msg.dn = self.rid_set_dn
            msg["rIDPreviousAllocationPool"] = (
                ldb.MessageElement(str((pool_hi << 32) | pool_lo),
                                   ldb.FLAG_MOD_REPLACE,
                                   "rIDPreviousAllocationPool"))
            msg["rIDAllocationPool"] = (
                ldb.MessageElement(str((pool_hi << 32) | pool_lo),
                                   ldb.FLAG_MOD_REPLACE,
                                   "rIDAllocationPool"))
            msg["rIDNextRID"] = (
                ldb.MessageElement(str(pool_hi),
                                   ldb.FLAG_MOD_REPLACE,
                                   "rIDNextRID"))
            self.samdb.modify(msg)

            # Ensure that calculating the next free RID fails.
            with self.assertRaises(ldb.LdbError) as err:
                self.samdb.next_free_rid()

            self.assertEqual("RID pools out of RIDs", err.exception.args[1])

            # Ensure we can still allocate a new RID.
            self.samdb.allocate_rid()
        finally:
            self.samdb.transaction_cancel()

    def test_ridalloc_new_ridset(self):
        """A zeroed RID Set has no free RIDs until a pool is assigned."""
        self.samdb.transaction_start()
        try:
            # Test what happens with RID Set values set to zero (similar to
            # when a RID Set is first created, except we also set
            # rIDAllocationPool to zero).
            msg = ldb.Message()
            msg.dn = self.rid_set_dn
            msg["rIDPreviousAllocationPool"] = (
                ldb.MessageElement("0",
                                   ldb.FLAG_MOD_REPLACE,
                                   "rIDPreviousAllocationPool"))
            msg["rIDAllocationPool"] = (
                ldb.MessageElement("0",
                                   ldb.FLAG_MOD_REPLACE,
                                   "rIDAllocationPool"))
            msg["rIDNextRID"] = (
                ldb.MessageElement("0",
                                   ldb.FLAG_MOD_REPLACE,
                                   "rIDNextRID"))
            self.samdb.modify(msg)

            # Ensure that calculating the next free RID fails.
            with self.assertRaises(ldb.LdbError) as err:
                self.samdb.next_free_rid()

            self.assertEqual("RID pools out of RIDs", err.exception.args[1])

            # Set values for the next pool.
            pool_lo = 2000
            pool_hi = 2999
            msg = ldb.Message()
            msg.dn = self.rid_set_dn
            msg["rIDAllocationPool"] = (
                ldb.MessageElement(str((pool_hi << 32) | pool_lo),
                                   ldb.FLAG_MOD_REPLACE,
                                   "rIDAllocationPool"))
            self.samdb.modify(msg)

            # Ensure the next free RID value is equal to the next pool's lower
            # bound.
            next_free_rid5 = self.samdb.next_free_rid()
            self.assertEqual(pool_lo, next_free_rid5)

            # Check the range of available RIDs.
            free_lo, free_hi = self.samdb.free_rid_bounds()
            self.assertEqual(pool_lo, free_lo)
            self.assertEqual(pool_hi, free_hi)
        finally:
            self.samdb.transaction_cancel()

    def test_ridalloc_move_to_new_pool(self):
        """Allocation drains the previous pool before moving to the next."""
        self.samdb.transaction_start()
        try:
            # Test moving to a new pool from the previous pool.
            pool_lo = 2000
            pool_hi = 2999
            new_pool_lo = 4500
            new_pool_hi = 4599
            msg = ldb.Message()
            msg.dn = self.rid_set_dn
            msg["rIDPreviousAllocationPool"] = (
                ldb.MessageElement(str((pool_hi << 32) | pool_lo),
                                   ldb.FLAG_MOD_REPLACE,
                                   "rIDPreviousAllocationPool"))
            msg["rIDAllocationPool"] = (
                ldb.MessageElement(str((new_pool_hi << 32) | new_pool_lo),
                                   ldb.FLAG_MOD_REPLACE,
                                   "rIDAllocationPool"))
            msg["rIDNextRID"] = (
                ldb.MessageElement(str(pool_hi - 1),
                                   ldb.FLAG_MOD_REPLACE,
                                   "rIDNextRID"))
            self.samdb.modify(msg)

            # We should have remained in the previous pool.
            next_free_rid6 = self.samdb.next_free_rid()
            self.assertEqual(pool_hi, next_free_rid6)

            # Check the range of available RIDs.
            free_lo, free_hi = self.samdb.free_rid_bounds()
            self.assertEqual(pool_hi, free_lo)
            self.assertEqual(pool_hi, free_hi)

            # Allocate a new RID.
            rid2 = self.samdb.allocate_rid()
            self.assertEqual(next_free_rid6, rid2)

            # We should now move to the next pool.
            next_free_rid7 = self.samdb.next_free_rid()
            self.assertEqual(new_pool_lo, next_free_rid7)

            # Check the new range of available RIDs.
            free_lo2, free_hi2 = self.samdb.free_rid_bounds()
            self.assertEqual(new_pool_lo, free_lo2)
            self.assertEqual(new_pool_hi, free_hi2)

            # Ensure that allocate_rid() matches.
            rid3 = self.samdb.allocate_rid()
            self.assertEqual(next_free_rid7, rid3)
        finally:
            self.samdb.transaction_cancel()

    def test_ridalloc_no_ridsetreferences(self):
        """Without rIDSetReferences both RID operations fail clearly."""
        self.samdb.transaction_start()
        try:
            # Delete the rIDSetReferences attribute.
            msg = ldb.Message()
            msg.dn = self.server_ref_dn
            msg["rIDSetReferences"] = (
                ldb.MessageElement([],
                                   ldb.FLAG_MOD_DELETE,
                                   "rIDSetReferences"))
            self.samdb.modify(msg)

            # Ensure calculating the next free RID fails.
            with self.assertRaises(ldb.LdbError) as err:
                self.samdb.next_free_rid()

            enum, estr = err.exception.args
            self.assertEqual(ldb.ERR_NO_SUCH_ATTRIBUTE, enum)
            self.assertIn("No RID Set DN - "
                          "Cannot find attribute rIDSetReferences of %s "
                          "to calculate reference dn" % self.server_ref_dn,
                          estr)

            # Ensure allocating a new RID fails.
            with self.assertRaises(ldb.LdbError) as err:
                self.samdb.allocate_rid()

            enum, estr = err.exception.args
            self.assertEqual(ldb.ERR_ENTRY_ALREADY_EXISTS, enum)
            self.assertIn("No RID Set DN - "
                          "Failed to add RID Set %s - "
                          "Entry %s already exists" %
                          (self.rid_set_dn, self.rid_set_dn),
                          estr)
        finally:
            self.samdb.transaction_cancel()

    def test_ridalloc_no_rid_set(self):
        """A rIDSetReferences pointing at a non-RID-Set object fails."""
        self.samdb.transaction_start()
        try:
            # Set the rIDSetReferences attribute to not point to a RID Set.
            fake_rid_set_str = self.account_dn
            msg = ldb.Message()
            msg.dn = self.server_ref_dn
            msg["rIDSetReferences"] = (
                ldb.MessageElement(fake_rid_set_str,
                                   ldb.FLAG_MOD_REPLACE,
                                   "rIDSetReferences"))
            self.samdb.modify(msg)

            # Ensure calculating the next free RID fails.
            with self.assertRaises(ldb.LdbError) as err:
                self.samdb.next_free_rid()

            enum, estr = err.exception.args
            self.assertEqual(ldb.ERR_OPERATIONS_ERROR, enum)
            self.assertIn("Bad RID Set " + fake_rid_set_str, estr)

            # Ensure allocating a new RID fails.
            with self.assertRaises(ldb.LdbError) as err:
                self.samdb.allocate_rid()

            enum, estr = err.exception.args
            self.assertEqual(ldb.ERR_OPERATIONS_ERROR, enum)
            self.assertIn("Bad RID Set " + fake_rid_set_str, estr)
        finally:
            self.samdb.transaction_cancel()

    def test_error_replpropertymetadata(self):
        """Writing replPropertyMetaData with a bumped version is refused."""
        res = self.samdb.search(scope=ldb.SCOPE_SUBTREE,
                                base=self.account_dn,
                                attrs=["replPropertyMetaData"])
        repl = ndr_unpack(drsblobs.replPropertyMetaDataBlob,
                          res[0]["replPropertyMetaData"][0])
        ctr = repl.ctr
        for o in ctr.array:
            # Search for Description
            if o.attid == 13:
                o.version = o.version + 1
        replBlob = ndr_pack(repl)
        msg = ldb.Message()
        msg.dn = res[0].dn
        msg["replPropertyMetaData"] = ldb.MessageElement(replBlob, ldb.FLAG_MOD_REPLACE, "replPropertyMetaData")
        self.assertRaises(ldb.LdbError, self.samdb.modify, msg, ["local_oid:1.3.6.1.4.1.7165.4.3.14:0"])

    def test_error_replpropertymetadata_nochange(self):
        """Even an unchanged replPropertyMetaData blob is refused without
        the allow-sort control."""
        res = self.samdb.search(scope=ldb.SCOPE_SUBTREE,
                                base=self.account_dn,
                                attrs=["replPropertyMetaData"])
        repl = ndr_unpack(drsblobs.replPropertyMetaDataBlob,
                          res[0]["replPropertyMetaData"][0])
        replBlob = ndr_pack(repl)
        msg = ldb.Message()
        msg.dn = res[0].dn
        msg["replPropertyMetaData"] = ldb.MessageElement(replBlob, ldb.FLAG_MOD_REPLACE, "replPropertyMetaData")
        self.assertRaises(ldb.LdbError, self.samdb.modify, msg, ["local_oid:1.3.6.1.4.1.7165.4.3.14:0"])

    def test_error_replpropertymetadata_allow_sort(self):
        """With the additional allow-sort control the write succeeds."""
        res = self.samdb.search(scope=ldb.SCOPE_SUBTREE,
                                base=self.account_dn,
                                attrs=["replPropertyMetaData"])
        repl = ndr_unpack(drsblobs.replPropertyMetaDataBlob,
                          res[0]["replPropertyMetaData"][0])
        replBlob = ndr_pack(repl)
        msg = ldb.Message()
        msg.dn = res[0].dn
        msg["replPropertyMetaData"] = ldb.MessageElement(replBlob, ldb.FLAG_MOD_REPLACE, "replPropertyMetaData")
        self.samdb.modify(msg, ["local_oid:1.3.6.1.4.1.7165.4.3.14:0", "local_oid:1.3.6.1.4.1.7165.4.3.25:0"])

    def test_twoatt_replpropertymetadata(self):
        """Combining a metadata write with another attribute change fails."""
        res = self.samdb.search(scope=ldb.SCOPE_SUBTREE,
                                base=self.account_dn,
                                attrs=["replPropertyMetaData", "uSNChanged"])
        repl = ndr_unpack(drsblobs.replPropertyMetaDataBlob,
                          res[0]["replPropertyMetaData"][0])
        ctr = repl.ctr
        for o in ctr.array:
            # Search for Description
            if o.attid == 13:
                o.version = o.version + 1
                o.local_usn = int(str(res[0]["uSNChanged"])) + 1
        replBlob = ndr_pack(repl)
        msg = ldb.Message()
        msg.dn = res[0].dn
        msg["replPropertyMetaData"] = ldb.MessageElement(replBlob, ldb.FLAG_MOD_REPLACE, "replPropertyMetaData")
        msg["description"] = ldb.MessageElement("new val", ldb.FLAG_MOD_REPLACE, "description")
        self.assertRaises(ldb.LdbError, self.samdb.modify, msg, ["local_oid:1.3.6.1.4.1.7165.4.3.14:0"])

    def test_set_replpropertymetadata(self):
        """A consistent version/USN bump is accepted with the control."""
        res = self.samdb.search(scope=ldb.SCOPE_SUBTREE,
                                base=self.account_dn,
                                attrs=["replPropertyMetaData", "uSNChanged"])
        repl = ndr_unpack(drsblobs.replPropertyMetaDataBlob,
                          res[0]["replPropertyMetaData"][0])
        ctr = repl.ctr
        for o in ctr.array:
            # Search for Description
            if o.attid == 13:
                o.version = o.version + 1
                o.local_usn = int(str(res[0]["uSNChanged"])) + 1
                o.originating_usn = int(str(res[0]["uSNChanged"])) + 1
        replBlob = ndr_pack(repl)
        msg = ldb.Message()
        msg.dn = res[0].dn
        msg["replPropertyMetaData"] = ldb.MessageElement(replBlob, ldb.FLAG_MOD_REPLACE, "replPropertyMetaData")
        self.samdb.modify(msg, ["local_oid:1.3.6.1.4.1.7165.4.3.14:0"])

    def test_get_attribute_replmetadata_version(self):
        res = self.samdb.search(scope=ldb.SCOPE_SUBTREE,
                                base=self.account_dn,
                                attrs=["dn"])
        self.assertEqual(len(res), 1)
        dn = str(res[0].dn)
        self.assertEqual(self.samdb.get_attribute_replmetadata_version(dn, "unicodePwd"), 2)

    def test_set_attribute_replmetadata_version(self):
        res = self.samdb.search(scope=ldb.SCOPE_SUBTREE,
                                base=self.account_dn,
                                attrs=["dn"])
        self.assertEqual(len(res), 1)
        dn = str(res[0].dn)
        version = self.samdb.get_attribute_replmetadata_version(dn, "description")
        self.samdb.set_attribute_replmetadata_version(dn, "description", version + 2)
        self.assertEqual(self.samdb.get_attribute_replmetadata_version(dn, "description"), version + 2)

    def test_no_error_on_invalid_control(self):
        """A non-critical unrecognised control must be ignored."""
        try:
            self.samdb.search(scope=ldb.SCOPE_SUBTREE,
                              base=self.account_dn,
                              attrs=["replPropertyMetaData"],
                              controls=["local_oid:%s:0"
                                        % dsdb.DSDB_CONTROL_INVALID_NOT_IMPLEMENTED])
        except ldb.LdbError:
            self.fail("Should have not raised an exception")

    def test_error_on_invalid_critical_control(self):
        """A critical unrecognised control must fail the request."""
        try:
            self.samdb.search(scope=ldb.SCOPE_SUBTREE,
                              base=self.account_dn,
                              attrs=["replPropertyMetaData"],
                              controls=["local_oid:%s:1"
                                        % dsdb.DSDB_CONTROL_INVALID_NOT_IMPLEMENTED])
        except ldb.LdbError as e:
            (errno, estr) = e.args
            if errno != ldb.ERR_UNSUPPORTED_CRITICAL_EXTENSION:
                # Bug fix: this used 'e[1]', but LdbError is not
                # subscriptable under Python 3 (raising TypeError instead
                # of the intended failure message) - use the unpacked
                # error string instead.
                self.fail("Got %s should have got ERR_UNSUPPORTED_CRITICAL_EXTENSION"
                          % estr)
+
+class DsdbTests(TestCase):
    def setUp(self):
        """Connects to the local SamDB using the testenv's loadparm and
        guessed credentials."""
        super().setUp()
        self.lp = samba.tests.env_loadparm()
        self.creds = Credentials()
        self.creds.guess(self.lp)
        self.session = system_session()
        self.samdb = SamDB(session_info=self.session,
                           credentials=self.creds,
                           lp=self.lp)
+
+ # Allocate a unique RID for use in the objectSID tests.
+ #
+ def allocate_rid(self):
+ self.samdb.transaction_start()
+ try:
+ rid = self.samdb.allocate_rid()
+ except:
+ self.samdb.transaction_cancel()
+ raise
+ self.samdb.transaction_commit()
+ return str(rid)
+
    def test_get_oid_from_attrid(self):
        """A known ATTRTYP value maps back to its attribute OID."""
        oid = self.samdb.get_oid_from_attid(591614)
        self.assertEqual(oid, "1.2.840.113556.1.4.1790")
+
    def test_ok_get_attribute_from_attid(self):
        # ATTRTYP 13 resolves to the 'description' attribute
        self.assertEqual(self.samdb.get_attribute_from_attid(13), "description")
+
    def test_ko_get_attribute_from_attid(self):
        # an ATTRTYP with no matching schema attribute yields None
        self.assertEqual(self.samdb.get_attribute_from_attid(11979), None)
+
    # Ensure that duplicate objectSID's are permitted for foreign security
    # principals.
    #
    def test_duplicate_objectSIDs_allowed_on_foreign_security_principals(self):
        """Adding foreignSecurityPrincipal objects is only possible with the
        provision control; plain adds must be rejected with specific
        error codes, and a re-add after delete must succeed."""

        #
        # We need to build a foreign security principal SID
        # i.e a SID not in the current domain.
        #
        dom_sid = self.samdb.get_domain_sid()
        # flip the last digit of the domain SID so the result is foreign
        if str(dom_sid).endswith("0"):
            c = "9"
        else:
            c = "0"
        sid_str = str(dom_sid)[:-1] + c + "-1000"
        sid = ndr_pack(security.dom_sid(sid_str))
        basedn = self.samdb.get_default_basedn()
        dn = "CN=%s,CN=ForeignSecurityPrincipals,%s" % (sid_str, basedn)

        #
        # First without control
        #

        try:
            self.samdb.add({
                "dn": dn,
                "objectClass": "foreignSecurityPrincipal"})
            self.fail("No exception should get ERR_OBJECT_CLASS_VIOLATION")
        except ldb.LdbError as e:
            (code, msg) = e.args
            self.assertEqual(code, ldb.ERR_OBJECT_CLASS_VIOLATION, str(e))
            werr = "%08X" % werror.WERR_DS_MISSING_REQUIRED_ATT
            self.assertTrue(werr in msg, msg)

        try:
            self.samdb.add({
                "dn": dn,
                "objectClass": "foreignSecurityPrincipal",
                "objectSid": sid})
            self.fail("No exception should get ERR_UNWILLING_TO_PERFORM")
        except ldb.LdbError as e:
            (code, msg) = e.args
            self.assertEqual(code, ldb.ERR_UNWILLING_TO_PERFORM, str(e))
            werr = "%08X" % werror.WERR_DS_ILLEGAL_MOD_OPERATION
            self.assertTrue(werr in msg, msg)

        #
        # We need to use the provision control
        # in order to add foreignSecurityPrincipal
        # objects
        #

        controls = ["provision:0"]
        self.samdb.add({
            "dn": dn,
            "objectClass": "foreignSecurityPrincipal"},
            controls=controls)

        self.samdb.delete(dn)

        # re-adding the same FSP after a delete must also succeed
        try:
            self.samdb.add({
                "dn": dn,
                "objectClass": "foreignSecurityPrincipal"},
                controls=controls)
        except ldb.LdbError as e:
            (code, msg) = e.args
            self.fail("Got unexpected exception %d - %s "
                      % (code, msg))

        # cleanup
        self.samdb.delete(dn)
+
+ def _test_foreignSecurityPrincipal(self, obj_class, fpo_attr):
+
+ dom_sid = self.samdb.get_domain_sid()
+ lsid_str = str(dom_sid) + "-4294967294"
+ bsid_str = "S-1-5-32-4294967294"
+ fsid_str = "S-1-5-4294967294"
+ basedn = self.samdb.get_default_basedn()
+ cn = "dsdb_test_fpo"
+ dn_str = "cn=%s,cn=Users,%s" % (cn, basedn)
+ dn = ldb.Dn(self.samdb, dn_str)
+
+ res = self.samdb.search(scope=ldb.SCOPE_SUBTREE,
+ base=basedn,
+ expression="(objectSid=%s)" % lsid_str,
+ attrs=[])
+ self.assertEqual(len(res), 0)
+ res = self.samdb.search(scope=ldb.SCOPE_SUBTREE,
+ base=basedn,
+ expression="(objectSid=%s)" % bsid_str,
+ attrs=[])
+ self.assertEqual(len(res), 0)
+ res = self.samdb.search(scope=ldb.SCOPE_SUBTREE,
+ base=basedn,
+ expression="(objectSid=%s)" % fsid_str,
+ attrs=[])
+ self.assertEqual(len(res), 0)
+
+ self.addCleanup(delete_force, self.samdb, dn_str)
+
+ self.samdb.add({
+ "dn": dn_str,
+ "objectClass": obj_class})
+
+ msg = ldb.Message()
+ msg.dn = dn
+ msg[fpo_attr] = ldb.MessageElement("<SID=%s>" % lsid_str,
+ ldb.FLAG_MOD_ADD,
+ fpo_attr)
+ try:
+ self.samdb.modify(msg)
+ self.fail("No exception should get LDB_ERR_UNWILLING_TO_PERFORM")
+ except ldb.LdbError as e:
+ (code, msg) = e.args
+ self.assertEqual(code, ldb.ERR_UNWILLING_TO_PERFORM, str(e))
+ werr = "%08X" % werror.WERR_DS_INVALID_GROUP_TYPE
+ self.assertTrue(werr in msg, msg)
+
+ msg = ldb.Message()
+ msg.dn = dn
+ msg[fpo_attr] = ldb.MessageElement("<SID=%s>" % bsid_str,
+ ldb.FLAG_MOD_ADD,
+ fpo_attr)
+ try:
+ self.samdb.modify(msg)
+ self.fail("No exception should get LDB_ERR_NO_SUCH_OBJECT")
+ except ldb.LdbError as e:
+ (code, msg) = e.args
+ self.assertEqual(code, ldb.ERR_NO_SUCH_OBJECT, str(e))
+ werr = "%08X" % werror.WERR_NO_SUCH_MEMBER
+ self.assertTrue(werr in msg, msg)
+
+ msg = ldb.Message()
+ msg.dn = dn
+ msg[fpo_attr] = ldb.MessageElement("<SID=%s>" % fsid_str,
+ ldb.FLAG_MOD_ADD,
+ fpo_attr)
+ try:
+ self.samdb.modify(msg)
+ except ldb.LdbError as e:
+ self.fail("Should have not raised an exception")
+
+ res = self.samdb.search(scope=ldb.SCOPE_SUBTREE,
+ base=basedn,
+ expression="(objectSid=%s)" % fsid_str,
+ attrs=[])
+ self.assertEqual(len(res), 1)
+ self.samdb.delete(res[0].dn)
+ self.samdb.delete(dn)
+ res = self.samdb.search(scope=ldb.SCOPE_SUBTREE,
+ base=basedn,
+ expression="(objectSid=%s)" % fsid_str,
+ attrs=[])
+ self.assertEqual(len(res), 0)
+
+ def test_foreignSecurityPrincipal_member(self):
+ return self._test_foreignSecurityPrincipal(
+ "group", "member")
+
+ def test_foreignSecurityPrincipal_MembersForAzRole(self):
+ return self._test_foreignSecurityPrincipal(
+ "msDS-AzRole", "msDS-MembersForAzRole")
+
+ def test_foreignSecurityPrincipal_NeverRevealGroup(self):
+ return self._test_foreignSecurityPrincipal(
+ "computer", "msDS-NeverRevealGroup")
+
+ def test_foreignSecurityPrincipal_RevealOnDemandGroup(self):
+ return self._test_foreignSecurityPrincipal(
+ "computer", "msDS-RevealOnDemandGroup")
+
+ def _test_fail_foreignSecurityPrincipal(self, obj_class, fpo_attr,
+ msg_exp, lerr_exp, werr_exp,
+ allow_reference=True):
+
+ dom_sid = self.samdb.get_domain_sid()
+ lsid_str = str(dom_sid) + "-4294967294"
+ bsid_str = "S-1-5-32-4294967294"
+ fsid_str = "S-1-5-4294967294"
+ basedn = self.samdb.get_default_basedn()
+ cn1 = "dsdb_test_fpo1"
+ dn1_str = "cn=%s,cn=Users,%s" % (cn1, basedn)
+ dn1 = ldb.Dn(self.samdb, dn1_str)
+ cn2 = "dsdb_test_fpo2"
+ dn2_str = "cn=%s,cn=Users,%s" % (cn2, basedn)
+ dn2 = ldb.Dn(self.samdb, dn2_str)
+
+ res = self.samdb.search(scope=ldb.SCOPE_SUBTREE,
+ base=basedn,
+ expression="(objectSid=%s)" % lsid_str,
+ attrs=[])
+ self.assertEqual(len(res), 0)
+ res = self.samdb.search(scope=ldb.SCOPE_SUBTREE,
+ base=basedn,
+ expression="(objectSid=%s)" % bsid_str,
+ attrs=[])
+ self.assertEqual(len(res), 0)
+ res = self.samdb.search(scope=ldb.SCOPE_SUBTREE,
+ base=basedn,
+ expression="(objectSid=%s)" % fsid_str,
+ attrs=[])
+ self.assertEqual(len(res), 0)
+
+ self.addCleanup(delete_force, self.samdb, dn1_str)
+ self.addCleanup(delete_force, self.samdb, dn2_str)
+
+ self.samdb.add({
+ "dn": dn1_str,
+ "objectClass": obj_class})
+
+ self.samdb.add({
+ "dn": dn2_str,
+ "objectClass": obj_class})
+
+ msg = ldb.Message()
+ msg.dn = dn1
+ msg[fpo_attr] = ldb.MessageElement("<SID=%s>" % lsid_str,
+ ldb.FLAG_MOD_ADD,
+ fpo_attr)
+ try:
+ self.samdb.modify(msg)
+ self.fail("No exception should get %s" % msg_exp)
+ except ldb.LdbError as e:
+ (code, msg) = e.args
+ self.assertEqual(code, lerr_exp, str(e))
+ werr = "%08X" % werr_exp
+ self.assertTrue(werr in msg, msg)
+
+ msg = ldb.Message()
+ msg.dn = dn1
+ msg[fpo_attr] = ldb.MessageElement("<SID=%s>" % bsid_str,
+ ldb.FLAG_MOD_ADD,
+ fpo_attr)
+ try:
+ self.samdb.modify(msg)
+ self.fail("No exception should get %s" % msg_exp)
+ except ldb.LdbError as e:
+ (code, msg) = e.args
+ self.assertEqual(code, lerr_exp, str(e))
+ werr = "%08X" % werr_exp
+ self.assertTrue(werr in msg, msg)
+
+ msg = ldb.Message()
+ msg.dn = dn1
+ msg[fpo_attr] = ldb.MessageElement("<SID=%s>" % fsid_str,
+ ldb.FLAG_MOD_ADD,
+ fpo_attr)
+ try:
+ self.samdb.modify(msg)
+ self.fail("No exception should get %s" % msg)
+ except ldb.LdbError as e:
+ (code, msg) = e.args
+ self.assertEqual(code, lerr_exp, str(e))
+ werr = "%08X" % werr_exp
+ self.assertTrue(werr in msg, msg)
+
+ msg = ldb.Message()
+ msg.dn = dn1
+ msg[fpo_attr] = ldb.MessageElement("%s" % dn2,
+ ldb.FLAG_MOD_ADD,
+ fpo_attr)
+ try:
+ self.samdb.modify(msg)
+ if not allow_reference:
+ self.fail("No exception should get %s" % msg_exp)
+ except ldb.LdbError as e:
+ if allow_reference:
+ self.fail("Should have not raised an exception: %s" % e)
+ (code, msg) = e.args
+ self.assertEqual(code, lerr_exp, str(e))
+ werr = "%08X" % werr_exp
+ self.assertTrue(werr in msg, msg)
+
+ self.samdb.delete(dn2)
+ self.samdb.delete(dn1)
+
+ def test_foreignSecurityPrincipal_NonMembers(self):
+ return self._test_fail_foreignSecurityPrincipal(
+ "group", "msDS-NonMembers",
+ "LDB_ERR_UNWILLING_TO_PERFORM/WERR_NOT_SUPPORTED",
+ ldb.ERR_UNWILLING_TO_PERFORM, werror.WERR_NOT_SUPPORTED,
+ allow_reference=False)
+
+ def test_foreignSecurityPrincipal_HostServiceAccount(self):
+ return self._test_fail_foreignSecurityPrincipal(
+ "computer", "msDS-HostServiceAccount",
+ "LDB_ERR_CONSTRAINT_VIOLATION/WERR_DS_NAME_REFERENCE_INVALID",
+ ldb.ERR_CONSTRAINT_VIOLATION,
+ werror.WERR_DS_NAME_REFERENCE_INVALID)
+
+ def test_foreignSecurityPrincipal_manager(self):
+ return self._test_fail_foreignSecurityPrincipal(
+ "user", "manager",
+ "LDB_ERR_CONSTRAINT_VIOLATION/WERR_DS_NAME_REFERENCE_INVALID",
+ ldb.ERR_CONSTRAINT_VIOLATION,
+ werror.WERR_DS_NAME_REFERENCE_INVALID)
+
+ #
+    # Duplicate objectSIDs should not be permitted for SIDs in the local
+    # domain. The test sequence is: add an object, delete it, then attempt to
+    # re-add it; this should fail with a constraint violation.
+ #
+ def test_duplicate_objectSIDs_not_allowed_on_local_objects(self):
+
+ dom_sid = self.samdb.get_domain_sid()
+ rid = self.allocate_rid()
+ sid_str = str(dom_sid) + "-" + rid
+ sid = ndr_pack(security.dom_sid(sid_str))
+ basedn = self.samdb.get_default_basedn()
+ cn = "dsdb_test_01"
+ dn = "cn=%s,cn=Users,%s" % (cn, basedn)
+
+ self.samdb.add({
+ "dn": dn,
+ "objectClass": "user",
+ "objectSID": sid})
+ self.samdb.delete(dn)
+
+ try:
+ self.samdb.add({
+ "dn": dn,
+ "objectClass": "user",
+ "objectSID": sid})
+ self.fail("No exception should get LDB_ERR_CONSTRAINT_VIOLATION")
+ except ldb.LdbError as e:
+ (code, msg) = e.args
+ if code != ldb.ERR_CONSTRAINT_VIOLATION:
+ self.fail("Got %d - %s should have got "
+ "LDB_ERR_CONSTRAINT_VIOLATION"
+ % (code, msg))
+
+ def test_linked_vs_non_linked_reference(self):
+ basedn = self.samdb.get_default_basedn()
+ kept_dn_str = "cn=reference_kept,cn=Users,%s" % (basedn)
+ removed_dn_str = "cn=reference_removed,cn=Users,%s" % (basedn)
+ dom_sid = self.samdb.get_domain_sid()
+ none_sid_str = str(dom_sid) + "-4294967294"
+ none_guid_str = "afafafaf-fafa-afaf-fafa-afafafafafaf"
+
+ self.addCleanup(delete_force, self.samdb, kept_dn_str)
+ self.addCleanup(delete_force, self.samdb, removed_dn_str)
+
+ self.samdb.add({
+ "dn": kept_dn_str,
+ "objectClass": "user"})
+ res = self.samdb.search(scope=ldb.SCOPE_SUBTREE,
+ base=kept_dn_str,
+ attrs=["objectGUID", "objectSID"])
+ self.assertEqual(len(res), 1)
+ kept_guid = ndr_unpack(misc.GUID, res[0]["objectGUID"][0])
+ kept_sid = ndr_unpack(security.dom_sid, res[0]["objectSid"][0])
+ kept_dn = res[0].dn
+
+ self.samdb.add({
+ "dn": removed_dn_str,
+ "objectClass": "user"})
+ res = self.samdb.search(scope=ldb.SCOPE_SUBTREE,
+ base=removed_dn_str,
+ attrs=["objectGUID", "objectSID"])
+ self.assertEqual(len(res), 1)
+ removed_guid = ndr_unpack(misc.GUID, res[0]["objectGUID"][0])
+ removed_sid = ndr_unpack(security.dom_sid, res[0]["objectSid"][0])
+ self.samdb.delete(removed_dn_str)
+
+ #
+ # First try the linked attribute 'manager'
+ # by GUID and SID
+ #
+
+ msg = ldb.Message()
+ msg.dn = kept_dn
+ msg["manager"] = ldb.MessageElement("<SID=%s>" % removed_sid,
+ ldb.FLAG_MOD_ADD,
+ "manager")
+ try:
+ self.samdb.modify(msg)
+ self.fail("No exception should get LDB_ERR_CONSTRAINT_VIOLATION")
+ except ldb.LdbError as e:
+ (code, msg) = e.args
+ self.assertEqual(code, ldb.ERR_CONSTRAINT_VIOLATION, str(e))
+ werr = "%08X" % werror.WERR_DS_NAME_REFERENCE_INVALID
+ self.assertTrue(werr in msg, msg)
+
+ msg = ldb.Message()
+ msg.dn = kept_dn
+ msg["manager"] = ldb.MessageElement("<GUID=%s>" % removed_guid,
+ ldb.FLAG_MOD_ADD,
+ "manager")
+ try:
+ self.samdb.modify(msg)
+ self.fail("No exception should get LDB_ERR_CONSTRAINT_VIOLATION")
+ except ldb.LdbError as e:
+ (code, msg) = e.args
+ self.assertEqual(code, ldb.ERR_CONSTRAINT_VIOLATION, str(e))
+ werr = "%08X" % werror.WERR_DS_NAME_REFERENCE_INVALID
+ self.assertTrue(werr in msg, msg)
+
+ #
+ # Try the non-linked attribute 'assistant'
+ # by GUID and SID, which should work.
+ #
+ msg = ldb.Message()
+ msg.dn = kept_dn
+ msg["assistant"] = ldb.MessageElement("<SID=%s>" % removed_sid,
+ ldb.FLAG_MOD_ADD,
+ "assistant")
+ self.samdb.modify(msg)
+ msg = ldb.Message()
+ msg.dn = kept_dn
+ msg["assistant"] = ldb.MessageElement("<SID=%s>" % removed_sid,
+ ldb.FLAG_MOD_DELETE,
+ "assistant")
+ self.samdb.modify(msg)
+
+ msg = ldb.Message()
+ msg.dn = kept_dn
+ msg["assistant"] = ldb.MessageElement("<GUID=%s>" % removed_guid,
+ ldb.FLAG_MOD_ADD,
+ "assistant")
+ self.samdb.modify(msg)
+ msg = ldb.Message()
+ msg.dn = kept_dn
+ msg["assistant"] = ldb.MessageElement("<GUID=%s>" % removed_guid,
+ ldb.FLAG_MOD_DELETE,
+ "assistant")
+ self.samdb.modify(msg)
+
+ #
+        # Finally try the non-linked attribute 'assistant'
+        # but with non-existent GUID, SID, and DN values.
+ #
+ msg = ldb.Message()
+ msg.dn = kept_dn
+ msg["assistant"] = ldb.MessageElement("CN=NoneNone,%s" % (basedn),
+ ldb.FLAG_MOD_ADD,
+ "assistant")
+ try:
+ self.samdb.modify(msg)
+ self.fail("No exception should get LDB_ERR_CONSTRAINT_VIOLATION")
+ except ldb.LdbError as e:
+ (code, msg) = e.args
+ self.assertEqual(code, ldb.ERR_CONSTRAINT_VIOLATION, str(e))
+ werr = "%08X" % werror.WERR_DS_NAME_REFERENCE_INVALID
+ self.assertTrue(werr in msg, msg)
+
+ msg = ldb.Message()
+ msg.dn = kept_dn
+ msg["assistant"] = ldb.MessageElement("<SID=%s>" % none_sid_str,
+ ldb.FLAG_MOD_ADD,
+ "assistant")
+ try:
+ self.samdb.modify(msg)
+ self.fail("No exception should get LDB_ERR_CONSTRAINT_VIOLATION")
+ except ldb.LdbError as e:
+ (code, msg) = e.args
+ self.assertEqual(code, ldb.ERR_CONSTRAINT_VIOLATION, str(e))
+ werr = "%08X" % werror.WERR_DS_NAME_REFERENCE_INVALID
+ self.assertTrue(werr in msg, msg)
+
+ msg = ldb.Message()
+ msg.dn = kept_dn
+ msg["assistant"] = ldb.MessageElement("<GUID=%s>" % none_guid_str,
+ ldb.FLAG_MOD_ADD,
+ "assistant")
+ try:
+ self.samdb.modify(msg)
+ self.fail("No exception should get LDB_ERR_CONSTRAINT_VIOLATION")
+ except ldb.LdbError as e:
+ (code, msg) = e.args
+ self.assertEqual(code, ldb.ERR_CONSTRAINT_VIOLATION, str(e))
+ werr = "%08X" % werror.WERR_DS_NAME_REFERENCE_INVALID
+ self.assertTrue(werr in msg, msg)
+
+ self.samdb.delete(kept_dn)
+
+ def test_normalize_dn_in_domain_full(self):
+ domain_dn = self.samdb.domain_dn()
+
+ part_dn = ldb.Dn(self.samdb, "CN=Users")
+
+ full_dn = part_dn
+ full_dn.add_base(domain_dn)
+
+ full_str = str(full_dn)
+
+ # That is, no change
+ self.assertEqual(full_dn,
+ self.samdb.normalize_dn_in_domain(full_str))
+
+ def test_normalize_dn_in_domain_part(self):
+ domain_dn = self.samdb.domain_dn()
+
+ part_str = "CN=Users"
+
+ full_dn = ldb.Dn(self.samdb, part_str)
+ full_dn.add_base(domain_dn)
+
+ # That is, the domain DN appended
+ self.assertEqual(full_dn,
+ self.samdb.normalize_dn_in_domain(part_str))
+
+ def test_normalize_dn_in_domain_full_dn(self):
+ domain_dn = self.samdb.domain_dn()
+
+ part_dn = ldb.Dn(self.samdb, "CN=Users")
+
+ full_dn = part_dn
+ full_dn.add_base(domain_dn)
+
+ # That is, no change
+ self.assertEqual(full_dn,
+ self.samdb.normalize_dn_in_domain(full_dn))
+
+ def test_normalize_dn_in_domain_part_dn(self):
+ domain_dn = self.samdb.domain_dn()
+
+ part_dn = ldb.Dn(self.samdb, "CN=Users")
+
+ # That is, the domain DN appended
+ self.assertEqual(ldb.Dn(self.samdb,
+ str(part_dn) + "," + str(domain_dn)),
+ self.samdb.normalize_dn_in_domain(part_dn))
+
+class DsdbNCRootTests(TestCase):
+
+ def setUp(self):
+ super().setUp()
+ self.lp = samba.tests.env_loadparm()
+ self.creds = Credentials()
+ self.creds.guess(self.lp)
+ self.session = system_session()
+ self.samdb = SamDB(session_info=self.session,
+ credentials=self.creds,
+ lp=self.lp)
+ self.remote = False
+
+ # These all use the local mode of operation inside
+ # dsdb_find_nc_root() using the partitions control
+ def test_dsdb_dn_nc_root_sid(self):
+ dom_sid = self.samdb.get_domain_sid()
+ domain_dn = ldb.Dn(self.samdb, self.samdb.domain_dn())
+ dn = ldb.Dn(self.samdb, f"<SID={dom_sid}>")
+ try:
+ nc_root = self.samdb.get_nc_root(dn)
+ except ldb.LdbError as e:
+ (code, msg) = e.args
+ self.fail("Got unexpected exception %d - %s "
+ % (code, msg))
+ self.assertEqual(domain_dn, nc_root)
+
+ def test_dsdb_dn_nc_root_admin_sid(self):
+ dom_sid = self.samdb.get_domain_sid()
+ domain_dn = ldb.Dn(self.samdb, self.samdb.domain_dn())
+ dn = ldb.Dn(self.samdb, f"<SID={dom_sid}-500>")
+ try:
+ nc_root = self.samdb.get_nc_root(dn)
+ except ldb.LdbError as e:
+ (code, msg) = e.args
+ self.fail("Got unexpected exception %d - %s "
+ % (code, msg))
+ self.assertEqual(domain_dn, nc_root)
+
+ def test_dsdb_dn_nc_root_users_container(self):
+ dom_sid = self.samdb.get_domain_sid()
+ domain_dn = ldb.Dn(self.samdb, self.samdb.domain_dn())
+ dn = ldb.Dn(self.samdb, f"CN=Users,{domain_dn}")
+ try:
+ nc_root = self.samdb.get_nc_root(dn)
+ except ldb.LdbError as e:
+ (code, msg) = e.args
+ self.fail("Got unexpected exception %d - %s "
+ % (code, msg))
+ self.assertEqual(domain_dn, nc_root)
+
+ def test_dsdb_dn_nc_root_new_dn(self):
+ dom_sid = self.samdb.get_domain_sid()
+ domain_dn = ldb.Dn(self.samdb, self.samdb.domain_dn())
+ dn = ldb.Dn(self.samdb, f"CN=Xnotexisting,CN=Users,{domain_dn}")
+ try:
+ nc_root = self.samdb.get_nc_root(dn)
+ except ldb.LdbError as e:
+ (code, msg) = e.args
+ self.fail("Got unexpected exception %d - %s "
+ % (code, msg))
+ self.assertEqual(domain_dn, nc_root)
+
+ def test_dsdb_dn_nc_root_new_dn_with_guid(self):
+ domain_dn = ldb.Dn(self.samdb, self.samdb.domain_dn())
+ dn = ldb.Dn(self.samdb, f"<GUID=828e3baf-fa02-4d82-ba5d-6f647dab5fd8>;CN=Xnotexisting,CN=Users,{domain_dn}")
+ try:
+ nc_root = self.samdb.get_nc_root(dn)
+ except ldb.LdbError as e:
+ (code, msg) = e.args
+ self.fail("Got unexpected exception %d - %s "
+ % (code, msg))
+ self.assertEqual(domain_dn, nc_root)
+
+ def test_dsdb_dn_nc_root_guid(self):
+ ntds_guid = self.samdb.get_ntds_GUID()
+ configuration_dn = self.samdb.get_config_basedn()
+ dn = ldb.Dn(self.samdb, f"<GUID={ntds_guid}>")
+ try:
+ nc_root = self.samdb.get_nc_root(dn)
+ except ldb.LdbError as e:
+ (code, msg) = e.args
+ self.fail("Got unexpected exception %d - %s "
+ % (code, msg))
+ self.assertEqual(configuration_dn, nc_root)
+
+ def test_dsdb_dn_nc_root_misleading_to_noexisting_guid(self):
+ ntds_guid = self.samdb.get_ntds_GUID()
+ configuration_dn = self.samdb.get_config_basedn()
+ domain_dn = ldb.Dn(self.samdb, self.samdb.domain_dn())
+ dn = ldb.Dn(self.samdb, f"<GUID={ntds_guid}>;CN=Xnotexisting,CN=Users,{domain_dn}")
+ try:
+ nc_root = self.samdb.get_nc_root(dn)
+ except ldb.LdbError as e:
+ (code, msg) = e.args
+ self.fail("Got unexpected exception %d - %s "
+ % (code, msg))
+ self.assertEqual(configuration_dn, nc_root)
+
+ def test_dsdb_dn_nc_root_misleading_to_existing_guid(self):
+ ntds_guid = self.samdb.get_ntds_GUID()
+ configuration_dn = self.samdb.get_config_basedn()
+ domain_dn = ldb.Dn(self.samdb, self.samdb.domain_dn())
+ dn = ldb.Dn(self.samdb, f"<GUID={ntds_guid}>;{domain_dn}")
+ try:
+ nc_root = self.samdb.get_nc_root(dn)
+ except ldb.LdbError as e:
+ (code, msg) = e.args
+ self.fail("Got unexpected exception %d - %s "
+ % (code, msg))
+ self.assertEqual(configuration_dn, nc_root)
+
+class DsdbRemoteNCRootTests(DsdbNCRootTests):
+ def setUp(self):
+ super().setUp()
+ # Reconnect to the remote LDAP port
+ self.samdb = SamDB(url="ldap://%s" % samba.tests.env_get_var_value('SERVER'),
+ session_info=self.session,
+ credentials=self.get_credentials(),
+ lp=self.lp)
+ self.remote = True
+
+
+class DsdbFullScanTests(TestCase):
+
+ def setUp(self):
+ super().setUp()
+ self.lp = samba.tests.env_loadparm()
+ self.creds = Credentials()
+ self.creds.guess(self.lp)
+ self.session = system_session()
+
+ def test_sam_ldb_open_no_full_scan(self):
+ try:
+ self.samdb = SamDB(session_info=self.session,
+ credentials=self.creds,
+ lp=self.lp,
+ options=["disable_full_db_scan_for_self_test:1"])
+ except ldb.LdbError as err:
+ estr = err.args[1]
+ self.fail("sam.ldb required a full scan to start up")
+
+class DsdbStartUpTests(TestCase):
+ def setUp(self):
+ super().setUp()
+ lp = samba.tests.env_loadparm()
+ path = lp.configfile
+
+ # This is to avoid a tattoo of the global state
+ self.lp = LoadParm(filename_for_non_global_lp=path)
+ self.creds = Credentials()
+ self.creds.guess(self.lp)
+ self.session = system_session()
+ self.samdb = SamDB(session_info=self.session,
+ credentials=self.creds,
+ lp=self.lp)
+
+ def test_correct_fl(self):
+ res = self.samdb.search(base="",
+ scope=ldb.SCOPE_BASE,
+ attrs=["domainFunctionality"])
+ # This confirms the domain is in FL 2016 by default, this is
+ # important to verify the original state
+ self.assertEqual(int(res[0]["domainFunctionality"][0]),
+ dsdb.DS_DOMAIN_FUNCTION_2016)
+ self.assertEqual(functional_level.dc_level_from_lp(self.lp),
+ dsdb.DS_DOMAIN_FUNCTION_2016)
+ dsdb.check_and_update_fl(self.samdb, self.lp)
+
+ def test_lower_smb_conf_fl(self):
+ old_lp_fl = self.lp.get("ad dc functional level")
+ self.lp.set("ad dc functional level",
+ "2008_R2")
+ self.addCleanup(self.lp.set, "ad dc functional level", old_lp_fl)
+ try:
+ dsdb.check_and_update_fl(self.samdb, self.lp)
+ self.fail("Should have failed to start DC with 2008 R2 FL in 2016 domain")
+ except ldb.LdbError as err:
+ (errno, estr) = err.args
+ self.assertEqual(errno, ldb.ERR_CONSTRAINT_VIOLATION)
diff --git a/python/samba/tests/dsdb_api.py b/python/samba/tests/dsdb_api.py
new file mode 100644
index 0000000..9974079
--- /dev/null
+++ b/python/samba/tests/dsdb_api.py
@@ -0,0 +1,57 @@
+# Unix SMB/CIFS implementation. Tests for dsdb
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2021
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for samba.dsdb."""
+
+from samba.tests import TestCase, DynamicTestCase
+from samba.dsdb import user_account_control_flag_bit_to_string
+import samba
+
+
+@DynamicTestCase
+class DsdbFlagTests(TestCase):
+
+ @classmethod
+ def setUpDynamicTestCases(cls):
+
+ for x in dir(samba.dsdb):
+ if x.startswith("UF_"):
+ cls.generate_dynamic_test("test",
+ x,
+ x,
+ getattr(samba.dsdb, x))
+
+
+ def _test_with_args(self, uf_string, uf_bit):
+ self.assertEqual(user_account_control_flag_bit_to_string(uf_bit),
+ uf_string)
+
+
+ def test_not_a_flag(self):
+ self.assertRaises(KeyError,
+ user_account_control_flag_bit_to_string,
+ 0xabcdef)
+
+ def test_too_long(self):
+ self.assertRaises(OverflowError,
+ user_account_control_flag_bit_to_string,
+ 0xabcdefffff)
+
+ def test_way_too_long(self):
+ self.assertRaises(OverflowError,
+ user_account_control_flag_bit_to_string,
+ 0xabcdeffffffffffff)
diff --git a/python/samba/tests/dsdb_dns.py b/python/samba/tests/dsdb_dns.py
new file mode 100644
index 0000000..c175adb
--- /dev/null
+++ b/python/samba/tests/dsdb_dns.py
@@ -0,0 +1,85 @@
+# Unix SMB/CIFS implementation. Tests for dsdb_dns module
+# Copyright © Catalyst IT 2021
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+from samba.tests import TestCase
+from samba import dsdb_dns
+
+
+def unix2nttime(t):
+ # here we reimplement unix_to_nt_time from lib/util/time.c
+ if t == -1:
+ return t
+ if t == (1 << 63) - 1:
+ return (1 << 63) - 1
+ if t == 0:
+ return 0
+ t += 11644473600
+ t *= 1e7
+ return int(t)
+
+
+def unix2dns_timestamp(t):
+ nt = unix2nttime(t)
+ if nt < 0:
+ # because NTTIME is a uint64_t.
+ nt += 1 << 64
+ return nt // int(3.6e10)
+
+
+def timestamp2nttime(ts):
+ nt = ts * int(3.6e10)
+ if nt >= 1 << 63:
+ raise OverflowError("nt time won't fit this")
+ return nt
+
+
+class DsdbDnsTestCase(TestCase):
+ def test_unix_to_dns_timestamp(self):
+ unixtimes = [1616829393,
+ 1,
+ 0,
+ -1,
+ 1 << 31 - 1]
+
+ for t in unixtimes:
+ expected = unix2dns_timestamp(t)
+ result = dsdb_dns.unix_to_dns_timestamp(t)
+ self.assertEqual(result, expected)
+
+ def test_dns_timestamp_to_nt_time(self):
+ timestamps = [16168393,
+ 1,
+ 0,
+ (1 << 32) - 1,
+ (1 << 63) - 1,
+ int((1 << 63) / 3.6e10),
+ int((1 << 63) / 3.6e10) + 1, # overflows
+ ]
+
+ for t in timestamps:
+ overflows = False
+ try:
+ expected = timestamp2nttime(t)
+ except OverflowError:
+ overflows = True
+ try:
+ result = dsdb_dns.dns_timestamp_to_nt_time(t)
+ except ValueError:
+ self.assertTrue(overflows, f"timestamp {t} should not overflow")
+ continue
+ self.assertFalse(overflows, f"timestamp {t} should overflow")
+
+ self.assertEqual(result, expected)
diff --git a/python/samba/tests/dsdb_lock.py b/python/samba/tests/dsdb_lock.py
new file mode 100644
index 0000000..628be9c
--- /dev/null
+++ b/python/samba/tests/dsdb_lock.py
@@ -0,0 +1,374 @@
+# Unix SMB/CIFS implementation. Tests for DSDB locking
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for samba's dsdb modules"""
+
+from samba.tests.samdb import SamDBTestCase
+from samba.samdb import SamDB
+import ldb
+import os
+import gc
+import time
+
+
+class DsdbLockTestCase(SamDBTestCase):
+ def test_db_lock1(self):
+ basedn = self.samdb.get_default_basedn()
+ (r1, w1) = os.pipe()
+
+ pid = os.fork()
+ if pid == 0:
+ # In the child, close the main DB, re-open just one DB
+ del(self.samdb)
+ gc.collect()
+ self.samdb = SamDB(session_info=self.session,
+ lp=self.lp)
+
+ self.samdb.transaction_start()
+
+ dn = "cn=test_db_lock_user,cn=users," + str(basedn)
+ self.samdb.add({
+ "dn": dn,
+ "objectclass": "user",
+ })
+ self.samdb.delete(dn)
+
+ # Obtain a write lock
+ self.samdb.transaction_prepare_commit()
+ os.write(w1, b"prepared")
+ time.sleep(2)
+
+ # Drop the write lock
+ self.samdb.transaction_cancel()
+ os._exit(0)
+
+ self.assertEqual(os.read(r1, 8), b"prepared")
+
+ start = time.time()
+
+ # We need to hold this iterator open to hold the all-record lock.
+ res = self.samdb.search_iterator()
+
+ # This should take at least 2 seconds because the transaction
+ # has a write lock on one backend db open
+
+ # Release the locks
+ for l in res:
+ pass
+
+ end = time.time()
+ self.assertGreater(end - start, 1.9)
+
+ (got_pid, status) = os.waitpid(pid, 0)
+ self.assertEqual(got_pid, pid)
+ self.assertTrue(os.WIFEXITED(status))
+ self.assertEqual(os.WEXITSTATUS(status), 0)
+
+ def test_db_lock2(self):
+ basedn = self.samdb.get_default_basedn()
+ (r1, w1) = os.pipe()
+ (r2, w2) = os.pipe()
+
+ pid = os.fork()
+ if pid == 0:
+ # In the child, close the main DB, re-open
+ del(self.samdb)
+ gc.collect()
+ self.samdb = SamDB(session_info=self.session,
+ lp=self.lp)
+
+ # We need to hold this iterator open to hold the all-record lock.
+ res = self.samdb.search_iterator()
+
+ os.write(w2, b"start")
+ if (os.read(r1, 7) != b"started"):
+ os._exit(1)
+
+ os.write(w2, b"add")
+ if (os.read(r1, 5) != b"added"):
+ os._exit(2)
+
+            # Wait 2 seconds to block prepare_commit() in the parent.
+ os.write(w2, b"prepare")
+ time.sleep(2)
+
+ # Release the locks
+ for l in res:
+ pass
+
+ if (os.read(r1, 8) != b"prepared"):
+ os._exit(3)
+
+ os._exit(0)
+
+ # We can start the transaction during the search
+ # because both just grab the all-record read lock.
+ self.assertEqual(os.read(r2, 5), b"start")
+ self.samdb.transaction_start()
+ os.write(w1, b"started")
+
+ self.assertEqual(os.read(r2, 3), b"add")
+ dn = "cn=test_db_lock_user,cn=users," + str(basedn)
+ self.samdb.add({
+ "dn": dn,
+ "objectclass": "user",
+ })
+ self.samdb.delete(dn)
+ os.write(w1, b"added")
+
+ # Obtain a write lock, this will block until
+        # the child releases the read lock.
+ self.assertEqual(os.read(r2, 7), b"prepare")
+ start = time.time()
+ self.samdb.transaction_prepare_commit()
+ end = time.time()
+ try:
+ self.assertGreater(end - start, 1.9)
+ except:
+ raise
+ finally:
+ os.write(w1, b"prepared")
+
+ # Drop the write lock
+ self.samdb.transaction_cancel()
+
+ (got_pid, status) = os.waitpid(pid, 0)
+ self.assertEqual(got_pid, pid)
+ self.assertTrue(os.WIFEXITED(status))
+ self.assertEqual(os.WEXITSTATUS(status), 0)
+
+ def test_db_lock3(self):
+ basedn = self.samdb.get_default_basedn()
+ (r1, w1) = os.pipe()
+ (r2, w2) = os.pipe()
+
+ pid = os.fork()
+ if pid == 0:
+ # In the child, close the main DB, re-open
+ del(self.samdb)
+ gc.collect()
+ self.samdb = SamDB(session_info=self.session,
+ lp=self.lp)
+
+ # We need to hold this iterator open to hold the all-record lock.
+ res = self.samdb.search_iterator()
+
+ os.write(w2, b"start")
+ if (os.read(r1, 7) != b"started"):
+ os._exit(1)
+
+ os.write(w2, b"add")
+ if (os.read(r1, 5) != b"added"):
+ os._exit(2)
+
+ # Wait 2 seconds to block prepare_commit() in the child.
+ os.write(w2, b"prepare")
+ time.sleep(2)
+
+ # Release the locks
+ for l in res:
+ pass
+
+ if (os.read(r1, 8) != b"prepared"):
+ os._exit(3)
+
+ os._exit(0)
+
+ # We can start the transaction during the search
+ # because both just grab the all-record read lock.
+ self.assertEqual(os.read(r2, 5), b"start")
+ self.samdb.transaction_start()
+ os.write(w1, b"started")
+
+ self.assertEqual(os.read(r2, 3), b"add")
+
+ # This will end up in the top level db
+ dn = "@DSDB_LOCK_TEST"
+ self.samdb.add({
+ "dn": dn})
+ self.samdb.delete(dn)
+ os.write(w1, b"added")
+
+ # Obtain a write lock, this will block until
+ # the child releases the read lock.
+ self.assertEqual(os.read(r2, 7), b"prepare")
+ start = time.time()
+ self.samdb.transaction_prepare_commit()
+ end = time.time()
+ self.assertGreater(end - start, 1.9)
+ os.write(w1, b"prepared")
+
+ # Drop the write lock
+ self.samdb.transaction_cancel()
+
+ (got_pid, status) = os.waitpid(pid, 0)
+ self.assertTrue(os.WIFEXITED(status))
+ self.assertEqual(os.WEXITSTATUS(status), 0)
+ self.assertEqual(got_pid, pid)
+
+ def _test_full_db_lock1(self, backend_path):
+ (r1, w1) = os.pipe()
+
+ pid = os.fork()
+ if pid == 0:
+ # In the child, close the main DB, re-open just one DB
+ del(self.samdb)
+ gc.collect()
+
+ backenddb = ldb.Ldb(backend_path)
+
+ backenddb.transaction_start()
+
+ backenddb.add({"dn": "@DSDB_LOCK_TEST"})
+ backenddb.delete("@DSDB_LOCK_TEST")
+
+ # Obtain a write lock
+ backenddb.transaction_prepare_commit()
+ os.write(w1, b"prepared")
+ time.sleep(2)
+
+ # Drop the write lock
+ backenddb.transaction_cancel()
+ os._exit(0)
+
+ self.assertEqual(os.read(r1, 8), b"prepared")
+
+ start = time.time()
+
+ # We need to hold this iterator open to hold the all-record lock.
+ res = self.samdb.search_iterator()
+
+ # This should take at least 2 seconds because the transaction
+ # has a write lock on one backend db open
+
+ end = time.time()
+ self.assertGreater(end - start, 1.9)
+
+ # Release the locks
+ for l in res:
+ pass
+
+ (got_pid, status) = os.waitpid(pid, 0)
+ self.assertEqual(got_pid, pid)
+ self.assertTrue(os.WIFEXITED(status))
+ self.assertEqual(os.WEXITSTATUS(status), 0)
+
+ def test_full_db_lock1(self):
+ basedn = self.samdb.get_default_basedn()
+ backend_filename = "%s.ldb" % basedn.get_casefold()
+ backend_subpath = os.path.join("sam.ldb.d",
+ backend_filename)
+ backend_path = self.lp.private_path(backend_subpath)
+ self._test_full_db_lock1(backend_path)
+
+ def test_full_db_lock1_config(self):
+ basedn = self.samdb.get_config_basedn()
+ backend_filename = "%s.ldb" % basedn.get_casefold()
+ backend_subpath = os.path.join("sam.ldb.d",
+ backend_filename)
+ backend_path = self.lp.private_path(backend_subpath)
+ self._test_full_db_lock1(backend_path)
+
+ def _test_full_db_lock2(self, backend_path):
+ (r1, w1) = os.pipe()
+ (r2, w2) = os.pipe()
+
+ pid = os.fork()
+ if pid == 0:
+
+ # In the child, close the main DB, re-open
+ del(self.samdb)
+ gc.collect()
+ self.samdb = SamDB(session_info=self.session,
+ lp=self.lp)
+
+ # We need to hold this iterator open to hold the all-record lock.
+ res = self.samdb.search_iterator()
+
+ os.write(w2, b"start")
+ if (os.read(r1, 7) != b"started"):
+ os._exit(1)
+ os.write(w2, b"add")
+ if (os.read(r1, 5) != b"added"):
+ os._exit(2)
+
+            # Wait 2 seconds to block prepare_commit() in the parent.
+ os.write(w2, b"prepare")
+ time.sleep(2)
+
+ # Release the locks
+ for l in res:
+ pass
+
+ if (os.read(r1, 8) != b"prepared"):
+ os._exit(3)
+
+ os._exit(0)
+
+ # In the parent, close the main DB, re-open just one DB
+ del(self.samdb)
+ gc.collect()
+ backenddb = ldb.Ldb(backend_path)
+
+ # We can start the transaction during the search
+ # because both just grab the all-record read lock.
+ self.assertEqual(os.read(r2, 5), b"start")
+ backenddb.transaction_start()
+ os.write(w1, b"started")
+
+ self.assertEqual(os.read(r2, 3), b"add")
+ backenddb.add({"dn": "@DSDB_LOCK_TEST"})
+ backenddb.delete("@DSDB_LOCK_TEST")
+ os.write(w1, b"added")
+
+ # Obtain a write lock, this will block until
+ # the child releases the read lock.
+ self.assertEqual(os.read(r2, 7), b"prepare")
+ start = time.time()
+ backenddb.transaction_prepare_commit()
+ end = time.time()
+
+ try:
+ self.assertGreater(end - start, 1.9)
+ except:
+ raise
+ finally:
+ os.write(w1, b"prepared")
+
+ # Drop the write lock
+ backenddb.transaction_cancel()
+
+ (got_pid, status) = os.waitpid(pid, 0)
+ self.assertEqual(got_pid, pid)
+ self.assertTrue(os.WIFEXITED(status))
+ self.assertEqual(os.WEXITSTATUS(status), 0)
+
+ def test_full_db_lock2(self):
+ basedn = self.samdb.get_default_basedn()
+ backend_filename = "%s.ldb" % basedn.get_casefold()
+ backend_subpath = os.path.join("sam.ldb.d",
+ backend_filename)
+ backend_path = self.lp.private_path(backend_subpath)
+ self._test_full_db_lock2(backend_path)
+
+ def test_full_db_lock2_config(self):
+ basedn = self.samdb.get_config_basedn()
+ backend_filename = "%s.ldb" % basedn.get_casefold()
+ backend_subpath = os.path.join("sam.ldb.d",
+ backend_filename)
+ backend_path = self.lp.private_path(backend_subpath)
+ self._test_full_db_lock2(backend_path)
diff --git a/python/samba/tests/dsdb_schema_attributes.py b/python/samba/tests/dsdb_schema_attributes.py
new file mode 100644
index 0000000..7d5c7f9
--- /dev/null
+++ b/python/samba/tests/dsdb_schema_attributes.py
@@ -0,0 +1,249 @@
+# -*- coding: utf-8 -*-
+#
+# Unix SMB/CIFS implementation.
+# Copyright (C) Kamen Mazdrashki <kamenim@samba.org> 2010
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+#
+# Usage:
+# export SUBUNITRUN=$samba4srcdir/scripting/bin/subunitrun
+# PYTHONPATH="$PYTHONPATH:$samba4srcdir/dsdb/tests/python" $SUBUNITRUN dsdb_schema_attributes
+#
+
+import time
+import random
+
+import samba.tests
+import ldb
+from ldb import SCOPE_BASE, LdbError
+
+
+class SchemaAttributesTestCase(samba.tests.TestCase):
+    """Check that adding attributeSchema objects updates the ldb special
+    records @ATTRIBUTES (case handling) and @INDEXLIST (indexed attrs),
+    and that stray values in those records are rebuilt on the next
+    transaction rather than on read."""
+
+    def setUp(self):
+        super().setUp()
+
+        self.lp = samba.tests.env_loadparm()
+        self.samdb = samba.tests.connect_samdb(self.lp.samdb_url())
+
+        # fetch rootDSE
+        res = self.samdb.search(base="", expression="", scope=SCOPE_BASE, attrs=["*"])
+        self.assertEqual(len(res), 1)
+        self.schema_dn = res[0]["schemaNamingContext"][0]
+        self.base_dn = res[0]["defaultNamingContext"][0]
+        self.forest_level = int(res[0]["forestFunctionality"][0])
+
+    def _ldap_schemaUpdateNow(self):
+        """Ask the server to reload the schema via the rootDSE
+        schemaUpdateNow operational attribute."""
+        ldif = """
+dn:
+changetype: modify
+add: schemaUpdateNow
+schemaUpdateNow: 1
+"""
+        self.samdb.modify_ldif(ldif)
+
+    def _make_obj_names(self, prefix):
+        """Return (cn, lookup name without dashes, dn) for a new schema object."""
+        # Epoch-seconds suffix keeps names unique between test runs.
+        # NOTE(review): "%s" is a glibc extension to strftime, not a
+        # portable directive; str(int(time.time())) would be equivalent.
+        obj_name = prefix + time.strftime("%s", time.gmtime())
+        obj_ldap_name = obj_name.replace("-", "")
+        obj_dn = "CN=%s,%s" % (obj_name, self.schema_dn)
+        return (obj_name, obj_ldap_name, obj_dn)
+
+    def _make_attr_ldif(self, attr_name, attr_dn, sub_oid, extra=None):
+        """Build LDIF adding an attributeSchema object.
+
+        sub_oid namespaces the attributeId per caller, and a random
+        suffix avoids OID collisions with earlier runs.  'extra', if
+        given, appends one more LDIF line (e.g. searchFlags).
+        """
+        ldif = """
+dn: """ + attr_dn + """
+objectClass: top
+objectClass: attributeSchema
+adminDescription: """ + attr_name + """
+adminDisplayName: """ + attr_name + """
+cn: """ + attr_name + """
+attributeId: 1.3.6.1.4.1.7165.4.6.1.8.%d.""" % sub_oid + str(random.randint(1, 100000)) + """
+attributeSyntax: 2.5.5.12
+omSyntax: 64
+instanceType: 4
+isSingleValued: TRUE
+systemOnly: FALSE
+"""
+
+        if extra is not None:
+            ldif += extra + "\n"
+
+        return ldif
+
+    def test_AddIndexedAttribute(self):
+        """An indexed attribute appears in both @ATTRIBUTES and @INDEXLIST."""
+        # create names for an attribute to add
+        (attr_name, attr_ldap_name, attr_dn) = self._make_obj_names("schemaAttributes-IdxAttr-")
+        ldif = self._make_attr_ldif(attr_name, attr_dn, 1,
+                                    "searchFlags: %d" % samba.dsdb.SEARCH_FLAG_ATTINDEX)
+
+        # add the new attribute
+        self.samdb.add_ldif(ldif)
+        self._ldap_schemaUpdateNow()
+
+        # Check @ATTRIBUTES
+
+        attr_res = self.samdb.search(base="@ATTRIBUTES", scope=ldb.SCOPE_BASE)
+
+        self.assertIn(attr_ldap_name, attr_res[0])
+        self.assertEqual(len(attr_res[0][attr_ldap_name]), 1)
+        self.assertEqual(str(attr_res[0][attr_ldap_name][0]), "CASE_INSENSITIVE")
+
+        # Check @INDEXLIST
+
+        idx_res = self.samdb.search(base="@INDEXLIST", scope=ldb.SCOPE_BASE)
+
+        self.assertIn(attr_ldap_name, [str(x) for x in idx_res[0]["@IDXATTR"]])
+
+    def test_AddUnIndexedAttribute(self):
+        """An unindexed attribute appears in @ATTRIBUTES but not @INDEXLIST."""
+        # create names for an attribute to add
+        (attr_name, attr_ldap_name, attr_dn) = self._make_obj_names("schemaAttributes-UnIdxAttr-")
+        ldif = self._make_attr_ldif(attr_name, attr_dn, 2)
+
+        # add the new attribute
+        self.samdb.add_ldif(ldif)
+        self._ldap_schemaUpdateNow()
+
+        # Check @ATTRIBUTES
+
+        attr_res = self.samdb.search(base="@ATTRIBUTES", scope=ldb.SCOPE_BASE)
+
+        self.assertIn(attr_ldap_name, attr_res[0])
+        self.assertEqual(len(attr_res[0][attr_ldap_name]), 1)
+        self.assertEqual(str(attr_res[0][attr_ldap_name][0]), "CASE_INSENSITIVE")
+
+        # Check @INDEXLIST
+
+        idx_res = self.samdb.search(base="@INDEXLIST", scope=ldb.SCOPE_BASE)
+
+        self.assertNotIn(attr_ldap_name, [str(x) for x in idx_res[0]["@IDXATTR"]])
+
+    def test_AddTwoIndexedAttributes(self):
+        """Two indexed attributes added in sequence both end up in
+        @ATTRIBUTES and @INDEXLIST."""
+        # create names for an attribute to add
+        (attr_name, attr_ldap_name, attr_dn) = self._make_obj_names("schemaAttributes-2IdxAttr-")
+        ldif = self._make_attr_ldif(attr_name, attr_dn, 3,
+                                    "searchFlags: %d" % samba.dsdb.SEARCH_FLAG_ATTINDEX)
+
+        # add the new attribute
+        self.samdb.add_ldif(ldif)
+        self._ldap_schemaUpdateNow()
+
+        # create names for an attribute to add
+        (attr_name2, attr_ldap_name2, attr_dn2) = self._make_obj_names("schemaAttributes-Attr-")
+        ldif = self._make_attr_ldif(attr_name2, attr_dn2, 4,
+                                    "searchFlags: %d" % samba.dsdb.SEARCH_FLAG_ATTINDEX)
+
+        # add the new attribute
+        self.samdb.add_ldif(ldif)
+        self._ldap_schemaUpdateNow()
+
+        # Check @ATTRIBUTES
+
+        attr_res = self.samdb.search(base="@ATTRIBUTES", scope=ldb.SCOPE_BASE)
+
+        self.assertIn(attr_ldap_name, attr_res[0])
+        self.assertEqual(len(attr_res[0][attr_ldap_name]), 1)
+        self.assertEqual(str(attr_res[0][attr_ldap_name][0]), "CASE_INSENSITIVE")
+
+        self.assertIn(attr_ldap_name2, attr_res[0])
+        self.assertEqual(len(attr_res[0][attr_ldap_name2]), 1)
+        self.assertEqual(str(attr_res[0][attr_ldap_name2][0]), "CASE_INSENSITIVE")
+
+        # Check @INDEXLIST
+
+        idx_res = self.samdb.search(base="@INDEXLIST", scope=ldb.SCOPE_BASE)
+
+        self.assertIn(attr_ldap_name, [str(x) for x in idx_res[0]["@IDXATTR"]])
+        self.assertIn(attr_ldap_name2, [str(x) for x in idx_res[0]["@IDXATTR"]])
+
+    def test_modify_at_attributes(self):
+        """A manually-added @ATTRIBUTES value survives reads but is
+        removed once another connection runs a transaction."""
+        m = {"dn": "@ATTRIBUTES",
+             "@TEST_EXTRA": ["HIDDEN"]
+             }
+
+        msg = ldb.Message.from_dict(self.samdb, m, ldb.FLAG_MOD_ADD)
+        self.samdb.modify(msg)
+
+        res = self.samdb.search(base="@ATTRIBUTES", scope=ldb.SCOPE_BASE,
+                                attrs=["@TEST_EXTRA"])
+        self.assertEqual(len(res), 1)
+        self.assertEqual(str(res[0].dn), "@ATTRIBUTES")
+        self.assertEqual(len(res[0]), 1)
+        self.assertTrue("@TEST_EXTRA" in res[0])
+        self.assertEqual(len(res[0]["@TEST_EXTRA"]), 1)
+        self.assertEqual(str(res[0]["@TEST_EXTRA"][0]), "HIDDEN")
+
+        samdb2 = samba.tests.connect_samdb(self.lp.samdb_url())
+
+        # We now only update the @ATTRIBUTES when a transaction happens
+        # rather than making a read of the DB do writes.
+        #
+        # This avoids locking issues and is more expected
+
+        samdb2.transaction_start()
+        samdb2.transaction_commit()
+
+        res = self.samdb.search(base="@ATTRIBUTES", scope=ldb.SCOPE_BASE,
+                                attrs=["@TEST_EXTRA"])
+        self.assertEqual(len(res), 1)
+        self.assertEqual(str(res[0].dn), "@ATTRIBUTES")
+        self.assertEqual(len(res[0]), 0)
+        self.assertFalse("@TEST_EXTRA" in res[0])
+
+    def test_modify_at_indexlist(self):
+        """A manually-added @INDEXLIST value survives reads but is
+        removed once another connection runs a transaction."""
+        m = {"dn": "@INDEXLIST",
+             "@TEST_EXTRA": ["1"]
+             }
+
+        msg = ldb.Message.from_dict(self.samdb, m, ldb.FLAG_MOD_ADD)
+        self.samdb.modify(msg)
+
+        res = self.samdb.search(base="@INDEXLIST", scope=ldb.SCOPE_BASE,
+                                attrs=["@TEST_EXTRA"])
+        self.assertEqual(len(res), 1)
+        self.assertEqual(str(res[0].dn), "@INDEXLIST")
+        self.assertEqual(len(res[0]), 1)
+        self.assertTrue("@TEST_EXTRA" in res[0])
+        self.assertEqual(len(res[0]["@TEST_EXTRA"]), 1)
+        self.assertEqual(str(res[0]["@TEST_EXTRA"][0]), "1")
+
+        samdb2 = samba.tests.connect_samdb(self.lp.samdb_url())
+
+        # We now only update the @INDEXLIST when a transaction happens
+        # rather than making a read of the DB do writes.
+        #
+        # This avoids locking issues and is more expected
+
+        samdb2.transaction_start()
+        samdb2.transaction_commit()
+
+        res = self.samdb.search(base="@INDEXLIST", scope=ldb.SCOPE_BASE,
+                                attrs=["@TEST_EXTRA"])
+        self.assertEqual(len(res), 1)
+        self.assertEqual(str(res[0].dn), "@INDEXLIST")
+        self.assertEqual(len(res[0]), 0)
+        self.assertFalse("@TEST_EXTRA" in res[0])
+
+    def test_modify_fail_of_at_indexlist(self):
+        """Deleting a value that is not present in @INDEXLIST must fail
+        with ERR_NO_SUCH_ATTRIBUTE, not silently succeed."""
+        m = {"dn": "@INDEXLIST",
+             "@TEST_NOT_EXTRA": ["1"]
+             }
+
+        msg = ldb.Message.from_dict(self.samdb, m, ldb.FLAG_MOD_DELETE)
+        try:
+            self.samdb.modify(msg)
+            self.fail("modify of @INDEXLIST with a failed constraint should fail")
+        except LdbError as err:
+            enum = err.args[0]
+            self.assertEqual(enum, ldb.ERR_NO_SUCH_ATTRIBUTE)
diff --git a/python/samba/tests/emulate/__init__.py b/python/samba/tests/emulate/__init__.py
new file mode 100644
index 0000000..9b4ed83
--- /dev/null
+++ b/python/samba/tests/emulate/__init__.py
@@ -0,0 +1,17 @@
+# Package initialisation
+#
+# Copyright (C) Catalyst IT Ltd. 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
diff --git a/python/samba/tests/emulate/traffic.py b/python/samba/tests/emulate/traffic.py
new file mode 100644
index 0000000..63fbd10
--- /dev/null
+++ b/python/samba/tests/emulate/traffic.py
@@ -0,0 +1,164 @@
+# Unit and integration tests for traffic.py
+#
+# Copyright (C) Catalyst IT Ltd. 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+# from pprint import pprint
+from io import StringIO
+
+import samba.tests
+
+from samba.emulate import traffic
+
+
+TEST_FILE = 'testdata/traffic-sample-very-short.txt'
+
+
+class TrafficEmulatorTests(samba.tests.TestCase):
+ def setUp(self):
+ self.model = traffic.TrafficModel()
+
+ def tearDown(self):
+ del self.model
+
+ def test_parse_ngrams_dns_included(self):
+ model = traffic.TrafficModel()
+ f = open(TEST_FILE)
+ (conversations,
+ interval,
+ duration,
+ dns_counts) = traffic.ingest_summaries([f], dns_mode='include')
+ f.close()
+ model.learn(conversations)
+ expected_ngrams = {
+ ('-', '-'): ['dns:0', 'dns:0', 'dns:0', 'ldap:3'],
+ ('-', 'dns:0'): ['dns:0', 'dns:0', 'dns:0'],
+ ('-', 'ldap:3'): ['wait:0'],
+ ('cldap:3', 'cldap:3'): ['cldap:3', 'wait:0'],
+ ('cldap:3', 'wait:0'): ['rpc_netlogon:29'],
+ ('dns:0', 'dns:0'): ['dns:0', 'dns:0', 'dns:0', 'wait:0'],
+ ('dns:0', 'wait:0'): ['cldap:3'],
+ ('kerberos:', 'ldap:3'): ['-'],
+ ('ldap:3', 'wait:0'): ['ldap:2'],
+ ('rpc_netlogon:29', 'kerberos:'): ['ldap:3'],
+ ('wait:0', 'cldap:3'): ['cldap:3'],
+ ('wait:0', 'rpc_netlogon:29'): ['kerberos:']
+ }
+ expected_query_details = {
+ 'cldap:3': [('', '', '', 'Netlogon', '', '', ''),
+ ('', '', '', 'Netlogon', '', '', ''),
+ ('', '', '', 'Netlogon', '', '', '')],
+ 'dns:0': [(), (), (), (), (), (), (), (), ()],
+ 'kerberos:': [('',)],
+ 'ldap:2': [('', '', '', '', '', '', '')],
+ 'ldap:3': [('',
+ '',
+ '',
+ 'subschemaSubentry,dsServiceName,namingContexts,'
+ 'defaultNamingContext,schemaNamingContext,'
+ 'configurationNamingContext,rootDomainNamingContext,'
+ 'supportedControl,supportedLDAPVersion,'
+ 'supportedLDAPPolicies,supportedSASLMechanisms,'
+ 'dnsHostName,ldapServiceName,serverName,'
+ 'supportedCapabilities',
+ '',
+ '',
+ ''),
+ ('2', 'DC,DC', '', 'cn', '', '', '')],
+ 'rpc_netlogon:29': [()]
+ }
+ self.maxDiff = 5000
+ ngrams = {k: sorted(v) for k, v in model.ngrams.items()}
+ details = {k: sorted(v) for k, v in model.query_details.items()}
+
+ self.assertEqual(expected_ngrams, ngrams)
+ self.assertEqual(expected_query_details, details)
+ # We use a stringIO instead of a temporary file
+ f = StringIO()
+ model.save(f)
+
+ model2 = traffic.TrafficModel()
+ f.seek(0)
+ model2.load(f)
+
+ ngrams = {k: sorted(v) for k, v in model2.ngrams.items()}
+ details = {k: sorted(v) for k, v in model2.query_details.items()}
+ self.assertEqual(expected_ngrams, ngrams)
+ self.assertEqual(expected_query_details, details)
+
+ def test_parse_ngrams(self):
+ f = open(TEST_FILE)
+ (conversations,
+ interval,
+ duration,
+ dns_counts) = traffic.ingest_summaries([f])
+ f.close()
+ self.model.learn(conversations, dns_counts)
+ # print 'ngrams'
+ # pprint(self.model.ngrams, width=50)
+ # print 'query_details'
+ # pprint(self.model.query_details, width=55)
+ expected_ngrams = {
+ ('-', '-'): ['cldap:3', 'ldap:3'],
+ ('-', 'cldap:3'): ['cldap:3'],
+ ('-', 'ldap:3'): ['wait:0'],
+ ('cldap:3', 'cldap:3'): ['cldap:3', 'wait:0'],
+ ('cldap:3', 'wait:0'): ['rpc_netlogon:29'],
+ ('kerberos:', 'ldap:3'): ['-'],
+ ('ldap:3', 'wait:0'): ['ldap:2'],
+ ('rpc_netlogon:29', 'kerberos:'): ['ldap:3'],
+ ('wait:0', 'rpc_netlogon:29'): ['kerberos:']
+ }
+
+ expected_query_details = {
+ 'cldap:3': [('', '', '', 'Netlogon', '', '', ''),
+ ('', '', '', 'Netlogon', '', '', ''),
+ ('', '', '', 'Netlogon', '', '', '')],
+ 'kerberos:': [('',)],
+ 'ldap:2': [('', '', '', '', '', '', '')],
+ 'ldap:3': [('',
+ '',
+ '',
+ 'subschemaSubentry,dsServiceName,namingContexts,'
+ 'defaultNamingContext,schemaNamingContext,'
+ 'configurationNamingContext,rootDomainNamingContext,'
+ 'supportedControl,supportedLDAPVersion,'
+ 'supportedLDAPPolicies,supportedSASLMechanisms,'
+ 'dnsHostName,ldapServiceName,serverName,'
+ 'supportedCapabilities',
+ '',
+ '',
+ ''),
+ ('2', 'DC,DC', '', 'cn', '', '', '')],
+ 'rpc_netlogon:29': [()]
+ }
+ self.maxDiff = 5000
+ ngrams = {k: sorted(v) for k, v in self.model.ngrams.items()}
+ details = {k: sorted(v) for k, v in self.model.query_details.items()}
+
+ self.assertEqual(expected_ngrams, ngrams)
+ self.assertEqual(expected_query_details, details)
+ # We use a stringIO instead of a temporary file
+ f = StringIO()
+ self.model.save(f)
+
+ model2 = traffic.TrafficModel()
+ f.seek(0)
+ model2.load(f)
+
+ ngrams = {k: sorted(v) for k, v in model2.ngrams.items()}
+ details = {k: sorted(v) for k, v in model2.query_details.items()}
+ self.assertEqual(expected_ngrams, ngrams)
+ self.assertEqual(expected_query_details, details)
diff --git a/python/samba/tests/emulate/traffic_packet.py b/python/samba/tests/emulate/traffic_packet.py
new file mode 100644
index 0000000..73ecd24
--- /dev/null
+++ b/python/samba/tests/emulate/traffic_packet.py
@@ -0,0 +1,736 @@
+# Unit and integration tests for traffic_packet.py
+#
+# Copyright (C) Catalyst IT Ltd. 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import shutil
+import tempfile
+
+
+from samba.auth import system_session
+
+from samba.credentials import MUST_USE_KERBEROS, DONT_USE_KERBEROS
+from samba.emulate import traffic_packets as p
+from samba.emulate import traffic
+from samba.emulate.traffic import Packet
+
+from samba.samdb import SamDB
+import samba.tests
+from samba import sd_utils
+
+
+class TrafficEmulatorPacketTests(samba.tests.TestCase):
+    def setUp(self):
+        """Build a ReplayContext plus the OU, machine and user accounts
+        (instance id 1) that the per-packet tests replay against.
+        The temp directory created here is removed in tearDown."""
+        super().setUp()
+        self.server = os.environ["SERVER"]
+        self.domain = os.environ["DOMAIN"]
+        self.host = os.environ["SERVER_IP"]
+        self.lp = self.get_loadparm()
+        self.session = system_session()
+        self.credentials = self.get_credentials()
+
+        self.ldb = SamDB(url="ldap://%s" % self.host,
+                         session_info=self.session,
+                         credentials=self.credentials,
+                         lp=self.lp)
+        self.domain_sid = self.ldb.get_domain_sid()
+
+        # presumably clears instance-1 accounts left by an earlier run;
+        # tearDown makes the same call
+        traffic.clean_up_accounts(self.ldb, 1)
+        self.tempdir = tempfile.mkdtemp(prefix="traffic_packet_test_")
+        self.context = traffic.ReplayContext(server=self.server,
+                                             lp=self.lp,
+                                             creds=self.credentials,
+                                             tempdir=self.tempdir,
+                                             ou=traffic.ou_name(self.ldb, 1),
+                                             domain_sid=self.domain_sid,
+                                             total_conversations=3,
+                                             instance_id=1)
+
+        self.conversation = traffic.Conversation()
+        self.conversation.conversation_id = 1
+        self.machinename = "STGM-1-1"
+        self.machinepass = samba.generate_random_password(32, 32)
+        self.username = "STGU-1-1"
+        self.userpass = samba.generate_random_password(32, 32)
+        account = traffic.ConversationAccounts(
+            self.machinename,
+            self.machinepass,
+            self.username,
+            self.userpass)
+
+        traffic.create_ou(self.ldb, 1)
+        traffic.create_machine_account(self.ldb,
+                                       1,
+                                       self.machinename,
+                                       self.machinepass)
+        traffic.create_user_account(self.ldb,
+                                    1,
+                                    self.username,
+                                    self.userpass)
+
+        self.context.generate_process_local_config(account, self.conversation)
+
+        # grant user write permission to do things like write account SPN
+        sdutils = sd_utils.SDUtils(self.ldb)
+        mod = "(A;;WP;;;PS)"
+        sdutils.dacl_add_ace(self.context.user_dn, mod)
+
+    def tearDown(self):
+        """Remove the instance-1 test accounts, the DB connection and
+        the temp directory created in setUp."""
+        # NOTE(review): super().tearDown() runs before the local cleanup
+        # below, the reverse of the usual order — confirm the base class
+        # does not depend on self.ldb/self.tempdir still existing.
+        super().tearDown()
+        traffic.clean_up_accounts(self.ldb, 1)
+        del self.ldb
+        shutil.rmtree(self.tempdir)
+
+    # Each test_packet_* method below feeds one tab-separated summary
+    # line to the matching traffic_packets handler and checks whether a
+    # packet was generated (assertTrue) or intentionally not
+    # (assertFalse).
+    # NOTE(review): "self. context" parses identically to "self.context";
+    # the stray space after the dot is cosmetic only.
+    def test_packet_cldap_03(self):
+        packet = Packet.from_line(
+            "0.0\t11\t1\t2\t1\tcldap\t3\tsearchRequest\t")
+        self.assertTrue(p.packet_cldap_3(packet,
+                                         self.conversation,
+                                         self. context))
+
+    def test_packet_cldap_05(self):
+        packet = Packet.from_line(
+            "0.0\t11\t1\t1\t2\tcldap\t5\tsearchResDone\t")
+        self.assertFalse(p.packet_cldap_5(packet,
+                                          self.conversation,
+                                          self. context))
+
+    def test_packet_dcerpc_00(self):
+        packet = Packet.from_line("0.0\t11\t1\t2\t1\tdcerpc\t0\tRequest\t")
+        self.assertFalse(p.packet_dcerpc_0(packet,
+                                           self.conversation,
+                                           self. context))
+
+    def test_packet_dcerpc_02(self):
+        packet = Packet.from_line("0.0\t11\t1\t1\t2\tdcerpc\t2\tResponse\t")
+        self.assertFalse(p.packet_dcerpc_2(packet,
+                                           self.conversation,
+                                           self. context))
+
+    def test_packet_dcerpc_03(self):
+        packet = Packet.from_line("0.0\t11\t1\t1\t2\tdcerpc\t3\t\t")
+        self.assertFalse(p.packet_dcerpc_3(packet,
+                                           self.conversation,
+                                           self. context))
+
+    def test_packet_dcerpc_11(self):
+        packet = Packet.from_line("0.0\t11\t1\t2\t1\tdcerpc\t11\tBind\t")
+        self.assertFalse(p.packet_dcerpc_11(packet,
+                                            self.conversation,
+                                            self. context))
+
+    def test_packet_dcerpc_13(self):
+        packet = Packet.from_line("0.0\t11\t1\t2\t1\tdcerpc\t13\t\t")
+        self.assertFalse(p.packet_dcerpc_13(packet,
+                                            self.conversation,
+                                            self. context))
+
+    def test_packet_dcerpc_14(self):
+        packet = Packet.from_line(
+            "0.0\t11\t1\t2\t1\tdcerpc\t14\tAlter_context\t")
+        self.assertFalse(p.packet_dcerpc_14(packet,
+                                            self.conversation,
+                                            self. context))
+
+    def test_packet_dcerpc_15(self):
+        packet = Packet.from_line(
+            "0.0\t11\t1\t1\t2\tdcerpc\t15\tAlter_context_resp\t")
+        # Set user_creds MUST_USE_KERBEROS to suppress the warning message.
+        self.context.user_creds.set_kerberos_state(MUST_USE_KERBEROS)
+        self.assertFalse(p.packet_dcerpc_15(packet,
+                                            self.conversation,
+                                            self. context))
+
+    def test_packet_dcerpc_16(self):
+        packet = Packet.from_line(
+            "0.0\t11\t1\t1\t2\tdcerpc\t16\tAUTH3\t")
+        self.assertFalse(p.packet_dcerpc_16(packet,
+                                            self.conversation,
+                                            self. context))
+
+    # dns, drsuapi and epm handlers: replies (dns 1, DsReplica*) produce
+    # no packet; client-initiated operations (DsBind, DsCrackNames,
+    # DsWriteAccountSpn) do.
+    def test_packet_dns_01(self):
+        packet = Packet.from_line(
+            "0.0\t11\t1\t1\t2\tdns\t1\tresponse\t")
+        self.assertFalse(p.packet_dns_1(packet,
+                                        self.conversation,
+                                        self. context))
+
+    def test_packet_drsuapi_00(self):
+        packet = Packet.from_line(
+            "0.0\t06\t1\t1\t2\tdrsuapi\t0\tDsBind\t")
+        self.assertTrue(p.packet_drsuapi_0(packet,
+                                           self.conversation,
+                                           self. context))
+
+    def test_packet_drsuapi_01(self):
+        packet = Packet.from_line(
+            "0.0\t06\t1\t1\t2\tdrsuapi\t1\tDsUnBind\t")
+        self.assertTrue(p.packet_drsuapi_1(packet,
+                                           self.conversation,
+                                           self. context))
+
+    def test_packet_drsuapi_02(self):
+        packet = Packet.from_line(
+            "0.0\t06\t1\t1\t2\tdrsuapi\t2\tDsReplicaSync\t")
+        self.assertFalse(p.packet_drsuapi_2(packet,
+                                            self.conversation,
+                                            self. context))
+
+    def test_packet_drsuapi_03(self):
+        packet = Packet.from_line(
+            "0.0\t06\t1\t1\t2\tdrsuapi\t3\tDsGetNCChanges\t")
+        self.assertFalse(p.packet_drsuapi_3(packet,
+                                            self.conversation,
+                                            self. context))
+
+    def test_packet_drsuapi_04(self):
+        packet = Packet.from_line(
+            "0.0\t06\t1\t1\t2\tdrsuapi\t4\tDsReplicaUpdateRefs\t")
+        self.assertFalse(p.packet_drsuapi_4(packet,
+                                            self.conversation,
+                                            self. context))
+
+    def test_packet_drsuapi_12(self):
+        packet = Packet.from_line(
+            "0.0\t06\t1\t1\t2\tdrsuapi\t12\tDsCrackNames\t")
+        self.assertTrue(p.packet_drsuapi_12(packet,
+                                            self.conversation,
+                                            self. context))
+
+    def test_packet_drsuapi_13(self):
+        packet = Packet.from_line(
+            "0.0\t06\t1\t1\t2\tdrsuapi\t13\tDsWriteAccountSpn\t")
+        self.assertTrue(p.packet_drsuapi_13(packet,
+                                            self.conversation,
+                                            self. context))
+
+    def test_packet_epm_03(self):
+        packet = Packet.from_line(
+            "0.0\t06\t1\t1\t2\tepm\t3\tMap\t")
+        self.assertFalse(p.packet_epm_3(packet,
+                                        self.conversation,
+                                        self. context))
+
+    def test_packet_kerberos(self):
+        """Kerberos packets are not generated, but are used as a hint to
+        favour kerberos.
+        """
+        packet = Packet.from_line(
+            "0.0\t11\t1\t1\t2\tkerberos\t\t\t")
+        self.assertFalse(p.packet_kerberos_(packet,
+                                            self.conversation,
+                                            self. context))
+        # The hint must flip every credential object in the context to
+        # MUST_USE_KERBEROS.
+        self.assertEqual(MUST_USE_KERBEROS,
+                         self.context.user_creds.get_kerberos_state())
+        self.assertEqual(MUST_USE_KERBEROS,
+                         self.context.user_creds_bad.get_kerberos_state())
+        self.assertEqual(MUST_USE_KERBEROS,
+                         self.context.machine_creds.get_kerberos_state())
+        self.assertEqual(MUST_USE_KERBEROS,
+                         self.context.machine_creds_bad.get_kerberos_state())
+        self.assertEqual(MUST_USE_KERBEROS,
+                         self.context.creds.get_kerberos_state())
+
+        # Need to restore kerberos creds on the admin creds otherwise
+        # subsequent tests fail
+        self.credentials.set_kerberos_state(DONT_USE_KERBEROS)
+
+    # ldap handlers: bind and search requests generate traffic; server
+    # responses and housekeeping operations do not.
+    def test_packet_ldap(self):
+        packet = Packet.from_line(
+            "0.0\t06\t1\t1\t2\tldap\t\t*** Unknown ***\t")
+        self.assertFalse(p.packet_ldap_(packet,
+                                        self.conversation,
+                                        self. context))
+
+    def test_packet_ldap_00_sasl(self):
+        packet = Packet.from_line(
+            "0.0\t06\t1\t2\t1\tldap\t0\tbindRequest"
+            "\t\t\t\t\t3\tsasl\t1.3.6.1.5.5.2")
+        self.assertTrue(p.packet_ldap_0(packet,
+                                        self.conversation,
+                                        self. context))
+
+    def test_packet_ldap_00_simple(self):
+        packet = Packet.from_line(
+            "0.0\t06\t1\t2\t1\tldap\t0\tbindRequest"
+            "\t\t\t\t\t0\tsimple\t")
+        self.assertTrue(p.packet_ldap_0(packet,
+                                        self.conversation,
+                                        self. context))
+
+    def test_packet_ldap_01(self):
+        packet = Packet.from_line(
+            "0.0\t06\t1\t1\t2\tldap\t1\tbindResponse\t")
+        self.assertFalse(p.packet_ldap_1(packet,
+                                         self.conversation,
+                                         self. context))
+
+    def test_packet_ldap_02(self):
+        packet = Packet.from_line(
+            "0.0\t06\t1\t2\t1\tldap\t2\tunbindRequest\t")
+        self.assertFalse(p.packet_ldap_2(packet,
+                                         self.conversation,
+                                         self. context))
+
+    def test_packet_ldap_03(self):
+        packet = Packet.from_line(
+            "0.0\t06\t1\t2\t1\tldap\t3\tsearchRequest"
+            "\t2\tDC,DC\t\tcn\t\t\t")
+        self.assertTrue(p.packet_ldap_3(packet,
+                                        self.conversation,
+                                        self. context))
+
+    def test_packet_ldap_04(self):
+        packet = Packet.from_line(
+            "0.0\t06\t1\t1\t2\tldap\t4\tsearchResEntry\t")
+        self.assertFalse(p.packet_ldap_4(packet,
+                                         self.conversation,
+                                         self. context))
+
+    def test_packet_ldap_05(self):
+        packet = Packet.from_line(
+            "0.0\t06\t1\t1\t2\tldap\t5\tsearchResDone\t")
+        self.assertFalse(p.packet_ldap_5(packet,
+                                         self.conversation,
+                                         self. context))
+
+    def test_packet_ldap_06(self):
+        packet = Packet.from_line(
+            "0.0\t06\t1\t2\t1\tldap\t6\tmodifyRequest\t"
+            "\t\t\t\t0\tadd")
+        self.assertFalse(p.packet_ldap_6(packet,
+                                         self.conversation,
+                                         self. context))
+
+    def test_packet_ldap_07(self):
+        packet = Packet.from_line(
+            "0.0\t06\t1\t1\t2\tldap\t7\tmodifyResponse\t")
+        self.assertFalse(p.packet_ldap_7(packet,
+                                         self.conversation,
+                                         self. context))
+
+    def test_packet_ldap_08(self):
+        packet = Packet.from_line(
+            "0.0\t06\t1\t2\t1\tldap\t8\taddRequest\t")
+        self.assertFalse(p.packet_ldap_8(packet,
+                                         self.conversation,
+                                         self. context))
+
+    def test_packet_ldap_09(self):
+        packet = Packet.from_line(
+            "0.0\t06\t1\t1\t2\tldap\t9\taddResponse\t")
+        self.assertFalse(p.packet_ldap_9(packet,
+                                         self.conversation,
+                                         self. context))
+
+    def test_packet_ldap_16(self):
+        packet = Packet.from_line(
+            "0.0\t06\t1\t2\t1\tldap\t16\tabandonRequest\t")
+        self.assertFalse(p.packet_ldap_16(packet,
+                                          self.conversation,
+                                          self. context))
+
+    # NOTE(review): this test calls packet_lsarpc_1 — the same handler
+    # as test_packet_lsarpc_01 below — despite feeding an opnum-0
+    # lsa_Close line.  Looks like a copy-paste slip; confirm whether a
+    # packet_lsarpc_0 handler exists and was intended.
+    def test_packet_lsarpc_00(self):
+        packet = Packet.from_line(
+            "0.0\t06\t1\t2\t1\tlsarpc\t0\tlsa_Close\t")
+        self.assertFalse(p.packet_lsarpc_1(packet,
+                                           self.conversation,
+                                           self. context))
+
+    def test_packet_lsarpc_01(self):
+        packet = Packet.from_line(
+            "0.0\t06\t1\t2\t1\tlsarpc\t1\tlsa_Delete\t")
+        self.assertFalse(p.packet_lsarpc_1(packet,
+                                           self.conversation,
+                                           self. context))
+
+    def test_packet_lsarpc_02(self):
+        packet = Packet.from_line(
+            "0.0\t06\t1\t2\t1\tlsarpc\t2\tlsa_EnumeratePrivileges\t")
+        self.assertFalse(p.packet_lsarpc_2(packet,
+                                           self.conversation,
+                                           self. context))
+
+    def test_packet_lsarpc_03(self):
+        packet = Packet.from_line(
+            "0.0\t06\t1\t2\t1\tlsarpc\t3\tlsa_QuerySecurityObject\t")
+        self.assertFalse(p.packet_lsarpc_3(packet,
+                                           self.conversation,
+                                           self. context))
+
+    def test_packet_lsarpc_04(self):
+        packet = Packet.from_line(
+            "0.0\t06\t1\t2\t1\tlsarpc\t4\tlsa_SetSecurityObject\t")
+        self.assertFalse(p.packet_lsarpc_4(packet,
+                                           self.conversation,
+                                           self. context))
+
+    def test_packet_lsarpc_05(self):
+        packet = Packet.from_line(
+            "0.0\t06\t1\t2\t1\tlsarpc\t5\tlsa_ChangePassword\t")
+        self.assertFalse(p.packet_lsarpc_5(packet,
+                                           self.conversation,
+                                           self. context))
+
+    def test_packet_lsarpc_06(self):
+        packet = Packet.from_line(
+            "0.0\t06\t1\t2\t1\tlsarpc\t6\tlsa_OpenPolicy\t")
+        self.assertFalse(p.packet_lsarpc_6(packet,
+                                           self.conversation,
+                                           self. context))
+
+    def test_packet_lsarpc_14(self):
+        packet = Packet.from_line(
+            "0.0\t06\t1\t2\t1\tlsarpc\t14\tlsa_LookupNames\t")
+        self.assertTrue(p.packet_lsarpc_14(packet,
+                                           self.conversation,
+                                           self. context))
+
+    def test_packet_lsarpc_15(self):
+        packet = Packet.from_line(
+            "0.0\t06\t1\t2\t1\tlsarpc\t15\tlsa_LookupSids\t")
+        self.assertTrue(p.packet_lsarpc_15(packet,
+                                           self.conversation,
+                                           self. context))
+
+    def test_packet_lsarpc_39(self):
+        packet = Packet.from_line(
+            "0.0\t06\t1\t2\t1\tlsarpc\t39\tlsa_QueryTrustedDomainInfoBySid\t")
+        self.assertTrue(p.packet_lsarpc_39(packet,
+                                           self.conversation,
+                                           self. context))
+
+    def test_packet_lsarpc_40(self):
+        packet = Packet.from_line(
+            "0.0\t06\t1\t2\t1\tlsarpc\t40\tlsa_SetTrustedDomainInfo\t")
+        self.assertFalse(p.packet_lsarpc_40(packet,
+                                            self.conversation,
+                                            self. context))
+
+    def test_packet_lsarpc_43(self):
+        packet = Packet.from_line(
+            "0.0\t06\t1\t2\t1\tlsarpc\t43\tlsa_StorePrivateData\t")
+        self.assertFalse(p.packet_lsarpc_43(packet,
+                                            self.conversation,
+                                            self. context))
+
+    def test_packet_lsarpc_44(self):
+        packet = Packet.from_line(
+            "0.0\t06\t1\t2\t1\tlsarpc\t44\tlsa_RetrievePrivateData\t")
+        self.assertFalse(p.packet_lsarpc_44(packet,
+                                            self.conversation,
+                                            self. context))
+
+    def test_packet_lsarpc_68(self):
+        packet = Packet.from_line(
+            "0.0\t06\t1\t2\t1\tlsarpc\t68\tlsa_LookupNames3\t")
+        self.assertFalse(p.packet_lsarpc_68(packet,
+                                            self.conversation,
+                                            self. context))
+
+    def test_packet_lsarpc_76(self):
+        packet = Packet.from_line(
+            "0.0\t06\t1\t2\t1\tlsarpc\t76\tlsa_LookupSids3\t")
+        self.assertTrue(p.packet_lsarpc_76(packet,
+                                           self.conversation,
+                                           self. context))
+
+    def test_packet_lsarpc_77(self):
+        packet = Packet.from_line(
+            "0.0\t06\t1\t2\t1\tlsarpc\t77\tlsa_LookupNames4\t")
+        self.assertTrue(p.packet_lsarpc_77(packet,
+                                           self.conversation,
+                                           self. context))
+
+    def test_packet_nbns_00(self):
+        packet = Packet.from_line(
+            "0.0\t06\t1\t2\t1\tnbns\t0\tquery\t")
+        self.assertTrue(p.packet_nbns_0(packet,
+                                        self.conversation,
+                                        self. context))
+
+    # NOTE(review): calls packet_nbns_0 even though this is the nbns 1
+    # (response) test — possibly intentional (responses replayed through
+    # the same handler), possibly a copy-paste slip; confirm against
+    # traffic_packets.
+    def test_packet_nbns_01(self):
+        packet = Packet.from_line(
+            "0.0\t06\t1\t1\t2\tnbns\t1\tresponse\t")
+        self.assertTrue(p.packet_nbns_0(packet,
+                                        self.conversation,
+                                        self. context))
+
+    def test_packet_rpc_netlogon_00(self):
+        packet = Packet.from_line(
+            "0.0\t06\t1\t2\t1\trpc_netlogon\t0\tNetrLogonUasLogon\t")
+        self.assertFalse(p.packet_rpc_netlogon_0(packet,
+                                                 self.conversation,
+                                                 self. context))
+
+    def test_packet_rpc_netlogon_01(self):
+        packet = Packet.from_line(
+            "0.0\t06\t1\t2\t1\trpc_netlogon\t1\tNetrLogonUasLogoff\t")
+        self.assertFalse(p.packet_rpc_netlogon_1(packet,
+                                                 self.conversation,
+                                                 self. context))
+
+    def test_packet_rpc_netlogon_04(self):
+        packet = Packet.from_line(
+            "0.0\t06\t1\t2\t1\trpc_netlogon\t4\tNetrServerReqChallenge\t")
+        self.assertFalse(p.packet_rpc_netlogon_4(packet,
+                                                 self.conversation,
+                                                 self. context))
+
+    def test_packet_rpc_netlogon_14(self):
+        packet = Packet.from_line(
+            "0.0\t06\t1\t2\t1\trpc_netlogon\t14\tNetrLogonControl2\t")
+        self.assertFalse(p.packet_rpc_netlogon_14(packet,
+                                                  self.conversation,
+                                                  self. context))
+
+    def test_packet_rpc_netlogon_15(self):
+        packet = Packet.from_line(
+            "0.0\t06\t1\t2\t1\trpc_netlogon\t15\tNetrServerAuthenticate2\t")
+        self.assertFalse(p.packet_rpc_netlogon_15(packet,
+                                                  self.conversation,
+                                                  self. context))
+
+    def test_packet_rpc_netlogon_21(self):
+        packet = Packet.from_line(
+            "0.0\t06\t1\t2\t1\trpc_netlogon\t21\tNetrLogonDummyRoutine1\t")
+        self.assertFalse(p.packet_rpc_netlogon_21(packet,
+                                                  self.conversation,
+                                                  self. context))
+
+    def test_packet_rpc_netlogon_26(self):
+        packet = Packet.from_line(
+            "0.0\t06\t1\t2\t1\trpc_netlogon\t26\tNetrServerAuthenticate3\t")
+        self.assertFalse(p.packet_rpc_netlogon_26(packet,
+                                                  self.conversation,
+                                                  self. context))
+
+    def test_packet_rpc_netlogon_29(self):
+        packet = Packet.from_line(
+            "0.0\t06\t1\t2\t1\trpc_netlogon\t29\tNetrLogonGetDomainInfo\t")
+        self.assertTrue(p.packet_rpc_netlogon_29(packet,
+                                                 self.conversation,
+                                                 self. context))
+
+    def test_packet_rpc_netlogon_30(self):
+        packet = Packet.from_line(
+            "0.0\t06\t1\t2\t1\trpc_netlogon\t30\tNetrServerPasswordSet2\t")
+        self.assertTrue(p.packet_rpc_netlogon_30(packet,
+                                                 self.conversation,
+                                                 self. context))
+
+    def test_packet_rpc_netlogon_34(self):
+        packet = Packet.from_line(
+            "0.0\t06\t1\t2\t1\trpc_netlogon\t34\tDsrGetDcNameEx2\t")
+        self.assertFalse(p.packet_rpc_netlogon_34(packet,
+                                                  self.conversation,
+                                                  self. context))
+
+    def test_packet_rpc_netlogon_39(self):
+        packet = Packet.from_line(
+            "0.0\t06\t1\t2\t1\trpc_netlogon\t39\tNetrLogonSamLogonEx\t")
+        self.assertTrue(p.packet_rpc_netlogon_39(packet,
+                                                 self.conversation,
+                                                 self. context))
+
+    def test_packet_rpc_netlogon_40(self):
+        packet = Packet.from_line(
+            "0.0\t06\t1\t2\t1\trpc_netlogon\t40\tDsrEnumerateDomainTrusts\t")
+        self.assertTrue(p.packet_rpc_netlogon_40(packet,
+                                                 self.conversation,
+                                                 self. context))
+
+    def test_packet_rpc_netlogon_45(self):
+        packet = Packet.from_line(
+            "0.0\t06\t1\t2\t1\trpc_netlogon\t45\tNetrLogonSamLogonWithFlags\t")
+        self.assertTrue(p.packet_rpc_netlogon_45(packet,
+                                                 self.conversation,
+                                                 self. context))
+
+    # samr handlers: every operation tested here is expected to generate
+    # a packet except CreateDomAlias (opnum 14).
+    def test_packet_samr_00(self):
+        packet = Packet.from_line(
+            "0.0\t06\t1\t2\t1\tsamr\t0\tConnect\t")
+        self.assertTrue(p.packet_samr_0(packet,
+                                        self.conversation,
+                                        self. context))
+
+    def test_packet_samr_01(self):
+        packet = Packet.from_line(
+            "0.0\t06\t1\t2\t1\tsamr\t1\tClose\t")
+        self.assertTrue(p.packet_samr_1(packet,
+                                        self.conversation,
+                                        self. context))
+
+    def test_packet_samr_03(self):
+        packet = Packet.from_line(
+            "0.0\t06\t1\t2\t1\tsamr\t3\tQuerySecurity\t")
+        self.assertTrue(p.packet_samr_3(packet,
+                                        self.conversation,
+                                        self. context))
+
+    def test_packet_samr_05(self):
+        packet = Packet.from_line(
+            "0.0\t06\t1\t2\t1\tsamr\t5\tLookupDomain\t")
+        self.assertTrue(p.packet_samr_5(packet,
+                                        self.conversation,
+                                        self. context))
+
+    def test_packet_samr_06(self):
+        packet = Packet.from_line(
+            "0.0\t06\t1\t2\t1\tsamr\t6\tEnumDomains\t")
+        self.assertTrue(p.packet_samr_6(packet,
+                                        self.conversation,
+                                        self. context))
+
+    def test_packet_samr_07(self):
+        packet = Packet.from_line(
+            "0.0\t06\t1\t2\t1\tsamr\t7\tOpenDomain\t")
+        self.assertTrue(p.packet_samr_7(packet,
+                                        self.conversation,
+                                        self. context))
+
+    # NOTE(review): the operation name "QueryDomainInfo'" below carries a
+    # stray apostrophe; it is runtime data fed to the parser, so it is
+    # left untouched here — confirm whether it matters to from_line().
+    def test_packet_samr_08(self):
+        packet = Packet.from_line(
+            "0.0\t06\t1\t2\t1\tsamr\t8\tQueryDomainInfo'\t")
+        self.assertTrue(p.packet_samr_8(packet,
+                                        self.conversation,
+                                        self. context))
+
+    def test_packet_samr_14(self):
+        packet = Packet.from_line(
+            "0.0\t06\t1\t2\t1\tsamr\t14\tCreateDomAlias\t")
+        self.assertFalse(p.packet_samr_14(packet,
+                                          self.conversation,
+                                          self. context))
+
+    def test_packet_samr_15(self):
+        packet = Packet.from_line(
+            "0.0\t06\t1\t2\t1\tsamr\t15\tEnumDomainAliases\t")
+        self.assertTrue(p.packet_samr_15(packet,
+                                         self.conversation,
+                                         self. context))
+
+    def test_packet_samr_16(self):
+        packet = Packet.from_line(
+            "0.0\t06\t1\t2\t1\tsamr\t16\tGetAliasMembership\t")
+        self.assertTrue(p.packet_samr_16(packet,
+                                         self.conversation,
+                                         self. context))
+
+    def test_packet_samr_17(self):
+        packet = Packet.from_line(
+            "0.0\t06\t1\t2\t1\tsamr\t17\tLookupNames\t")
+        self.assertTrue(p.packet_samr_17(packet,
+                                         self.conversation,
+                                         self. context))
+
+    def test_packet_samr_18(self):
+        packet = Packet.from_line(
+            "0.0\t06\t1\t2\t1\tsamr\t18\tLookupRids\t")
+        self.assertTrue(p.packet_samr_18(packet,
+                                         self.conversation,
+                                         self. context))
+
+ def test_packet_samr_19(self):
+ packet = Packet.from_line(
+ "0.0\t06\t1\t2\t1\tsamr\t19\tOpenGroup\t")
+ self.assertTrue(p.packet_samr_19(packet,
+ self.conversation,
+ self. context))
+
+ def test_packet_samr_25(self):
+ packet = Packet.from_line(
+ "0.0\t06\t1\t2\t1\tsamr\t25\tQueryGroupMember\t")
+ self.assertTrue(p.packet_samr_25(packet,
+ self.conversation,
+ self. context))
+
+ def test_packet_samr_34(self):
+ packet = Packet.from_line(
+ "0.0\t06\t1\t2\t1\tsamr\t34\tOpenUser\t")
+ self.assertTrue(p.packet_samr_34(packet,
+ self.conversation,
+ self. context))
+
+ def test_packet_samr_36(self):
+ packet = Packet.from_line(
+ "0.0\t06\t1\t2\t1\tsamr\t36\tQueryUserInfo\t")
+ self.assertTrue(p.packet_samr_36(packet,
+ self.conversation,
+ self. context))
+
+ def test_packet_samr_37(self):
+ packet = Packet.from_line(
+ "0.0\t06\t1\t2\t1\tsamr\t37\tSetUserInfo\t")
+ self.assertFalse(p.packet_samr_37(packet,
+ self.conversation,
+ self. context))
+
+ def test_packet_samr_39(self):
+ packet = Packet.from_line(
+ "0.0\t06\t1\t2\t1\tsamr\t39\tGetGroupsForUser\t")
+ self.assertTrue(p.packet_samr_39(packet,
+ self.conversation,
+ self. context))
+
+ def test_packet_samr_40(self):
+ packet = Packet.from_line(
+ "0.0\t06\t1\t2\t1\tsamr\t40\tQueryDisplayInfo\t")
+ self.assertFalse(p.packet_samr_40(packet,
+ self.conversation,
+ self. context))
+
+ def test_packet_samr_44(self):
+ packet = Packet.from_line(
+ "0.0\t06\t1\t2\t1\tsamr\t44\tGetUserPwInfo\t")
+ self.assertFalse(p.packet_samr_44(packet,
+ self.conversation,
+ self. context))
+
+ def test_packet_samr_57(self):
+ packet = Packet.from_line(
+ "0.0\t06\t1\t2\t1\tsamr\t57\tConnect2\t")
+ self.assertTrue(p.packet_samr_57(packet,
+ self.conversation,
+ self. context))
+
+ def test_packet_samr_64(self):
+ packet = Packet.from_line(
+ "0.0\t06\t1\t2\t1\tsamr\t64\tConnect5\t")
+ self.assertTrue(p.packet_samr_64(packet,
+ self.conversation,
+ self. context))
+
+ def test_packet_samr_68(self):
+ packet = Packet.from_line(
+ "0.0\t06\t1\t2\t1\tsamr\t68\t\t")
+ self.assertFalse(p.packet_samr_68(packet,
+ self.conversation,
+ self. context))
+
+ def test_packet_srvsvc_16(self):
+ packet = Packet.from_line(
+ "0.0\t06\t1\t2\t1\tsrvsvc\t16\tNetShareGetInfo\t")
+ self.assertTrue(p.packet_srvsvc_16(packet,
+ self.conversation,
+ self. context))
+
+ def test_packet_srvsvc_21(self):
+ packet = Packet.from_line(
+ "0.0\t06\t1\t2\t1\tsrvsvc\t21\tNetSrvGetInfo\t")
+ self.assertTrue(p.packet_srvsvc_21(packet,
+ self.conversation,
+ self. context))
diff --git a/python/samba/tests/encrypted_secrets.py b/python/samba/tests/encrypted_secrets.py
new file mode 100644
index 0000000..e251a3c
--- /dev/null
+++ b/python/samba/tests/encrypted_secrets.py
@@ -0,0 +1,83 @@
+# Unix SMB/CIFS implementation.
+#
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Smoke test for encrypted secrets
+
+A quick test to confirm that the secret attributes are being stored
+encrypted on disk.
+"""
+
+
+import os
+import ldb
+import samba
+from samba.tests import TestCase
+from samba.credentials import Credentials
+from samba.samdb import SamDB
+from samba.auth import system_session
+from samba.ndr import ndr_unpack
+from samba.dcerpc import drsblobs
+
+
class EncryptedSecretsTests(TestCase):
    """Smoke tests that secret attributes are encrypted in the backend DB.

    Opens the partition backend ldb file directly (bypassing the SamDB
    module stack, which transparently decrypts) and checks that the
    stored unicodePwd value carries the EncryptedSecret header.
    """

    def setUp(self):
        super().setUp()
        self.lp = samba.tests.env_loadparm()
        self.creds = Credentials()
        self.creds.guess(self.lp)
        # NOTE: the original assigned self.session twice; once is enough.
        self.session = system_session()
        self.ldb = SamDB(session_info=self.session,
                         credentials=self.creds,
                         lp=self.lp)

    def test_encrypted_secrets(self):
        """Test that secret attributes are stored encrypted on disk"""
        basedn = self.ldb.domain_dn()
        backend_filename = "%s.ldb" % basedn.upper()
        backend_subpath = os.path.join("sam.ldb.d",
                                       backend_filename)
        backend_path = self.lp.private_path(backend_subpath)
        # Open the raw backend file; FLG_DONT_CREATE_DB makes a missing
        # database an error rather than silently creating an empty one.
        backenddb = ldb.Ldb("ldb://" + backend_path,
                            flags=ldb.FLG_DONT_CREATE_DB)

        dn = "CN=Administrator,CN=Users,%s" % basedn

        res = backenddb.search(scope=ldb.SCOPE_BASE,
                               base=dn,
                               attrs=["unicodePwd"])
        self.assertGreater(len(res), 0)
        obj = res[0]
        blob = obj["unicodePwd"][0]
        # An encrypted value is larger than the raw NT hash it wraps.
        self.assertGreater(len(blob), 30)
        # Now verify that the header contains the correct magic value.
        encrypted = ndr_unpack(drsblobs.EncryptedSecret, blob)
        magic = 0xca5caded
        self.assertEqual(magic, encrypted.header.magic)

    def test_required_features(self):
        """Test that databases are provisioned with encryptedSecrets as a
        required feature
        """
        res = self.ldb.search(scope=ldb.SCOPE_BASE,
                              base="@SAMBA_DSDB",
                              attrs=["requiredFeatures"])
        self.assertGreater(len(res), 0)
        self.assertIn("requiredFeatures", res[0])
        required_features = res[0]["requiredFeatures"]
        self.assertIn(b"encryptedSecrets", required_features)
diff --git a/python/samba/tests/gensec.py b/python/samba/tests/gensec.py
new file mode 100644
index 0000000..8f9c88d
--- /dev/null
+++ b/python/samba/tests/gensec.py
@@ -0,0 +1,259 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2009
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for GENSEC.
+
+Note that this just tests the bindings work. It does not intend to test
+the functionality, that's already done in other tests.
+"""
+
+from samba.credentials import Credentials
+from samba import gensec, auth
+import samba.tests
+
+
class GensecTests(samba.tests.TestCase):
    """API-level tests of the GENSEC Python bindings.

    A client and a server gensec.Security instance authenticate to each
    other in-process, then wrap/unwrap test payloads and compare session
    keys.
    """

    def setUp(self):
        super().setUp()
        # Settings dict shared by all Security.start_client/start_server
        # calls; lp_ctx is kept as an attribute so tests can flip options.
        self.settings = {}
        self.settings["lp_ctx"] = self.lp_ctx = samba.tests.env_loadparm()
        self.settings["target_hostname"] = self.lp_ctx.get("netbios name")
        self.lp_ctx.set("spnego:simulate_w2k", "no")

        # This is just for the API tests
        self.gensec = gensec.Security.start_client(self.settings)

    def test_start_mech_by_unknown_name(self):
        """An unknown mechanism name raises RuntimeError."""
        self.assertRaises(RuntimeError, self.gensec.start_mech_by_name, "foo")

    def test_start_mech_by_name(self):
        """Starting the spnego mechanism by name succeeds."""
        self.gensec.start_mech_by_name("spnego")

    def test_info_uninitialized(self):
        """session_info() before any exchange raises RuntimeError."""
        self.assertRaises(RuntimeError, self.gensec.session_info)

    def _test_update(self, mech, *, creds=None, client_mech=None, client_only_opt=None):
        """Test GENSEC by doing an exchange with ourselves using GSSAPI against a KDC"""

        # Start up a client and server GENSEC instance to test things with

        if creds is None:
            creds = self.get_credentials()

        # client_only_opt names an lp option that must be "yes" only while
        # client-side code runs; remember its original value to restore it.
        if client_only_opt:
            orig_client_opt = self.lp_ctx.get(client_only_opt)
            if not orig_client_opt:
                orig_client_opt = ''
            self.lp_ctx.set(client_only_opt, "yes")

        self.gensec_client = gensec.Security.start_client(self.settings)
        self.gensec_client.set_credentials(creds)
        self.gensec_client.want_feature(gensec.FEATURE_SEAL)
        if client_mech is not None:
            self.gensec_client.start_mech_by_name(client_mech)
        else:
            self.gensec_client.start_mech_by_sasl_name(mech)

        # Turn the client-only option back off before server setup.
        if client_only_opt:
            self.lp_ctx.set(client_only_opt, "no")

        self.gensec_server = gensec.Security.start_server(settings=self.settings,
                                                          auth_context=auth.AuthContext(lp_ctx=self.lp_ctx))
        # The server authenticates with the machine account credentials.
        creds = Credentials()
        creds.guess(self.lp_ctx)
        creds.set_machine_account(self.lp_ctx)
        self.gensec_server.set_credentials(creds)

        self.gensec_server.want_feature(gensec.FEATURE_SEAL)
        self.gensec_server.start_mech_by_sasl_name(mech)

        client_finished = False
        server_finished = False
        server_to_client = b""
        client_to_server = b""

        # Run the actual call loop
        # Each side consumes the other's last token; the client-only
        # option is toggled on/off around each client update call.
        while True:
            if not client_finished:
                if client_only_opt:
                    self.lp_ctx.set(client_only_opt, "yes")
                print("running client gensec_update")
                try:
                    (client_finished, client_to_server) = self.gensec_client.update(server_to_client)
                except samba.NTSTATUSError as nt:
                    raise AssertionError(nt)
                if client_only_opt:
                    self.lp_ctx.set(client_only_opt, "no")
            if not server_finished:
                print("running server gensec_update")
                try:
                    (server_finished, server_to_client) = self.gensec_server.update(client_to_server)
                except samba.NTSTATUSError as nt:
                    raise AssertionError(nt)

            if client_finished and server_finished:
                break

        # Restore the caller-visible lp option to its original value.
        if client_only_opt:
            self.lp_ctx.set(client_only_opt, orig_client_opt)

        self.assertTrue(server_finished)
        self.assertTrue(client_finished)

        session_info = self.gensec_server.session_info()

        # Round-trip a payload in each direction through wrap/unwrap.
        test_bytes = b"Hello Server"
        try:
            test_wrapped = self.gensec_client.wrap(test_bytes)
            test_unwrapped = self.gensec_server.unwrap(test_wrapped)
        except samba.NTSTATUSError as e:
            self.fail(str(e))

        self.assertEqual(test_bytes, test_unwrapped)
        test_bytes = b"Hello Client"
        test_wrapped = self.gensec_server.wrap(test_bytes)
        test_unwrapped = self.gensec_client.unwrap(test_wrapped)
        self.assertEqual(test_bytes, test_unwrapped)

        # Both sides must have negotiated the same session key.
        client_session_key = self.gensec_client.session_key()
        server_session_key = self.gensec_server.session_key()
        self.assertEqual(client_session_key, server_session_key)

    def test_update(self):
        """Plain GSSAPI exchange."""
        self._test_update("GSSAPI")

    def test_update_spnego(self):
        """GSS-SPNEGO exchange."""
        self._test_update("GSS-SPNEGO")

    def test_update_spnego_downgrade(self):
        """SPNEGO client with gssapi_krb5 forced client-only."""
        self._test_update("GSS-SPNEGO", client_mech="spnego", client_only_opt="gensec:gssapi_krb5")

    def test_update_no_optimistic_spnego(self):
        """SPNEGO client with the optimistic token disabled client-only."""
        self._test_update("GSS-SPNEGO", client_mech="spnego", client_only_opt="spnego:client_no_optimistic")

    def test_update_w2k_spnego_client(self):
        """Client simulates Windows 2000 SPNEGO; server does not."""
        self.lp_ctx.set("spnego:simulate_w2k", "yes")

        # Re-start the client with this set
        self.gensec = gensec.Security.start_client(self.settings)

        # Unset it for the server
        self.lp_ctx.set("spnego:simulate_w2k", "no")

        self._test_update("GSS-SPNEGO")

    def test_update_w2k_spnego_server(self):
        """Server simulates Windows 2000 SPNEGO; client does not."""
        # Re-start the client with this set
        self.gensec = gensec.Security.start_client(self.settings)

        # Unset it for the server
        self.lp_ctx.set("spnego:simulate_w2k", "yes")

        self._test_update("GSS-SPNEGO")

    def test_update_w2k_spnego(self):
        """SPNEGO exchange with w2k simulation explicitly off."""
        self.lp_ctx.set("spnego:simulate_w2k", "no")

        # Re-start the client with this set
        self.gensec = gensec.Security.start_client(self.settings)

        self._test_update("GSS-SPNEGO")

    def test_update_gss_krb5_to_spnego(self):
        """Raw gssapi_krb5 client against a GSS-SPNEGO server."""
        self._test_update("GSS-SPNEGO", client_mech="gssapi_krb5")

    def test_update_ntlmssp_to_spnego(self):
        """Raw ntlmssp client against a GSS-SPNEGO server."""
        self._test_update("GSS-SPNEGO", client_mech="ntlmssp")

    def test_update_fast(self):
        """Test associating a machine account with the credentials
        to protect the password from cracking and show
        'log in from device' pattern.

        (Note we can't tell if FAST armor was actually used with this test)"""
        creds = self.insta_creds(template=self.get_credentials())
        machine_creds = Credentials()
        machine_creds.guess(self.lp_ctx)
        machine_creds.set_machine_account(self.lp_ctx)
        creds.set_krb5_fast_armor_credentials(machine_creds, True)
        self._test_update("GSSAPI", creds=creds)

    def test_update_anon_fast(self):
        """Test setting no FAST credentials, but requiring FAST.
        Against a Heimdal KDC this will trigger the anonymous
        PKINIT protection.

        (Note we can't tell if FAST armor was actually used with this test)
        """
        creds = self.insta_creds(template=self.get_credentials())
        creds.set_krb5_fast_armor_credentials(None, True)
        self._test_update("GSSAPI", creds=creds)

    def test_max_update_size(self):
        """Test GENSEC by doing an exchange with ourselves using GSSAPI against a KDC"""

        # Start up a client and server GENSEC instance to test things with

        self.gensec_client = gensec.Security.start_client(self.settings)
        self.gensec_client.set_credentials(self.get_credentials())
        self.gensec_client.want_feature(gensec.FEATURE_SIGN)
        # Tiny max_update_size forces the exchange into many fragments.
        self.gensec_client.set_max_update_size(5)
        self.gensec_client.start_mech_by_name("spnego")

        self.gensec_server = gensec.Security.start_server(settings=self.settings,
                                                          auth_context=auth.AuthContext(lp_ctx=self.lp_ctx))
        creds = Credentials()
        creds.guess(self.lp_ctx)
        creds.set_machine_account(self.lp_ctx)
        self.gensec_server.set_credentials(creds)
        self.gensec_server.want_feature(gensec.FEATURE_SIGN)
        self.gensec_server.set_max_update_size(5)
        self.gensec_server.start_mech_by_name("spnego")

        client_finished = False
        server_finished = False
        server_to_client = b""

        # Run the actual call loop
        i = 0
        while not client_finished or not server_finished:
            i += 1
            if not client_finished:
                print("running client gensec_update: %d: %r" % (len(server_to_client), server_to_client))
                (client_finished, client_to_server) = self.gensec_client.update(server_to_client)
            if not server_finished:
                print("running server gensec_update: %d: %r" % (len(client_to_server), client_to_server))
                (server_finished, server_to_client) = self.gensec_server.update(client_to_server)

        # Here we expect a lot more than the typical 1 or 2 roundtrips
        self.assertTrue(i > 10)

        session_info = self.gensec_server.session_info()

        test_bytes = b"Hello Server"
        test_wrapped = self.gensec_client.wrap(test_bytes)
        test_unwrapped = self.gensec_server.unwrap(test_wrapped)
        self.assertEqual(test_bytes, test_unwrapped)
        test_bytes = b"Hello Client"
        test_wrapped = self.gensec_server.wrap(test_bytes)
        test_unwrapped = self.gensec_client.unwrap(test_wrapped)
        self.assertEqual(test_bytes, test_unwrapped)

        client_session_key = self.gensec_client.session_key()
        server_session_key = self.gensec_server.session_key()
        self.assertEqual(client_session_key, server_session_key)
diff --git a/python/samba/tests/get_opt.py b/python/samba/tests/get_opt.py
new file mode 100644
index 0000000..60caf52
--- /dev/null
+++ b/python/samba/tests/get_opt.py
@@ -0,0 +1,69 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2011
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for option parsing.
+
+"""
+
+import optparse
+from samba.getopt import (
+ AUTO_USE_KERBEROS,
+ DONT_USE_KERBEROS,
+ MUST_USE_KERBEROS,
+ parse_kerberos_arg_legacy,
+ parse_kerberos_arg,
+)
+import samba.tests
+
+
class KerberosOptionTests(samba.tests.TestCase):
    """Check parsing of the legacy --kerberos and new --use-kerberos values."""

    def _legacy(self, value):
        # Run the legacy parser with the option name it reports in errors.
        return parse_kerberos_arg_legacy(value, "--kerberos")

    def _parse(self, value):
        # Run the current parser with the option name it reports in errors.
        return parse_kerberos_arg(value, "--use-kerberos")

    def test_legacy_parse_true(self):
        for value in ("yes", "true", "1"):
            self.assertEqual(MUST_USE_KERBEROS, self._legacy(value))

    def test_legacy_parse_false(self):
        for value in ("no", "false", "0"):
            self.assertEqual(DONT_USE_KERBEROS, self._legacy(value))

    def test_legacy_parse_auto(self):
        self.assertEqual(AUTO_USE_KERBEROS, self._legacy("auto"))

    def test_legacy_parse_invalid(self):
        self.assertRaises(optparse.OptionValueError,
                          parse_kerberos_arg_legacy, "blah?", "--kerberos")

    def test_parse_valid(self):
        self.assertEqual(MUST_USE_KERBEROS, self._parse("required"))
        self.assertEqual(AUTO_USE_KERBEROS, self._parse("desired"))
        self.assertEqual(DONT_USE_KERBEROS, self._parse("off"))

    def test_parse_invalid(self):
        self.assertRaises(optparse.OptionValueError,
                          parse_kerberos_arg, "wurst", "--use-kerberos")
diff --git a/python/samba/tests/getdcname.py b/python/samba/tests/getdcname.py
new file mode 100644
index 0000000..d248b20
--- /dev/null
+++ b/python/samba/tests/getdcname.py
@@ -0,0 +1,700 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2018
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""
+ Tests GetDCNameEx calls in NETLOGON
+"""
+
+from samba import WERRORError, werror
+import samba.tests
+import os
+from samba.credentials import Credentials
+from samba.dcerpc import netlogon, nbt
+from samba.dcerpc.misc import GUID
+from samba.net import Net
+
+class GetDCNameEx(samba.tests.TestCase):
+
+ def setUp(self):
+ self.lp = samba.tests.env_loadparm()
+ self.creds = Credentials()
+
+ self.netlogon_conn = None
+ self.server = os.environ.get('SERVER')
+ self.realm = os.environ.get('REALM')
+ self.domain = os.environ.get('DOMAIN')
+ self.trust_realm = os.environ.get('TRUST_REALM')
+ self.trust_domain = os.environ.get('TRUST_DOMAIN')
+ self.trust_server = os.environ.get('TRUST_SERVER')
+
+ def _call_get_dc_name(self, domain=None, domain_guid=None,
+ site_name=None, ex2=False, flags=0):
+ if self.netlogon_conn is None:
+ self.netlogon_conn = netlogon.netlogon(f"ncacn_ip_tcp:{self.server}",
+ self.get_loadparm())
+
+ if ex2:
+ return self.netlogon_conn.netr_DsRGetDCNameEx2(self.server,
+ None, 0,
+ domain,
+ domain_guid,
+ site_name,
+ flags)
+ else:
+ return self.netlogon_conn.netr_DsRGetDCNameEx(self.server,
+ domain,
+ domain_guid,
+ site_name,
+ flags)
+
+ def test_get_dc_ex2(self):
+ """Check the most trivial requirements of Ex2 (no domain or site)
+
+ a) The paths are prefixed with two backslashes
+ b) The returned domains conform to the format requested
+ c) The domain matches our own domain
+ """
+ response = self._call_get_dc_name(ex2=True)
+
+ self.assertIsNotNone(response.dc_unc)
+ self.assertTrue(response.dc_unc.startswith('\\\\'))
+ self.assertIsNotNone(response.dc_address)
+ self.assertTrue(response.dc_address.startswith('\\\\'))
+
+ self.assertTrue(response.domain_name.lower() ==
+ self.realm.lower() or
+ response.domain_name.lower() ==
+ self.domain.lower())
+
+ response = self._call_get_dc_name(ex2=True,
+ flags=netlogon.DS_RETURN_DNS_NAME)
+ self.assertEqual(response.domain_name.lower(),
+ self.realm.lower())
+
+ response = self._call_get_dc_name(ex2=True,
+ flags=netlogon.DS_RETURN_FLAT_NAME)
+ self.assertEqual(response.domain_name.lower(),
+ self.domain.lower())
+
+ def test_get_dc_over_winbind_ex2(self):
+ """Check what happens to Ex2 requests after being forwarded to winbind
+
+ a) The paths must still have the same backslash prefixes
+ b) The returned domain does not match our own domain
+ c) The domain matches the format requested
+ """
+ self.assertIsNotNone(self.trust_realm)
+
+ response_trust = self._call_get_dc_name(domain=self.trust_realm,
+ ex2=True)
+ response = self._call_get_dc_name(domain=self.realm,
+ ex2=True)
+
+ self.assertIsNotNone(response_trust.dc_unc)
+ self.assertTrue(response_trust.dc_unc.startswith('\\\\'))
+ self.assertIsNotNone(response_trust.dc_address)
+ self.assertTrue(response_trust.dc_address.startswith('\\\\'))
+
+ self.assertNotEqual(response_trust.dc_unc,
+ response.dc_unc)
+ self.assertNotEqual(response_trust.dc_address,
+ response.dc_address)
+
+ self.assertTrue(response_trust.domain_name.lower() ==
+ self.trust_realm.lower() or
+ response_trust.domain_name.lower() ==
+ self.trust_domain.lower())
+
+ response_trust = self._call_get_dc_name(domain=self.trust_realm,
+ flags=netlogon.DS_RETURN_DNS_NAME,
+ ex2=True)
+ self.assertEqual(response_trust.domain_name.lower(),
+ self.trust_realm.lower())
+
+ response_trust = self._call_get_dc_name(domain=self.trust_realm,
+ flags=netlogon.DS_RETURN_FLAT_NAME,
+ ex2=True)
+ self.assertEqual(response_trust.domain_name.lower(),
+ self.trust_domain.lower())
+
+ def test_get_dc_over_winbind(self):
+ """Test the standard Ex version (not Ex2)
+
+ Ex calls Ex2 anyways, from now on, just test Ex.
+ """
+ self.assertIsNotNone(self.trust_realm)
+
+ response_trust = self._call_get_dc_name(domain=self.trust_realm,
+ flags=netlogon.DS_RETURN_DNS_NAME)
+
+ self.assertIsNotNone(response_trust.dc_unc)
+ self.assertTrue(response_trust.dc_unc.startswith('\\\\'))
+ self.assertIsNotNone(response_trust.dc_address)
+ self.assertTrue(response_trust.dc_address.startswith('\\\\'))
+
+ self.assertEqual(response_trust.domain_name.lower(),
+ self.trust_realm.lower())
+
+ def test_get_dc_over_winbind_with_site(self):
+ """Test the standard Ex version (not Ex2)
+
+ We assume that there is a Default-First-Site-Name site.
+ """
+ self.assertIsNotNone(self.trust_realm)
+
+ site = 'Default-First-Site-Name'
+ response_trust = self._call_get_dc_name(domain=self.trust_realm,
+ site_name=site,
+ flags=netlogon.DS_RETURN_DNS_NAME)
+
+ self.assertIsNotNone(response_trust.dc_unc)
+ self.assertTrue(response_trust.dc_unc.startswith('\\\\'))
+ self.assertIsNotNone(response_trust.dc_address)
+ self.assertTrue(response_trust.dc_address.startswith('\\\\'))
+
+ self.assertEqual(response_trust.domain_name.lower(),
+ self.trust_realm.lower())
+
+ self.assertEqual(site.lower(), response_trust.dc_site_name.lower())
+
+ def test_get_dc_over_winbind_invalid_site(self):
+ """Test the standard Ex version (not Ex2)
+
+ We assume that there is no Invalid-First-Site-Name site.
+ """
+ self.assertIsNotNone(self.trust_realm)
+
+ site = 'Invalid-First-Site-Name'
+ try:
+ response_trust = self._call_get_dc_name(domain=self.trust_realm,
+ site_name=site,
+ flags=netlogon.DS_RETURN_DNS_NAME,
+ ex2=False)
+ self.fail("Failed to give the correct error for incorrect site")
+ except WERRORError as e:
+ enum, estr = e.args
+ if enum != werror.WERR_NO_SUCH_DOMAIN:
+ self.fail("Failed to detect an invalid site name")
+
+ def test_get_dc_over_winbind_invalid_site_ex2(self):
+ """Test the Ex2 version.
+
+ We assume that there is no Invalid-First-Site-Name site.
+ """
+ self.assertIsNotNone(self.trust_realm)
+
+ site = 'Invalid-First-Site-Name'
+ try:
+ response_trust = self._call_get_dc_name(domain=self.trust_realm,
+ site_name=site,
+ flags=netlogon.DS_RETURN_DNS_NAME,
+ ex2=True)
+ self.fail("Failed to give the correct error for incorrect site")
+ except WERRORError as e:
+ enum, estr = e.args
+ if enum != werror.WERR_NO_SUCH_DOMAIN:
+ self.fail("Failed to detect an invalid site name")
+
+ def test_get_dc_over_winbind_empty_string_site(self):
+ """Test the standard Ex version (not Ex2)
+
+ We assume that there is a Default-First-Site-Name site.
+ """
+ self.assertIsNotNone(self.trust_realm)
+
+ site = ''
+ try:
+ response_trust = self._call_get_dc_name(domain=self.trust_realm,
+ site_name=site,
+ flags=netlogon.DS_RETURN_DNS_NAME)
+ except WERRORError as e:
+ self.fail("Unable to get empty string site result: " + str(e))
+
+ self.assertIsNotNone(response_trust.dc_unc)
+ self.assertTrue(response_trust.dc_unc.startswith('\\\\'))
+ self.assertIsNotNone(response_trust.dc_address)
+ self.assertTrue(response_trust.dc_address.startswith('\\\\'))
+
+ self.assertEqual(response_trust.domain_name.lower(),
+ self.trust_realm.lower())
+
+ self.assertIsNotNone(response_trust.dc_site_name)
+ self.assertNotEqual('', response_trust.dc_site_name)
+
+ def test_get_dc_over_winbind_netbios(self):
+ """Supply a NETBIOS trust domain name."""
+ self.assertIsNotNone(self.trust_realm)
+
+ try:
+ response_trust = self._call_get_dc_name(domain=self.trust_domain,
+ flags=netlogon.DS_RETURN_DNS_NAME,
+ ex2=False)
+ except WERRORError as e:
+ self.fail("Failed to succeed over winbind: " + str(e))
+
+ self.assertIsNotNone(response_trust)
+ self.assertEqual(response_trust.domain_name.lower(),
+ self.trust_realm.lower())
+
+ def test_get_dc_over_winbind_with_site_netbios(self):
+ """Supply a NETBIOS trust domain name.
+
+ Sporadically fails because NETBIOS queries do not return site name in
+ winbind. The site check in NETLOGON will trigger and fail the request.
+
+ Currently marked in flapping...
+ """
+ self.assertIsNotNone(self.trust_realm)
+
+ site = 'Default-First-Site-Name'
+ try:
+ response_trust = self._call_get_dc_name(domain=self.trust_domain,
+ site_name=site,
+ flags=netlogon.DS_RETURN_DNS_NAME,
+ ex2=False)
+ except WERRORError as e:
+ self.fail("get_dc_name (domain=%s,site=%s) over winbind failed: %s"
+ % (self.trust_domain, site, e))
+
+ self.assertIsNotNone(response_trust)
+ self.assertEqual(response_trust.domain_name.lower(),
+ self.trust_realm.lower())
+
+ self.assertEqual(site.lower(), response_trust.dc_site_name.lower())
+
+ def test_get_dc_over_winbind_domain_guid(self):
+ """Ensure that we do not reject requests supplied with a NULL GUID"""
+
+ self.assertIsNotNone(self.trust_realm)
+
+ null_guid = GUID()
+ try:
+ response_trust = self._call_get_dc_name(domain=self.trust_realm,
+ domain_guid=null_guid,
+ flags=netlogon.DS_RETURN_DNS_NAME)
+ except WERRORError as e:
+ self.fail("Unable to get NULL domain GUID result: " + str(e))
+
+ self.assertIsNotNone(response_trust.dc_unc)
+ self.assertTrue(response_trust.dc_unc.startswith('\\\\'))
+ self.assertIsNotNone(response_trust.dc_address)
+ self.assertTrue(response_trust.dc_address.startswith('\\\\'))
+
+ self.assertEqual(response_trust.domain_name.lower(),
+ self.trust_realm.lower())
+
+ def test_get_dc_with_site(self):
+ """Test the standard Ex version (not Ex2)
+
+ We assume that there is a Default-First-Site-Name site.
+ """
+
+ site = 'Default-First-Site-Name'
+ response = self._call_get_dc_name(domain=self.realm,
+ site_name=site,
+ flags=netlogon.DS_RETURN_DNS_NAME)
+
+ self.assertIsNotNone(response.dc_unc)
+ self.assertTrue(response.dc_unc.startswith('\\\\'))
+ self.assertIsNotNone(response.dc_address)
+ self.assertTrue(response.dc_address.startswith('\\\\'))
+
+ self.assertEqual(response.domain_name.lower(),
+ self.realm.lower())
+
+ self.assertEqual(site.lower(), response.dc_site_name.lower())
+
+ def test_get_dc_invalid_site(self):
+ """Test the standard Ex version (not Ex2)
+
+ We assume that there is no Invalid-First-Site-Name site.
+ """
+ self.assertIsNotNone(self.realm)
+
+ site = 'Invalid-First-Site-Name'
+ try:
+ response = self._call_get_dc_name(domain=self.realm,
+ site_name=site,
+ flags=netlogon.DS_RETURN_DNS_NAME,
+ ex2=False)
+ self.fail("Failed to give the correct error for incorrect site")
+ except WERRORError as e:
+ enum, estr = e.args
+ if enum != werror.WERR_NO_SUCH_DOMAIN:
+ self.fail("Failed to detect an invalid site name")
+
+ def test_get_dc_invalid_site_ex2(self):
+ """Test the Ex2 version
+
+ We assume that there is no Invalid-First-Site-Name site.
+ """
+
+ site = 'Invalid-First-Site-Name'
+ try:
+ response = self._call_get_dc_name(domain=self.realm,
+ site_name=site,
+ flags=netlogon.DS_RETURN_DNS_NAME,
+ ex2=True)
+ self.fail("Failed to give the correct error for incorrect site")
+ except WERRORError as e:
+ enum, estr = e.args
+ if enum != werror.WERR_NO_SUCH_DOMAIN:
+ self.fail("Failed to detect an invalid site name")
+
+ def test_get_dc_empty_string_site(self):
+ """Test the standard Ex version (not Ex2)
+
+ We assume that there is a Default-First-Site-Name site.
+ """
+
+ site = ''
+ try:
+ response = self._call_get_dc_name(domain=self.realm,
+ site_name=site,
+ flags=netlogon.DS_RETURN_DNS_NAME)
+ except WERRORError as e:
+ self.fail("Unable to get empty string site result: " + str(e))
+
+ self.assertIsNotNone(response.dc_unc)
+ self.assertTrue(response.dc_unc.startswith('\\\\'))
+ self.assertIsNotNone(response.dc_address)
+ self.assertTrue(response.dc_address.startswith('\\\\'))
+
+ self.assertEqual(response.domain_name.lower(),
+ self.realm.lower())
+
+ self.assertIsNotNone(response.dc_site_name)
+ self.assertNotEqual('', response.dc_site_name)
+
+ def test_get_dc_netbios(self):
+ """Supply a NETBIOS domain name."""
+
+ try:
+ response = self._call_get_dc_name(domain=self.domain,
+ flags=netlogon.DS_RETURN_DNS_NAME,
+ ex2=False)
+ except WERRORError as e:
+ self.fail("Failed to succeed over winbind: " + str(e))
+
+ self.assertIsNotNone(response)
+ self.assertEqual(response.domain_name.lower(),
+ self.realm.lower())
+
+ def test_get_dc_with_site_netbios(self):
+ """Supply a NETBIOS domain name."""
+
+ site = 'Default-First-Site-Name'
+ try:
+ response = self._call_get_dc_name(domain=self.domain,
+ site_name=site,
+ flags=netlogon.DS_RETURN_DNS_NAME,
+ ex2=False)
+ except WERRORError as e:
+ self.fail("Failed to succeed over winbind: " + str(e))
+
+ self.assertIsNotNone(response)
+ self.assertEqual(response.domain_name.lower(),
+ self.realm.lower())
+
+ self.assertEqual(site.lower(), response.dc_site_name.lower())
+
+ def test_get_dc_with_domain_guid(self):
+ """Ensure that we do not reject requests supplied with a NULL GUID"""
+
+ null_guid = GUID()
+ response = self._call_get_dc_name(domain=self.realm,
+ domain_guid=null_guid,
+ flags=netlogon.DS_RETURN_DNS_NAME)
+
+ self.assertIsNotNone(response.dc_unc)
+ self.assertTrue(response.dc_unc.startswith('\\\\'))
+ self.assertIsNotNone(response.dc_address)
+ self.assertTrue(response.dc_address.startswith('\\\\'))
+
+ self.assertEqual(response.domain_name.lower(),
+ self.realm.lower())
+
+ def test_get_dc_with_empty_string_domain(self):
+ """Ensure that empty domain resolve to the DC domain"""
+ response = self._call_get_dc_name(domain='',
+ flags=netlogon.DS_RETURN_DNS_NAME)
+
+ self.assertIsNotNone(response.dc_unc)
+ self.assertTrue(response.dc_unc.startswith('\\\\'))
+ self.assertIsNotNone(response.dc_address)
+ self.assertTrue(response.dc_address.startswith('\\\\'))
+
+ self.assertEqual(response.domain_name.lower(),
+ self.realm.lower())
+
+ def test_get_dc_winbind_need_2012r2(self):
+ """Test requiring that we have a FL2012R2 DC as answer
+ """
+ self.assertIsNotNone(self.trust_realm)
+
+ try:
+ response_trust = self._call_get_dc_name(domain=self.trust_realm,
+ flags=netlogon.DS_RETURN_DNS_NAME|netlogon.DS_DIRECTORY_SERVICE_9_REQUIRED)
+ except WERRORError as e:
+ enum, estr = e.args
+ self.fail(f"netr_DsRGetDCNameEx failed: {estr}")
+
+ self.assertIsNotNone(response_trust.dc_unc)
+ self.assertTrue(response_trust.dc_unc.startswith('\\\\'))
+ self.assertIsNotNone(response_trust.dc_address)
+ self.assertTrue(response_trust.dc_address.startswith('\\\\'))
+
+ self.assertEqual(response_trust.domain_name.lower(),
+ self.trust_realm.lower())
+
+ # Now check the CLDAP netlogon response matches the above
+ dc_ip = response_trust.dc_address[2:]
+
+ net = Net(creds=self.creds, lp=self.lp)
+ cldap_netlogon_reply = net.finddc(domain=self.trust_realm, address=dc_ip,
+ flags=(nbt.NBT_SERVER_LDAP |
+ nbt.NBT_SERVER_DS))
+ self.assertTrue(cldap_netlogon_reply.server_type & nbt.NBT_SERVER_DS_9)
+
+ def test_get_dc_direct_need_2012r2_but_not_found(self):
+ """Test requiring that we have a FL2012R2 DC as answer, against the FL2008R2 domain
+
+ This test requires that the DC in the FL2008R2 does not claim
+ to be 2012R2 capable (off by default in Samba)
+
+ """
+ self.assertIsNotNone(self.realm)
+
+
+ try:
+ response = self._call_get_dc_name(domain=self.realm,
+ flags=netlogon.DS_RETURN_DNS_NAME|netlogon.DS_DIRECTORY_SERVICE_9_REQUIRED)
+
+ self.fail("Failed to detect that requirement for 2012R2 was not met")
+ except WERRORError as e:
+ enum, estr = e.args
+ if enum != werror.WERR_NO_SUCH_DOMAIN:
+ self.fail(f"Incorrect error {estr} from GetDcNameEx looking for 2012R2 DC that was not available")
+
+ def test_get_dc_direct_need_web_but_not_found(self):
+        """Test requiring that we (do not) have an AD Web Services on the DC
+
+ This test requires that the DC does not advertise AD Web Services
+
+ This is used as a test that is easy for a modern windows
+ version to fail, as (say) Windows 2022 will succeed for all
+ the DS_DIRECTORY_SERVICE_* flags. Disable AD Web services in
+ services.mmc to run this test successfully.
+
+ """
+ self.assertIsNotNone(self.realm)
+
+
+ try:
+ response = self._call_get_dc_name(domain=self.realm,
+ flags=netlogon.DS_RETURN_DNS_NAME|netlogon.DS_WEB_SERVICE_REQUIRED)
+
+ self.fail("Failed to detect that requirement for Web Services was not met")
+ except WERRORError as e:
+ enum, estr = e.args
+ if enum != werror.WERR_NO_SUCH_DOMAIN:
+ self.fail(f"Incorrect error {estr} from GetDcNameEx looking for AD Web Services enabled DC that should not be available")
+
+ # Now check the CLDAP netlogon response matches the above - that the bit was not set
+ net = Net(creds=self.creds, lp=self.lp)
+ cldap_netlogon_reply = net.finddc(domain=self.realm,
+ flags=(nbt.NBT_SERVER_LDAP |
+ nbt.NBT_SERVER_DS))
+ # We can assert this, even without looking for a particular
+ # DC, as if any DC has WEB_SERVICE we would have got it above.
+ self.assertFalse(cldap_netlogon_reply.server_type & nbt.NBT_SERVER_ADS_WEB_SERVICE)
+
+ def test_get_dc_winbind_need_web_but_not_found(self):
+        """Test requiring that we (do not) have an AD Web Services on the trusted DC
+
+ This test requires that the DC does not advertise AD Web Services
+
+ This is used as a test that is easy for a modern windows
+ version to fail, as (say) Windows 2022 will succeed for all
+ the DS_DIRECTORY_SERVICE_* flags. Disable AD Web services in
+ services.mmc to run this test successfully.
+
+ """
+ self.assertIsNotNone(self.trust_realm)
+
+
+ try:
+ response = self._call_get_dc_name(domain=self.trust_realm,
+ flags=netlogon.DS_RETURN_DNS_NAME|netlogon.DS_WEB_SERVICE_REQUIRED)
+
+ self.fail("Failed to detect that requirement for Web Services was not met")
+ except WERRORError as e:
+ enum, estr = e.args
+ if enum != werror.WERR_NO_SUCH_DOMAIN:
+ self.fail(f"Incorrect error {estr} from GetDcNameEx looking for AD Web Services enabled DC that should not be available")
+
+ # Now check the CLDAP netlogon response matches the above - that the bit was not set
+ net = Net(creds=self.creds, lp=self.lp)
+ cldap_netlogon_reply = net.finddc(domain=self.trust_realm,
+ flags=(nbt.NBT_SERVER_LDAP |
+ nbt.NBT_SERVER_DS))
+ # We can assert this, even without looking for a particular
+ # DC, as if any DC has WEB_SERVICE we would have got it above.
+ self.assertFalse(cldap_netlogon_reply.server_type & nbt.NBT_SERVER_ADS_WEB_SERVICE)
+
+ def test_get_dc_direct_need_2012r2(self):
+ """Test requiring that we have a FL2012R2 DC as answer
+ """
+ self.assertIsNotNone(self.trust_realm)
+
+ self.netlogon_conn = netlogon.netlogon(f"ncacn_ip_tcp:{self.trust_server}",
+ self.get_loadparm())
+
+ response_trust = self._call_get_dc_name(domain=self.trust_realm,
+ flags=netlogon.DS_RETURN_DNS_NAME|netlogon.DS_DIRECTORY_SERVICE_9_REQUIRED)
+
+ self.assertIsNotNone(response_trust.dc_unc)
+ self.assertTrue(response_trust.dc_unc.startswith('\\\\'))
+ self.assertIsNotNone(response_trust.dc_address)
+ self.assertTrue(response_trust.dc_address.startswith('\\\\'))
+
+ self.assertEqual(response_trust.domain_name.lower(),
+ self.trust_realm.lower())
+
+ # Now check the CLDAP netlogon response matches the above
+ dc_ip = response_trust.dc_address[2:]
+
+ net = Net(creds=self.creds, lp=self.lp)
+ cldap_netlogon_reply = net.finddc(domain=self.trust_realm, address=dc_ip,
+ flags=(nbt.NBT_SERVER_LDAP |
+ nbt.NBT_SERVER_DS))
+ self.assertTrue(cldap_netlogon_reply.server_type & nbt.NBT_SERVER_DS_9)
+
+ def test_get_dc_winbind_need_2012r2_but_not_found(self):
+ """Test requiring that we have a FL2012R2 DC as answer, against the FL2008R2 domain
+
+ This test requires that the DC in the FL2008R2 does not claim
+ to be 2012R2 capable (off by default in Samba)
+
+ """
+ self.assertIsNotNone(self.realm)
+
+ self.netlogon_conn = netlogon.netlogon(f"ncacn_ip_tcp:{self.trust_server}",
+ self.get_loadparm())
+
+
+ try:
+ response = self._call_get_dc_name(domain=self.realm,
+ flags=netlogon.DS_RETURN_DNS_NAME|netlogon.DS_DIRECTORY_SERVICE_9_REQUIRED)
+
+ self.fail("Failed to detect requirement for 2012R2 that is not met")
+ except WERRORError as e:
+ enum, estr = e.args
+ if enum != werror.WERR_NO_SUCH_DOMAIN:
+ self.fail("Failed to detect requirement for 2012R2 that is not met")
+
+ # Now check the CLDAP netlogon response matches the above - that the DS_9 bit was not set
+ net = Net(creds=self.creds, lp=self.lp)
+ cldap_netlogon_reply = net.finddc(domain=self.realm,
+ flags=(nbt.NBT_SERVER_LDAP |
+ nbt.NBT_SERVER_DS))
+ self.assertFalse(cldap_netlogon_reply.server_type & nbt.NBT_SERVER_DS_9)
+
+ def test_get_dc_winbind_need_2012r2_but_not_found_fallback(self):
+ """Test requiring that we have a FL2012R2 DC as answer, against the
+ FL2008R2 domain, then trying for just FL2008R2 (to show caching bugs)
+
+ This test requires that the DC in the FL2008R2 does not claim
+ to be 2012R2 capable (off by default in Samba)
+
+ """
+ self.assertIsNotNone(self.realm)
+
+ self.netlogon_conn = netlogon.netlogon(f"ncacn_ip_tcp:{self.trust_server}",
+ self.get_loadparm())
+
+
+ try:
+ response = self._call_get_dc_name(domain=self.realm,
+ flags=netlogon.DS_RETURN_DNS_NAME|netlogon.DS_DIRECTORY_SERVICE_9_REQUIRED)
+
+ self.fail("Failed to detect requirement for 2012R2 that is not met")
+ except WERRORError as e:
+ enum, estr = e.args
+ if enum != werror.WERR_NO_SUCH_DOMAIN:
+ self.fail("Failed to detect requirement for 2012R2 that is not met")
+
+ try:
+ response = self._call_get_dc_name(domain=self.realm,
+ flags=netlogon.DS_RETURN_DNS_NAME|netlogon.DS_DIRECTORY_SERVICE_6_REQUIRED)
+
+ except WERRORError as e:
+ enum, estr = e.args
+ self.fail("Unexpectedly failed to find 2008 DC")
+
+ dc_ip = response.dc_address[2:]
+
+ net = Net(creds=self.creds, lp=self.lp)
+ cldap_netlogon_reply = net.finddc(domain=self.realm, address=dc_ip,
+ flags=(nbt.NBT_SERVER_LDAP |
+ nbt.NBT_SERVER_DS))
+ self.assertTrue(cldap_netlogon_reply.server_type & nbt.NBT_SERVER_FULL_SECRET_DOMAIN_6)
+
+ def test_get_dc_direct_need_2012r2_but_not_found_fallback(self):
+ """Test requiring that we have a FL2012R2 DC as answer, against the
+ FL2008R2 domain, then trying for just FL2008R2 (to show caching bugs)
+
+ This test requires that the DC in the FL2008R2 does not claim
+ to be 2012R2 capable (off by default in Samba)
+
+ """
+ self.assertIsNotNone(self.realm)
+
+ self.netlogon_conn = netlogon.netlogon(f"ncacn_ip_tcp:{self.server}",
+ self.get_loadparm())
+
+
+ try:
+ response = self._call_get_dc_name(domain=self.realm,
+ flags=netlogon.DS_RETURN_DNS_NAME|netlogon.DS_DIRECTORY_SERVICE_9_REQUIRED)
+
+ self.fail("Failed to detect requirement for 2012R2 that is not met")
+ except WERRORError as e:
+ enum, estr = e.args
+ if enum != werror.WERR_NO_SUCH_DOMAIN:
+ self.fail("Failed to detect requirement for 2012R2 that is not met")
+
+ try:
+ response = self._call_get_dc_name(domain=self.realm,
+ flags=netlogon.DS_RETURN_DNS_NAME|netlogon.DS_DIRECTORY_SERVICE_6_REQUIRED)
+
+ except WERRORError as e:
+ enum, estr = e.args
+ self.fail("Unexpectedly failed to find 2008 DC")
+
+ dc_ip = response.dc_address[2:]
+
+ net = Net(creds=self.creds, lp=self.lp)
+ cldap_netlogon_reply = net.finddc(domain=self.realm, address=dc_ip,
+ flags=(nbt.NBT_SERVER_LDAP |
+ nbt.NBT_SERVER_DS))
+ self.assertTrue(cldap_netlogon_reply.server_type & nbt.NBT_SERVER_FULL_SECRET_DOMAIN_6)
+
+ # TODO Thorough tests of domain GUID
+ #
+ # The domain GUID does not seem to be authoritative, and seems to be a
+ # fallback case for renamed domains.
diff --git a/python/samba/tests/gkdi.py b/python/samba/tests/gkdi.py
new file mode 100644
index 0000000..375b444
--- /dev/null
+++ b/python/samba/tests/gkdi.py
@@ -0,0 +1,647 @@
+#
+# Helper classes for testing the Group Key Distribution Service.
+#
+# Copyright (C) Catalyst.Net Ltd 2023
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+#
+
+import sys
+import os
+
+sys.path.insert(0, "bin/python")
+os.environ["PYTHONUNBUFFERED"] = "1"
+
+import datetime
+import secrets
+from typing import NewType, Optional, Tuple, Union
+
+import ldb
+
+from cryptography.hazmat.backends import default_backend
+from cryptography.hazmat.primitives import hashes
+from cryptography.hazmat.primitives.kdf.kbkdf import CounterLocation, KBKDFHMAC, Mode
+
+from samba import (
+ ntstatus,
+ NTSTATUSError,
+ werror,
+)
+from samba.credentials import Credentials
+from samba.dcerpc import gkdi, misc
+from samba.gkdi import (
+ Algorithm,
+ Gkid,
+ GkidType,
+ GroupKey,
+ KEY_CYCLE_DURATION,
+ KEY_LEN_BYTES,
+ MAX_CLOCK_SKEW,
+ SeedKeyPair,
+)
+from samba.hresult import (
+ HRES_E_INVALIDARG,
+ HRES_NTE_BAD_KEY,
+ HRES_NTE_NO_KEY,
+)
+from samba.ndr import ndr_pack, ndr_unpack
+from samba.nt_time import (
+ nt_time_from_datetime,
+ NtTime,
+ NtTimeDelta,
+ timedelta_from_nt_time_delta,
+)
+from samba.param import LoadParm
+from samba.samdb import SamDB
+
+from samba.tests import delete_force, TestCase
+
+
+HResult = NewType("HResult", int)
+RootKey = NewType("RootKey", ldb.Message)
+
+
+ROOT_KEY_START_TIME = NtTime(KEY_CYCLE_DURATION + MAX_CLOCK_SKEW)
+
+
+class GetKeyError(Exception):
+ def __init__(self, status: HResult, message: str):
+ super().__init__(status, message)
+
+
+class GkdiBaseTest(TestCase):
+ # This is the NDR‐encoded security descriptor O:SYD:(A;;FRFW;;;S-1-5-9).
+ gmsa_sd = (
+ b"\x01\x00\x04\x800\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
+ b"\x14\x00\x00\x00\x02\x00\x1c\x00\x01\x00\x00\x00\x00\x00\x14\x00"
+ b"\x9f\x01\x12\x00\x01\x01\x00\x00\x00\x00\x00\x05\t\x00\x00\x00"
+ b"\x01\x01\x00\x00\x00\x00\x00\x05\x12\x00\x00\x00"
+ )
+
+ @staticmethod
+ def current_time(offset: Optional[datetime.timedelta] = None) -> datetime.datetime:
+ if offset is None:
+ # Allow for clock skew.
+ offset = timedelta_from_nt_time_delta(MAX_CLOCK_SKEW)
+
+ current_time = datetime.datetime.now(tz=datetime.timezone.utc)
+ return current_time + offset
+
+ def current_nt_time(self, offset: Optional[datetime.timedelta] = None) -> NtTime:
+ return nt_time_from_datetime(self.current_time(offset))
+
+ def current_gkid(self, offset: Optional[datetime.timedelta] = None) -> Gkid:
+ return Gkid.from_nt_time(self.current_nt_time(offset))
+
+ def gkdi_connect(
+ self, host: str, lp: LoadParm, server_creds: Credentials
+ ) -> gkdi.gkdi:
+ try:
+ return gkdi.gkdi(f"ncacn_ip_tcp:{host}[seal]", lp, server_creds)
+ except NTSTATUSError as err:
+ if err.args[0] == ntstatus.NT_STATUS_PORT_UNREACHABLE:
+ self.fail(
+ "Try starting the Microsoft Key Distribution Service (KdsSvc).\n"
+ "In PowerShell, run:\n\tStart-Service -Name KdsSvc"
+ )
+
+ raise
+
+ def rpc_get_key(
+ self,
+ conn: gkdi.gkdi,
+ target_sd: bytes,
+ root_key_id: Optional[misc.GUID],
+ gkid: Gkid,
+ ) -> SeedKeyPair:
+ out_len, out, result = conn.GetKey(
+ list(target_sd), root_key_id, gkid.l0_idx, gkid.l1_idx, gkid.l2_idx
+ )
+ result_code, result_string = result
+ if (
+ root_key_id is None
+ and result_code & 0xFFFF == werror.WERR_TOO_MANY_OPEN_FILES
+ ):
+ self.fail(
+ "The server has given up selecting a root key because there are too"
+ " many keys (more than 1000) in the Master Root Keys container. Delete"
+ " some root keys and try again."
+ )
+ if result != (0, None):
+ raise GetKeyError(result_code, result_string)
+ self.assertEqual(len(out), out_len, "output len mismatch")
+
+ envelope = ndr_unpack(gkdi.GroupKeyEnvelope, bytes(out))
+
+ gkid = Gkid(envelope.l0_index, envelope.l1_index, envelope.l2_index)
+ l1_key = bytes(envelope.l1_key) if envelope.l1_key else None
+ l2_key = bytes(envelope.l2_key) if envelope.l2_key else None
+
+ hash_algorithm = Algorithm.from_kdf_parameters(bytes(envelope.kdf_parameters))
+
+ root_key_id = envelope.root_key_id
+
+ return SeedKeyPair(l1_key, l2_key, gkid, hash_algorithm, root_key_id)
+
+ def get_root_key_object(
+ self, samdb: SamDB, root_key_id: Optional[misc.GUID], gkid: Gkid
+ ) -> Tuple[RootKey, misc.GUID]:
+ """Return a root key object and its corresponding GUID.
+
+ *root_key_id* specifies the GUID of the root key object to return. It
+ can be ``None`` to indicate that the selected key should be the most
+ recently created key starting not after the time indicated by *gkid*.
+
+        Bear in mind that, as the Microsoft Key Distribution Service caches root
+ keys, the most recently created key might not be the one that Windows
+ chooses."""
+
+ root_key_attrs = [
+ "cn",
+ "msKds-CreateTime",
+ "msKds-KDFAlgorithmID",
+ "msKds-KDFParam",
+ "msKds-RootKeyData",
+ "msKds-UseStartTime",
+ "msKds-Version",
+ ]
+
+ gkid_start_nt_time = gkid.start_nt_time()
+
+ exact_key_specified = root_key_id is not None
+ if exact_key_specified:
+ root_key_dn = self.get_root_key_container_dn(samdb)
+ root_key_dn.add_child(f"CN={root_key_id}")
+
+ try:
+ root_key_res = samdb.search(
+ root_key_dn, scope=ldb.SCOPE_BASE, attrs=root_key_attrs
+ )
+ except ldb.LdbError as err:
+ if err.args[0] == ldb.ERR_NO_SUCH_OBJECT:
+ raise GetKeyError(HRES_NTE_NO_KEY, "no such root key exists")
+
+ raise
+
+ root_key_object = root_key_res[0]
+ else:
+ root_keys = samdb.search(
+ self.get_root_key_container_dn(samdb),
+ scope=ldb.SCOPE_SUBTREE,
+ expression=f"(msKds-UseStartTime<={gkid_start_nt_time})",
+ attrs=root_key_attrs,
+ )
+ if not root_keys:
+ raise GetKeyError(
+ HRES_NTE_NO_KEY, "no root keys exist at specified time"
+ )
+
+ def root_key_create_time(key: RootKey) -> NtTime:
+ create_time = key.get("msKds-CreateTime", idx=0)
+ if create_time is None:
+ return NtTime(0)
+
+ return NtTime(int(create_time))
+
+ root_key_object = max(root_keys, key=root_key_create_time)
+
+ root_key_cn = root_key_object.get("cn", idx=0)
+ self.assertIsNotNone(root_key_cn)
+ root_key_id = misc.GUID(root_key_cn)
+
+ data = root_key_object.get("msKds-RootKeyData", idx=0)
+ self.assertIsNotNone(data)
+ if len(data) != KEY_LEN_BYTES:
+ raise GetKeyError(
+ HRES_NTE_BAD_KEY, f"root key data must be {KEY_LEN_BYTES} bytes"
+ )
+
+ use_start_nt_time = NtTime(
+ int(root_key_object.get("msKds-UseStartTime", idx=0))
+ )
+ if use_start_nt_time == 0:
+ raise GetKeyError(HRES_NTE_BAD_KEY, "root key effective time is 0")
+ use_start_nt_time = NtTime(
+ use_start_nt_time - NtTimeDelta(KEY_CYCLE_DURATION + MAX_CLOCK_SKEW)
+ )
+
+ if exact_key_specified and not (0 <= use_start_nt_time <= gkid_start_nt_time):
+ raise GetKeyError(HRES_E_INVALIDARG, "root key is not yet valid")
+
+ return root_key_object, root_key_id
+
+ def validate_get_key_request(
+ self, gkid: Gkid, current_gkid: Gkid, root_key_specified: bool
+ ) -> None:
+ if gkid > current_gkid:
+ raise GetKeyError(
+ HRES_E_INVALIDARG, "invalid request for a key from the future"
+ )
+
+ gkid_type = gkid.gkid_type()
+ if gkid_type is GkidType.DEFAULT:
+ derived_from = (
+ " derived from the specified root key" if root_key_specified else ""
+ )
+ raise NotImplementedError(
+ f"The latest group key{derived_from} is being requested."
+ )
+
+ if gkid_type is not GkidType.L2_SEED_KEY:
+ raise GetKeyError(
+ HRES_E_INVALIDARG, f"invalid request for {gkid_type.description()}"
+ )
+
+ def get_key(
+ self,
+ samdb: SamDB,
+ target_sd: bytes, # An NDR‐encoded valid security descriptor in self‐relative format.
+ root_key_id: Optional[misc.GUID],
+ gkid: Gkid,
+ *,
+ root_key_id_hint: Optional[misc.GUID] = None,
+ current_gkid: Optional[Gkid] = None,
+ ) -> SeedKeyPair:
+ """Emulate the ISDKey.GetKey() RPC method.
+
+ When passed a NULL root key ID, GetKey() may use a cached root key
+ rather than picking the most recently created applicable key as the
+ documentation implies. If it’s important to arrive at the same result as
+ Windows, pass a GUID in the *root_key_id_hint* parameter to specify a
+ particular root key to use."""
+
+ if current_gkid is None:
+ current_gkid = self.current_gkid()
+
+ root_key_specified = root_key_id is not None
+ if root_key_specified:
+ self.assertIsNone(
+ root_key_id_hint, "don’t provide both root key ID parameters"
+ )
+
+ self.validate_get_key_request(gkid, current_gkid, root_key_specified)
+
+ root_key_object, root_key_id = self.get_root_key_object(
+ samdb, root_key_id if root_key_specified else root_key_id_hint, gkid
+ )
+
+ if root_key_specified:
+ if gkid.l0_idx < current_gkid.l0_idx:
+ # All of the seed keys with an L0 index less than the current L0
+ # index are from the past and thus are safe to return. If the
+ # caller has requested a specific seed key with a past L0 index,
+ # return the L1 seed key (L0, 31, −1), from which any L1 or L2
+ # seed key having that L0 index can be derived.
+ l1_gkid = Gkid(gkid.l0_idx, 31, -1)
+ seed_key = self.compute_seed_key(
+ target_sd, root_key_id, root_key_object, l1_gkid
+ )
+ return SeedKeyPair(
+ seed_key.key,
+ None,
+ Gkid(gkid.l0_idx, 31, 31),
+ seed_key.hash_algorithm,
+ root_key_id,
+ )
+
+ # All of the previous seed keys with an L0 index equal to the
+ # current L0 index can be derived from the current seed key or from
+ # the next older L1 seed key.
+ gkid = current_gkid
+
+ if gkid.l2_idx == 31:
+ # The current seed key, and all previous seed keys with that same L0
+ # index, can be derived from the L1 seed key (L0, L1, 31).
+ l1_gkid = Gkid(gkid.l0_idx, gkid.l1_idx, -1)
+ seed_key = self.compute_seed_key(
+ target_sd, root_key_id, root_key_object, l1_gkid
+ )
+ return SeedKeyPair(
+ seed_key.key, None, gkid, seed_key.hash_algorithm, root_key_id
+ )
+
+ # Compute the L2 seed key to return.
+ seed_key = self.compute_seed_key(target_sd, root_key_id, root_key_object, gkid)
+
+ next_older_seed_key = None
+ if gkid.l1_idx != 0:
+ # From the current seed key can be derived only those seed keys that
+ # share its L1 and L2 indices. To be able to derive previous seed
+ # keys with older L1 indices, the caller must be given the next
+ # older L1 seed key as well.
+ next_older_l1_gkid = Gkid(gkid.l0_idx, gkid.l1_idx - 1, -1)
+ next_older_seed_key = self.compute_seed_key(
+ target_sd, root_key_id, root_key_object, next_older_l1_gkid
+ ).key
+
+ return SeedKeyPair(
+ next_older_seed_key,
+ seed_key.key,
+ gkid,
+ seed_key.hash_algorithm,
+ root_key_id,
+ )
+
+ def get_key_exact(
+ self,
+ samdb: SamDB,
+ target_sd: bytes, # An NDR‐encoded valid security descriptor in self‐relative format.
+ root_key_id: Optional[misc.GUID],
+ gkid: Gkid,
+ current_gkid: Optional[Gkid] = None,
+ ) -> GroupKey:
+ if current_gkid is None:
+ current_gkid = self.current_gkid()
+
+ root_key_specified = root_key_id is not None
+ self.validate_get_key_request(gkid, current_gkid, root_key_specified)
+
+ root_key_object, root_key_id = self.get_root_key_object(
+ samdb, root_key_id, gkid
+ )
+
+ return self.compute_seed_key(target_sd, root_key_id, root_key_object, gkid)
+
+ def get_root_key_data(self, root_key: RootKey) -> Tuple[bytes, Algorithm]:
+ version = root_key.get("msKds-Version", idx=0)
+ self.assertEqual(b"1", version)
+
+ algorithm_id = root_key.get("msKds-KDFAlgorithmID", idx=0)
+ self.assertEqual(b"SP800_108_CTR_HMAC", algorithm_id)
+
+ hash_algorithm = Algorithm.from_kdf_parameters(
+ root_key.get("msKds-KDFParam", idx=0)
+ )
+
+ root_key_data = root_key.get("msKds-RootKeyData", idx=0)
+ self.assertIsInstance(root_key_data, bytes)
+
+ return root_key_data, hash_algorithm
+
+ def compute_seed_key(
+ self,
+ target_sd: bytes,
+ root_key_id: misc.GUID,
+ root_key: RootKey,
+ target_gkid: Gkid,
+ ) -> GroupKey:
+ target_gkid_type = target_gkid.gkid_type()
+ self.assertIn(
+ target_gkid_type,
+ (GkidType.L1_SEED_KEY, GkidType.L2_SEED_KEY),
+ f"unexpected attempt to compute {target_gkid_type.description()}",
+ )
+
+ root_key_data, algorithm = self.get_root_key_data(root_key)
+ root_key_id_bytes = ndr_pack(root_key_id)
+
+ hash_algorithm = algorithm.algorithm()
+
+ # Derive the L0 seed key.
+ gkid = Gkid.l0_seed_key(target_gkid.l0_idx)
+ key = self.derive_key(root_key_data, root_key_id_bytes, hash_algorithm, gkid)
+
+ # Derive the L1 seed key.
+
+ gkid = gkid.derive_l1_seed_key()
+ key = self.derive_key(
+ key, root_key_id_bytes, hash_algorithm, gkid, target_sd=target_sd
+ )
+
+ while gkid.l1_idx != target_gkid.l1_idx:
+ gkid = gkid.derive_l1_seed_key()
+ key = self.derive_key(key, root_key_id_bytes, hash_algorithm, gkid)
+
+ # Derive the L2 seed key.
+ while gkid != target_gkid:
+ gkid = gkid.derive_l2_seed_key()
+ key = self.derive_key(key, root_key_id_bytes, hash_algorithm, gkid)
+
+ return GroupKey(key, gkid, algorithm, root_key_id)
+
+ def derive_key(
+ self,
+ key: bytes,
+ root_key_id_bytes: bytes,
+ hash_algorithm: hashes.HashAlgorithm,
+ gkid: Gkid,
+ *,
+ target_sd: Optional[bytes] = None,
+ ) -> bytes:
+ def u32_bytes(n: int) -> bytes:
+ return (n & 0xFFFF_FFFF).to_bytes(length=4, byteorder="little")
+
+ context = (
+ root_key_id_bytes
+ + u32_bytes(gkid.l0_idx)
+ + u32_bytes(gkid.l1_idx)
+ + u32_bytes(gkid.l2_idx)
+ )
+ if target_sd is not None:
+ context += target_sd
+ return self.kdf(hash_algorithm, key, context)
+
+ def kdf(
+ self,
+ hash_algorithm: hashes.HashAlgorithm,
+ key: bytes,
+ context: bytes,
+ *,
+ label="KDS service",
+ len_in_bytes=KEY_LEN_BYTES,
+ ) -> bytes:
+ label = label.encode("utf-16-le") + b"\x00\x00"
+ kdf = KBKDFHMAC(
+ algorithm=hash_algorithm,
+ mode=Mode.CounterMode,
+ length=len_in_bytes,
+ rlen=4,
+ llen=4,
+ location=CounterLocation.BeforeFixed,
+ label=label,
+ context=context,
+ fixed=None,
+ backend=default_backend(),
+ )
+ return kdf.derive(key)
+
+ def get_config_dn(self, samdb: SamDB, dn: str) -> ldb.Dn:
+ config_dn = samdb.get_config_basedn()
+ config_dn.add_child(dn)
+ return config_dn
+
+ def get_server_config_dn(self, samdb: SamDB) -> ldb.Dn:
+ # [MS-GKDI] has “CN=Sid Key Service” for “CN=Group Key Distribution
+ # Service”, and “CN=SID Key Server Configuration” for “CN=Group Key
+ # Distribution Service Server Configuration”.
+ return self.get_config_dn(
+ samdb,
+ "CN=Group Key Distribution Service Server Configuration,"
+ "CN=Server Configuration,"
+ "CN=Group Key Distribution Service,"
+ "CN=Services",
+ )
+
+ def get_root_key_container_dn(self, samdb: SamDB) -> ldb.Dn:
+ # [MS-GKDI] has “CN=Sid Key Service” for “CN=Group Key Distribution Service”.
+ return self.get_config_dn(
+ samdb,
+ "CN=Master Root Keys,CN=Group Key Distribution Service,CN=Services",
+ )
+
+ def create_root_key(
+ self,
+ samdb: SamDB,
+ domain_dn: ldb.Dn,
+ *,
+ use_start_time: Optional[Union[datetime.datetime, NtTime]] = None,
+ hash_algorithm: Optional[Algorithm] = Algorithm.SHA512,
+ guid: Optional[misc.GUID] = None,
+ data: Optional[bytes] = None,
+ ) -> misc.GUID:
+ # [MS-GKDI] 3.1.4.1.1, “Creating a New Root Key”, states that if the
+ # server receives a GetKey request and the root keys container in Active
+        # Directory is empty, then the server must create a new root key object
+ # based on the default Server Configuration object. Additional root keys
+ # are to be created based on either the default Server Configuration
+ # object or an updated one specifying optional configuration values.
+
+ guid_specified = guid is not None
+ if not guid_specified:
+ guid = misc.GUID(secrets.token_bytes(16))
+
+ if data is None:
+ data = secrets.token_bytes(KEY_LEN_BYTES)
+
+ create_time = current_nt_time = self.current_nt_time()
+
+ if use_start_time is None:
+ # Root keys created by Windows without the ‘-EffectiveImmediately’
+ # parameter have an effective time of exactly ten days in the
+ # future, presumably to allow time for replication.
+ #
+ # Microsoft’s documentation on creating a KDS root key, located at
+ # https://learn.microsoft.com/en-us/windows-server/security/group-managed-service-accounts/create-the-key-distribution-services-kds-root-key,
+ # claims to the contrary that domain controllers will only wait up
+ # to ten hours before allowing Group Managed Service Accounts to be
+ # created.
+ #
+ # The same page includes instructions for creating a root key with
+ # an effective time of ten hours in the past (for testing purposes),
+ # but I’m not sure why — the KDS will consider a key valid for use
+ # immediately after its start time has passed, without bothering to
+ # wait ten hours first. In fact, it will consider a key to be valid
+ # a full ten hours (plus clock skew) *before* its declared start
+ # time — intentional, or (conceivably) the result of an accidental
+ # negation?
+ current_interval_start_nt_time = Gkid.from_nt_time(
+ current_nt_time
+ ).start_nt_time()
+ use_start_time = NtTime(
+ current_interval_start_nt_time + KEY_CYCLE_DURATION + MAX_CLOCK_SKEW
+ )
+
+ if isinstance(use_start_time, datetime.datetime):
+ use_start_nt_time = nt_time_from_datetime(use_start_time)
+ else:
+ self.assertIsInstance(use_start_time, int)
+ use_start_nt_time = use_start_time
+
+ kdf_parameters = None
+ if hash_algorithm is not None:
+ kdf_parameters = gkdi.KdfParameters()
+ kdf_parameters.hash_algorithm = hash_algorithm.value
+ kdf_parameters = ndr_pack(kdf_parameters)
+
+ # These are the encoded p and g values, respectively, of the “2048‐bit
+ # MODP Group with 256‐bit Prime Order Subgroup” from RFC 5114 section
+ # 2.3.
+ field_order = (
+ b"\x87\xa8\xe6\x1d\xb4\xb6f<\xff\xbb\xd1\x9ce\x19Y\x99\x8c\xee\xf6\x08"
+ b"f\r\xd0\xf2],\xee\xd4C^;\x00\xe0\r\xf8\xf1\xd6\x19W\xd4\xfa\xf7\xdfE"
+ b"a\xb2\xaa0\x16\xc3\xd9\x114\to\xaa;\xf4)m\x83\x0e\x9a|"
+ b" \x9e\x0cd\x97Qz\xbd"
+ b'Z\x8a\x9d0k\xcfg\xed\x91\xf9\xe6r[GX\xc0"\xe0\xb1\xefBu\xbf{l[\xfc\x11'
+ b"\xd4_\x90\x88\xb9A\xf5N\xb1\xe5\x9b\xb8\xbc9\xa0\xbf\x120\x7f\\O\xdbp\xc5"
+ b"\x81\xb2?v\xb6:\xca\xe1\xca\xa6\xb7\x90-RRg5H\x8a\x0e\xf1<m\x9aQ\xbf\xa4\xab"
+ b":\xd84w\x96RM\x8e\xf6\xa1g\xb5\xa4\x18%\xd9g\xe1D\xe5\x14\x05d%"
+ b"\x1c\xca\xcb\x83\xe6\xb4\x86\xf6\xb3\xca?yqP`&\xc0\xb8W\xf6\x89\x96(V"
+ b"\xde\xd4\x01\n\xbd\x0b\xe6!\xc3\xa3\x96\nT\xe7\x10\xc3u\xf2cu\xd7\x01A\x03"
+ b"\xa4\xb5C0\xc1\x98\xaf\x12a\x16\xd2'n\x11q_i8w\xfa\xd7\xef\t\xca\xdb\tJ\xe9"
+ b"\x1e\x1a\x15\x97"
+ )
+ generator = (
+ b"?\xb3,\x9bs\x13M\x0b.wPf`\xed\xbdHL\xa7\xb1\x8f!\xef T\x07\xf4y:"
+ b"\x1a\x0b\xa1%\x10\xdb\xc1Pw\xbeF?\xffO\xedJ\xac\x0b\xb5U\xbe:l\x1b\x0ckG\xb1"
+ b"\xbc7s\xbf~\x8cob\x90\x12(\xf8\xc2\x8c\xbb\x18\xa5Z\xe3\x13A\x00\ne"
+ b"\x01\x96\xf91\xc7zW\xf2\xdd\xf4c\xe5\xe9\xec\x14Kw}\xe6*\xaa\xb8\xa8b"
+ b"\x8a\xc3v\xd2\x82\xd6\xed8d\xe6y\x82B\x8e\xbc\x83\x1d\x144\x8fo/\x91\x93"
+ b"\xb5\x04Z\xf2vqd\xe1\xdf\xc9g\xc1\xfb?.U\xa4\xbd\x1b\xff\xe8;\x9c\x80"
+ b"\xd0R\xb9\x85\xd1\x82\xea\n\xdb*;s\x13\xd3\xfe\x14\xc8HK\x1e\x05%\x88\xb9"
+ b"\xb7\xd2\xbb\xd2\xdf\x01a\x99\xec\xd0n\x15W\xcd\t\x15\xb35;\xbbd\xe0\xec7"
+ b"\x7f\xd0(7\r\xf9+R\xc7\x89\x14(\xcd\xc6~\xb6\x18KR=\x1d\xb2F\xc3/c\x07\x84"
+ b"\x90\xf0\x0e\xf8\xd6G\xd1H\xd4yTQ^#'\xcf\xef\x98\xc5\x82fKL\x0fl\xc4\x16Y"
+ )
+ self.assertEqual(len(field_order), len(generator))
+ key_length = len(field_order)
+
+ ffc_dh_parameters = gkdi.FfcDhParameters()
+ ffc_dh_parameters.field_order = list(field_order)
+ ffc_dh_parameters.generator = list(generator)
+ ffc_dh_parameters.key_length = key_length
+ ffc_dh_parameters = ndr_pack(ffc_dh_parameters)
+
+ root_key_dn = self.get_root_key_container_dn(samdb)
+ root_key_dn.add_child(f"CN={guid}")
+
+ # Avoid deleting root key objects without subsequently restarting the
+ # Microsoft Key Distribution Service. This service will keep its root
+ # key cached even after the corresponding AD object has been deleted,
+ # breaking later tests that try to look up the root key object.
+
+ details = {
+ "dn": root_key_dn,
+ "objectClass": "msKds-ProvRootKey",
+ "msKds-RootKeyData": data,
+ "msKds-CreateTime": str(create_time),
+ "msKds-UseStartTime": str(use_start_nt_time),
+ "msKds-DomainID": str(domain_dn),
+ "msKds-Version": "1", # comes from Server Configuration object.
+ "msKds-KDFAlgorithmID": (
+ "SP800_108_CTR_HMAC"
+ ), # comes from Server Configuration.
+ "msKds-SecretAgreementAlgorithmID": (
+ "DH"
+ ), # comes from Server Configuration.
+ "msKds-SecretAgreementParam": (
+ ffc_dh_parameters
+ ), # comes from Server Configuration.
+ "msKds-PublicKeyLength": "2048", # comes from Server Configuration.
+ "msKds-PrivateKeyLength": (
+ "512"
+ ), # comes from Server Configuration. [MS-GKDI] claims this defaults to ‘256’.
+ }
+ if kdf_parameters is not None:
+ details["msKds-KDFParam"] = (
+ kdf_parameters # comes from Server Configuration.
+ )
+
+ if guid_specified:
+ # A test may request that a root key have a specific GUID so that
+ # results may be reproducible. Ensure these keys are cleaned up
+ # afterwards.
+ self.addCleanup(delete_force, samdb, root_key_dn)
+ samdb.add(details)
+
+ return guid
diff --git a/python/samba/tests/glue.py b/python/samba/tests/glue.py
new file mode 100644
index 0000000..ac504b3
--- /dev/null
+++ b/python/samba/tests/glue.py
@@ -0,0 +1,90 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for the _glue Python bindings."""
+
+from samba import _glue
+from samba import param
+import samba.tests
+
+
+class GlueTests(samba.tests.TestCase):
+
+ def test_generate_random_str(self):
+ string = _glue.generate_random_str(10)
+ self.assertEqual(type(string), str)
+ self.assertEqual(len(string), 10)
+
+ def test_generate_random_password(self):
+ password = _glue.generate_random_password(5, 10)
+ self.assertEqual(type(password), str)
+ self.assertTrue(5 <= len(password) <= 10)
+
+ def test_unix2nttime(self):
+ self.assertEqual(_glue.unix2nttime(1), 116444736010000000)
+
+ def test_nttime2unix(self):
+ self.assertEqual(_glue.nttime2unix(116444736010000000), 1)
+
+ def test_float2nttime(self):
+ self.assertEqual(_glue.float2nttime(1.0), 116444736010000000)
+ self.assertEqual(_glue.float2nttime(1611058908.0), 132555325080000000)
+ # NTTIME has a resolution of 100ns
+ self.assertEqual(_glue.float2nttime(1611058908.1234567), 132555325081234567)
+ self.assertEqual(_glue.float2nttime(1611058908.123456789), 132555325081234567)
+
+ def test_nttime2float(self):
+ self.assertEqual(_glue.nttime2float(1), -11644473600.0)
+ self.assertEqual(_glue.nttime2float(0x7fffffffffffffff), 910692730085.4775)
+ self.assertEqual(_glue.nttime2float(0x8000000000000000), 910692730085.4775)
+ self.assertEqual(_glue.nttime2float(0xf000000000000000), 910692730085.4775)
+ self.assertEqual(_glue.nttime2float(116444736010000000), 1.0)
+ self.assertEqual(_glue.nttime2float(132555325080000000), 1611058908.0)
+ self.assertEqual(_glue.nttime2float(132555325081234567), 1611058908.1234567)
+ # NTTIME_OMIT (0) and NTTIME_FREEZE (UINT64_MAX) map to SAMBA_UTIME_OMIT (1)
+ self.assertEqual(_glue.nttime2float(0), 1.0)
+ self.assertEqual(_glue.nttime2float(0xffffffffffffffff), 1.0)
+
+ def test_nttime2string(self):
+ string = _glue.nttime2string(116444736010000000)
+ self.assertEqual(type(string), str)
+ self.assertIn('1970', string)
+
+ def test_debug_level(self):
+ prev_level = _glue.get_debug_level()
+ try:
+ self.assertIsNone(_glue.set_debug_level(0))
+ self.assertEqual(_glue.get_debug_level(), 0)
+ self.assertIsNone(_glue.set_debug_level(5))
+ self.assertEqual(_glue.get_debug_level(), 5)
+ finally:
+ _glue.set_debug_level(prev_level)
+
+ def test_interface_ips(self):
+ lp = param.LoadParm()
+ ips = _glue.interface_ips(lp)
+ self.assertEqual(type(ips), list)
+
+ def test_strcasecmp(self):
+ self.assertEqual(_glue.strcasecmp_m('aA', 'Aa'), 0)
+ self.assertNotEqual(_glue.strcasecmp_m('ab', 'Aa'), 0)
+
+ def test_strstr_m(self):
+ string = 'testing_string_num__one'
+ self.assertEqual(_glue.strstr_m(string, '_'), '_string_num__one')
+ self.assertEqual(_glue.strstr_m(string, '__'), '__one')
+ self.assertEqual(_glue.strstr_m(string, 'ring'), 'ring_num__one')
diff --git a/python/samba/tests/gpo.py b/python/samba/tests/gpo.py
new file mode 100644
index 0000000..9177eef
--- /dev/null
+++ b/python/samba/tests/gpo.py
@@ -0,0 +1,8192 @@
+# Unix SMB/CIFS implementation. Tests for smb manipulation
+# Copyright (C) David Mulder <dmulder@suse.com> 2018
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os, grp, pwd, re
+import errno
+from samba import gpo, tests
+from samba.gp.gpclass import register_gp_extension, list_gp_extensions, \
+ unregister_gp_extension, GPOStorage, get_gpo_list
+from samba.param import LoadParm
+from samba.gp.gpclass import check_refresh_gpo_list, check_safe_path, \
+ check_guid, parse_gpext_conf, atomic_write_conf, get_deleted_gpos_list
+from subprocess import Popen, PIPE
+from tempfile import NamedTemporaryFile, TemporaryDirectory
+from samba.gp import gpclass
+# Disable privilege dropping for testing
+gpclass.drop_privileges = lambda _, func, *args : func(*args)
+from samba.gp.gp_sec_ext import gp_krb_ext
+from samba.gp.gp_scripts_ext import gp_scripts_ext, gp_user_scripts_ext
+from samba.gp.gp_sudoers_ext import gp_sudoers_ext
+from samba.gp.vgp_sudoers_ext import vgp_sudoers_ext
+from samba.gp.vgp_symlink_ext import vgp_symlink_ext
+from samba.gp.gpclass import gp_inf_ext
+from samba.gp.gp_smb_conf_ext import gp_smb_conf_ext
+from samba.gp.vgp_files_ext import vgp_files_ext
+from samba.gp.vgp_openssh_ext import vgp_openssh_ext
+from samba.gp.vgp_startup_scripts_ext import vgp_startup_scripts_ext
+from samba.gp.vgp_motd_ext import vgp_motd_ext
+from samba.gp.vgp_issue_ext import vgp_issue_ext
+from samba.gp.vgp_access_ext import vgp_access_ext
+from samba.gp.gp_gnome_settings_ext import gp_gnome_settings_ext
+from samba.gp import gp_cert_auto_enroll_ext as cae
+from samba.gp.gp_firefox_ext import gp_firefox_ext
+from samba.gp.gp_chromium_ext import gp_chromium_ext
+from samba.gp.gp_firewalld_ext import gp_firewalld_ext
+from samba.credentials import Credentials
+from samba.gp.gp_msgs_ext import gp_msgs_ext
+from samba.gp.gp_centrify_sudoers_ext import gp_centrify_sudoers_ext
+from samba.gp.gp_centrify_crontab_ext import gp_centrify_crontab_ext, \
+ gp_user_centrify_crontab_ext
+from samba.gp.gp_drive_maps_ext import gp_drive_maps_user_ext
+from samba.common import get_bytes
+from samba.dcerpc import preg
+from samba.ndr import ndr_pack
+import codecs
+from shutil import copyfile
+import xml.etree.ElementTree as etree
+import hashlib
+from samba.gp_parse.gp_pol import GPPolParser
+from glob import glob
+from configparser import ConfigParser
+from samba.gp.gpclass import get_dc_hostname, expand_pref_variables
+from samba import Ldb
+import ldb as _ldb
+from samba.auth import system_session
+import json
+from shutil import which
+import requests
+from cryptography import x509
+from cryptography.hazmat.primitives import hashes
+from cryptography.hazmat.backends import default_backend
+from cryptography.hazmat.primitives.asymmetric import rsa
+from cryptography.hazmat.primitives.serialization import Encoding
+from datetime import datetime, timedelta, timezone
+from samba.samba3 import param as s3param
+
+def dummy_certificate():
+ name = x509.Name([
+ x509.NameAttribute(x509.NameOID.COMMON_NAME,
+ os.environ.get('SERVER'))
+ ])
+ cons = x509.BasicConstraints(ca=True, path_length=0)
+ now = datetime.now(tz=timezone.utc)
+
+ key = rsa.generate_private_key(public_exponent=65537, key_size=2048,
+ backend=default_backend())
+
+ cert = (
+ x509.CertificateBuilder()
+ .subject_name(name)
+ .issuer_name(name)
+ .public_key(key.public_key())
+ .serial_number(1000)
+ .not_valid_before(now)
+ .not_valid_after(now + timedelta(seconds=300))
+ .add_extension(cons, False)
+ .sign(key, hashes.SHA256(), default_backend())
+ )
+
+ return cert.public_bytes(encoding=Encoding.DER)
+
+# Dummy requests structure for Certificate Auto Enrollment
+class dummy_requests(object):
+ class exceptions(object):
+ ConnectionError = Exception
+
+ def __init__(self, want_exception=False):
+ self.want_exception = want_exception
+
+ def get(self, url=None, params=None):
+ if self.want_exception:
+ raise self.exceptions.ConnectionError
+
+ dummy = requests.Response()
+ dummy._content = dummy_certificate()
+ dummy.headers = {'Content-Type': 'application/x-x509-ca-cert'}
+ return dummy
+
+realm = os.environ.get('REALM')
+policies = realm + '/POLICIES'
+realm = realm.lower()
+poldir = r'\\{0}\sysvol\{0}\Policies'.format(realm)
+# the first part of the base DN varies by testenv. Work it out from the realm
+base_dn = 'DC={0},DC=samba,DC=example,DC=com'.format(realm.split('.')[0])
+dspath = 'CN=Policies,CN=System,' + base_dn
+gpt_data = '[General]\nVersion=%d'
+
+gnome_test_reg_pol = \
+br"""
+<?xml version="1.0" encoding="utf-8"?>
+<PolFile num_entries="26" signature="PReg" version="1">
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>GNOME Settings\Lock Down Settings</Key>
+ <ValueName>Lock Down Enabled Extensions</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>GNOME Settings\Lock Down Settings</Key>
+ <ValueName>Lock Down Specific Settings</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>GNOME Settings\Lock Down Settings</Key>
+ <ValueName>Disable Printing</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>GNOME Settings\Lock Down Settings</Key>
+ <ValueName>Disable File Saving</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>GNOME Settings\Lock Down Settings</Key>
+ <ValueName>Disable Command-Line Access</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>GNOME Settings\Lock Down Settings</Key>
+ <ValueName>Disallow Login Using a Fingerprint</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>GNOME Settings\Lock Down Settings</Key>
+ <ValueName>Disable User Logout</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>GNOME Settings\Lock Down Settings</Key>
+ <ValueName>Disable User Switching</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>GNOME Settings\Lock Down Settings</Key>
+ <ValueName>Disable Repartitioning</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>GNOME Settings\Lock Down Settings</Key>
+ <ValueName>Whitelisted Online Accounts</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>GNOME Settings\Lock Down Settings</Key>
+ <ValueName>Compose Key</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>GNOME Settings\Lock Down Settings</Key>
+ <ValueName>Dim Screen when User is Idle</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>GNOME Settings\Lock Down Settings</Key>
+ <ValueName>Enabled Extensions</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>GNOME Settings\Lock Down Settings\Compose Key</Key>
+ <ValueName>Key Name</ValueName>
+ <Value>Right Alt</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>GNOME Settings\Lock Down Settings\Dim Screen when User is Idle</Key>
+ <ValueName>Delay</ValueName>
+ <Value>300</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>GNOME Settings\Lock Down Settings\Dim Screen when User is Idle</Key>
+ <ValueName>Dim Idle Brightness</ValueName>
+ <Value>30</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>GNOME Settings\Lock Down Settings\Enabled Extensions</Key>
+ <ValueName>**delvals.</ValueName>
+ <Value> </Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>GNOME Settings\Lock Down Settings\Enabled Extensions</Key>
+ <ValueName>myextension1@myname.example.com</ValueName>
+ <Value>myextension1@myname.example.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>GNOME Settings\Lock Down Settings\Enabled Extensions</Key>
+ <ValueName>myextension2@myname.example.com</ValueName>
+ <Value>myextension2@myname.example.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>GNOME Settings\Lock Down Settings\Lock Down Specific Settings</Key>
+ <ValueName>**delvals.</ValueName>
+ <Value> </Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>GNOME Settings\Lock Down Settings\Lock Down Specific Settings</Key>
+ <ValueName>/org/gnome/desktop/background/picture-uri</ValueName>
+ <Value>/org/gnome/desktop/background/picture-uri</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>GNOME Settings\Lock Down Settings\Lock Down Specific Settings</Key>
+ <ValueName>/org/gnome/desktop/background/picture-options</ValueName>
+ <Value>/org/gnome/desktop/background/picture-options</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>GNOME Settings\Lock Down Settings\Lock Down Specific Settings</Key>
+ <ValueName>/org/gnome/desktop/background/primary-color</ValueName>
+ <Value>/org/gnome/desktop/background/primary-color</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>GNOME Settings\Lock Down Settings\Lock Down Specific Settings</Key>
+ <ValueName>/org/gnome/desktop/background/secondary-color</ValueName>
+ <Value>/org/gnome/desktop/background/secondary-color</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>GNOME Settings\Lock Down Settings\Whitelisted Online Accounts</Key>
+ <ValueName>**delvals.</ValueName>
+ <Value> </Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>GNOME Settings\Lock Down Settings\Whitelisted Online Accounts</Key>
+ <ValueName>google</ValueName>
+ <Value>google</Value>
+ </Entry>
+</PolFile>
+"""
+
+auto_enroll_reg_pol = \
+br"""
+<?xml version="1.0" encoding="utf-8"?>
+<PolFile num_entries="3" signature="PReg" version="1">
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Microsoft\Cryptography\AutoEnrollment</Key>
+ <ValueName>AEPolicy</ValueName>
+ <Value>7</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Microsoft\Cryptography\AutoEnrollment</Key>
+ <ValueName>OfflineExpirationPercent</ValueName>
+ <Value>10</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Microsoft\Cryptography\AutoEnrollment</Key>
+ <ValueName>OfflineExpirationStoreNames</ValueName>
+ <Value>MY</Value>
+ </Entry>
+</PolFile>
+"""
+
+auto_enroll_unchecked_reg_pol = \
+br"""
+<?xml version="1.0" encoding="utf-8"?>
+<PolFile num_entries="3" signature="PReg" version="1">
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Microsoft\Cryptography\AutoEnrollment</Key>
+ <ValueName>AEPolicy</ValueName>
+ <Value>0</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Microsoft\Cryptography\AutoEnrollment</Key>
+ <ValueName>OfflineExpirationPercent</ValueName>
+ <Value>10</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Microsoft\Cryptography\AutoEnrollment</Key>
+ <ValueName>OfflineExpirationStoreNames</ValueName>
+ <Value>MY</Value>
+ </Entry>
+</PolFile>
+"""
+
+advanced_enroll_reg_pol = \
+br"""
+<?xml version="1.0" encoding="utf-8"?>
+<PolFile num_entries="30" signature="PReg" version="1">
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Microsoft\Cryptography</Key>
+ <ValueName>**DeleteKeys</ValueName>
+ <Value>Software\Policies\Microsoft\Cryptography\PolicyServers</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Microsoft\Cryptography\AutoEnrollment</Key>
+ <ValueName>AEPolicy</ValueName>
+ <Value>7</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Microsoft\Cryptography\AutoEnrollment</Key>
+ <ValueName>OfflineExpirationPercent</ValueName>
+ <Value>25</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Microsoft\Cryptography\AutoEnrollment</Key>
+ <ValueName>OfflineExpirationStoreNames</ValueName>
+ <Value>MY</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Microsoft\Cryptography\PolicyServers</Key>
+ <ValueName/>
+ <Value>{5AD0BE6D-3393-4940-BFC3-6E19555A8919}</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Microsoft\Cryptography\PolicyServers</Key>
+ <ValueName>Flags</ValueName>
+ <Value>0</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Microsoft\Cryptography\PolicyServers\37c9dc30f207f27f61a2f7c3aed598a6e2920b54</Key>
+ <ValueName>URL</ValueName>
+ <Value>LDAP:</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Microsoft\Cryptography\PolicyServers\37c9dc30f207f27f61a2f7c3aed598a6e2920b54</Key>
+ <ValueName>PolicyID</ValueName>
+ <Value>%s</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Microsoft\Cryptography\PolicyServers\37c9dc30f207f27f61a2f7c3aed598a6e2920b54</Key>
+ <ValueName>FriendlyName</ValueName>
+ <Value>Example</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Microsoft\Cryptography\PolicyServers\37c9dc30f207f27f61a2f7c3aed598a6e2920b54</Key>
+ <ValueName>Flags</ValueName>
+ <Value>16</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Microsoft\Cryptography\PolicyServers\37c9dc30f207f27f61a2f7c3aed598a6e2920b54</Key>
+ <ValueName>AuthFlags</ValueName>
+ <Value>2</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Microsoft\Cryptography\PolicyServers\37c9dc30f207f27f61a2f7c3aed598a6e2920b54</Key>
+ <ValueName>Cost</ValueName>
+ <Value>2147483645</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Microsoft\Cryptography\PolicyServers\144bdbb8e4717c26e408f3c9a0cb8d6cfacbcbbe</Key>
+ <ValueName>URL</ValueName>
+ <Value>https://example2.com/ADPolicyProvider_CEP_Certificate/service.svc/CEP</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Microsoft\Cryptography\PolicyServers\144bdbb8e4717c26e408f3c9a0cb8d6cfacbcbbe</Key>
+ <ValueName>PolicyID</ValueName>
+ <Value>%s</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Microsoft\Cryptography\PolicyServers\144bdbb8e4717c26e408f3c9a0cb8d6cfacbcbbe</Key>
+ <ValueName>FriendlyName</ValueName>
+ <Value>Example2</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Microsoft\Cryptography\PolicyServers\144bdbb8e4717c26e408f3c9a0cb8d6cfacbcbbe</Key>
+ <ValueName>Flags</ValueName>
+ <Value>16</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Microsoft\Cryptography\PolicyServers\144bdbb8e4717c26e408f3c9a0cb8d6cfacbcbbe</Key>
+ <ValueName>AuthFlags</ValueName>
+ <Value>8</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Microsoft\Cryptography\PolicyServers\144bdbb8e4717c26e408f3c9a0cb8d6cfacbcbbe</Key>
+ <ValueName>Cost</ValueName>
+ <Value>10</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Microsoft\Cryptography\PolicyServers\20d46e856e9b9746c0b1265c328f126a7b3283a9</Key>
+ <ValueName>URL</ValueName>
+ <Value>https://example0.com/ADPolicyProvider_CEP_Kerberos/service.svc/CEP</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Microsoft\Cryptography\PolicyServers\20d46e856e9b9746c0b1265c328f126a7b3283a9</Key>
+ <ValueName>PolicyID</ValueName>
+ <Value>%s</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Microsoft\Cryptography\PolicyServers\20d46e856e9b9746c0b1265c328f126a7b3283a9</Key>
+ <ValueName>FriendlyName</ValueName>
+ <Value>Example0</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Microsoft\Cryptography\PolicyServers\20d46e856e9b9746c0b1265c328f126a7b3283a9</Key>
+ <ValueName>Flags</ValueName>
+ <Value>16</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Microsoft\Cryptography\PolicyServers\20d46e856e9b9746c0b1265c328f126a7b3283a9</Key>
+ <ValueName>AuthFlags</ValueName>
+ <Value>2</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Microsoft\Cryptography\PolicyServers\20d46e856e9b9746c0b1265c328f126a7b3283a9</Key>
+ <ValueName>Cost</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Microsoft\Cryptography\PolicyServers\855b5246433a48402ac4f5c3427566df26ccc9ac</Key>
+ <ValueName>URL</ValueName>
+ <Value>https://example1.com/ADPolicyProvider_CEP_Kerberos/service.svc/CEP</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Microsoft\Cryptography\PolicyServers\855b5246433a48402ac4f5c3427566df26ccc9ac</Key>
+ <ValueName>PolicyID</ValueName>
+ <Value>%s</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Microsoft\Cryptography\PolicyServers\855b5246433a48402ac4f5c3427566df26ccc9ac</Key>
+ <ValueName>FriendlyName</ValueName>
+ <Value>Example1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Microsoft\Cryptography\PolicyServers\855b5246433a48402ac4f5c3427566df26ccc9ac</Key>
+ <ValueName>Flags</ValueName>
+ <Value>16</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Microsoft\Cryptography\PolicyServers\855b5246433a48402ac4f5c3427566df26ccc9ac</Key>
+ <ValueName>AuthFlags</ValueName>
+ <Value>2</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Microsoft\Cryptography\PolicyServers\855b5246433a48402ac4f5c3427566df26ccc9ac</Key>
+ <ValueName>Cost</ValueName>
+ <Value>1</Value>
+ </Entry>
+</PolFile>
+"""
+
+firefox_reg_pol = \
+b"""
+<?xml version="1.0" encoding="utf-8"?>
+<PolFile num_entries="241" signature="PReg" version="1">
+ <Entry type="7" type_name="REG_MULTI_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox</Key>
+ <ValueName>ExtensionSettings</ValueName>
+ <Value>{ &quot;*&quot;: { &quot;blocked_install_message&quot;: &quot;Custom error message.&quot;, &quot;install_sources&quot;: [&quot;about:addons&quot;,&quot;https://addons.mozilla.org/&quot;], &quot;installation_mode&quot;: &quot;blocked&quot;, &quot;allowed_types&quot;: [&quot;extension&quot;] }, &quot;uBlock0@raymondhill.net&quot;: { &quot;installation_mode&quot;: &quot;force_installed&quot;, &quot;install_url&quot;: &quot;https://addons.mozilla.org/firefox/downloads/latest/ublock-origin/latest.xpi&quot; }, &quot;https-everywhere@eff.org&quot;: { &quot;installation_mode&quot;: &quot;allowed&quot; } }</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox</Key>
+ <ValueName>ExtensionUpdate</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox</Key>
+ <ValueName>SearchSuggestEnabled</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox</Key>
+ <ValueName>AppAutoUpdate</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox</Key>
+ <ValueName>AppUpdateURL</ValueName>
+ <Value>https://yoursite.com</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox</Key>
+ <ValueName>BlockAboutAddons</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox</Key>
+ <ValueName>BlockAboutConfig</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox</Key>
+ <ValueName>BlockAboutProfiles</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox</Key>
+ <ValueName>BlockAboutSupport</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox</Key>
+ <ValueName>CaptivePortal</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="2" type_name="REG_EXPAND_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox</Key>
+ <ValueName>DefaultDownloadDirectory</ValueName>
+ <Value>${home}/Downloads</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox</Key>
+ <ValueName>DisableAppUpdate</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox</Key>
+ <ValueName>DisableBuiltinPDFViewer</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox</Key>
+ <ValueName>DisableDefaultBrowserAgent</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox</Key>
+ <ValueName>DisableDeveloperTools</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox</Key>
+ <ValueName>DisableFeedbackCommands</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox</Key>
+ <ValueName>DisableFirefoxAccounts</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox</Key>
+ <ValueName>DisableFirefoxScreenshots</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox</Key>
+ <ValueName>DisableFirefoxStudies</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox</Key>
+ <ValueName>DisableForgetButton</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox</Key>
+ <ValueName>DisableFormHistory</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox</Key>
+ <ValueName>DisableMasterPasswordCreation</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox</Key>
+ <ValueName>DisablePasswordReveal</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox</Key>
+ <ValueName>DisablePocket</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox</Key>
+ <ValueName>DisablePrivateBrowsing</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox</Key>
+ <ValueName>DisableProfileImport</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox</Key>
+ <ValueName>DisableProfileRefresh</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox</Key>
+ <ValueName>DisableSafeMode</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox</Key>
+ <ValueName>DisableSetDesktopBackground</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox</Key>
+ <ValueName>DisableSystemAddonUpdate</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox</Key>
+ <ValueName>DisableTelemetry</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox</Key>
+ <ValueName>DisplayBookmarksToolbar</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox</Key>
+ <ValueName>DisplayMenuBar</ValueName>
+ <Value>default-on</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox</Key>
+ <ValueName>DontCheckDefaultBrowser</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="2" type_name="REG_EXPAND_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox</Key>
+ <ValueName>DownloadDirectory</ValueName>
+ <Value>${home}/Downloads</Value>
+ </Entry>
+ <Entry type="7" type_name="REG_MULTI_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox</Key>
+ <ValueName>Handlers</ValueName>
+ <Value>{ &quot;mimeTypes&quot;: { &quot;application/msword&quot;: { &quot;action&quot;: &quot;useSystemDefault&quot;, &quot;ask&quot;: true } }, &quot;schemes&quot;: { &quot;mailto&quot;: { &quot;action&quot;: &quot;useHelperApp&quot;, &quot;ask&quot;: true, &quot;handlers&quot;: [{ &quot;name&quot;: &quot;Gmail&quot;, &quot;uriTemplate&quot;: &quot;https://mail.google.com/mail/?extsrc=mailto&amp;url=%s&quot; }] } }, &quot;extensions&quot;: { &quot;pdf&quot;: { &quot;action&quot;: &quot;useHelperApp&quot;, &quot;ask&quot;: true, &quot;handlers&quot;: [{ &quot;name&quot;: &quot;Adobe Acrobat&quot;, &quot;path&quot;: &quot;/usr/bin/acroread&quot; }] } } }</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox</Key>
+ <ValueName>HardwareAcceleration</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="7" type_name="REG_MULTI_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox</Key>
+ <ValueName>ManagedBookmarks</ValueName>
+ <Value>[ { &quot;toplevel_name&quot;: &quot;My managed bookmarks folder&quot; }, { &quot;url&quot;: &quot;example.com&quot;, &quot;name&quot;: &quot;Example&quot; }, { &quot;name&quot;: &quot;Mozilla links&quot;, &quot;children&quot;: [ { &quot;url&quot;: &quot;https://mozilla.org&quot;, &quot;name&quot;: &quot;Mozilla.org&quot; }, { &quot;url&quot;: &quot;https://support.mozilla.org/&quot;, &quot;name&quot;: &quot;SUMO&quot; } ] } ]</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox</Key>
+ <ValueName>NetworkPrediction</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox</Key>
+ <ValueName>NewTabPage</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox</Key>
+ <ValueName>NoDefaultBookmarks</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox</Key>
+ <ValueName>OfferToSaveLogins</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox</Key>
+ <ValueName>OfferToSaveLoginsDefault</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox</Key>
+ <ValueName>OverrideFirstRunPage</ValueName>
+ <Value>http://example.org</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox</Key>
+ <ValueName>OverridePostUpdatePage</ValueName>
+ <Value>http://example.org</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox</Key>
+ <ValueName>PasswordManagerEnabled</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="7" type_name="REG_MULTI_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox</Key>
+ <ValueName>Preferences</ValueName>
+ <Value>{ &quot;accessibility.force_disabled&quot;: { &quot;Value&quot;: 1, &quot;Status&quot;: &quot;default&quot; }, &quot;browser.cache.disk.parent_directory&quot;: { &quot;Value&quot;: &quot;SOME_NATIVE_PATH&quot;, &quot;Status&quot;: &quot;user&quot; }, &quot;browser.tabs.warnOnClose&quot;: { &quot;Value&quot;: false, &quot;Status&quot;: &quot;locked&quot; } }</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox</Key>
+ <ValueName>PrimaryPassword</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox</Key>
+ <ValueName>PromptForDownloadLocation</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\RequestedLocales</Key>
+ <ValueName>**delvals.</ValueName>
+ <Value> </Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\RequestedLocales</Key>
+ <ValueName>1</ValueName>
+ <Value>de</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\RequestedLocales</Key>
+ <ValueName>2</ValueName>
+ <Value>en-US</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox</Key>
+ <ValueName>SSLVersionMax</ValueName>
+ <Value>tls1.3</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox</Key>
+ <ValueName>SSLVersionMin</ValueName>
+ <Value>tls1.3</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox</Key>
+ <ValueName>SearchBar</ValueName>
+ <Value>unified</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Authentication</Key>
+ <ValueName>Locked</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Authentication</Key>
+ <ValueName>PrivateBrowsing</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Authentication\\AllowNonFQDN</Key>
+ <ValueName>NTLM</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Authentication\\AllowNonFQDN</Key>
+ <ValueName>SPNEGO</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Authentication\\AllowProxies</Key>
+ <ValueName>NTLM</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Authentication\\AllowProxies</Key>
+ <ValueName>SPNEGO</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Authentication\\Delegated</Key>
+ <ValueName>**delvals.</ValueName>
+ <Value> </Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Authentication\\Delegated</Key>
+ <ValueName>1</ValueName>
+ <Value>mydomain.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Authentication\\Delegated</Key>
+ <ValueName>2</ValueName>
+ <Value>https://myotherdomain.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Authentication\\NTLM</Key>
+ <ValueName>**delvals.</ValueName>
+ <Value> </Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Authentication\\NTLM</Key>
+ <ValueName>1</ValueName>
+ <Value>mydomain.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Authentication\\NTLM</Key>
+ <ValueName>2</ValueName>
+ <Value>https://myotherdomain.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Authentication\\SPNEGO</Key>
+ <ValueName>**delvals.</ValueName>
+ <Value> </Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Authentication\\SPNEGO</Key>
+ <ValueName>1</ValueName>
+ <Value>mydomain.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Authentication\\SPNEGO</Key>
+ <ValueName>2</ValueName>
+ <Value>https://myotherdomain.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Bookmarks\\1</Key>
+ <ValueName>Title</ValueName>
+ <Value>Example</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Bookmarks\\1</Key>
+ <ValueName>URL</ValueName>
+ <Value>https://example.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Bookmarks\\1</Key>
+ <ValueName>Favicon</ValueName>
+ <Value>https://example.com/favicon.ico</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Bookmarks\\1</Key>
+ <ValueName>Placement</ValueName>
+ <Value>menu</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Bookmarks\\1</Key>
+ <ValueName>Folder</ValueName>
+ <Value>FolderName</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Bookmarks\\10</Key>
+ <ValueName>Title</ValueName>
+ <Value>Samba</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Bookmarks\\10</Key>
+ <ValueName>URL</ValueName>
+ <Value>www.samba.org</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Bookmarks\\10</Key>
+ <ValueName>Favicon</ValueName>
+ <Value/>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Bookmarks\\10</Key>
+ <ValueName>Placement</ValueName>
+ <Value>toolbar</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Bookmarks\\10</Key>
+ <ValueName>Folder</ValueName>
+ <Value/>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Cookies</Key>
+ <ValueName>AcceptThirdParty</ValueName>
+ <Value>never</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Cookies</Key>
+ <ValueName>Default</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Cookies</Key>
+ <ValueName>ExpireAtSessionEnd</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Cookies</Key>
+ <ValueName>Locked</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Cookies</Key>
+ <ValueName>RejectTracker</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Cookies\\Allow</Key>
+ <ValueName>**delvals.</ValueName>
+ <Value> </Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Cookies\\Allow</Key>
+ <ValueName>1</ValueName>
+ <Value>http://example.org/</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Cookies\\AllowSession</Key>
+ <ValueName>**delvals.</ValueName>
+ <Value> </Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Cookies\\AllowSession</Key>
+ <ValueName>1</ValueName>
+ <Value>http://example.edu/</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Cookies\\Block</Key>
+ <ValueName>**delvals.</ValueName>
+ <Value> </Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Cookies\\Block</Key>
+ <ValueName>1</ValueName>
+ <Value>http://example.edu/</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\DisabledCiphers</Key>
+ <ValueName>TLS_DHE_RSA_WITH_AES_128_CBC_SHA</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\DisabledCiphers</Key>
+ <ValueName>TLS_DHE_RSA_WITH_AES_256_CBC_SHA</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\DisabledCiphers</Key>
+ <ValueName>TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\DisabledCiphers</Key>
+ <ValueName>TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\DisabledCiphers</Key>
+ <ValueName>TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\DisabledCiphers</Key>
+ <ValueName>TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\DisabledCiphers</Key>
+ <ValueName>TLS_RSA_WITH_3DES_EDE_CBC_SHA</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\DisabledCiphers</Key>
+ <ValueName>TLS_RSA_WITH_AES_128_CBC_SHA</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\DisabledCiphers</Key>
+ <ValueName>TLS_RSA_WITH_AES_128_GCM_SHA256</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\DisabledCiphers</Key>
+ <ValueName>TLS_RSA_WITH_AES_256_CBC_SHA</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\DisabledCiphers</Key>
+ <ValueName>TLS_RSA_WITH_AES_256_GCM_SHA384</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\DisableSecurityBypass</Key>
+ <ValueName>InvalidCertificate</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\DisableSecurityBypass</Key>
+ <ValueName>SafeBrowsing</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\DNSOverHTTPS</Key>
+ <ValueName>Enabled</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\DNSOverHTTPS</Key>
+ <ValueName>Locked</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\DNSOverHTTPS</Key>
+ <ValueName>ProviderURL</ValueName>
+ <Value>URL_TO_ALTERNATE_PROVIDER</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\DNSOverHTTPS\\ExcludedDomains</Key>
+ <ValueName>**delvals.</ValueName>
+ <Value> </Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\DNSOverHTTPS\\ExcludedDomains</Key>
+ <ValueName>1</ValueName>
+ <Value>example.com</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\EnableTrackingProtection</Key>
+ <ValueName>Value</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\EnableTrackingProtection</Key>
+ <ValueName>Cryptomining</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\EnableTrackingProtection</Key>
+ <ValueName>Fingerprinting</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\EnableTrackingProtection</Key>
+ <ValueName>Locked</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\EnableTrackingProtection\\Exceptions</Key>
+ <ValueName>**delvals.</ValueName>
+ <Value> </Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\EnableTrackingProtection\\Exceptions</Key>
+ <ValueName>1</ValueName>
+ <Value>https://example.com</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\EncryptedMediaExtensions</Key>
+ <ValueName>Enabled</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\EncryptedMediaExtensions</Key>
+ <ValueName>Locked</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Extensions\\Install</Key>
+ <ValueName>**delvals.</ValueName>
+ <Value> </Value>
+ </Entry>
+ <Entry type="2" type_name="REG_EXPAND_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Extensions\\Install</Key>
+ <ValueName>1</ValueName>
+ <Value>https://addons.mozilla.org/firefox/downloads/somefile.xpi</Value>
+ </Entry>
+ <Entry type="2" type_name="REG_EXPAND_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Extensions\\Install</Key>
+ <ValueName>2</ValueName>
+ <Value>//path/to/xpi</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Extensions\\Locked</Key>
+ <ValueName>**delvals.</ValueName>
+ <Value> </Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Extensions\\Locked</Key>
+ <ValueName>1</ValueName>
+ <Value>addon_id@mozilla.org</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Extensions\\Uninstall</Key>
+ <ValueName>**delvals.</ValueName>
+ <Value> </Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Extensions\\Uninstall</Key>
+ <ValueName>1</ValueName>
+ <Value>bad_addon_id@mozilla.org</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\FirefoxHome</Key>
+ <ValueName>Search</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\FirefoxHome</Key>
+ <ValueName>TopSites</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\FirefoxHome</Key>
+ <ValueName>Highlights</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\FirefoxHome</Key>
+ <ValueName>Pocket</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\FirefoxHome</Key>
+ <ValueName>Snippets</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\FirefoxHome</Key>
+ <ValueName>Locked</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\FlashPlugin</Key>
+ <ValueName>Default</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\FlashPlugin</Key>
+ <ValueName>Locked</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\FlashPlugin\\Allow</Key>
+ <ValueName>**delvals.</ValueName>
+ <Value> </Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\FlashPlugin\\Allow</Key>
+ <ValueName>1</ValueName>
+ <Value>http://example.org/</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\FlashPlugin\\Block</Key>
+ <ValueName>**delvals.</ValueName>
+ <Value> </Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\FlashPlugin\\Block</Key>
+ <ValueName>1</ValueName>
+ <Value>http://example.edu/</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Homepage</Key>
+ <ValueName>StartPage</ValueName>
+ <Value>homepage</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Homepage</Key>
+ <ValueName>URL</ValueName>
+ <Value>http://example.com/</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Homepage</Key>
+ <ValueName>Locked</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Homepage\\Additional</Key>
+ <ValueName>**delvals.</ValueName>
+ <Value> </Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Homepage\\Additional</Key>
+ <ValueName>1</ValueName>
+ <Value>http://example.org/</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Homepage\\Additional</Key>
+ <ValueName>2</ValueName>
+ <Value>http://example.edu/</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\InstallAddonsPermission</Key>
+ <ValueName>Default</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\InstallAddonsPermission\\Allow</Key>
+ <ValueName>**delvals.</ValueName>
+ <Value> </Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\InstallAddonsPermission\\Allow</Key>
+ <ValueName>1</ValueName>
+ <Value>http://example.org/</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\InstallAddonsPermission\\Allow</Key>
+ <ValueName>2</ValueName>
+ <Value>http://example.edu/</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\LocalFileLinks</Key>
+ <ValueName>**delvals.</ValueName>
+ <Value> </Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\LocalFileLinks</Key>
+ <ValueName>1</ValueName>
+ <Value>http://example.org/</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\LocalFileLinks</Key>
+ <ValueName>2</ValueName>
+ <Value>http://example.edu/</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\PDFjs</Key>
+ <ValueName>EnablePermissions</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\PDFjs</Key>
+ <ValueName>Enabled</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Permissions\\Autoplay</Key>
+ <ValueName>Default</ValueName>
+ <Value>block-audio</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Permissions\\Autoplay</Key>
+ <ValueName>Locked</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Permissions\\Autoplay\\Allow</Key>
+ <ValueName>**delvals.</ValueName>
+ <Value> </Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Permissions\\Autoplay\\Allow</Key>
+ <ValueName>1</ValueName>
+ <Value>https://example.org</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Permissions\\Autoplay\\Block</Key>
+ <ValueName>**delvals.</ValueName>
+ <Value> </Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Permissions\\Autoplay\\Block</Key>
+ <ValueName>1</ValueName>
+ <Value>https://example.edu</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Permissions\\Camera</Key>
+ <ValueName>BlockNewRequests</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Permissions\\Camera</Key>
+ <ValueName>Locked</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Permissions\\Camera\\Allow</Key>
+ <ValueName>**delvals.</ValueName>
+ <Value> </Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Permissions\\Camera\\Allow</Key>
+ <ValueName>1</ValueName>
+ <Value>https://example.org</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Permissions\\Camera\\Allow</Key>
+ <ValueName>2</ValueName>
+ <Value>https://example.org:1234</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Permissions\\Camera\\Block</Key>
+ <ValueName>**delvals.</ValueName>
+ <Value> </Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Permissions\\Camera\\Block</Key>
+ <ValueName>1</ValueName>
+ <Value>https://example.edu</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Permissions\\Location</Key>
+ <ValueName>BlockNewRequests</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Permissions\\Location</Key>
+ <ValueName>Locked</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Permissions\\Location\\Allow</Key>
+ <ValueName>**delvals.</ValueName>
+ <Value> </Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Permissions\\Location\\Allow</Key>
+ <ValueName>1</ValueName>
+ <Value>https://example.org</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Permissions\\Location\\Block</Key>
+ <ValueName>**delvals.</ValueName>
+ <Value> </Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Permissions\\Location\\Block</Key>
+ <ValueName>1</ValueName>
+ <Value>https://example.edu</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Permissions\\Microphone</Key>
+ <ValueName>BlockNewRequests</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Permissions\\Microphone</Key>
+ <ValueName>Locked</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Permissions\\Microphone\\Allow</Key>
+ <ValueName>**delvals.</ValueName>
+ <Value> </Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Permissions\\Microphone\\Allow</Key>
+ <ValueName>1</ValueName>
+ <Value>https://example.org</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Permissions\\Microphone\\Block</Key>
+ <ValueName>**delvals.</ValueName>
+ <Value> </Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Permissions\\Microphone\\Block</Key>
+ <ValueName>1</ValueName>
+ <Value>https://example.edu</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Permissions\\Notifications</Key>
+ <ValueName>BlockNewRequests</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Permissions\\Notifications</Key>
+ <ValueName>Locked</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Permissions\\Notifications\\Allow</Key>
+ <ValueName>**delvals.</ValueName>
+ <Value> </Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Permissions\\Notifications\\Allow</Key>
+ <ValueName>1</ValueName>
+ <Value>https://example.org</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Permissions\\Notifications\\Block</Key>
+ <ValueName>**delvals.</ValueName>
+ <Value> </Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Permissions\\Notifications\\Block</Key>
+ <ValueName>1</ValueName>
+ <Value>https://example.edu</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Permissions\\VirtualReality</Key>
+ <ValueName>BlockNewRequests</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Permissions\\VirtualReality</Key>
+ <ValueName>Locked</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Permissions\\VirtualReality\\Allow</Key>
+ <ValueName>**delvals.</ValueName>
+ <Value> </Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Permissions\\VirtualReality\\Allow</Key>
+ <ValueName>1</ValueName>
+ <Value>https://example.org</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Permissions\\VirtualReality\\Block</Key>
+ <ValueName>**delvals.</ValueName>
+ <Value> </Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Permissions\\VirtualReality\\Block</Key>
+ <ValueName>1</ValueName>
+ <Value>https://example.edu</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\PictureInPicture</Key>
+ <ValueName>Enabled</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\PictureInPicture</Key>
+ <ValueName>Locked</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\PopupBlocking</Key>
+ <ValueName>Default</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\PopupBlocking</Key>
+ <ValueName>Locked</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\PopupBlocking\\Allow</Key>
+ <ValueName>**delvals.</ValueName>
+ <Value> </Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\PopupBlocking\\Allow</Key>
+ <ValueName>1</ValueName>
+ <Value>http://example.org/</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\PopupBlocking\\Allow</Key>
+ <ValueName>2</ValueName>
+ <Value>http://example.edu/</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Proxy</Key>
+ <ValueName>Locked</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Proxy</Key>
+ <ValueName>Mode</ValueName>
+ <Value>autoDetect</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Proxy</Key>
+ <ValueName>HTTPProxy</ValueName>
+ <Value>hostname</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Proxy</Key>
+ <ValueName>UseHTTPProxyForAllProtocols</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Proxy</Key>
+ <ValueName>SSLProxy</ValueName>
+ <Value>hostname</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Proxy</Key>
+ <ValueName>FTPProxy</ValueName>
+ <Value>hostname</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Proxy</Key>
+ <ValueName>SOCKSProxy</ValueName>
+ <Value>hostname</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Proxy</Key>
+ <ValueName>SOCKSVersion</ValueName>
+ <Value>5</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Proxy</Key>
+ <ValueName>Passthrough</ValueName>
+ <Value>&lt;local&gt;</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Proxy</Key>
+ <ValueName>AutoConfigURL</ValueName>
+ <Value>URL_TO_AUTOCONFIG</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Proxy</Key>
+ <ValueName>AutoLogin</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Proxy</Key>
+ <ValueName>UseProxyForDNS</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox</Key>
+ <ValueName>SanitizeOnShutdown</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\SearchEngines</Key>
+ <ValueName>Default</ValueName>
+ <Value>Google</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\SearchEngines</Key>
+ <ValueName>PreventInstalls</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\SearchEngines\\Add\\1</Key>
+ <ValueName>Name</ValueName>
+ <Value>Example1</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\SearchEngines\\Add\\1</Key>
+ <ValueName>URLTemplate</ValueName>
+ <Value>https://www.example.org/q={searchTerms}</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\SearchEngines\\Add\\1</Key>
+ <ValueName>Method</ValueName>
+ <Value>POST</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\SearchEngines\\Add\\1</Key>
+ <ValueName>IconURL</ValueName>
+ <Value>https://www.example.org/favicon.ico</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\SearchEngines\\Add\\1</Key>
+ <ValueName>Alias</ValueName>
+ <Value>example</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\SearchEngines\\Add\\1</Key>
+ <ValueName>Description</ValueName>
+ <Value>Description</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\SearchEngines\\Add\\1</Key>
+ <ValueName>SuggestURLTemplate</ValueName>
+ <Value>https://www.example.org/suggestions/q={searchTerms}</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\SearchEngines\\Add\\1</Key>
+ <ValueName>PostData</ValueName>
+ <Value>name=value&amp;q={searchTerms}</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\SearchEngines\\Remove</Key>
+ <ValueName>**delvals.</ValueName>
+ <Value> </Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\SearchEngines\\Remove</Key>
+ <ValueName>1</ValueName>
+ <Value>Bing</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\SupportMenu</Key>
+ <ValueName>Title</ValueName>
+ <Value>Support Menu</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\SupportMenu</Key>
+ <ValueName>URL</ValueName>
+ <Value>http://example.com/support</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\SupportMenu</Key>
+ <ValueName>AccessKey</ValueName>
+ <Value>S</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\UserMessaging</Key>
+ <ValueName>ExtensionRecommendations</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\UserMessaging</Key>
+ <ValueName>FeatureRecommendations</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\UserMessaging</Key>
+ <ValueName>WhatsNew</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\UserMessaging</Key>
+ <ValueName>UrlbarInterventions</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\UserMessaging</Key>
+ <ValueName>SkipOnboarding</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\WebsiteFilter\\Block</Key>
+ <ValueName>**delvals.</ValueName>
+ <Value> </Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\WebsiteFilter\\Block</Key>
+ <ValueName>1</ValueName>
+ <Value>&lt;all_urls&gt;</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\WebsiteFilter\\Exceptions</Key>
+ <ValueName>**delvals.</ValueName>
+ <Value> </Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\WebsiteFilter\\Exceptions</Key>
+ <ValueName>1</ValueName>
+ <Value>http://example.org/*</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox</Key>
+ <ValueName>AllowedDomainsForApps</ValueName>
+ <Value>managedfirefox.com,example.com</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox</Key>
+ <ValueName>BackgroundAppUpdate</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Certificates</Key>
+ <ValueName>ImportEnterpriseRoots</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Certificates\\Install</Key>
+ <ValueName>**delvals.</ValueName>
+ <Value> </Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Certificates\\Install</Key>
+ <ValueName>1</ValueName>
+ <Value>cert1.der</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\Certificates\\Install</Key>
+ <ValueName>2</ValueName>
+ <Value>/home/username/cert2.pem</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox\\SecurityDevices</Key>
+ <ValueName>NAME_OF_DEVICE</ValueName>
+ <Value>PATH_TO_LIBRARY_FOR_DEVICE</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\\Policies\\Mozilla\\Firefox</Key>
+ <ValueName>ShowHomeButton</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="7" type_name="REG_MULTI_SZ">
+ <Key>Software\\Policies\\Mozilla\\Firefox</Key>
+ <ValueName>AutoLaunchProtocolsFromOrigins</ValueName>
+ <Value>[{&quot;protocol&quot;: &quot;zoommtg&quot;, &quot;allowed_origins&quot;: [&quot;https://somesite.zoom.us&quot;]}]</Value>
+ </Entry>
+</PolFile>
+"""
+
+# Expected Firefox `policies.json` content for the GPO test above.
+# NOTE(review): presumably this is the output the Firefox policy extension
+# (gp_firefox_ext) generates from the firefox_reg_pol registry fixture —
+# confirm against the test body, which is outside this view.  Keep this
+# literal byte-for-byte in sync with the registry entries in
+# firefox_reg_pol; do not "fix" apparent typos here (e.g. the "PSFjs" key)
+# unless the corresponding registry key in the input fixture changes too.
+firefox_json_expected = \
+"""
+{
+ "policies": {
+ "AppAutoUpdate": true,
+ "AllowedDomainsForApps": "managedfirefox.com,example.com",
+ "AppUpdateURL": "https://yoursite.com",
+ "Authentication": {
+ "SPNEGO": [
+ "mydomain.com",
+ "https://myotherdomain.com"
+ ],
+ "Delegated": [
+ "mydomain.com",
+ "https://myotherdomain.com"
+ ],
+ "NTLM": [
+ "mydomain.com",
+ "https://myotherdomain.com"
+ ],
+ "AllowNonFQDN": {
+ "SPNEGO": true,
+ "NTLM": true
+ },
+ "AllowProxies": {
+ "SPNEGO": true,
+ "NTLM": true
+ },
+ "Locked": true,
+ "PrivateBrowsing": true
+ },
+ "AutoLaunchProtocolsFromOrigins": [
+ {
+ "protocol": "zoommtg",
+ "allowed_origins": [
+ "https://somesite.zoom.us"
+ ]
+ }
+ ],
+ "BackgroundAppUpdate": true,
+ "BlockAboutAddons": true,
+ "BlockAboutConfig": true,
+ "BlockAboutProfiles": true,
+ "BlockAboutSupport": true,
+ "Bookmarks": [
+ {
+ "Title": "Example",
+ "URL": "https://example.com",
+ "Favicon": "https://example.com/favicon.ico",
+ "Placement": "menu",
+ "Folder": "FolderName"
+ },
+ {
+ "Title": "Samba",
+ "URL": "www.samba.org",
+ "Favicon": "",
+ "Placement": "toolbar",
+ "Folder": ""
+ }
+ ],
+ "CaptivePortal": true,
+ "Certificates": {
+ "ImportEnterpriseRoots": true,
+ "Install": [
+ "cert1.der",
+ "/home/username/cert2.pem"
+ ]
+ },
+ "Cookies": {
+ "Allow": [
+ "http://example.org/"
+ ],
+ "AllowSession": [
+ "http://example.edu/"
+ ],
+ "Block": [
+ "http://example.edu/"
+ ],
+ "Default": true,
+ "AcceptThirdParty": "never",
+ "ExpireAtSessionEnd": true,
+ "RejectTracker": true,
+ "Locked": true
+ },
+ "DisableSetDesktopBackground": true,
+ "DisableMasterPasswordCreation": true,
+ "DisableAppUpdate": true,
+ "DisableBuiltinPDFViewer": true,
+ "DisabledCiphers": {
+ "TLS_DHE_RSA_WITH_AES_128_CBC_SHA": true,
+ "TLS_DHE_RSA_WITH_AES_256_CBC_SHA": true,
+ "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA": true,
+ "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA": true,
+ "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256": true,
+ "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": true,
+ "TLS_RSA_WITH_AES_128_CBC_SHA": true,
+ "TLS_RSA_WITH_AES_256_CBC_SHA": true,
+ "TLS_RSA_WITH_3DES_EDE_CBC_SHA": true,
+ "TLS_RSA_WITH_AES_128_GCM_SHA256": true,
+ "TLS_RSA_WITH_AES_256_GCM_SHA384": true
+ },
+ "DisableDefaultBrowserAgent": true,
+ "DisableDeveloperTools": true,
+ "DisableFeedbackCommands": true,
+ "DisableFirefoxScreenshots": true,
+ "DisableFirefoxAccounts": true,
+ "DisableFirefoxStudies": true,
+ "DisableForgetButton": true,
+ "DisableFormHistory": true,
+ "DisablePasswordReveal": true,
+ "DisablePocket": true,
+ "DisablePrivateBrowsing": true,
+ "DisableProfileImport": true,
+ "DisableProfileRefresh": true,
+ "DisableSafeMode": true,
+ "DisableSecurityBypass": {
+ "InvalidCertificate": true,
+ "SafeBrowsing": true
+ },
+ "DisableSystemAddonUpdate": true,
+ "DisableTelemetry": true,
+ "DisplayBookmarksToolbar": true,
+ "DisplayMenuBar": "default-on",
+ "DNSOverHTTPS": {
+ "Enabled": true,
+ "ProviderURL": "URL_TO_ALTERNATE_PROVIDER",
+ "Locked": true,
+ "ExcludedDomains": [
+ "example.com"
+ ]
+ },
+ "DontCheckDefaultBrowser": true,
+ "EnableTrackingProtection": {
+ "Value": true,
+ "Locked": true,
+ "Cryptomining": true,
+ "Fingerprinting": true,
+ "Exceptions": [
+ "https://example.com"
+ ]
+ },
+ "EncryptedMediaExtensions": {
+ "Enabled": true,
+ "Locked": true
+ },
+ "Extensions": {
+ "Install": [
+ "https://addons.mozilla.org/firefox/downloads/somefile.xpi",
+ "//path/to/xpi"
+ ],
+ "Uninstall": [
+ "bad_addon_id@mozilla.org"
+ ],
+ "Locked": [
+ "addon_id@mozilla.org"
+ ]
+ },
+ "ExtensionSettings": {
+ "*": {
+ "blocked_install_message": "Custom error message.",
+ "install_sources": [
+ "about:addons",
+ "https://addons.mozilla.org/"
+ ],
+ "installation_mode": "blocked",
+ "allowed_types": [
+ "extension"
+ ]
+ },
+ "uBlock0@raymondhill.net": {
+ "installation_mode": "force_installed",
+ "install_url": "https://addons.mozilla.org/firefox/downloads/latest/ublock-origin/latest.xpi"
+ },
+ "https-everywhere@eff.org": {
+ "installation_mode": "allowed"
+ }
+ },
+ "ExtensionUpdate": true,
+ "FlashPlugin": {
+ "Allow": [
+ "http://example.org/"
+ ],
+ "Block": [
+ "http://example.edu/"
+ ],
+ "Default": true,
+ "Locked": true
+ },
+ "Handlers": {
+ "mimeTypes": {
+ "application/msword": {
+ "action": "useSystemDefault",
+ "ask": true
+ }
+ },
+ "schemes": {
+ "mailto": {
+ "action": "useHelperApp",
+ "ask": true,
+ "handlers": [
+ {
+ "name": "Gmail",
+ "uriTemplate": "https://mail.google.com/mail/?extsrc=mailto&url=%s"
+ }
+ ]
+ }
+ },
+ "extensions": {
+ "pdf": {
+ "action": "useHelperApp",
+ "ask": true,
+ "handlers": [
+ {
+ "name": "Adobe Acrobat",
+ "path": "/usr/bin/acroread"
+ }
+ ]
+ }
+ }
+ },
+ "FirefoxHome": {
+ "Search": true,
+ "TopSites": true,
+ "Highlights": true,
+ "Pocket": true,
+ "Snippets": true,
+ "Locked": true
+ },
+ "HardwareAcceleration": true,
+ "Homepage": {
+ "URL": "http://example.com/",
+ "Locked": true,
+ "Additional": [
+ "http://example.org/",
+ "http://example.edu/"
+ ],
+ "StartPage": "homepage"
+ },
+ "InstallAddonsPermission": {
+ "Allow": [
+ "http://example.org/",
+ "http://example.edu/"
+ ],
+ "Default": true
+ },
+ "LocalFileLinks": [
+ "http://example.org/",
+ "http://example.edu/"
+ ],
+ "ManagedBookmarks": [
+ {
+ "toplevel_name": "My managed bookmarks folder"
+ },
+ {
+ "url": "example.com",
+ "name": "Example"
+ },
+ {
+ "name": "Mozilla links",
+ "children": [
+ {
+ "url": "https://mozilla.org",
+ "name": "Mozilla.org"
+ },
+ {
+ "url": "https://support.mozilla.org/",
+ "name": "SUMO"
+ }
+ ]
+ }
+ ],
+ "PrimaryPassword": true,
+ "NoDefaultBookmarks": true,
+ "OfferToSaveLogins": true,
+ "OfferToSaveLoginsDefault": true,
+ "OverrideFirstRunPage": "http://example.org",
+ "OverridePostUpdatePage": "http://example.org",
+ "PasswordManagerEnabled": true,
+ "PSFjs": {
+ "Enabled": true,
+ "EnablePermissions": true
+ },
+ "Permissions": {
+ "Camera": {
+ "Allow": [
+ "https://example.org",
+ "https://example.org:1234"
+ ],
+ "Block": [
+ "https://example.edu"
+ ],
+ "BlockNewRequests": true,
+ "Locked": true
+ },
+ "Microphone": {
+ "Allow": [
+ "https://example.org"
+ ],
+ "Block": [
+ "https://example.edu"
+ ],
+ "BlockNewRequests": true,
+ "Locked": true
+ },
+ "Location": {
+ "Allow": [
+ "https://example.org"
+ ],
+ "Block": [
+ "https://example.edu"
+ ],
+ "BlockNewRequests": true,
+ "Locked": true
+ },
+ "Notifications": {
+ "Allow": [
+ "https://example.org"
+ ],
+ "Block": [
+ "https://example.edu"
+ ],
+ "BlockNewRequests": true,
+ "Locked": true
+ },
+ "Autoplay": {
+ "Allow": [
+ "https://example.org"
+ ],
+ "Block": [
+ "https://example.edu"
+ ],
+ "Default": "block-audio",
+ "Locked": true
+ },
+ "VirtualReality": {
+ "Allow": [
+ "https://example.org"
+ ],
+ "Block": [
+ "https://example.edu"
+ ],
+ "BlockNewRequests": true,
+ "Locked": true
+ }
+ },
+ "PictureInPicture": {
+ "Enabled": true,
+ "Locked": true
+ },
+ "PopupBlocking": {
+ "Allow": [
+ "http://example.org/",
+ "http://example.edu/"
+ ],
+ "Default": true,
+ "Locked": true
+ },
+ "Preferences": {
+ "accessibility.force_disabled": {
+ "Value": 1,
+ "Status": "default"
+ },
+ "browser.cache.disk.parent_directory": {
+ "Value": "SOME_NATIVE_PATH",
+ "Status": "user"
+ },
+ "browser.tabs.warnOnClose": {
+ "Value": false,
+ "Status": "locked"
+ }
+ },
+ "PromptForDownloadLocation": true,
+ "Proxy": {
+ "Mode": "autoDetect",
+ "Locked": true,
+ "HTTPProxy": "hostname",
+ "UseHTTPProxyForAllProtocols": true,
+ "SSLProxy": "hostname",
+ "FTPProxy": "hostname",
+ "SOCKSProxy": "hostname",
+ "SOCKSVersion": 5,
+ "Passthrough": "<local>",
+ "AutoConfigURL": "URL_TO_AUTOCONFIG",
+ "AutoLogin": true,
+ "UseProxyForDNS": true
+ },
+ "SanitizeOnShutdown": true,
+ "SearchEngines": {
+ "Add": [
+ {
+ "Name": "Example1",
+ "URLTemplate": "https://www.example.org/q={searchTerms}",
+ "Method": "POST",
+ "IconURL": "https://www.example.org/favicon.ico",
+ "Alias": "example",
+ "Description": "Description",
+ "PostData": "name=value&q={searchTerms}",
+ "SuggestURLTemplate": "https://www.example.org/suggestions/q={searchTerms}"
+ }
+ ],
+ "Remove": [
+ "Bing"
+ ],
+ "Default": "Google",
+ "PreventInstalls": true
+ },
+ "SearchSuggestEnabled": true,
+ "SecurityDevices": {
+ "NAME_OF_DEVICE": "PATH_TO_LIBRARY_FOR_DEVICE"
+ },
+ "ShowHomeButton": true,
+ "SSLVersionMax": "tls1.3",
+ "SSLVersionMin": "tls1.3",
+ "SupportMenu": {
+ "Title": "Support Menu",
+ "URL": "http://example.com/support",
+ "AccessKey": "S"
+ },
+ "UserMessaging": {
+ "WhatsNew": true,
+ "ExtensionRecommendations": true,
+ "FeatureRecommendations": true,
+ "UrlbarInterventions": true,
+ "SkipOnboarding": true
+ },
+ "WebsiteFilter": {
+ "Block": [
+ "<all_urls>"
+ ],
+ "Exceptions": [
+ "http://example.org/*"
+ ]
+ },
+ "DefaultDownloadDirectory": "${home}/Downloads",
+ "DownloadDirectory": "${home}/Downloads",
+ "NetworkPrediction": true,
+ "NewTabPage": true,
+ "RequestedLocales": ["de", "en-US"],
+ "SearchBar": "unified"
+ }
+}
+"""
+
+chromium_reg_pol = \
+br"""
+<?xml version="1.0" encoding="utf-8"?>
+<PolFile num_entries="418" signature="PReg" version="1">
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>AbusiveExperienceInterventionEnforce</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>AccessibilityImageLabelsEnabled</ValueName>
+ <Value>0</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>AdditionalDnsQueryTypesEnabled</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>AdsSettingForIntrusiveAdsSites</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>AdvancedProtectionAllowed</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>AllowCrossOriginAuthPrompt</ValueName>
+ <Value>0</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>AllowDeletingBrowserHistory</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>AllowDinosaurEasterEgg</ValueName>
+ <Value>0</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>AllowFileSelectionDialogs</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>AllowSyncXHRInPageDismissal</ValueName>
+ <Value>0</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>AllowedDomainsForApps</ValueName>
+ <Value>managedchrome.com,example.com</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>AlternateErrorPagesEnabled</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>AlternativeBrowserPath</ValueName>
+ <Value>${ie}</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>AlwaysOpenPdfExternally</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>AmbientAuthenticationInPrivateModesEnabled</ValueName>
+ <Value>0</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>AppCacheForceEnabled</ValueName>
+ <Value>0</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>ApplicationLocaleValue</ValueName>
+ <Value>en</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>AudioCaptureAllowed</ValueName>
+ <Value>0</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>AudioProcessHighPriorityEnabled</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>AudioSandboxEnabled</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>AuthNegotiateDelegateAllowlist</ValueName>
+ <Value>foobar.example.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>AuthSchemes</ValueName>
+ <Value>basic,digest,ntlm,negotiate</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>AuthServerAllowlist</ValueName>
+ <Value>*.example.com,example.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>AutoLaunchProtocolsFromOrigins</ValueName>
+ <Value>[{&quot;allowed_origins&quot;: [&quot;example.com&quot;, &quot;http://www.example.com:8080&quot;], &quot;protocol&quot;: &quot;spotify&quot;}, {&quot;allowed_origins&quot;: [&quot;https://example.com&quot;, &quot;https://.mail.example.com&quot;], &quot;protocol&quot;: &quot;teams&quot;}, {&quot;allowed_origins&quot;: [&quot;*&quot;], &quot;protocol&quot;: &quot;outlook&quot;}]</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>AutofillAddressEnabled</ValueName>
+ <Value>0</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>AutofillCreditCardEnabled</ValueName>
+ <Value>0</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>AutoplayAllowed</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>BackgroundModeEnabled</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>BasicAuthOverHttpEnabled</ValueName>
+ <Value>0</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>BlockExternalExtensions</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>BlockThirdPartyCookies</ValueName>
+ <Value>0</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>BookmarkBarEnabled</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>BrowserAddPersonEnabled</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>BrowserGuestModeEnabled</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>BrowserGuestModeEnforced</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>BrowserLabsEnabled</ValueName>
+ <Value>0</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>BrowserNetworkTimeQueriesEnabled</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>BrowserSignin</ValueName>
+ <Value>2</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>BrowserSwitcherChromePath</ValueName>
+ <Value>${chrome}</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>BrowserSwitcherDelay</ValueName>
+ <Value>10000</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>BrowserSwitcherEnabled</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>BrowserSwitcherExternalGreylistUrl</ValueName>
+ <Value>http://example.com/greylist.xml</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>BrowserSwitcherExternalSitelistUrl</ValueName>
+ <Value>http://example.com/sitelist.xml</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>BrowserSwitcherKeepLastChromeTab</ValueName>
+ <Value>0</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>BrowserSwitcherUseIeSitelist</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>BrowserThemeColor</ValueName>
+ <Value>#FFFFFF</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>BrowsingDataLifetime</ValueName>
+ <Value>[{&quot;data_types&quot;: [&quot;browsing_history&quot;], &quot;time_to_live_in_hours&quot;: 24}, {&quot;data_types&quot;: [&quot;password_signin&quot;, &quot;autofill&quot;], &quot;time_to_live_in_hours&quot;: 12}]</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>BuiltInDnsClientEnabled</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>CECPQ2Enabled</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>ChromeCleanupEnabled</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>ChromeCleanupReportingEnabled</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>ChromeVariations</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>ClickToCallEnabled</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>CloudManagementEnrollmentMandatory</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>CloudManagementEnrollmentToken</ValueName>
+ <Value>37185d02-e055-11e7-80c1-9a214cf093ae</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>CloudPolicyOverridesPlatformPolicy</ValueName>
+ <Value>0</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>CloudPrintProxyEnabled</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>CloudPrintSubmitEnabled</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>CloudUserPolicyMerge</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>CommandLineFlagSecurityWarningsEnabled</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>ComponentUpdatesEnabled</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>DNSInterceptionChecksEnabled</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>DefaultBrowserSettingEnabled</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>DefaultCookiesSetting</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>DefaultFileHandlingGuardSetting</ValueName>
+ <Value>2</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>DefaultFileSystemReadGuardSetting</ValueName>
+ <Value>2</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>DefaultFileSystemWriteGuardSetting</ValueName>
+ <Value>2</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>DefaultGeolocationSetting</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>DefaultImagesSetting</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>DefaultInsecureContentSetting</ValueName>
+ <Value>2</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>DefaultJavaScriptSetting</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>DefaultNotificationsSetting</ValueName>
+ <Value>2</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>DefaultPopupsSetting</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>DefaultPrinterSelection</ValueName>
+ <Value>{ &quot;kind&quot;: &quot;cloud&quot;, &quot;idPattern&quot;: &quot;.*public&quot;, &quot;namePattern&quot;: &quot;.*Color&quot; }</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>DefaultSearchProviderContextMenuAccessAllowed</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>DefaultSearchProviderEnabled</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>DefaultSearchProviderIconURL</ValueName>
+ <Value>https://search.my.company/favicon.ico</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>DefaultSearchProviderImageURL</ValueName>
+ <Value>https://search.my.company/searchbyimage/upload</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>DefaultSearchProviderImageURLPostParams</ValueName>
+ <Value>content={imageThumbnail},url={imageURL},sbisrc={SearchSource}</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>DefaultSearchProviderKeyword</ValueName>
+ <Value>mis</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>DefaultSearchProviderName</ValueName>
+ <Value>My Intranet Search</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>DefaultSearchProviderNewTabURL</ValueName>
+ <Value>https://search.my.company/newtab</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>DefaultSearchProviderSearchURL</ValueName>
+ <Value>https://search.my.company/search?q={searchTerms}</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>DefaultSearchProviderSearchURLPostParams</ValueName>
+ <Value>q={searchTerms},ie=utf-8,oe=utf-8</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>DefaultSearchProviderSuggestURL</ValueName>
+ <Value>https://search.my.company/suggest?q={searchTerms}</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>DefaultSearchProviderSuggestURLPostParams</ValueName>
+ <Value>q={searchTerms},ie=utf-8,oe=utf-8</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>DefaultSensorsSetting</ValueName>
+ <Value>2</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>DefaultSerialGuardSetting</ValueName>
+ <Value>2</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>DefaultWebBluetoothGuardSetting</ValueName>
+ <Value>2</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>DefaultWebUsbGuardSetting</ValueName>
+ <Value>2</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>DeveloperToolsAvailability</ValueName>
+ <Value>2</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>Disable3DAPIs</ValueName>
+ <Value>0</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>DisableAuthNegotiateCnameLookup</ValueName>
+ <Value>0</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>DisablePrintPreview</ValueName>
+ <Value>0</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>DisableSafeBrowsingProceedAnyway</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>DisableScreenshots</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>DiskCacheDir</ValueName>
+ <Value>${user_home}/Chrome_cache</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>DiskCacheSize</ValueName>
+ <Value>104857600</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>DnsOverHttpsMode</ValueName>
+ <Value>off</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>DnsOverHttpsTemplates</ValueName>
+ <Value>https://dns.example.net/dns-query{?dns}</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>DownloadDirectory</ValueName>
+ <Value>/home/${user_name}/Downloads</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>DownloadRestrictions</ValueName>
+ <Value>2</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>EditBookmarksEnabled</ValueName>
+ <Value>0</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>EnableAuthNegotiatePort</ValueName>
+ <Value>0</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>EnableDeprecatedPrivetPrinting</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>EnableMediaRouter</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>EnableOnlineRevocationChecks</ValueName>
+ <Value>0</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>EnterpriseHardwarePlatformAPIEnabled</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>ExtensionSettings</ValueName>
+ <Value>{&quot;*&quot;: {&quot;allowed_types&quot;: [&quot;hosted_app&quot;], &quot;blocked_install_message&quot;: &quot;Custom error message.&quot;, &quot;blocked_permissions&quot;: [&quot;downloads&quot;, &quot;bookmarks&quot;], &quot;install_sources&quot;: [&quot;https://company-intranet/chromeapps&quot;], &quot;installation_mode&quot;: &quot;blocked&quot;, &quot;runtime_allowed_hosts&quot;: [&quot;*://good.example.com&quot;], &quot;runtime_blocked_hosts&quot;: [&quot;*://*.example.com&quot;]}, &quot;abcdefghijklmnopabcdefghijklmnop&quot;: {&quot;blocked_permissions&quot;: [&quot;history&quot;], &quot;installation_mode&quot;: &quot;allowed&quot;, &quot;minimum_version_required&quot;: &quot;1.0.1&quot;, &quot;toolbar_pin&quot;: &quot;force_pinned&quot;}, &quot;bcdefghijklmnopabcdefghijklmnopa&quot;: {&quot;allowed_permissions&quot;: [&quot;downloads&quot;], &quot;installation_mode&quot;: &quot;force_installed&quot;, &quot;runtime_allowed_hosts&quot;: [&quot;*://good.example.com&quot;], &quot;runtime_blocked_hosts&quot;: [&quot;*://*.example.com&quot;], &quot;update_url&quot;: &quot;https://example.com/update_url&quot;}, &quot;cdefghijklmnopabcdefghijklmnopab&quot;: {&quot;blocked_install_message&quot;: &quot;Custom error message.&quot;, &quot;installation_mode&quot;: &quot;blocked&quot;}, &quot;defghijklmnopabcdefghijklmnopabc,efghijklmnopabcdefghijklmnopabcd&quot;: {&quot;blocked_install_message&quot;: &quot;Custom error message.&quot;, &quot;installation_mode&quot;: &quot;blocked&quot;}, &quot;fghijklmnopabcdefghijklmnopabcde&quot;: {&quot;blocked_install_message&quot;: &quot;Custom removal message.&quot;, &quot;installation_mode&quot;: &quot;removed&quot;}, &quot;ghijklmnopabcdefghijklmnopabcdef&quot;: {&quot;installation_mode&quot;: &quot;force_installed&quot;, &quot;override_update_url&quot;: true, &quot;update_url&quot;: &quot;https://example.com/update_url&quot;}, &quot;update_url:https://www.example.com/update.xml&quot;: {&quot;allowed_permissions&quot;: 
[&quot;downloads&quot;], &quot;blocked_permissions&quot;: [&quot;wallpaper&quot;], &quot;installation_mode&quot;: &quot;allowed&quot;}}</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>ExternalProtocolDialogShowAlwaysOpenCheckbox</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>FetchKeepaliveDurationSecondsOnShutdown</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>ForceEphemeralProfiles</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>ForceGoogleSafeSearch</ValueName>
+ <Value>0</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>ForceYouTubeRestrict</ValueName>
+ <Value>0</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>FullscreenAllowed</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>GloballyScopeHTTPAuthCacheEnabled</ValueName>
+ <Value>0</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>HardwareAccelerationModeEnabled</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>HeadlessMode</ValueName>
+ <Value>2</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>HideWebStoreIcon</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>HomepageIsNewTabPage</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>HomepageLocation</ValueName>
+ <Value>https://www.chromium.org</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>ImportAutofillFormData</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>ImportBookmarks</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>ImportHistory</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>ImportHomepage</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>ImportSavedPasswords</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>ImportSearchEngine</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>IncognitoModeAvailability</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>InsecureFormsWarningsEnabled</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>InsecurePrivateNetworkRequestsAllowed</ValueName>
+ <Value>0</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>IntensiveWakeUpThrottlingEnabled</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>IntranetRedirectBehavior</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>IsolateOrigins</ValueName>
+ <Value>https://example.com/,https://othersite.org/</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>ManagedBookmarks</ValueName>
+ <Value>[{&quot;toplevel_name&quot;: &quot;My managed bookmarks folder&quot;}, {&quot;name&quot;: &quot;Google&quot;, &quot;url&quot;: &quot;google.com&quot;}, {&quot;name&quot;: &quot;Youtube&quot;, &quot;url&quot;: &quot;youtube.com&quot;}, {&quot;children&quot;: [{&quot;name&quot;: &quot;Chromium&quot;, &quot;url&quot;: &quot;chromium.org&quot;}, {&quot;name&quot;: &quot;Chromium Developers&quot;, &quot;url&quot;: &quot;dev.chromium.org&quot;}], &quot;name&quot;: &quot;Chrome links&quot;}]</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>ManagedConfigurationPerOrigin</ValueName>
+ <Value>[{&quot;managed_configuration_hash&quot;: &quot;asd891jedasd12ue9h&quot;, &quot;managed_configuration_url&quot;: &quot;https://gstatic.google.com/configuration.json&quot;, &quot;origin&quot;: &quot;https://www.google.com&quot;}, {&quot;managed_configuration_hash&quot;: &quot;djio12easd89u12aws&quot;, &quot;managed_configuration_url&quot;: &quot;https://gstatic.google.com/configuration2.json&quot;, &quot;origin&quot;: &quot;https://www.example.com&quot;}]</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>MaxConnectionsPerProxy</ValueName>
+ <Value>32</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>MaxInvalidationFetchDelay</ValueName>
+ <Value>10000</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>MediaRecommendationsEnabled</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>MediaRouterCastAllowAllIPs</ValueName>
+ <Value>0</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>MetricsReportingEnabled</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>NTPCardsVisible</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>NTPCustomBackgroundEnabled</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>NativeMessagingUserLevelHosts</ValueName>
+ <Value>0</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>NetworkPredictionOptions</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>NewTabPageLocation</ValueName>
+ <Value>https://www.chromium.org</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>PasswordLeakDetectionEnabled</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>PasswordManagerEnabled</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>PasswordProtectionChangePasswordURL</ValueName>
+ <Value>https://mydomain.com/change_password.html</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>PasswordProtectionWarningTrigger</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>PaymentMethodQueryEnabled</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>PolicyAtomicGroupsEnabled</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>PolicyRefreshRate</ValueName>
+ <Value>3600000</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>PrintHeaderFooter</ValueName>
+ <Value>0</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>PrintPreviewUseSystemDefaultPrinter</ValueName>
+ <Value>0</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>PrintRasterizationMode</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>PrintingAllowedBackgroundGraphicsModes</ValueName>
+ <Value>enabled</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>PrintingBackgroundGraphicsDefault</ValueName>
+ <Value>enabled</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>PrintingEnabled</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>PrintingPaperSizeDefault</ValueName>
+ <Value>{&quot;custom_size&quot;: {&quot;height&quot;: 297000, &quot;width&quot;: 210000}, &quot;name&quot;: &quot;custom&quot;}</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>ProfilePickerOnStartupAvailability</ValueName>
+ <Value>0</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>PromotionalTabsEnabled</ValueName>
+ <Value>0</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>PromptForDownloadLocation</ValueName>
+ <Value>0</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>ProxySettings</ValueName>
+ <Value>{&quot;ProxyBypassList&quot;: &quot;https://www.example1.com,https://www.example2.com,https://internalsite/&quot;, &quot;ProxyMode&quot;: &quot;direct&quot;, &quot;ProxyPacUrl&quot;: &quot;https://internal.site/example.pac&quot;, &quot;ProxyServer&quot;: &quot;123.123.123.123:8080&quot;, &quot;ProxyServerMode&quot;: 2}</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>QuicAllowed</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>RelaunchNotification</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>RelaunchNotificationPeriod</ValueName>
+ <Value>604800000</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>RemoteAccessHostAllowClientPairing</ValueName>
+ <Value>0</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>RemoteAccessHostAllowFileTransfer</ValueName>
+ <Value>0</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>RemoteAccessHostAllowRelayedConnection</ValueName>
+ <Value>0</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>RemoteAccessHostAllowRemoteAccessConnections</ValueName>
+ <Value>0</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>RemoteAccessHostAllowUiAccessForRemoteAssistance</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>RemoteAccessHostFirewallTraversal</ValueName>
+ <Value>0</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>RemoteAccessHostMaximumSessionDurationMinutes</ValueName>
+ <Value>1200</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>RemoteAccessHostRequireCurtain</ValueName>
+ <Value>0</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>RemoteAccessHostUdpPortRange</ValueName>
+ <Value>12400-12409</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>RendererCodeIntegrityEnabled</ValueName>
+ <Value>0</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>RequireOnlineRevocationChecksForLocalAnchors</ValueName>
+ <Value>0</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>RestoreOnStartup</ValueName>
+ <Value>4</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>RestrictSigninToPattern</ValueName>
+ <Value>.*@example\.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>RoamingProfileLocation</ValueName>
+ <Value>${roaming_app_data}\chrome-profile</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>RoamingProfileSupportEnabled</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>SSLErrorOverrideAllowed</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>SSLVersionMin</ValueName>
+ <Value>tls1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>SafeBrowsingExtendedReportingEnabled</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>SafeBrowsingForTrustedSourcesEnabled</ValueName>
+ <Value>0</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>SafeBrowsingProtectionLevel</ValueName>
+ <Value>2</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>SafeSitesFilterBehavior</ValueName>
+ <Value>0</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>SavingBrowserHistoryDisabled</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>ScreenCaptureAllowed</ValueName>
+ <Value>0</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>ScrollToTextFragmentEnabled</ValueName>
+ <Value>0</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>SearchSuggestEnabled</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>SharedArrayBufferUnrestrictedAccessAllowed</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>SharedClipboardEnabled</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>ShowAppsShortcutInBookmarkBar</ValueName>
+ <Value>0</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>ShowCastIconInToolbar</ValueName>
+ <Value>0</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>ShowFullUrlsInAddressBar</ValueName>
+ <Value>0</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>ShowHomeButton</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>SignedHTTPExchangeEnabled</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>SigninInterceptionEnabled</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>SitePerProcess</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>SpellCheckServiceEnabled</ValueName>
+ <Value>0</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>SpellcheckEnabled</ValueName>
+ <Value>0</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>SuppressDifferentOriginSubframeDialogs</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>SuppressUnsupportedOSWarning</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>SyncDisabled</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>TargetBlankImpliesNoOpener</ValueName>
+ <Value>0</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>TaskManagerEndProcessEnabled</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>ThirdPartyBlockingEnabled</ValueName>
+ <Value>0</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>TotalMemoryLimitMb</ValueName>
+ <Value>2048</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>TranslateEnabled</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>TripleDESEnabled</ValueName>
+ <Value>0</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>UrlKeyedAnonymizedDataCollectionEnabled</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>UserAgentClientHintsEnabled</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>UserDataDir</ValueName>
+ <Value>${users}/${user_name}/Chrome</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>UserDataSnapshotRetentionLimit</ValueName>
+ <Value>3</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>UserFeedbackAllowed</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>VideoCaptureAllowed</ValueName>
+ <Value>0</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>WPADQuickCheckEnabled</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>WebAppInstallForceList</ValueName>
+ <Value>[{&quot;create_desktop_shortcut&quot;: true, &quot;default_launch_container&quot;: &quot;window&quot;, &quot;url&quot;: &quot;https://www.google.com/maps&quot;}, {&quot;default_launch_container&quot;: &quot;tab&quot;, &quot;url&quot;: &quot;https://docs.google.com&quot;}, {&quot;default_launch_container&quot;: &quot;window&quot;, &quot;fallback_app_name&quot;: &quot;Editor&quot;, &quot;url&quot;: &quot;https://docs.google.com/editor&quot;}]</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>WebRtcAllowLegacyTLSProtocols</ValueName>
+ <Value>0</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>WebRtcEventLogCollectionAllowed</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>WebRtcIPHandling</ValueName>
+ <Value>default</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>WebRtcUdpPortRange</ValueName>
+ <Value>10000-11999</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>WebUsbAllowDevicesForUrls</ValueName>
+ <Value>[{&quot;devices&quot;: [{&quot;product_id&quot;: 5678, &quot;vendor_id&quot;: 1234}], &quot;urls&quot;: [&quot;https://google.com&quot;]}]</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome</Key>
+ <ValueName>WindowOcclusionEnabled</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\AlternativeBrowserParameters</Key>
+ <ValueName>1</ValueName>
+ <Value>-foreground</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\AlternativeBrowserParameters</Key>
+ <ValueName>2</ValueName>
+ <Value>-new-window</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\AlternativeBrowserParameters</Key>
+ <ValueName>3</ValueName>
+ <Value>${url}</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\AlternativeBrowserParameters</Key>
+ <ValueName>4</ValueName>
+ <Value>-profile</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\AlternativeBrowserParameters</Key>
+ <ValueName>5</ValueName>
+ <Value>%HOME%\browser_profile</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\AudioCaptureAllowedUrls</Key>
+ <ValueName>1</ValueName>
+ <Value>https://www.example.com/</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\AudioCaptureAllowedUrls</Key>
+ <ValueName>2</ValueName>
+ <Value>https://[*.]example.edu/</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\AutoOpenAllowedForURLs</Key>
+ <ValueName>1</ValueName>
+ <Value>example.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\AutoOpenAllowedForURLs</Key>
+ <ValueName>2</ValueName>
+ <Value>https://ssl.server.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\AutoOpenAllowedForURLs</Key>
+ <ValueName>3</ValueName>
+ <Value>hosting.com/good_path</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\AutoOpenAllowedForURLs</Key>
+ <ValueName>4</ValueName>
+ <Value>https://server:8080/path</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\AutoOpenAllowedForURLs</Key>
+ <ValueName>5</ValueName>
+ <Value>.exact.hostname.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\AutoOpenFileTypes</Key>
+ <ValueName>1</ValueName>
+ <Value>exe</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\AutoOpenFileTypes</Key>
+ <ValueName>2</ValueName>
+ <Value>txt</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\AutoSelectCertificateForUrls</Key>
+ <ValueName>1</ValueName>
+ <Value>{&quot;pattern&quot;:&quot;https://www.example.com&quot;,&quot;filter&quot;:{&quot;ISSUER&quot;:{&quot;CN&quot;:&quot;certificate issuer name&quot;, &quot;L&quot;: &quot;certificate issuer location&quot;, &quot;O&quot;: &quot;certificate issuer org&quot;, &quot;OU&quot;: &quot;certificate issuer org unit&quot;}, &quot;SUBJECT&quot;:{&quot;CN&quot;:&quot;certificate subject name&quot;, &quot;L&quot;: &quot;certificate subject location&quot;, &quot;O&quot;: &quot;certificate subject org&quot;, &quot;OU&quot;: &quot;certificate subject org unit&quot;}}}</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\AutoplayAllowlist</Key>
+ <ValueName>1</ValueName>
+ <Value>https://www.example.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\AutoplayAllowlist</Key>
+ <ValueName>2</ValueName>
+ <Value>[*.]example.edu</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\BrowserSwitcherChromeParameters</Key>
+ <ValueName>1</ValueName>
+ <Value>--force-dark-mode</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\BrowserSwitcherUrlGreylist</Key>
+ <ValueName>1</ValueName>
+ <Value>ie.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\BrowserSwitcherUrlGreylist</Key>
+ <ValueName>2</ValueName>
+ <Value>!open-in-chrome.ie.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\BrowserSwitcherUrlGreylist</Key>
+ <ValueName>3</ValueName>
+ <Value>foobar.com/ie-only/</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\BrowserSwitcherUrlList</Key>
+ <ValueName>1</ValueName>
+ <Value>ie.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\BrowserSwitcherUrlList</Key>
+ <ValueName>2</ValueName>
+ <Value>!open-in-chrome.ie.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\BrowserSwitcherUrlList</Key>
+ <ValueName>3</ValueName>
+ <Value>foobar.com/ie-only/</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\CertificateTransparencyEnforcementDisabledForCas</Key>
+ <ValueName>1</ValueName>
+ <Value>sha256/AAAAAAAAAAAAAAAAAAAAAA==</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\CertificateTransparencyEnforcementDisabledForCas</Key>
+ <ValueName>2</ValueName>
+ <Value>sha256//////////////////////w==</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\CertificateTransparencyEnforcementDisabledForLegacyCas</Key>
+ <ValueName>1</ValueName>
+ <Value>sha256/AAAAAAAAAAAAAAAAAAAAAA==</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\CertificateTransparencyEnforcementDisabledForLegacyCas</Key>
+ <ValueName>2</ValueName>
+ <Value>sha256//////////////////////w==</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\CertificateTransparencyEnforcementDisabledForUrls</Key>
+ <ValueName>1</ValueName>
+ <Value>example.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\CertificateTransparencyEnforcementDisabledForUrls</Key>
+ <ValueName>2</ValueName>
+ <Value>.example.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\ClearBrowsingDataOnExitList</Key>
+ <ValueName>1</ValueName>
+ <Value>browsing_history</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\ClearBrowsingDataOnExitList</Key>
+ <ValueName>2</ValueName>
+ <Value>download_history</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\ClearBrowsingDataOnExitList</Key>
+ <ValueName>3</ValueName>
+ <Value>cookies_and_other_site_data</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\ClearBrowsingDataOnExitList</Key>
+ <ValueName>4</ValueName>
+ <Value>cached_images_and_files</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\ClearBrowsingDataOnExitList</Key>
+ <ValueName>5</ValueName>
+ <Value>password_signin</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\ClearBrowsingDataOnExitList</Key>
+ <ValueName>6</ValueName>
+ <Value>autofill</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\ClearBrowsingDataOnExitList</Key>
+ <ValueName>7</ValueName>
+ <Value>site_settings</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\ClearBrowsingDataOnExitList</Key>
+ <ValueName>8</ValueName>
+ <Value>hosted_app_data</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\CookiesAllowedForUrls</Key>
+ <ValueName>1</ValueName>
+ <Value>https://www.example.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\CookiesAllowedForUrls</Key>
+ <ValueName>2</ValueName>
+ <Value>[*.]example.edu</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\CookiesBlockedForUrls</Key>
+ <ValueName>1</ValueName>
+ <Value>https://www.example.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\CookiesBlockedForUrls</Key>
+ <ValueName>2</ValueName>
+ <Value>[*.]example.edu</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\CookiesSessionOnlyForUrls</Key>
+ <ValueName>1</ValueName>
+ <Value>https://www.example.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\CookiesSessionOnlyForUrls</Key>
+ <ValueName>2</ValueName>
+ <Value>[*.]example.edu</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\DefaultSearchProviderAlternateURLs</Key>
+ <ValueName>1</ValueName>
+ <Value>https://search.my.company/suggest#q={searchTerms}</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\DefaultSearchProviderAlternateURLs</Key>
+ <ValueName>2</ValueName>
+ <Value>https://search.my.company/suggest/search#q={searchTerms}</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\DefaultSearchProviderEncodings</Key>
+ <ValueName>1</ValueName>
+ <Value>UTF-8</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\DefaultSearchProviderEncodings</Key>
+ <ValueName>2</ValueName>
+ <Value>UTF-16</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\DefaultSearchProviderEncodings</Key>
+ <ValueName>3</ValueName>
+ <Value>GB2312</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\DefaultSearchProviderEncodings</Key>
+ <ValueName>4</ValueName>
+ <Value>ISO-8859-1</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\EnableExperimentalPolicies</Key>
+ <ValueName>1</ValueName>
+ <Value>ExtensionInstallAllowlist</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\EnableExperimentalPolicies</Key>
+ <ValueName>2</ValueName>
+ <Value>ExtensionInstallBlocklist</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\ExplicitlyAllowedNetworkPorts</Key>
+ <ValueName>1</ValueName>
+ <Value>10080</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\ExtensionAllowedTypes</Key>
+ <ValueName>1</ValueName>
+ <Value>hosted_app</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\ExtensionInstallAllowlist</Key>
+ <ValueName>1</ValueName>
+ <Value>extension_id1</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\ExtensionInstallAllowlist</Key>
+ <ValueName>2</ValueName>
+ <Value>extension_id2</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\ExtensionInstallBlocklist</Key>
+ <ValueName>1</ValueName>
+ <Value>extension_id1</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\ExtensionInstallBlocklist</Key>
+ <ValueName>2</ValueName>
+ <Value>extension_id2</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\ExtensionInstallForcelist</Key>
+ <ValueName>1</ValueName>
+ <Value>aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa;https://clients2.google.com/service/update2/crx</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\ExtensionInstallForcelist</Key>
+ <ValueName>2</ValueName>
+ <Value>abcdefghijklmnopabcdefghijklmnop</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\ExtensionInstallSources</Key>
+ <ValueName>1</ValueName>
+ <Value>https://corp.mycompany.com/*</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\FileHandlingAllowedForUrls</Key>
+ <ValueName>1</ValueName>
+ <Value>https://www.example.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\FileHandlingAllowedForUrls</Key>
+ <ValueName>2</ValueName>
+ <Value>[*.]example.edu</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\FileHandlingBlockedForUrls</Key>
+ <ValueName>1</ValueName>
+ <Value>https://www.example.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\FileHandlingBlockedForUrls</Key>
+ <ValueName>2</ValueName>
+ <Value>[*.]example.edu</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\FileSystemReadAskForUrls</Key>
+ <ValueName>1</ValueName>
+ <Value>https://www.example.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\FileSystemReadAskForUrls</Key>
+ <ValueName>2</ValueName>
+ <Value>[*.]example.edu</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\FileSystemReadBlockedForUrls</Key>
+ <ValueName>1</ValueName>
+ <Value>https://www.example.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\FileSystemReadBlockedForUrls</Key>
+ <ValueName>2</ValueName>
+ <Value>[*.]example.edu</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\FileSystemWriteAskForUrls</Key>
+ <ValueName>1</ValueName>
+ <Value>https://www.example.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\FileSystemWriteAskForUrls</Key>
+ <ValueName>2</ValueName>
+ <Value>[*.]example.edu</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\FileSystemWriteBlockedForUrls</Key>
+ <ValueName>1</ValueName>
+ <Value>https://www.example.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\FileSystemWriteBlockedForUrls</Key>
+ <ValueName>2</ValueName>
+ <Value>[*.]example.edu</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\ForcedLanguages</Key>
+ <ValueName>1</ValueName>
+ <Value>en-US</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\HSTSPolicyBypassList</Key>
+ <ValueName>1</ValueName>
+ <Value>meet</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\ImagesAllowedForUrls</Key>
+ <ValueName>1</ValueName>
+ <Value>https://www.example.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\ImagesAllowedForUrls</Key>
+ <ValueName>2</ValueName>
+ <Value>[*.]example.edu</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\ImagesBlockedForUrls</Key>
+ <ValueName>1</ValueName>
+ <Value>https://www.example.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\ImagesBlockedForUrls</Key>
+ <ValueName>2</ValueName>
+ <Value>[*.]example.edu</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\InsecureContentAllowedForUrls</Key>
+ <ValueName>1</ValueName>
+ <Value>https://www.example.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\InsecureContentAllowedForUrls</Key>
+ <ValueName>2</ValueName>
+ <Value>[*.]example.edu</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\InsecureContentBlockedForUrls</Key>
+ <ValueName>1</ValueName>
+ <Value>https://www.example.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\InsecureContentBlockedForUrls</Key>
+ <ValueName>2</ValueName>
+ <Value>[*.]example.edu</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\InsecurePrivateNetworkRequestsAllowedForUrls</Key>
+ <ValueName>1</ValueName>
+ <Value>http://www.example.com:8080</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\InsecurePrivateNetworkRequestsAllowedForUrls</Key>
+ <ValueName>2</ValueName>
+ <Value>[*.]example.edu</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\JavaScriptAllowedForUrls</Key>
+ <ValueName>1</ValueName>
+ <Value>https://www.example.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\JavaScriptAllowedForUrls</Key>
+ <ValueName>2</ValueName>
+ <Value>[*.]example.edu</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\JavaScriptBlockedForUrls</Key>
+ <ValueName>1</ValueName>
+ <Value>https://www.example.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\JavaScriptBlockedForUrls</Key>
+ <ValueName>2</ValueName>
+ <Value>[*.]example.edu</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\LegacySameSiteCookieBehaviorEnabledForDomainList</Key>
+ <ValueName>1</ValueName>
+ <Value>www.example.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\LegacySameSiteCookieBehaviorEnabledForDomainList</Key>
+ <ValueName>2</ValueName>
+ <Value>[*.]example.edu</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\LookalikeWarningAllowlistDomains</Key>
+ <ValueName>1</ValueName>
+ <Value>foo.example.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\LookalikeWarningAllowlistDomains</Key>
+ <ValueName>2</ValueName>
+ <Value>example.org</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\NativeMessagingAllowlist</Key>
+ <ValueName>1</ValueName>
+ <Value>com.native.messaging.host.name1</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\NativeMessagingAllowlist</Key>
+ <ValueName>2</ValueName>
+ <Value>com.native.messaging.host.name2</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\NativeMessagingBlocklist</Key>
+ <ValueName>1</ValueName>
+ <Value>com.native.messaging.host.name1</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\NativeMessagingBlocklist</Key>
+ <ValueName>2</ValueName>
+ <Value>com.native.messaging.host.name2</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\NotificationsAllowedForUrls</Key>
+ <ValueName>1</ValueName>
+ <Value>https://www.example.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\NotificationsAllowedForUrls</Key>
+ <ValueName>2</ValueName>
+ <Value>[*.]example.edu</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\NotificationsBlockedForUrls</Key>
+ <ValueName>1</ValueName>
+ <Value>https://www.example.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\NotificationsBlockedForUrls</Key>
+ <ValueName>2</ValueName>
+ <Value>[*.]example.edu</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\OverrideSecurityRestrictionsOnInsecureOrigin</Key>
+ <ValueName>1</ValueName>
+ <Value>http://testserver.example.com/</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\OverrideSecurityRestrictionsOnInsecureOrigin</Key>
+ <ValueName>2</ValueName>
+ <Value>*.example.org</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\PasswordProtectionLoginURLs</Key>
+ <ValueName>1</ValueName>
+ <Value>https://mydomain.com/login.html</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\PasswordProtectionLoginURLs</Key>
+ <ValueName>2</ValueName>
+ <Value>https://login.mydomain.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\PolicyDictionaryMultipleSourceMergeList</Key>
+ <ValueName>1</ValueName>
+ <Value>ExtensionSettings</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\PolicyListMultipleSourceMergeList</Key>
+ <ValueName>1</ValueName>
+ <Value>ExtensionInstallAllowlist</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\PolicyListMultipleSourceMergeList</Key>
+ <ValueName>2</ValueName>
+ <Value>ExtensionInstallBlocklist</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\PopupsAllowedForUrls</Key>
+ <ValueName>1</ValueName>
+ <Value>https://www.example.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\PopupsAllowedForUrls</Key>
+ <ValueName>2</ValueName>
+ <Value>[*.]example.edu</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\PopupsBlockedForUrls</Key>
+ <ValueName>1</ValueName>
+ <Value>https://www.example.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\PopupsBlockedForUrls</Key>
+ <ValueName>2</ValueName>
+ <Value>[*.]example.edu</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\PrinterTypeDenyList</Key>
+ <ValueName>1</ValueName>
+ <Value>cloud</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\PrinterTypeDenyList</Key>
+ <ValueName>2</ValueName>
+ <Value>privet</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\RemoteAccessHostClientDomainList</Key>
+ <ValueName>1</ValueName>
+ <Value>my-awesome-domain.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\RemoteAccessHostClientDomainList</Key>
+ <ValueName>2</ValueName>
+ <Value>my-auxiliary-domain.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\RemoteAccessHostDomainList</Key>
+ <ValueName>1</ValueName>
+ <Value>my-awesome-domain.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\RemoteAccessHostDomainList</Key>
+ <ValueName>2</ValueName>
+ <Value>my-auxiliary-domain.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\RestoreOnStartupURLs</Key>
+ <ValueName>1</ValueName>
+ <Value>https://example.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\RestoreOnStartupURLs</Key>
+ <ValueName>2</ValueName>
+ <Value>https://www.chromium.org</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\SSLErrorOverrideAllowedForOrigins</Key>
+ <ValueName>1</ValueName>
+ <Value>https://www.example.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\SSLErrorOverrideAllowedForOrigins</Key>
+ <ValueName>2</ValueName>
+ <Value>[*.]example.edu</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\SafeBrowsingAllowlistDomains</Key>
+ <ValueName>1</ValueName>
+ <Value>mydomain.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\SafeBrowsingAllowlistDomains</Key>
+ <ValueName>2</ValueName>
+ <Value>myuniversity.edu</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\SecurityKeyPermitAttestation</Key>
+ <ValueName>1</ValueName>
+ <Value>https://example.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\SensorsAllowedForUrls</Key>
+ <ValueName>1</ValueName>
+ <Value>https://www.example.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\SensorsAllowedForUrls</Key>
+ <ValueName>2</ValueName>
+ <Value>[*.]example.edu</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\SensorsBlockedForUrls</Key>
+ <ValueName>1</ValueName>
+ <Value>https://www.example.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\SensorsBlockedForUrls</Key>
+ <ValueName>2</ValueName>
+ <Value>[*.]example.edu</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\SerialAskForUrls</Key>
+ <ValueName>1</ValueName>
+ <Value>https://www.example.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\SerialAskForUrls</Key>
+ <ValueName>2</ValueName>
+ <Value>[*.]example.edu</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\SerialBlockedForUrls</Key>
+ <ValueName>1</ValueName>
+ <Value>https://www.example.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\SerialBlockedForUrls</Key>
+ <ValueName>2</ValueName>
+ <Value>[*.]example.edu</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\SpellcheckLanguage</Key>
+ <ValueName>1</ValueName>
+ <Value>fr</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\SpellcheckLanguage</Key>
+ <ValueName>2</ValueName>
+ <Value>es</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\SpellcheckLanguageBlocklist</Key>
+ <ValueName>1</ValueName>
+ <Value>fr</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\SpellcheckLanguageBlocklist</Key>
+ <ValueName>2</ValueName>
+ <Value>es</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\SyncTypesListDisabled</Key>
+ <ValueName>1</ValueName>
+ <Value>bookmarks</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\URLAllowlist</Key>
+ <ValueName>1</ValueName>
+ <Value>example.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\URLAllowlist</Key>
+ <ValueName>2</ValueName>
+ <Value>https://ssl.server.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\URLAllowlist</Key>
+ <ValueName>3</ValueName>
+ <Value>hosting.com/good_path</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\URLAllowlist</Key>
+ <ValueName>4</ValueName>
+ <Value>https://server:8080/path</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\URLAllowlist</Key>
+ <ValueName>5</ValueName>
+ <Value>.exact.hostname.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\URLBlocklist</Key>
+ <ValueName>1</ValueName>
+ <Value>example.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\URLBlocklist</Key>
+ <ValueName>2</ValueName>
+ <Value>https://ssl.server.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\URLBlocklist</Key>
+ <ValueName>3</ValueName>
+ <Value>hosting.com/bad_path</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\URLBlocklist</Key>
+ <ValueName>4</ValueName>
+ <Value>https://server:8080/path</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\URLBlocklist</Key>
+ <ValueName>5</ValueName>
+ <Value>.exact.hostname.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\URLBlocklist</Key>
+ <ValueName>6</ValueName>
+ <Value>file://*</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\URLBlocklist</Key>
+ <ValueName>7</ValueName>
+ <Value>custom_scheme:*</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\URLBlocklist</Key>
+ <ValueName>8</ValueName>
+ <Value>*</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\VideoCaptureAllowedUrls</Key>
+ <ValueName>1</ValueName>
+ <Value>https://www.example.com/</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\VideoCaptureAllowedUrls</Key>
+ <ValueName>2</ValueName>
+ <Value>https://[*.]example.edu/</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\WebRtcLocalIpsAllowedUrls</Key>
+ <ValueName>1</ValueName>
+ <Value>https://www.example.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\WebRtcLocalIpsAllowedUrls</Key>
+ <ValueName>2</ValueName>
+ <Value>*example.com*</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\WebUsbAskForUrls</Key>
+ <ValueName>1</ValueName>
+ <Value>https://www.example.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\WebUsbAskForUrls</Key>
+ <ValueName>2</ValueName>
+ <Value>[*.]example.edu</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\WebUsbBlockedForUrls</Key>
+ <ValueName>1</ValueName>
+ <Value>https://www.example.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\WebUsbBlockedForUrls</Key>
+ <ValueName>2</ValueName>
+ <Value>[*.]example.edu</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome\Recommended</Key>
+ <ValueName>AlternateErrorPagesEnabled</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\Recommended</Key>
+ <ValueName>ApplicationLocaleValue</ValueName>
+ <Value>en</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome\Recommended</Key>
+ <ValueName>AutofillAddressEnabled</ValueName>
+ <Value>0</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome\Recommended</Key>
+ <ValueName>AutofillCreditCardEnabled</ValueName>
+ <Value>0</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome\Recommended</Key>
+ <ValueName>BackgroundModeEnabled</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome\Recommended</Key>
+ <ValueName>BlockThirdPartyCookies</ValueName>
+ <Value>0</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome\Recommended</Key>
+ <ValueName>BookmarkBarEnabled</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\Recommended</Key>
+ <ValueName>DefaultDownloadDirectory</ValueName>
+ <Value>/home/${user_name}/Downloads</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\Recommended</Key>
+ <ValueName>DownloadDirectory</ValueName>
+ <Value>/home/${user_name}/Downloads</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome\Recommended</Key>
+ <ValueName>DownloadRestrictions</ValueName>
+ <Value>2</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome\Recommended</Key>
+ <ValueName>HomepageIsNewTabPage</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\Recommended</Key>
+ <ValueName>HomepageLocation</ValueName>
+ <Value>https://www.chromium.org</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome\Recommended</Key>
+ <ValueName>ImportAutofillFormData</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome\Recommended</Key>
+ <ValueName>ImportBookmarks</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome\Recommended</Key>
+ <ValueName>ImportHistory</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome\Recommended</Key>
+ <ValueName>ImportSavedPasswords</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome\Recommended</Key>
+ <ValueName>ImportSearchEngine</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome\Recommended</Key>
+ <ValueName>MetricsReportingEnabled</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome\Recommended</Key>
+ <ValueName>NetworkPredictionOptions</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome\Recommended</Key>
+ <ValueName>PasswordLeakDetectionEnabled</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome\Recommended</Key>
+ <ValueName>PasswordManagerEnabled</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome\Recommended</Key>
+ <ValueName>PrintHeaderFooter</ValueName>
+ <Value>0</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome\Recommended</Key>
+ <ValueName>PrintPreviewUseSystemDefaultPrinter</ValueName>
+ <Value>0</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\Recommended</Key>
+ <ValueName>RegisteredProtocolHandlers</ValueName>
+ <Value>[{&quot;default&quot;: true, &quot;protocol&quot;: &quot;mailto&quot;, &quot;url&quot;: &quot;https://mail.google.com/mail/?extsrc=mailto&amp;url=%s&quot;}]</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome\Recommended</Key>
+ <ValueName>RestoreOnStartup</ValueName>
+ <Value>4</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome\Recommended</Key>
+ <ValueName>SafeBrowsingForTrustedSourcesEnabled</ValueName>
+ <Value>0</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome\Recommended</Key>
+ <ValueName>SafeBrowsingProtectionLevel</ValueName>
+ <Value>2</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome\Recommended</Key>
+ <ValueName>SearchSuggestEnabled</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome\Recommended</Key>
+ <ValueName>ShowFullUrlsInAddressBar</ValueName>
+ <Value>0</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome\Recommended</Key>
+ <ValueName>ShowHomeButton</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome\Recommended</Key>
+ <ValueName>SpellCheckServiceEnabled</ValueName>
+ <Value>0</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Google\Chrome\Recommended</Key>
+ <ValueName>TranslateEnabled</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\Recommended\RestoreOnStartupURLs</Key>
+ <ValueName>1</ValueName>
+ <Value>https://example.com</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Google\Chrome\Recommended\RestoreOnStartupURLs</Key>
+ <ValueName>2</ValueName>
+ <Value>https://www.chromium.org</Value>
+ </Entry>
+</PolFile>
+"""
+
+chromium_json_expected_managed = \
+b"""
+{
+ "FileSystemWriteAskForUrls": [
+ "https://www.example.com",
+ "[*.]example.edu"
+ ],
+ "InsecureContentBlockedForUrls": [
+ "https://www.example.com",
+ "[*.]example.edu"
+ ],
+ "DefaultSearchProviderImageURLPostParams": "content={imageThumbnail},url={imageURL},sbisrc={SearchSource}",
+ "BrowserAddPersonEnabled": true,
+ "DefaultSearchProviderImageURL": "https://search.my.company/searchbyimage/upload",
+ "ShowHomeButton": true,
+ "ClearBrowsingDataOnExitList": [
+ "browsing_history",
+ "download_history",
+ "cookies_and_other_site_data",
+ "cached_images_and_files",
+ "password_signin",
+ "autofill",
+ "site_settings",
+ "hosted_app_data"
+ ],
+ "JavaScriptAllowedForUrls": [
+ "https://www.example.com",
+ "[*.]example.edu"
+ ],
+ "AmbientAuthenticationInPrivateModesEnabled": 0,
+ "AllowFileSelectionDialogs": true,
+ "PrintingAllowedBackgroundGraphicsModes": "enabled",
+ "DnsOverHttpsTemplates": "https://dns.example.net/dns-query{?dns}",
+ "ComponentUpdatesEnabled": true,
+ "RemoteAccessHostAllowRemoteAccessConnections": false,
+ "WindowOcclusionEnabled": true,
+ "PrintPreviewUseSystemDefaultPrinter": false,
+ "AutoLaunchProtocolsFromOrigins": [
+ {
+ "allowed_origins": [
+ "example.com",
+ "http://www.example.com:8080"
+ ],
+ "protocol": "spotify"
+ },
+ {
+ "allowed_origins": [
+ "https://example.com",
+ "https://.mail.example.com"
+ ],
+ "protocol": "teams"
+ },
+ {
+ "allowed_origins": [
+ "*"
+ ],
+ "protocol": "outlook"
+ }
+ ],
+ "ManagedConfigurationPerOrigin": [
+ {
+ "origin": "https://www.google.com",
+ "managed_configuration_hash": "asd891jedasd12ue9h",
+ "managed_configuration_url": "https://gstatic.google.com/configuration.json"
+ },
+ {
+ "origin": "https://www.example.com",
+ "managed_configuration_hash": "djio12easd89u12aws",
+ "managed_configuration_url": "https://gstatic.google.com/configuration2.json"
+ }
+ ],
+ "SyncTypesListDisabled": [
+ "bookmarks"
+ ],
+ "SecurityKeyPermitAttestation": [
+ "https://example.com"
+ ],
+ "DefaultSearchProviderSearchURL": "https://search.my.company/search?q={searchTerms}",
+ "MetricsReportingEnabled": true,
+ "MaxInvalidationFetchDelay": 10000,
+ "AudioProcessHighPriorityEnabled": true,
+ "ExtensionInstallForcelist": [
+ "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa;https://clients2.google.com/service/update2/crx",
+ "abcdefghijklmnopabcdefghijklmnop"
+ ],
+ "ExternalProtocolDialogShowAlwaysOpenCheckbox": true,
+ "CookiesBlockedForUrls": [
+ "https://www.example.com",
+ "[*.]example.edu"
+ ],
+ "BrowserSwitcherExternalSitelistUrl": "http://example.com/sitelist.xml",
+ "AudioCaptureAllowedUrls": [
+ "https://www.example.com/",
+ "https://[*.]example.edu/"
+ ],
+ "NTPCustomBackgroundEnabled": true,
+ "BlockExternalExtensions": true,
+ "BrowserSwitcherChromeParameters": [
+ "--force-dark-mode"
+ ],
+ "SafeSitesFilterBehavior": 0,
+ "EnableOnlineRevocationChecks": false,
+ "ImagesBlockedForUrls": [
+ "https://www.example.com",
+ "[*.]example.edu"
+ ],
+ "InsecureFormsWarningsEnabled": true,
+ "RelaunchNotificationPeriod": 604800000,
+ "TotalMemoryLimitMb": 2048,
+ "CloudManagementEnrollmentMandatory": true,
+ "ClickToCallEnabled": true,
+ "AppCacheForceEnabled": false,
+ "UrlKeyedAnonymizedDataCollectionEnabled": true,
+ "FullscreenAllowed": true,
+ "AuthSchemes": "basic,digest,ntlm,negotiate",
+ "PasswordLeakDetectionEnabled": true,
+ "AuthServerAllowlist": "*.example.com,example.com",
+ "AllowSyncXHRInPageDismissal": false,
+ "PasswordProtectionChangePasswordURL": "https://mydomain.com/change_password.html",
+ "MaxConnectionsPerProxy": 32,
+ "RemoteAccessHostMaximumSessionDurationMinutes": 1200,
+ "RemoteAccessHostAllowFileTransfer": false,
+ "PrintRasterizationMode": 1,
+ "CertificateTransparencyEnforcementDisabledForLegacyCas": [
+ "sha256/AAAAAAAAAAAAAAAAAAAAAA==",
+ "sha256//////////////////////w=="
+ ],
+ "DefaultWebBluetoothGuardSetting": 2,
+ "AutoplayAllowed": true,
+ "BrowserSwitcherUrlList": [
+ "ie.com",
+ "!open-in-chrome.ie.com",
+ "foobar.com/ie-only/"
+ ],
+ "CertificateTransparencyEnforcementDisabledForUrls": [
+ "example.com",
+ ".example.com"
+ ],
+ "SpellcheckLanguageBlocklist": [
+ "fr",
+ "es"
+ ],
+ "PrintHeaderFooter": false,
+ "ShowAppsShortcutInBookmarkBar": false,
+ "SerialAskForUrls": [
+ "https://www.example.com",
+ "[*.]example.edu"
+ ],
+ "ImagesAllowedForUrls": [
+ "https://www.example.com",
+ "[*.]example.edu"
+ ],
+ "ProfilePickerOnStartupAvailability": 0,
+ "CommandLineFlagSecurityWarningsEnabled": true,
+ "QuicAllowed": true,
+ "IntensiveWakeUpThrottlingEnabled": true,
+ "WPADQuickCheckEnabled": true,
+ "SensorsAllowedForUrls": [
+ "https://www.example.com",
+ "[*.]example.edu"
+ ],
+ "NTPCardsVisible": true,
+ "DefaultSearchProviderAlternateURLs": [
+ "https://search.my.company/suggest#q={searchTerms}",
+ "https://search.my.company/suggest/search#q={searchTerms}"
+ ],
+ "DisableSafeBrowsingProceedAnyway": true,
+ "DefaultFileSystemWriteGuardSetting": 2,
+ "DefaultSearchProviderSuggestURL": "https://search.my.company/suggest?q={searchTerms}",
+ "SSLErrorOverrideAllowed": true,
+ "CloudPrintProxyEnabled": true,
+ "BrowserSwitcherUrlGreylist": [
+ "ie.com",
+ "!open-in-chrome.ie.com",
+ "foobar.com/ie-only/"
+ ],
+ "BrowserNetworkTimeQueriesEnabled": true,
+ "WebUsbAllowDevicesForUrls": [
+ {
+ "urls": [
+ "https://google.com"
+ ],
+ "devices": [
+ {
+ "vendor_id": 1234,
+ "product_id": 5678
+ }
+ ]
+ }
+ ],
+ "TaskManagerEndProcessEnabled": true,
+ "SuppressDifferentOriginSubframeDialogs": true,
+ "UserDataDir": "${users}/${user_name}/Chrome",
+ "CookiesAllowedForUrls": [
+ "https://www.example.com",
+ "[*.]example.edu"
+ ],
+ "SuppressUnsupportedOSWarning": true,
+ "RequireOnlineRevocationChecksForLocalAnchors": false,
+ "BrowsingDataLifetime": [
+ {
+ "data_types": [
+ "browsing_history"
+ ],
+ "time_to_live_in_hours": 24
+ },
+ {
+ "data_types": [
+ "password_signin",
+ "autofill"
+ ],
+ "time_to_live_in_hours": 12
+ }
+ ],
+ "FileHandlingBlockedForUrls": [
+ "https://www.example.com",
+ "[*.]example.edu"
+ ],
+ "AudioCaptureAllowed": false,
+ "PromotionalTabsEnabled": false,
+ "ShowFullUrlsInAddressBar": false,
+ "EnableMediaRouter": true,
+ "BrowserSwitcherDelay": 10000,
+ "AllowDinosaurEasterEgg": false,
+ "ImportSearchEngine": true,
+ "PrintingBackgroundGraphicsDefault": "enabled",
+ "TripleDESEnabled": false,
+ "AutoplayAllowlist": [
+ "https://www.example.com",
+ "[*.]example.edu"
+ ],
+ "RemoteAccessHostUdpPortRange": "12400-12409",
+ "DefaultSearchProviderIconURL": "https://search.my.company/favicon.ico",
+ "BrowserSwitcherChromePath": "${chrome}",
+ "InsecureContentAllowedForUrls": [
+ "https://www.example.com",
+ "[*.]example.edu"
+ ],
+ "DefaultSearchProviderSearchURLPostParams": "q={searchTerms},ie=utf-8,oe=utf-8",
+ "ForceGoogleSafeSearch": false,
+ "UserFeedbackAllowed": true,
+ "ForceYouTubeRestrict": 0,
+ "ApplicationLocaleValue": "en",
+ "RoamingProfileSupportEnabled": true,
+ "AlternativeBrowserPath": "${ie}",
+ "AlternativeBrowserParameters": [
+ "-foreground",
+ "-new-window",
+ "${url}",
+ "-profile",
+ "%HOME%\\\\browser_profile"
+ ],
+ "AdvancedProtectionAllowed": true,
+ "EditBookmarksEnabled": false,
+ "DefaultPrinterSelection": "{ \\"kind\\": \\"cloud\\", \\"idPattern\\": \\".*public\\", \\"namePattern\\": \\".*Color\\" }",
+ "SSLVersionMin": "tls1",
+ "SharedArrayBufferUnrestrictedAccessAllowed": true,
+ "DefaultSerialGuardSetting": 2,
+ "DefaultPopupsSetting": 1,
+ "IntranetRedirectBehavior": 1,
+ "RendererCodeIntegrityEnabled": false,
+ "BrowserGuestModeEnforced": true,
+ "HSTSPolicyBypassList": [
+ "meet"
+ ],
+ "DefaultWebUsbGuardSetting": 2,
+ "CECPQ2Enabled": true,
+ "RemoteAccessHostDomainList": [
+ "my-awesome-domain.com",
+ "my-auxiliary-domain.com"
+ ],
+ "URLBlocklist": [
+ "example.com",
+ "https://ssl.server.com",
+ "hosting.com/bad_path",
+ "https://server:8080/path",
+ ".exact.hostname.com",
+ "file://*",
+ "custom_scheme:*",
+ "*"
+ ],
+ "IsolateOrigins": "https://example.com/,https://othersite.org/",
+ "ExtensionAllowedTypes": [
+ "hosted_app"
+ ],
+ "NativeMessagingBlocklist": [
+ "com.native.messaging.host.name1",
+ "com.native.messaging.host.name2"
+ ],
+ "ExtensionSettings": {
+ "abcdefghijklmnopabcdefghijklmnop": {
+ "blocked_permissions": [
+ "history"
+ ],
+ "minimum_version_required": "1.0.1",
+ "toolbar_pin": "force_pinned",
+ "installation_mode": "allowed"
+ },
+ "bcdefghijklmnopabcdefghijklmnopa": {
+ "runtime_blocked_hosts": [
+ "*://*.example.com"
+ ],
+ "allowed_permissions": [
+ "downloads"
+ ],
+ "update_url": "https://example.com/update_url",
+ "runtime_allowed_hosts": [
+ "*://good.example.com"
+ ],
+ "installation_mode": "force_installed"
+ },
+ "update_url:https://www.example.com/update.xml": {
+ "allowed_permissions": [
+ "downloads"
+ ],
+ "blocked_permissions": [
+ "wallpaper"
+ ],
+ "installation_mode": "allowed"
+ },
+ "cdefghijklmnopabcdefghijklmnopab": {
+ "blocked_install_message": "Custom error message.",
+ "installation_mode": "blocked"
+ },
+ "*": {
+ "blocked_permissions": [
+ "downloads",
+ "bookmarks"
+ ],
+ "installation_mode": "blocked",
+ "runtime_blocked_hosts": [
+ "*://*.example.com"
+ ],
+ "blocked_install_message": "Custom error message.",
+ "allowed_types": [
+ "hosted_app"
+ ],
+ "runtime_allowed_hosts": [
+ "*://good.example.com"
+ ],
+ "install_sources": [
+ "https://company-intranet/chromeapps"
+ ]
+ },
+ "defghijklmnopabcdefghijklmnopabc,efghijklmnopabcdefghijklmnopabcd": {
+ "blocked_install_message": "Custom error message.",
+ "installation_mode": "blocked"
+ },
+ "fghijklmnopabcdefghijklmnopabcde": {
+ "blocked_install_message": "Custom removal message.",
+ "installation_mode": "removed"
+ },
+ "ghijklmnopabcdefghijklmnopabcdef": {
+ "update_url": "https://example.com/update_url",
+ "override_update_url": true,
+ "installation_mode": "force_installed"
+ }
+ },
+ "FileSystemReadAskForUrls": [
+ "https://www.example.com",
+ "[*.]example.edu"
+ ],
+ "SpellCheckServiceEnabled": false,
+ "ExtensionInstallSources": [
+ "https://corp.mycompany.com/*"
+ ],
+ "PrinterTypeDenyList": [
+ "cloud",
+ "privet"
+ ],
+ "SharedClipboardEnabled": true,
+ "BlockThirdPartyCookies": false,
+ "MediaRouterCastAllowAllIPs": false,
+ "DnsOverHttpsMode": "off",
+ "SyncDisabled": true,
+ "LookalikeWarningAllowlistDomains": [
+ "foo.example.com",
+ "example.org"
+ ],
+ "UserDataSnapshotRetentionLimit": 3,
+ "SafeBrowsingProtectionLevel": 2,
+ "ScrollToTextFragmentEnabled": false,
+ "ImportBookmarks": true,
+ "DefaultBrowserSettingEnabled": true,
+ "DefaultSearchProviderEnabled": true,
+ "AdditionalDnsQueryTypesEnabled": true,
+ "PolicyRefreshRate": 3600000,
+ "PrintingPaperSizeDefault": {
+ "custom_size": {
+ "width": 210000,
+ "height": 297000
+ },
+ "name": "custom"
+ },
+ "RestoreOnStartup": 4,
+ "PasswordProtectionWarningTrigger": 1,
+ "ChromeCleanupEnabled": true,
+ "AbusiveExperienceInterventionEnforce": true,
+ "BasicAuthOverHttpEnabled": false,
+ "EnableAuthNegotiatePort": false,
+ "DefaultGeolocationSetting": 1,
+ "PolicyDictionaryMultipleSourceMergeList": [
+ "ExtensionSettings"
+ ],
+ "AllowedDomainsForApps": "managedchrome.com,example.com",
+ "DisableAuthNegotiateCnameLookup": false,
+ "IncognitoModeAvailability": 1,
+ "ChromeVariations": 1,
+ "DefaultSearchProviderNewTabURL": "https://search.my.company/newtab",
+ "SavingBrowserHistoryDisabled": true,
+ "SpellcheckEnabled": false,
+ "FileSystemWriteBlockedForUrls": [
+ "https://www.example.com",
+ "[*.]example.edu"
+ ],
+ "BuiltInDnsClientEnabled": true,
+ "SSLErrorOverrideAllowedForOrigins": [
+ "https://www.example.com",
+ "[*.]example.edu"
+ ],
+ "WebRtcIPHandling": "default",
+ "DefaultNotificationsSetting": 2,
+ "PopupsAllowedForUrls": [
+ "https://www.example.com",
+ "[*.]example.edu"
+ ],
+ "TranslateEnabled": true,
+ "DefaultSearchProviderEncodings": [
+ "UTF-8",
+ "UTF-16",
+ "GB2312",
+ "ISO-8859-1"
+ ],
+ "DownloadRestrictions": 2,
+ "PromptForDownloadLocation": false,
+ "DisablePrintPreview": false,
+ "NetworkPredictionOptions": 1,
+ "FileSystemReadBlockedForUrls": [
+ "https://www.example.com",
+ "[*.]example.edu"
+ ],
+ "AutoOpenFileTypes": [
+ "exe",
+ "txt"
+ ],
+ "DownloadDirectory": "/home/${user_name}/Downloads",
+ "ImportHomepage": true,
+ "GloballyScopeHTTPAuthCacheEnabled": false,
+ "CloudManagementEnrollmentToken": "37185d02-e055-11e7-80c1-9a214cf093ae",
+ "ThirdPartyBlockingEnabled": false,
+ "AdsSettingForIntrusiveAdsSites": 1,
+ "FetchKeepaliveDurationSecondsOnShutdown": 1,
+ "BookmarkBarEnabled": true,
+ "DisableScreenshots": true,
+ "AccessibilityImageLabelsEnabled": false,
+ "RemoteAccessHostAllowUiAccessForRemoteAssistance": true,
+ "PopupsBlockedForUrls": [
+ "https://www.example.com",
+ "[*.]example.edu"
+ ],
+ "DefaultFileSystemReadGuardSetting": 2,
+ "BrowserSignin": 2,
+ "WebRtcAllowLegacyTLSProtocols": false,
+ "PasswordManagerEnabled": true,
+ "SafeBrowsingExtendedReportingEnabled": true,
+ "CloudPolicyOverridesPlatformPolicy": false,
+ "InsecurePrivateNetworkRequestsAllowedForUrls": [
+ "http://www.example.com:8080",
+ "[*.]example.edu"
+ ],
+ "RelaunchNotification": 1,
+ "AlwaysOpenPdfExternally": true,
+ "DefaultFileHandlingGuardSetting": 2,
+ "ForceEphemeralProfiles": true,
+ "PasswordProtectionLoginURLs": [
+ "https://mydomain.com/login.html",
+ "https://login.mydomain.com"
+ ],
+ "BrowserSwitcherExternalGreylistUrl": "http://example.com/greylist.xml",
+ "BrowserGuestModeEnabled": true,
+ "MediaRecommendationsEnabled": true,
+ "WebRtcLocalIpsAllowedUrls": [
+ "https://www.example.com",
+ "*example.com*"
+ ],
+ "DeveloperToolsAvailability": 2,
+ "DNSInterceptionChecksEnabled": true,
+ "DefaultSearchProviderContextMenuAccessAllowed": true,
+ "RemoteAccessHostRequireCurtain": false,
+ "PaymentMethodQueryEnabled": true,
+ "HomepageLocation": "https://www.chromium.org",
+ "WebUsbAskForUrls": [
+ "https://www.example.com",
+ "[*.]example.edu"
+ ],
+ "RemoteAccessHostAllowClientPairing": false,
+ "ProxySettings": {
+ "ProxyMode": "direct",
+ "ProxyPacUrl": "https://internal.site/example.pac",
+ "ProxyServer": "123.123.123.123:8080",
+ "ProxyServerMode": 2,
+ "ProxyBypassList": "https://www.example1.com,https://www.example2.com,https://internalsite/"
+ },
+ "AutofillCreditCardEnabled": false,
+ "FileHandlingAllowedForUrls": [
+ "https://www.example.com",
+ "[*.]example.edu"
+ ],
+ "ChromeCleanupReportingEnabled": true,
+ "AlternateErrorPagesEnabled": true,
+ "WebRtcEventLogCollectionAllowed": true,
+ "AutoSelectCertificateForUrls": [
+ "{\\"pattern\\":\\"https://www.example.com\\",\\"filter\\":{\\"ISSUER\\":{\\"CN\\":\\"certificate issuer name\\", \\"L\\": \\"certificate issuer location\\", \\"O\\": \\"certificate issuer org\\", \\"OU\\": \\"certificate issuer org unit\\"}, \\"SUBJECT\\":{\\"CN\\":\\"certificate subject name\\", \\"L\\": \\"certificate subject location\\", \\"O\\": \\"certificate subject org\\", \\"OU\\": \\"certificate subject org unit\\"}}}"
+ ],
+ "PolicyListMultipleSourceMergeList": [
+ "ExtensionInstallAllowlist",
+ "ExtensionInstallBlocklist"
+ ],
+ "CertificateTransparencyEnforcementDisabledForCas": [
+ "sha256/AAAAAAAAAAAAAAAAAAAAAA==",
+ "sha256//////////////////////w=="
+ ],
+ "CookiesSessionOnlyForUrls": [
+ "https://www.example.com",
+ "[*.]example.edu"
+ ],
+ "SitePerProcess": true,
+ "RemoteAccessHostFirewallTraversal": false,
+ "DefaultSearchProviderSuggestURLPostParams": "q={searchTerms},ie=utf-8,oe=utf-8",
+ "BackgroundModeEnabled": true,
+ "DefaultJavaScriptSetting": 1,
+ "ForcedLanguages": [
+ "en-US"
+ ],
+ "ManagedBookmarks": [
+ {
+ "toplevel_name": "My managed bookmarks folder"
+ },
+ {
+ "url": "google.com",
+ "name": "Google"
+ },
+ {
+ "url": "youtube.com",
+ "name": "Youtube"
+ },
+ {
+ "children": [
+ {
+ "url": "chromium.org",
+ "name": "Chromium"
+ },
+ {
+ "url": "dev.chromium.org",
+ "name": "Chromium Developers"
+ }
+ ],
+ "name": "Chrome links"
+ }
+ ],
+ "Disable3DAPIs": false,
+ "CloudPrintSubmitEnabled": true,
+ "DefaultCookiesSetting": 1,
+ "ExtensionInstallBlocklist": [
+ "extension_id1",
+ "extension_id2"
+ ],
+ "URLAllowlist": [
+ "example.com",
+ "https://ssl.server.com",
+ "hosting.com/good_path",
+ "https://server:8080/path",
+ ".exact.hostname.com"
+ ],
+ "ExplicitlyAllowedNetworkPorts": [
+ "10080"
+ ],
+ "HomepageIsNewTabPage": true,
+ "SensorsBlockedForUrls": [
+ "https://www.example.com",
+ "[*.]example.edu"
+ ],
+ "BrowserLabsEnabled": false,
+ "NotificationsAllowedForUrls": [
+ "https://www.example.com",
+ "[*.]example.edu"
+ ],
+ "NativeMessagingUserLevelHosts": false,
+ "AuthNegotiateDelegateAllowlist": "foobar.example.com",
+ "CloudUserPolicyMerge": true,
+ "OverrideSecurityRestrictionsOnInsecureOrigin": [
+ "http://testserver.example.com/",
+ "*.example.org"
+ ],
+ "HideWebStoreIcon": true,
+ "SafeBrowsingForTrustedSourcesEnabled": false,
+ "NewTabPageLocation": "https://www.chromium.org",
+ "DiskCacheSize": 104857600,
+ "BrowserSwitcherUseIeSitelist": true,
+ "WebRtcUdpPortRange": "10000-11999",
+ "EnterpriseHardwarePlatformAPIEnabled": true,
+ "AutoOpenAllowedForURLs": [
+ "example.com",
+ "https://ssl.server.com",
+ "hosting.com/good_path",
+ "https://server:8080/path",
+ ".exact.hostname.com"
+ ],
+ "NativeMessagingAllowlist": [
+ "com.native.messaging.host.name1",
+ "com.native.messaging.host.name2"
+ ],
+ "DefaultSearchProviderName": "My Intranet Search",
+ "JavaScriptBlockedForUrls": [
+ "https://www.example.com",
+ "[*.]example.edu"
+ ],
+ "EnableExperimentalPolicies": [
+ "ExtensionInstallAllowlist",
+ "ExtensionInstallBlocklist"
+ ],
+ "SafeBrowsingAllowlistDomains": [
+ "mydomain.com",
+ "myuniversity.edu"
+ ],
+ "AutofillAddressEnabled": false,
+ "AllowCrossOriginAuthPrompt": false,
+ "SpellcheckLanguage": [
+ "fr",
+ "es"
+ ],
+ "VideoCaptureAllowed": false,
+ "ScreenCaptureAllowed": false,
+ "VideoCaptureAllowedUrls": [
+ "https://www.example.com/",
+ "https://[*.]example.edu/"
+ ],
+ "ImportHistory": true,
+ "ShowCastIconInToolbar": false,
+ "RestoreOnStartupURLs": [
+ "https://example.com",
+ "https://www.chromium.org"
+ ],
+ "LegacySameSiteCookieBehaviorEnabledForDomainList": [
+ "www.example.com",
+ "[*.]example.edu"
+ ],
+ "PrintingEnabled": true,
+ "ImportSavedPasswords": true,
+ "EnableDeprecatedPrivetPrinting": true,
+ "InsecurePrivateNetworkRequestsAllowed": false,
+ "HeadlessMode": 2,
+ "PolicyAtomicGroupsEnabled": true,
+ "HardwareAccelerationModeEnabled": true,
+ "AllowDeletingBrowserHistory": true,
+ "DefaultSearchProviderKeyword": "mis",
+ "ExtensionInstallAllowlist": [
+ "extension_id1",
+ "extension_id2"
+ ],
+ "WebAppInstallForceList": [
+ {
+ "url": "https://www.google.com/maps",
+ "create_desktop_shortcut": true,
+ "default_launch_container": "window"
+ },
+ {
+ "url": "https://docs.google.com",
+ "default_launch_container": "tab"
+ },
+ {
+ "url": "https://docs.google.com/editor",
+ "fallback_app_name": "Editor",
+ "default_launch_container": "window"
+ }
+ ],
+ "DiskCacheDir": "${user_home}/Chrome_cache",
+ "SignedHTTPExchangeEnabled": true,
+ "SearchSuggestEnabled": true,
+ "BrowserThemeColor": "#FFFFFF",
+ "RestrictSigninToPattern": ".*@example\\\\.com",
+ "DefaultInsecureContentSetting": 2,
+ "DefaultSensorsSetting": 2,
+ "AudioSandboxEnabled": true,
+ "RemoteAccessHostAllowRelayedConnection": false,
+ "RoamingProfileLocation": "${roaming_app_data}\\\\chrome-profile",
+ "UserAgentClientHintsEnabled": true,
+ "TargetBlankImpliesNoOpener": false,
+ "BrowserSwitcherKeepLastChromeTab": false,
+ "RemoteAccessHostClientDomainList": [
+ "my-awesome-domain.com",
+ "my-auxiliary-domain.com"
+ ],
+ "NotificationsBlockedForUrls": [
+ "https://www.example.com",
+ "[*.]example.edu"
+ ],
+ "SerialBlockedForUrls": [
+ "https://www.example.com",
+ "[*.]example.edu"
+ ],
+ "DefaultImagesSetting": 1,
+ "SigninInterceptionEnabled": true,
+ "WebUsbBlockedForUrls": [
+ "https://www.example.com",
+ "[*.]example.edu"
+ ],
+ "ImportAutofillFormData": true,
+ "BrowserSwitcherEnabled": true
+}
+"""
+
+chromium_json_expected_recommended = \
+b"""
+{
+ "BackgroundModeEnabled": true,
+ "RestoreOnStartup": 4,
+ "RegisteredProtocolHandlers": [
+ {
+ "default": true,
+ "url": "https://mail.google.com/mail/?extsrc=mailto&url=%s",
+ "protocol": "mailto"
+ }
+ ],
+ "ShowHomeButton": true,
+ "PrintHeaderFooter": false,
+ "SafeBrowsingForTrustedSourcesEnabled": false,
+ "ShowFullUrlsInAddressBar": false,
+ "MetricsReportingEnabled": true,
+ "SpellCheckServiceEnabled": false,
+ "ImportSearchEngine": true,
+ "DownloadRestrictions": 2,
+ "NetworkPredictionOptions": 1,
+ "DownloadDirectory": "/home/${user_name}/Downloads",
+ "TranslateEnabled": true,
+ "AutofillAddressEnabled": false,
+ "BookmarkBarEnabled": true,
+ "PrintPreviewUseSystemDefaultPrinter": false,
+ "ApplicationLocaleValue": "en",
+ "ImportHistory": true,
+ "RestoreOnStartupURLs": [
+ "https://example.com",
+ "https://www.chromium.org"
+ ],
+ "PasswordManagerEnabled": true,
+ "ImportSavedPasswords": true,
+ "DefaultDownloadDirectory": "/home/${user_name}/Downloads",
+ "PasswordLeakDetectionEnabled": true,
+ "SearchSuggestEnabled": true,
+ "AlternateErrorPagesEnabled": true,
+ "HomepageIsNewTabPage": true,
+ "ImportAutofillFormData": true,
+ "BlockThirdPartyCookies": false,
+ "AutofillCreditCardEnabled": false,
+ "HomepageLocation": "https://www.chromium.org",
+ "SafeBrowsingProtectionLevel": 2,
+ "ImportBookmarks": true
+}
+"""
+
+firewalld_reg_pol = \
+br"""
+<?xml version="1.0" encoding="utf-8"?>
+<PolFile num_entries="6" signature="PReg" version="1">
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Samba\Unix Settings\Firewalld</Key>
+ <ValueName>Zones</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="4" type_name="REG_DWORD">
+ <Key>Software\Policies\Samba\Unix Settings\Firewalld</Key>
+ <ValueName>Rules</ValueName>
+ <Value>1</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Samba\Unix Settings\Firewalld\Rules</Key>
+ <ValueName>Rules</ValueName>
+ <Value>{&quot;work&quot;: [{&quot;rule&quot;: {&quot;family&quot;: &quot;ipv4&quot;}, &quot;source address&quot;: &quot;172.25.1.7&quot;, &quot;service name&quot;: &quot;ftp&quot;, &quot;reject&quot;: {}}]}</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Samba\Unix Settings\Firewalld\Zones</Key>
+ <ValueName>**delvals.</ValueName>
+ <Value> </Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Samba\Unix Settings\Firewalld\Zones</Key>
+ <ValueName>work</ValueName>
+ <Value>work</Value>
+ </Entry>
+ <Entry type="1" type_name="REG_SZ">
+ <Key>Software\Policies\Samba\Unix Settings\Firewalld\Zones</Key>
+ <ValueName>home</ValueName>
+ <Value>home</Value>
+ </Entry>
+</PolFile>
+"""
+
+drive_maps_xml = b"""<?xml version="1.0" encoding="utf-8"?>
+<Drives clsid="{8FDDCC1A-0C3C-43cd-A6B4-71A6DF20DA8C}"><Drive clsid="{935D1B74-9CB8-4e3c-9914-7DD559B7A417}" name="A:" status="A:" image="2" changed="2023-03-08 19:23:02" uid="{1641E121-DEF3-418D-A428-2D8DF4749504}" bypassErrors="1"><Properties action="U" thisDrive="NOCHANGE" allDrives="NOCHANGE" userName="" path="\\\\example.com\\test" label="TEST" persistent="1" useLetter="0" letter="A"/></Drive>
+</Drives>
+"""
+
+empty_multi_sz_reg_pol = \
+br"""
+<?xml version="1.0" encoding="utf-8"?>
+<PolFile num_entries="1" signature="PReg" version="1">
+ <Entry type="7" type_name="REG_MULTI_SZ">
+ <Key>KeyName</Key>
+ <ValueName>ValueName</ValueName>
+ <Value/>
+ </Entry>
+</PolFile>
+"""
+
+multiple_values_multi_sz_reg_pol = \
+br"""
+<?xml version="1.0" encoding="utf-8"?>
+<PolFile num_entries="1" signature="PReg" version="1">
+ <Entry type="7" type_name="REG_MULTI_SZ">
+ <Key>KeyName</Key>
+ <ValueName>ValueName</ValueName>
+ <Value>Value1</Value>
+ <Value>Value2</Value>
+ <Value>Value3</Value>
+ </Entry>
+</PolFile>
+"""
+
def days2rel_nttime(val):
    """Convert a number of days to a negative relative NT time.

    Relative NT times are expressed as negative counts of
    100-nanosecond intervals, hence the 10**7 ticks-per-second factor.
    """
    ticks_per_second = 10000000
    seconds_per_day = 60 * 60 * 24
    return -(val * seconds_per_day * ticks_per_second)
+
def gpupdate(lp, arg):
    """Run the configured 'gpo update command' with one extra argument.

    :param lp: loadparm context providing the 'gpo update command' setting
    :param arg: additional command-line argument (e.g. '--force')
    :return: the gpupdate process exit code; stderr is printed to aid
             debugging of test failures
    """
    # Copy the command list before appending: mutating the list returned
    # by lp.get() would alter the stored setting, so repeated calls would
    # accumulate arguments.
    cmd = list(lp.get('gpo update command'))
    cmd.append(arg)

    p = Popen(cmd, stdout=PIPE, stderr=PIPE)
    stdoutdata, stderrdata = p.communicate()
    print(stderrdata)
    return p.returncode
+
def gpupdate_force(lp):
    # Apply all GPOs unconditionally, ignoring version caching.
    return gpupdate(lp, '--force')

def gpupdate_unapply(lp):
    # Revert previously applied policy settings.
    return gpupdate(lp, '--unapply')

def rsop(lp):
    # Print the Resultant Set of Policy (a dry run; nothing is applied).
    return gpupdate(lp, '--rsop')
+
def stage_file(path, data):
    """Write test data to path, backing up any existing file as path.bak.

    The data is written to a temporary file in the target directory and
    then renamed into place, so readers never observe a partial file.

    :param path: destination file name
    :param data: str or bytes content (converted via get_bytes)
    :return: True on success, False if the parent directory could not
             be created
    """
    dirname = os.path.dirname(path)
    try:
        # exist_ok=True covers the racy 'already created' case that the
        # old explicit errno.EEXIST check handled; any other failure
        # (including dirname existing as a non-directory) returns False.
        os.makedirs(dirname, exist_ok=True)
    except OSError:
        return False
    if os.path.exists(path):
        os.rename(path, '%s.bak' % path)
    with NamedTemporaryFile(delete=False, dir=dirname) as f:
        f.write(get_bytes(data))
        os.rename(f.name, path)
    # World-readable so the services under test can consume the file.
    os.chmod(path, 0o644)
    return True
+
def unstage_file(path):
    """Undo stage_file(): restore the .bak backup if one exists,
    otherwise remove the staged file."""
    backup_path = path + '.bak'
    if os.path.exists(backup_path):
        os.rename(backup_path, path)
    elif os.path.exists(path):
        os.remove(path)
+
+class GPOTests(tests.TestCase):
    def setUp(self):
        """Build the loadparm context and credentials shared by all tests."""
        super().setUp()
        # Target DC hostname comes from the test environment.
        self.server = os.environ["SERVER"]
        # Machine account name of the DC: uppercase NetBIOS name + '$'.
        self.dc_account = self.server.upper() + '$'
        self.lp = s3param.get_context()
        self.lp.load_default()
        self.creds = self.insta_creds(template=self.get_credentials())
+
+ def test_gpo_list(self):
+ global poldir, dspath
+ gpos = get_gpo_list(self.server, self.creds, self.lp,
+ self.creds.get_username())
+ guid = '{31B2F340-016D-11D2-945F-00C04FB984F9}'
+ names = ['Local Policy', guid]
+ file_sys_paths = [None, '%s\\%s' % (poldir, guid)]
+ ds_paths = [None, 'CN=%s,%s' % (guid, dspath)]
+ for i in range(0, len(gpos)):
+ self.assertEqual(gpos[i].name, names[i],
+ 'The gpo name did not match expected name %s' % gpos[i].name)
+ self.assertEqual(gpos[i].file_sys_path, file_sys_paths[i],
+ 'file_sys_path did not match expected %s' % gpos[i].file_sys_path)
+ self.assertEqual(gpos[i].ds_path, ds_paths[i],
+ 'ds_path did not match expected %s' % gpos[i].ds_path)
+
+ def test_gpt_version(self):
+ global gpt_data
+ local_path = self.lp.cache_path('gpo_cache')
+ guid = '{31B2F340-016D-11D2-945F-00C04FB984F9}'
+ gpo_path = os.path.join(local_path, policies, guid)
+ old_vers = gpo.gpo_get_sysvol_gpt_version(gpo_path)[1]
+
+ with open(os.path.join(gpo_path, 'GPT.INI'), 'w') as gpt:
+ gpt.write(gpt_data % 42)
+ self.assertEqual(gpo.gpo_get_sysvol_gpt_version(gpo_path)[1], 42,
+ 'gpo_get_sysvol_gpt_version() did not return the expected version')
+
+ with open(os.path.join(gpo_path, 'GPT.INI'), 'w') as gpt:
+ gpt.write(gpt_data % old_vers)
+ self.assertEqual(gpo.gpo_get_sysvol_gpt_version(gpo_path)[1], old_vers,
+ 'gpo_get_sysvol_gpt_version() did not return the expected version')
+
+ def test_check_refresh_gpo_list(self):
+ cache = self.lp.cache_path('gpo_cache')
+ gpos = get_gpo_list(self.server, self.creds, self.lp,
+ self.creds.get_username())
+ check_refresh_gpo_list(self.server, self.lp, self.creds, gpos)
+
+ self.assertTrue(os.path.exists(cache),
+ 'GPO cache %s was not created' % cache)
+
+ guid = '{31B2F340-016D-11D2-945F-00C04FB984F9}'
+ gpt_ini = os.path.join(cache, policies,
+ guid, 'GPT.INI')
+ self.assertTrue(os.path.exists(gpt_ini),
+ 'GPT.INI was not cached for %s' % guid)
+
+ def test_check_refresh_gpo_list_malicious_paths(self):
+ # the path cannot contain ..
+ path = '/usr/local/samba/var/locks/sysvol/../../../../../../root/'
+ self.assertRaises(OSError, check_safe_path, path)
+
+ self.assertEqual(check_safe_path('/etc/passwd'), 'etc/passwd')
+ self.assertEqual(check_safe_path('\\\\etc/\\passwd'), 'etc/passwd')
+
+ # there should be no backslashes used to delineate paths
+ before = 'sysvol/' + realm + '\\Policies/' \
+ '{31B2F340-016D-11D2-945F-00C04FB984F9}\\GPT.INI'
+ after = realm + '/Policies/' \
+ '{31B2F340-016D-11D2-945F-00C04FB984F9}/GPT.INI'
+ result = check_safe_path(before)
+ self.assertEqual(result, after, 'check_safe_path() didn\'t'
+ ' correctly convert \\ to /')
+
+ def test_check_safe_path_typesafe_name(self):
+ path = '\\\\toady.suse.de\\SysVol\\toady.suse.de\\Policies\\' \
+ '{31B2F340-016D-11D2-945F-00C04FB984F9}\\GPT.INI'
+ expected_path = 'toady.suse.de/Policies/' \
+ '{31B2F340-016D-11D2-945F-00C04FB984F9}/GPT.INI'
+
+ result = check_safe_path(path)
+ self.assertEqual(result, expected_path,
+ 'check_safe_path unable to detect variable case sysvol components')
+
    def test_gpt_ext_register(self):
        """Exercise gp extension registration, listing and unregistration,
        plus round-tripping a section through gpext.conf."""
        this_path = os.path.dirname(os.path.realpath(__file__))
        samba_path = os.path.realpath(os.path.join(this_path, '../../../'))
        ext_path = os.path.join(samba_path, 'python/samba/gp/gp_sec_ext.py')
        ext_guid = '{827D319E-6EAC-11D2-A4EA-00C04F79F83A}'
        # Register the security extension and verify it shows up in the
        # listing with the DllName we registered.
        ret = register_gp_extension(ext_guid, 'gp_access_ext', ext_path,
                                    smb_conf=self.lp.configfile,
                                    machine=True, user=False)
        self.assertTrue(ret, 'Failed to register a gp ext')
        gp_exts = list_gp_extensions(self.lp.configfile)
        self.assertTrue(ext_guid in gp_exts.keys(),
                        'Failed to list gp exts')
        self.assertEqual(gp_exts[ext_guid]['DllName'], ext_path,
                         'Failed to list gp exts')

        # Unregister and confirm it disappears from the listing.
        unregister_gp_extension(ext_guid)
        gp_exts = list_gp_extensions(self.lp.configfile)
        self.assertTrue(ext_guid not in gp_exts.keys(),
                        'Failed to unregister gp exts')

        self.assertTrue(check_guid(ext_guid), 'Failed to parse valid guid')
        self.assertFalse(check_guid('AAAAAABBBBBBBCCC'), 'Parsed invalid guid')

        # Round-trip a scratch section through gpext.conf to verify that
        # parse_gpext_conf()/atomic_write_conf() preserve settings.
        lp, parser = parse_gpext_conf(self.lp.configfile)
        self.assertTrue(lp and parser, 'parse_gpext_conf() invalid return')
        parser.add_section('test_section')
        parser.set('test_section', 'test_var', ext_guid)
        atomic_write_conf(lp, parser)

        lp, parser = parse_gpext_conf(self.lp.configfile)
        self.assertTrue('test_section' in parser.sections(),
                        'test_section not found in gpext.conf')
        self.assertEqual(parser.get('test_section', 'test_var'), ext_guid,
                         'Failed to find test variable in gpext.conf')
        # Clean up so later tests see an unmodified gpext.conf.
        parser.remove_section('test_section')
        atomic_write_conf(lp, parser)
+
    def test_gp_log_get_applied(self):
        """Verify the gp log records applied GPO guids and settings and
        that get_deleted_gpos_list() reports policies pending removal."""
        local_path = self.lp.get('path', 'sysvol')
        guids = ['{31B2F340-016D-11D2-945F-00C04FB984F9}',
                 '{6AC1786C-016F-11D2-945F-00C04FB984F9}']
        gpofile = '%s/' + realm + '/Policies/%s/MACHINE/Microsoft/' \
                  'Windows NT/SecEdit/GptTmpl.inf'
        stage = '[System Access]\nMinimumPasswordAge = 998\n'
        cache_dir = self.lp.get('cache directory')
        store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))
        # Stage a GptTmpl.inf for each GPO so both apply a setting.
        for guid in guids:
            gpttmpl = gpofile % (local_path, guid)
            ret = stage_file(gpttmpl, stage)
            self.assertTrue(ret, 'Could not create the target %s' % gpttmpl)

        ret = gpupdate_force(self.lp)
        self.assertEqual(ret, 0, 'gpupdate force failed')

        gp_db = store.get_gplog(self.dc_account)

        applied_guids = gp_db.get_applied_guids()
        self.assertEqual(len(applied_guids), 2, 'The guids were not found')
        self.assertIn(guids[0], applied_guids,
                      '%s not in applied guids' % guids[0])
        self.assertIn(guids[1], applied_guids,
                      '%s not in applied guids' % guids[1])

        # guids[0] is expected to carry a 1-day minPwdAge, guids[1] the
        # staged 998 days; both stored as negative relative NT times.
        applied_settings = gp_db.get_applied_settings(applied_guids)
        for policy in applied_settings:
            self.assertIn('System Access', policy[1],
                          'System Access policies not set')
            self.assertIn('minPwdAge', policy[1]['System Access'],
                          'minPwdAge policy not set')
            if policy[0] == guids[0]:
                self.assertEqual(int(policy[1]['System Access']['minPwdAge']),
                                 days2rel_nttime(1),
                                 'minPwdAge policy not set')
            elif policy[0] == guids[1]:
                self.assertEqual(int(policy[1]['System Access']['minPwdAge']),
                                 days2rel_nttime(998),
                                 'minPwdAge policy not set')

        # Dropping the last GPO from the list must surface it (and its
        # settings) as deleted.
        gpos = get_gpo_list(self.server, self.creds, self.lp,
                            self.dc_account)
        del_gpos = get_deleted_gpos_list(gp_db, gpos[:-1])
        self.assertEqual(len(del_gpos), 1, 'Returned delete gpos is incorrect')
        self.assertEqual(guids[-1], del_gpos[0][0],
                         'GUID for delete gpo is incorrect')
        self.assertIn('System Access', del_gpos[0][1],
                      'System Access policies not set for removal')
        self.assertIn('minPwdAge', del_gpos[0][1]['System Access'],
                      'minPwdAge policy not set for removal')

        # Remove the staged files and revert the applied policy.
        for guid in guids:
            gpttmpl = gpofile % (local_path, guid)
            unstage_file(gpttmpl)

        ret = gpupdate_unapply(self.lp)
        self.assertEqual(ret, 0, 'gpupdate unapply failed')
+
    def test_process_group_policy(self):
        """Apply and unapply the kerberos policy extension, checking that
        GPO ordering determines the winning MaxTicketAge value."""
        local_path = self.lp.cache_path('gpo_cache')
        guids = ['{31B2F340-016D-11D2-945F-00C04FB984F9}',
                 '{6AC1786C-016F-11D2-945F-00C04FB984F9}']
        gpofile = '%s/' + policies + '/%s/MACHINE/MICROSOFT/' \
                  'WINDOWS NT/SECEDIT/GPTTMPL.INF'
        cache_dir = self.lp.get('cache directory')
        store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))

        machine_creds = Credentials()
        machine_creds.guess(self.lp)
        machine_creds.set_machine_account()

        # Initialize the group policy extension
        ext = gp_krb_ext(self.lp, machine_creds,
                         machine_creds.get_username(), store)

        gpos = get_gpo_list(self.server, machine_creds, self.lp,
                            machine_creds.get_username())

        # Include MaxClockSkew to ensure we don't fail on a key we ignore
        stage = '[Kerberos Policy]\nMaxTicketAge = %d\nMaxClockSkew = 5'
        # Stage a different MaxTicketAge per GPO to detect which one wins.
        opts = [100, 200]
        for i in range(0, 2):
            gpttmpl = gpofile % (local_path, guids[i])
            ret = stage_file(gpttmpl, stage % opts[i])
            self.assertTrue(ret, 'Could not create the target %s' % gpttmpl)

        # Process all gpos
        ext.process_group_policy([], gpos)

        # The later GPO in the list takes precedence, so opts[1] applies.
        ret = store.get_int('kdc:user_ticket_lifetime')
        self.assertEqual(ret, opts[1], 'Higher priority policy was not set')

        # Remove policy
        gp_db = store.get_gplog(machine_creds.get_username())
        del_gpos = get_deleted_gpos_list(gp_db, [])
        ext.process_group_policy(del_gpos, [])

        # After unapply the setting must be gone from the store.
        ret = store.get_int('kdc:user_ticket_lifetime')
        self.assertEqual(ret, None, 'MaxTicketAge should not have applied')

        # Process just the first gpo
        ext.process_group_policy([], gpos[:-1])

        ret = store.get_int('kdc:user_ticket_lifetime')
        self.assertEqual(ret, opts[0], 'Lower priority policy was not set')

        # Remove policy
        ext.process_group_policy(del_gpos, [])

        # Clean up the staged GptTmpl.inf files.
        for guid in guids:
            gpttmpl = gpofile % (local_path, guid)
            unstage_file(gpttmpl)
+
    def test_gp_scripts(self):
        """Apply cron-script policies for each schedule (daily, monthly,
        weekly, hourly) and verify the generated script exists, runs, and
        is removed again on unapply."""
        local_path = self.lp.cache_path('gpo_cache')
        guid = '{31B2F340-016D-11D2-945F-00C04FB984F9}'
        reg_pol = os.path.join(local_path, policies, guid,
                               'MACHINE/REGISTRY.POL')
        cache_dir = self.lp.get('cache directory')
        store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))

        machine_creds = Credentials()
        machine_creds.guess(self.lp)
        machine_creds.set_machine_account()

        # Initialize the group policy extension
        ext = gp_scripts_ext(self.lp, machine_creds,
                             machine_creds.get_username(), store)

        gpos = get_gpo_list(self.server, machine_creds, self.lp,
                            machine_creds.get_username())

        # Map each registry key to the cron directory suffix it targets.
        reg_key = b'Software\\Policies\\Samba\\Unix Settings'
        sections = { b'%s\\Daily Scripts' % reg_key : '.cron.daily',
                     b'%s\\Monthly Scripts' % reg_key : '.cron.monthly',
                     b'%s\\Weekly Scripts' % reg_key : '.cron.weekly',
                     b'%s\\Hourly Scripts' % reg_key : '.cron.hourly' }
        for keyname in sections.keys():
            # Stage the Registry.pol file with test data
            stage = preg.file()
            e = preg.entry()
            e.keyname = keyname
            e.valuename = b'Software\\Policies\\Samba\\Unix Settings'
            e.type = 1
            e.data = b'echo hello world'
            stage.num_entries = 1
            stage.entries = [e]
            ret = stage_file(reg_pol, ndr_pack(stage))
            self.assertTrue(ret, 'Could not create the target %s' % reg_pol)

            # Process all gpos, with temp output directory
            with TemporaryDirectory(sections[keyname]) as dname:
                ext.process_group_policy([], gpos, dname)
                scripts = os.listdir(dname)
                self.assertEqual(len(scripts), 1,
                                 'The %s script was not created' % keyname.decode())
                # Execute the generated script and check its output.
                out, _ = Popen([os.path.join(dname, scripts[0])], stdout=PIPE).communicate()
                self.assertIn(b'hello world', out,
                              '%s script execution failed' % keyname.decode())

                # Check that a call to gpupdate --rsop also succeeds
                ret = rsop(self.lp)
                self.assertEqual(ret, 0, 'gpupdate --rsop failed!')

                # Remove policy
                gp_db = store.get_gplog(machine_creds.get_username())
                del_gpos = get_deleted_gpos_list(gp_db, [])
                ext.process_group_policy(del_gpos, [])
                self.assertEqual(len(os.listdir(dname)), 0,
                                 'Unapply failed to cleanup scripts')

            # Unstage the Registry.pol file
            unstage_file(reg_pol)
+
    def test_gp_sudoers(self):
        """Apply a sudoers rule from Registry.pol and verify it is written
        to the output directory, rsop succeeds, and unapply removes it."""
        local_path = self.lp.cache_path('gpo_cache')
        guid = '{31B2F340-016D-11D2-945F-00C04FB984F9}'
        reg_pol = os.path.join(local_path, policies, guid,
                               'MACHINE/REGISTRY.POL')
        cache_dir = self.lp.get('cache directory')
        store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))

        machine_creds = Credentials()
        machine_creds.guess(self.lp)
        machine_creds.set_machine_account()

        # Initialize the group policy extension
        ext = gp_sudoers_ext(self.lp, machine_creds,
                             machine_creds.get_username(), store)

        gpos = get_gpo_list(self.server, machine_creds, self.lp,
                            machine_creds.get_username())

        # Stage the Registry.pol file with test data
        stage = preg.file()
        e = preg.entry()
        e.keyname = b'Software\\Policies\\Samba\\Unix Settings\\Sudo Rights'
        e.valuename = b'Software\\Policies\\Samba\\Unix Settings'
        e.type = 1
        e.data = b'fakeu ALL=(ALL) NOPASSWD: ALL'
        stage.num_entries = 1
        stage.entries = [e]
        ret = stage_file(reg_pol, ndr_pack(stage))
        self.assertTrue(ret, 'Could not create the target %s' % reg_pol)

        # Process all gpos, with temp output directory
        with TemporaryDirectory() as dname:
            ext.process_group_policy([], gpos, dname)
            sudoers = os.listdir(dname)
            self.assertEqual(len(sudoers), 1, 'The sudoer file was not created')
            self.assertIn(e.data,
                          open(os.path.join(dname, sudoers[0]), 'r').read(),
                          'The sudoers entry was not applied')

            # Check that a call to gpupdate --rsop also succeeds
            ret = rsop(self.lp)
            self.assertEqual(ret, 0, 'gpupdate --rsop failed!')

            # Remove policy
            gp_db = store.get_gplog(machine_creds.get_username())
            del_gpos = get_deleted_gpos_list(gp_db, [])
            ext.process_group_policy(del_gpos, [])
            self.assertEqual(len(os.listdir(dname)), 0,
                             'Unapply failed to cleanup scripts')

        # Unstage the Registry.pol file
        unstage_file(reg_pol)
+
    def test_vgp_sudoers(self):
        """Apply and unapply a VGP sudoers policy from a staged manifest.

        Stages a manifest.xml with three sudoers entries (a user+group
        entry, an entry with no principal, and one with dispersed
        principals), applies vgp_sudoers_ext into a temporary output
        directory, verifies the generated sudoers files, then unapplies
        the policy and checks the files are cleaned up again.
        """
        local_path = self.lp.cache_path('gpo_cache')
        guid = '{31B2F340-016D-11D2-945F-00C04FB984F9}'
        manifest = os.path.join(local_path, policies, guid, 'MACHINE',
                                'VGP/VTLA/SUDO/SUDOERSCONFIGURATION/MANIFEST.XML')
        cache_dir = self.lp.get('cache directory')
        store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))

        machine_creds = Credentials()
        machine_creds.guess(self.lp)
        machine_creds.set_machine_account()

        # Initialize the group policy extension
        ext = vgp_sudoers_ext(self.lp, machine_creds,
                              machine_creds.get_username(), store)

        gpos = get_gpo_list(self.server, machine_creds, self.lp,
                            machine_creds.get_username())

        # Stage the manifest.xml file with test data
        stage = etree.Element('vgppolicy')
        policysetting = etree.Element('policysetting')
        stage.append(policysetting)
        version = etree.Element('version')
        version.text = '1'
        policysetting.append(version)
        data = etree.Element('data')
        sudoers_entry = etree.Element('sudoers_entry')
        command = etree.Element('command')
        command.text = 'ALL'
        sudoers_entry.append(command)
        user = etree.Element('user')
        user.text = 'ALL'
        sudoers_entry.append(user)
        principal_list = etree.Element('listelement')
        principal = etree.Element('principal')
        principal.text = 'fakeu'
        principal.attrib['type'] = 'user'
        group = etree.Element('principal')
        group.text = 'fakeg'
        group.attrib['type'] = 'group'
        principal_list.append(principal)
        principal_list.append(group)
        sudoers_entry.append(principal_list)
        data.append(sudoers_entry)
        # Ensure an empty principal doesn't cause a crash
        sudoers_entry = etree.SubElement(data, 'sudoers_entry')
        command = etree.SubElement(sudoers_entry, 'command')
        command.text = 'ALL'
        user = etree.SubElement(sudoers_entry, 'user')
        user.text = 'ALL'
        # Ensure having dispersed principals still works
        sudoers_entry = etree.SubElement(data, 'sudoers_entry')
        command = etree.SubElement(sudoers_entry, 'command')
        command.text = 'ALL'
        user = etree.SubElement(sudoers_entry, 'user')
        user.text = 'ALL'
        listelement = etree.SubElement(sudoers_entry, 'listelement')
        principal = etree.SubElement(listelement, 'principal')
        principal.text = 'fakeu2'
        principal.attrib['type'] = 'user'
        listelement = etree.SubElement(sudoers_entry, 'listelement')
        group = etree.SubElement(listelement, 'principal')
        group.text = 'fakeg2'
        group.attrib['type'] = 'group'
        policysetting.append(data)
        ret = stage_file(manifest, etree.tostring(stage))
        self.assertTrue(ret, 'Could not create the target %s' % manifest)

        # Process all gpos, with temp output directory
        # Expected sudoers lines produced from the three staged entries
        data = 'fakeu,fakeg% ALL=(ALL) NOPASSWD: ALL'
        data2 = 'fakeu2,fakeg2% ALL=(ALL) NOPASSWD: ALL'
        data_no_principal = 'ALL ALL=(ALL) NOPASSWD: ALL'
        with TemporaryDirectory() as dname:
            ext.process_group_policy([], gpos, dname)
            sudoers = os.listdir(dname)
            self.assertEqual(len(sudoers), 3, 'The sudoer file was not created')
            output = open(os.path.join(dname, sudoers[0]), 'r').read() + \
                open(os.path.join(dname, sudoers[1]), 'r').read() + \
                open(os.path.join(dname, sudoers[2]), 'r').read()
            self.assertIn(data, output,
                          'The sudoers entry was not applied')
            self.assertIn(data2, output,
                          'The sudoers entry was not applied')
            self.assertIn(data_no_principal, output,
                          'The sudoers entry was not applied')

            # Check that a call to gpupdate --rsop also succeeds
            ret = rsop(self.lp)
            self.assertEqual(ret, 0, 'gpupdate --rsop failed!')

            # Remove policy
            gp_db = store.get_gplog(machine_creds.get_username())
            del_gpos = get_deleted_gpos_list(gp_db, [])
            ext.process_group_policy(del_gpos, [])
            self.assertEqual(len(os.listdir(dname)), 0,
                             'Unapply failed to cleanup scripts')

        # Unstage the manifest.xml file
        unstage_file(manifest)
+
+ def test_gp_inf_ext_utf(self):
+ cache_dir = self.lp.get('cache directory')
+ store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))
+
+ machine_creds = Credentials()
+ machine_creds.guess(self.lp)
+ machine_creds.set_machine_account()
+
+ ext = gp_inf_ext(self.lp, machine_creds,
+ machine_creds.get_username(), store)
+ test_data = '[Kerberos Policy]\nMaxTicketAge = 99\n'
+
+ with NamedTemporaryFile() as f:
+ with codecs.open(f.name, 'w', 'utf-16') as w:
+ w.write(test_data)
+ try:
+ inf_conf = ext.read(f.name)
+ except UnicodeDecodeError:
+ self.fail('Failed to parse utf-16')
+ self.assertIn('Kerberos Policy', inf_conf.keys(),
+ 'Kerberos Policy was not read from the file')
+ self.assertEqual(inf_conf.get('Kerberos Policy', 'MaxTicketAge'),
+ '99', 'MaxTicketAge was not read from the file')
+
+ with NamedTemporaryFile() as f:
+ with codecs.open(f.name, 'w', 'utf-8') as w:
+ w.write(test_data)
+ inf_conf = ext.read(f.name)
+ self.assertIn('Kerberos Policy', inf_conf.keys(),
+ 'Kerberos Policy was not read from the file')
+ self.assertEqual(inf_conf.get('Kerberos Policy', 'MaxTicketAge'),
+ '99', 'MaxTicketAge was not read from the file')
+
    def test_rsop(self):
        """Exercise the rsop() reporting path of several extensions.

        For every GPO that has a file system path, stages a GPTTMPL.INF
        (Kerberos policy) and a Registry.pol (scripts, sudoers, smb.conf
        and message settings), then checks each extension's rsop() output
        reports exactly the staged policy.
        """
        cache_dir = self.lp.get('cache directory')
        local_path = self.lp.cache_path('gpo_cache')
        store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))

        machine_creds = Credentials()
        machine_creds.guess(self.lp)
        machine_creds.set_machine_account()

        gpos = get_gpo_list(self.server, machine_creds, self.lp,
                            machine_creds.get_username())

        gp_extensions = []
        gp_extensions.append(gp_krb_ext)
        gp_extensions.append(gp_scripts_ext)
        gp_extensions.append(gp_sudoers_ext)
        gp_extensions.append(gp_smb_conf_ext)
        gp_extensions.append(gp_msgs_ext)

        # Create registry stage data
        # reg_pol is a path template; '%s' is filled with each GPO name below
        reg_pol = os.path.join(local_path, policies, '%s/MACHINE/REGISTRY.POL')
        reg_stage = preg.file()
        e = preg.entry()
        e.keyname = b'Software\\Policies\\Samba\\Unix Settings\\Daily Scripts'
        e.valuename = b'Software\\Policies\\Samba\\Unix Settings'
        e.type = 1
        e.data = b'echo hello world'
        e2 = preg.entry()
        e2.keyname = b'Software\\Policies\\Samba\\Unix Settings\\Sudo Rights'
        e2.valuename = b'Software\\Policies\\Samba\\Unix Settings'
        e2.type = 1
        e2.data = b'fakeu ALL=(ALL) NOPASSWD: ALL'
        e3 = preg.entry()
        e3.keyname = 'Software\\Policies\\Samba\\smb_conf\\apply group policies'
        e3.type = 4
        e3.data = 1
        e3.valuename = 'apply group policies'
        e4 = preg.entry()
        e4.keyname = b'Software\\Policies\\Samba\\Unix Settings\\Messages'
        e4.valuename = b'issue'
        e4.type = 1
        e4.data = b'Welcome to \\s \\r \\l'
        reg_stage.num_entries = 4
        reg_stage.entries = [e, e2, e3, e4]

        # Create krb stage date
        gpofile = os.path.join(local_path, policies, '%s/MACHINE/MICROSOFT/' \
                               'WINDOWS NT/SECEDIT/GPTTMPL.INF')
        krb_stage = '[Kerberos Policy]\nMaxTicketAge = 99\n' \
                    '[System Access]\nMinimumPasswordAge = 998\n'

        # Only GPOs with a file system path can carry staged policy files
        for g in [g for g in gpos if g.file_sys_path]:
            ret = stage_file(gpofile % g.name, krb_stage)
            self.assertTrue(ret, 'Could not create the target %s' %
                            (gpofile % g.name))
            ret = stage_file(reg_pol % g.name, ndr_pack(reg_stage))
            self.assertTrue(ret, 'Could not create the target %s' %
                            (reg_pol % g.name))
            for ext in gp_extensions:
                # Instantiate the extension class for this run
                ext = ext(self.lp, machine_creds,
                          machine_creds.get_username(), store)
                ret = ext.rsop(g)
                self.assertEqual(len(ret.keys()), 1,
                                 'A single policy should have been displayed')

                # Check the Security Extension
                if type(ext) == gp_krb_ext:
                    self.assertIn('Kerberos Policy', ret.keys(),
                                  'Kerberos Policy not found')
                    self.assertIn('MaxTicketAge', ret['Kerberos Policy'],
                                  'MaxTicketAge setting not found')
                    self.assertEqual(ret['Kerberos Policy']['MaxTicketAge'], '99',
                                     'MaxTicketAge was not set to 99')
                # Check the Scripts Extension
                elif type(ext) == gp_scripts_ext:
                    self.assertIn('Daily Scripts', ret.keys(),
                                  'Daily Scripts not found')
                    self.assertIn('echo hello world', ret['Daily Scripts'],
                                  'Daily script was not created')
                # Check the Sudoers Extension
                elif type(ext) == gp_sudoers_ext:
                    self.assertIn('Sudo Rights', ret.keys(),
                                  'Sudoers not found')
                    self.assertIn('fakeu ALL=(ALL) NOPASSWD: ALL',
                                  ret['Sudo Rights'],
                                  'Sudoers policy not created')
                # Check the smb.conf Extension
                elif type(ext) == gp_smb_conf_ext:
                    self.assertIn('smb.conf', ret.keys(),
                                  'apply group policies was not applied')
                    self.assertIn(e3.valuename, ret['smb.conf'],
                                  'apply group policies was not applied')
                    self.assertEqual(ret['smb.conf'][e3.valuename], e3.data,
                                     'apply group policies was not set')
                # Check the Messages Extension
                elif type(ext) == gp_msgs_ext:
                    self.assertIn('/etc/issue', ret,
                                  'Login Prompt Message not applied')
                    self.assertEqual(ret['/etc/issue'], e4.data,
                                     'Login Prompt Message not set')

            # Check that a call to gpupdate --rsop also succeeds
            ret = rsop(self.lp)
            self.assertEqual(ret, 0, 'gpupdate --rsop failed!')

            unstage_file(gpofile % g.name)
            unstage_file(reg_pol % g.name)
+
    def test_gp_unapply(self):
        """gpupdate_unapply must remove every applied policy artifact.

        Applies Kerberos, scripts and sudoers policy from staged files,
        records the files each extension created (as logged in the gpo
        store), then runs the unapply path and verifies every recorded
        file and the Kerberos setting are gone.
        """
        cache_dir = self.lp.get('cache directory')
        local_path = self.lp.cache_path('gpo_cache')
        guid = '{31B2F340-016D-11D2-945F-00C04FB984F9}'
        store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))

        machine_creds = Credentials()
        machine_creds.guess(self.lp)
        machine_creds.set_machine_account()

        gpos = get_gpo_list(self.server, machine_creds, self.lp,
                            machine_creds.get_username())

        gp_extensions = []
        gp_extensions.append(gp_krb_ext)
        gp_extensions.append(gp_scripts_ext)
        gp_extensions.append(gp_sudoers_ext)

        # Create registry stage data
        # reg_pol is a path template; '%s' is filled with the GPO guid
        reg_pol = os.path.join(local_path, policies, '%s/MACHINE/REGISTRY.POL')
        reg_stage = preg.file()
        e = preg.entry()
        e.keyname = b'Software\\Policies\\Samba\\Unix Settings\\Daily Scripts'
        e.valuename = b'Software\\Policies\\Samba\\Unix Settings'
        e.type = 1
        e.data = b'echo hello world'
        e2 = preg.entry()
        e2.keyname = b'Software\\Policies\\Samba\\Unix Settings\\Sudo Rights'
        e2.valuename = b'Software\\Policies\\Samba\\Unix Settings'
        e2.type = 1
        e2.data = b'fakeu ALL=(ALL) NOPASSWD: ALL'
        reg_stage.num_entries = 2
        reg_stage.entries = [e, e2]

        # Create krb stage date
        gpofile = os.path.join(local_path, policies, '%s/MACHINE/MICROSOFT/' \
                               'WINDOWS NT/SECEDIT/GPTTMPL.INF')
        krb_stage = '[Kerberos Policy]\nMaxTicketAge = 99\n'

        ret = stage_file(gpofile % guid, krb_stage)
        self.assertTrue(ret, 'Could not create the target %s' %
                        (gpofile % guid))
        ret = stage_file(reg_pol % guid, ndr_pack(reg_stage))
        self.assertTrue(ret, 'Could not create the target %s' %
                        (reg_pol % guid))

        # Process all gpos, with temp output directory
        remove = []
        with TemporaryDirectory() as dname:
            for ext in gp_extensions:
                # Instantiate the extension class for this run
                ext = ext(self.lp, machine_creds,
                          machine_creds.get_username(), store)
                if type(ext) == gp_krb_ext:
                    # The krb extension writes to the store, not to files
                    ext.process_group_policy([], gpos)
                    ret = store.get_int('kdc:user_ticket_lifetime')
                    self.assertEqual(ret, 99, 'Kerberos policy was not set')
                elif type(ext) in [gp_scripts_ext, gp_sudoers_ext]:
                    ext.process_group_policy([], gpos, dname)
                    gp_db = store.get_gplog(machine_creds.get_username())
                    applied_settings = gp_db.get_applied_settings([guid])
                    # Record each file the extension logged as applied;
                    # values are 'attr:path' strings, keep only the path
                    for _, fname in applied_settings[-1][-1][str(ext)].items():
                        fname = fname.split(':')[-1]
                        self.assertIn(dname, fname,
                                      'Test file not created in tmp dir')
                        self.assertTrue(os.path.exists(fname),
                                        'Test file not created')
                        remove.append(fname)

            # Unapply policy, and ensure policies are removed
            gpupdate_unapply(self.lp)

            for fname in remove:
                self.assertFalse(os.path.exists(fname),
                                 'Unapply did not remove test file')
            ret = store.get_int('kdc:user_ticket_lifetime')
            self.assertNotEqual(ret, 99, 'Kerberos policy was not unapplied')

        unstage_file(gpofile % guid)
        unstage_file(reg_pol % guid)
+
+ def test_smb_conf_ext(self):
+ local_path = self.lp.cache_path('gpo_cache')
+ guids = ['{31B2F340-016D-11D2-945F-00C04FB984F9}',
+ '{6AC1786C-016F-11D2-945F-00C04FB984F9}']
+ reg_pol = os.path.join(local_path, policies, guids[0],
+ 'MACHINE/REGISTRY.POL')
+ reg_pol2 = os.path.join(local_path, policies, guids[1],
+ 'MACHINE/REGISTRY.POL')
+ cache_dir = self.lp.get('cache directory')
+ store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))
+
+ machine_creds = Credentials()
+ machine_creds.guess(self.lp)
+ machine_creds.set_machine_account()
+
+ gpos = get_gpo_list(self.server, machine_creds, self.lp,
+ machine_creds.get_username())
+
+ entries = []
+ e = preg.entry()
+ e.keyname = 'Software\\Policies\\Samba\\smb_conf\\template homedir'
+ e.type = 1
+ e.data = '/home/samba/%D/%U'
+ e.valuename = 'template homedir'
+ entries.append(e)
+ e = preg.entry()
+ e.keyname = 'Software\\Policies\\Samba\\smb_conf\\apply group policies'
+ e.type = 4
+ e.data = 1
+ e.valuename = 'apply group policies'
+ entries.append(e)
+ e = preg.entry()
+ e.keyname = 'Software\\Policies\\Samba\\smb_conf\\ldap timeout'
+ e.type = 4
+ e.data = 9999
+ e.valuename = 'ldap timeout'
+ entries.append(e)
+ stage = preg.file()
+ stage.num_entries = len(entries)
+ stage.entries = entries
+
+ ret = stage_file(reg_pol, ndr_pack(stage))
+ self.assertTrue(ret, 'Failed to create the Registry.pol file')
+
+ # Stage the other Registry.pol
+ entries = []
+ e = preg.entry()
+ e.keyname = 'Software\\Policies\\Samba\\smb_conf\\apply group policies'
+ e.type = 4
+ e.data = 0
+ e.valuename = 'apply group policies'
+ entries.append(e)
+ stage = preg.file()
+ stage.num_entries = len(entries)
+ stage.entries = entries
+ ret = stage_file(reg_pol2, ndr_pack(stage))
+ self.assertTrue(ret, 'Failed to create the Registry.pol file')
+
+ with NamedTemporaryFile(suffix='_smb.conf') as f:
+ copyfile(self.lp.configfile, f.name)
+ lp = LoadParm(f.name)
+
+ # Initialize the group policy extension
+ ext = gp_smb_conf_ext(lp, machine_creds,
+ machine_creds.get_username(), store)
+ ext.process_group_policy([], gpos)
+ lp = LoadParm(f.name)
+
+ template_homedir = lp.get('template homedir')
+ self.assertEqual(template_homedir, '/home/samba/%D/%U',
+ 'template homedir was not applied')
+ apply_group_policies = lp.get('apply group policies')
+ self.assertFalse(apply_group_policies,
+ 'apply group policies was not applied')
+ ldap_timeout = lp.get('ldap timeout')
+ self.assertEqual(ldap_timeout, 9999, 'ldap timeout was not applied')
+
+ # Force apply with removal of second GPO
+ gp_db = store.get_gplog(machine_creds.get_username())
+ del_gpos = gp_db.get_applied_settings([guids[1]])
+ gpos = [gpo for gpo in gpos if gpo.name != guids[1]]
+ ext.process_group_policy(del_gpos, gpos)
+ lp = LoadParm(f.name)
+
+ template_homedir = lp.get('template homedir')
+ self.assertEqual(template_homedir, '/home/samba/%D/%U',
+ 'template homedir was not applied')
+ apply_group_policies = lp.get('apply group policies')
+ self.assertTrue(apply_group_policies,
+ 'apply group policies was not applied')
+ ldap_timeout = lp.get('ldap timeout')
+ self.assertEqual(ldap_timeout, 9999, 'ldap timeout was not applied')
+
+ # Check that a call to gpupdate --rsop also succeeds
+ ret = rsop(self.lp)
+ self.assertEqual(ret, 0, 'gpupdate --rsop failed!')
+
+ # Remove policy
+ del_gpos = get_deleted_gpos_list(gp_db, [])
+ ext.process_group_policy(del_gpos, [])
+
+ lp = LoadParm(f.name)
+
+ template_homedir = lp.get('template homedir')
+ self.assertEqual(template_homedir, self.lp.get('template homedir'),
+ 'template homedir was not unapplied')
+ apply_group_policies = lp.get('apply group policies')
+ self.assertEqual(apply_group_policies, self.lp.get('apply group policies'),
+ 'apply group policies was not unapplied')
+ ldap_timeout = lp.get('ldap timeout')
+ self.assertEqual(ldap_timeout, self.lp.get('ldap timeout'),
+ 'ldap timeout was not unapplied')
+
+ # Unstage the Registry.pol file
+ unstage_file(reg_pol)
+
+ def test_gp_motd(self):
+ local_path = self.lp.cache_path('gpo_cache')
+ guids = ['{31B2F340-016D-11D2-945F-00C04FB984F9}',
+ '{6AC1786C-016F-11D2-945F-00C04FB984F9}']
+ reg_pol = os.path.join(local_path, policies, guids[0],
+ 'MACHINE/REGISTRY.POL')
+ reg_pol2 = os.path.join(local_path, policies, guids[1],
+ 'MACHINE/REGISTRY.POL')
+ cache_dir = self.lp.get('cache directory')
+ store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))
+
+ machine_creds = Credentials()
+ machine_creds.guess(self.lp)
+ machine_creds.set_machine_account()
+
+ # Initialize the group policy extension
+ ext = gp_msgs_ext(self.lp, machine_creds,
+ machine_creds.get_username(), store)
+
+ gpos = get_gpo_list(self.server, machine_creds, self.lp,
+ machine_creds.get_username())
+
+ # Stage the Registry.pol file with test data
+ stage = preg.file()
+ e1 = preg.entry()
+ e1.keyname = b'Software\\Policies\\Samba\\Unix Settings\\Messages'
+ e1.valuename = b'motd'
+ e1.type = 1
+ e1.data = b'Have a lot of fun!'
+ stage.num_entries = 2
+ e2 = preg.entry()
+ e2.keyname = b'Software\\Policies\\Samba\\Unix Settings\\Messages'
+ e2.valuename = b'issue'
+ e2.type = 1
+ e2.data = b'Welcome to \\s \\r \\l'
+ stage.entries = [e1, e2]
+ ret = stage_file(reg_pol, ndr_pack(stage))
+ self.assertTrue(ret, 'Could not create the target %s' % reg_pol)
+
+ # Stage the other Registry.pol
+ stage = preg.file()
+ e3 = preg.entry()
+ e3.keyname = b'Software\\Policies\\Samba\\Unix Settings\\Messages'
+ e3.valuename = b'motd'
+ e3.type = 1
+ e3.data = b'This should overwrite the first policy'
+ stage.num_entries = 1
+ stage.entries = [e3]
+ ret = stage_file(reg_pol2, ndr_pack(stage))
+ self.assertTrue(ret, 'Could not create the target %s' % reg_pol2)
+
+ # Process all gpos, with temp output directory
+ with TemporaryDirectory() as dname:
+ ext.process_group_policy([], gpos, dname)
+ motd_file = os.path.join(dname, 'motd')
+ self.assertTrue(os.path.exists(motd_file),
+ 'Message of the day file not created')
+ data = open(motd_file, 'r').read()
+ self.assertEqual(data, e3.data, 'Message of the day not applied')
+ issue_file = os.path.join(dname, 'issue')
+ self.assertTrue(os.path.exists(issue_file),
+ 'Login Prompt Message file not created')
+ data = open(issue_file, 'r').read()
+ self.assertEqual(data, e2.data, 'Login Prompt Message not applied')
+
+ # Force apply with removal of second GPO
+ gp_db = store.get_gplog(machine_creds.get_username())
+ del_gpos = gp_db.get_applied_settings([guids[1]])
+ gpos = [gpo for gpo in gpos if gpo.name != guids[1]]
+ ext.process_group_policy(del_gpos, gpos, dname)
+
+ self.assertTrue(os.path.exists(motd_file),
+ 'Message of the day file not created')
+ data = open(motd_file, 'r').read()
+ self.assertEqual(data, e1.data, 'Message of the day not applied')
+ issue_file = os.path.join(dname, 'issue')
+ self.assertTrue(os.path.exists(issue_file),
+ 'Login Prompt Message file not created')
+ data = open(issue_file, 'r').read()
+ self.assertEqual(data, e2.data, 'Login Prompt Message not applied')
+
+ # Check that a call to gpupdate --rsop also succeeds
+ ret = rsop(self.lp)
+ self.assertEqual(ret, 0, 'gpupdate --rsop failed!')
+
+ # Unapply policy, and ensure the test files are removed
+ del_gpos = get_deleted_gpos_list(gp_db, [])
+ ext.process_group_policy(del_gpos, [], dname)
+ data = open(motd_file, 'r').read()
+ self.assertFalse(data, 'Message of the day file not removed')
+ data = open(issue_file, 'r').read()
+ self.assertFalse(data, 'Login Prompt Message file not removed')
+
+ # Unstage the Registry.pol file
+ unstage_file(reg_pol)
+
+ def test_vgp_symlink(self):
+ local_path = self.lp.cache_path('gpo_cache')
+ guid = '{31B2F340-016D-11D2-945F-00C04FB984F9}'
+ manifest = os.path.join(local_path, policies, guid, 'MACHINE',
+ 'VGP/VTLA/UNIX/SYMLINK/MANIFEST.XML')
+ cache_dir = self.lp.get('cache directory')
+ store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))
+
+ machine_creds = Credentials()
+ machine_creds.guess(self.lp)
+ machine_creds.set_machine_account()
+
+ # Initialize the group policy extension
+ ext = vgp_symlink_ext(self.lp, machine_creds,
+ machine_creds.get_username(), store)
+
+ gpos = get_gpo_list(self.server, machine_creds, self.lp,
+ machine_creds.get_username())
+
+ with TemporaryDirectory() as dname:
+ test_source = os.path.join(dname, 'test.source')
+ test_target = os.path.join(dname, 'test.target')
+
+ # Stage the manifest.xml file with test data
+ stage = etree.Element('vgppolicy')
+ policysetting = etree.Element('policysetting')
+ stage.append(policysetting)
+ version = etree.Element('version')
+ version.text = '1'
+ policysetting.append(version)
+ data = etree.Element('data')
+ file_properties = etree.Element('file_properties')
+ source = etree.Element('source')
+ source.text = test_source
+ file_properties.append(source)
+ target = etree.Element('target')
+ target.text = test_target
+ file_properties.append(target)
+ data.append(file_properties)
+ policysetting.append(data)
+ ret = stage_file(manifest, etree.tostring(stage))
+ self.assertTrue(ret, 'Could not create the target %s' % manifest)
+
+ # Create test source
+ test_source_data = 'hello world!'
+ with open(test_source, 'w') as w:
+ w.write(test_source_data)
+
+ # Process all gpos, with temp output directory
+ ext.process_group_policy([], gpos)
+ self.assertTrue(os.path.exists(test_target),
+ 'The test symlink was not created')
+ self.assertTrue(os.path.islink(test_target),
+ 'The test file is not a symlink')
+ self.assertIn(test_source_data, open(test_target, 'r').read(),
+ 'Reading from symlink does not produce source data')
+
+ # Unapply the policy, ensure removal
+ gp_db = store.get_gplog(machine_creds.get_username())
+ del_gpos = get_deleted_gpos_list(gp_db, [])
+ ext.process_group_policy(del_gpos, [])
+ self.assertFalse(os.path.exists(test_target),
+ 'The test symlink was not delete')
+
+ # Verify RSOP
+ ret = ext.rsop([g for g in gpos if g.name == guid][0])
+ self.assertIn('ln -s %s %s' % (test_source, test_target),
+ list(ret.values())[0])
+
+ # Check that a call to gpupdate --rsop also succeeds
+ ret = rsop(self.lp)
+ self.assertEqual(ret, 0, 'gpupdate --rsop failed!')
+
+ # Unstage the manifest.xml file
+ unstage_file(manifest)
+
    def test_vgp_files(self):
        """Deploy a file with ownership and permissions via VGP files policy.

        Stages a source file plus a manifest requesting it be copied to a
        target path with mode 755 and the current user/group as owner,
        then verifies application, removal, and the rsop report.
        """
        local_path = self.lp.cache_path('gpo_cache')
        guid = '{31B2F340-016D-11D2-945F-00C04FB984F9}'
        manifest = os.path.join(local_path, policies, guid, 'MACHINE',
                                'VGP/VTLA/UNIX/FILES/MANIFEST.XML')
        source_file = os.path.join(os.path.dirname(manifest), 'TEST.SOURCE')
        source_data = '#!/bin/sh\necho hello world'
        ret = stage_file(source_file, source_data)
        self.assertTrue(ret, 'Could not create the target %s' % source_file)
        cache_dir = self.lp.get('cache directory')
        store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))

        machine_creds = Credentials()
        machine_creds.guess(self.lp)
        machine_creds.set_machine_account()

        # Initialize the group policy extension
        ext = vgp_files_ext(self.lp, machine_creds,
                            machine_creds.get_username(), store)

        gpos = get_gpo_list(self.server, machine_creds, self.lp,
                            machine_creds.get_username())

        # Stage the manifest.xml file with test data
        with TemporaryDirectory() as dname:
            stage = etree.Element('vgppolicy')
            policysetting = etree.Element('policysetting')
            stage.append(policysetting)
            version = etree.Element('version')
            version.text = '1'
            policysetting.append(version)
            data = etree.Element('data')
            file_properties = etree.SubElement(data, 'file_properties')
            source = etree.SubElement(file_properties, 'source')
            source.text = os.path.basename(source_file).lower()
            target = etree.SubElement(file_properties, 'target')
            target.text = os.path.join(dname, 'test.target')
            user = etree.SubElement(file_properties, 'user')
            user.text = pwd.getpwuid(os.getuid()).pw_name
            group = etree.SubElement(file_properties, 'group')
            group.text = grp.getgrgid(os.getgid()).gr_name
            # Request permissions of 755
            permissions = etree.SubElement(file_properties, 'permissions')
            permissions.set('type', 'user')
            etree.SubElement(permissions, 'read')
            etree.SubElement(permissions, 'write')
            etree.SubElement(permissions, 'execute')
            permissions = etree.SubElement(file_properties, 'permissions')
            permissions.set('type', 'group')
            etree.SubElement(permissions, 'read')
            etree.SubElement(permissions, 'execute')
            permissions = etree.SubElement(file_properties, 'permissions')
            permissions.set('type', 'other')
            etree.SubElement(permissions, 'read')
            etree.SubElement(permissions, 'execute')
            policysetting.append(data)
            ret = stage_file(manifest, etree.tostring(stage))
            self.assertTrue(ret, 'Could not create the target %s' % manifest)

            # Process all gpos, with temp output directory
            ext.process_group_policy([], gpos)
            self.assertTrue(os.path.exists(target.text),
                            'The target file does not exist')
            self.assertEqual(os.stat(target.text).st_mode & 0o777, 0o755,
                             'The target file permissions are incorrect')
            self.assertEqual(open(target.text).read(), source_data,
                             'The target file contents are incorrect')

            # Remove policy
            gp_db = store.get_gplog(machine_creds.get_username())
            del_gpos = get_deleted_gpos_list(gp_db, [])
            ext.process_group_policy(del_gpos, [])
            self.assertFalse(os.path.exists(target.text),
                             'The target file was not removed')

            # Test rsop
            g = [g for g in gpos if g.name == guid][0]
            ret = ext.rsop(g)
            self.assertIn(target.text, list(ret.values())[0][0],
                          'The target file was not listed by rsop')
            self.assertIn('-rwxr-xr-x', list(ret.values())[0][0],
                          'The target permissions were not listed by rsop')

            # Check that a call to gpupdate --rsop also succeeds
            ret = rsop(self.lp)
            self.assertEqual(ret, 0, 'gpupdate --rsop failed!')

        # Unstage the manifest and source files
        unstage_file(manifest)
        unstage_file(source_file)
+
+ def test_vgp_openssh(self):
+ local_path = self.lp.cache_path('gpo_cache')
+ guid = '{31B2F340-016D-11D2-945F-00C04FB984F9}'
+ manifest = os.path.join(local_path, policies, guid, 'MACHINE',
+ 'VGP/VTLA/SSHCFG/SSHD/MANIFEST.XML')
+ cache_dir = self.lp.get('cache directory')
+ store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))
+
+ machine_creds = Credentials()
+ machine_creds.guess(self.lp)
+ machine_creds.set_machine_account()
+
+ # Initialize the group policy extension
+ ext = vgp_openssh_ext(self.lp, machine_creds,
+ machine_creds.get_username(), store)
+
+ gpos = get_gpo_list(self.server, machine_creds, self.lp,
+ machine_creds.get_username())
+
+ # Stage the manifest.xml file with test data
+ stage = etree.Element('vgppolicy')
+ policysetting = etree.Element('policysetting')
+ stage.append(policysetting)
+ version = etree.Element('version')
+ version.text = '1'
+ policysetting.append(version)
+ data = etree.Element('data')
+ configfile = etree.Element('configfile')
+ configsection = etree.Element('configsection')
+ sectionname = etree.Element('sectionname')
+ configsection.append(sectionname)
+ kvpair = etree.Element('keyvaluepair')
+ key = etree.Element('key')
+ key.text = 'AddressFamily'
+ kvpair.append(key)
+ value = etree.Element('value')
+ value.text = 'inet6'
+ kvpair.append(value)
+ configsection.append(kvpair)
+ configfile.append(configsection)
+ data.append(configfile)
+ policysetting.append(data)
+ ret = stage_file(manifest, etree.tostring(stage))
+ self.assertTrue(ret, 'Could not create the target %s' % manifest)
+
+ # Process all gpos, with temp output directory
+ data = 'AddressFamily inet6'
+ with TemporaryDirectory() as dname:
+ ext.process_group_policy([], gpos, dname)
+ conf = os.listdir(dname)
+ self.assertEqual(len(conf), 1, 'The conf file was not created')
+ gp_cfg = os.path.join(dname, conf[0])
+ self.assertIn(data, open(gp_cfg, 'r').read(),
+ 'The sshd_config entry was not applied')
+
+ # Check that a call to gpupdate --rsop also succeeds
+ ret = rsop(self.lp)
+ self.assertEqual(ret, 0, 'gpupdate --rsop failed!')
+
+ # Remove policy
+ gp_db = store.get_gplog(machine_creds.get_username())
+ del_gpos = get_deleted_gpos_list(gp_db, [])
+ ext.process_group_policy(del_gpos, [], dname)
+ self.assertFalse(os.path.exists(gp_cfg),
+ 'Unapply failed to cleanup config')
+
+ # Unstage the Registry.pol file
+ unstage_file(manifest)
+
    def test_vgp_startup_scripts(self):
        """Exercise VGP startup-script policy in its three variants.

        Three manifests are staged in turn: a script with parameters
        (creates an @reboot cron entry), a run-once script (executed
        immediately instead of creating a cron job, and not re-run on
        re-apply), and a script without parameters.  Each variant is
        applied, verified, removed, and checked against the rsop report.
        """
        local_path = self.lp.cache_path('gpo_cache')
        guid = '{31B2F340-016D-11D2-945F-00C04FB984F9}'
        manifest = os.path.join(local_path, policies, guid, 'MACHINE',
                                'VGP/VTLA/UNIX/SCRIPTS/STARTUP/MANIFEST.XML')
        test_script = os.path.join(os.path.dirname(manifest), 'TEST.SH')
        test_data = '#!/bin/sh\necho $@ hello world'
        ret = stage_file(test_script, test_data)
        self.assertTrue(ret, 'Could not create the target %s' % test_script)
        cache_dir = self.lp.get('cache directory')
        store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))

        machine_creds = Credentials()
        machine_creds.guess(self.lp)
        machine_creds.set_machine_account()

        # Initialize the group policy extension
        ext = vgp_startup_scripts_ext(self.lp, machine_creds,
                                      machine_creds.get_username(), store)

        gpos = get_gpo_list(self.server, machine_creds, self.lp,
                            machine_creds.get_username())

        # Stage the manifest.xml file with test data
        stage = etree.Element('vgppolicy')
        policysetting = etree.SubElement(stage, 'policysetting')
        version = etree.SubElement(policysetting, 'version')
        version.text = '1'
        data = etree.SubElement(policysetting, 'data')
        listelement = etree.SubElement(data, 'listelement')
        script = etree.SubElement(listelement, 'script')
        script.text = os.path.basename(test_script).lower()
        parameters = etree.SubElement(listelement, 'parameters')
        parameters.text = '-n'
        # NOTE: 'hash' shadows the builtin; kept as-is (comment-only change)
        hash = etree.SubElement(listelement, 'hash')
        hash.text = \
            hashlib.md5(open(test_script, 'rb').read()).hexdigest().upper()
        run_as = etree.SubElement(listelement, 'run_as')
        run_as.text = 'root'
        ret = stage_file(manifest, etree.tostring(stage))
        self.assertTrue(ret, 'Could not create the target %s' % manifest)

        # Process all gpos, with temp output directory
        with TemporaryDirectory() as dname:
            ext.process_group_policy([], gpos, dname)
            files = os.listdir(dname)
            self.assertEqual(len(files), 1,
                             'The target script was not created')
            entry = '@reboot %s %s %s' % (run_as.text, test_script,
                                          parameters.text)
            self.assertIn(entry,
                          open(os.path.join(dname, files[0]), 'r').read(),
                          'The test entry was not found')

            # Remove policy
            gp_db = store.get_gplog(machine_creds.get_username())
            del_gpos = get_deleted_gpos_list(gp_db, [])
            ext.process_group_policy(del_gpos, [])
            files = os.listdir(dname)
            self.assertEqual(len(files), 0,
                             'The target script was not removed')

            # Test rsop
            g = [g for g in gpos if g.name == guid][0]
            ret = ext.rsop(g)
            self.assertIn(entry, list(ret.values())[0][0],
                          'The target entry was not listed by rsop')

            # Check that a call to gpupdate --rsop also succeeds
            ret = rsop(self.lp)
            self.assertEqual(ret, 0, 'gpupdate --rsop failed!')

        # Unstage the manifest.xml file (the script stays staged for the
        # following variants)
        unstage_file(manifest)

        # Stage the manifest.xml file for run once scripts
        # (mutates the tree staged above: adds <run_once/> and changes run_as)
        etree.SubElement(listelement, 'run_once')
        run_as.text = pwd.getpwuid(os.getuid()).pw_name
        ret = stage_file(manifest, etree.tostring(stage))
        self.assertTrue(ret, 'Could not create the target %s' % manifest)

        # Process all gpos, with temp output directory
        # A run once script will be executed immediately,
        # instead of creating a cron job
        with TemporaryDirectory() as dname:
            test_file = '%s/TESTING.txt' % dname
            test_data = '#!/bin/sh\ntouch %s' % test_file
            ret = stage_file(test_script, test_data)
            self.assertTrue(ret, 'Could not create the target %s' % test_script)

            ext.process_group_policy([], gpos, dname)
            files = os.listdir(dname)
            self.assertEqual(len(files), 1,
                             'The test file was not created')
            self.assertEqual(files[0], os.path.basename(test_file),
                             'The test file was not created')

            # Unlink the test file and ensure that processing
            # policy again does not recreate it.
            os.unlink(test_file)
            ext.process_group_policy([], gpos, dname)
            files = os.listdir(dname)
            self.assertEqual(len(files), 0,
                             'The test file should not have been created')

            # Remove policy
            gp_db = store.get_gplog(machine_creds.get_username())
            del_gpos = get_deleted_gpos_list(gp_db, [])
            ext.process_group_policy(del_gpos, [])

            # Test rsop
            entry = 'Run once as: %s `%s %s`' % (run_as.text, test_script,
                                                 parameters.text)
            g = [g for g in gpos if g.name == guid][0]
            ret = ext.rsop(g)
            self.assertIn(entry, list(ret.values())[0][0],
                          'The target entry was not listed by rsop')

            # Check that a call to gpupdate --rsop also succeeds
            ret = rsop(self.lp)
            self.assertEqual(ret, 0, 'gpupdate --rsop failed!')

        # Unstage the manifest.xml file
        unstage_file(manifest)

        # Stage the manifest.xml file for a script without parameters
        stage = etree.Element('vgppolicy')
        policysetting = etree.SubElement(stage, 'policysetting')
        version = etree.SubElement(policysetting, 'version')
        version.text = '1'
        data = etree.SubElement(policysetting, 'data')
        listelement = etree.SubElement(data, 'listelement')
        script = etree.SubElement(listelement, 'script')
        script.text = os.path.basename(test_script).lower()
        hash = etree.SubElement(listelement, 'hash')
        hash.text = \
            hashlib.md5(open(test_script, 'rb').read()).hexdigest().upper()
        run_as = etree.SubElement(listelement, 'run_as')
        run_as.text = 'root'
        ret = stage_file(manifest, etree.tostring(stage))
        self.assertTrue(ret, 'Could not create the target %s' % manifest)

        # Process all gpos, with temp output directory
        with TemporaryDirectory() as dname:
            try:
                ext.process_group_policy([], gpos, dname)
            except Exception as e:
                self.fail(str(e))
            files = os.listdir(dname)
            self.assertEqual(len(files), 1,
                             'The target script was not created')
            entry = '@reboot %s %s' % (run_as.text, test_script)
            self.assertIn(entry,
                          open(os.path.join(dname, files[0]), 'r').read(),
                          'The test entry was not found')

            # Remove policy
            gp_db = store.get_gplog(machine_creds.get_username())
            del_gpos = get_deleted_gpos_list(gp_db, [])
            ext.process_group_policy(del_gpos, [])
            files = os.listdir(dname)
            self.assertEqual(len(files), 0,
                             'The target script was not removed')

            # Test rsop
            g = [g for g in gpos if g.name == guid][0]
            ret = ext.rsop(g)
            self.assertIn(entry, list(ret.values())[0][0],
                          'The target entry was not listed by rsop')

            # Check that a call to gpupdate --rsop also succeeds
            ret = rsop(self.lp)
            self.assertEqual(ret, 0, 'gpupdate --rsop failed!')

        # Unstage the manifest.xml and script files
        unstage_file(manifest)
        unstage_file(test_script)
+
+ def test_vgp_motd(self):
+ """Apply, overwrite, and unapply a VGP message-of-the-day policy.
+
+ Stages MOTD manifests in two GPOs, verifies the second (later-applied)
+ GPO's text wins, then removes the second GPO and checks the first
+ GPO's text is restored, and finally unapplies everything.
+ """
+ local_path = self.lp.cache_path('gpo_cache')
+ guids = ['{31B2F340-016D-11D2-945F-00C04FB984F9}',
+ '{6AC1786C-016F-11D2-945F-00C04FB984F9}']
+ manifest = os.path.join(local_path, policies, guids[0], 'MACHINE',
+ 'VGP/VTLA/UNIX/MOTD/MANIFEST.XML')
+ manifest2 = os.path.join(local_path, policies, guids[1], 'MACHINE',
+ 'VGP/VTLA/UNIX/MOTD/MANIFEST.XML')
+ cache_dir = self.lp.get('cache directory')
+ store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))
+
+ machine_creds = Credentials()
+ machine_creds.guess(self.lp)
+ machine_creds.set_machine_account()
+
+ # Initialize the group policy extension
+ ext = vgp_motd_ext(self.lp, machine_creds,
+ machine_creds.get_username(), store)
+
+ gpos = get_gpo_list(self.server, machine_creds, self.lp,
+ machine_creds.get_username())
+
+ # Stage the manifest.xml file with test data
+ stage = etree.Element('vgppolicy')
+ policysetting = etree.SubElement(stage, 'policysetting')
+ version = etree.SubElement(policysetting, 'version')
+ version.text = '1'
+ data = etree.SubElement(policysetting, 'data')
+ filename = etree.SubElement(data, 'filename')
+ filename.text = 'motd'
+ text = etree.SubElement(data, 'text')
+ text.text = 'This is the message of the day'
+ ret = stage_file(manifest, etree.tostring(stage))
+ self.assertTrue(ret, 'Could not create the target %s' % manifest)
+
+ # Stage the other manifest.xml
+ stage = etree.Element('vgppolicy')
+ policysetting = etree.SubElement(stage, 'policysetting')
+ version = etree.SubElement(policysetting, 'version')
+ version.text = '1'
+ data = etree.SubElement(policysetting, 'data')
+ filename = etree.SubElement(data, 'filename')
+ filename.text = 'motd'
+ text2 = etree.SubElement(data, 'text')
+ text2.text = 'This should overwrite the first policy'
+ ret = stage_file(manifest2, etree.tostring(stage))
+ self.assertTrue(ret, 'Could not create the target %s' % manifest2)
+
+ # Process all gpos, with temp output directory
+ with NamedTemporaryFile() as f:
+ ext.process_group_policy([], gpos, f.name)
+ self.assertTrue(os.path.exists(f.name),
+ 'Message of the day file not created')
+ data = open(f.name, 'r').read()
+ # The second GPO is applied last, so its text must win
+ self.assertEqual(data, text2.text, 'Message of the day not applied')
+
+ # Force apply with removal of second GPO
+ gp_db = store.get_gplog(machine_creds.get_username())
+ del_gpos = gp_db.get_applied_settings([guids[1]])
+ gpos = [gpo for gpo in gpos if gpo.name != guids[1]]
+ ext.process_group_policy(del_gpos, gpos, f.name)
+
+ # With the second GPO gone, the first GPO's text takes effect again
+ self.assertEqual(open(f.name, 'r').read(), text.text,
+ 'The motd was not applied')
+
+ # Check that a call to gpupdate --rsop also succeeds
+ ret = rsop(self.lp)
+ self.assertEqual(ret, 0, 'gpupdate --rsop failed!')
+
+ # Remove policy
+ del_gpos = get_deleted_gpos_list(gp_db, [])
+ ext.process_group_policy(del_gpos, [], f.name)
+ self.assertNotEqual(open(f.name, 'r').read(), text.text,
+ 'The motd was not unapplied')
+
+ # Unstage the manifest files
+ unstage_file(manifest)
+ unstage_file(manifest2)
+
+ def test_vgp_issue(self):
+ local_path = self.lp.cache_path('gpo_cache')
+ guids = ['{31B2F340-016D-11D2-945F-00C04FB984F9}',
+ '{6AC1786C-016F-11D2-945F-00C04FB984F9}']
+ manifest = os.path.join(local_path, policies, guids[0], 'MACHINE',
+ 'VGP/VTLA/UNIX/ISSUE/MANIFEST.XML')
+ manifest2 = os.path.join(local_path, policies, guids[1], 'MACHINE',
+ 'VGP/VTLA/UNIX/ISSUE/MANIFEST.XML')
+ cache_dir = self.lp.get('cache directory')
+ store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))
+
+ machine_creds = Credentials()
+ machine_creds.guess(self.lp)
+ machine_creds.set_machine_account()
+
+ # Initialize the group policy extension
+ ext = vgp_issue_ext(self.lp, machine_creds,
+ machine_creds.get_username(), store)
+
+ gpos = get_gpo_list(self.server, machine_creds, self.lp,
+ machine_creds.get_username())
+
+ # Stage the manifest.xml file with test data
+ stage = etree.Element('vgppolicy')
+ policysetting = etree.SubElement(stage, 'policysetting')
+ version = etree.SubElement(policysetting, 'version')
+ version.text = '1'
+ data = etree.SubElement(policysetting, 'data')
+ filename = etree.SubElement(data, 'filename')
+ filename.text = 'issue'
+ text = etree.SubElement(data, 'text')
+ text.text = 'Welcome to Samba!'
+ ret = stage_file(manifest, etree.tostring(stage))
+ self.assertTrue(ret, 'Could not create the target %s' % manifest)
+
+ # Stage the other manifest.xml
+ stage = etree.Element('vgppolicy')
+ policysetting = etree.SubElement(stage, 'policysetting')
+ version = etree.SubElement(policysetting, 'version')
+ version.text = '1'
+ data = etree.SubElement(policysetting, 'data')
+ filename = etree.SubElement(data, 'filename')
+ filename.text = 'issue'
+ text2 = etree.SubElement(data, 'text')
+ text2.text = 'This test message overwrites the first'
+ ret = stage_file(manifest2, etree.tostring(stage))
+ self.assertTrue(ret, 'Could not create the target %s' % manifest2)
+
+ # Process all gpos, with temp output directory
+ with NamedTemporaryFile() as f:
+ ext.process_group_policy([], gpos, f.name)
+ self.assertEqual(open(f.name, 'r').read(), text2.text,
+ 'The issue was not applied')
+
+ # Force apply with removal of second GPO
+ gp_db = store.get_gplog(machine_creds.get_username())
+ del_gpos = gp_db.get_applied_settings([guids[1]])
+ gpos = [gpo for gpo in gpos if gpo.name != guids[1]]
+ ext.process_group_policy(del_gpos, gpos, f.name)
+
+ self.assertEqual(open(f.name, 'r').read(), text.text,
+ 'The issue was not applied')
+
+ # Check that a call to gpupdate --rsop also succeeds
+ ret = rsop(self.lp)
+ self.assertEqual(ret, 0, 'gpupdate --rsop failed!')
+
+ # Remove policy
+ del_gpos = get_deleted_gpos_list(gp_db, [])
+ ext.process_group_policy(del_gpos, [], f.name)
+ self.assertNotEqual(open(f.name, 'r').read(), text.text,
+ 'The issue was not unapplied')
+
+ # Unstage the manifest.xml file
+ unstage_file(manifest)
+
+ def test_vgp_access(self):
+ """Apply and unapply a VGP host access control policy.
+
+ Stages ALLOW and DENY manifests containing a user and a group each,
+ then checks the generated access.conf entries grant the good
+ principals and deny the bad ones, using '+' as the winbind separator.
+ """
+ local_path = self.lp.cache_path('gpo_cache')
+ guid = '{31B2F340-016D-11D2-945F-00C04FB984F9}'
+ allow = os.path.join(local_path, policies, guid, 'MACHINE',
+ 'VGP/VTLA/VAS/HOSTACCESSCONTROL/ALLOW/MANIFEST.XML')
+ deny = os.path.join(local_path, policies, guid, 'MACHINE',
+ 'VGP/VTLA/VAS/HOSTACCESSCONTROL/DENY/MANIFEST.XML')
+ cache_dir = self.lp.get('cache directory')
+ store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))
+
+ machine_creds = Credentials()
+ machine_creds.guess(self.lp)
+ machine_creds.set_machine_account()
+
+ # Initialize the group policy extension
+ # Use a non-default winbind separator, restored via addCleanup,
+ # so the test verifies the separator is honored in the output
+ winbind_sep = self.lp.get('winbind separator')
+ self.addCleanup(self.lp.set, 'winbind separator', winbind_sep)
+ self.lp.set('winbind separator', '+')
+ ext = vgp_access_ext(self.lp, machine_creds,
+ machine_creds.get_username(), store)
+
+ gpos = get_gpo_list(self.server, machine_creds, self.lp,
+ machine_creds.get_username())
+
+ # Stage the manifest.xml allow file
+ stage = etree.Element('vgppolicy')
+ policysetting = etree.SubElement(stage, 'policysetting')
+ version = etree.SubElement(policysetting, 'version')
+ version.text = '2'
+ apply_mode = etree.SubElement(policysetting, 'apply_mode')
+ apply_mode.text = 'merge'
+ data = etree.SubElement(policysetting, 'data')
+ # Add an allowed user
+ listelement = etree.SubElement(data, 'listelement')
+ otype = etree.SubElement(listelement, 'type')
+ otype.text = 'USER'
+ entry = etree.SubElement(listelement, 'entry')
+ entry.text = 'goodguy@%s' % realm
+ adobject = etree.SubElement(listelement, 'adobject')
+ name = etree.SubElement(adobject, 'name')
+ name.text = 'goodguy'
+ domain = etree.SubElement(adobject, 'domain')
+ domain.text = realm
+ otype = etree.SubElement(adobject, 'type')
+ otype.text = 'user'
+ # Add an allowed group
+ groupattr = etree.SubElement(data, 'groupattr')
+ groupattr.text = 'samAccountName'
+ listelement = etree.SubElement(data, 'listelement')
+ otype = etree.SubElement(listelement, 'type')
+ otype.text = 'GROUP'
+ entry = etree.SubElement(listelement, 'entry')
+ entry.text = '%s\\goodguys' % realm
+ dn = etree.SubElement(listelement, 'dn')
+ dn.text = 'CN=goodguys,CN=Users,%s' % base_dn
+ adobject = etree.SubElement(listelement, 'adobject')
+ name = etree.SubElement(adobject, 'name')
+ name.text = 'goodguys'
+ domain = etree.SubElement(adobject, 'domain')
+ domain.text = realm
+ otype = etree.SubElement(adobject, 'type')
+ otype.text = 'group'
+ ret = stage_file(allow, etree.tostring(stage))
+ self.assertTrue(ret, 'Could not create the target %s' % allow)
+
+ # Stage the manifest.xml deny file
+ stage = etree.Element('vgppolicy')
+ policysetting = etree.SubElement(stage, 'policysetting')
+ version = etree.SubElement(policysetting, 'version')
+ version.text = '2'
+ apply_mode = etree.SubElement(policysetting, 'apply_mode')
+ apply_mode.text = 'merge'
+ data = etree.SubElement(policysetting, 'data')
+ # Add a denied user
+ listelement = etree.SubElement(data, 'listelement')
+ otype = etree.SubElement(listelement, 'type')
+ otype.text = 'USER'
+ entry = etree.SubElement(listelement, 'entry')
+ entry.text = 'badguy@%s' % realm
+ adobject = etree.SubElement(listelement, 'adobject')
+ name = etree.SubElement(adobject, 'name')
+ name.text = 'badguy'
+ domain = etree.SubElement(adobject, 'domain')
+ domain.text = realm
+ otype = etree.SubElement(adobject, 'type')
+ otype.text = 'user'
+ # Add a denied group
+ groupattr = etree.SubElement(data, 'groupattr')
+ groupattr.text = 'samAccountName'
+ listelement = etree.SubElement(data, 'listelement')
+ otype = etree.SubElement(listelement, 'type')
+ otype.text = 'GROUP'
+ entry = etree.SubElement(listelement, 'entry')
+ entry.text = '%s\\badguys' % realm
+ dn = etree.SubElement(listelement, 'dn')
+ dn.text = 'CN=badguys,CN=Users,%s' % base_dn
+ adobject = etree.SubElement(listelement, 'adobject')
+ name = etree.SubElement(adobject, 'name')
+ name.text = 'badguys'
+ domain = etree.SubElement(adobject, 'domain')
+ domain.text = realm
+ otype = etree.SubElement(adobject, 'type')
+ otype.text = 'group'
+ ret = stage_file(deny, etree.tostring(stage))
+ self.assertTrue(ret, 'Could not create the target %s' % deny)
+
+ # Process all gpos, with temp output directory
+ with TemporaryDirectory() as dname:
+ ext.process_group_policy([], gpos, dname)
+ conf = os.listdir(dname)
+ # There will be 2 files, the policy file and the deny file
+ self.assertEqual(len(conf), 2, 'The conf file was not created')
+ # Ignore the DENY_ALL conf file
+ gp_cfg = os.path.join(dname,
+ [c for c in conf if '_gp_DENY_ALL.conf' not in c][0])
+
+ # Check the access config for the correct access.conf entries
+ # ('+' grants, '-' denies; the '+' between realm and name is
+ # the winbind separator configured above)
+ print('Config file %s found' % gp_cfg)
+ data = open(gp_cfg, 'r').read()
+ self.assertIn('+:%s+goodguy:ALL' % realm, data)
+ self.assertIn('+:%s+goodguys:ALL' % realm, data)
+ self.assertIn('-:%s+badguy:ALL' % realm, data)
+ self.assertIn('-:%s+badguys:ALL' % realm, data)
+
+ # Check that a call to gpupdate --rsop also succeeds
+ ret = rsop(self.lp)
+ self.assertEqual(ret, 0, 'gpupdate --rsop failed!')
+
+ # Remove policy
+ gp_db = store.get_gplog(machine_creds.get_username())
+ del_gpos = get_deleted_gpos_list(gp_db, [])
+ ext.process_group_policy(del_gpos, [], dname)
+ self.assertFalse(os.path.exists(gp_cfg),
+ 'Unapply failed to cleanup config')
+
+ # Unstage the manifest.pol files
+ unstage_file(allow)
+ unstage_file(deny)
+
+ def test_gnome_settings(self):
+ """Apply and unapply GNOME lockdown settings from a Registry.pol.
+
+ Stages a Registry.pol carrying GNOME settings policies, applies
+ them into a temporary root, and verifies the generated dconf
+ databases, dconf lock files, polkit udisks2 policy, and user
+ profile. Then unapplies and checks everything is removed.
+ """
+ local_path = self.lp.cache_path('gpo_cache')
+ guid = '{31B2F340-016D-11D2-945F-00C04FB984F9}'
+ reg_pol = os.path.join(local_path, policies, guid,
+ 'MACHINE/REGISTRY.POL')
+ cache_dir = self.lp.get('cache directory')
+ store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))
+
+ machine_creds = Credentials()
+ machine_creds.guess(self.lp)
+ machine_creds.set_machine_account()
+
+ # Initialize the group policy extension
+ ext = gp_gnome_settings_ext(self.lp, machine_creds,
+ machine_creds.get_username(), store)
+
+ gpos = get_gpo_list(self.server, machine_creds, self.lp,
+ machine_creds.get_username())
+
+ # Stage the Registry.pol file with test data
+ parser = GPPolParser()
+ parser.load_xml(etree.fromstring(gnome_test_reg_pol.strip()))
+ ret = stage_file(reg_pol, ndr_pack(parser.pol_file))
+ self.assertTrue(ret, 'Could not create the target %s' % reg_pol)
+
+ with TemporaryDirectory() as dname:
+ ext.process_group_policy([], gpos, dname)
+
+ local_db = os.path.join(dname, 'etc/dconf/db/local.d')
+ self.assertTrue(os.path.isdir(local_db),
+ 'Local db dir not created')
+ # Assert that `count` dconf db fragments matching *-<name>
+ # exist and together contain every section/key/value in `data`
+ def db_check(name, data, count=1):
+ db = glob(os.path.join(local_db, '*-%s' % name))
+ self.assertEqual(len(db), count, '%s not created' % name)
+ file_contents = ConfigParser()
+ file_contents.read(db)
+ for key in data.keys():
+ self.assertTrue(file_contents.has_section(key),
+ 'Section %s not found' % key)
+ options = data[key]
+ for k, v in options.items():
+ v_content = file_contents.get(key, k)
+ self.assertEqual(v_content, v,
+ '%s: %s != %s' % (key, v_content, v))
+
+ # Assert that no dconf db fragment matching *-<name> remains
+ def del_db_check(name):
+ db = glob(os.path.join(local_db, '*-%s' % name))
+ self.assertEqual(len(db), 0, '%s not deleted' % name)
+
+ locks = os.path.join(local_db, 'locks')
+ self.assertTrue(os.path.isdir(local_db), 'Locks dir not created')
+ # Assert that `count` lock files matching *<name> exist and
+ # that their combined lines include every path in `items`
+ def lock_check(name, items, count=1):
+ lock = glob(os.path.join(locks, '*%s' % name))
+ self.assertEqual(len(lock), count,
+ '%s lock not created' % name)
+ file_contents = []
+ for i in range(count):
+ file_contents.extend(open(lock[i], 'r').read().split('\n'))
+ for data in items:
+ self.assertIn(data, file_contents,
+ '%s lock not created' % data)
+
+ # Assert that no lock file matching *<name> remains
+ def del_lock_check(name):
+ lock = glob(os.path.join(locks, '*%s' % name))
+ self.assertEqual(len(lock), 0, '%s lock not deleted' % name)
+
+ # Check the user profile
+ user_profile = os.path.join(dname, 'etc/dconf/profile/user')
+ self.assertTrue(os.path.exists(user_profile),
+ 'User profile not created')
+
+ # Enable the compose key
+ data = { 'org/gnome/desktop/input-sources':
+ { 'xkb-options': '[\'compose:ralt\']' }
+ }
+ db_check('input-sources', data)
+ items = ['/org/gnome/desktop/input-sources/xkb-options']
+ lock_check('input-sources', items)
+
+ # Dim screen when user is idle
+ data = { 'org/gnome/settings-daemon/plugins/power':
+ { 'idle-dim': 'true',
+ 'idle-brightness': '30'
+ }
+ }
+ db_check('power', data)
+ data = { 'org/gnome/desktop/session':
+ { 'idle-delay': 'uint32 300' }
+ }
+ db_check('session', data)
+ items = ['/org/gnome/settings-daemon/plugins/power/idle-dim',
+ '/org/gnome/settings-daemon/plugins/power/idle-brightness',
+ '/org/gnome/desktop/session/idle-delay']
+ lock_check('power-saving', items)
+
+ # Lock down specific settings
+ bg_locks = ['/org/gnome/desktop/background/picture-uri',
+ '/org/gnome/desktop/background/picture-options',
+ '/org/gnome/desktop/background/primary-color',
+ '/org/gnome/desktop/background/secondary-color']
+ lock_check('group-policy', bg_locks)
+
+ # Lock down enabled extensions
+ data = { 'org/gnome/shell':
+ { 'enabled-extensions':
+ '[\'myextension1@myname.example.com\', \'myextension2@myname.example.com\']',
+ 'development-tools': 'false' }
+ }
+ db_check('extensions', data)
+ items = [ '/org/gnome/shell/enabled-extensions',
+ '/org/gnome/shell/development-tools' ]
+ lock_check('extensions', items)
+
+ # Disallow login using a fingerprint
+ data = { 'org/gnome/login-screen':
+ { 'enable-fingerprint-authentication': 'false' }
+ }
+ db_check('fingerprintreader', data)
+ items = ['/org/gnome/login-screen/enable-fingerprint-authentication']
+ lock_check('fingerprintreader', items)
+
+ # Disable user logout and user switching
+ # (two policies write these settings, hence count=2)
+ data = { 'org/gnome/desktop/lockdown':
+ { 'disable-log-out': 'true',
+ 'disable-user-switching': 'true' }
+ }
+ db_check('logout', data, 2)
+ items = ['/org/gnome/desktop/lockdown/disable-log-out',
+ '/org/gnome/desktop/lockdown/disable-user-switching']
+ lock_check('logout', items, 2)
+
+ # Disable repartitioning
+ # The polkit policy file name casing varies, so glob both cases
+ actions = os.path.join(dname, 'etc/share/polkit-1/actions')
+ udisk2 = glob(os.path.join(actions,
+ 'org.freedesktop.[u|U][d|D]isks2.policy'))
+ self.assertEqual(len(udisk2), 1, 'udisk2 policy not created')
+ udisk2_tree = etree.fromstring(open(udisk2[0], 'r').read())
+ actions = udisk2_tree.findall('action')
+ md = 'org.freedesktop.udisks2.modify-device'
+ action = [a for a in actions if a.attrib['id'] == md]
+ self.assertEqual(len(action), 1, 'modify-device not found')
+ defaults = action[0].find('defaults')
+ self.assertTrue(defaults is not None,
+ 'modify-device defaults not found')
+ allow_any = defaults.find('allow_any').text
+ self.assertEqual(allow_any, 'no',
+ 'modify-device allow_any not set to no')
+ allow_inactive = defaults.find('allow_inactive').text
+ self.assertEqual(allow_inactive, 'no',
+ 'modify-device allow_inactive not set to no')
+ allow_active = defaults.find('allow_active').text
+ self.assertEqual(allow_active, 'yes',
+ 'modify-device allow_active not set to yes')
+
+ # Disable printing
+ data = { 'org/gnome/desktop/lockdown':
+ { 'disable-printing': 'true' }
+ }
+ db_check('printing', data)
+ items = ['/org/gnome/desktop/lockdown/disable-printing']
+ lock_check('printing', items)
+
+ # Disable file saving
+ data = { 'org/gnome/desktop/lockdown':
+ { 'disable-save-to-disk': 'true' }
+ }
+ db_check('filesaving', data)
+ items = ['/org/gnome/desktop/lockdown/disable-save-to-disk']
+ lock_check('filesaving', items)
+
+ # Disable command-line access
+ data = { 'org/gnome/desktop/lockdown':
+ { 'disable-command-line': 'true' }
+ }
+ db_check('cmdline', data)
+ items = ['/org/gnome/desktop/lockdown/disable-command-line']
+ lock_check('cmdline', items)
+
+ # Allow or disallow online accounts
+ data = { 'org/gnome/online-accounts':
+ { 'whitelisted-providers': '[\'google\']' }
+ }
+ db_check('goa', data)
+ items = ['/org/gnome/online-accounts/whitelisted-providers']
+ lock_check('goa', items)
+
+ # Verify RSOP does not fail
+ ext.rsop([g for g in gpos if g.name == guid][0])
+
+ # Check that a call to gpupdate --rsop also succeeds
+ ret = rsop(self.lp)
+ self.assertEqual(ret, 0, 'gpupdate --rsop failed!')
+
+ # Remove policy
+ gp_db = store.get_gplog(machine_creds.get_username())
+ del_gpos = get_deleted_gpos_list(gp_db, [])
+ ext.process_group_policy(del_gpos, [], dname)
+ del_db_check('input-sources')
+ del_lock_check('input-sources')
+ del_db_check('power')
+ del_db_check('session')
+ del_lock_check('power-saving')
+ del_lock_check('group-policy')
+ del_db_check('extensions')
+ del_lock_check('extensions')
+ del_db_check('fingerprintreader')
+ del_lock_check('fingerprintreader')
+ del_db_check('logout')
+ del_lock_check('logout')
+ actions = os.path.join(dname, 'etc/share/polkit-1/actions')
+ udisk2 = glob(os.path.join(actions,
+ 'org.freedesktop.[u|U][d|D]isks2.policy'))
+ self.assertEqual(len(udisk2), 0, 'udisk2 policy not deleted')
+ del_db_check('printing')
+ del_lock_check('printing')
+ del_db_check('filesaving')
+ del_lock_check('filesaving')
+ del_db_check('cmdline')
+ del_lock_check('cmdline')
+ del_db_check('goa')
+ del_lock_check('goa')
+
+ # Unstage the Registry.pol file
+ unstage_file(reg_pol)
+
+ def test_gp_cert_auto_enroll_ext_without_ndes(self):
+ """Certificate auto-enrollment when the NDES endpoint is unavailable.
+
+ Uses dummy_requests(want_exception=True) so HTTP requests to the
+ enrollment server raise; enrollment must still succeed via the
+ fallback path, producing the root CA cert, machine cert and key.
+ Dummy CA/enrollment/template LDAP entries are written (and removed
+ at the end) so the extension has something to discover.
+ """
+ local_path = self.lp.cache_path('gpo_cache')
+ guid = '{31B2F340-016D-11D2-945F-00C04FB984F9}'
+ reg_pol = os.path.join(local_path, policies, guid,
+ 'MACHINE/REGISTRY.POL')
+ cache_dir = self.lp.get('cache directory')
+ store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))
+
+ machine_creds = Credentials()
+ machine_creds.guess(self.lp)
+ machine_creds.set_machine_account()
+
+ # Initialize the group policy extension
+ cae.requests = dummy_requests(want_exception=True)
+ ext = cae.gp_cert_auto_enroll_ext(self.lp, machine_creds,
+ machine_creds.get_username(), store)
+
+ gpos = get_gpo_list(self.server, machine_creds, self.lp,
+ machine_creds.get_username())
+
+ # Stage the Registry.pol file with test data
+ parser = GPPolParser()
+ parser.load_xml(etree.fromstring(auto_enroll_reg_pol.strip()))
+ ret = stage_file(reg_pol, ndr_pack(parser.pol_file))
+ self.assertTrue(ret, 'Could not create the target %s' % reg_pol)
+
+ # Write the dummy CA entry, Enrollment Services, and Templates Entries
+ admin_creds = Credentials()
+ admin_creds.set_username(os.environ.get('DC_USERNAME'))
+ admin_creds.set_password(os.environ.get('DC_PASSWORD'))
+ admin_creds.set_realm(os.environ.get('REALM'))
+ hostname = get_dc_hostname(machine_creds, self.lp)
+ url = 'ldap://%s' % hostname
+ ldb = Ldb(url=url, session_info=system_session(),
+ lp=self.lp, credentials=admin_creds)
+ # Write the dummy CA
+ confdn = 'CN=Public Key Services,CN=Services,CN=Configuration,%s' % base_dn
+ ca_cn = '%s-CA' % hostname.replace('.', '-')
+ certa_dn = 'CN=%s,CN=Certification Authorities,%s' % (ca_cn, confdn)
+ ldb.add({'dn': certa_dn,
+ 'objectClass': 'certificationAuthority',
+ 'authorityRevocationList': ['XXX'],
+ 'cACertificate': dummy_certificate(),
+ 'certificateRevocationList': ['XXX'],
+ })
+ # Write the dummy pKIEnrollmentService
+ enroll_dn = 'CN=%s,CN=Enrollment Services,%s' % (ca_cn, confdn)
+ ldb.add({'dn': enroll_dn,
+ 'objectClass': 'pKIEnrollmentService',
+ 'cACertificate': dummy_certificate(),
+ 'certificateTemplates': ['Machine'],
+ 'dNSHostName': hostname,
+ })
+ # Write the dummy pKICertificateTemplate
+ template_dn = 'CN=Machine,CN=Certificate Templates,%s' % confdn
+ ldb.add({'dn': template_dn,
+ 'objectClass': 'pKICertificateTemplate',
+ })
+
+ with TemporaryDirectory() as dname:
+ # Enrollment must not raise even though HTTP requests fail
+ try:
+ ext.process_group_policy([], gpos, dname, dname)
+ except Exception as e:
+ self.fail(str(e))
+
+ ca_crt = os.path.join(dname, '%s.crt' % ca_cn)
+ self.assertTrue(os.path.exists(ca_crt),
+ 'Root CA certificate was not requested')
+ machine_crt = os.path.join(dname, '%s.Machine.crt' % ca_cn)
+ self.assertTrue(os.path.exists(machine_crt),
+ 'Machine certificate was not requested')
+ machine_key = os.path.join(dname, '%s.Machine.key' % ca_cn)
+ self.assertTrue(os.path.exists(machine_key),
+ 'Machine key was not generated')
+
+ # Verify RSOP does not fail
+ ext.rsop([g for g in gpos if g.name == guid][0])
+
+ # Check that a call to gpupdate --rsop also succeeds
+ ret = rsop(self.lp)
+ self.assertEqual(ret, 0, 'gpupdate --rsop failed!')
+
+ # Remove policy
+ gp_db = store.get_gplog(machine_creds.get_username())
+ del_gpos = get_deleted_gpos_list(gp_db, [])
+ ext.process_group_policy(del_gpos, [], dname)
+ self.assertFalse(os.path.exists(ca_crt),
+ 'Root CA certificate was not removed')
+ self.assertFalse(os.path.exists(machine_crt),
+ 'Machine certificate was not removed')
+ self.assertFalse(os.path.exists(machine_key),
+ 'Machine key was not removed')
+ # Confirm certmonger no longer tracks the CA or certificates
+ out, _ = Popen(['getcert', 'list-cas'], stdout=PIPE).communicate()
+ self.assertNotIn(get_bytes(ca_cn), out, 'CA was not removed')
+ out, _ = Popen(['getcert', 'list'], stdout=PIPE).communicate()
+ self.assertNotIn(b'Machine', out,
+ 'Machine certificate not removed')
+ self.assertNotIn(b'Workstation', out,
+ 'Workstation certificate not removed')
+
+ # Remove the dummy CA, pKIEnrollmentService, and pKICertificateTemplate
+ ldb.delete(certa_dn)
+ ldb.delete(enroll_dn)
+ ldb.delete(template_dn)
+
+ # Unstage the Registry.pol file
+ unstage_file(reg_pol)
+
+ def test_gp_cert_auto_enroll_ext(self):
+ """Full certificate auto-enrollment apply/reapply/unapply cycle.
+
+ With a working dummy NDES endpoint, verifies that machine certs are
+ requested, that a newly supported template (Workstation) is picked
+ up on re-apply, that unchecking auto-enroll in the Registry.pol
+ removes the certs, and that final policy removal cleans up
+ certmonger state. Dummy CA/enrollment/template LDAP entries are
+ written (and removed at the end).
+ """
+ local_path = self.lp.cache_path('gpo_cache')
+ guid = '{31B2F340-016D-11D2-945F-00C04FB984F9}'
+ reg_pol = os.path.join(local_path, policies, guid,
+ 'MACHINE/REGISTRY.POL')
+ cache_dir = self.lp.get('cache directory')
+ store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))
+
+ machine_creds = Credentials()
+ machine_creds.guess(self.lp)
+ machine_creds.set_machine_account()
+
+ # Initialize the group policy extension
+ cae.requests = dummy_requests()
+ ext = cae.gp_cert_auto_enroll_ext(self.lp, machine_creds,
+ machine_creds.get_username(), store)
+
+ gpos = get_gpo_list(self.server, machine_creds, self.lp,
+ machine_creds.get_username())
+
+ # Stage the Registry.pol file with test data
+ parser = GPPolParser()
+ parser.load_xml(etree.fromstring(auto_enroll_reg_pol.strip()))
+ ret = stage_file(reg_pol, ndr_pack(parser.pol_file))
+ self.assertTrue(ret, 'Could not create the target %s' % reg_pol)
+
+ # Write the dummy CA entry, Enrollment Services, and Templates Entries
+ admin_creds = Credentials()
+ admin_creds.set_username(os.environ.get('DC_USERNAME'))
+ admin_creds.set_password(os.environ.get('DC_PASSWORD'))
+ admin_creds.set_realm(os.environ.get('REALM'))
+ hostname = get_dc_hostname(machine_creds, self.lp)
+ url = 'ldap://%s' % hostname
+ ldb = Ldb(url=url, session_info=system_session(),
+ lp=self.lp, credentials=admin_creds)
+ # Write the dummy CA (cACertificate is a truncated DER blob; only
+ # its presence matters here)
+ confdn = 'CN=Public Key Services,CN=Services,CN=Configuration,%s' % base_dn
+ ca_cn = '%s-CA' % hostname.replace('.', '-')
+ certa_dn = 'CN=%s,CN=Certification Authorities,%s' % (ca_cn, confdn)
+ ldb.add({'dn': certa_dn,
+ 'objectClass': 'certificationAuthority',
+ 'authorityRevocationList': ['XXX'],
+ 'cACertificate': b'0\x82\x03u0\x82\x02]\xa0\x03\x02\x01\x02\x02\x10I',
+ 'certificateRevocationList': ['XXX'],
+ })
+ # Write the dummy pKIEnrollmentService
+ enroll_dn = 'CN=%s,CN=Enrollment Services,%s' % (ca_cn, confdn)
+ ldb.add({'dn': enroll_dn,
+ 'objectClass': 'pKIEnrollmentService',
+ 'cACertificate': b'0\x82\x03u0\x82\x02]\xa0\x03\x02\x01\x02\x02\x10I',
+ 'certificateTemplates': ['Machine'],
+ 'dNSHostName': hostname,
+ })
+ # Write the dummy pKICertificateTemplate
+ template_dn = 'CN=Machine,CN=Certificate Templates,%s' % confdn
+ ldb.add({'dn': template_dn,
+ 'objectClass': 'pKICertificateTemplate',
+ })
+
+ with TemporaryDirectory() as dname:
+ ext.process_group_policy([], gpos, dname, dname)
+ ca_crt = os.path.join(dname, '%s.crt' % ca_cn)
+ self.assertTrue(os.path.exists(ca_crt),
+ 'Root CA certificate was not requested')
+ machine_crt = os.path.join(dname, '%s.Machine.crt' % ca_cn)
+ self.assertTrue(os.path.exists(machine_crt),
+ 'Machine certificate was not requested')
+ machine_key = os.path.join(dname, '%s.Machine.key' % ca_cn)
+ self.assertTrue(os.path.exists(machine_key),
+ 'Machine key was not generated')
+
+ # Subsequent apply should react to new certificate templates
+ # (the env var extends the templates the dummy CEP/CES supports;
+ # popped again via addCleanup)
+ os.environ['CEPCES_SUBMIT_SUPPORTED_TEMPLATES'] = 'Machine,Workstation'
+ self.addCleanup(os.environ.pop, 'CEPCES_SUBMIT_SUPPORTED_TEMPLATES')
+ ext.process_group_policy([], gpos, dname, dname)
+ self.assertTrue(os.path.exists(ca_crt),
+ 'Root CA certificate was not requested')
+ self.assertTrue(os.path.exists(machine_crt),
+ 'Machine certificate was not requested')
+ self.assertTrue(os.path.exists(machine_key),
+ 'Machine key was not generated')
+ workstation_crt = os.path.join(dname, '%s.Workstation.crt' % ca_cn)
+ self.assertTrue(os.path.exists(workstation_crt),
+ 'Workstation certificate was not requested')
+ workstation_key = os.path.join(dname, '%s.Workstation.key' % ca_cn)
+ self.assertTrue(os.path.exists(workstation_key),
+ 'Workstation key was not generated')
+
+ # Verify RSOP does not fail
+ ext.rsop([g for g in gpos if g.name == guid][0])
+
+ # Check that a call to gpupdate --rsop also succeeds
+ ret = rsop(self.lp)
+ self.assertEqual(ret, 0, 'gpupdate --rsop failed!')
+
+ # Remove policy by staging pol file with auto-enroll unchecked
+ parser.load_xml(etree.fromstring(auto_enroll_unchecked_reg_pol.strip()))
+ ret = stage_file(reg_pol, ndr_pack(parser.pol_file))
+ self.assertTrue(ret, 'Could not create the target %s' % reg_pol)
+ ext.process_group_policy([], gpos, dname, dname)
+ self.assertFalse(os.path.exists(ca_crt),
+ 'Root CA certificate was not removed')
+ self.assertFalse(os.path.exists(machine_crt),
+ 'Machine certificate was not removed')
+ self.assertFalse(os.path.exists(machine_key),
+ 'Machine key was not removed')
+ self.assertFalse(os.path.exists(workstation_crt),
+ 'Workstation certificate was not removed')
+ self.assertFalse(os.path.exists(workstation_key),
+ 'Workstation key was not removed')
+
+ # Reapply policy by staging the enabled pol file
+ parser.load_xml(etree.fromstring(auto_enroll_reg_pol.strip()))
+ ret = stage_file(reg_pol, ndr_pack(parser.pol_file))
+ self.assertTrue(ret, 'Could not create the target %s' % reg_pol)
+ ext.process_group_policy([], gpos, dname, dname)
+ self.assertTrue(os.path.exists(ca_crt),
+ 'Root CA certificate was not requested')
+ self.assertTrue(os.path.exists(machine_crt),
+ 'Machine certificate was not requested')
+ self.assertTrue(os.path.exists(machine_key),
+ 'Machine key was not generated')
+ self.assertTrue(os.path.exists(workstation_crt),
+ 'Workstation certificate was not requested')
+ self.assertTrue(os.path.exists(workstation_key),
+ 'Workstation key was not generated')
+
+ # Remove policy
+ gp_db = store.get_gplog(machine_creds.get_username())
+ del_gpos = get_deleted_gpos_list(gp_db, [])
+ ext.process_group_policy(del_gpos, [], dname)
+ self.assertFalse(os.path.exists(ca_crt),
+ 'Root CA certificate was not removed')
+ self.assertFalse(os.path.exists(machine_crt),
+ 'Machine certificate was not removed')
+ self.assertFalse(os.path.exists(machine_key),
+ 'Machine key was not removed')
+ self.assertFalse(os.path.exists(workstation_crt),
+ 'Workstation certificate was not removed')
+ self.assertFalse(os.path.exists(workstation_key),
+ 'Workstation key was not removed')
+ # Confirm certmonger no longer tracks the CA or certificates
+ out, _ = Popen(['getcert', 'list-cas'], stdout=PIPE).communicate()
+ self.assertNotIn(get_bytes(ca_cn), out, 'CA was not removed')
+ out, _ = Popen(['getcert', 'list'], stdout=PIPE).communicate()
+ self.assertNotIn(b'Machine', out,
+ 'Machine certificate not removed')
+ self.assertNotIn(b'Workstation', out,
+ 'Workstation certificate not removed')
+
+ # Remove the dummy CA, pKIEnrollmentService, and pKICertificateTemplate
+ ldb.delete(certa_dn)
+ ldb.delete(enroll_dn)
+ ldb.delete(template_dn)
+
+ # Unstage the Registry.pol file
+ unstage_file(reg_pol)
+
    def test_gp_user_scripts_ext(self):
        """Apply/unapply user cron-script policy from two staged GPOs.

        For each of the four script sections (daily/monthly/weekly/hourly),
        stages a Registry.pol in each of two GPOs, applies both and checks
        the resulting crontab entries, then deletes the second GPO's
        settings (first entry must survive, second must go), runs
        ``gpupdate --rsop``, and finally unapplies everything.
        """
        local_path = self.lp.cache_path('gpo_cache')
        # Two GPO GUIDs so apply/partial-delete interaction can be tested.
        guids = ['{31B2F340-016D-11D2-945F-00C04FB984F9}',
                 '{6AC1786C-016F-11D2-945F-00C04FB984F9}']
        reg_pol = os.path.join(local_path, policies, guids[0],
                               'USER/REGISTRY.POL')
        reg_pol2 = os.path.join(local_path, policies, guids[1],
                                'USER/REGISTRY.POL')
        cache_dir = self.lp.get('cache directory')
        store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))

        machine_creds = Credentials()
        machine_creds.guess(self.lp)
        machine_creds.set_machine_account()

        # Initialize the group policy extension
        ext = gp_user_scripts_ext(self.lp, machine_creds,
                                  os.environ.get('DC_USERNAME'), store)

        gpos = get_gpo_list(self.server, machine_creds, self.lp,
                            machine_creds.get_username())

        reg_key = b'Software\\Policies\\Samba\\Unix Settings'
        # Map each script registry subkey to its expected cron schedule tag.
        sections = { b'%s\\Daily Scripts' % reg_key : b'@daily',
                     b'%s\\Monthly Scripts' % reg_key : b'@monthly',
                     b'%s\\Weekly Scripts' % reg_key : b'@weekly',
                     b'%s\\Hourly Scripts' % reg_key : b'@hourly' }
        for keyname in sections.keys():
            # Stage the Registry.pol file with test data
            stage = preg.file()
            e = preg.entry()
            e.keyname = keyname
            e.valuename = b'Software\\Policies\\Samba\\Unix Settings'
            e.type = 1
            e.data = b'echo hello world'
            stage.num_entries = 1
            stage.entries = [e]
            ret = stage_file(reg_pol, ndr_pack(stage))
            self.assertTrue(ret, 'Could not create the target %s' % reg_pol)

            # Stage the other Registry.pol
            stage = preg.file()
            e2 = preg.entry()
            e2.keyname = keyname
            e2.valuename = b'Software\\Policies\\Samba\\Unix Settings'
            e2.type = 1
            e2.data = b'echo this is a second policy'
            stage.num_entries = 1
            stage.entries = [e2]
            ret = stage_file(reg_pol2, ndr_pack(stage))
            self.assertTrue(ret, 'Could not create the target %s' % reg_pol2)

            # Process all gpos, intentionally skipping the privilege drop
            ext.process_group_policy([], gpos)
            # Dump the fake crontab setup for testing
            p = Popen(['crontab', '-l'], stdout=PIPE)
            crontab, _ = p.communicate()
            # NOTE(review): e.data was assigned bytes above, yet .encode() is
            # called here — presumably the preg.entry NDR setter stores REG_SZ
            # data back as str; confirm against samba.dcerpc.preg.
            entry = b'%s %s' % (sections[keyname], e.data.encode())
            self.assertIn(entry, crontab,
                          'The crontab entry was not installed')
            entry2 = b'%s %s' % (sections[keyname], e2.data.encode())
            self.assertIn(entry2, crontab,
                          'The crontab entry was not installed')

            # Force apply with removal of second GPO
            gp_db = store.get_gplog(os.environ.get('DC_USERNAME'))
            del_gpos = gp_db.get_applied_settings([guids[1]])
            rgpos = [gpo for gpo in gpos if gpo.name != guids[1]]
            ext.process_group_policy(del_gpos, rgpos)

            # Dump the fake crontab setup for testing
            p = Popen(['crontab', '-l'], stdout=PIPE)
            crontab, _ = p.communicate()

            # Ensure the first entry remains, and the second entry is removed
            self.assertIn(entry, crontab,
                          'The first crontab entry was not found')
            self.assertNotIn(entry2, crontab,
                             'The second crontab entry was still present')

            # Check that a call to gpupdate --rsop also succeeds
            ret = rsop(self.lp)
            self.assertEqual(ret, 0, 'gpupdate --rsop failed!')

            # Remove policy
            del_gpos = get_deleted_gpos_list(gp_db, [])
            ext.process_group_policy(del_gpos, [])
            # Dump the fake crontab setup for testing
            p = Popen(['crontab', '-l'], stdout=PIPE)
            crontab, _ = p.communicate()
            self.assertNotIn(entry, crontab,
                             'Unapply failed to cleanup crontab entry')

            # Unstage the Registry.pol files
            unstage_file(reg_pol)
            unstage_file(reg_pol2)
+
    def test_gp_firefox_ext(self):
        """End-to-end test of the Firefox policies.json extension.

        Applies a staged Registry.pol and compares the generated
        policies.json to the expected JSON; alters the policy (drops
        AppUpdateURL, flips AppAutoUpdate to 0) and verifies enforcement;
        checks RSOP; unapplies; then seeds an old-style cache entry plus a
        stale policies.json and verifies a re-apply overwrites both.
        """
        local_path = self.lp.cache_path('gpo_cache')
        guid = '{31B2F340-016D-11D2-945F-00C04FB984F9}'
        reg_pol = os.path.join(local_path, policies, guid,
                               'MACHINE/REGISTRY.POL')
        cache_dir = self.lp.get('cache directory')
        store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))

        machine_creds = Credentials()
        machine_creds.guess(self.lp)
        machine_creds.set_machine_account()

        # Initialize the group policy extension
        ext = gp_firefox_ext(self.lp, machine_creds,
                             machine_creds.get_username(), store)

        gpos = get_gpo_list(self.server, machine_creds, self.lp,
                            machine_creds.get_username())

        # Stage the Registry.pol file with test data
        parser = GPPolParser()
        parser.load_xml(etree.fromstring(firefox_reg_pol.strip()))
        ret = stage_file(reg_pol, ndr_pack(parser.pol_file))
        self.assertTrue(ret, 'Could not create the target %s' % reg_pol)

        with TemporaryDirectory() as dname:
            ext.process_group_policy([], gpos, dname)
            policies_file = os.path.join(dname, 'policies.json')
            with open(policies_file, 'r') as r:
                policy_data = json.load(r)
            expected_policy_data = json.loads(firefox_json_expected)
            self.assertIn('policies', policy_data, 'Policies were not applied')
            self.assertEqual(expected_policy_data['policies'].keys(),
                             policy_data['policies'].keys(),
                             'Firefox policies are missing')
            for name in expected_policy_data['policies'].keys():
                self.assertEqual(expected_policy_data['policies'][name],
                                 policy_data['policies'][name],
                                 'Policies were not applied')

            # Check that modifying the policy will enforce the correct settings
            entries = [e for e in parser.pol_file.entries
                       if e.valuename != 'AppUpdateURL']
            for e in entries:
                if e.valuename == 'AppAutoUpdate':
                    e.data = 0
            parser.pol_file.entries = entries
            parser.pol_file.num_entries = len(entries)
            # Stage the Registry.pol file with altered test data
            unstage_file(reg_pol)
            ret = stage_file(reg_pol, ndr_pack(parser.pol_file))
            self.assertTrue(ret, 'Could not create the target %s' % reg_pol)

            # Enforce the altered policy (no dname: the extension reuses its
            # cached output location from the first apply)
            ext.process_group_policy([], gpos)

            # Check that the App Update policy was altered
            with open(policies_file, 'r') as r:
                policy_data = json.load(r)
            self.assertIn('policies', policy_data, 'Policies were not applied')
            # Expected key set is the original minus the removed AppUpdateURL.
            keys = list(expected_policy_data['policies'].keys())
            keys.remove('AppUpdateURL')
            keys.sort()
            policy_keys = list(policy_data['policies'].keys())
            policy_keys.sort()
            self.assertEqual(keys, policy_keys, 'Firefox policies are incorrect')
            for name in policy_data['policies'].keys():
                self.assertNotEqual(name, 'AppUpdateURL',
                                    'Failed to remove AppUpdateURL policy')
                if name == 'AppAutoUpdate':
                    self.assertEqual(False, policy_data['policies'][name],
                                     'Failed to alter AppAutoUpdate policy')
                    continue
                self.assertEqual(expected_policy_data['policies'][name],
                                 policy_data['policies'][name],
                                 'Policies were not applied')

            # Verify RSOP does not fail
            ext.rsop([g for g in gpos if g.name == guid][0])

            # Check that a call to gpupdate --rsop also succeeds
            ret = rsop(self.lp)
            self.assertEqual(ret, 0, 'gpupdate --rsop failed!')

            # Unapply the policy
            gp_db = store.get_gplog(machine_creds.get_username())
            del_gpos = get_deleted_gpos_list(gp_db, [])
            ext.process_group_policy(del_gpos, [], dname)
            if os.path.exists(policies_file):
                data = json.load(open(policies_file, 'r'))
                if 'policies' in data.keys():
                    self.assertEqual(len(data['policies'].keys()), 0,
                                     'The policy was not unapplied')

            # Initialize the cache with old style existing policies,
            # ensure they are overwritten.
            old_cache = {'policies': {}}
            ext.cache_add_attribute(guid, 'policies.json',
                                    json.dumps(old_cache))
            with open(policies_file, 'w') as w:
                w.write(firefox_json_expected)

            # Overwrite policy
            ext.process_group_policy([], gpos)

            # Check that policy was overwritten
            with open(policies_file, 'r') as r:
                policy_data = json.load(r)
            self.assertIn('policies', policy_data, 'Policies were not applied')
            policy_keys = list(policy_data['policies'].keys())
            policy_keys.sort()
            self.assertEqual(keys, policy_keys, 'Firefox policies are incorrect')
            for name in policy_data['policies'].keys():
                self.assertNotEqual(name, 'AppUpdateURL',
                                    'Failed to remove AppUpdateURL policy')
                if name == 'AppAutoUpdate':
                    self.assertEqual(False, policy_data['policies'][name],
                                     'Failed to overwrite AppAutoUpdate policy')
                    continue
                self.assertEqual(expected_policy_data['policies'][name],
                                 policy_data['policies'][name],
                                 'Policies were not applied')

            # Unapply the policy
            gp_db = store.get_gplog(machine_creds.get_username())
            del_gpos = get_deleted_gpos_list(gp_db, [])
            ext.process_group_policy(del_gpos, [], dname)
            if os.path.exists(policies_file):
                data = json.load(open(policies_file, 'r'))
                if 'policies' in data.keys():
                    self.assertEqual(len(data['policies'].keys()), 0,
                                     'The policy was not unapplied')

        # Unstage the Registry.pol file
        unstage_file(reg_pol)
+
    def test_gp_chromium_ext(self):
        """End-to-end test of the Chromium managed/recommended policies.

        Applies a staged Registry.pol and compares the single JSON file
        written under 'managed' and 'recommended' to the expected data;
        modifies one entry in each set and verifies a re-apply replaces
        (not duplicates) each file; checks RSOP; then unapplies and
        verifies any remaining JSON is empty.
        """
        local_path = self.lp.cache_path('gpo_cache')
        guid = '{31B2F340-016D-11D2-945F-00C04FB984F9}'
        reg_pol = os.path.join(local_path, policies, guid,
                               'MACHINE/REGISTRY.POL')
        cache_dir = self.lp.get('cache directory')
        store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))

        machine_creds = Credentials()
        machine_creds.guess(self.lp)
        machine_creds.set_machine_account()

        # Initialize the group policy extension
        ext = gp_chromium_ext(self.lp, machine_creds,
                              machine_creds.get_username(), store)

        gpos = get_gpo_list(self.server, machine_creds, self.lp,
                            machine_creds.get_username())

        # Stage the Registry.pol file with test data
        parser = GPPolParser()
        parser.load_xml(etree.fromstring(chromium_reg_pol.strip()))
        ret = stage_file(reg_pol, ndr_pack(parser.pol_file))
        self.assertTrue(ret, 'Could not create the target %s' % reg_pol)

        with TemporaryDirectory() as dname:
            ext.process_group_policy([], gpos, dname)
            managed = os.path.join(dname, 'managed')
            managed_files = os.listdir(managed)
            self.assertEqual(len(managed_files), 1,
                             'Chromium policies are missing')
            managed_file = os.path.join(managed, managed_files[0])
            with open(managed_file, 'r') as r:
                managed_data = json.load(r)
            recommended = os.path.join(dname, 'recommended')
            recommended_files = os.listdir(recommended)
            self.assertEqual(len(recommended_files), 1,
                             'Chromium policies are missing')
            recommended_file = os.path.join(recommended, recommended_files[0])
            with open(recommended_file, 'r') as r:
                recommended_data = json.load(r)
            expected_managed_data = json.loads(chromium_json_expected_managed)
            expected_recommended_data = \
                json.loads(chromium_json_expected_recommended)
            # Show full diffs on mismatch; these dicts are large.
            self.maxDiff = None
            self.assertEqual(sorted(expected_managed_data.keys()),
                             sorted(managed_data.keys()),
                             'Chromium policies are missing')
            for name in expected_managed_data.keys():
                self.assertEqual(expected_managed_data[name],
                                 managed_data[name],
                                 'Policies were not applied')
            self.assertEqual(expected_recommended_data.keys(),
                             recommended_data.keys(),
                             'Chromium policies are missing')
            for name in expected_recommended_data.keys():
                self.assertEqual(expected_recommended_data[name],
                                 recommended_data[name],
                                 'Policies were not applied')

            # Ensure modifying the policy does not generate extra policy files
            unstage_file(reg_pol)
            # Change a managed entry:
            parser.pol_file.entries[0].data = 0
            # Change a recommended entry:
            parser.pol_file.entries[-1].data = b'https://google.com'
            ret = stage_file(reg_pol, ndr_pack(parser.pol_file))
            self.assertTrue(ret, 'Could not create the target %s' % reg_pol)

            ext.process_group_policy([], gpos, dname)
            managed_files = os.listdir(managed)
            self.assertEqual(len(managed_files), 1,
                             'Number of Chromium policies is incorrect')
            # The file name must differ from the first apply's output,
            # proving the old file was replaced rather than kept alongside.
            omanaged_file = managed_file
            managed_file = os.path.join(managed, managed_files[0])
            self.assertNotEqual(omanaged_file, managed_file,
                                'The managed Chromium file did not change')

            recommended_files = os.listdir(recommended)
            self.assertEqual(len(recommended_files), 1,
                             'Number of Chromium policies is incorrect')
            orecommended_file = recommended_file
            recommended_file = os.path.join(recommended, recommended_files[0])
            self.assertNotEqual(orecommended_file, recommended_file,
                                'The recommended Chromium file did not change')

            # Verify RSOP does not fail
            ext.rsop([g for g in gpos if g.name == guid][0])

            # Check that a call to gpupdate --rsop also succeeds
            ret = rsop(self.lp)
            self.assertEqual(ret, 0, 'gpupdate --rsop failed!')

            # Unapply the policy
            gp_db = store.get_gplog(machine_creds.get_username())
            del_gpos = get_deleted_gpos_list(gp_db, [])
            ext.process_group_policy(del_gpos, [], dname)
            # 'managed'/'recommended' are rebound from dir to file path here.
            managed = os.path.join(managed, managed_files[0])
            if os.path.exists(managed):
                data = json.load(open(managed, 'r'))
                self.assertEqual(len(data.keys()), 0,
                                 'The policy was not unapplied')
            recommended = os.path.join(recommended, recommended_files[0])
            if os.path.exists(recommended):
                data = json.load(open(recommended, 'r'))
                self.assertEqual(len(data.keys()), 0,
                                 'The policy was not unapplied')

        # Unstage the Registry.pol file
        unstage_file(reg_pol)
+
    def test_gp_firewalld_ext(self):
        """Apply/alter/unapply firewalld zone and rich-rule policy.

        Applies a staged Registry.pol and checks via ``firewall-cmd`` that
        the 'work' and 'home' zones exist with eth0 attached and the rich
        rule set; removes the 'home' entry from the policy and verifies
        enforcement drops it; checks RSOP; then unapplies and verifies
        both zones are gone.  Requires a live firewalld on the test host.
        """
        local_path = self.lp.cache_path('gpo_cache')
        guid = '{31B2F340-016D-11D2-945F-00C04FB984F9}'
        reg_pol = os.path.join(local_path, policies, guid,
                               'MACHINE/REGISTRY.POL')
        cache_dir = self.lp.get('cache directory')
        store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))

        machine_creds = Credentials()
        machine_creds.guess(self.lp)
        machine_creds.set_machine_account()

        # Initialize the group policy extension
        ext = gp_firewalld_ext(self.lp, machine_creds,
                               machine_creds.get_username(), store)

        gpos = get_gpo_list(self.server, machine_creds, self.lp,
                            machine_creds.get_username())

        # Stage the Registry.pol file with test data
        parser = GPPolParser()
        parser.load_xml(etree.fromstring(firewalld_reg_pol.strip()))
        ret = stage_file(reg_pol, ndr_pack(parser.pol_file))
        self.assertTrue(ret, 'Could not create the target %s' % reg_pol)

        ext.process_group_policy([], gpos)

        # Check that the policy was applied
        firewall_cmd = which('firewall-cmd')
        cmd = [firewall_cmd, '--get-zones']
        p = Popen(cmd, stdout=PIPE, stderr=PIPE)
        out, err = p.communicate()
        self.assertIn(b'work', out, 'Failed to apply zones')
        self.assertIn(b'home', out, 'Failed to apply zones')

        cmd = [firewall_cmd, '--zone=work', '--list-interfaces']
        p = Popen(cmd, stdout=PIPE, stderr=PIPE)
        out, err = p.communicate()
        self.assertIn(b'eth0', out, 'Failed to set interface on zone')

        cmd = [firewall_cmd, '--zone=home', '--list-interfaces']
        p = Popen(cmd, stdout=PIPE, stderr=PIPE)
        out, err = p.communicate()
        self.assertIn(b'eth0', out, 'Failed to set interface on zone')

        cmd = [firewall_cmd, '--zone=work', '--list-rich-rules']
        p = Popen(cmd, stdout=PIPE, stderr=PIPE)
        out, err = p.communicate()
        # Firewalld will report the rule one of two ways:
        rules = [b'rule family=ipv4 source address=172.25.1.7 ' +
                 b'service name=ftp reject',
                 b'rule family="ipv4" source address="172.25.1.7" ' +
                 b'service name="ftp" reject']
        self.assertIn(out.strip(), rules, 'Failed to set rich rule')

        # Check that modifying the policy will enforce the correct settings
        entries = [e for e in parser.pol_file.entries if e.data != 'home']
        self.assertEqual(len(entries), len(parser.pol_file.entries)-1,
                         'Failed to remove the home zone entry')
        parser.pol_file.entries = entries
        parser.pol_file.num_entries = len(entries)
        # Stage the Registry.pol file with altered test data
        unstage_file(reg_pol)
        ret = stage_file(reg_pol, ndr_pack(parser.pol_file))
        self.assertTrue(ret, 'Could not create the target %s' % reg_pol)

        # Enforce the altered policy
        ext.process_group_policy([], gpos)

        # Check that the home zone was removed
        cmd = [firewall_cmd, '--get-zones']
        p = Popen(cmd, stdout=PIPE, stderr=PIPE)
        out, err = p.communicate()
        self.assertIn(b'work', out, 'Failed to apply zones')
        self.assertNotIn(b'home', out, 'Failed to apply zones')

        # Verify RSOP does not fail
        ext.rsop([g for g in gpos if g.name == guid][0])

        # Check that a call to gpupdate --rsop also succeeds
        ret = rsop(self.lp)
        self.assertEqual(ret, 0, 'gpupdate --rsop failed!')

        # Unapply the policy
        gp_db = store.get_gplog(machine_creds.get_username())
        del_gpos = get_deleted_gpos_list(gp_db, [])
        ext.process_group_policy(del_gpos, [])

        # Check that the policy was unapplied
        cmd = [firewall_cmd, '--get-zones']
        p = Popen(cmd, stdout=PIPE, stderr=PIPE)
        out, err = p.communicate()
        self.assertNotIn(b'work', out, 'Failed to unapply zones')
        self.assertNotIn(b'home', out, 'Failed to unapply zones')

        # Unstage the Registry.pol file
        unstage_file(reg_pol)
+
+ def test_advanced_gp_cert_auto_enroll_ext(self):
+ local_path = self.lp.cache_path('gpo_cache')
+ guid = '{31B2F340-016D-11D2-945F-00C04FB984F9}'
+ reg_pol = os.path.join(local_path, policies, guid,
+ 'MACHINE/REGISTRY.POL')
+ cache_dir = self.lp.get('cache directory')
+ store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))
+
+ machine_creds = Credentials()
+ machine_creds.guess(self.lp)
+ machine_creds.set_machine_account()
+
+ # Initialize the group policy extension
+ cae.requests = dummy_requests()
+ ext = cae.gp_cert_auto_enroll_ext(self.lp, machine_creds,
+ machine_creds.get_username(), store)
+
+ gpos = get_gpo_list(self.server, machine_creds, self.lp,
+ machine_creds.get_username())
+
+ admin_creds = Credentials()
+ admin_creds.set_username(os.environ.get('DC_USERNAME'))
+ admin_creds.set_password(os.environ.get('DC_PASSWORD'))
+ admin_creds.set_realm(os.environ.get('REALM'))
+ hostname = get_dc_hostname(machine_creds, self.lp)
+ url = 'ldap://%s' % hostname
+ ldb = Ldb(url=url, session_info=system_session(),
+ lp=self.lp, credentials=admin_creds)
+
+ # Stage the Registry.pol file with test data
+ res = ldb.search('', _ldb.SCOPE_BASE, '(objectClass=*)',
+ ['rootDomainNamingContext'])
+ self.assertTrue(len(res) == 1, 'rootDomainNamingContext not found')
+ res2 = ldb.search(res[0]['rootDomainNamingContext'][0],
+ _ldb.SCOPE_BASE, '(objectClass=*)', ['objectGUID'])
+ self.assertTrue(len(res2) == 1, 'objectGUID not found')
+ objectGUID = b'{%s}' % \
+ cae.octet_string_to_objectGUID(res2[0]['objectGUID'][0]).upper().encode()
+ parser = GPPolParser()
+ parser.load_xml(etree.fromstring(advanced_enroll_reg_pol.strip() %
+ (objectGUID, objectGUID, objectGUID, objectGUID)))
+ ret = stage_file(reg_pol, ndr_pack(parser.pol_file))
+ self.assertTrue(ret, 'Could not create the target %s' % reg_pol)
+
+ # Write the dummy CA entry
+ confdn = 'CN=Public Key Services,CN=Services,CN=Configuration,%s' % base_dn
+ ca_cn = '%s-CA' % hostname.replace('.', '-')
+ certa_dn = 'CN=%s,CN=Certification Authorities,%s' % (ca_cn, confdn)
+ ldb.add({'dn': certa_dn,
+ 'objectClass': 'certificationAuthority',
+ 'authorityRevocationList': ['XXX'],
+ 'cACertificate': b'0\x82\x03u0\x82\x02]\xa0\x03\x02\x01\x02\x02\x10I',
+ 'certificateRevocationList': ['XXX'],
+ })
+ # Write the dummy pKIEnrollmentService
+ enroll_dn = 'CN=%s,CN=Enrollment Services,%s' % (ca_cn, confdn)
+ ldb.add({'dn': enroll_dn,
+ 'objectClass': 'pKIEnrollmentService',
+ 'cACertificate': b'0\x82\x03u0\x82\x02]\xa0\x03\x02\x01\x02\x02\x10I',
+ 'certificateTemplates': ['Machine'],
+ 'dNSHostName': hostname,
+ })
+ # Write the dummy pKICertificateTemplate
+ template_dn = 'CN=Machine,CN=Certificate Templates,%s' % confdn
+ ldb.add({'dn': template_dn,
+ 'objectClass': 'pKICertificateTemplate',
+ })
+
+ with TemporaryDirectory() as dname:
+ ext.process_group_policy([], gpos, dname, dname)
+ ca_list = [ca_cn, 'example0-com-CA', 'example1-com-CA',
+ 'example2-com-CA']
+ for ca in ca_list:
+ ca_crt = os.path.join(dname, '%s.crt' % ca)
+ self.assertTrue(os.path.exists(ca_crt),
+ 'Root CA certificate was not requested')
+ machine_crt = os.path.join(dname, '%s.Machine.crt' % ca)
+ self.assertTrue(os.path.exists(machine_crt),
+ 'Machine certificate was not requested')
+ machine_key = os.path.join(dname, '%s.Machine.key' % ca)
+ self.assertTrue(os.path.exists(machine_key),
+ 'Machine key was not generated')
+
+ # Subsequent apply should react to new certificate templates
+ os.environ['CEPCES_SUBMIT_SUPPORTED_TEMPLATES'] = 'Machine,Workstation'
+ self.addCleanup(os.environ.pop, 'CEPCES_SUBMIT_SUPPORTED_TEMPLATES')
+ ext.process_group_policy([], gpos, dname, dname)
+ for ca in ca_list:
+ self.assertTrue(os.path.exists(ca_crt),
+ 'Root CA certificate was not requested')
+ self.assertTrue(os.path.exists(machine_crt),
+ 'Machine certificate was not requested')
+ self.assertTrue(os.path.exists(machine_key),
+ 'Machine key was not generated')
+
+ workstation_crt = os.path.join(dname, '%s.Workstation.crt' % ca)
+ self.assertTrue(os.path.exists(workstation_crt),
+ 'Workstation certificate was not requested')
+ workstation_key = os.path.join(dname, '%s.Workstation.key' % ca)
+ self.assertTrue(os.path.exists(workstation_key),
+ 'Workstation key was not generated')
+
+ # Verify RSOP does not fail
+ ext.rsop([g for g in gpos if g.name == guid][0])
+
+ # Check that a call to gpupdate --rsop also succeeds
+ ret = rsop(self.lp)
+ self.assertEqual(ret, 0, 'gpupdate --rsop failed!')
+
+ # Remove policy
+ gp_db = store.get_gplog(machine_creds.get_username())
+ del_gpos = get_deleted_gpos_list(gp_db, [])
+ ext.process_group_policy(del_gpos, [], dname)
+ self.assertFalse(os.path.exists(ca_crt),
+ 'Root CA certificate was not removed')
+ self.assertFalse(os.path.exists(machine_crt),
+ 'Machine certificate was not removed')
+ self.assertFalse(os.path.exists(machine_key),
+ 'Machine key was not removed')
+ self.assertFalse(os.path.exists(workstation_crt),
+ 'Workstation certificate was not removed')
+ self.assertFalse(os.path.exists(workstation_key),
+ 'Workstation key was not removed')
+ out, _ = Popen(['getcert', 'list-cas'], stdout=PIPE).communicate()
+ for ca in ca_list:
+ self.assertNotIn(get_bytes(ca), out, 'CA was not removed')
+ out, _ = Popen(['getcert', 'list'], stdout=PIPE).communicate()
+ self.assertNotIn(b'Machine', out,
+ 'Machine certificate not removed')
+ self.assertNotIn(b'Workstation', out,
+ 'Workstation certificate not removed')
+
+ # Remove the dummy CA, pKIEnrollmentService, and pKICertificateTemplate
+ ldb.delete(certa_dn)
+ ldb.delete(enroll_dn)
+ ldb.delete(template_dn)
+
+ # Unstage the Registry.pol file
+ unstage_file(reg_pol)
+
+ def test_gp_centrify_sudoers_ext(self):
+ local_path = self.lp.cache_path('gpo_cache')
+ guid = '{31B2F340-016D-11D2-945F-00C04FB984F9}'
+ reg_pol = os.path.join(local_path, policies, guid,
+ 'MACHINE/REGISTRY.POL')
+ cache_dir = self.lp.get('cache directory')
+ store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))
+
+ machine_creds = Credentials()
+ machine_creds.guess(self.lp)
+ machine_creds.set_machine_account()
+
+ # Initialize the group policy extension
+ ext = gp_centrify_sudoers_ext(self.lp, machine_creds,
+ machine_creds.get_username(), store)
+
+ gpos = get_gpo_list(self.server, machine_creds, self.lp,
+ machine_creds.get_username())
+
+ # Stage the Registry.pol file with test data
+ stage = preg.file()
+ e1 = preg.entry()
+ e1.keyname = b'Software\\Policies\\Centrify\\UnixSettings'
+ e1.valuename = b'sudo.enabled'
+ e1.type = 4
+ e1.data = 1
+ e2 = preg.entry()
+ e2.keyname = b'Software\\Policies\\Centrify\\UnixSettings\\SuDo'
+ e2.valuename = b'1'
+ e2.type = 1
+ e2.data = b'fakeu ALL=(ALL) NOPASSWD: ALL'
+ stage.num_entries = 2
+ stage.entries = [e1, e2]
+ ret = stage_file(reg_pol, ndr_pack(stage))
+ self.assertTrue(ret, 'Could not create the target %s' % reg_pol)
+
+ # Process all gpos, with temp output directory
+ with TemporaryDirectory() as dname:
+ ext.process_group_policy([], gpos, dname)
+ sudoers = os.listdir(dname)
+ self.assertEqual(len(sudoers), 1, 'The sudoer file was not created')
+ sudoers_file = os.path.join(dname, sudoers[0])
+ self.assertIn(e2.data, open(sudoers_file, 'r').read(),
+ 'The sudoers entry was not applied')
+
+ # Remove the sudoers file, and make sure a re-apply puts it back
+ os.unlink(sudoers_file)
+ ext.process_group_policy([], gpos, dname)
+ sudoers = os.listdir(dname)
+ self.assertEqual(len(sudoers), 1,
+ 'The sudoer file was not recreated')
+ sudoers_file = os.path.join(dname, sudoers[0])
+ self.assertIn(e2.data, open(sudoers_file, 'r').read(),
+ 'The sudoers entry was not reapplied')
+
+ # Check that a call to gpupdate --rsop also succeeds
+ ret = rsop(self.lp)
+ self.assertEqual(ret, 0, 'gpupdate --rsop failed!')
+
+ # Remove policy
+ gp_db = store.get_gplog(machine_creds.get_username())
+ del_gpos = get_deleted_gpos_list(gp_db, [])
+ ext.process_group_policy(del_gpos, [])
+ self.assertEqual(len(os.listdir(dname)), 0,
+ 'Unapply failed to cleanup scripts')
+
+ # Unstage the Registry.pol file
+ unstage_file(reg_pol)
+
+ def test_gp_centrify_crontab_ext(self):
+ local_path = self.lp.cache_path('gpo_cache')
+ guid = '{31B2F340-016D-11D2-945F-00C04FB984F9}'
+ reg_pol = os.path.join(local_path, policies, guid,
+ 'MACHINE/REGISTRY.POL')
+ cache_dir = self.lp.get('cache directory')
+ store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))
+
+ machine_creds = Credentials()
+ machine_creds.guess(self.lp)
+ machine_creds.set_machine_account()
+
+ # Initialize the group policy extension
+ ext = gp_centrify_crontab_ext(self.lp, machine_creds,
+ machine_creds.get_username(), store)
+
+ gpos = get_gpo_list(self.server, machine_creds, self.lp,
+ machine_creds.get_username())
+
+ # Stage the Registry.pol file with test data
+ stage = preg.file()
+ e = preg.entry()
+ e.keyname = \
+ b'Software\\Policies\\Centrify\\UnixSettings\\CrontabEntries'
+ e.valuename = b'Command1'
+ e.type = 1
+ e.data = b'17 * * * * root echo hello world'
+ stage.num_entries = 1
+ stage.entries = [e]
+ ret = stage_file(reg_pol, ndr_pack(stage))
+ self.assertTrue(ret, 'Could not create the target %s' % reg_pol)
+
+ # Process all gpos, with temp output directory
+ with TemporaryDirectory() as dname:
+ ext.process_group_policy([], gpos, dname)
+ cron_entries = os.listdir(dname)
+ self.assertEqual(len(cron_entries), 1, 'Cron entry not created')
+ fname = os.path.join(dname, cron_entries[0])
+ data = open(fname, 'rb').read()
+ self.assertIn(get_bytes(e.data), data, 'Cron entry is missing')
+
+ # Check that a call to gpupdate --rsop also succeeds
+ ret = rsop(self.lp)
+ self.assertEqual(ret, 0, 'gpupdate --rsop failed!')
+
+ # Remove policy
+ gp_db = store.get_gplog(machine_creds.get_username())
+ del_gpos = get_deleted_gpos_list(gp_db, [])
+ ext.process_group_policy(del_gpos, [])
+ self.assertEqual(len(os.listdir(dname)), 0,
+ 'Unapply failed to cleanup script')
+
+ # Unstage the Registry.pol file
+ unstage_file(reg_pol)
+
+ def test_gp_user_centrify_crontab_ext(self):
+ local_path = self.lp.cache_path('gpo_cache')
+ guids = ['{31B2F340-016D-11D2-945F-00C04FB984F9}',
+ '{6AC1786C-016F-11D2-945F-00C04FB984F9}']
+ reg_pol = os.path.join(local_path, policies, guids[0],
+ 'USER/REGISTRY.POL')
+ reg_pol2 = os.path.join(local_path, policies, guids[1],
+ 'USER/REGISTRY.POL')
+ cache_dir = self.lp.get('cache directory')
+ store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))
+
+ machine_creds = Credentials()
+ machine_creds.guess(self.lp)
+ machine_creds.set_machine_account()
+
+ # Initialize the group policy extension
+ ext = gp_user_centrify_crontab_ext(self.lp, machine_creds,
+ os.environ.get('DC_USERNAME'),
+ store)
+
+ gpos = get_gpo_list(self.server, machine_creds, self.lp,
+ machine_creds.get_username())
+
+ # Stage the Registry.pol file with test data
+ stage = preg.file()
+ e = preg.entry()
+ e.keyname = \
+ b'Software\\Policies\\Centrify\\UnixSettings\\CrontabEntries'
+ e.valuename = b'Command1'
+ e.type = 1
+ e.data = b'17 * * * * echo hello world'
+ stage.num_entries = 1
+ stage.entries = [e]
+ ret = stage_file(reg_pol, ndr_pack(stage))
+ self.assertTrue(ret, 'Could not create the target %s' % reg_pol)
+
+ # Stage the other Registry.pol
+ stage = preg.file()
+ e2 = preg.entry()
+ e2.keyname = \
+ b'Software\\Policies\\Centrify\\UnixSettings\\CrontabEntries'
+ e2.valuename = b'Command1'
+ e2.type = 1
+ e2.data = b'17 * * * * echo this is a second policy'
+ stage.num_entries = 1
+ stage.entries = [e2]
+ ret = stage_file(reg_pol2, ndr_pack(stage))
+ self.assertTrue(ret, 'Could not create the target %s' % reg_pol2)
+
+ # Process all gpos, intentionally skipping the privilege drop
+ ext.process_group_policy([], gpos)
+ # Dump the fake crontab setup for testing
+ p = Popen(['crontab', '-l'], stdout=PIPE)
+ crontab, _ = p.communicate()
+ self.assertIn(get_bytes(e.data), crontab,
+ 'The crontab entry was not installed')
+ self.assertIn(get_bytes(e2.data), crontab,
+ 'The crontab entry was not installed')
+
+ # Force apply with removal of second GPO
+ gp_db = store.get_gplog(os.environ.get('DC_USERNAME'))
+ del_gpos = gp_db.get_applied_settings([guids[1]])
+ gpos = [gpo for gpo in gpos if gpo.name != guids[1]]
+ ext.process_group_policy(del_gpos, gpos)
+
+ # Dump the fake crontab setup for testing
+ p = Popen(['crontab', '-l'], stdout=PIPE)
+ crontab, _ = p.communicate()
+
+ # Ensure the first entry remains, and the second entry is removed
+ self.assertIn(get_bytes(e.data), crontab,
+ 'The first crontab entry was not found')
+ self.assertNotIn(get_bytes(e2.data), crontab,
+ 'The second crontab entry was still present')
+
+ # Check that a call to gpupdate --rsop also succeeds
+ ret = rsop(self.lp)
+ self.assertEqual(ret, 0, 'gpupdate --rsop failed!')
+
+ # Remove policy
+ del_gpos = get_deleted_gpos_list(gp_db, [])
+ ext.process_group_policy(del_gpos, [])
+ # Dump the fake crontab setup for testing
+ p = Popen(['crontab', '-l'], stdout=PIPE)
+ crontab, _ = p.communicate()
+ self.assertNotIn(get_bytes(e.data), crontab,
+ 'Unapply failed to cleanup crontab entry')
+
+ # Unstage the Registry.pol files
+ unstage_file(reg_pol)
+ unstage_file(reg_pol2)
+
    def test_gp_drive_maps_user_ext(self):
        """Apply/unapply user drive-map policy via crontab gio mounts.

        Stages a Drives.xml, applies it and checks a '@hourly gio mount'
        crontab entry appears; switches the action to delete and verifies
        the entry becomes an unmount; then tests that a FilterRunOnce
        drive map produces no recurring crontab entry at all.
        """
        local_path = self.lp.cache_path('gpo_cache')
        guid = '{31B2F340-016D-11D2-945F-00C04FB984F9}'
        xml_path = os.path.join(local_path, policies, guid,
                                'USER/PREFERENCES/DRIVES/DRIVES.XML')
        cache_dir = self.lp.get('cache directory')
        store = GPOStorage(os.path.join(cache_dir, 'gpo.tdb'))

        machine_creds = Credentials()
        machine_creds.guess(self.lp)
        machine_creds.set_machine_account()

        # Initialize the group policy extension
        ext = gp_drive_maps_user_ext(self.lp, machine_creds,
                                     os.environ.get('DC_USERNAME'), store)

        # Unlike the sibling tests, this one fetches the GPO list through
        # the deprecated ADS_STRUCT path; 'gpos' is only bound if the
        # connect succeeds — NOTE(review): a failed connect would raise
        # NameError below rather than a clean test failure.
        ads = gpo.ADS_STRUCT(self.server, self.lp, machine_creds)
        if ads.connect():
            gpos = ads.get_gpo_list(machine_creds.get_username())

        # Stage the Drives.xml file with test data
        ret = stage_file(xml_path, drive_maps_xml)
        self.assertTrue(ret, 'Could not create the target %s' % xml_path)

        # Process all gpos, intentionally skipping the privilege drop
        ext.process_group_policy([], gpos)
        # Dump the fake crontab setup for testing
        p = Popen(['crontab', '-l'], stdout=PIPE)
        crontab, _ = p.communicate()
        entry = b'@hourly gio mount smb://example.com/test'
        self.assertIn(entry, crontab,
                      'The crontab entry was not installed')

        # Check that a call to gpupdate --rsop also succeeds
        ret = rsop(self.lp)
        self.assertEqual(ret, 0, 'gpupdate --rsop failed!')

        # Unstage the Drives.xml
        unstage_file(xml_path)

        # Modify the policy and ensure it is updated
        xml_conf = etree.fromstring(drive_maps_xml.strip())
        drives = xml_conf.findall('Drive')
        props = drives[0].find('Properties')
        # 'D' requests deletion of the mapped drive.
        props.attrib['action'] = 'D'
        ret = stage_file(xml_path,
                         etree.tostring(xml_conf, encoding='unicode'))
        self.assertTrue(ret, 'Could not create the target %s' % xml_path)

        # Process all gpos, intentionally skipping the privilege drop
        ext.process_group_policy([], gpos)
        # Dump the fake crontab setup for testing
        p = Popen(['crontab', '-l'], stdout=PIPE)
        crontab, _ = p.communicate()
        # The plain mount line must be gone; an '--unmount' variant replaces
        # it ('entry+b'\n'' avoids matching the new longer line's prefix).
        self.assertNotIn(entry+b'\n', crontab,
                         'The old crontab entry was not removed')
        entry = entry + b' --unmount'
        self.assertIn(entry, crontab,
                      'The crontab entry was not installed')

        # Remove policy
        gp_db = store.get_gplog(os.environ.get('DC_USERNAME'))
        del_gpos = get_deleted_gpos_list(gp_db, [])
        ext.process_group_policy(del_gpos, [])
        # Dump the fake crontab setup for testing
        p = Popen(['crontab', '-l'], stdout=PIPE)
        crontab, _ = p.communicate()
        self.assertNotIn(entry, crontab,
                         'Unapply failed to cleanup crontab entry')

        # Unstage the Drives.xml
        unstage_file(xml_path)

        # Modify the policy to set 'run once', ensure there is no cron entry
        xml_conf = etree.fromstring(drive_maps_xml.strip())
        drives = xml_conf.findall('Drive')
        filters = etree.SubElement(drives[0], 'Filters')
        etree.SubElement(filters, 'FilterRunOnce')
        ret = stage_file(xml_path,
                         etree.tostring(xml_conf, encoding='unicode'))
        self.assertTrue(ret, 'Could not create the target %s' % xml_path)

        # Process all gpos, intentionally skipping the privilege drop
        ext.process_group_policy([], gpos)
        # Dump the fake crontab setup for testing
        p = Popen(['crontab', '-l'], stdout=PIPE)
        crontab, _ = p.communicate()
        entry = b'@hourly gio mount smb://example.com/test'
        self.assertNotIn(entry, crontab,
                         'The crontab entry was added despite run-once request')

        # Remove policy
        gp_db = store.get_gplog(os.environ.get('DC_USERNAME'))
        del_gpos = get_deleted_gpos_list(gp_db, [])
        ext.process_group_policy(del_gpos, [])

        # Unstage the Drives.xml
        unstage_file(xml_path)
+
+ def test_expand_pref_variables(self):
+ cache_path = self.lp.cache_path(os.path.join('gpo_cache'))
+ gpt_path = 'TEST'
+ username = 'test_uname'
+ test_vars = { 'AppDataDir': os.path.expanduser('~/.config'),
+ 'ComputerName': self.lp.get('netbios name'),
+ 'DesktopDir': os.path.expanduser('~/Desktop'),
+ 'DomainName': self.lp.get('realm'),
+ 'GptPath': os.path.join(cache_path,
+ check_safe_path(gpt_path).upper()),
+ 'LogonDomain': self.lp.get('realm'),
+ 'LogonUser': username,
+ 'SystemDrive': '/',
+ 'TempDir': '/tmp'
+ }
+ for exp_var, val in test_vars.items():
+ self.assertEqual(expand_pref_variables('%%%s%%' % exp_var,
+ gpt_path,
+ self.lp,
+ username),
+ val, 'Failed to expand variable %s' % exp_var)
+ # With the time variables, we can't test for an exact time, so let's do
+ # simple checks instead.
+ time_vars = ['DateTime', 'DateTimeEx', 'LocalTime',
+ 'LocalTimeEx', 'TimeStamp']
+ for time_var in time_vars:
+ self.assertNotEqual(expand_pref_variables('%%%s%%' % time_var,
+ gpt_path,
+ self.lp,
+ username),
+ None, 'Failed to expand variable %s' % time_var)
+
+ # Here we test to ensure undefined preference variables cause an error.
+ # The reason for testing these is to ensure we don't apply nonsense
+ # policies when they can't be defined. Also, these tests will fail if
+ # one of these is implemented in the future (forcing us to write a test
+ # anytime these are implemented).
+ undef_vars = ['BinaryComputerSid',
+ 'BinaryUserSid',
+ 'CommonAppdataDir',
+ 'CommonDesktopDir',
+ 'CommonFavoritesDir',
+ 'CommonProgramsDir',
+ 'CommonStartUpDir',
+ 'CurrentProccessId',
+ 'CurrentThreadId',
+ 'FavoritesDir',
+ 'GphPath',
+ 'GroupPolicyVersion',
+ 'LastDriveMapped',
+ 'LastError',
+ 'LastErrorText',
+ 'LdapComputerSid',
+ 'LdapUserSid',
+ 'LogonServer',
+ 'LogonUserSid',
+ 'MacAddress',
+ 'NetPlacesDir',
+ 'OsVersion',
+ 'ProgramFilesDir',
+ 'ProgramsDir',
+ 'RecentDocumentsDir',
+ 'ResultCode',
+ 'ResultText',
+ 'ReversedComputerSid',
+ 'ReversedUserSid',
+ 'SendToDir',
+ 'StartMenuDir',
+ 'StartUpDir',
+ 'SystemDir',
+ 'TraceFile',
+ 'WindowsDir'
+ ]
+ for undef_var in undef_vars:
+ try:
+ expand_pref_variables('%%%s%%' % undef_var, gpt_path, self.lp)
+ except NameError:
+ pass
+ else:
+ self.fail('Undefined variable %s caused no error' % undef_var)
+
+ def test_parser_roundtrip_empty_multi_sz(self):
+ with TemporaryDirectory() as dname:
+ reg_pol_xml = os.path.join(dname, 'REGISTRY.POL.XML')
+
+ parser = GPPolParser()
+ try:
+ parser.load_xml(etree.fromstring(empty_multi_sz_reg_pol.strip()))
+ except Exception as e:
+ self.fail(str(e))
+ parser.write_xml(reg_pol_xml)
+
+ with open(reg_pol_xml, 'r') as f:
+ pol_xml_data = f.read()
+
+ # Strip whitespace characters due to indentation differences
+ expected_xml_data = re.sub(r"\s+", "", empty_multi_sz_reg_pol.decode(), flags=re.UNICODE)
+ actual_xml_data = re.sub(r"\s+", "", pol_xml_data, flags=re.UNICODE)
+ self.assertEqual(expected_xml_data, actual_xml_data, 'XML data mismatch')
+
+ def test_parser_roundtrip_multiple_values_multi_sz(self):
+ with TemporaryDirectory() as dname:
+ reg_pol_xml = os.path.join(dname, 'REGISTRY.POL.XML')
+
+ parser = GPPolParser()
+ try:
+ parser.load_xml(etree.fromstring(multiple_values_multi_sz_reg_pol.strip()))
+ except Exception as e:
+ self.fail(str(e))
+ parser.write_xml(reg_pol_xml)
+
+ with open(reg_pol_xml, 'r') as f:
+ pol_xml_data = f.read()
+
+ # Strip whitespace characters due to indentation differences
+ expected_xml_data = re.sub(r"\s+", "", multiple_values_multi_sz_reg_pol.decode(), flags=re.UNICODE)
+ actual_xml_data = re.sub(r"\s+", "", pol_xml_data, flags=re.UNICODE)
+ self.assertEqual(expected_xml_data, actual_xml_data, 'XML data mismatch')
diff --git a/python/samba/tests/gpo_member.py b/python/samba/tests/gpo_member.py
new file mode 100644
index 0000000..dda0c3b
--- /dev/null
+++ b/python/samba/tests/gpo_member.py
@@ -0,0 +1,39 @@
+# Unix SMB/CIFS implementation. Tests for smb manipulation
+# Copyright (C) David Mulder <dmulder@suse.com> 2018
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+from samba import tests
+from samba.gp.gpclass import GPOStorage
+from samba.param import LoadParm
+from samba.gp.gp_sec_ext import gp_access_ext
+
class GPOTests(tests.TestCase):
    """Checks that GPO client-side classes are usable on a domain member."""

    def setUp(self):
        super().setUp()
        self.lp = LoadParm()
        self.lp.load_default()
        self.server = os.environ["SERVER"]
        self.dc_account = self.server.upper() + '$'
        self.creds = self.insta_creds(template=self.get_credentials())

    def test_sec_ext_load_on_member(self):
        """gp_access_ext construction must not require an AD DC."""
        cache_dir = self.lp.get('cache directory')
        gpo_tdb = os.path.join(cache_dir, 'gpo.tdb')
        store = GPOStorage(gpo_tdb)
        try:
            gp_access_ext(self.lp, self.creds,
                          self.creds.get_username(), store)
        except Exception:
            self.fail('Initializing gp_access_ext should not require ad-dc')
diff --git a/python/samba/tests/graph.py b/python/samba/tests/graph.py
new file mode 100644
index 0000000..4edd682
--- /dev/null
+++ b/python/samba/tests/graph.py
@@ -0,0 +1,532 @@
+# -*- coding: utf-8 -*-
+# Test graph dot file generation
+#
+# Copyright (C) Andrew Bartlett 2018.
+#
+# Written by Douglas Bagnall <douglas.bagnall@catalyst.net.nz>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""Tests for samba.graph"""
+
+import samba
+import samba.tests
+from samba import graph
+
+import re
+import itertools
+
+
class DotFileTests(samba.tests.TestCaseInTempDir):
    """Tests for the graphviz dot file text generated by samba.graph."""

    def assertMatch(self, exp, s):
        """Assert that regex *exp* matches *s*; return the match object."""
        m = re.match(exp, s)
        if m is None:
            self.fail("%r did not match /%s/" % (s, exp))
        return m

    def assertHeader(self, lines, title, directed):
        """Check the fixed preamble of a generated dot file.

        :param lines: the dot file split into lines
        :param title: the expected graph label
        :param directed: whether a digraph (vs graph) header is expected
        """
        self.assertEqual(lines[0], '/* generated by samba */')
        if directed:
            exp = r'^digraph \w+ {$'
        else:
            exp = r'^graph \w+ {$'
        self.assertMatch(exp, lines[1])
        m = self.assertMatch(r'^label="([\w ]+)";$', lines[2])
        self.assertEqual(m.group(1), title)
        self.assertMatch(r'^fontsize=10;$', lines[3])
        self.assertMatch(r'$', lines[4])
        self.assertEqual(lines[5], 'node[fontname=Helvetica; fontsize=10];')
        self.assertEqual(lines[6], '')

    def assertVertices(self, lines, names):
        """Check that the vertex lines declare the expected names, in order."""
        for n, line in zip(names, lines):
            m = self.assertMatch(r'^"(\w+)";$', line)
            self.assertEqual(n, m.group(1))

    def assertEdges(self, lines, edges, directed):
        """Check the edge lines: endpoints, connector style, attributes."""
        connector = '->' if directed else '--'

        for edge, line in zip(edges, lines):
            a, b = edge
            # Allow an optional [attributes] suffix of any length; the
            # previous pattern ([^\]]) only admitted a single character
            # there, which would reject longer attribute strings.
            m = self.assertMatch((r'^"(\w+)" ([>-]{2}) '
                                  r'"(\w+)" ?(?:\[([^\]]*)\])?;$'),
                                 line)
            self.assertEqual(m.group(1), a)
            self.assertEqual(m.group(2), connector)
            self.assertEqual(m.group(3), b)
            if m.group(4):
                self.assertMatch(r'^[\w ]*$', m.group(4))

    def test_basic_dot_files(self):
        """Generate dot files for several graph shapes and check them."""
        vertices = tuple('abcdefgh')
        all_edges = tuple(itertools.combinations(vertices, 2))
        line_edges = list(zip(vertices[1:], vertices[:-1]))
        ring_edges = line_edges + [(vertices[0], vertices[-1])]
        no_edges = []
        # even join to even numbers, odd to odd.  The previous condition
        # (ord(a) ^ ord(b) == 0) could never hold for two distinct
        # letters, leaving this graph accidentally edgeless; testing only
        # the low bit of the xor implements the stated parity intent.
        disjoint_edges = [(a, b) for a, b in all_edges if
                          (ord(a) ^ ord(b)) & 1 == 0]

        for name, edges in (('all', all_edges),
                            ('line', line_edges),
                            ('ring', ring_edges),
                            ('no', no_edges),
                            ('disjoint', disjoint_edges)):

            for directed, tag in ((True, "directed"),
                                  (False, "undirected")):
                title = "%s %s" % (name, tag)

                g = graph.dot_graph(vertices, edges,
                                    directed=directed,
                                    title=title)
                lines = g.split('\n')
                self.assertHeader(lines, title, directed)
                self.assertVertices(lines[7:], vertices)
                self.assertEdges(lines[len(vertices) + 7:], edges, directed)
+
+
class DistanceTests(samba.tests.TestCase):
    """Tests for the textual distance matrices produced by samba.graph.

    Each test renders a small edge list with graph.distance_matrix()
    under every known colour scheme, in both utf-8 and plain ASCII
    modes, and compares the result against a hand-checked table.
    """

    def setUp(self):
        super().setUp()
        # a sorted list of colour set names.
        self.sorted_colour_sets = sorted(
            graph.COLOUR_SETS,
            # return '' for None, so it's sortable.
            key=lambda name: name or '')

    def test_simple_distance(self):
        """Check the rendering of a six-node graph in every mode."""
        edges = [('ant', 'bat'),
                 ('cat', 'dog'),
                 ('ant', 'elephant'),
                 ('elephant', 'dog'),
                 ('bat', 'dog'),
                 ('frog', 'elephant'),
                 ('frog', 'cat'),
                 ('bat', 'elephant'),
                 ('elephant', 'cat'),
                 ('cat', 'ant'),
                 ('cat', 'dog')]

        # Expected output keyed by the utf8/colour parameter combination.
        expected = {
            "utf8 True, colour None": '''
           destination
         ╭────── ant
         │╭───── bat
         ││╭──── cat
         │││╭─── dog
         ││││╭── elephant
  source │││││╭─ frog
     ant ·1221-
     bat 3·211-
     cat 12·12-
     dog ---·--
elephant 2311·-
    frog 23121·''',
            'utf8 True, colour ansi': '''
           destination
         ╭────── ant
         │╭───── bat
         ││╭──── cat
         │││╭─── dog
         ││││╭── elephant
  source │││││╭─ frog
     ant ·1221-
     bat 3·211-
     cat 12·12-
     dog ---·--
elephant 2311·-
    frog 23121·
 ''',
            'utf8 True, colour ansi-heatmap': '''
           destination
         ╭────── ant
         │╭───── bat
         ││╭──── cat
         │││╭─── dog
         ││││╭── elephant
  source │││││╭─ frog
     ant ·1221-
     bat 3·211-
     cat 12·12-
     dog ---·--
elephant 2311·-
    frog 23121·
 ''',
            'utf8 True, colour xterm-256color': '''
           destination
         ╭────── ant
         │╭───── bat
         ││╭──── cat
         │││╭─── dog
         ││││╭── elephant
  source │││││╭─ frog
     ant ·1221-
     bat 3·211-
     cat 12·12-
     dog ---·--
elephant 2311·-
    frog 23121·
 ''',
            'utf8 True, colour xterm-256color-heatmap': '''
           destination
         ╭────── ant
         │╭───── bat
         ││╭──── cat
         │││╭─── dog
         ││││╭── elephant
  source │││││╭─ frog
     ant ·1221-
     bat 3·211-
     cat 12·12-
     dog ---·--
elephant 2311·-
    frog 23121·
''',
            'utf8 False, colour None': '''
           destination
          ,------ ant
          |,----- bat
          ||,---- cat
          |||,--- dog
          ||||,-- elephant
   source |||||,- frog
      ant 01221-
      bat 30211-
      cat 12012-
      dog ---0--
 elephant 23110-
     frog 231210
''',
            'utf8 False, colour ansi': '''
           destination
          ,------ ant
          |,----- bat
          ||,---- cat
          |||,--- dog
          ||||,-- elephant
   source |||||,- frog
      ant 01221-
      bat 30211-
      cat 12012-
      dog ---0--
 elephant 23110-
     frog 231210
''',
            'utf8 False, colour ansi-heatmap': '''
           destination
          ,------ ant
          |,----- bat
          ||,---- cat
          |||,--- dog
          ||||,-- elephant
   source |||||,- frog
      ant 01221-
      bat 30211-
      cat 12012-
      dog ---0--
 elephant 23110-
     frog 231210
''',
            'utf8 False, colour xterm-256color': '''
           destination
          ,------ ant
          |,----- bat
          ||,---- cat
          |||,--- dog
          ||||,-- elephant
   source |||||,- frog
      ant 01221-
      bat 30211-
      cat 12012-
      dog ---0--
 elephant 23110-
     frog 231210
''',
            'utf8 False, colour xterm-256color-heatmap': '''
           destination
          ,------ ant
          |,----- bat
          ||,---- cat
          |||,--- dog
          ||||,-- elephant
   source |||||,- frog
      ant 01221-
      bat 30211-
      cat 12012-
      dog ---0--
 elephant 23110-
     frog 231210
'''
        }
        # Render under every mode combination and compare.
        for utf8 in (True, False):
            for colour in self.sorted_colour_sets:
                k = 'utf8 %s, colour %s' % (utf8, colour)
                s = graph.distance_matrix(None, edges, utf8=utf8,
                                          colour=colour)
                self.assertStringsEqual(s, expected[k], strip=True,
                                        msg='Wrong output: %s\n\n%s' % (k, s))

    def test_simple_distance2(self):
        """Check the rendering of a three-node graph in every mode."""
        edges = [('ant', 'bat'),
                 ('cat', 'bat'),
                 ('bat', 'ant'),
                 ('ant', 'cat')]
        # Expected output keyed by the utf8/colour parameter combination.
        expected = {
            'utf8 True, colour None': '''
       destination
      ╭─── ant
      │╭── bat
source ││╭─ cat
   ant ·11
   bat 1·2
   cat 21·
 ''',
            'utf8 True, colour ansi': '''
       destination
      ╭─── ant
      │╭── bat
source ││╭─ cat
   ant ·11
   bat 1·2
   cat 21·
 ''',
            'utf8 True, colour ansi-heatmap': '''
       destination
      ╭─── ant
      │╭── bat
source ││╭─ cat
   ant ·11
   bat 1·2
   cat 21·
 ''',
            'utf8 True, colour xterm-256color': '''
       destination
      ╭─── ant
      │╭── bat
source ││╭─ cat
   ant ·11
   bat 1·2
   cat 21·
''',
            'utf8 True, colour xterm-256color-heatmap': '''
       destination
      ╭─── ant
      │╭── bat
source ││╭─ cat
   ant ·11
   bat 1·2
   cat 21·
''',
            'utf8 False, colour None': '''
       destination
      ,--- ant
      |,-- bat
source ||,- cat
   ant 011
   bat 102
   cat 210
''',
            'utf8 False, colour ansi': '''
       destination
      ,--- ant
      |,-- bat
source ||,- cat
   ant 011
   bat 102
   cat 210
''',
            'utf8 False, colour ansi-heatmap': '''
       destination
      ,--- ant
      |,-- bat
source ||,- cat
   ant 011
   bat 102
   cat 210
''',
            'utf8 False, colour xterm-256color': '''
       destination
      ,--- ant
      |,-- bat
source ||,- cat
   ant 011
   bat 102
   cat 210
''',
            'utf8 False, colour xterm-256color-heatmap': '''
       destination
      ,--- ant
      |,-- bat
source ||,- cat
   ant 011
   bat 102
   cat 210
'''
        }
        # Render under every mode combination and compare.
        for utf8 in (True, False):
            for colour in self.sorted_colour_sets:
                k = 'utf8 %s, colour %s' % (utf8, colour)
                s = graph.distance_matrix(None, edges, utf8=utf8,
                                          colour=colour)
                self.assertStringsEqual(s, expected[k], strip=True,
                                        msg='Wrong output: %s\n\n%s' % (k, s))

    def test_simple_distance3(self):
        """Check the rendering of a five-node ring-plus-tail graph."""
        edges = [('ant', 'bat'),
                 ('bat', 'cat'),
                 ('cat', 'dog'),
                 ('dog', 'ant'),
                 ('dog', 'eel')]
        # Expected output keyed by the utf8/colour parameter combination.
        expected = {
            'utf8 True, colour None': '''
           destination
      ╭───── ant
      │╭──── bat
      ││╭─── cat
      │││╭── dog
source ││││╭─ eel
   ant ·1234
   bat 3·123
   cat 23·12
   dog 123·1
   eel ----·
''',
            'utf8 True, colour ansi': '''
           destination
      ╭───── ant
      │╭──── bat
      ││╭─── cat
      │││╭── dog
source ││││╭─ eel
   ant ·1234
   bat 3·123
   cat 23·12
   dog 123·1
   eel ----·
''',
            'utf8 True, colour ansi-heatmap': '''
           destination
      ╭───── ant
      │╭──── bat
      ││╭─── cat
      │││╭── dog
source ││││╭─ eel
   ant ·1234
   bat 3·123
   cat 23·12
   dog 123·1
   eel ----·
''',
            'utf8 True, colour xterm-256color': '''
           destination
      ╭───── ant
      │╭──── bat
      ││╭─── cat
      │││╭── dog
source ││││╭─ eel
   ant ·1234
   bat 3·123
   cat 23·12
   dog 123·1
   eel ----·
''',
            'utf8 True, colour xterm-256color-heatmap': '''
           destination
      ╭───── ant
      │╭──── bat
      ││╭─── cat
      │││╭── dog
source ││││╭─ eel
   ant ·1234
   bat 3·123
   cat 23·12
   dog 123·1
   eel ----·
''',
            'utf8 False, colour None': '''
           destination
      ,----- ant
      |,---- bat
      ||,--- cat
      |||,-- dog
source ||||,- eel
   ant 01234
   bat 30123
   cat 23012
   dog 12301
   eel ----0
''',
            'utf8 False, colour ansi': '''
           destination
      ,----- ant
      |,---- bat
      ||,--- cat
      |||,-- dog
source ||||,- eel
   ant 01234
   bat 30123
   cat 23012
   dog 12301
   eel ----0
''',
            'utf8 False, colour ansi-heatmap': '''
           destination
      ,----- ant
      |,---- bat
      ||,--- cat
      |||,-- dog
source ||||,- eel
   ant 01234
   bat 30123
   cat 23012
   dog 12301
   eel ----0
''',
            'utf8 False, colour xterm-256color':
            '''          destination
      ,----- ant
      |,---- bat
      ||,--- cat
      |||,-- dog
source ||||,- eel
   ant 01234
   bat 30123
   cat 23012
   dog 12301
   eel ----0
''',
            'utf8 False, colour xterm-256color-heatmap': '''
           destination
      ,----- ant
      |,---- bat
      ||,--- cat
      |||,-- dog
source ||||,- eel
   ant 01234
   bat 30123
   cat 23012
   dog 12301
   eel ----0
'''
        }
        # Render under every mode combination and compare.
        for utf8 in (True, False):
            for colour in self.sorted_colour_sets:
                k = 'utf8 %s, colour %s' % (utf8, colour)
                s = graph.distance_matrix(None, edges, utf8=utf8,
                                          colour=colour)
                self.assertStringsEqual(s, expected[k], strip=True,
                                        msg='Wrong output: %s\n\n%s' % (k, s))
diff --git a/python/samba/tests/group_audit.py b/python/samba/tests/group_audit.py
new file mode 100644
index 0000000..4c83ae8
--- /dev/null
+++ b/python/samba/tests/group_audit.py
@@ -0,0 +1,395 @@
+# Tests for SamDb password change audit logging.
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2018
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+"""Tests for the SamDb logging of password changes.
+"""
+
+import samba.tests
+from samba.dcerpc.messaging import MSG_GROUP_LOG, DSDB_GROUP_EVENT_NAME
+from samba.dcerpc.windows_event_ids import (
+ EVT_ID_USER_ADDED_TO_GLOBAL_SEC_GROUP,
+ EVT_ID_USER_REMOVED_FROM_GLOBAL_SEC_GROUP
+)
+from samba.samdb import SamDB
+from samba.auth import system_session
+import os
+from samba.tests.audit_log_base import AuditLogTestBase
+from samba.tests import delete_force
+import ldb
+from ldb import FLAG_MOD_REPLACE
+
# Account (re)created in setUp, whose membership changes are audited.
USER_NAME = "grpadttstuser01"
USER_PASS = samba.generate_random_password(32, 32)

# Second account; not referenced by the tests below.
SECOND_USER_NAME = "grpadttstuser02"
SECOND_USER_PASS = samba.generate_random_password(32, 32)

# Groups created in setUp and removed again in tearDown.
GROUP_NAME_01 = "group-audit-01"
GROUP_NAME_02 = "group-audit-02"
+
+
class GroupAuditTests(AuditLogTestBase):
    """Tests for the audit messages emitted on group membership changes.

    Membership operations are performed over LDAP, and the resulting
    "groupChange" messages received on the audit message bus are
    validated.
    """

    def setUp(self):
        # These must be set before super().setUp() so the base class
        # subscribes to the right message type.
        self.message_type = MSG_GROUP_LOG
        self.event_type = DSDB_GROUP_EVENT_NAME
        super().setUp()

        self.server_ip = os.environ["SERVER_IP"]

        host = "ldap://%s" % os.environ["SERVER"]
        self.ldb = SamDB(url=host,
                         session_info=system_session(),
                         credentials=self.get_credentials(),
                         lp=self.get_loadparm())
        self.server = os.environ["SERVER"]

        # Gets back the basedn
        self.base_dn = self.ldb.domain_dn()

        # Get the old "dSHeuristics" if it was set
        dsheuristics = self.ldb.get_dsheuristics()

        # Set the "dSHeuristics" to activate the correct "userPassword"
        # behaviour
        self.ldb.set_dsheuristics("000000001")

        # Reset the "dSHeuristics" as they were before
        self.addCleanup(self.ldb.set_dsheuristics, dsheuristics)

        # Get the old "minPwdAge"
        minPwdAge = self.ldb.get_minPwdAge()

        # Set it temporarily to "0"
        self.ldb.set_minPwdAge("0")
        self.base_dn = self.ldb.domain_dn()

        # Reset the "minPwdAge" as it was before
        self.addCleanup(self.ldb.set_minPwdAge, minPwdAge)

        # (Re)adds the test user USER_NAME with password USER_PASS
        self.ldb.add({
            "dn": "cn=" + USER_NAME + ",cn=users," + self.base_dn,
            "objectclass": "user",
            "sAMAccountName": USER_NAME,
            "userPassword": USER_PASS
        })
        self.ldb.newgroup(GROUP_NAME_01)
        self.ldb.newgroup(GROUP_NAME_02)

    def tearDown(self):
        super().tearDown()
        delete_force(self.ldb, "cn=" + USER_NAME + ",cn=users," + self.base_dn)
        self.ldb.deletegroup(GROUP_NAME_01)
        self.ldb.deletegroup(GROUP_NAME_02)

    def _expect_messages(self, count):
        """Wait for exactly *count* audit messages and return them."""
        messages = self.waitForMessages(count)
        print("Received %d messages" % len(messages))
        self.assertEqual(count,
                         len(messages),
                         "Did not receive the expected number of messages")
        return messages

    def _check_group_change(self, audit, action, group_cn, event_id=None):
        """Validate the common fields of one groupChange audit record.

        :param audit: the parsed "groupChange" element of a message
        :param action: expected action, e.g. "Added", "Removed",
            "PrimaryGroup"
        :param group_cn: cn of the group the change applies to (under
            cn=users)
        :param event_id: expected windows event id, or None when the
            message carries no event id we want to check

        Note: the original code compared the DNs with
        assertTrue(a, b), which treats the second argument as a failure
        message and so never performed the comparison; assertEqual does
        the intended check.
        """
        self.assertEqual(action, audit["action"])
        user_dn = "cn=" + USER_NAME + ",cn=users," + self.base_dn
        group_dn = "cn=" + group_cn + ",cn=users," + self.base_dn
        self.assertEqual(user_dn.lower(), audit["user"].lower())
        self.assertEqual(group_dn.lower(), audit["group"].lower())
        self.assertRegex(audit["remoteAddress"],
                         self.remoteAddress)
        self.assertTrue(self.is_guid(audit["sessionId"]))
        session_id = self.get_session()
        self.assertEqual(session_id, audit["sessionId"])
        service_description = self.get_service_description()
        self.assertEqual(service_description, "LDAP")
        if event_id is not None:
            self.assertEqual(event_id, audit["eventId"])

    def test_add_and_remove_users_from_group(self):
        """Check the audit messages for add/remove member operations."""
        #
        # Wait for the primary group change for the created user.
        #
        messages = self._expect_messages(2)
        self._check_group_change(messages[0]["groupChange"],
                                 "PrimaryGroup", "domain users")

        # Check the Add message for the new users primary group
        self._check_group_change(
            messages[1]["groupChange"], "Added", "domain users",
            event_id=EVT_ID_USER_ADDED_TO_GLOBAL_SEC_GROUP)

        #
        # Add the user to a group
        #
        self.discardMessages()
        self.ldb.add_remove_group_members(GROUP_NAME_01, [USER_NAME])
        messages = self._expect_messages(1)
        self._check_group_change(messages[0]["groupChange"],
                                 "Added", GROUP_NAME_01)

        #
        # Add the user to another group
        #
        self.discardMessages()
        self.ldb.add_remove_group_members(GROUP_NAME_02, [USER_NAME])
        messages = self._expect_messages(1)
        self._check_group_change(messages[0]["groupChange"],
                                 "Added", GROUP_NAME_02)

        #
        # Remove the user from a group
        #
        self.discardMessages()
        self.ldb.add_remove_group_members(
            GROUP_NAME_01,
            [USER_NAME],
            add_members_operation=False)
        messages = self._expect_messages(1)
        self._check_group_change(messages[0]["groupChange"],
                                 "Removed", GROUP_NAME_01)

        #
        # Re-add the user to a group
        #
        self.discardMessages()
        self.ldb.add_remove_group_members(GROUP_NAME_01, [USER_NAME])
        messages = self._expect_messages(1)
        self._check_group_change(messages[0]["groupChange"],
                                 "Added", GROUP_NAME_01)

    def test_change_primary_group(self):
        """Check the audit messages caused by a primaryGroupID change."""
        #
        # Wait for the primary group change for the created user.
        #
        messages = self._expect_messages(2)

        # Check the PrimaryGroup message
        self._check_group_change(messages[0]["groupChange"],
                                 "PrimaryGroup", "domain users")

        # Check the Add message for the new users primary group
        self._check_group_change(
            messages[1]["groupChange"], "Added", "domain users",
            event_id=EVT_ID_USER_ADDED_TO_GLOBAL_SEC_GROUP)

        #
        # Add the user to a group, the user needs to be a member of a group
        # before their primary group can be set to that group.
        #
        self.discardMessages()
        self.ldb.add_remove_group_members(GROUP_NAME_01, [USER_NAME])
        messages = self._expect_messages(1)
        self._check_group_change(
            messages[0]["groupChange"], "Added", GROUP_NAME_01,
            event_id=EVT_ID_USER_ADDED_TO_GLOBAL_SEC_GROUP)

        #
        # Change the primary group of a user
        #
        user_dn = "cn=" + USER_NAME + ",cn=users," + self.base_dn
        group_dn = "cn=" + GROUP_NAME_01 + ",cn=users," + self.base_dn
        # get the primaryGroupToken of the group
        res = self.ldb.search(base=group_dn, attrs=["primaryGroupToken"],
                              scope=ldb.SCOPE_BASE)
        group_id = res[0]["primaryGroupToken"]

        # set primaryGroupID attribute of the user to that group
        m = ldb.Message()
        m.dn = ldb.Dn(self.ldb, user_dn)
        m["primaryGroupID"] = ldb.MessageElement(
            group_id,
            FLAG_MOD_REPLACE,
            "primaryGroupID")
        self.discardMessages()
        self.ldb.modify(m)

        #
        # Wait for the primary group change.
        # Will see the user removed from the new group
        # the user added to their old primary group
        # and a new primary group event.
        #
        messages = self._expect_messages(3)

        self._check_group_change(
            messages[0]["groupChange"], "Removed", GROUP_NAME_01,
            event_id=EVT_ID_USER_REMOVED_FROM_GLOBAL_SEC_GROUP)

        self._check_group_change(
            messages[1]["groupChange"], "Added", "domain users",
            event_id=EVT_ID_USER_ADDED_TO_GLOBAL_SEC_GROUP)

        self._check_group_change(messages[2]["groupChange"],
                                 "PrimaryGroup", GROUP_NAME_01)
diff --git a/python/samba/tests/hostconfig.py b/python/samba/tests/hostconfig.py
new file mode 100644
index 0000000..0f03388
--- /dev/null
+++ b/python/samba/tests/hostconfig.py
@@ -0,0 +1,74 @@
+# Unix SMB/CIFS implementation. Tests for shares
+# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2009
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for samba.hostconfig."""
+
+from samba.hostconfig import SharesContainer
+from samba.tests import TestCase
+
+
+class MockService(object):
+
+ def __init__(self, data):
+ self.data = data
+
+ def __getitem__(self, name):
+ return self.data[name]
+
+
+class MockLoadParm(object):
+
+ def __init__(self, data):
+ self.data = data
+
+ def __getitem__(self, name):
+ return MockService(self.data[name])
+
+ def __len__(self):
+ return len(self.data)
+
+ def services(self):
+ return self.data.keys()
+
+
+class ShareTests(TestCase):
+
+ def _get_shares(self, conf):
+ return SharesContainer(MockLoadParm(conf))
+
+ def test_len_no_global(self):
+ shares = self._get_shares({})
+ self.assertEqual(0, len(shares))
+
+ def test_iter(self):
+ self.assertEqual([], list(self._get_shares({})))
+ self.assertEqual([], list(self._get_shares({"global": {}})))
+ self.assertEqual(
+ ["bla"],
+ list(self._get_shares({"global": {}, "bla": {}})))
+
+ def test_len(self):
+ shares = self._get_shares({"global": {}})
+ self.assertEqual(0, len(shares))
+
+ def test_getitem_nonexistent(self):
+ shares = self._get_shares({"global": {}})
+ self.assertRaises(KeyError, shares.__getitem__, "bla")
+
+ def test_getitem_global(self):
+ shares = self._get_shares({"global": {}})
+ self.assertRaises(KeyError, shares.__getitem__, "global")
diff --git a/python/samba/tests/imports.py b/python/samba/tests/imports.py
new file mode 100644
index 0000000..727f529
--- /dev/null
+++ b/python/samba/tests/imports.py
@@ -0,0 +1,31 @@
+# Unix SMB/CIFS implementation. Tests for python imports
+# Copyright (C) David Mulder <dmulder@samba.org> 2020
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+from samba.tests import TestCase
+
+class PyImportsTestCase(TestCase):
+ def setUp(self):
+ super().setUp()
+
+ def tearDown(self):
+ super().tearDown()
+
+ def test_samdb_import(self):
+ try:
+ from samba import dsdb, dsdb_dns
+ from samba import samdb
+ except ImportError as e:
+ self.fail('Failed to import samdb from samba: %s' % str(e))
diff --git a/python/samba/tests/join.py b/python/samba/tests/join.py
new file mode 100644
index 0000000..b47bc70
--- /dev/null
+++ b/python/samba/tests/join.py
@@ -0,0 +1,175 @@
+# Test joining as a DC and check the join was done right
+#
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import samba
+import sys
+import shutil
+import os
+from samba.tests.dns_base import DNSTKeyTest
+from samba.join import DCJoinContext
+from samba.dcerpc import drsuapi, misc, dns
+from samba.credentials import Credentials
+from samba.provision import interface_ips_v4
+
+
+def get_logger(name="subunit"):
+ """Get a logger object."""
+ import logging
+ logger = logging.getLogger(name)
+ logger.addHandler(logging.StreamHandler(sys.stderr))
+ return logger
+
+
+class JoinTestCase(DNSTKeyTest):
+ def setUp(self):
+ self.server = samba.tests.env_get_var_value("SERVER")
+ self.server_ip = samba.tests.env_get_var_value("SERVER_IP")
+ super().setUp()
+ self.lp = samba.tests.env_loadparm()
+ self.creds = self.get_credentials()
+ self.netbios_name = "jointest1"
+ logger = get_logger()
+
+ self.join_ctx = DCJoinContext(server=self.server, creds=self.creds,
+ lp=self.get_loadparm(),
+ netbios_name=self.netbios_name,
+ targetdir=self.tempdir,
+ domain=None, logger=logger,
+ dns_backend="SAMBA_INTERNAL")
+ self.join_ctx.userAccountControl = (samba.dsdb.UF_SERVER_TRUST_ACCOUNT |
+ samba.dsdb.UF_TRUSTED_FOR_DELEGATION)
+
+ self.join_ctx.replica_flags |= (drsuapi.DRSUAPI_DRS_WRIT_REP |
+ drsuapi.DRSUAPI_DRS_FULL_SYNC_IN_PROGRESS)
+ self.join_ctx.domain_replica_flags = self.join_ctx.replica_flags
+ self.join_ctx.secure_channel_type = misc.SEC_CHAN_BDC
+
+ self.join_ctx.cleanup_old_join()
+
+ self.join_ctx.force_all_ips = True
+
+ self.join_ctx.do_join()
+
+ def tearDown(self):
+ try:
+ paths = self.join_ctx.paths
+ except AttributeError:
+ paths = None
+
+ if paths is not None:
+ shutil.rmtree(paths.private_dir)
+ shutil.rmtree(paths.state_dir)
+ self.rm_dirs("etc", "msg.lock", "bind-dns")
+ self.rm_files("names.tdb")
+
+ self.join_ctx.cleanup_old_join(force=True)
+
+ super().tearDown()
+
+ def test_join_makes_records(self):
+ "create a query packet containing one query record via TCP"
+ p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
+ questions = []
+
+ name = self.join_ctx.dnshostname
+ q = self.make_name_question(name, dns.DNS_QTYPE_A, dns.DNS_QCLASS_IN)
+ questions.append(q)
+
+ # Get expected IPs
+ IPs = interface_ips_v4(self.lp, all_interfaces=True)
+
+ self.finish_name_packet(p, questions)
+ (response, response_packet) = self.dns_transaction_tcp(p, host=self.server_ip)
+ self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
+ self.assert_dns_opcode_equals(response, dns.DNS_OPCODE_QUERY)
+ self.assertEqual(response.ancount, len(IPs))
+
+ questions = []
+ name = "%s._msdcs.%s" % (self.join_ctx.ntds_guid, self.join_ctx.dnsforest)
+ q = self.make_name_question(name, dns.DNS_QTYPE_A, dns.DNS_QCLASS_IN)
+ questions.append(q)
+
+ self.finish_name_packet(p, questions)
+ (response, response_packet) = self.dns_transaction_tcp(p, host=self.server_ip)
+ self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
+ self.assert_dns_opcode_equals(response, dns.DNS_OPCODE_QUERY)
+
+ self.assertEqual(response.ancount, 1 + len(IPs))
+ self.assertEqual(response.answers[0].rr_type, dns.DNS_QTYPE_CNAME)
+ self.assertEqual(response.answers[0].rdata, self.join_ctx.dnshostname)
+ self.assertEqual(response.answers[1].rr_type, dns.DNS_QTYPE_A)
+
+ def test_join_records_can_update(self):
+ dc_creds = Credentials()
+ dc_creds.guess(self.join_ctx.lp)
+ dc_creds.set_machine_account(self.join_ctx.lp)
+
+ self.tkey_trans(creds=dc_creds)
+
+ p = self.make_name_packet(dns.DNS_OPCODE_UPDATE)
+ q = self.make_name_question(self.join_ctx.dnsdomain,
+ dns.DNS_QTYPE_SOA,
+ dns.DNS_QCLASS_IN)
+ questions = []
+ questions.append(q)
+ self.finish_name_packet(p, questions)
+
+ updates = []
+ # Delete the old expected IPs
+ IPs = interface_ips_v4(self.lp, all_interfaces=True)
+ for IP in IPs[1:]:
+ if ":" in IP:
+ r = dns.res_rec()
+ r.name = self.join_ctx.dnshostname
+ r.rr_type = dns.DNS_QTYPE_AAAA
+ r.rr_class = dns.DNS_QCLASS_NONE
+ r.ttl = 0
+ r.length = 0xffff
+ rdata = IP
+ else:
+ r = dns.res_rec()
+ r.name = self.join_ctx.dnshostname
+ r.rr_type = dns.DNS_QTYPE_A
+ r.rr_class = dns.DNS_QCLASS_NONE
+ r.ttl = 0
+ r.length = 0xffff
+ rdata = IP
+
+ r.rdata = rdata
+ updates.append(r)
+
+ p.nscount = len(updates)
+ p.nsrecs = updates
+
+ mac = self.sign_packet(p, self.key_name)
+ (response, response_p) = self.dns_transaction_udp(p, self.server_ip)
+ self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
+ self.verify_packet(response, response_p, mac)
+
+ p = self.make_name_packet(dns.DNS_OPCODE_QUERY)
+ questions = []
+
+ name = self.join_ctx.dnshostname
+ q = self.make_name_question(name, dns.DNS_QTYPE_A, dns.DNS_QCLASS_IN)
+ questions.append(q)
+
+ self.finish_name_packet(p, questions)
+ (response, response_packet) = self.dns_transaction_tcp(p, host=self.server_ip)
+ self.assert_dns_rcode_equals(response, dns.DNS_RCODE_OK)
+ self.assert_dns_opcode_equals(response, dns.DNS_OPCODE_QUERY)
+ self.assertEqual(response.ancount, 1)
diff --git a/python/samba/tests/kcc/__init__.py b/python/samba/tests/kcc/__init__.py
new file mode 100644
index 0000000..31354f0
--- /dev/null
+++ b/python/samba/tests/kcc/__init__.py
@@ -0,0 +1,90 @@
+# Unix SMB/CIFS implementation. Tests for samba.kcc core.
+# Copyright (C) Andrew Bartlett 2015
+#
+# Written by Douglas Bagnall <douglas.bagnall@catalyst.net.nz>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for samba.kcc"""
+
+import samba
+import os
+import time
+from tempfile import mkdtemp
+
+import samba.tests
+from samba import kcc
+from samba import ldb
+from samba.dcerpc import misc
+
+
+from samba.param import LoadParm
+from samba.credentials import Credentials
+from samba.samdb import SamDB
+
+unix_now = int(time.time())
+unix_once_upon_a_time = 1000000000 # 2001-09-09
+
+ENV_DSAS = {
+ 'ad_dc_ntvfs': ['CN=LOCALDC,CN=Servers,CN=Default-First-Site-Name,CN=Sites,CN=Configuration,DC=samba,DC=example,DC=com'],
+ 'fl2000dc': ['CN=DC5,CN=Servers,CN=Default-First-Site-Name,CN=Sites,CN=Configuration,DC=samba2000,DC=example,DC=com'],
+ 'fl2003dc': ['CN=DC6,CN=Servers,CN=Default-First-Site-Name,CN=Sites,CN=Configuration,DC=samba2003,DC=example,DC=com'],
+ 'fl2008r2dc': ['CN=DC7,CN=Servers,CN=Default-First-Site-Name,CN=Sites,CN=Configuration,DC=samba2008r2,DC=example,DC=com'],
+ 'promoted_dc': ['CN=PROMOTEDVDC,CN=Servers,CN=Default-First-Site-Name,CN=Sites,CN=Configuration,DC=samba,DC=example,DC=com',
+ 'CN=LOCALDC,CN=Servers,CN=Default-First-Site-Name,CN=Sites,CN=Configuration,DC=samba,DC=example,DC=com'],
+ 'vampire_dc': ['CN=LOCALDC,CN=Servers,CN=Default-First-Site-Name,CN=Sites,CN=Configuration,DC=samba,DC=example,DC=com',
+ 'CN=LOCALVAMPIREDC,CN=Servers,CN=Default-First-Site-Name,CN=Sites,CN=Configuration,DC=samba,DC=example,DC=com'],
+}
+
+
+class KCCTests(samba.tests.TestCase):
+ def setUp(self):
+ super().setUp()
+ self.lp = LoadParm()
+ self.creds = Credentials()
+ self.creds.guess(self.lp)
+ self.creds.set_username(os.environ["USERNAME"])
+ self.creds.set_password(os.environ["PASSWORD"])
+
+ def test_list_dsas(self):
+ my_kcc = kcc.KCC(unix_now, False, False, False, False)
+ my_kcc.load_samdb("ldap://%s" % os.environ["SERVER"],
+ self.lp, self.creds)
+ try:
+ dsas = my_kcc.list_dsas()
+ except kcc.KCCError as e:
+ self.fail("kcc.list_dsas failed with %s" % e)
+ env = os.environ['TEST_ENV']
+ for expected_dsa in ENV_DSAS[env]:
+ self.assertIn(expected_dsa, dsas)
+
+ def test_verify(self):
+ """check that the KCC generates graphs that pass its own verify
+ option. This is not a spectacular achievement when there are
+ only a couple of nodes to connect, but it shows something.
+ """
+ my_kcc = kcc.KCC(unix_now, readonly=True, verify=True,
+ debug=False, dot_file_dir=None)
+
+ # As this is flapping with errors under python3, we catch
+ # exceptions and turn them into failures.
+ try:
+ my_kcc.run("ldap://%s" % os.environ["SERVER"],
+ self.lp, self.creds,
+ attempt_live_connections=False)
+ except (samba.kcc.graph_utils.GraphError, kcc.KCCError):
+ import traceback
+ traceback.print_exc()
+ self.fail()
diff --git a/python/samba/tests/kcc/graph.py b/python/samba/tests/kcc/graph.py
new file mode 100644
index 0000000..b581158
--- /dev/null
+++ b/python/samba/tests/kcc/graph.py
@@ -0,0 +1,67 @@
+# Unix SMB/CIFS implementation. Tests for kcc.graph routines
+# Copyright (C) Andrew Bartlett 2015
+#
+# Written by Douglas Bagnall <douglas.bagnall@catalyst.net.nz>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for samba.kcc.graph"""
+
+import samba
+import samba.tests
+from samba.kcc.graph import total_schedule, convert_schedule_to_repltimes
+
+def ntdsconn_schedule(times):
+ if times is None:
+ return None
+ from samba.dcerpc import drsblobs
+ schedule = drsblobs.schedule()
+ schedule.size = 188
+ schedule.bandwidth = 0
+ schedule.numberOfSchedules = 1
+ header = drsblobs.scheduleHeader()
+ header.type = 0
+ header.offset = 20
+ schedule.headerArray = [header]
+ data = drsblobs.scheduleSlots()
+ data.slots = times
+ schedule.dataArray = [data]
+ return schedule
+
+
+class GraphFunctionTests(samba.tests.TestCase):
+
+ def test_total_schedule(self):
+ schedule = [0x81] * 84
+ for schedule, total in (
+ ([0x81] * 84, 168),
+ ([0xff] * 84, 84 * 8),
+ ([0xaa] * 84, 84 * 4),
+ ([0x03, 0x33] * 42, 42 * 6),
+ (list(range(7)) * 12, 12 * 9),
+ (list(range(4)) * 21, 21 * 4)):
+ self.assertEqual(total_schedule(schedule), total)
+
+ def test_convert_schedule_to_repltimes(self):
+ for ntdsconn_times, repltimes in (
+ ([0x01] * 168, [0x11] * 84),
+ (None, [0x11] * 84),
+ ([0x06] * 168, [0x66] * 84),
+ ([0x03, 0xa] * 84, [0x3a] * 84),
+ (list(range(7)) * 24,
+ [0x01, 0x23, 0x45, 0x60, 0x12, 0x34, 0x56] * 12)):
+ schedule = ntdsconn_schedule(ntdsconn_times)
+ self.assertEqual(convert_schedule_to_repltimes(schedule),
+ repltimes)
diff --git a/python/samba/tests/kcc/graph_utils.py b/python/samba/tests/kcc/graph_utils.py
new file mode 100644
index 0000000..3eaa1c7
--- /dev/null
+++ b/python/samba/tests/kcc/graph_utils.py
@@ -0,0 +1,165 @@
+# Unix SMB/CIFS implementation. Tests for kcc.graph_utils routines
+# Copyright (C) Andrew Bartlett 2015
+#
+# Written by Douglas Bagnall <douglas.bagnall@catalyst.net.nz>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for samba.kcc.graph_utils"""
+
+import samba
+import samba.tests
+from samba.kcc.graph_utils import GraphError
+from samba.kcc.graph_utils import (verify_graph_complete,
+ verify_graph_connected,
+ verify_graph_connected_under_edge_failures,
+ verify_graph_forest,
+ verify_graph_connected_under_vertex_failures,
+ verify_graph_no_lonely_vertices)
+
+import itertools
+
+
+def make_tree(vertices):
+ if len(vertices) < 2:
+ return ()
+ remaining = set(vertices)
+ used = set()
+ edges = set()
+ used.add(remaining.pop())
+ used.add(remaining.pop())
+ edges.add(tuple(used))
+ while remaining:
+ v = remaining.pop()
+ w = used.pop()
+ e = (w, v)
+ edges.add(e)
+ used.update(e)
+ return tuple(edges)
+
+# TODO: test directed graphs
+
+
+class UndirectedGraphTests(samba.tests.TestCase):
+
+ def setUp(self):
+ super().setUp()
+ vertices = tuple('abcdefgh')
+ vertices2 = tuple('ijk')
+ edges = tuple(itertools.combinations(vertices, 2))
+ edges2 = tuple(itertools.combinations(vertices2, 2))
+ line_edges = list(zip(vertices[1:], vertices[:-1]))
+ ring_edges = line_edges + [(vertices[0], vertices[-1])]
+
+ tree = make_tree(vertices)
+ tree2 = make_tree(vertices2)
+
+ self.complete_graph = [edges, vertices, vertices]
+
+ self.disconnected_clusters = [edges + edges2,
+ vertices + vertices2,
+ vertices + vertices2]
+
+ self.graph_with_unreachables = [edges,
+ vertices + vertices2,
+ vertices]
+
+ self.ring = [ring_edges, vertices, vertices]
+ self.line = [line_edges, vertices, vertices]
+
+ self.tree = [tree, vertices, vertices]
+ self.forest = [tree + tree2,
+ vertices + vertices2,
+ vertices + vertices2]
+
+ self.unconnected_graph = ((), vertices, ())
+
+ def assertGraphError(self, fn, *args):
+ return self.assertRaises(GraphError, fn, *args)
+
+ def test_graph_complete(self):
+ fn = verify_graph_complete
+
+ self.assertGraphError(fn, *self.disconnected_clusters)
+ self.assertGraphError(fn, *self.graph_with_unreachables)
+ self.assertGraphError(fn, *self.ring)
+ self.assertGraphError(fn, *self.tree)
+
+ self.assertIsNone(fn(*self.complete_graph))
+
+ def test_graph_connected(self):
+ fn = verify_graph_connected
+
+ self.assertGraphError(fn, *self.disconnected_clusters)
+ self.assertGraphError(fn, *self.graph_with_unreachables)
+ self.assertGraphError(fn, *self.forest)
+ self.assertGraphError(fn, *self.unconnected_graph)
+
+ self.assertIsNone(fn(*self.line))
+ self.assertIsNone(fn(*self.ring))
+ self.assertIsNone(fn(*self.complete_graph))
+ self.assertIsNone(fn(*self.tree))
+
+ def test_graph_forest(self):
+ fn = verify_graph_forest
+
+ self.assertGraphError(fn, *self.disconnected_clusters)
+ self.assertGraphError(fn, *self.graph_with_unreachables)
+ self.assertGraphError(fn, *self.ring)
+
+ self.assertIsNone(fn(*self.line))
+ self.assertIsNone(fn(*self.tree))
+ self.assertIsNone(fn(*self.forest))
+ self.assertIsNone(fn(*self.unconnected_graph))
+
+ def test_graph_connected_under_edge_failures(self):
+ fn = verify_graph_connected_under_edge_failures
+
+ self.assertGraphError(fn, *self.line)
+ self.assertGraphError(fn, *self.tree)
+ self.assertGraphError(fn, *self.forest)
+ self.assertGraphError(fn, *self.disconnected_clusters)
+
+ self.assertIsNone(fn(*self.ring))
+ self.assertIsNone(fn(*self.complete_graph))
+
+ def test_graph_connected_under_vertex_failures(self):
+ # XXX no tests to distinguish this from the edge_failures case
+ fn = verify_graph_connected_under_vertex_failures
+
+ self.assertGraphError(fn, *self.line)
+ self.assertGraphError(fn, *self.tree)
+ self.assertGraphError(fn, *self.forest)
+ self.assertGraphError(fn, *self.disconnected_clusters)
+
+ self.assertIsNone(fn(*self.ring))
+ self.assertIsNone(fn(*self.complete_graph))
+
+ def test_graph_multi_edge_forest(self):
+ pass
+
+ def test_graph_no_lonely_vertices(self):
+ fn = verify_graph_no_lonely_vertices
+ self.assertGraphError(fn, *self.unconnected_graph)
+ self.assertGraphError(fn, *self.graph_with_unreachables)
+
+ self.assertIsNone(fn(*self.ring))
+ self.assertIsNone(fn(*self.complete_graph))
+ self.assertIsNone(fn(*self.line))
+ self.assertIsNone(fn(*self.tree))
+ self.assertIsNone(fn(*self.forest))
+
+ def test_graph_no_unknown_vertices(self):
+ pass
diff --git a/python/samba/tests/kcc/kcc_utils.py b/python/samba/tests/kcc/kcc_utils.py
new file mode 100644
index 0000000..c1af998
--- /dev/null
+++ b/python/samba/tests/kcc/kcc_utils.py
@@ -0,0 +1,393 @@
+# Unix SMB/CIFS implementation. Tests for samba.kcc.kcc_utils.
+# Copyright (C) Andrew Bartlett 2015
+#
+# Written by Douglas Bagnall <douglas.bagnall@catalyst.net.nz>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for samba.kcc.kcc_utils"""
+import samba
+import samba.tests
+from samba.kcc.kcc_utils import new_connection_schedule, drsblobs
+from samba.kcc.kcc_utils import uncovered_sites_to_cover
+from samba.credentials import Credentials
+from samba.auth import system_session
+from samba.samdb import SamDB
+from samba.tests import delete_force
+
+
+class ScheduleTests(samba.tests.TestCase):
+
+ def test_new_connection_schedule(self):
+ schedule = new_connection_schedule()
+ self.assertIsInstance(schedule, drsblobs.schedule)
+ self.assertEqual(schedule.size, 188)
+ self.assertEqual(len(schedule.dataArray[0].slots), 168)
+
+
+# OK, this is pathetic, but the rest of it looks really hard, with the
+# classes all intertwingled with each other and the samdb. That is to say:
+# XXX later.
+
+class SiteCoverageTests(samba.tests.TestCase):
+
+ def setUp(self):
+ self.prefix = "kcc_"
+ self.lp = samba.tests.env_loadparm()
+
+ self.sites = {}
+ self.site_links = {}
+
+ self.creds = Credentials()
+ self.creds.guess(self.lp)
+ self.session = system_session()
+
+ self.samdb = SamDB(session_info=self.session,
+ credentials=self.creds,
+ lp=self.lp)
+
+ def tearDown(self):
+ self.samdb.transaction_start()
+
+ for site in self.sites:
+ delete_force(self.samdb, site, controls=['tree_delete:1'])
+
+ for site_link in self.site_links:
+ delete_force(self.samdb, site_link)
+
+ self.samdb.transaction_commit()
+
+ def _add_server(self, name, site):
+ dn = "CN={0},CN=Servers,{1}".format(name, site)
+ self.samdb.add({
+ "dn": dn,
+ "objectClass": "server",
+ "serverReference": self.samdb.domain_dn()
+ })
+ return dn
+
+ def _add_site(self, name):
+ dn = "CN={0},CN=Sites,{1}".format(
+ name, self.samdb.get_config_basedn()
+ )
+ self.samdb.add({
+ "dn": dn,
+ "objectClass": "site"
+ })
+ self.samdb.add({
+ "dn": "CN=Servers," + dn,
+ "objectClass": ["serversContainer"]
+ })
+
+ self.sites[dn] = name
+ return dn, name.lower()
+
+ def _add_site_link(self, name, links=None, cost=100):
+ if links is None:
+ links = []
+ dn = "CN={0},CN=IP,CN=Inter-Site Transports,CN=Sites,{1}".format(
+ name, self.samdb.get_config_basedn()
+ )
+ self.samdb.add({
+ "dn": dn,
+ "objectClass": "siteLink",
+ "cost": str(cost),
+ "siteList": links
+ })
+ self.site_links[dn] = name
+ return dn
+
+ def test_single_site_link_same_dc_count(self):
+ self.samdb.transaction_start()
+ site1, name1 = self._add_site(self.prefix + "ABCD")
+ site2, name2 = self._add_site(self.prefix + "BCDE")
+
+ uncovered_dn, uncovered = self._add_site(self.prefix + "uncovered")
+
+ self._add_server(self.prefix + "ABCD" + '1', site1)
+ self._add_server(self.prefix + "BCDE" + '1', site2)
+
+ self._add_site_link(self.prefix + "link",
+ [site1, site2, uncovered_dn])
+ self.samdb.transaction_commit()
+
+ to_cover = uncovered_sites_to_cover(self.samdb, name1)
+ to_cover.sort()
+
+ self.assertEqual([uncovered], to_cover)
+
+ to_cover = uncovered_sites_to_cover(self.samdb, name2)
+ to_cover.sort()
+
+ self.assertEqual([], to_cover)
+
+ def test_single_site_link_different_dc_count(self):
+ self.samdb.transaction_start()
+ site1, name1 = self._add_site(self.prefix + "ABCD")
+ site2, name2 = self._add_site(self.prefix + "BCDE")
+
+ uncovered_dn, uncovered = self._add_site(self.prefix + "uncovered")
+
+ self._add_server(self.prefix + "ABCD" + '1', site1)
+ self._add_server(self.prefix + "ABCD" + '2', site1)
+ self._add_server(self.prefix + "BCDE" + '1', site2)
+ self._add_server(self.prefix + "BCDE" + '2', site2)
+ self._add_server(self.prefix + "BCDE" + '3', site2)
+
+ self._add_site_link(self.prefix + "link",
+ [site1, site2, uncovered_dn])
+ self.samdb.transaction_commit()
+
+ to_cover = uncovered_sites_to_cover(self.samdb, name1)
+ to_cover.sort()
+
+ self.assertEqual([], to_cover)
+
+ to_cover = uncovered_sites_to_cover(self.samdb, name2)
+ to_cover.sort()
+
+ self.assertEqual([uncovered], to_cover)
+
+ def test_two_site_links_same_cost(self):
+ self.samdb.transaction_start()
+ site1, name1 = self._add_site(self.prefix + "ABCD")
+ site2, name2 = self._add_site(self.prefix + "BCDE")
+
+ uncovered_dn, uncovered = self._add_site(self.prefix + "uncovered")
+
+ self._add_server(self.prefix + "ABCD" + '1', site1)
+ self._add_server(self.prefix + "ABCD" + '2', site1)
+ self._add_server(self.prefix + "BCDE" + '1', site2)
+ self._add_server(self.prefix + "BCDE" + '2', site2)
+ self._add_server(self.prefix + "BCDE" + '3', site2)
+
+ self._add_site_link(self.prefix + "link1",
+ [site1, uncovered_dn])
+ self._add_site_link(self.prefix + "link2",
+ [site2, uncovered_dn])
+ self.samdb.transaction_commit()
+
+ to_cover = uncovered_sites_to_cover(self.samdb, name1)
+ to_cover.sort()
+
+ self.assertEqual([uncovered], to_cover)
+
+ to_cover = uncovered_sites_to_cover(self.samdb, name2)
+ to_cover.sort()
+
+ self.assertEqual([uncovered], to_cover)
+
+ def test_two_site_links_different_costs(self):
+ self.samdb.transaction_start()
+ site1, name1 = self._add_site(self.prefix + "ABCD")
+ site2, name2 = self._add_site(self.prefix + "BCDE")
+
+ uncovered_dn, uncovered = self._add_site(self.prefix + "uncovered")
+
+ self._add_server(self.prefix + "ABCD" + '1', site1)
+ self._add_server(self.prefix + "BCDE" + '1', site2)
+ self._add_server(self.prefix + "BCDE" + '2', site2)
+
+ self._add_site_link(self.prefix + "link1",
+ [site1, uncovered_dn],
+ cost=50)
+ self._add_site_link(self.prefix + "link2",
+ [site2, uncovered_dn],
+ cost=75)
+ self.samdb.transaction_commit()
+
+ to_cover = uncovered_sites_to_cover(self.samdb, name1)
+ to_cover.sort()
+
+ self.assertEqual([uncovered], to_cover)
+
+ to_cover = uncovered_sites_to_cover(self.samdb, name2)
+ to_cover.sort()
+
+ self.assertEqual([], to_cover)
+
+ def test_three_site_links_different_costs(self):
+ self.samdb.transaction_start()
+ site1, name1 = self._add_site(self.prefix + "ABCD")
+ site2, name2 = self._add_site(self.prefix + "BCDE")
+ site3, name3 = self._add_site(self.prefix + "CDEF")
+
+ uncovered_dn, uncovered = self._add_site(self.prefix + "uncovered")
+
+ self._add_server(self.prefix + "ABCD" + '1', site1)
+ self._add_server(self.prefix + "BCDE" + '1', site2)
+ self._add_server(self.prefix + "CDEF" + '1', site3)
+ self._add_server(self.prefix + "CDEF" + '2', site3)
+
+ self._add_site_link(self.prefix + "link1",
+ [site1, uncovered_dn],
+ cost=50)
+ self._add_site_link(self.prefix + "link2",
+ [site2, uncovered_dn],
+ cost=75)
+ self._add_site_link(self.prefix + "link3",
+ [site3, uncovered_dn],
+ cost=60)
+ self.samdb.transaction_commit()
+
+ to_cover = uncovered_sites_to_cover(self.samdb, name1)
+ to_cover.sort()
+
+ self.assertEqual([uncovered], to_cover)
+
+ to_cover = uncovered_sites_to_cover(self.samdb, name2)
+ to_cover.sort()
+
+ self.assertEqual([], to_cover)
+
+ to_cover = uncovered_sites_to_cover(self.samdb, name3)
+ to_cover.sort()
+
+ self.assertEqual([], to_cover)
+
+ def test_three_site_links_duplicate_costs(self):
+ # two of the links have the same cost; the other is higher
+ self.samdb.transaction_start()
+ site1, name1 = self._add_site(self.prefix + "ABCD")
+ site2, name2 = self._add_site(self.prefix + "BCDE")
+ site3, name3 = self._add_site(self.prefix + "CDEF")
+
+ uncovered_dn, uncovered = self._add_site(self.prefix + "uncovered")
+
+ self._add_server(self.prefix + "ABCD" + '1', site1)
+ self._add_server(self.prefix + "BCDE" + '1', site2)
+ self._add_server(self.prefix + "CDEF" + '1', site3)
+ self._add_server(self.prefix + "CDEF" + '2', site3)
+
+ self._add_site_link(self.prefix + "link1",
+ [site1, uncovered_dn],
+ cost=50)
+ self._add_site_link(self.prefix + "link2",
+ [site2, uncovered_dn],
+ cost=75)
+ self._add_site_link(self.prefix + "link3",
+ [site3, uncovered_dn],
+ cost=50)
+ self.samdb.transaction_commit()
+
+ to_cover = uncovered_sites_to_cover(self.samdb, name1)
+ to_cover.sort()
+
+ self.assertEqual([uncovered], to_cover)
+
+ to_cover = uncovered_sites_to_cover(self.samdb, name2)
+ to_cover.sort()
+
+ self.assertEqual([], to_cover)
+
+ to_cover = uncovered_sites_to_cover(self.samdb, name3)
+ to_cover.sort()
+
+ self.assertEqual([uncovered], to_cover)
+
+ def test_complex_setup_with_multiple_uncovered_sites(self):
+ self.samdb.transaction_start()
+ site1, name1 = self._add_site(self.prefix + "ABCD")
+ site2, name2 = self._add_site(self.prefix + "BCDE")
+ site3, name3 = self._add_site(self.prefix + "CDEF")
+
+ site4, name4 = self._add_site(self.prefix + "1234")
+ site5, name5 = self._add_site(self.prefix + "2345")
+ site6, name6 = self._add_site(self.prefix + "3456")
+
+ uncovered_dn1, uncovered1 = self._add_site(self.prefix + "uncovered1")
+ uncovered_dn2, uncovered2 = self._add_site(self.prefix + "uncovered2")
+ uncovered_dn3, uncovered3 = self._add_site(self.prefix + "uncovered3")
+
+ # Site Link Cluster 1 - Server List
+ self._add_server(self.prefix + "ABCD" + '1', site1)
+
+ self._add_server(self.prefix + "BCDE" + '1', site2)
+ self._add_server(self.prefix + "BCDE" + '2', site2)
+
+ self._add_server(self.prefix + "CDEF" + '1', site3)
+ self._add_server(self.prefix + "CDEF" + '2', site3)
+ self._add_server(self.prefix + "CDEF" + '3', site3)
+
+ # Site Link Cluster 2 - Server List
+ self._add_server(self.prefix + "1234" + '1', site4)
+ self._add_server(self.prefix + "1234" + '2', site4)
+
+ self._add_server(self.prefix + "2345" + '1', site5)
+ self._add_server(self.prefix + "2345" + '2', site5)
+
+ self._add_server(self.prefix + "3456" + '1', site6)
+
+ # Join to Uncovered1 (preference to site link cluster 1)
+ self._add_site_link(self.prefix + "link1A",
+ [site1, site2, site3, uncovered_dn1],
+ cost=49)
+ self._add_site_link(self.prefix + "link2A",
+ [site4, site5, site6, uncovered_dn1],
+ cost=50)
+
+ # Join to Uncovered2 (no preference on site links)
+ self._add_site_link(self.prefix + "link1B",
+ [site1, site2, site3, uncovered_dn2],
+ cost=50)
+ self._add_site_link(self.prefix + "link2B",
+ [site4, site5, site6, uncovered_dn2],
+ cost=50)
+
+ # Join to Uncovered3 (preference to site link cluster 2)
+ self._add_site_link(self.prefix + "link1C",
+ [site1, site2, site3, uncovered_dn3],
+ cost=50)
+ self._add_site_link(self.prefix + "link2C",
+ [site4, site5, site6, uncovered_dn3],
+ cost=49)
+
+ self.samdb.transaction_commit()
+
+ to_cover = uncovered_sites_to_cover(self.samdb, name1)
+ to_cover.sort()
+
+ self.assertEqual([], to_cover)
+
+ to_cover = uncovered_sites_to_cover(self.samdb, name2)
+ to_cover.sort()
+
+ self.assertEqual([], to_cover)
+
+ to_cover = uncovered_sites_to_cover(self.samdb, name3)
+ to_cover.sort()
+
+ self.assertEqual([uncovered1, uncovered2], to_cover)
+
+ to_cover = uncovered_sites_to_cover(self.samdb, name4)
+ to_cover.sort()
+
+ self.assertEqual([uncovered2, uncovered3], to_cover)
+
+ to_cover = uncovered_sites_to_cover(self.samdb, name5)
+ to_cover.sort()
+
+ self.assertEqual([], to_cover)
+
+ to_cover = uncovered_sites_to_cover(self.samdb, name6)
+ to_cover.sort()
+
+ self.assertEqual([], to_cover)
+
+ for to_check in [uncovered1, uncovered2, uncovered3]:
+ to_cover = uncovered_sites_to_cover(self.samdb, to_check)
+ to_cover.sort()
+
+ self.assertEqual([], to_cover)
diff --git a/python/samba/tests/kcc/ldif_import_export.py b/python/samba/tests/kcc/ldif_import_export.py
new file mode 100644
index 0000000..9e573bf
--- /dev/null
+++ b/python/samba/tests/kcc/ldif_import_export.py
@@ -0,0 +1,240 @@
+# Unix SMB/CIFS implementation. Tests for samba.kcc.ldif_import_export.
+# Copyright (C) Andrew Bartlett 2015
+#
+# Written by Douglas Bagnall <douglas.bagnall@catalyst.net.nz>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for samba.kcc.ldif_import_export"""
+
+import samba
+import os
+import time
+import subprocess
+import logging
+import samba.tests
+from samba.kcc import ldif_import_export, KCC
+from samba import ldb
+from samba.dcerpc import misc
+
+
+from samba.param import LoadParm
+from samba.credentials import Credentials
+from samba.samdb import SamDB
+
# Single "now" timestamp captured at import time so every KCC instance
# created during a test run agrees on the current time.
unix_now = int(time.time())

# Multi-site test database; MULTISITE_LDIF_DSAS below lists its DCs.
MULTISITE_LDIF = os.path.join(os.environ['SRCDIR_ABS'],
                              "testdata/ldif-utils-test-multisite.ldif")


# UNCONNECTED_LDIF is a single site, unconnected 5DC database that was
# created using samba-tool domain join in testenv.
UNCONNECTED_LDIF = os.path.join(os.environ['SRCDIR_ABS'],
                                "testdata/unconnected-intrasite.ldif")

# (DSA settings DN, site name) pairs for every DC in MULTISITE_LDIF.
MULTISITE_LDIF_DSAS = (
    ("CN=WIN08,CN=Servers,CN=Site-4,CN=Sites,CN=Configuration,DC=ad,DC=samba,DC=example,DC=com",
     "Site-4"),
    ("CN=WIN07,CN=Servers,CN=Site-4,CN=Sites,CN=Configuration,DC=ad,DC=samba,DC=example,DC=com",
     "Site-4"),
    ("CN=WIN06,CN=Servers,CN=Site-3,CN=Sites,CN=Configuration,DC=ad,DC=samba,DC=example,DC=com",
     "Site-3"),
    ("CN=WIN09,CN=Servers,CN=Site-5,CN=Sites,CN=Configuration,DC=ad,DC=samba,DC=example,DC=com",
     "Site-5"),
    ("CN=WIN10,CN=Servers,CN=Site-5,CN=Sites,CN=Configuration,DC=ad,DC=samba,DC=example,DC=com",
     "Site-5"),
    ("CN=WIN02,CN=Servers,CN=Site-2,CN=Sites,CN=Configuration,DC=ad,DC=samba,DC=example,DC=com",
     "Site-2"),
    ("CN=WIN04,CN=Servers,CN=Site-2,CN=Sites,CN=Configuration,DC=ad,DC=samba,DC=example,DC=com",
     "Site-2"),
    ("CN=WIN03,CN=Servers,CN=Site-2,CN=Sites,CN=Configuration,DC=ad,DC=samba,DC=example,DC=com",
     "Site-2"),
    ("CN=WIN05,CN=Servers,CN=Site-2,CN=Sites,CN=Configuration,DC=ad,DC=samba,DC=example,DC=com",
     "Site-2"),
    ("CN=WIN01,CN=Servers,CN=Default-First-Site-Name,CN=Sites,CN=Configuration,DC=ad,DC=samba,DC=example,DC=com",
     "Default-First-Site-Name"),
)
+
+
class LdifImportExportTests(samba.tests.TestCaseInTempDir):
    """Round-trip tests for samba.kcc.ldif_import_export: import the
    multi-site test LDIF into a temporary SamDB and export it back out
    as LDIF again."""

    def setUp(self):
        super().setUp()
        self.lp = LoadParm()
        self.creds = Credentials()
        self.creds.guess(self.lp)

    def remove_files(self, *files):
        # Clean up temporary databases, refusing to unlink anything that
        # is not inside this test's own tempdir.
        for f in files:
            assert(f.startswith(self.tempdir))
            os.unlink(f)

    def test_write_search_url(self):
        # NOTE(review): placeholder test -- it asserts nothing.
        pass

    def test_ldif_to_samdb(self):
        # Import the multi-site LDIF and check the resulting SamDB knows
        # its own NTDS settings object (objectGUID and dsServiceName).
        dburl = os.path.join(self.tempdir, "ldap")
        samdb = ldif_import_export.ldif_to_samdb(dburl, self.lp,
                                                 MULTISITE_LDIF)
        self.assertIsInstance(samdb, SamDB)

        dsa = ("CN=WIN01,CN=Servers,CN=Default-First-Site-Name,CN=Sites,"
               "CN=Configuration,DC=ad,DC=samba,DC=example,DC=com")
        res = samdb.search(ldb.Dn(samdb, "CN=NTDS Settings," + dsa),
                           scope=ldb.SCOPE_BASE, attrs=["objectGUID"])

        ntds_guid = misc.GUID(samdb.get_ntds_GUID())
        self.assertEqual(misc.GUID(res[0]["objectGUID"][0]), ntds_guid)

        service_name_res = samdb.search(base="",
                                        scope=ldb.SCOPE_BASE,
                                        attrs=["dsServiceName"])
        dn = ldb.Dn(samdb,
                    service_name_res[0]["dsServiceName"][0].decode('utf8'))
        self.assertEqual(dn, ldb.Dn(samdb, "CN=NTDS Settings," + dsa))
        self.remove_files(dburl)

    def test_ldif_to_samdb_forced_local_dsa(self):
        # Repeat the import once per DC in the LDIF, forcing that DC to
        # be the local DSA, and check the DB reflects the forced choice.
        for dsa, site in MULTISITE_LDIF_DSAS:
            dburl = os.path.join(self.tempdir, "ldif-to-samba-forced-local-dsa"
                                 "-%s" % dsa)
            samdb = ldif_import_export.ldif_to_samdb(dburl, self.lp,
                                                     MULTISITE_LDIF,
                                                     forced_local_dsa=dsa)
            self.assertIsInstance(samdb, SamDB)
            self.assertEqual(samdb.server_site_name(), site)

            res = samdb.search(ldb.Dn(samdb, "CN=NTDS Settings," + dsa),
                               scope=ldb.SCOPE_BASE, attrs=["objectGUID"])

            ntds_guid = misc.GUID(samdb.get_ntds_GUID())
            self.assertEqual(misc.GUID(res[0]["objectGUID"][0]), ntds_guid)

            service_name_res = samdb.search(base="",
                                            scope=ldb.SCOPE_BASE,
                                            attrs=["dsServiceName"])
            dn = ldb.Dn(samdb,
                        service_name_res[0]["dsServiceName"][0].decode('utf8'))
            self.assertEqual(dn, ldb.Dn(samdb, "CN=NTDS Settings," + dsa))
            self.remove_files(dburl)

    def test_samdb_to_ldif_file(self):
        # Import LDIF -> SamDB, export back to LDIF, then re-import the
        # export to prove it is still a loadable database.
        dburl = os.path.join(self.tempdir, "ldap")
        dburl2 = os.path.join(self.tempdir, "ldap_roundtrip")
        ldif_file = os.path.join(self.tempdir, "ldif")
        samdb = ldif_import_export.ldif_to_samdb(dburl, self.lp,
                                                 MULTISITE_LDIF)
        self.assertIsInstance(samdb, SamDB)
        ldif_import_export.samdb_to_ldif_file(samdb, dburl,
                                              lp=self.lp, creds=None,
                                              ldif_file=ldif_file)
        self.assertGreater(os.path.getsize(ldif_file), 1000,
                           "LDIF should be larger than 1000 bytes")
        samdb = ldif_import_export.ldif_to_samdb(dburl2, self.lp,
                                                 ldif_file)
        self.assertIsInstance(samdb, SamDB)
        dsa = ("CN=WIN01,CN=Servers,CN=Default-First-Site-Name,CN=Sites,"
               "CN=Configuration,DC=ad,DC=samba,DC=example,DC=com")
        # NOTE(review): `res` is unused; this only checks that the
        # lookup in the round-tripped database does not raise.
        res = samdb.search(ldb.Dn(samdb, "CN=NTDS Settings," + dsa),
                           scope=ldb.SCOPE_BASE, attrs=["objectGUID"])
        self.remove_files(dburl)
        self.remove_files(dburl2)
        self.remove_files(ldif_file)
+
+
class KCCMultisiteLdifTests(samba.tests.TestCaseInTempDir):
    """Run the full KCC against temporary databases imported from the
    test LDIF files."""

    def setUp(self):
        super().setUp()
        self.lp = LoadParm()
        self.creds = Credentials()
        self.creds.guess(self.lp)

    def remove_files(self, *files):
        # Delete temporary files, refusing anything outside our tempdir.
        for f in files:
            assert(f.startswith(self.tempdir))
            os.unlink(f)

    def _get_kcc(self, name, readonly=False, verify=False, dot_file_dir=None):
        # Build a KCC primed with the multi-site LDIF.
        # NOTE(review): the `name` parameter is currently unused.
        # Note that setting read-only to False won't affect the ldif,
        # only the temporary database that is created from it.
        my_kcc = KCC(unix_now, readonly=readonly, verify=verify,
                     dot_file_dir=dot_file_dir)
        tmpdb = os.path.join(self.tempdir, 'tmpdb')
        my_kcc.import_ldif(tmpdb, self.lp, MULTISITE_LDIF)
        self.remove_files(tmpdb)
        return my_kcc

    def test_list_dsas(self):
        # The KCC must see exactly the DCs listed in the LDIF.
        my_kcc = self._get_kcc('test-list')
        dsas = set(my_kcc.list_dsas())
        expected_dsas = set(x[0] for x in MULTISITE_LDIF_DSAS)
        self.assertEqual(dsas, expected_dsas)

    def test_verify(self):
        """Check that the KCC generates graphs that pass its own verify
        option.
        """
        my_kcc = self._get_kcc('test-verify', verify=True)
        tmpdb = os.path.join(self.tempdir, 'verify-tmpdb')
        my_kcc.import_ldif(tmpdb, self.lp, MULTISITE_LDIF)

        my_kcc.run(None,
                   self.lp, self.creds,
                   attempt_live_connections=False)
        self.remove_files(tmpdb)

    def test_unconnected_db(self):
        """Check that the KCC generates errors on an unconnected db
        """
        my_kcc = self._get_kcc('test-verify', verify=True)
        tmpdb = os.path.join(self.tempdir, 'verify-tmpdb')
        my_kcc.import_ldif(tmpdb, self.lp, UNCONNECTED_LDIF)

        try:
            my_kcc.run(None,
                       self.lp, self.creds,
                       attempt_live_connections=False)
        except samba.kcc.graph_utils.GraphError:
            # Expected: verification notices the graph is not connected.
            pass
        except Exception:
            self.fail("Did not expect this error.")
        finally:
            self.remove_files(tmpdb)

    def test_dotfiles(self):
        """Check that KCC writes dot_files when asked.
        """
        my_kcc = self._get_kcc('test-dotfiles', dot_file_dir=self.tempdir)
        tmpdb = os.path.join(self.tempdir, 'dotfile-tmpdb')
        files = [tmpdb]
        my_kcc.import_ldif(tmpdb, self.lp, MULTISITE_LDIF)
        my_kcc.run(None,
                   self.lp, self.creds,
                   attempt_live_connections=False)

        dot = '/usr/bin/dot'
        for fn in os.listdir(self.tempdir):
            if fn.endswith('.dot'):
                ffn = os.path.join(self.tempdir, fn)
                # If graphviz is installed, check the output parses.
                if os.path.exists(dot) and subprocess.call([dot, '-?']) == 0:
                    r = subprocess.call([dot, '-Tcanon', ffn])
                    self.assertEqual(r, 0)

                # even if dot is not there, at least check the file is non-empty
                size = os.stat(ffn).st_size
                self.assertNotEqual(size, 0)
                files.append(ffn)

        self.remove_files(*files)
diff --git a/python/samba/tests/krb5/alias_tests.py b/python/samba/tests/krb5/alias_tests.py
new file mode 100755
index 0000000..a6a3d03
--- /dev/null
+++ b/python/samba/tests/krb5/alias_tests.py
@@ -0,0 +1,202 @@
+#!/usr/bin/env python3
+# Unix SMB/CIFS implementation.
+# Copyright (C) Stefan Metzmacher 2020
+# Copyright (C) 2021 Catalyst.Net Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import sys
+import os
+
+sys.path.insert(0, 'bin/python')
+os.environ['PYTHONUNBUFFERED'] = '1'
+
+import ldb
+
+from samba.tests import delete_force
+import samba.tests.krb5.kcrypto as kcrypto
+from samba.tests.krb5.kdc_base_test import KDCBaseTest
+from samba.tests.krb5.rfc4120_constants import (
+ AES256_CTS_HMAC_SHA1_96,
+ ARCFOUR_HMAC_MD5,
+ KDC_ERR_TGT_REVOKED,
+ NT_PRINCIPAL,
+)
+
# Debug switches consumed by setUp(); may be flipped when the file is
# run directly via the __main__ block at the bottom.
global_asn1_print = False
global_hexdump = False
+
+
class AliasTests(KDCBaseTest):
    """S4U2Self requests naming an account that aliases a renamed or
    deleted machine account must fail with KDC_ERR_TGT_REVOKED."""

    def test_dc_alias_rename(self):
        self._run_dc_alias(action='rename')

    def test_dc_alias_delete(self):
        self._run_dc_alias(action='delete')

    def _run_dc_alias(self, action=None):
        # Alias against the DC's own account (strip the trailing '$').
        target_creds = self.get_dc_creds()
        target_name = target_creds.get_username()[:-1]

        self._run_alias(target_name, lambda: target_creds, action=action)

    def test_create_alias_rename(self):
        self._run_create_alias(action='rename')

    def test_create_alias_delete(self):
        self._run_create_alias(action='delete')

    def _run_create_alias(self, action=None):
        # Alias against a computer account that is only created (by the
        # create_target callback) after the rename/delete has happened.
        target_name = self.get_new_username()

        def create_target():
            samdb = self.get_samdb()

            realm = samdb.domain_dns_name().lower()

            hostname = f'{target_name}.{realm}'
            spn = f'ldap/{hostname}'

            details = {
                'dNSHostName': hostname
            }

            creds, fn = self.create_account(
                samdb,
                target_name,
                account_type=self.AccountType.COMPUTER,
                spn=spn,
                additional_details=details)

            return creds

        self._run_alias(target_name, create_target, action=action)

    def _run_alias(self, target_name, target_creds_fn, action=None):
        """Get a TGT for a machine account renamed to target_name, then
        rename or delete that account and check that an S4U2Self request
        made with the old TGT fails with KDC_ERR_TGT_REVOKED.

        :param target_creds_fn: callable returning the credentials of
            the target account (may create it on demand).
        :param action: 'rename' or 'delete'.
        """
        samdb = self.get_samdb()

        mach_name = self.get_new_username()

        # Create a machine account.
        mach_creds, mach_dn = self.create_account(
            samdb, mach_name, account_type=self.AccountType.COMPUTER)
        self.addCleanup(delete_force, samdb, mach_dn)

        mach_sid = mach_creds.get_sid()
        realm = mach_creds.get_realm()

        # The account salt doesn't change when the account is renamed.
        old_salt = mach_creds.get_salt()
        mach_creds.set_forced_salt(old_salt)

        # Rename the account to alias with the target account.
        msg = ldb.Message(ldb.Dn(samdb, mach_dn))
        msg['sAMAccountName'] = ldb.MessageElement(target_name,
                                                   ldb.FLAG_MOD_REPLACE,
                                                   'sAMAccountName')
        samdb.modify(msg)
        mach_creds.set_username(target_name)

        # Get a TGT for the machine account.
        tgt = self.get_tgt(mach_creds, kdc_options='0', fresh=True)

        # Check the PAC.
        pac_data = self.get_pac_data(tgt.ticket_private['authorization-data'])

        upn = f'{target_name}@{realm.lower()}'

        self.assertEqual(target_name, str(pac_data.account_name))
        self.assertEqual(mach_sid, pac_data.account_sid)
        self.assertEqual(target_name, pac_data.logon_name)
        self.assertEqual(upn, pac_data.upn)
        self.assertEqual(realm, pac_data.domain_name)

        # Rename or delete the machine account.
        if action == 'rename':
            mach_name2 = self.get_new_username()

            msg = ldb.Message(ldb.Dn(samdb, mach_dn))
            msg['sAMAccountName'] = ldb.MessageElement(mach_name2,
                                                       ldb.FLAG_MOD_REPLACE,
                                                       'sAMAccountName')
            samdb.modify(msg)
        elif action == 'delete':
            samdb.delete(mach_dn)
        else:
            self.fail(action)

        # Get the credentials for the target account.
        target_creds = target_creds_fn()

        # Look up the DNS host name of the target account.
        target_dn = target_creds.get_dn()
        res = samdb.search(target_dn,
                           scope=ldb.SCOPE_BASE,
                           attrs=['dNSHostName'])
        target_hostname = str(res[0].get('dNSHostName', idx=0))

        sname = self.PrincipalName_create(name_type=NT_PRINCIPAL,
                                          names=['ldap', target_hostname])
        target_cname = self.PrincipalName_create(name_type=NT_PRINCIPAL,
                                                 names=[target_name])

        target_decryption_key = self.TicketDecryptionKey_from_creds(
            target_creds)

        authenticator_subkey = self.RandomKey(kcrypto.Enctype.AES256)

        etypes = (AES256_CTS_HMAC_SHA1_96, ARCFOUR_HMAC_MD5)

        def generate_s4u2self_padata(_kdc_exchange_dict,
                                     _callback_dict,
                                     req_body):
            padata = self.PA_S4U2Self_create(name=target_cname,
                                             realm=realm,
                                             tgt_session_key=tgt.session_key,
                                             ctype=None)
            return [padata], req_body

        expected_error_mode = KDC_ERR_TGT_REVOKED

        # Make a request using S4U2Self. The request should fail.
        kdc_exchange_dict = self.tgs_exchange_dict(
            expected_crealm=realm,
            expected_cname=target_cname,
            expected_srealm=realm,
            expected_sname=sname,
            ticket_decryption_key=target_decryption_key,
            generate_padata_fn=generate_s4u2self_padata,
            expected_error_mode=expected_error_mode,
            check_error_fn=self.generic_check_kdc_error,
            check_kdc_private_fn=self.generic_check_kdc_private,
            tgt=tgt,
            authenticator_subkey=authenticator_subkey,
            kdc_options='0',
            expect_pac=True,
            expect_edata=False)

        rep = self._generic_kdc_exchange(kdc_exchange_dict,
                                         cname=None,
                                         realm=realm,
                                         sname=sname,
                                         etypes=etypes)
        self.check_error_rep(rep, expected_error_mode)
+
+
if __name__ == '__main__':
    # Allow running this test file directly with unittest.
    global_asn1_print = False
    global_hexdump = False
    import unittest
    unittest.main()
diff --git a/python/samba/tests/krb5/as_canonicalization_tests.py b/python/samba/tests/krb5/as_canonicalization_tests.py
new file mode 100755
index 0000000..dd94cb6
--- /dev/null
+++ b/python/samba/tests/krb5/as_canonicalization_tests.py
@@ -0,0 +1,474 @@
+#!/usr/bin/env python3
+# Unix SMB/CIFS implementation.
+#
+# Copyright (C) Catalyst IT Ltd. 2020
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import sys
+import os
+
+sys.path.insert(0, "bin/python")
+os.environ["PYTHONUNBUFFERED"] = "1"
+
+from enum import Enum, unique
+import pyasn1
+
+from samba.tests.krb5.kdc_base_test import KDCBaseTest
+import samba.tests.krb5.rfc4120_pyasn1 as krb5_asn1
+from samba.credentials import DONT_USE_KERBEROS
+from samba.dcerpc import krb5pac
+from samba.dcerpc.misc import SEC_CHAN_WKSTA
+from samba.ndr import ndr_unpack
+from samba.tests import DynamicTestCase
+from samba.tests.krb5.rfc4120_constants import (
+ AES256_CTS_HMAC_SHA1_96,
+ AES128_CTS_HMAC_SHA1_96,
+ ARCFOUR_HMAC_MD5,
+ KDC_ERR_PREAUTH_REQUIRED,
+ KRB_AS_REP,
+ KU_AS_REP_ENC_PART,
+ KRB_ERROR,
+ KU_PA_ENC_TIMESTAMP,
+ KU_TICKET,
+ PADATA_ENC_TIMESTAMP,
+ NT_ENTERPRISE_PRINCIPAL,
+ NT_PRINCIPAL,
+ NT_SRV_INST,
+)
+
# Debug switches consumed by setUp(); may be flipped when the file is
# run directly via the __main__ block at the bottom.
global_asn1_print = False
global_hexdump = False
+
+
@unique
class TestOptions(Enum):
    """Bit flags selecting the AS-REQ variations under test.

    Each member occupies its own bit so that an integer in
    ``range(TestOptions.Last.value)`` encodes one combination of
    options; ``Last`` is the exclusive upper bound of that range.
    """
    Canonicalize = 1 << 0
    Enterprise = 1 << 1
    UpperRealm = 1 << 2
    UpperUserName = 1 << 3
    NetbiosRealm = 1 << 4
    UPN = 1 << 5
    RemoveDollar = 1 << 6
    AsReqSelf = 1 << 7
    Last = 1 << 8

    def is_set(self, x):
        """Return a non-zero value iff this flag is present in bitmask *x*."""
        return x & self.value
+
+
@unique
class CredentialsType(Enum):
    """Which kind of account the dynamically generated test runs as."""
    User = 1
    Machine = 2

    def is_set(self, x):
        """Return a non-zero value iff this member's bit is present in *x*."""
        return x & self.value
+
+
class TestData:
    """Derived inputs (names, realm, principals) for one combination of
    TestOptions flags applied to one set of credentials."""

    def __init__(self, options, creds):
        self.options = options
        self.user_creds = creds
        self.user_name = self._get_username(options, creds)
        self.realm = self._get_realm(options, creds)

        # An enterprise principal carries user@realm in one component.
        if TestOptions.Enterprise.is_set(options):
            client_name_type = NT_ENTERPRISE_PRINCIPAL
        else:
            client_name_type = NT_PRINCIPAL

        self.cname = KDCBaseTest.PrincipalName_create(
            name_type=client_name_type, names=[self.user_name])
        if TestOptions.AsReqSelf.is_set(options):
            # AS-REQ to self: the service principal is the client itself.
            self.sname = self.cname
        else:
            self.sname = KDCBaseTest.PrincipalName_create(
                name_type=NT_SRV_INST, names=["krbtgt", self.realm])
        self.canonicalize = TestOptions.Canonicalize.is_set(options)

    def _get_realm(self, options, creds):
        """Return the realm in the case/form selected by *options*."""
        realm = creds.get_realm()
        if TestOptions.NetbiosRealm.is_set(options):
            realm = creds.get_domain()
        if TestOptions.UpperRealm.is_set(options):
            realm = realm.upper()
        else:
            realm = realm.lower()
        return realm

    def _get_username(self, options, creds):
        """Return the client name in the form selected by *options*."""
        name = creds.get_username()
        if TestOptions.RemoveDollar.is_set(options) and name.endswith("$"):
            name = name[:-1]
        if TestOptions.Enterprise.is_set(options):
            realm = creds.get_realm()
            name = "{0}@{1}".format(name, realm)
        if TestOptions.UpperUserName.is_set(options):
            name = name.upper()
        return name

    def __repr__(self):
        rep = "Test Data: "
        rep += "options = '" + "{:08b}".format(self.options) + "'"
        # Bug fix: the original omitted the ", " separator here, so the
        # user name field ran straight into the options field.
        rep += ", user name = '" + self.user_name + "'"
        rep += ", realm = '" + self.realm + "'"
        rep += ", cname = '" + str(self.cname) + "'"
        rep += ", sname = '" + str(self.sname) + "'"
        return rep
+
+
# Account names created once and cached on the test class, shared by
# all dynamically generated test cases.
MACHINE_NAME = "tstkrb5cnnmch"
USER_NAME = "tstkrb5cnnusr"
+
+
@DynamicTestCase
class KerberosASCanonicalizationTests(KDCBaseTest):
    """Dynamically generated AS-REQ tests covering every combination of
    TestOptions flags for both user and machine credentials."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # Credentials are created lazily and cached at class level.
        cls.user_creds = None
        cls.machine_creds = None

    @classmethod
    def setUpDynamicTestCases(cls):

        def skip(ct, options):
            """ Filter out any mutually exclusive test options """
            # RemoveDollar and AsReqSelf only make sense for machine
            # (trailing-'$') accounts.
            if ct != CredentialsType.Machine and\
               TestOptions.RemoveDollar.is_set(options):
                return True
            if ct != CredentialsType.Machine and\
               TestOptions.AsReqSelf.is_set(options):
                return True
            return False

        def build_test_name(ct, options):
            # e.g. "UserCredentials_Canonicalize_UpperRealm"
            name = "%sCredentials" % ct.name
            for opt in TestOptions:
                if opt.is_set(options):
                    name += ("_%s" % opt.name)
            return name

        # One test per credentials type per option bitmask.
        for ct in CredentialsType:
            for x in range(TestOptions.Last.value):
                if skip(ct, x):
                    continue
                name = build_test_name(ct, x)
                cls.generate_dynamic_test("test", name, x, ct)

    def user_account_creds(self):
        # Lazily create (and cache on the class) a normal user account.
        if self.user_creds is None:
            samdb = self.get_samdb()
            type(self).user_creds, _ = self.create_account(samdb, USER_NAME)

        return self.user_creds

    def machine_account_creds(self):
        # Lazily create (and cache on the class) a computer account.
        if self.machine_creds is None:
            samdb = self.get_samdb()
            type(self).machine_creds, _ = self.create_account(
                samdb,
                MACHINE_NAME,
                account_type=self.AccountType.COMPUTER)
            self.machine_creds.set_secure_channel_type(SEC_CHAN_WKSTA)
            self.machine_creds.set_kerberos_state(DONT_USE_KERBEROS)

        return self.machine_creds

    def setUp(self):
        super().setUp()
        self.do_asn1_print = global_asn1_print
        self.do_hexdump = global_hexdump

    def _test_with_args(self, x, ct):
        # Body shared by all generated tests: x is the TestOptions
        # bitmask, ct the CredentialsType.
        if ct == CredentialsType.User:
            creds = self.user_account_creds()
        elif ct == CredentialsType.Machine:
            creds = self.machine_account_creds()
        else:
            raise Exception("Unexpected credential type")
        data = TestData(x, creds)

        try:
            (rep, as_rep) = self.as_req(data)
        except pyasn1.error.PyAsn1Error as e:
            import traceback
            self.fail("ASN1 Error, Options {0:08b}:{1} {2}".format(
                data.options,
                traceback.format_exc(),
                e))
        # If as_req triggered an expected server error response
        # No need to test the response data.
        if rep is not None:
            # The kvno is optional, heimdal includes it
            # MIT does not.
            if 'kvno' in rep['enc-part']:
                kvno = rep['enc-part']['kvno']
                self.check_kvno(kvno, data)

            cname = rep['cname']
            self.check_cname(cname, data)

            crealm = rep['crealm'].decode('ascii')
            self.check_crealm(crealm, data)

            sname = as_rep['sname']
            self.check_sname(sname, data)

            srealm = as_rep['srealm'].decode('ascii')
            self.check_srealm(srealm, data)

            # Pick the key the ticket should be encrypted with: the
            # client's own key for AS-REQ-to-self, else the krbtgt key.
            if TestOptions.AsReqSelf.is_set(data.options):
                ticket_creds = creds
            else:
                ticket_creds = self.get_krbtgt_creds()
            ticket_key = self.TicketDecryptionKey_from_creds(ticket_creds)

            ticket_encpart = rep['ticket']['enc-part']
            self.assertElementEqual(ticket_encpart, 'etype',
                                    ticket_key.etype)
            self.assertElementEqual(ticket_encpart, 'kvno',
                                    ticket_key.kvno)
            ticket_decpart = ticket_key.decrypt(KU_TICKET,
                                                ticket_encpart['cipher'])
            ticket_private = self.der_decode(
                ticket_decpart,
                asn1Spec=krb5_asn1.EncTicketPart())

            pac_data = self.get_pac(ticket_private['authorization-data'])
            pac = ndr_unpack(krb5pac.PAC_DATA, pac_data)

            # The PAC logon name must be the canonical account name when
            # canonicalization was requested, else the name as sent.
            for pac_buffer in pac.buffers:
                if pac_buffer.type == krb5pac.PAC_TYPE_LOGON_NAME:
                    if TestOptions.Canonicalize.is_set(data.options):
                        expected = data.user_creds.get_username()
                    else:
                        expected = data.user_name

                    self.assertEqual(expected, pac_buffer.info.account_name)
                    break
            else:
                self.fail('PAC_TYPE_LOGON_NAME not found')

    def as_req(self, data):
        """Perform the two-step AS exchange: an initial AS-REQ expected
        to fail with PREAUTH_REQUIRED, then a second AS-REQ carrying an
        encrypted-timestamp padata.

        :return: (reply, decoded EncKDCRepPart) tuple.
        """
        user_creds = data.user_creds
        realm = data.realm

        cname = data.cname
        sname = data.sname

        till = self.get_KerberosTime(offset=36000)

        kdc_options = "0"
        if data.canonicalize:
            kdc_options = str(krb5_asn1.KDCOptions('canonicalize'))

        padata = None

        # Set the allowable encryption types
        etypes = (
            AES256_CTS_HMAC_SHA1_96,
            AES128_CTS_HMAC_SHA1_96,
            ARCFOUR_HMAC_MD5)

        req = self.AS_REQ_create(padata=padata,
                                 kdc_options=kdc_options,
                                 cname=cname,
                                 realm=realm,
                                 sname=sname,
                                 from_time=None,
                                 till_time=till,
                                 renew_time=None,
                                 nonce=0x7fffffff,
                                 etypes=etypes,
                                 addresses=None,
                                 additional_tickets=None)
        rep = self.send_recv_transaction(req)
        self.assertIsNotNone(rep)

        #
        # Check the protocol version, should be 5
        self.assertEqual(
            rep['pvno'], 5, "Data {0}".format(str(data)))

        self.assertEqual(
            rep['msg-type'], KRB_ERROR, "Data {0}".format(str(data)))

        self.assertEqual(
            rep['error-code'],
            KDC_ERR_PREAUTH_REQUIRED,
            "Error code {0}, Data {1}".format(rep['error-code'], str(data)))

        rep_padata = self.der_decode(
            rep['e-data'], asn1Spec=krb5_asn1.METHOD_DATA())

        # padata-type 19 is PA-ETYPE-INFO2.
        for pa in rep_padata:
            if pa['padata-type'] == 19:
                etype_info2 = pa['padata-value']
                break

        etype_info2 = self.der_decode(
            etype_info2, asn1Spec=krb5_asn1.ETYPE_INFO2())

        key = self.PasswordKey_from_etype_info2(user_creds, etype_info2[0])

        # Build the PA-ENC-TIMESTAMP pre-authentication data.
        (patime, pausec) = self.get_KerberosTimeWithUsec()
        pa_ts = self.PA_ENC_TS_ENC_create(patime, pausec)
        pa_ts = self.der_encode(pa_ts, asn1Spec=krb5_asn1.PA_ENC_TS_ENC())

        pa_ts = self.EncryptedData_create(key, KU_PA_ENC_TIMESTAMP, pa_ts)
        pa_ts = self.der_encode(pa_ts, asn1Spec=krb5_asn1.EncryptedData())

        pa_ts = self.PA_DATA_create(PADATA_ENC_TIMESTAMP, pa_ts)

        kdc_options = "0"
        if data.canonicalize:
            kdc_options = str(krb5_asn1.KDCOptions('canonicalize'))
        padata = [pa_ts]

        req = self.AS_REQ_create(padata=padata,
                                 kdc_options=kdc_options,
                                 cname=cname,
                                 realm=realm,
                                 sname=sname,
                                 from_time=None,
                                 till_time=till,
                                 renew_time=None,
                                 nonce=0x7fffffff,
                                 etypes=etypes,
                                 addresses=None,
                                 additional_tickets=None)
        rep = self.send_recv_transaction(req)
        self.assertIsNotNone(rep)

        #
        # Check the protocol version, should be 5
        self.assertEqual(
            rep['pvno'], 5, "Data {0}".format(str(data)))

        msg_type = rep['msg-type']
        # Should not have got an error.
        # If we did, fail and print the error code to help debugging
        self.assertNotEqual(
            msg_type,
            KRB_ERROR,
            "Error code {0}, Data {1}".format(
                rep.get('error-code', ''),
                str(data)))

        self.assertEqual(msg_type, KRB_AS_REP, "Data {0}".format(str(data)))

        # Decrypt and decode the EncKdcRepPart
        enc = key.decrypt(KU_AS_REP_ENC_PART, rep['enc-part']['cipher'])
        if enc[0] == 0x7A:
            # MIT Kerberos Tags the EncASRepPart as a EncKDCRepPart
            # i.e. tag number 26 instead of tag number 25
            as_rep = self.der_decode(enc, asn1Spec=krb5_asn1.EncTGSRepPart())
        else:
            as_rep = self.der_decode(enc, asn1Spec=krb5_asn1.EncASRepPart())

        return (rep, as_rep)

    def check_cname(self, cname, data):
        # Canonicalization always yields an NT_PRINCIPAL cname.
        if TestOptions.Canonicalize.is_set(data.options):
            expected_name_type = NT_PRINCIPAL
        elif TestOptions.Enterprise.is_set(data.options):
            expected_name_type = NT_ENTERPRISE_PRINCIPAL
        else:
            expected_name_type = NT_PRINCIPAL

        name_type = cname['name-type']
        self.assertEqual(
            expected_name_type,
            name_type,
            "cname name-type, Options {0:08b}".format(data.options))

        ns = cname['name-string']
        name = ns[0].decode('ascii')

        expected = data.user_name
        if TestOptions.Canonicalize.is_set(data.options):
            expected = data.user_creds.get_username()
        self.assertEqual(
            expected,
            name,
            "cname principal, Options {0:08b}".format(data.options))

    def check_crealm(self, crealm, data):
        realm = data.user_creds.get_realm()
        self.assertEqual(
            realm, crealm, "crealm, Options {0:08b}".format(data.options))

    def check_sname(self, sname, data):
        nt = sname['name-type']
        ns = sname['name-string']
        name = ns[0].decode('ascii')

        if TestOptions.AsReqSelf.is_set(data.options):
            # The service is the client account itself.
            expected_name_type = NT_PRINCIPAL
            if not TestOptions.Canonicalize.is_set(data.options)\
               and TestOptions.Enterprise.is_set(data.options):

                expected_name_type = NT_ENTERPRISE_PRINCIPAL

            self.assertEqual(
                expected_name_type,
                nt,
                "sname name-type, Options {0:08b}".format(data.options))
            expected = data.user_name
            if TestOptions.Canonicalize.is_set(data.options):
                expected = data.user_creds.get_username()
            self.assertEqual(
                expected,
                name,
                "sname principal, Options {0:08b}".format(data.options))
        else:
            # The service is the TGS: krbtgt/<realm>.
            self.assertEqual(
                NT_SRV_INST,
                nt,
                "sname name-type, Options {0:08b}".format(data.options))
            self.assertEqual(
                'krbtgt',
                name,
                "sname principal, Options {0:08b}".format(data.options))

            realm = ns[1].decode('ascii')
            expected = data.realm
            if TestOptions.Canonicalize.is_set(data.options):
                expected = data.user_creds.get_realm().upper()
            self.assertEqual(
                expected,
                realm,
                "sname realm, Options {0:08b}".format(data.options))

    def check_srealm(self, srealm, data):
        realm = data.user_creds.get_realm()
        self.assertEqual(
            realm, srealm, "srealm, Options {0:08b}".format(data.options))

    def check_kvno(self, kvno, data):
        # Freshly created accounts have key version number 1.
        self.assertEqual(
            1, kvno, "kvno, Options {0:08b}".format(data.options))
+
+
+if __name__ == "__main__":
+ global_asn1_print = False
+ global_hexdump = False
+ import unittest
+
+ unittest.main()
diff --git a/python/samba/tests/krb5/as_req_tests.py b/python/samba/tests/krb5/as_req_tests.py
new file mode 100755
index 0000000..4d0940c
--- /dev/null
+++ b/python/samba/tests/krb5/as_req_tests.py
@@ -0,0 +1,606 @@
+#!/usr/bin/env python3
+# Unix SMB/CIFS implementation.
+# Copyright (C) Stefan Metzmacher 2020
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import sys
+import os
+
+sys.path.insert(0, "bin/python")
+os.environ["PYTHONUNBUFFERED"] = "1"
+
+from samba import ntstatus
+from samba.tests import DynamicTestCase
+from samba.tests.krb5.kdc_base_test import KDCBaseTest
+import samba.tests.krb5.kcrypto as kcrypto
+import samba.tests.krb5.rfc4120_pyasn1 as krb5_asn1
+from samba.tests.krb5.rfc4120_constants import (
+ KDC_ERR_CLIENT_REVOKED,
+ KDC_ERR_C_PRINCIPAL_UNKNOWN,
+ KDC_ERR_S_PRINCIPAL_UNKNOWN,
+ KDC_ERR_ETYPE_NOSUPP,
+ KDC_ERR_PREAUTH_REQUIRED,
+ KU_PA_ENC_TIMESTAMP,
+ NT_ENTERPRISE_PRINCIPAL,
+ NT_PRINCIPAL,
+ NT_SRV_INST,
+ PADATA_ENC_TIMESTAMP
+)
+
# Debugging aids: when enabled, the tests print decoded ASN.1 structures
# and/or hexdumps of the raw Kerberos PDUs they exchange.
global_asn1_print = False
global_hexdump = False
+
+
class AsReqBaseTest(KDCBaseTest):
    """Base class for AS-REQ tests using encrypted-timestamp pre-auth."""

    def _run_as_req_enc_timestamp(self, client_creds, client_account=None,
                                  expected_cname=None, sname=None,
                                  name_type=NT_PRINCIPAL, etypes=None,
                                  expected_error=None, expect_edata=None,
                                  expected_pa_error=None, expect_pa_edata=None,
                                  expect_status=None,
                                  expect_pa_status=None,
                                  kdc_options=None, till=None):
        """Perform a two-step AS exchange with PA-ENC-TIMESTAMP pre-auth.

        The first AS-REQ carries no pre-authentication data and is expected
        to fail with KDC_ERR_PREAUTH_REQUIRED (or *expected_error* if
        given); the second repeats the request with an encrypted-timestamp
        padata derived from the client's password key.

        :param client_creds: credentials of the client account.
        :param client_account: client name to put in the request; defaults
            to the account's username.
        :param expected_cname: cname expected in the reply (defaults to the
            requested cname).
        :param sname: service principal to request; defaults to the
            two-component krbtgt/<realm> principal.
        :param name_type: Kerberos name-type for the client principal.
        :param etypes: encryption types to offer; defaults to the account's
            default set.
        :param expected_error: error code(s) expected from the initial,
            pre-auth-less request instead of KDC_ERR_PREAUTH_REQUIRED.
        :param expected_pa_error: error code(s) expected from the
            pre-authenticated request (default: success, i.e. AS-REP).
        :param expect_status: NT status expected in the e-data of the first
            error reply; *expect_pa_status* likewise for the second.
        :param kdc_options: KDC option flags; defaults to 'forwardable'.
        :param till: requested ticket end time; defaults to now + 10 hours.
        :return: the ETYPE-INFO2 from the pre-auth error reply, or None if
            the KDC did not ask for pre-authentication.
        """
        user_name = client_creds.get_username()
        if client_account is None:
            client_account = user_name
        client_kvno = client_creds.get_kvno()
        krbtgt_creds = self.get_krbtgt_creds(require_strongest_key=True)
        krbtgt_account = krbtgt_creds.get_username()
        krbtgt_supported_etypes = krbtgt_creds.tgs_supported_enctypes
        realm = krbtgt_creds.get_realm()

        # A '/' in the client account produces a multi-component principal.
        cname = self.PrincipalName_create(name_type=name_type,
                                          names=client_account.split('/'))
        if sname is None:
            sname = self.PrincipalName_create(name_type=NT_SRV_INST,
                                              names=[krbtgt_account, realm])

        expected_crealm = realm
        if expected_cname is None:
            expected_cname = cname
        expected_srealm = realm
        expected_sname = sname
        expected_salt = client_creds.get_salt()

        if till is None:
            till = self.get_KerberosTime(offset=36000)

        if etypes is None:
            etypes = self.get_default_enctypes(client_creds)
        if kdc_options is None:
            kdc_options = krb5_asn1.KDCOptions('forwardable')
        if expected_error is not None:
            initial_error_mode = expected_error
        else:
            initial_error_mode = KDC_ERR_PREAUTH_REQUIRED

        # First exchange: no padata; expect an error directing us to
        # pre-authenticate (or the caller-specified error).
        rep, kdc_exchange_dict = self._test_as_exchange(
            cname,
            realm,
            sname,
            till,
            initial_error_mode,
            expected_crealm,
            expected_cname,
            expected_srealm,
            expected_sname,
            expected_salt,
            etypes,
            None,
            kdc_options,
            creds=client_creds,
            expected_supported_etypes=krbtgt_supported_etypes,
            expected_account_name=user_name,
            pac_request=True,
            expect_edata=expect_edata,
            expected_status=expect_status)

        # If the KDC returned some other (caller-expected) error, there is
        # no pre-auth step to perform.
        if rep['error-code'] != KDC_ERR_PREAUTH_REQUIRED:
            return None

        etype_info2 = kdc_exchange_dict['preauth_etype_info2']
        self.assertIsNotNone(etype_info2)

        # Derive the pre-auth key from the client password and the salt
        # advertised in the first ETYPE-INFO2 entry.
        preauth_key = self.PasswordKey_from_etype_info2(client_creds,
                                                        etype_info2[0],
                                                        kvno=client_kvno)

        # Build, DER-encode and encrypt the PA-ENC-TIMESTAMP padata.
        (patime, pausec) = self.get_KerberosTimeWithUsec()
        pa_ts = self.PA_ENC_TS_ENC_create(patime, pausec)
        pa_ts = self.der_encode(pa_ts, asn1Spec=krb5_asn1.PA_ENC_TS_ENC())

        enc_pa_ts_usage = KU_PA_ENC_TIMESTAMP
        pa_ts = self.EncryptedData_create(preauth_key, enc_pa_ts_usage, pa_ts)
        pa_ts = self.der_encode(pa_ts, asn1Spec=krb5_asn1.EncryptedData())

        pa_ts = self.PA_DATA_create(PADATA_ENC_TIMESTAMP, pa_ts)

        preauth_padata = [pa_ts]
        preauth_error_mode = 0  # AS-REP
        if expected_pa_error is not None:
            preauth_error_mode = expected_pa_error

        krbtgt_decryption_key = (
            self.TicketDecryptionKey_from_creds(krbtgt_creds))

        # Second exchange: repeat the AS-REQ with the encrypted timestamp.
        as_rep, kdc_exchange_dict = self._test_as_exchange(
            cname,
            realm,
            sname,
            till,
            preauth_error_mode,
            expected_crealm,
            expected_cname,
            expected_srealm,
            expected_sname,
            expected_salt,
            etypes,
            preauth_padata,
            kdc_options,
            expected_supported_etypes=krbtgt_supported_etypes,
            expected_account_name=user_name,
            expect_edata=expect_pa_edata,
            expected_status=expect_pa_status,
            preauth_key=preauth_key,
            ticket_decryption_key=krbtgt_decryption_key,
            pac_request=True)
        self.assertIsNotNone(as_rep)

        return etype_info2
+
+
@DynamicTestCase
class AsReqKerberosTests(AsReqBaseTest):
    """AS-REQ tests covering enctype negotiation, principal name forms,
    enterprise-name canonicalization and account restrictions."""

    @classmethod
    def setUpDynamicTestCases(cls):
        # Generate a no-pre-auth test for every enctype permutation,
        # crossed with each possible PAC-request setting.
        for (name, idx) in cls.etype_test_permutation_name_idx():
            for pac in [None, True, False]:
                tname = "%s_pac_%s" % (name, pac)
                targs = (idx, pac)
                cls.generate_dynamic_test("test_as_req_no_preauth", tname, *targs)

    def setUp(self):
        super().setUp()
        self.do_asn1_print = global_asn1_print
        self.do_hexdump = global_hexdump

    def _test_as_req_nopreauth(self,
                               initial_etypes,
                               pac=None,
                               initial_kdc_options=None):
        """Send one AS-REQ without pre-authentication.

        Expect KDC_ERR_PREAUTH_REQUIRED when at least one offered enctype
        is supported for the client, else KDC_ERR_ETYPE_NOSUPP.
        """
        client_creds = self.get_client_creds()
        client_account = client_creds.get_username()
        krbtgt_creds = self.get_krbtgt_creds(require_keys=False)
        krbtgt_account = krbtgt_creds.get_username()
        realm = krbtgt_creds.get_realm()

        cname = self.PrincipalName_create(name_type=NT_PRINCIPAL,
                                          names=[client_account])
        sname = self.PrincipalName_create(name_type=NT_SRV_INST,
                                          names=[krbtgt_account, realm])

        expected_crealm = realm
        expected_cname = cname
        expected_srealm = realm
        expected_sname = sname
        expected_salt = client_creds.get_salt()

        if any(etype in initial_etypes
               for etype in self.get_default_enctypes(client_creds)):
            expected_error_mode = KDC_ERR_PREAUTH_REQUIRED
        else:
            expected_error_mode = KDC_ERR_ETYPE_NOSUPP

        kdc_exchange_dict = self.as_exchange_dict(
            creds=client_creds,
            expected_crealm=expected_crealm,
            expected_cname=expected_cname,
            expected_srealm=expected_srealm,
            expected_sname=expected_sname,
            generate_padata_fn=None,
            check_error_fn=self.generic_check_kdc_error,
            check_rep_fn=None,
            expected_error_mode=expected_error_mode,
            expected_salt=expected_salt,
            kdc_options=str(initial_kdc_options),
            pac_request=pac)

        self._generic_kdc_exchange(kdc_exchange_dict,
                                   cname=cname,
                                   realm=realm,
                                   sname=sname,
                                   etypes=initial_etypes)

    def _test_as_req_no_preauth_with_args(self, etype_idx, pac):
        # Target of the dynamically generated tests; 'name' is unused here.
        name, etypes = self.etype_test_permutation_by_idx(etype_idx)
        self._test_as_req_nopreauth(
            pac=pac,
            initial_etypes=etypes,
            initial_kdc_options=krb5_asn1.KDCOptions('forwardable'))

    # Basic encrypted-timestamp exchanges for user and machine accounts,
    # with various offered enctype combinations.
    def test_as_req_enc_timestamp(self):
        client_creds = self.get_client_creds()
        self._run_as_req_enc_timestamp(client_creds)

    def test_as_req_enc_timestamp_mac(self):
        client_creds = self.get_mach_creds()
        self._run_as_req_enc_timestamp(client_creds)

    def test_as_req_enc_timestamp_rc4(self):
        client_creds = self.get_client_creds()
        self._run_as_req_enc_timestamp(
            client_creds,
            etypes=(kcrypto.Enctype.RC4,))

    def test_as_req_enc_timestamp_mac_rc4(self):
        client_creds = self.get_mach_creds()
        self._run_as_req_enc_timestamp(
            client_creds,
            etypes=(kcrypto.Enctype.RC4,))

    # -1111 is a deliberately unknown enctype offered alongside RC4.
    def test_as_req_enc_timestamp_rc4_dummy(self):
        client_creds = self.get_client_creds()
        self._run_as_req_enc_timestamp(
            client_creds,
            etypes=(kcrypto.Enctype.RC4,
                    -1111))

    def test_as_req_enc_timestamp_mac_rc4_dummy(self):
        client_creds = self.get_mach_creds()
        self._run_as_req_enc_timestamp(
            client_creds,
            etypes=(kcrypto.Enctype.RC4,
                    -1111))

    def test_as_req_enc_timestamp_aes128_rc4(self):
        client_creds = self.get_client_creds()
        self._run_as_req_enc_timestamp(
            client_creds,
            etypes=(kcrypto.Enctype.AES128,
                    kcrypto.Enctype.RC4))

    def test_as_req_enc_timestamp_mac_aes128_rc4(self):
        client_creds = self.get_mach_creds()
        self._run_as_req_enc_timestamp(
            client_creds,
            etypes=(kcrypto.Enctype.AES128,
                    kcrypto.Enctype.RC4))

    # An SPN is not a valid client name unless a matching UPN exists, so
    # most of these expect CLIENT_PRINCIPAL_UNKNOWN.
    def test_as_req_enc_timestamp_spn(self):
        client_creds = self.get_mach_creds()
        spn = client_creds.get_spn()
        self._run_as_req_enc_timestamp(
            client_creds, client_account=spn,
            expected_error=KDC_ERR_C_PRINCIPAL_UNKNOWN,
            expect_edata=False)

    def test_as_req_enc_timestamp_spn_realm(self):
        samdb = self.get_samdb()
        realm = samdb.domain_dns_name().upper()

        client_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER,
            opts={'upn': f'host/{{account}}.{realm}@{realm}'})
        spn = client_creds.get_spn()
        self._run_as_req_enc_timestamp(
            client_creds, client_account=spn,
            expected_error=KDC_ERR_C_PRINCIPAL_UNKNOWN,
            expect_edata=False)

    def test_as_req_enc_timestamp_spn_upn(self):
        # Here the UPN matches the SPN, so the request succeeds.
        samdb = self.get_samdb()
        realm = samdb.domain_dns_name().upper()

        client_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER,
            opts={'upn': f'host/{{account}}.{realm}@{realm}',
                  'spn': f'host/{{account}}.{realm}'})
        spn = client_creds.get_spn()
        self._run_as_req_enc_timestamp(client_creds, client_account=spn)

    def test_as_req_enc_timestamp_spn_enterprise(self):
        client_creds = self.get_mach_creds()
        spn = client_creds.get_spn()
        self._run_as_req_enc_timestamp(
            client_creds, client_account=spn,
            name_type=NT_ENTERPRISE_PRINCIPAL,
            expected_error=KDC_ERR_C_PRINCIPAL_UNKNOWN,
            expect_edata=False)

    def test_as_req_enc_timestamp_spn_enterprise_realm(self):
        samdb = self.get_samdb()
        realm = samdb.domain_dns_name().upper()

        client_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER,
            opts={'upn': f'host/{{account}}.{realm}@{realm}'})
        spn = client_creds.get_spn()
        self._run_as_req_enc_timestamp(
            client_creds,
            name_type=NT_ENTERPRISE_PRINCIPAL,
            client_account=spn,
            expected_error=KDC_ERR_C_PRINCIPAL_UNKNOWN,
            expect_edata=False)

    def test_as_req_enc_timestamp_spn_upn_enterprise(self):
        samdb = self.get_samdb()
        realm = samdb.domain_dns_name().upper()

        client_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER,
            opts={'upn': f'host/{{account}}.{realm}@{realm}',
                  'spn': f'host/{{account}}.{realm}'})
        spn = client_creds.get_spn()
        self._run_as_req_enc_timestamp(
            client_creds,
            name_type=NT_ENTERPRISE_PRINCIPAL,
            client_account=spn,
            expected_error=KDC_ERR_C_PRINCIPAL_UNKNOWN,
            expect_edata=False)

    # Enterprise (user@realm) client names, with and without the
    # canonicalize KDC option, for user and machine accounts.
    def test_as_req_enterprise_canon(self):
        upn = self.get_new_username()
        client_creds = self.get_cached_creds(
            account_type=self.AccountType.USER,
            opts={'upn': upn})

        user_name = client_creds.get_username()
        realm = client_creds.get_realm()
        client_account = f'{user_name}@{realm}'

        expected_cname = self.PrincipalName_create(
            name_type=NT_PRINCIPAL,
            names=[user_name])

        self._run_as_req_enc_timestamp(
            client_creds,
            client_account=client_account,
            expected_cname=expected_cname,
            name_type=NT_ENTERPRISE_PRINCIPAL,
            kdc_options=krb5_asn1.KDCOptions('canonicalize'))

    def test_as_req_enterprise_canon_case(self):
        upn = self.get_new_username()
        client_creds = self.get_cached_creds(
            account_type=self.AccountType.USER,
            opts={'upn': upn})

        user_name = client_creds.get_username()
        realm = client_creds.get_realm().lower()
        client_account = f'{user_name}@{realm}'

        expected_cname = self.PrincipalName_create(
            name_type=NT_PRINCIPAL,
            names=[user_name])

        self._run_as_req_enc_timestamp(
            client_creds,
            client_account=client_account,
            expected_cname=expected_cname,
            name_type=NT_ENTERPRISE_PRINCIPAL,
            kdc_options=krb5_asn1.KDCOptions('canonicalize'))

    def test_as_req_enterprise_canon_mac(self):
        upn = self.get_new_username()
        client_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER,
            opts={'upn': upn})

        user_name = client_creds.get_username()
        realm = client_creds.get_realm()
        client_account = f'{user_name}@{realm}'

        expected_cname = self.PrincipalName_create(
            name_type=NT_PRINCIPAL,
            names=[user_name])

        self._run_as_req_enc_timestamp(
            client_creds,
            client_account=client_account,
            expected_cname=expected_cname,
            name_type=NT_ENTERPRISE_PRINCIPAL,
            kdc_options=krb5_asn1.KDCOptions('canonicalize'))

    def test_as_req_enterprise_canon_mac_case(self):
        upn = self.get_new_username()
        client_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER,
            opts={'upn': upn})

        user_name = client_creds.get_username()
        realm = client_creds.get_realm().lower()
        client_account = f'{user_name}@{realm}'

        expected_cname = self.PrincipalName_create(
            name_type=NT_PRINCIPAL,
            names=[user_name])

        self._run_as_req_enc_timestamp(
            client_creds,
            client_account=client_account,
            expected_cname=expected_cname,
            name_type=NT_ENTERPRISE_PRINCIPAL,
            kdc_options=krb5_asn1.KDCOptions('canonicalize'))

    def test_as_req_enterprise_no_canon(self):
        upn = self.get_new_username()
        client_creds = self.get_cached_creds(
            account_type=self.AccountType.USER,
            opts={'upn': upn})

        user_name = client_creds.get_username()
        realm = client_creds.get_realm()
        client_account = f'{user_name}@{realm}'

        self._run_as_req_enc_timestamp(
            client_creds,
            client_account=client_account,
            name_type=NT_ENTERPRISE_PRINCIPAL,
            kdc_options=0)

    def test_as_req_enterprise_no_canon_case(self):
        upn = self.get_new_username()
        client_creds = self.get_cached_creds(
            account_type=self.AccountType.USER,
            opts={'upn': upn})

        user_name = client_creds.get_username()
        realm = client_creds.get_realm().lower()
        client_account = f'{user_name}@{realm}'

        self._run_as_req_enc_timestamp(
            client_creds,
            client_account=client_account,
            name_type=NT_ENTERPRISE_PRINCIPAL,
            kdc_options=0)

    def test_as_req_enterprise_no_canon_mac(self):
        upn = self.get_new_username()
        client_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER,
            opts={'upn': upn})

        user_name = client_creds.get_username()
        realm = client_creds.get_realm()
        client_account = f'{user_name}@{realm}'

        self._run_as_req_enc_timestamp(
            client_creds,
            client_account=client_account,
            name_type=NT_ENTERPRISE_PRINCIPAL,
            kdc_options=0)

    def test_as_req_enterprise_no_canon_mac_case(self):
        upn = self.get_new_username()
        client_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER,
            opts={'upn': upn})

        user_name = client_creds.get_username()
        realm = client_creds.get_realm().lower()
        client_account = f'{user_name}@{realm}'

        self._run_as_req_enc_timestamp(
            client_creds,
            client_account=client_account,
            name_type=NT_ENTERPRISE_PRINCIPAL,
            kdc_options=0)

    # Ensure we can't use truncated well-known principals such as krb@REALM
    # instead of krbtgt@REALM.
    def test_krbtgt_wrong_principal(self):
        client_creds = self.get_client_creds()

        krbtgt_creds = self.get_krbtgt_creds()

        krbtgt_account = krbtgt_creds.get_username()
        realm = krbtgt_creds.get_realm()

        # Truncate the name of the krbtgt principal.
        krbtgt_account = krbtgt_account[:3]

        wrong_krbtgt_princ = self.PrincipalName_create(
            name_type=NT_SRV_INST,
            names=[krbtgt_account, realm])

        if self.strict_checking:
            self._run_as_req_enc_timestamp(
                client_creds,
                sname=wrong_krbtgt_princ,
                expected_pa_error=KDC_ERR_S_PRINCIPAL_UNKNOWN,
                expect_pa_edata=False)
        else:
            self._run_as_req_enc_timestamp(
                client_creds,
                sname=wrong_krbtgt_princ,
                expected_error=KDC_ERR_S_PRINCIPAL_UNKNOWN)

    def test_krbtgt_single_component_krbtgt(self):
        """Test that we can make a request to the single‐component krbtgt
        principal."""

        client_creds = self.get_client_creds()

        # Create a krbtgt principal with a single component.
        single_component_krbtgt_principal = self.PrincipalName_create(
            name_type=NT_SRV_INST,
            names=['krbtgt'])

        self._run_as_req_enc_timestamp(
            client_creds,
            sname=single_component_krbtgt_principal,
            # Don’t ask for canonicalization.
            kdc_options=0)

    # Test that we can make a request for a ticket expiring post-2038.
    def test_future_till(self):
        client_creds = self.get_client_creds()

        self._run_as_req_enc_timestamp(
            client_creds,
            till='99990913024805Z')

    def test_logon_hours(self):
        """Test making an AS-REQ with a logonHours attribute that disallows
        logging in."""

        # bytes(21) means no hour of any day is an allowed logon time.
        client_creds = self.get_cached_creds(
            account_type=self.AccountType.USER,
            opts={'logon_hours': bytes(21)})

        # Expect to get a CLIENT_REVOKED error.
        self._run_as_req_enc_timestamp(
            client_creds,
            expected_error=(KDC_ERR_CLIENT_REVOKED, KDC_ERR_PREAUTH_REQUIRED),
            expect_status=ntstatus.NT_STATUS_INVALID_LOGON_HOURS,
            expected_pa_error=KDC_ERR_CLIENT_REVOKED,
            expect_pa_status=ntstatus.NT_STATUS_INVALID_LOGON_HOURS)

    def test_logon_hours_wrong_password(self):
        """Test making an AS-REQ with a wrong password and a logonHours
        attribute that disallows logging in."""

        # Use a non-cached account so that it is not locked out for other
        # tests.
        client_creds = self.get_cached_creds(
            account_type=self.AccountType.USER,
            opts={'logon_hours': bytes(21)},
            use_cache=False)

        client_creds.set_password('wrong password')

        # Expect to get a CLIENT_REVOKED error.
        self._run_as_req_enc_timestamp(
            client_creds,
            expected_error=(KDC_ERR_CLIENT_REVOKED, KDC_ERR_PREAUTH_REQUIRED),
            expect_status=ntstatus.NT_STATUS_INVALID_LOGON_HOURS,
            expected_pa_error=KDC_ERR_CLIENT_REVOKED,
            expect_pa_status=ntstatus.NT_STATUS_INVALID_LOGON_HOURS)

    def test_as_req_unicode(self):
        # Exercise a non-ASCII account name.
        client_creds = self.get_cached_creds(
            account_type=self.AccountType.USER,
            opts={'name_prefix': '🔐'})
        self._run_as_req_enc_timestamp(client_creds)
+
+
if __name__ == "__main__":
    # Allow running this test file directly.  The module-level flags control
    # optional ASN.1 pretty-printing and hexdumps of the raw exchanges.
    global_asn1_print = False
    global_hexdump = False
    import unittest
    unittest.main()
+
diff --git a/python/samba/tests/krb5/authn_policy_tests.py b/python/samba/tests/krb5/authn_policy_tests.py
new file mode 100755
index 0000000..43db839
--- /dev/null
+++ b/python/samba/tests/krb5/authn_policy_tests.py
@@ -0,0 +1,8903 @@
+#!/usr/bin/env python3
+# Unix SMB/CIFS implementation.
+# Copyright (C) Stefan Metzmacher 2020
+# Copyright (C) Catalyst.Net Ltd 2023
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import sys
+import os
+
+sys.path.insert(0, 'bin/python')
+os.environ['PYTHONUNBUFFERED'] = '1'
+
+from datetime import datetime
+from enum import Enum
+import random
+import re
+
+import ldb
+
+from samba import dsdb, ntstatus
+from samba.dcerpc import netlogon, security
+from samba.dcerpc import windows_event_ids as win_event
+from samba.ndr import ndr_pack
+from samba.netcmd.domain.models import AuthenticationPolicy, AuthenticationSilo
+
+import samba.tests
+import samba.tests.krb5.kcrypto as kcrypto
+from samba.hresult import HRES_SEC_E_INVALID_TOKEN, HRES_SEC_E_LOGON_DENIED
+from samba.tests.krb5.kdc_base_test import GroupType
+from samba.tests.krb5.kdc_tgs_tests import KdcTgsBaseTests
+from samba.tests.auth_log_base import AuthLogTestBase, NoMessageException
+from samba.tests.krb5.raw_testcase import RawKerberosTest
+from samba.tests.krb5.rfc4120_constants import (
+ FX_FAST_ARMOR_AP_REQUEST,
+ KDC_ERR_BADOPTION,
+ KDC_ERR_GENERIC,
+ KDC_ERR_NEVER_VALID,
+ KDC_ERR_POLICY,
+ NT_PRINCIPAL,
+ NT_SRV_INST,
+ PADATA_FX_FAST,
+)
+import samba.tests.krb5.rfc4120_pyasn1 as krb5_asn1
+
# Convenience alias for the SID-type enumeration used throughout.
SidType = RawKerberosTest.SidType

# Debugging aids: when enabled, the tests print decoded ASN.1 structures
# and/or hexdumps of the raw Kerberos PDUs they exchange.
global_asn1_print = False
global_hexdump = False


# Expected version numbers of the JSON audit messages.
AUTHN_VERSION = {'major': 1, 'minor': 3}
AUTHZ_VERSION = {'major': 1, 'minor': 2}
KDC_AUTHZ_VERSION = {'major': 1, 'minor': 0}
+
+
class AuditType(Enum):
    """The 'type' field of a JSON audit message."""
    AUTHN = 'Authentication'
    AUTHZ = 'Authorization'
    KDC_AUTHZ = 'KDC Authorization'
+
+
class AuditEvent(Enum):
    """Event classification carried in audit messages."""
    OK = 'OK'
    KERBEROS_DEVICE_RESTRICTION = 'KERBEROS_DEVICE_RESTRICTION'
    KERBEROS_SERVER_RESTRICTION = 'KERBEROS_SERVER_RESTRICTION'
    NTLM_DEVICE_RESTRICTION = 'NTLM_DEVICE_RESTRICTION'
    NTLM_SERVER_RESTRICTION = 'NTLM_SERVER_RESTRICTION'
    OTHER_ERROR = 'OTHER_ERROR'
+
+
class AuditReason(Enum):
    """Reason classification carried in audit messages (None if absent)."""
    NONE = None
    DESCRIPTOR_INVALID = 'DESCRIPTOR_INVALID'
    DESCRIPTOR_NO_OWNER = 'DESCRIPTOR_NO_OWNER'
    SECURITY_TOKEN_FAILURE = 'SECURITY_TOKEN_FAILURE'
    ACCESS_DENIED = 'ACCESS_DENIED'
    FAST_REQUIRED = 'FAST_REQUIRED'
+
+
# This decorator helps reduce boilerplate code in log-checking methods.
def policy_check_fn(fn):
    """Decorator normalizing client/server policy status arguments.

    The wrapper validates that at most one of the client/server policy
    statuses is given, computes the overall expected status (a policy's
    status only affects the overall result when that policy is enforced),
    and derives per-policy event/reason values (OK/NONE when the policy's
    status is success) before delegating to the wrapped method.
    """
    def wrapper_fn(self, client_creds, *,
                   client_policy=None,
                   client_policy_status=None,
                   server_policy=None,
                   server_policy_status=None,
                   status=None,
                   event=AuditEvent.OK,
                   reason=AuditReason.NONE,
                   **kwargs):
        # Sanity-check the combination of supplied arguments.
        if client_policy_status is not None:
            self.assertIsNotNone(client_policy,
                                 'specified client policy status without '
                                 'client policy')

            self.assertIsNone(
                server_policy_status,
                'don’t specify both client policy status and server policy '
                'status (at most one of which can appear in the logs)')
        elif server_policy_status is not None:
            self.assertIsNotNone(server_policy,
                                 'specified server policy status without '
                                 'server policy')
        elif client_policy is not None and server_policy is not None:
            self.assertIsNone(status,
                              'ambiguous: specify a client policy status or a '
                              'server policy status')

        # The overall status defaults to success, then is overridden by
        # whichever enforced policy reported a failure.
        overall_status = status
        if overall_status is None:
            overall_status = ntstatus.NT_STATUS_OK

        if client_policy_status is None:
            client_policy_status = ntstatus.NT_STATUS_OK
        elif status is None and client_policy.enforced:
            overall_status = client_policy_status

        if server_policy_status is None:
            server_policy_status = ntstatus.NT_STATUS_OK
        elif status is None and server_policy.enforced:
            overall_status = server_policy_status

        # Event/reason apply only to the policy whose status is non-zero.
        if client_policy_status:
            client_policy_event = event
            client_policy_reason = reason
        else:
            client_policy_event = AuditEvent.OK
            client_policy_reason = AuditReason.NONE

        if server_policy_status:
            server_policy_event = event
            server_policy_reason = reason
        else:
            server_policy_event = AuditEvent.OK
            server_policy_reason = AuditReason.NONE

        return fn(self, client_creds,
                  client_policy=client_policy,
                  client_policy_status=client_policy_status,
                  client_policy_event=client_policy_event,
                  client_policy_reason=client_policy_reason,
                  server_policy=server_policy,
                  server_policy_status=server_policy_status,
                  server_policy_event=server_policy_event,
                  server_policy_reason=server_policy_reason,
                  overall_status=overall_status,
                  **kwargs)

    return wrapper_fn
+
+
+class AuthnPolicyBaseTests(AuthLogTestBase, KdcTgsBaseTests):
    @classmethod
    def setUpClass(cls):
        """Read environment knobs describing the server's logging support."""
        super().setUpClass()

        # Whether the server emits authentication log messages for AS-REQs.
        as_req_logging_support = samba.tests.env_get_var_value(
            'AS_REQ_LOGGING_SUPPORT',
            allow_missing=False)
        cls.as_req_logging_support = bool(int(as_req_logging_support))

        # Whether the server emits authorization log messages for TGS-REQs.
        tgs_req_logging_support = samba.tests.env_get_var_value(
            'TGS_REQ_LOGGING_SUPPORT',
            allow_missing=False)
        cls.tgs_req_logging_support = bool(int(tgs_req_logging_support))

        # Lazily populated by _fetch_default_lifetimes().
        cls._max_ticket_life = None
        cls._max_renew_life = None
+
+ def take(self, n, iterable, *, take_all=True):
+ """Yield n items from an iterable."""
+ i = -1
+ for i in range(n):
+ try:
+ yield next(iterable)
+ except StopIteration:
+ self.fail(f'expected to find element{i}')
+
+ if take_all:
+ with self.assertRaises(
+ StopIteration,
+ msg=f'got unexpected element after {i+1} elements'):
+ next(iterable)
+
+ def take_pairs(self, n, iterable, *, take_all=True):
+ """Yield n pairs of items from an iterable."""
+ i = -1
+ for i in range(n):
+ try:
+ yield next(iterable), next(iterable)
+ except StopIteration:
+ self.fail(f'expected to find pair of elements {i}')
+
+ if take_all:
+ with self.assertRaises(
+ StopIteration,
+ msg=f'got unexpected element after {i+1} pairs'):
+ next(iterable)
+
+ def get_max_ticket_life(self):
+ if self._max_ticket_life is None:
+ self._fetch_default_lifetimes()
+
+ return self._max_ticket_life
+
+ def get_max_renew_life(self):
+ if self._max_renew_life is None:
+ self._fetch_default_lifetimes()
+
+ return self._max_renew_life
+
    def _fetch_default_lifetimes(self):
        """Fetch the domain's maximum ticket/renew lifetimes from the
        Default Domain Policy and cache them (in seconds) on the class."""
        samdb = self.get_samdb()

        domain_policy_dn = samdb.get_default_basedn()
        domain_policy_dn.add_child('CN=Default Domain Policy,CN=System')

        res = samdb.search(domain_policy_dn,
                           scope=ldb.SCOPE_BASE,
                           attrs=['maxTicketAge', 'maxRenewAge'])
        self.assertEqual(1, len(res))

        max_ticket_age = res[0].get('maxTicketAge', idx=0)
        max_renew_age = res[0].get('maxRenewAge', idx=0)

        # Fall back to the defaults (10 hours / 7 days) when unset.
        if max_ticket_age is not None:
            max_ticket_age = int(max_ticket_age.decode('utf-8'))
        else:
            max_ticket_age = 10

        if max_renew_age is not None:
            max_renew_age = int(max_renew_age.decode('utf-8'))
        else:
            max_renew_age = 7

        # maxTicketAge is in hours, maxRenewAge in days; store seconds on
        # the class so all test instances share the cached values.
        type(self)._max_ticket_life = max_ticket_age * 60 * 60
        type(self)._max_renew_life = max_renew_age * 24 * 60 * 60
+
    # Get account credentials for testing.
    def _get_creds(self,
                   account_type=KdcTgsBaseTests.AccountType.USER,
                   member_of=None,
                   protected=False,
                   assigned_policy=None,
                   assigned_silo=None,
                   ntlm=False,
                   spn=None,
                   allowed_rodc=None,
                   additional_details=None,
                   cached=None):
        """Create (or fetch a cached) test account's credentials.

        :param protected: also add the account to Protected Users.
        :param assigned_policy: authentication policy to assign.
        :param assigned_silo: authentication silo to assign.
        :param ntlm: disable Kerberos on the account (NTLM-only).
        :param allowed_rodc: mark the account as revealed to the mock RODC.
        :param cached: reuse a previously created matching account; defaults
            to True only when no policy or silo is assigned.
        """
        if cached is None:
            # Policies and silos are rarely reused between accounts.
            cached = assigned_policy is None and assigned_silo is None

        opts = {
            'kerberos_enabled': not ntlm,
            'spn': spn,
        }

        members = ()
        if protected:
            samdb = self.get_samdb()
            protected_users_group = (f'<SID={samdb.get_domain_sid()}-'
                                     f'{security.DOMAIN_RID_PROTECTED_USERS}>')
            members += (protected_users_group,)
        if member_of is not None:
            members += (member_of,)
        if assigned_policy is not None:
            opts['assigned_policy'] = str(assigned_policy.dn)
        if assigned_silo is not None:
            opts['assigned_silo'] = str(assigned_silo.dn)
        if allowed_rodc:
            opts['allowed_replication_mock'] = True
            opts['revealed_to_mock_rodc'] = True
        if additional_details is not None:
            opts['additional_details'] = self.freeze(additional_details)

        if members:
            opts['member_of'] = members

        return self.get_cached_creds(account_type=account_type,
                                     opts=opts,
                                     use_cache=cached)
+
    def _fast_as_req(self,
                     client_creds,
                     target_creds,
                     armor_tgt,
                     expected_error=0,
                     expect_status=None,
                     expected_status=None,
                     expected_groups=None,
                     expect_device_info=None,
                     expected_device_groups=None,
                     expect_device_claims=None,
                     expected_device_claims=None):
        """Perform a FAST-armored AS exchange.

        The request is armored with *armor_tgt* (FX-FAST AP-REQ armor) and
        pre-authenticated with an encrypted-challenge padata derived from
        the client's AES256 password key.

        :param expected_error: expected KDC error code, or 0 for success.
        :param expected_groups / expected_device_groups: SIDs expected in
            the PAC for the client and the armor device respectively.
        :return: the obtained ticket on success, else None.
        """
        client_username = client_creds.get_username()
        client_realm = client_creds.get_realm()
        client_cname = self.PrincipalName_create(name_type=NT_PRINCIPAL,
                                                 names=[client_username])

        target_name = target_creds.get_username()
        target_sname = self.PrincipalName_create(
            name_type=NT_PRINCIPAL, names=[target_name])
        target_realm = target_creds.get_realm()
        target_decryption_key = self.TicketDecryptionKey_from_creds(
            target_creds)
        target_etypes = target_creds.tgs_supported_enctypes

        # Derive the armor key from a fresh authenticator subkey and the
        # armor TGT's session key.
        authenticator_subkey = self.RandomKey(kcrypto.Enctype.AES256)
        armor_key = self.generate_armor_key(authenticator_subkey,
                                            armor_tgt.session_key)

        preauth_key = self.PasswordKey_from_creds(client_creds,
                                                  kcrypto.Enctype.AES256)

        # Pre-authenticate with an encrypted challenge rather than an
        # encrypted timestamp.
        client_challenge_key = (
            self.generate_client_challenge_key(armor_key, preauth_key))
        fast_padata = [self.get_challenge_pa_data(client_challenge_key)]

        def _generate_fast_padata(kdc_exchange_dict,
                                  _callback_dict,
                                  req_body):
            return list(fast_padata), req_body

        etypes = kcrypto.Enctype.AES256, kcrypto.Enctype.RC4

        if expected_error:
            check_error_fn = self.generic_check_kdc_error
            check_rep_fn = None
        else:
            check_error_fn = None
            check_rep_fn = self.generic_check_kdc_rep

        pac_options = '1'  # claims support

        samdb = self.get_samdb()
        domain_sid_str = samdb.get_domain_sid()

        # Resolve placeholder SIDs in the expected group lists against the
        # domain SID.
        if expected_groups is not None:
            expected_groups = self.map_sids(expected_groups, None, domain_sid_str)

        if expected_device_groups is not None:
            expected_device_groups = self.map_sids(expected_device_groups, None, domain_sid_str)

        kdc_exchange_dict = self.as_exchange_dict(
            creds=client_creds,
            expected_crealm=client_realm,
            expected_cname=client_cname,
            expected_srealm=target_realm,
            expected_sname=target_sname,
            expected_supported_etypes=target_etypes,
            ticket_decryption_key=target_decryption_key,
            generate_fast_fn=self.generate_simple_fast,
            generate_fast_armor_fn=self.generate_ap_req,
            generate_fast_padata_fn=_generate_fast_padata,
            fast_armor_type=FX_FAST_ARMOR_AP_REQUEST,
            check_error_fn=check_error_fn,
            check_rep_fn=check_rep_fn,
            check_kdc_private_fn=self.generic_check_kdc_private,
            expected_error_mode=expected_error,
            expected_salt=client_creds.get_salt(),
            expect_status=expect_status,
            expected_status=expected_status,
            expected_groups=expected_groups,
            expect_device_info=expect_device_info,
            expected_device_domain_sid=domain_sid_str,
            expected_device_groups=expected_device_groups,
            expect_device_claims=expect_device_claims,
            expected_device_claims=expected_device_claims,
            authenticator_subkey=authenticator_subkey,
            preauth_key=preauth_key,
            armor_key=armor_key,
            armor_tgt=armor_tgt,
            armor_subkey=authenticator_subkey,
            kdc_options='0',
            pac_options=pac_options,
            # PA-DATA types are not important for these tests.
            check_patypes=False)

        rep = self._generic_kdc_exchange(
            kdc_exchange_dict,
            cname=client_cname,
            realm=client_realm,
            sname=target_sname,
            etypes=etypes)
        if expected_error:
            self.check_error_rep(rep, expected_error)
            return None
        else:
            self.check_as_reply(rep)
            return kdc_exchange_dict['rep_ticket_creds']
+
    @staticmethod
    def audit_type(msg):
        # Map a JSON audit message to the AuditType enum via its 'type' field.
        return AuditType(msg['type'])
+
    @staticmethod
    def auth_type(msg):
        """Return the authentication-type string from an audit message.

        The relevant field name depends on the audit message type.
        """
        # __class__ is available here via the implicit class cell.
        audit_type = __class__.audit_type(msg)
        key = {
            AuditType.AUTHN: 'authDescription',
            AuditType.AUTHZ: 'authType',
            AuditType.KDC_AUTHZ: 'authType',
        }[audit_type]

        return msg[audit_type.value][key]
+
    @staticmethod
    def service_description(msg):
        """Return the serviceDescription field from an audit message."""
        audit_type = __class__.audit_type(msg)
        return msg[audit_type.value]['serviceDescription']
+
    @staticmethod
    def client_account(msg):
        """Return the client account name from an audit message."""
        audit_type = __class__.audit_type(msg)

        # The relevant field name depends on the audit message type.
        key = {
            AuditType.AUTHN: 'clientAccount',
            AuditType.AUTHZ: 'account',
            AuditType.KDC_AUTHZ: 'account',
        }[audit_type]

        return msg[audit_type.value][key]
+
    def filter_msg(self, audit_type, client_name, *,
                   auth_type=None,
                   service_description=None):
        """Return a predicate selecting audit messages for one client.

        :param audit_type: the AuditType the message must have.
        :param client_name: required client account name.
        :param auth_type: optional authentication type to match; may be a
            compiled regular expression, which is then matched in full.
        :param service_description: optional service description to match.
        """
        def _filter_msg(msg):
            if audit_type is not self.audit_type(msg):
                return False

            if auth_type is not None:
                if isinstance(auth_type, re.Pattern):
                    # Check whether the pattern matches.
                    if not auth_type.fullmatch(self.auth_type(msg)):
                        return False
                else:
                    # Just do a standard equality check.
                    if auth_type != self.auth_type(msg):
                        return False

            if service_description is not None:
                if service_description != self.service_description(msg):
                    return False

            return client_name == self.client_account(msg)

        return _filter_msg
+
+ PRE_AUTH_RE = re.compile('.* Pre-authentication')
+
+ def as_req_filter(self, client_creds):
+ username = client_creds.get_username()
+ realm = client_creds.get_realm()
+ client_name = f'{username}@{realm}'
+
+ yield self.filter_msg(AuditType.AUTHN,
+ client_name,
+ auth_type=self.PRE_AUTH_RE,
+ service_description='Kerberos KDC')
+
+ def tgs_req_filter(self, client_creds, target_creds):
+ target_name = target_creds.get_username()
+ if target_name[-1] == '$':
+ target_name = target_name[:-1]
+ target_realm = target_creds.get_realm()
+
+ target_spn = f'host/{target_name}@{target_realm}'
+
+ yield self.filter_msg(AuditType.KDC_AUTHZ,
+ client_creds.get_username(),
+ auth_type='TGS-REQ with Ticket-Granting Ticket',
+ service_description=target_spn)
+
+ def samlogon_filter(self, client_creds, *, logon_type=None):
+ if logon_type is None:
+ auth_type = None
+ elif logon_type == netlogon.NetlogonNetworkInformation:
+ auth_type = 'network'
+ elif logon_type == netlogon.NetlogonInteractiveInformation:
+ auth_type = 'interactive'
+ else:
+ self.fail(f'unknown logon type ‘{logon_type}’')
+
+ yield self.filter_msg(AuditType.AUTHN,
+ client_creds.get_username(),
+ auth_type=auth_type,
+ service_description='SamLogon')
+
+ def ntlm_filter(self, client_creds):
+ username = client_creds.get_username()
+
+ yield self.filter_msg(AuditType.AUTHN,
+ username,
+ auth_type='NTLMSSP',
+ service_description='LDAP')
+
+ yield self.filter_msg(AuditType.AUTHZ,
+ username,
+ auth_type='NTLMSSP',
+ service_description='LDAP')
+
+ def simple_bind_filter(self, client_creds):
+ yield self.filter_msg(AuditType.AUTHN,
+ str(client_creds.get_dn()),
+ auth_type='simple bind/TLS',
+ service_description='LDAP')
+
+ yield self.filter_msg(AuditType.AUTHZ,
+ client_creds.get_username(),
+ auth_type='simple bind',
+ service_description='LDAP')
+
+ def samr_pwd_change_filter(self, client_creds):
+ username = client_creds.get_username()
+
+ yield self.filter_msg(AuditType.AUTHN,
+ username,
+ auth_type='NTLMSSP',
+ service_description='SMB2')
+
+ yield self.filter_msg(AuditType.AUTHZ,
+ username,
+ auth_type='NTLMSSP',
+ service_description='SMB2')
+
+ yield self.filter_msg(AuditType.AUTHN,
+ username,
+ auth_type='NTLMSSP',
+ service_description='DCE/RPC')
+
+ yield self.filter_msg(AuditType.AUTHZ,
+ username,
+ auth_type='NTLMSSP',
+ service_description='DCE/RPC')
+
+ # Password changes are attempted twice, with two different methods.
+
+ yield self.filter_msg(AuditType.AUTHN,
+ username,
+ auth_type='samr_ChangePasswordUser2',
+ service_description='SAMR Password Change')
+
+ yield self.filter_msg(AuditType.AUTHN,
+ username,
+ auth_type='samr_ChangePasswordUser3',
+ service_description='SAMR Password Change')
+
    def nextMessage(self, *args, **kwargs):
        """Return the next relevant message, or throw a NoMessageException."""
        msg = super().nextMessage(*args, **kwargs)
        # Every message carries a timestamp; validate it, then discard it.
        self.assert_is_timestamp(msg.pop('timestamp'))

        # The outer message wraps a single inner message keyed by its type;
        # nothing else may remain once both keys are popped.
        msg_type = msg.pop('type')
        inner = msg.pop(msg_type)
        self.assertFalse(msg, 'unexpected items in outer message')

        return inner
+
    def assert_is_timestamp(self, ts):
        """Fail unless *ts* parses as an ISO 8601–style timestamp string."""
        try:
            datetime.strptime(ts, '%Y-%m-%dT%H:%M:%S.%f%z')
        except (TypeError, ValueError):
            # TypeError covers non-string values; ValueError, bad formats.
            self.fail(f'‘{ts}’ is not a timestamp')
+
    def assert_is_guid(self, guid):
        """Fail unless *guid* looks like a lower-case, hyphenated GUID."""
        guid_re = (
            '^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$')
        self.assertRegex(guid, guid_re)
+
    def assert_tgt_lifetime(self, checked_creds, policy, expected_policy):
        """Check the tgtLifetime field of a policy access check element.

        If no credentials were checked, the field must be absent.  Otherwise
        it must equal the per-account-type TGT lifetime from
        *expected_policy*, converted from 100 ns intervals to minutes (with
        None meaning zero).
        """
        if checked_creds is None:
            self.assertNotIn('tgtLifetime', policy)
            return

        account_type = checked_creds.get_type()
        if account_type is self.AccountType.USER:
            expected = expected_policy.user_tgt_lifetime
        elif account_type is self.AccountType.COMPUTER:
            expected = expected_policy.computer_tgt_lifetime
        elif account_type is self.AccountType.MANAGED_SERVICE:
            expected = expected_policy.service_tgt_lifetime
        else:
            self.fail(f'unknown account type {account_type}')

        if expected is not None:
            # Convert from units of 100 ns to minutes.
            expected /= 60 * 10_000_000
            expected = int(expected)
        else:
            expected = 0

        self.assertEqual(policy.pop('tgtLifetime'), expected)
+
    def assert_event_id(self, audit_event, policy, expected_policy):
        """Check the eventId field of a policy access check element.

        Restriction events map to one of two Windows event IDs depending on
        whether the policy is merely audited or actually enforced; any other
        event maps to AUTH_EVT_ID_NONE.
        """
        event_map = {
            AuditEvent.KERBEROS_DEVICE_RESTRICTION: (
                # unenforced
                win_event.AUTH_EVT_ID_KERBEROS_DEVICE_RESTRICTION_AUDIT,
                # enforced
                win_event.AUTH_EVT_ID_KERBEROS_DEVICE_RESTRICTION,
            ),
            AuditEvent.KERBEROS_SERVER_RESTRICTION: (
                # unenforced
                win_event.AUTH_EVT_ID_KERBEROS_SERVER_RESTRICTION_AUDIT,
                # enforced
                win_event.AUTH_EVT_ID_KERBEROS_SERVER_RESTRICTION,
            ),
            AuditEvent.NTLM_DEVICE_RESTRICTION: (
                win_event.AUTH_EVT_ID_NONE,  # unenforced
                win_event.AUTH_EVT_ID_NTLM_DEVICE_RESTRICTION,  # enforced
            ),
        }

        event_ids = event_map.get(audit_event)
        if event_ids is not None:
            # ‘enforced’ is a bool indexing the (audit, enforced) pair.
            expected_id = event_ids[expected_policy.enforced]
        else:
            expected_id = win_event.AUTH_EVT_ID_NONE

        self.assertEqual(expected_id, policy.pop('eventId'))
+
    def check_policy(self, checked_creds, policy, expected_policy, *,
                     client_creds=None,
                     expected_silo=None,
                     policy_status=ntstatus.NT_STATUS_OK,
                     audit_event=AuditEvent.OK,
                     reason=AuditReason.NONE):
        """Check a policy access check element of an audit message.

        If *expected_policy* is None, no element may be present and the
        other expectations must still be at their defaults.  Otherwise,
        every field of *policy* is popped and verified against the
        expectations, and no unrecognized fields may remain afterwards.
        """
        if expected_policy is None:
            self.assertIsNone(policy, 'got unexpected policy')
            # With no policy expected, the caller must not have supplied
            # non-default status/event/reason expectations either.
            self.assertIs(ntstatus.NT_STATUS_OK, policy_status)
            self.assertIs(AuditEvent.OK, audit_event)
            self.assertIs(AuditReason.NONE, reason)
            return

        self.assertIsNotNone(policy, 'expected to get a policy')

        policy.pop('location')  # A location in the source code, for debugging.

        if checked_creds is not None:
            checked_account = checked_creds.get_username()
            checked_domain = checked_creds.get_domain()
            checked_sid = checked_creds.get_sid()

            self.assertEqual(checked_account, policy.pop('checkedAccount'))
            # Flags are logged as a zero-padded hexadecimal string.
            self.assertRegex(policy.pop('checkedAccountFlags'), '^0x[0-9a-f]{8}$')
            self.assertEqual(checked_domain, policy.pop('checkedDomain'))
            self.assertEqual(checked_sid, policy.pop('checkedSid'))

            logon_server = os.environ['DC_NETBIOSNAME']
            self.assertEqual(logon_server, policy.pop('checkedLogonServer'))
        else:
            self.assertNotIn('checkedAccount', policy)
            self.assertNotIn('checkedAccountFlags', policy)
            self.assertNotIn('checkedDomain', policy)
            self.assertNotIn('checkedSid', policy)
            self.assertNotIn('checkedLogonServer', policy)

        self.assertEqual(expected_policy.enforced,
                         policy.pop('policyEnforced'))
        self.assertEqual(expected_policy.name, policy.pop('policyName'))

        self.assert_tgt_lifetime(client_creds, policy, expected_policy)

        silo_name = expected_silo.name if expected_silo is not None else None
        self.assertEqual(silo_name, policy.pop('siloName'))

        # The status is logged by name; map it back to its numeric value.
        got_status = getattr(ntstatus, policy.pop('status'))
        self.assertEqual(policy_status, got_status)

        got_audit_event = policy.pop('auditEvent')
        try:
            got_audit_event = AuditEvent(got_audit_event)
        except ValueError:
            self.fail('got unrecognized audit event')
        self.assertEqual(audit_event, got_audit_event)
        self.assert_event_id(audit_event, policy, expected_policy)

        got_reason = policy.pop('reason')
        try:
            got_reason = AuditReason(got_reason)
        except ValueError:
            self.fail('got unrecognized audit reason')
        self.assertEqual(reason, got_reason)

        self.assertFalse(policy, 'unexpected items remain in policy')
+
    @policy_check_fn
    def check_as_log(self, client_creds, *,
                     client_policy,
                     client_policy_status,
                     client_policy_event,
                     client_policy_reason,
                     server_policy,
                     server_policy_status,
                     server_policy_event,
                     server_policy_reason,
                     overall_status,
                     armor_creds=None):
        """Check the authentication message logged for an AS-REQ.

        The client policy access check is made against the armor (FAST)
        credentials, if any; the server policy access check against the
        client credentials.
        """
        if not self.as_req_logging_support:
            return

        as_req_filter = self.as_req_filter(client_creds)
        for msg_filter in self.take(1, as_req_filter):
            try:
                msg = self.nextMessage(msg_filter)
            except NoMessageException:
                self.fail('expected to receive authentication message')

            self.assertEqual(AUTHN_VERSION, msg.pop('version'))

            got_status = getattr(ntstatus, msg.pop('status'))
            self.assertEqual(overall_status, got_status)

            got_client_policy = msg.pop('clientPolicyAccessCheck', None)
            self.check_policy(armor_creds, got_client_policy, client_policy,
                              client_creds=client_creds,
                              policy_status=client_policy_status,
                              audit_event=client_policy_event,
                              reason=client_policy_reason)

            got_server_policy = msg.pop('serverPolicyAccessCheck', None)
            self.check_policy(client_creds, got_server_policy, server_policy,
                              policy_status=server_policy_status,
                              audit_event=server_policy_event,
                              reason=server_policy_reason)
+
+ def check_tgs_log(self, client_creds, target_creds, *,
+ policy=None,
+ policy_status=None,
+ status=None,
+ checked_creds=None,
+ event=AuditEvent.OK,
+ reason=AuditReason.NONE):
+ if not self.tgs_req_logging_support:
+ return
+
+ if checked_creds is None:
+ checked_creds = client_creds
+
+ overall_status = status if status is not None else ntstatus.NT_STATUS_OK
+
+ if policy_status is None:
+ policy_status = ntstatus.NT_STATUS_OK
+
+ if policy is not None:
+ policy_status = overall_status
+ elif status is None and policy.enforced:
+ overall_status = status
+
+ client_domain = client_creds.get_domain()
+
+ logon_server = os.environ['DC_NETBIOSNAME']
+
+ # An example of a typical KDC Authorization log message:
+
+ # {
+ # "KDC Authorization": {
+ # "account": "alice",
+ # "authTime": "2023-06-15T23:45:13.183564+0000",
+ # "authType": "TGS-REQ with Ticket-Granting Ticket",
+ # "domain": "ADDOMAIN",
+ # "localAddress": null,
+ # "logonServer": "ADDC",
+ # "remoteAddress": "ipv4:10.53.57.11:28004",
+ # "serverPolicyAccessCheck": {
+ # "auditEvent": "KERBEROS_SERVER_RESTRICTION",
+ # "checkedAccount": "alice",
+ # "checkedAccountFlags": "0x00000010",
+ # "checkedDomain": "ADDOMAIN",
+ # "checkedLogonServer": "ADDC",
+ # "checkedSid": "S-1-5-21-3907522332-2561495341-3138977981-1159",
+ # "eventId": 106,
+ # "location": "../../source4/kdc/authn_policy_util.c:1181",
+ # "policyEnforced": true,
+ # "policyName": "Example Policy",
+ # "reason": "ACCESS_DENIED",
+ # "siloName": null,
+ # "status": "NT_STATUS_AUTHENTICATION_FIREWALL_FAILED"
+ # },
+ # "serviceDescription": "host/target@ADDOM.SAMBA.EXAMPLE.COM",
+ # "sid": "S-1-5-21-3907522332-2561495341-3138977981-1159",
+ # "status": "NT_STATUS_AUTHENTICATION_FIREWALL_FAILED",
+ # "version": {
+ # "major": 1,
+ # "minor": 0
+ # }
+ # },
+ # "timestamp": "2023-06-15T23:45:13.202312+0000",
+ # "type": "KDC Authorization"
+ # }
+
+ tgs_req_filter = self.tgs_req_filter(client_creds, target_creds)
+ for msg_filter in self.take(1, tgs_req_filter):
+ try:
+ msg = self.nextMessage(msg_filter)
+ except NoMessageException:
+ self.fail('expected to receive KDC authorization message')
+
+ # These parameters have already been checked.
+ msg.pop('account')
+ msg.pop('authType')
+ msg.pop('remoteAddress')
+ msg.pop('serviceDescription')
+
+ self.assertEqual(KDC_AUTHZ_VERSION, msg.pop('version'))
+
+ self.assert_is_timestamp(msg.pop('authTime'))
+ self.assertEqual(client_domain, msg.pop('domain'))
+ self.assertIsNone(msg.pop('localAddress'))
+ self.assertEqual(logon_server, msg.pop('logonServer'))
+ self.assertEqual(client_creds.get_sid(), msg.pop('sid'))
+
+ got_status = getattr(ntstatus, msg.pop('status'))
+ self.assertEqual(overall_status, got_status)
+
+ server_policy = msg.pop('serverPolicyAccessCheck', None)
+ self.check_policy(checked_creds, server_policy, policy,
+ policy_status=policy_status,
+ audit_event=event,
+ reason=reason)
+
+ self.assertFalse(msg, 'unexpected items remain in message')
+
    @policy_check_fn
    def check_samlogon_log(self, client_creds, *,
                           client_policy,
                           client_policy_status,
                           client_policy_event,
                           client_policy_reason,
                           server_policy,
                           server_policy_status,
                           server_policy_event,
                           server_policy_reason,
                           overall_status,
                           logon_type=None):
        """Check the authentication message logged for a SamLogon.

        No account is checked against the client policy here, so the first
        check_policy() call passes None for the checked credentials.
        """
        samlogon_filter = self.samlogon_filter(client_creds,
                                               logon_type=logon_type)
        for msg_filter in self.take(1, samlogon_filter):
            try:
                msg = self.nextMessage(msg_filter)
            except NoMessageException:
                self.fail('expected to receive authentication message')

            self.assertEqual(AUTHN_VERSION, msg.pop('version'))

            got_status = getattr(ntstatus, msg.pop('status'))
            self.assertEqual(overall_status, got_status)

            got_client_policy = msg.pop('clientPolicyAccessCheck', None)
            self.check_policy(None, got_client_policy, client_policy,
                              policy_status=client_policy_status,
                              audit_event=client_policy_event,
                              reason=client_policy_reason)

            got_server_policy = msg.pop('serverPolicyAccessCheck', None)
            self.check_policy(client_creds, got_server_policy, server_policy,
                              policy_status=server_policy_status,
                              audit_event=server_policy_event,
                              reason=server_policy_reason)
+
+ def check_samlogon_network_log(self, client_creds, **kwargs):
+ return self.check_samlogon_log(
+ client_creds,
+ logon_type=netlogon.NetlogonNetworkInformation,
+ **kwargs)
+
+ def check_samlogon_interactive_log(self, client_creds, **kwargs):
+ return self.check_samlogon_log(
+ client_creds,
+ logon_type=netlogon.NetlogonInteractiveInformation,
+ **kwargs)
+
    @policy_check_fn
    def check_ntlm_log(self, client_creds, *,
                       client_policy,
                       client_policy_status,
                       client_policy_event,
                       client_policy_reason,
                       server_policy,
                       server_policy_status,
                       server_policy_event,
                       server_policy_reason,
                       overall_status):
        """Check the authentication and authorization messages of an NTLM bind.

        If the authentication fails (non-zero *overall_status*), no
        authorization message is expected.
        """
        ntlm_filter = self.ntlm_filter(client_creds)

        for authn_filter, authz_filter in self.take_pairs(1, ntlm_filter):
            try:
                msg = self.nextMessage(authn_filter)
            except NoMessageException:
                self.fail('expected to receive authentication message')

            self.assertEqual(AUTHN_VERSION, msg.pop('version'))

            got_status = getattr(ntstatus, msg.pop('status'))
            self.assertEqual(overall_status, got_status)

            got_client_policy = msg.pop('clientPolicyAccessCheck', None)
            self.check_policy(None, got_client_policy, client_policy,
                              policy_status=client_policy_status,
                              audit_event=client_policy_event,
                              reason=client_policy_reason)

            got_server_policy = msg.pop('serverPolicyAccessCheck', None)
            self.check_policy(client_creds, got_server_policy, server_policy,
                              policy_status=server_policy_status,
                              audit_event=server_policy_event,
                              reason=server_policy_reason)

            if overall_status:
                # Authentication can proceed no further.
                return

            try:
                msg = self.nextMessage(authz_filter)
            except NoMessageException:
                self.fail('expected to receive authorization message')

            self.assertEqual(AUTHZ_VERSION, msg.pop('version'))

            got_server_policy = msg.pop('serverPolicyAccessCheck', None)
            self.check_policy(client_creds, got_server_policy, server_policy)
+
    @policy_check_fn
    def check_simple_bind_log(self, client_creds, *,
                              client_policy,
                              client_policy_status,
                              client_policy_event,
                              client_policy_reason,
                              server_policy,
                              server_policy_status,
                              server_policy_event,
                              server_policy_reason,
                              overall_status):
        """Check the authentication and authorization messages of a simple bind.

        If the authentication fails (non-zero *overall_status*), no
        authorization message is expected.
        """
        simple_bind_filter = self.simple_bind_filter(client_creds)

        for authn_filter, authz_filter in self.take_pairs(1,
                                                          simple_bind_filter):
            try:
                msg = self.nextMessage(authn_filter)
            except NoMessageException:
                self.fail('expected to receive authentication message')

            self.assertEqual(AUTHN_VERSION, msg.pop('version'))

            got_status = getattr(ntstatus, msg.pop('status'))
            self.assertEqual(overall_status, got_status)

            got_client_policy = msg.pop('clientPolicyAccessCheck', None)
            self.check_policy(None, got_client_policy, client_policy,
                              policy_status=client_policy_status,
                              audit_event=client_policy_event,
                              reason=client_policy_reason)

            got_server_policy = msg.pop('serverPolicyAccessCheck', None)
            self.check_policy(client_creds, got_server_policy, server_policy,
                              policy_status=server_policy_status,
                              audit_event=server_policy_event,
                              reason=server_policy_reason)

            if overall_status:
                # Authentication can proceed no further.
                return

            try:
                msg = self.nextMessage(authz_filter)
            except NoMessageException:
                self.fail('expected to receive authorization message')

            self.assertEqual(AUTHZ_VERSION, msg.pop('version'))

            got_server_policy = msg.pop('serverPolicyAccessCheck', None)
            self.check_policy(client_creds, got_server_policy, server_policy,
                              policy_status=server_policy_status,
                              audit_event=server_policy_event,
                              reason=server_policy_reason)
+
    @policy_check_fn
    def check_samr_pwd_change_log(self, client_creds, *,
                                  client_policy,
                                  client_policy_status,
                                  client_policy_event,
                                  client_policy_reason,
                                  server_policy,
                                  server_policy_status,
                                  server_policy_event,
                                  server_policy_reason,
                                  overall_status):
        """Check the messages logged for a SAMR password change.

        First the NTLMSSP authentication/authorization pairs (SMB2 and
        DCE/RPC) are checked; then the two SAMR password change attempts.
        """
        pwd_change_filter = self.samr_pwd_change_filter(client_creds)

        # There will be two authorization attempts.
        for authn_filter, authz_filter in self.take_pairs(2,
                                                          pwd_change_filter,
                                                          take_all=False):
            try:
                msg = self.nextMessage(authn_filter)
            except NoMessageException:
                self.fail('expected to receive authentication message')

            self.assertEqual(AUTHN_VERSION, msg.pop('version'))

            got_status = getattr(ntstatus, msg.pop('status'))
            self.assertEqual(overall_status, got_status)

            got_client_policy = msg.pop('clientPolicyAccessCheck', None)
            self.check_policy(None, got_client_policy, client_policy,
                              policy_status=client_policy_status,
                              audit_event=client_policy_event,
                              reason=client_policy_reason)

            got_server_policy = msg.pop('serverPolicyAccessCheck', None)
            self.check_policy(client_creds, got_server_policy, server_policy,
                              policy_status=server_policy_status,
                              audit_event=server_policy_event,
                              reason=server_policy_reason)

            if overall_status:
                # Authentication can proceed no further.
                return

            try:
                msg = self.nextMessage(authz_filter)
            except NoMessageException:
                self.fail('expected to receive authorization message')

            self.assertEqual(AUTHZ_VERSION, msg.pop('version'))

            got_server_policy = msg.pop('serverPolicyAccessCheck', None)
            self.check_policy(client_creds, got_server_policy, server_policy,
                              policy_status=server_policy_status,
                              audit_event=server_policy_event,
                              reason=server_policy_reason)

        # There will be two SAMR password change attempts.
        for msg_filter in self.take(2, pwd_change_filter):
            try:
                msg = self.nextMessage(msg_filter)
            except NoMessageException:
                self.fail('expected to receive SAMR password change message')

            self.assertEqual(AUTHN_VERSION, msg.pop('version'))

            got_status = getattr(ntstatus, msg.pop('status'))
            self.assertEqual(ntstatus.NT_STATUS_OK, got_status)

            # NOTE(review): expected_policy is None here, and check_policy()
            # then asserts that the status/event/reason expectations are at
            # their defaults — presumably @policy_check_fn guarantees that
            # when these messages are reached; confirm against the decorator.
            got_client_policy = msg.pop('clientPolicyAccessCheck', None)
            self.check_policy(None, got_client_policy, None,
                              policy_status=client_policy_status,
                              audit_event=client_policy_event,
                              reason=client_policy_reason)

            got_server_policy = msg.pop('serverPolicyAccessCheck', None)
            self.check_policy(client_creds, got_server_policy, None,
                              policy_status=server_policy_status,
                              audit_event=server_policy_event,
                              reason=server_policy_reason)
+
    def check_ticket_times(self,
                           ticket_creds,
                           expected_life=None,
                           expected_renew_life=None):
        """Check the lifetime and renew lifetime of a ticket, in seconds.

        Lifetimes are measured from the ticket’s start time (falling back
        to its auth time).  A None expectation skips the corresponding
        check, except that a missing renew-till requires
        *expected_renew_life* to be None.
        """
        ticket = ticket_creds.ticket_private

        authtime = ticket['authtime']
        starttime = ticket.get('starttime', authtime)
        endtime = ticket['endtime']
        renew_till = ticket.get('renew-till', None)

        # NOTE(review): endtime and renew_till are decoded from ASCII below,
        # but starttime is passed through as-is — presumably
        # get_EpochFromKerberosTime() accepts both forms; confirm.
        starttime = self.get_EpochFromKerberosTime(starttime)

        if expected_life is not None:
            actual_end = self.get_EpochFromKerberosTime(
                endtime.decode('ascii'))
            actual_lifetime = actual_end - starttime

            self.assertEqual(expected_life, actual_lifetime)

        if renew_till is None:
            self.assertIsNone(expected_renew_life)
        else:
            if expected_renew_life is not None:
                actual_renew_till = self.get_EpochFromKerberosTime(
                    renew_till.decode('ascii'))
                actual_renew_life = actual_renew_till - starttime

                self.assertEqual(expected_renew_life, actual_renew_life)
+
    def _get_tgt(self, creds, *,
                 armor_tgt=None,
                 till=None,
                 kdc_options=None,
                 expected_flags=None,
                 unexpected_flags=None,
                 expected_error=0,
                 expect_status=None,
                 expected_status=None):
        """Perform an AS-REQ and return the resulting TGT credentials.

        If *armor_tgt* is given, the request is FAST-armored and uses an
        encrypted challenge; otherwise plain encrypted-timestamp
        pre-authentication is used.  Returns None if *expected_error* is
        non-zero (the error reply is checked instead).
        """
        user_name = creds.get_username()
        realm = creds.get_realm()
        salt = creds.get_salt()

        cname = self.PrincipalName_create(name_type=NT_PRINCIPAL,
                                          names=user_name.split('/'))
        sname = self.PrincipalName_create(name_type=NT_SRV_INST,
                                          names=['krbtgt', realm])
        # The reply is expected to carry the realm upper-cased.
        expected_sname = self.PrincipalName_create(
            name_type=NT_SRV_INST, names=['krbtgt', realm.upper()])

        expected_cname = cname

        if till is None:
            till = self.get_KerberosTime(offset=36000)

        renew_time = till

        krbtgt_creds = self.get_krbtgt_creds()
        ticket_decryption_key = (
            self.TicketDecryptionKey_from_creds(krbtgt_creds))

        expected_etypes = krbtgt_creds.tgs_supported_enctypes

        if kdc_options is None:
            kdc_options = str(krb5_asn1.KDCOptions('renewable'))
            # Contrary to Microsoft’s documentation, the returned ticket is
            # renewable.
            expected_flags = krb5_asn1.TicketFlags('renewable')

        preauth_key = self.PasswordKey_from_creds(creds,
                                                  kcrypto.Enctype.AES256)

        expected_realm = realm.upper()

        etypes = kcrypto.Enctype.AES256, kcrypto.Enctype.RC4

        if armor_tgt is not None:
            # FAST armor: derive the armor key from a fresh subkey and the
            # armor TGT’s session key, and answer the encrypted challenge.
            authenticator_subkey = self.RandomKey(kcrypto.Enctype.AES256)
            armor_key = self.generate_armor_key(authenticator_subkey,
                                                armor_tgt.session_key)
            armor_subkey = authenticator_subkey

            client_challenge_key = self.generate_client_challenge_key(
                armor_key, preauth_key)
            enc_challenge_padata = self.get_challenge_pa_data(
                client_challenge_key)

            def generate_fast_padata_fn(kdc_exchange_dict,
                                        _callback_dict,
                                        req_body):
                return [enc_challenge_padata], req_body

            generate_fast_fn = self.generate_simple_fast
            generate_fast_armor_fn = self.generate_ap_req
            generate_padata_fn = None

            fast_armor_type = FX_FAST_ARMOR_AP_REQUEST
        else:
            # No armor: use plain encrypted-timestamp pre-authentication.
            ts_enc_padata = self.get_enc_timestamp_pa_data_from_key(
                preauth_key)

            def generate_padata_fn(kdc_exchange_dict,
                                   _callback_dict,
                                   req_body):
                return [ts_enc_padata], req_body

            generate_fast_fn = None
            generate_fast_padata_fn = None
            generate_fast_armor_fn = None

            armor_key = None
            armor_subkey = None

            fast_armor_type = None

        if not expected_error:
            check_error_fn = None
            check_rep_fn = self.generic_check_kdc_rep
        else:
            check_error_fn = self.generic_check_kdc_error
            check_rep_fn = None

        kdc_exchange_dict = self.as_exchange_dict(
            creds=creds,
            expected_error_mode=expected_error,
            expect_status=expect_status,
            expected_status=expected_status,
            expected_crealm=expected_realm,
            expected_cname=expected_cname,
            expected_srealm=expected_realm,
            expected_sname=expected_sname,
            expected_salt=salt,
            expected_flags=expected_flags,
            unexpected_flags=unexpected_flags,
            expected_supported_etypes=expected_etypes,
            generate_padata_fn=generate_padata_fn,
            generate_fast_padata_fn=generate_fast_padata_fn,
            generate_fast_fn=generate_fast_fn,
            generate_fast_armor_fn=generate_fast_armor_fn,
            fast_armor_type=fast_armor_type,
            check_error_fn=check_error_fn,
            check_rep_fn=check_rep_fn,
            check_kdc_private_fn=self.generic_check_kdc_private,
            armor_key=armor_key,
            armor_tgt=armor_tgt,
            armor_subkey=armor_subkey,
            kdc_options=kdc_options,
            preauth_key=preauth_key,
            ticket_decryption_key=ticket_decryption_key,
            # PA-DATA types are not important for these tests.
            check_patypes=False)

        rep = self._generic_kdc_exchange(kdc_exchange_dict,
                                         cname=cname,
                                         realm=realm,
                                         sname=sname,
                                         till_time=till,
                                         renew_time=renew_time,
                                         etypes=etypes)
        if expected_error:
            self.check_error_rep(rep, expected_error)

            return None

        self.check_as_reply(rep)

        ticket_creds = kdc_exchange_dict['rep_ticket_creds']
        return ticket_creds
+
+
+class AuthnPolicyTests(AuthnPolicyBaseTests):
    def setUp(self):
        """Apply the global ASN.1 print and hexdump debugging settings."""
        super().setUp()
        self.do_asn1_print = global_asn1_print
        self.do_hexdump = global_hexdump
+
    def test_authn_policy_tgt_lifetime_user(self):
        """An assigned policy’s user TGT lifetime caps a user’s TGT."""
        # Create an authentication policy with certain TGT lifetimes set.
        user_life = 111
        computer_life = 222
        service_life = 333
        policy = self.create_authn_policy(enforced=True,
                                          user_tgt_lifetime=user_life,
                                          computer_tgt_lifetime=computer_life,
                                          service_tgt_lifetime=service_life)

        # Create a user account with the assigned policy.
        client_creds = self._get_creds(account_type=self.AccountType.USER,
                                       assigned_policy=policy)

        # Request a Kerberos ticket with a lifetime of two hours, and assert
        # that the actual lifetime matches the user lifetime set in the policy.
        till = self.get_KerberosTime(offset=2 * 60 * 60)  # 2 hours
        tgt = self._get_tgt(client_creds, till=till)
        self.check_ticket_times(tgt, expected_life=user_life,
                                expected_renew_life=user_life)

        self.check_as_log(client_creds)
+
    def test_authn_policy_tgt_lifetime_computer(self):
        """An assigned policy’s computer TGT lifetime caps a computer’s TGT."""
        user_life = 111
        computer_life = 222
        service_life = 333
        policy = self.create_authn_policy(enforced=True,
                                          user_tgt_lifetime=user_life,
                                          computer_tgt_lifetime=computer_life,
                                          service_tgt_lifetime=service_life)

        # Create a computer account with the assigned policy.
        client_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
                                       assigned_policy=policy)

        # Request a Kerberos ticket with a lifetime of two hours, and assert
        # that the actual lifetime matches the computer lifetime set in the
        # policy.
        till = self.get_KerberosTime(offset=2 * 60 * 60)  # 2 hours
        tgt = self._get_tgt(client_creds, till=till)
        self.check_ticket_times(tgt, expected_life=computer_life,
                                expected_renew_life=computer_life)

        self.check_as_log(client_creds)
+
    def test_authn_policy_tgt_lifetime_service(self):
        """An assigned policy’s service TGT lifetime caps a service’s TGT."""
        user_life = 111
        computer_life = 222
        service_life = 333
        policy = self.create_authn_policy(enforced=True,
                                          user_tgt_lifetime=user_life,
                                          computer_tgt_lifetime=computer_life,
                                          service_tgt_lifetime=service_life)

        # Create a managed service account with the assigned policy.
        client_creds = self._get_creds(
            account_type=self.AccountType.MANAGED_SERVICE,
            assigned_policy=policy)

        # Request a Kerberos ticket with a lifetime of two hours, and assert
        # that the actual lifetime matches the service lifetime set in the
        # policy.
        till = self.get_KerberosTime(offset=2 * 60 * 60)  # 2 hours
        tgt = self._get_tgt(client_creds, till=till)
        self.check_ticket_times(tgt, expected_life=service_life,
                                expected_renew_life=service_life)

        self.check_as_log(client_creds)
+
    def test_authn_silo_tgt_lifetime_user(self):
        """A silo’s user policy governs a user member’s TGT lifetime."""
        # Create an authentication policy with certain TGT lifetimes set.
        user_life = 111
        computer_life = 222
        service_life = 333
        policy = self.create_authn_policy(enforced=True,
                                          user_tgt_lifetime=user_life,
                                          computer_tgt_lifetime=computer_life,
                                          service_tgt_lifetime=service_life)

        # Create a second policy with different lifetimes, so we can verify the
        # correct policy is enforced.
        wrong_policy = self.create_authn_policy(enforced=True,
                                                user_tgt_lifetime=444,
                                                computer_tgt_lifetime=555,
                                                service_tgt_lifetime=666)

        # Create an authentication silo with our existing policies.
        silo = self.create_authn_silo(user_policy=policy,
                                      computer_policy=wrong_policy,
                                      service_policy=wrong_policy,
                                      enforced=True)

        # Create a user account assigned to the silo.
        client_creds = self._get_creds(account_type=self.AccountType.USER,
                                       assigned_silo=silo)
        client_dn_str = str(client_creds.get_dn())

        # Add the user to the silo as a member.
        self.add_to_group(client_dn_str, silo.dn,
                          'msDS-AuthNPolicySiloMembers',
                          expect_attr=False)

        # Request a Kerberos ticket with a lifetime of two hours, and assert
        # that the actual lifetime matches the user lifetime set in the
        # appropriate policy.
        till = self.get_KerberosTime(offset=2 * 60 * 60)  # 2 hours
        tgt = self._get_tgt(client_creds, till=till)
        self.check_ticket_times(tgt, expected_life=user_life,
                                expected_renew_life=user_life)

        self.check_as_log(client_creds)
+
    def test_authn_silo_tgt_lifetime_computer(self):
        """A silo’s computer policy governs a computer member’s TGT lifetime."""
        user_life = 111
        computer_life = 222
        service_life = 333
        policy = self.create_authn_policy(enforced=True,
                                          user_tgt_lifetime=user_life,
                                          computer_tgt_lifetime=computer_life,
                                          service_tgt_lifetime=service_life)

        wrong_policy = self.create_authn_policy(enforced=True,
                                                user_tgt_lifetime=444,
                                                computer_tgt_lifetime=555,
                                                service_tgt_lifetime=666)

        # Create an authentication silo with our existing policies.
        silo = self.create_authn_silo(user_policy=wrong_policy,
                                      computer_policy=policy,
                                      service_policy=wrong_policy,
                                      enforced=True)

        # Create a computer account assigned to the silo.
        client_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
                                       assigned_silo=silo)
        client_dn_str = str(client_creds.get_dn())

        # Add the computer to the silo as a member.
        self.add_to_group(client_dn_str, silo.dn,
                          'msDS-AuthNPolicySiloMembers',
                          expect_attr=False)

        # Request a Kerberos ticket with a lifetime of two hours, and assert
        # that the actual lifetime matches the computer lifetime set in the
        # appropriate policy.
        till = self.get_KerberosTime(offset=2 * 60 * 60)  # 2 hours
        tgt = self._get_tgt(client_creds, till=till)
        self.check_ticket_times(tgt, expected_life=computer_life,
                                expected_renew_life=computer_life)

        self.check_as_log(client_creds)
+
    def test_authn_silo_tgt_lifetime_service(self):
        """A silo’s service policy governs a service member’s TGT lifetime."""
        user_life = 111
        computer_life = 222
        service_life = 333
        policy = self.create_authn_policy(enforced=True,
                                          user_tgt_lifetime=user_life,
                                          computer_tgt_lifetime=computer_life,
                                          service_tgt_lifetime=service_life)

        wrong_policy = self.create_authn_policy(enforced=True,
                                                user_tgt_lifetime=444,
                                                computer_tgt_lifetime=555,
                                                service_tgt_lifetime=666)

        # Create an authentication silo with our existing policies.
        silo = self.create_authn_silo(user_policy=wrong_policy,
                                      computer_policy=wrong_policy,
                                      service_policy=policy,
                                      enforced=True)

        # Create a managed service account assigned to the silo.
        client_creds = self._get_creds(
            account_type=self.AccountType.MANAGED_SERVICE,
            assigned_silo=silo)
        client_dn_str = str(client_creds.get_dn())

        # Add the managed service account to the silo as a member.
        self.add_to_group(client_dn_str, silo.dn,
                          'msDS-AuthNPolicySiloMembers',
                          expect_attr=False)

        # Request a Kerberos ticket with a lifetime of two hours, and assert
        # that the actual lifetime matches the service lifetime set in the
        # appropriate policy.
        till = self.get_KerberosTime(offset=2 * 60 * 60)  # 2 hours
        tgt = self._get_tgt(client_creds, till=till)
        self.check_ticket_times(tgt, expected_life=service_life,
                                expected_renew_life=service_life)

        self.check_as_log(client_creds)
+
    # Test that an authentication silo takes priority over a policy assigned
    # directly.
    def test_authn_silo_and_policy_tgt_lifetime_user(self):
        """The silo’s user policy wins over a directly assigned policy."""
        # Create an authentication policy with certain TGT lifetimes set.
        user_life = 111
        computer_life = 222
        service_life = 333
        policy = self.create_authn_policy(enforced=True,
                                          user_tgt_lifetime=user_life,
                                          computer_tgt_lifetime=computer_life,
                                          service_tgt_lifetime=service_life)

        # Create a second policy with different lifetimes, so we can verify the
        # correct policy is enforced.
        wrong_policy = self.create_authn_policy(enforced=True,
                                                user_tgt_lifetime=444,
                                                computer_tgt_lifetime=555,
                                                service_tgt_lifetime=666)

        # Create an authentication silo with our existing policies.
        silo = self.create_authn_silo(user_policy=policy,
                                      computer_policy=wrong_policy,
                                      service_policy=wrong_policy,
                                      enforced=True)

        # Create a user account assigned to the silo, and also to a policy.
        client_creds = self._get_creds(account_type=self.AccountType.USER,
                                       assigned_silo=silo,
                                       assigned_policy=wrong_policy)
        client_dn_str = str(client_creds.get_dn())

        # Add the user to the silo as a member.
        self.add_to_group(client_dn_str, silo.dn,
                          'msDS-AuthNPolicySiloMembers',
                          expect_attr=False)

        # Request a Kerberos ticket with a lifetime of two hours, and assert
        # that the actual lifetime matches the user lifetime set in the
        # appropriate policy.
        till = self.get_KerberosTime(offset=2 * 60 * 60)  # 2 hours
        tgt = self._get_tgt(client_creds, till=till)
        self.check_ticket_times(tgt, expected_life=user_life,
                                expected_renew_life=user_life)

        self.check_as_log(client_creds)
+
    def test_authn_silo_and_policy_tgt_lifetime_computer(self):
        """The silo’s computer policy wins over a directly assigned policy."""
        user_life = 111
        computer_life = 222
        service_life = 333
        policy = self.create_authn_policy(enforced=True,
                                          user_tgt_lifetime=user_life,
                                          computer_tgt_lifetime=computer_life,
                                          service_tgt_lifetime=service_life)

        wrong_policy = self.create_authn_policy(enforced=True,
                                                user_tgt_lifetime=444,
                                                computer_tgt_lifetime=555,
                                                service_tgt_lifetime=666)

        # Create an authentication silo with our existing policies.
        silo = self.create_authn_silo(user_policy=wrong_policy,
                                      computer_policy=policy,
                                      service_policy=wrong_policy,
                                      enforced=True)

        # Create a computer account assigned to the silo, and also to a policy.
        client_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
                                       assigned_silo=silo,
                                       assigned_policy=wrong_policy)
        client_dn_str = str(client_creds.get_dn())

        # Add the computer to the silo as a member.
        self.add_to_group(client_dn_str, silo.dn,
                          'msDS-AuthNPolicySiloMembers',
                          expect_attr=False)

        # Request a Kerberos ticket with a lifetime of two hours, and assert
        # that the actual lifetime matches the computer lifetime set in the
        # appropriate policy.
        till = self.get_KerberosTime(offset=2 * 60 * 60)  # 2 hours
        tgt = self._get_tgt(client_creds, till=till)
        self.check_ticket_times(tgt, expected_life=computer_life,
                                expected_renew_life=computer_life)

        self.check_as_log(client_creds)
+
+ def test_authn_silo_and_policy_tgt_lifetime_service(self):
+ user_life = 111
+ computer_life = 222
+ service_life = 333
+ policy = self.create_authn_policy(enforced=True,
+ user_tgt_lifetime=user_life,
+ computer_tgt_lifetime=computer_life,
+ service_tgt_lifetime=service_life)
+
+ wrong_policy = self.create_authn_policy(enforced=True,
+ user_tgt_lifetime=444,
+ computer_tgt_lifetime=555,
+ service_tgt_lifetime=666)
+
+ # Create an authentication silo with our existing policies.
+ silo = self.create_authn_silo(user_policy=wrong_policy,
+ computer_policy=wrong_policy,
+ service_policy=policy,
+ enforced=True)
+
+ # Create a managed service account assigned to the silo, and also to a
+ # policy.
+ client_creds = self._get_creds(
+ account_type=self.AccountType.MANAGED_SERVICE,
+ assigned_silo=silo,
+ assigned_policy=wrong_policy)
+ client_dn_str = str(client_creds.get_dn())
+
+ # Add the managed service account to the silo as a member.
+ self.add_to_group(client_dn_str, silo.dn,
+ 'msDS-AuthNPolicySiloMembers',
+ expect_attr=False)
+
+ # Request a Kerberos ticket with a lifetime of two hours, and assert
+ # that the actual lifetime matches the service lifetime set in the
+ # appropriate policy.
+ till = self.get_KerberosTime(offset=2 * 60 * 60) # 2 hours
+ tgt = self._get_tgt(client_creds, till=till)
+ self.check_ticket_times(tgt, expected_life=service_life,
+ expected_renew_life=service_life)
+
+ self.check_as_log(client_creds)
+
+ def test_authn_policy_tgt_lifetime_max(self):
+ # Create an authentication policy with the maximum allowable TGT
+ # lifetime set.
+ INT64_MAX = 0x7fff_ffff_ffff_ffff
+ max_lifetime = INT64_MAX // 10_000_000
+ policy = self.create_authn_policy(enforced=True,
+ user_tgt_lifetime=max_lifetime)
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy)
+
+ # Request a Kerberos ticket with a ‘till’ time far in the
+ # future, and assert that the actual lifetime is the maximum
+ # allowed by the Default Domain policy.
+ till = '99991231235959Z'
+ expected_lifetime = self.get_max_ticket_life()
+ tgt = self._get_tgt(client_creds, till=till)
+ self.check_ticket_times(tgt, expected_life=expected_lifetime,
+ expected_renew_life=expected_lifetime)
+
+ self.check_as_log(client_creds)
+
+ def test_authn_policy_tgt_lifetime_min(self):
+ # Create an authentication policy with the minimum allowable TGT
+ # lifetime set.
+ INT64_MIN = -0x8000_0000_0000_0000
+ min_lifetime = round(INT64_MIN / 10_000_000)
+ policy = self.create_authn_policy(enforced=True,
+ user_tgt_lifetime=min_lifetime)
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy)
+
+ # Request a Kerberos ticket with a lifetime of two hours. The request
+ # should fail with a NEVER_VALID error.
+ till = self.get_KerberosTime(offset=2 * 60 * 60) # 2 hours
+ self._get_tgt(client_creds, till=till,
+ expected_error=KDC_ERR_NEVER_VALID,
+ expect_status=True,
+ expected_status=ntstatus.NT_STATUS_TIME_DIFFERENCE_AT_DC)
+
+ self.check_as_log(
+ client_creds,
+ status=ntstatus.NT_STATUS_TIME_DIFFERENCE_AT_DC)
+
+ def test_authn_policy_tgt_lifetime_zero(self):
+ # Create an authentication policy with the TGT lifetime set to zero.
+ lifetime = 0
+ policy = self.create_authn_policy(enforced=True,
+ user_tgt_lifetime=lifetime)
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy)
+
+ # Request a Kerberos ticket with a ‘till’ time far in the
+ # future. Assert that the actual lifetime is the maximum
+ # allowed by the Default Domain Policy
+ till = '99991231235959Z'
+ expected_lifetime = self.get_max_ticket_life()
+ expected_renew_life = self.get_max_renew_life()
+ tgt = self._get_tgt(client_creds, till=till)
+ self.check_ticket_times(tgt, expected_life=expected_lifetime,
+ expected_renew_life=expected_renew_life)
+
+ self.check_as_log(client_creds)
+
+ def test_authn_policy_tgt_lifetime_one_second(self):
+ # Create an authentication policy with the TGT lifetime set to one
+ # second.
+ lifetime = 1
+ policy = self.create_authn_policy(enforced=True,
+ user_tgt_lifetime=lifetime)
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy)
+
+ # Request a Kerberos ticket with a lifetime of two hours, and assert
+ # that the actual lifetime matches the user lifetime set in the
+ # appropriate policy.
+ till = self.get_KerberosTime(offset=2 * 60 * 60) # 2 hours
+ tgt = self._get_tgt(client_creds, till=till)
+ self.check_ticket_times(tgt, expected_life=lifetime,
+ expected_renew_life=lifetime)
+
+ self.check_as_log(client_creds)
+
+ def test_authn_policy_tgt_lifetime_kpasswd_lifetime(self):
+ # Create an authentication policy with the TGT lifetime set to two
+ # minutes (the lifetime of a kpasswd ticket).
+ lifetime = 2 * 60
+ policy = self.create_authn_policy(enforced=True,
+ user_tgt_lifetime=lifetime)
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy)
+
+ # Request a Kerberos ticket with a lifetime of two hours, and assert
+ # that the actual lifetime matches the user lifetime set in the
+ # appropriate policy.
+ till = self.get_KerberosTime(offset=2 * 60 * 60) # 2 hours
+ tgt = self._get_tgt(client_creds, till=till)
+ self.check_ticket_times(tgt, expected_life=lifetime,
+ expected_renew_life=lifetime)
+
+ self.check_as_log(client_creds)
+
+ def test_authn_policy_tgt_lifetime_short_protected(self):
+ # Create an authentication policy with a short TGT lifetime set.
+ lifetime = 111
+ policy = self.create_authn_policy(enforced=True,
+ user_tgt_lifetime=lifetime)
+
+ # Create a user account with the assigned policy, belonging to the
+ # Protected Users group.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ protected=True,
+ assigned_policy=policy)
+
+ # Request a Kerberos ticket with a lifetime of two hours, and assert
+ # that the actual lifetime matches the user lifetime set in the policy.
+ till = self.get_KerberosTime(offset=2 * 60 * 60) # 2 hours
+ tgt = self._get_tgt(client_creds, till=till)
+ self.check_ticket_times(tgt, expected_life=lifetime,
+ expected_renew_life=lifetime)
+
+ self.check_as_log(client_creds)
+
+ def test_authn_policy_tgt_lifetime_long_protected(self):
+ # Create an authentication policy with a long TGT lifetime set. This
+ # exceeds the lifetime of four hours enforced by Protected Users.
+ lifetime = 6 * 60 * 60 # 6 hours
+ policy = self.create_authn_policy(enforced=True,
+ user_tgt_lifetime=lifetime)
+
+ # Create a user account with the assigned policy, belonging to the
+ # Protected Users group.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ protected=True,
+ assigned_policy=policy)
+
+ # Request a Kerberos ticket with a lifetime of eight hours, and assert
+ # that the actual lifetime matches the user lifetime set in the policy,
+ # taking precedence over the lifetime enforced by Protected Users.
+ till = self.get_KerberosTime(offset=8 * 60 * 60) # 8 hours
+ tgt = self._get_tgt(client_creds, till=till)
+ self.check_ticket_times(tgt, expected_life=lifetime,
+ expected_renew_life=lifetime)
+
+ self.check_as_log(client_creds)
+
+ # This variant of the test is adapted to the behaviour of Windows and MIT
+ # Kerberos. It asserts that tickets issued to Protected Users are neither
+ # forwardable nor proxiable.
+ def test_authn_policy_protected_flags_without_policy_error(self):
+ # Create an authentication policy with a TGT lifetime set.
+ lifetime = 6 * 60 * 60 # 6 hours
+ policy = self.create_authn_policy(enforced=True,
+ user_tgt_lifetime=lifetime)
+
+ # Create a user account with the assigned policy, belonging to the
+ # Protected Users group.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ protected=True,
+ assigned_policy=policy)
+
+ # Request a Kerberos ticket with a lifetime of eight hours, and request
+ # that it be renewable, forwardable and proxiable. Show that the
+ # returned ticket for the protected user is only renewable.
+ till = self.get_KerberosTime(offset=8 * 60 * 60) # 8 hours
+ tgt = self._get_tgt(
+ client_creds,
+ till=till,
+ kdc_options=str(krb5_asn1.KDCOptions(
+ 'renewable,forwardable,proxiable')),
+ expected_flags=krb5_asn1.TicketFlags('renewable'),
+ unexpected_flags=krb5_asn1.TicketFlags('forwardable,proxiable'))
+ self.check_ticket_times(tgt, expected_life=lifetime,
+ expected_renew_life=lifetime)
+
+ self.check_as_log(client_creds)
+
+ # This variant of the test is adapted to the behaviour of Heimdal
+ # Kerberos. It asserts that we get a policy error when requesting a
+ # proxiable ticket.
+ def test_authn_policy_protected_flags_with_policy_error(self):
+ # Create an authentication policy with a TGT lifetime set.
+ lifetime = 6 * 60 * 60 # 6 hours
+ policy = self.create_authn_policy(enforced=True,
+ user_tgt_lifetime=lifetime)
+
+ # Create a user account with the assigned policy, belonging to the
+ # Protected Users group.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ protected=True,
+ assigned_policy=policy)
+
+ # Request a Kerberos ticket with a lifetime of eight hours, and request
+ # that it be renewable and forwardable. Show that the returned ticket
+ # for the protected user is only renewable.
+ till = self.get_KerberosTime(offset=8 * 60 * 60) # 8 hours
+ tgt = self._get_tgt(
+ client_creds,
+ till=till,
+ kdc_options=str(krb5_asn1.KDCOptions('renewable,forwardable')),
+ expected_flags=krb5_asn1.TicketFlags('renewable'),
+ unexpected_flags=krb5_asn1.TicketFlags('forwardable'))
+ self.check_ticket_times(tgt, expected_life=lifetime,
+ expected_renew_life=lifetime)
+
+ self.check_as_log(client_creds)
+
+ # Request that the Kerberos ticket be proxiable. Show that we get a
+ # policy error.
+ self._get_tgt(client_creds,
+ till=till,
+ kdc_options=str(krb5_asn1.KDCOptions('proxiable')),
+ expected_error=KDC_ERR_POLICY)
+
+ self.check_as_log(client_creds,
+ status=ntstatus.NT_STATUS_INVALID_WORKSTATION)
+
+ def test_authn_policy_tgt_lifetime_zero_protected(self):
+ # Create an authentication policy with the TGT lifetime set to zero.
+ policy = self.create_authn_policy(enforced=True,
+ user_tgt_lifetime=0)
+
+ # Create a user account with the assigned policy, belonging to the
+ # Protected Users group.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ protected=True,
+ assigned_policy=policy)
+
+ # Request a Kerberos ticket with a lifetime of six hours, and assert
+ # that the actual lifetime is the four hours enforced by Protected
+ # Users.
+ till = self.get_KerberosTime(offset=6 * 60 * 60) # 6 hours
+ tgt = self._get_tgt(client_creds, till=till)
+ self.check_ticket_times(tgt, expected_life=4 * 60 * 60,
+ expected_renew_life=4 * 60 * 60)
+
+ self.check_as_log(client_creds)
+
+ def test_authn_policy_tgt_lifetime_none_protected(self):
+ # Create an authentication policy with no TGT lifetime set.
+ policy = self.create_authn_policy(enforced=True)
+
+ # Create a user account with the assigned policy, belonging to the
+ # Protected Users group.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ protected=True,
+ assigned_policy=policy)
+
+ # Request a Kerberos ticket with a lifetime of six hours, and assert
+ # that the actual lifetime is the four hours enforced by Protected
+ # Users.
+ till = self.get_KerberosTime(offset=6 * 60 * 60) # 6 hours
+ tgt = self._get_tgt(client_creds, till=till)
+ self.check_ticket_times(tgt, expected_life=4 * 60 * 60,
+ expected_renew_life=4 * 60 * 60)
+
+ self.check_as_log(client_creds)
+
+ def test_authn_policy_tgt_lifetime_unenforced_protected(self):
+ # Create an unenforced authentication policy with a TGT lifetime set.
+ lifetime = 123
+ policy = self.create_authn_policy(enforced=False,
+ user_tgt_lifetime=lifetime)
+
+ # Create a user account with the assigned policy, belonging to the
+ # Protected Users group.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ protected=True,
+ assigned_policy=policy)
+
+ # Request a Kerberos ticket with a lifetime of six hours, and assert
+ # that the actual lifetime is the four hours enforced by Protected
+ # Users.
+ till = self.get_KerberosTime(offset=6 * 60 * 60) # 6 hours
+ tgt = self._get_tgt(client_creds, till=till)
+ self.check_ticket_times(tgt, expected_life=4 * 60 * 60,
+ expected_renew_life=4 * 60 * 60)
+
+ self.check_as_log(client_creds)
+
+ def test_authn_policy_not_enforced(self):
+ # Create an authentication policy with the TGT lifetime set. The policy
+ # is not enforced.
+ lifetime = 123
+ policy = self.create_authn_policy(user_tgt_lifetime=lifetime)
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy)
+
+ # Request a Kerberos ticket with a ‘till’ time far in the
+ # future. Assert that the actual lifetime is the maximum allowed by
+ # the Default Domain Policy.
+ till = '99991231235959Z'
+ expected_lifetime = self.get_max_ticket_life()
+ expected_renew_life = self.get_max_renew_life()
+ tgt = self._get_tgt(client_creds, till=till)
+ self.check_ticket_times(tgt, expected_life=expected_lifetime,
+ expected_renew_life=expected_renew_life)
+
+ self.check_as_log(client_creds)
+
+ def test_authn_policy_unenforced(self):
+ # Create an authentication policy with the TGT lifetime set. The policy
+ # is set to be unenforced.
+ lifetime = 123
+ policy = self.create_authn_policy(enforced=False,
+ user_tgt_lifetime=lifetime)
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy)
+
+ # Request a Kerberos ticket with a ‘till’ time far in the
+ # future. Assert that the actual lifetime is the maximum allowed by
+ # the Default Domain Policy.
+ till = '99991231235959Z'
+ expected_lifetime = self.get_max_ticket_life()
+ expected_renew_life = self.get_max_renew_life()
+ tgt = self._get_tgt(client_creds, till=till)
+ self.check_ticket_times(tgt, expected_life=expected_lifetime,
+ expected_renew_life=expected_renew_life)
+
+ self.check_as_log(client_creds)
+
+ def test_authn_silo_not_enforced(self):
+ # Create an authentication policy with the TGT lifetime set.
+ lifetime = 123
+ policy = self.create_authn_policy(enforced=True,
+ user_tgt_lifetime=lifetime)
+
+ # Create an authentication silo with our existing policy. The silo is
+ # not enforced.
+ silo = self.create_authn_silo(user_policy=policy)
+
+ # Create a user account assigned to the silo.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_silo=silo)
+ client_dn_str = str(client_creds.get_dn())
+
+ # Add the user to the silo as a member.
+ self.add_to_group(client_dn_str, silo.dn,
+ 'msDS-AuthNPolicySiloMembers',
+ expect_attr=False)
+
+ # Request a Kerberos ticket with a ‘till’ time far in the
+ # future. Assert that the actual lifetime is the maximum allowed by
+ # the Default Domain Policy.
+ till = '99991231235959Z'
+ expected_lifetime = self.get_max_ticket_life()
+ expected_renew_life = self.get_max_renew_life()
+ tgt = self._get_tgt(client_creds, till=till)
+ self.check_ticket_times(tgt, expected_life=expected_lifetime,
+ expected_renew_life=expected_renew_life)
+
+ self.check_as_log(client_creds)
+
+ def test_authn_silo_unenforced(self):
+ # Create an authentication policy with the TGT lifetime set.
+ lifetime = 123
+ policy = self.create_authn_policy(enforced=True,
+ user_tgt_lifetime=lifetime)
+
+ # Create an authentication silo with our existing policy. The silo is
+ # set to be unenforced.
+ silo = self.create_authn_silo(user_policy=policy,
+ enforced=False)
+
+ # Create a user account assigned to the silo.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_silo=silo)
+ client_dn_str = str(client_creds.get_dn())
+
+ # Add the user to the silo as a member.
+ self.add_to_group(client_dn_str, silo.dn,
+ 'msDS-AuthNPolicySiloMembers',
+ expect_attr=False)
+
+ # Request a Kerberos ticket with a ‘till’ time far in the
+ # future. Assert that the actual lifetime is the maximum allowed by
+ # the Default Domain Policy.
+ till = '99991231235959Z'
+ expected_lifetime = self.get_max_ticket_life()
+ expected_renew_life = self.get_max_renew_life()
+ tgt = self._get_tgt(client_creds, till=till)
+ self.check_ticket_times(tgt, expected_life=expected_lifetime,
+ expected_renew_life=expected_renew_life)
+
+ self.check_as_log(client_creds)
+
+ def test_authn_silo_not_enforced_policy(self):
+ # Create an authentication policy with the TGT lifetime set. The policy
+ # is not enforced.
+ lifetime = 123
+ policy = self.create_authn_policy(user_tgt_lifetime=lifetime)
+
+ # Create an authentication silo with our existing policy.
+ silo = self.create_authn_silo(user_policy=policy,
+ enforced=True)
+
+ # Create a user account assigned to the silo.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_silo=silo)
+ client_dn_str = str(client_creds.get_dn())
+
+ # Add the user to the silo as a member.
+ self.add_to_group(client_dn_str, silo.dn,
+ 'msDS-AuthNPolicySiloMembers',
+ expect_attr=False)
+
+ # Request a Kerberos ticket with a lifetime of two hours. Despite the
+ # fact that the policy is unenforced, the actual lifetime matches the
+ # user lifetime set in the appropriate policy.
+ till = self.get_KerberosTime(offset=2 * 60 * 60) # 2 hours
+ tgt = self._get_tgt(client_creds, till=till)
+ self.check_ticket_times(tgt, expected_life=lifetime,
+ expected_renew_life=lifetime)
+
+ self.check_as_log(client_creds)
+
+ def test_authn_silo_unenforced_policy(self):
+ # Create an authentication policy with the TGT lifetime set. The policy
+ # is set to be unenforced.
+ lifetime = 123
+ policy = self.create_authn_policy(enforced=False,
+ user_tgt_lifetime=lifetime)
+
+ # Create an authentication silo with our existing policy.
+ silo = self.create_authn_silo(user_policy=policy,
+ enforced=True)
+
+ # Create a user account assigned to the silo.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_silo=silo)
+ client_dn_str = str(client_creds.get_dn())
+
+ # Add the user to the silo as a member.
+ self.add_to_group(client_dn_str, silo.dn,
+ 'msDS-AuthNPolicySiloMembers',
+ expect_attr=False)
+
+ # Request a Kerberos ticket with a lifetime of two hours. Despite the
+ # fact that the policy is unenforced, the actual lifetime matches the
+ # user lifetime set in the appropriate policy.
+ till = self.get_KerberosTime(offset=2 * 60 * 60) # 2 hours
+ tgt = self._get_tgt(client_creds, till=till)
+ self.check_ticket_times(tgt, expected_life=lifetime,
+ expected_renew_life=lifetime)
+
+ self.check_as_log(client_creds)
+
+ def test_authn_silo_not_enforced_and_assigned_policy(self):
+ # Create an authentication policy with the TGT lifetime set.
+ silo_lifetime = 123
+ silo_policy = self.create_authn_policy(enforced=True,
+ user_tgt_lifetime=silo_lifetime)
+
+ # Create an authentication silo with our existing policy. The silo is
+ # not enforced.
+ silo = self.create_authn_silo(user_policy=silo_policy)
+
+ # Create a second policy with a different lifetime, so we can verify
+ # the correct policy is enforced.
+ lifetime = 456
+ policy = self.create_authn_policy(enforced=True,
+ user_tgt_lifetime=lifetime)
+
+ # Create a user account assigned to the silo, and also to the policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_silo=silo,
+ assigned_policy=policy)
+ client_dn_str = str(client_creds.get_dn())
+
+ # Add the user to the silo as a member.
+ self.add_to_group(client_dn_str, silo.dn,
+ 'msDS-AuthNPolicySiloMembers',
+ expect_attr=False)
+
+ # Request a Kerberos ticket with a ‘till’ time far in the
+ # future. Assert that the actual lifetime is the maximum
+ # allowed by the Default Domain Policy. The directly-assigned
+ # policy is not enforced.
+ till = '99991231235959Z'
+ expected_lifetime = self.get_max_ticket_life()
+ expected_renew_life = self.get_max_renew_life()
+ tgt = self._get_tgt(client_creds, till=till)
+ self.check_ticket_times(tgt, expected_life=expected_lifetime,
+ expected_renew_life=expected_renew_life)
+
+ self.check_as_log(client_creds)
+
+ def test_authn_silo_unenforced_and_assigned_policy(self):
+ # Create an authentication policy with the TGT lifetime set.
+ silo_lifetime = 123
+ silo_policy = self.create_authn_policy(enforced=True,
+ user_tgt_lifetime=silo_lifetime)
+
+ # Create an authentication silo with our existing policy. The silo is
+ # set to be unenforced.
+ silo = self.create_authn_silo(user_policy=silo_policy,
+ enforced=False)
+
+ # Create a second policy with a different lifetime, so we can verify
+ # the correct policy is enforced.
+ lifetime = 456
+ policy = self.create_authn_policy(enforced=True,
+ user_tgt_lifetime=lifetime)
+
+ # Create a user account assigned to the silo, and also to the policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_silo=silo,
+ assigned_policy=policy)
+ client_dn_str = str(client_creds.get_dn())
+
+ # Add the user to the silo as a member.
+ self.add_to_group(client_dn_str, silo.dn,
+ 'msDS-AuthNPolicySiloMembers',
+ expect_attr=False)
+
+ # Request a Kerberos ticket with a ‘till’ time far in the
+ # future. Assert that the actual lifetime is the maximum
+ # allowed by the Default Domain Policy. The directly-assigned
+ # policy is not enforced.
+ till = '99991231235959Z'
+ expected_lifetime = self.get_max_ticket_life()
+ expected_renew_life = self.get_max_renew_life()
+ tgt = self._get_tgt(client_creds, till=till)
+ self.check_ticket_times(tgt, expected_life=expected_lifetime,
+ expected_renew_life=expected_renew_life)
+
+ self.check_as_log(client_creds)
+
+ def test_authn_silo_not_enforced_policy_and_assigned_policy(self):
+ # Create an authentication policy with the TGT lifetime set. The policy
+ # is not enforced.
+ silo_lifetime = 123
+ silo_policy = self.create_authn_policy(user_tgt_lifetime=silo_lifetime)
+
+ # Create an authentication silo with our existing policy.
+ silo = self.create_authn_silo(user_policy=silo_policy,
+ enforced=True)
+
+ # Create a second policy with a different lifetime, so we can verify
+ # the correct policy is enforced.
+ lifetime = 456
+ policy = self.create_authn_policy(enforced=True,
+ user_tgt_lifetime=lifetime)
+
+ # Create a user account assigned to the silo, and also to the policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_silo=silo,
+ assigned_policy=policy)
+ client_dn_str = str(client_creds.get_dn())
+
+ # Add the user to the silo as a member.
+ self.add_to_group(client_dn_str, silo.dn,
+ 'msDS-AuthNPolicySiloMembers',
+ expect_attr=False)
+
+ # Request a Kerberos ticket with a lifetime of two hours. Despite the
+ # fact that the policy is unenforced, the actual lifetime matches the
+ # user lifetime set in the appropriate policy. The directly-assigned
+ # policy is not enforced.
+ till = self.get_KerberosTime(offset=2 * 60 * 60) # 2 hours
+ tgt = self._get_tgt(client_creds, till=till)
+ self.check_ticket_times(tgt, expected_life=silo_lifetime,
+ expected_renew_life=silo_lifetime)
+
+ self.check_as_log(client_creds)
+
+ def test_authn_silo_unenforced_policy_and_assigned_policy(self):
+ # Create an authentication policy with the TGT lifetime set. The policy
+ # is set to be unenforced.
+ silo_lifetime = 123
+ silo_policy = self.create_authn_policy(enforced=False,
+ user_tgt_lifetime=silo_lifetime)
+
+ # Create an authentication silo with our existing policy.
+ silo = self.create_authn_silo(user_policy=silo_policy,
+ enforced=True)
+
+ # Create a second policy with a different lifetime, so we can verify
+ # the correct policy is enforced.
+ lifetime = 456
+ policy = self.create_authn_policy(enforced=True,
+ user_tgt_lifetime=lifetime)
+
+ # Create a user account assigned to the silo, and also to the policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_silo=silo,
+ assigned_policy=policy)
+ client_dn_str = str(client_creds.get_dn())
+
+ # Add the user to the silo as a member.
+ self.add_to_group(client_dn_str, silo.dn,
+ 'msDS-AuthNPolicySiloMembers',
+ expect_attr=False)
+
+ # Request a Kerberos ticket with a lifetime of two hours. Despite the
+ # fact that the policy is unenforced, the actual lifetime matches the
+ # user lifetime set in the appropriate policy. The directly-assigned
+ # policy is not enforced.
+ till = self.get_KerberosTime(offset=2 * 60 * 60) # 2 hours
+ tgt = self._get_tgt(client_creds, till=till)
+ self.check_ticket_times(tgt, expected_life=silo_lifetime,
+ expected_renew_life=silo_lifetime)
+
+ self.check_as_log(client_creds)
+
+ def test_authn_silo_not_a_member(self):
+ # Create an authentication policy with the TGT lifetime set.
+ lifetime = 123
+ policy = self.create_authn_policy(enforced=True,
+ user_tgt_lifetime=lifetime)
+
+ # Create an authentication silo with our existing policy.
+ silo = self.create_authn_silo(user_policy=policy,
+ enforced=True)
+
+ # Create a user account assigned to the silo.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_silo=silo)
+
+ # Do not add the user to the silo as a member.
+
+ # Request a Kerberos ticket with a ‘till’ time far in the
+ # future. Assert that the actual lifetime is the maximum allowed by
+ # the Default Domain Policy.
+ till = '99991231235959Z'
+ expected_lifetime = self.get_max_ticket_life()
+ expected_renew_life = self.get_max_renew_life()
+ tgt = self._get_tgt(client_creds, till=till)
+ self.check_ticket_times(tgt, expected_life=expected_lifetime,
+ expected_renew_life=expected_renew_life)
+
+ self.check_as_log(client_creds)
+
+ def test_authn_silo_not_a_member_and_assigned_policy(self):
+ # Create an authentication policy with the TGT lifetime set.
+ silo_lifetime = 123
+ silo_policy = self.create_authn_policy(enforced=True,
+ user_tgt_lifetime=silo_lifetime)
+
+ # Create an authentication silo with our existing policy.
+ silo = self.create_authn_silo(user_policy=silo_policy,
+ enforced=True)
+
+ # Create a second policy with a different lifetime, so we can verify
+ # the correct policy is enforced.
+ lifetime = 456
+ policy = self.create_authn_policy(enforced=True,
+ user_tgt_lifetime=lifetime)
+
+ # Create a user account assigned to the silo, and also to the policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_silo=silo,
+ assigned_policy=policy)
+
+ # Do not add the user to the silo as a member.
+
+ # Request a Kerberos ticket with a lifetime of two hours, and assert
+ # that the actual lifetime matches the user lifetime set in the
+ # directly-assigned policy.
+ till = self.get_KerberosTime(offset=2 * 60 * 60) # 2 hours
+ tgt = self._get_tgt(client_creds, till=till)
+ self.check_ticket_times(tgt, expected_life=lifetime,
+ expected_renew_life=lifetime)
+
+ self.check_as_log(client_creds)
+
+ def test_authn_silo_not_assigned(self):
+ # Create an authentication policy with the TGT lifetime set.
+ lifetime = 123
+ policy = self.create_authn_policy(enforced=True,
+ user_tgt_lifetime=lifetime)
+
+ # Create an authentication silo with our existing policies.
+ silo = self.create_authn_silo(user_policy=policy,
+ enforced=True)
+
+ # Create a user account, but don’t assign it to the silo.
+ client_creds = self.get_cached_creds(
+ account_type=self.AccountType.USER)
+ client_dn_str = str(client_creds.get_dn())
+
+ # Add the user to the silo as a member.
+ self.add_to_group(client_dn_str, silo.dn,
+ 'msDS-AuthNPolicySiloMembers',
+ expect_attr=False)
+
+ # Request a Kerberos ticket with a ‘till’ time far in the
+ # future. Assert that the actual lifetime is the maximum allowed by
+ # the Default Domain Policy.
+ till = '99991231235959Z'
+ expected_lifetime = self.get_max_ticket_life()
+ expected_renew_life = self.get_max_renew_life()
+ tgt = self._get_tgt(client_creds, till=till)
+ self.check_ticket_times(tgt, expected_life=expected_lifetime,
+ expected_renew_life=expected_renew_life)
+
+ self.check_as_log(client_creds)
+
+ def test_authn_silo_not_assigned_and_assigned_policy(self):
+ # Create an authentication policy with the TGT lifetime set.
+ lifetime = 123
+ policy = self.create_authn_policy(enforced=True,
+ user_tgt_lifetime=lifetime)
+
+ # Create an authentication silo with our existing policies.
+ silo = self.create_authn_silo(user_policy=policy,
+ enforced=True)
+
+ # Create a second policy with a different lifetime, so we can verify
+ # the correct policy is enforced.
+ lifetime = 456
+ policy = self.create_authn_policy(enforced=True,
+ user_tgt_lifetime=lifetime)
+
+ # Create a user account assigned to the policy, but not to the silo.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy)
+ client_dn_str = str(client_creds.get_dn())
+
+ # Add the user to the silo as a member.
+ self.add_to_group(client_dn_str, silo.dn,
+ 'msDS-AuthNPolicySiloMembers',
+ expect_attr=False)
+
+ # Request a Kerberos ticket with a lifetime of two hours, and assert
+ # that the actual lifetime matches the user lifetime set in the
+ # directly-assigned policy.
+ till = self.get_KerberosTime(offset=2 * 60 * 60) # 2 hours
+ tgt = self._get_tgt(client_creds, till=till)
+ self.check_ticket_times(tgt, expected_life=lifetime,
+ expected_renew_life=lifetime)
+
+ self.check_as_log(client_creds)
+
+ def test_authn_silo_no_applicable_policy(self):
+ # Create an authentication policy with the TGT lifetime set.
+ user_life = 111
+ policy = self.create_authn_policy(enforced=True,
+ user_tgt_lifetime=user_life)
+
+ # Create an authentication silo containing no policies.
+ silo = self.create_authn_silo(enforced=True)
+
+ # Create a user account assigned to the silo, and also to a policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_silo=silo,
+ assigned_policy=policy)
+ client_dn_str = str(client_creds.get_dn())
+
+ # Add the user to the silo as a member.
+ self.add_to_group(client_dn_str, silo.dn,
+ 'msDS-AuthNPolicySiloMembers',
+ expect_attr=False)
+
+ # Request a Kerberos ticket with a ‘till’ time far in the
+ # future, and assert that the actual lifetime is the maximum
+ # allowed by the Default Domain Policy.
+ till = '99991231235959Z'
+ expected_lifetime = self.get_max_ticket_life()
+ expected_renew_life = self.get_max_renew_life()
+ tgt = self._get_tgt(client_creds, till=till)
+ self.check_ticket_times(tgt, expected_life=expected_lifetime,
+ expected_renew_life=expected_renew_life)
+
+ self.check_as_log(client_creds)
+
+ def test_authn_silo_no_tgt_lifetime(self):
+ # Create an authentication policy with no TGT lifetime set.
+ silo_policy = self.create_authn_policy(enforced=True)
+
+ # Create a second policy with a lifetime set, so we can verify the
+ # correct policy is enforced.
+ policy = self.create_authn_policy(enforced=True,
+ user_tgt_lifetime=456)
+
+ # Create an authentication silo with our existing policy.
+ silo = self.create_authn_silo(user_policy=silo_policy,
+ enforced=True)
+
+ # Create a user account assigned to the silo, and also to a policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_silo=silo,
+ assigned_policy=policy)
+ client_dn_str = str(client_creds.get_dn())
+
+ # Add the user to the silo as a member.
+ self.add_to_group(client_dn_str, silo.dn,
+ 'msDS-AuthNPolicySiloMembers',
+ expect_attr=False)
+
+ # Request a Kerberos ticket with a ‘till’ time far in the
+ # future, and assert that the actual lifetime is the maximum
+ # allowed by the Default Domain Policy.
+ till = '99991231235959Z'
+ expected_lifetime = self.get_max_ticket_life()
+ expected_renew_life = self.get_max_renew_life()
+ tgt = self._get_tgt(client_creds, till=till)
+ self.check_ticket_times(tgt, expected_life=expected_lifetime,
+ expected_renew_life=expected_renew_life)
+
+ self.check_as_log(client_creds)
+
+ def test_not_a_policy(self):
+ samdb = self.get_samdb()
+
+ not_a_policy = AuthenticationPolicy()
+ not_a_policy.dn = samdb.get_default_basedn()
+
+ # Create a user account with the assigned policy set to something that
+ # isn’t a policy.
+ client_creds = self._get_creds(
+ account_type=self.AccountType.USER,
+ assigned_policy=not_a_policy)
+
+ # Request a Kerberos ticket with a ‘till’ time far in the
+ # future, and assert that the actual lifetime is the maximum
+ # allowed by the Default Domain Policy.
+ till = '99991231235959Z'
+ expected_lifetime = self.get_max_ticket_life()
+ expected_renew_life = self.get_max_renew_life()
+ tgt = self._get_tgt(client_creds, till=till)
+ self.check_ticket_times(tgt, expected_life=expected_lifetime,
+ expected_renew_life=expected_renew_life)
+
+ self.check_as_log(client_creds)
+
+ def test_not_a_silo(self):
+ samdb = self.get_samdb()
+
+ not_a_silo = AuthenticationSilo()
+ not_a_silo.dn = samdb.get_default_basedn()
+
+ # Create a user account assigned to a silo that isn’t a silo.
+ client_creds = self._get_creds(
+ account_type=self.AccountType.USER,
+ assigned_silo=not_a_silo)
+
+ # Request a Kerberos ticket with a ‘till’ time far in the
+ # future, and assert that the actual lifetime is the maximum
+ # allowed by the Default Domain Policy.
+ till = '99991231235959Z'
+ expected_lifetime = self.get_max_ticket_life()
+ expected_renew_life = self.get_max_renew_life()
+ tgt = self._get_tgt(client_creds, till=till)
+ self.check_ticket_times(tgt, expected_life=expected_lifetime,
+ expected_renew_life=expected_renew_life)
+
+ self.check_as_log(client_creds)
+
+ def test_not_a_silo_and_policy(self):
+ samdb = self.get_samdb()
+
+ not_a_silo = AuthenticationSilo()
+ not_a_silo.dn = samdb.get_default_basedn()
+
+ # Create an authentication policy with the TGT lifetime set.
+ user_life = 123
+ policy = self.create_authn_policy(enforced=True,
+ user_tgt_lifetime=user_life)
+
+ # Create a user account assigned to a silo that isn’t a silo, and also
+ # to a policy.
+ client_creds = self._get_creds(
+ account_type=self.AccountType.USER,
+ assigned_silo=not_a_silo,
+ assigned_policy=policy)
+
+ # Request a Kerberos ticket with a lifetime of two hours, and assert
+ # that the actual lifetime matches the user lifetime set in the
+ # directly-assigned policy.
+ till = self.get_KerberosTime(offset=2 * 60 * 60) # 2 hours
+ tgt = self._get_tgt(client_creds, till=till)
+ self.check_ticket_times(tgt, expected_life=user_life,
+ expected_renew_life=user_life)
+
+ self.check_as_log(client_creds)
+
+ def test_authn_policy_allowed_from_empty(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create an authentication policy with no DACL in the security
+ # descriptor.
+ allowed_from = 'O:SY'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_from=allowed_from)
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy)
+
+ # Show that we can authenticate using an armor ticket.
+ self._get_tgt(client_creds, armor_tgt=mach_tgt)
+
+ self.check_as_log(client_creds,
+ armor_creds=mach_creds,
+ client_policy=policy)
+
+ def test_authn_policy_allowed_from_user_allow(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create an authentication policy that explicitly allows the machine
+ # account for a user. Include some different TGT lifetimes for testing
+ # what gets logged.
+ allowed = f'O:SYD:(A;;CR;;;{mach_creds.get_sid()})'
+ denied = 'O:SYD:(D;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_from=allowed,
+ user_tgt_lifetime=120,
+ computer_tgt_lifetime=240,
+ service_allowed_from=denied,
+ service_tgt_lifetime=360)
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy)
+
+ # Show that we can authenticate using an armor ticket.
+ self._get_tgt(client_creds, armor_tgt=mach_tgt)
+
+ self.check_as_log(client_creds,
+ armor_creds=mach_creds,
+ client_policy=policy)
+
+ def test_authn_policy_allowed_from_user_deny(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create an authentication policy that explicitly denies the machine
+ # account for a user. Include some different TGT lifetimes for testing
+ # what gets logged.
+ allowed = 'O:SYD:(A;;CR;;;WD)'
+ denied = f'O:SYD:(D;;CR;;;{mach_creds.get_sid()})'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_from=denied,
+ user_tgt_lifetime=120,
+ computer_tgt_lifetime=240,
+ service_allowed_from=allowed,
+ service_tgt_lifetime=360)
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy)
+
+ # Show that we get a policy error when trying to authenticate.
+ self._get_tgt(client_creds, armor_tgt=mach_tgt,
+ expected_error=KDC_ERR_POLICY)
+
+ self.check_as_log(
+ client_creds,
+ armor_creds=mach_creds,
+ client_policy=policy,
+ client_policy_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.KERBEROS_DEVICE_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED,
+ status=ntstatus.NT_STATUS_INVALID_WORKSTATION)
+
+ def test_authn_policy_bad_pwd_allowed_from_user_deny(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create an authentication policy that explicitly denies the machine
+ # account for a user.
+ allowed = 'O:SYD:(A;;CR;;;WD)'
+ denied = f'O:SYD:(D;;CR;;;{mach_creds.get_sid()})'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_from=denied,
+ service_allowed_from=allowed)
+
+ # Create a user account with the assigned policy. Use a non-cached
+ # account so that it is not locked out for other tests.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy,
+ cached=False)
+
+ # Set a wrong password.
+ client_creds.set_password('wrong password')
+
+ # Show that we get a policy error when trying to authenticate.
+ self._get_tgt(client_creds, armor_tgt=mach_tgt,
+ expected_error=KDC_ERR_POLICY)
+
+ self.check_as_log(
+ client_creds,
+ armor_creds=mach_creds,
+ client_policy=policy,
+ client_policy_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.KERBEROS_DEVICE_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED,
+ status=ntstatus.NT_STATUS_INVALID_WORKSTATION)
+
+ def test_authn_policy_allowed_from_service_allow(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create an authentication policy that explicitly allows the machine
+ # account for a service.
+ allowed = f'O:SYD:(A;;CR;;;{mach_creds.get_sid()})'
+ denied = 'O:SYD:(D;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_from=denied,
+ service_allowed_from=allowed)
+
+ # Create a managed service account with the assigned policy.
+ client_creds = self._get_creds(
+ account_type=self.AccountType.MANAGED_SERVICE,
+ assigned_policy=policy)
+
+ # Show that we can authenticate using an armor ticket.
+ self._get_tgt(client_creds, armor_tgt=mach_tgt)
+
+ self.check_as_log(client_creds,
+ armor_creds=mach_creds,
+ client_policy=policy)
+
+ def test_authn_policy_allowed_from_service_deny(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create an authentication policy that explicitly denies the machine
+ # account for a service.
+ allowed = 'O:SYD:(A;;CR;;;WD)'
+ denied = f'O:SYD:(D;;CR;;;{mach_creds.get_sid()})'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_from=allowed,
+ service_allowed_from=denied)
+
+ # Create a managed service account with the assigned policy.
+ client_creds = self._get_creds(
+ account_type=self.AccountType.MANAGED_SERVICE,
+ assigned_policy=policy)
+
+ # Show that we get a policy error when trying to authenticate.
+ self._get_tgt(client_creds, armor_tgt=mach_tgt,
+ expected_error=KDC_ERR_POLICY)
+
+ self.check_as_log(
+ client_creds,
+ armor_creds=mach_creds,
+ client_policy=policy,
+ client_policy_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.KERBEROS_DEVICE_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED,
+ status=ntstatus.NT_STATUS_INVALID_WORKSTATION)
+
+ def test_authn_policy_allowed_from_no_owner(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create an authentication policy that explicitly allows the machine
+ # account for a user. Omit the owner (O:SY) from the SDDL. Enforce a
+ # TGT lifetime for testing what gets logged.
+ allowed = 'D:(A;;CR;;;WD)'
+ INT64_MAX = 0x7fff_ffff_ffff_ffff
+ max_lifetime = INT64_MAX // 10_000_000
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_from=allowed,
+ user_tgt_lifetime=max_lifetime)
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy)
+
+ # Show that we get a generic error if the security descriptor lacks an
+ # owner.
+ self._get_tgt(client_creds, armor_tgt=mach_tgt,
+ expected_error=KDC_ERR_GENERIC)
+
+ self.check_as_log(
+ client_creds,
+ armor_creds=mach_creds,
+ client_policy=policy,
+ client_policy_status=ntstatus.NT_STATUS_INVALID_PARAMETER,
+ event=AuditEvent.KERBEROS_DEVICE_RESTRICTION,
+ reason=AuditReason.DESCRIPTOR_NO_OWNER,
+ status=ntstatus.NT_STATUS_UNSUCCESSFUL)
+
+ def test_authn_policy_allowed_from_no_owner_unenforced(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create an unenforced authentication policy that explicitly allows the
+ # machine account for a user. Omit the owner (O:SY) from the SDDL.
+ allowed = 'D:(A;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=False,
+ user_allowed_from=allowed)
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy)
+
+ # Show that we don’t get an error if the policy is unenforced.
+ self._get_tgt(client_creds, armor_tgt=mach_tgt)
+
+ self.check_as_log(client_creds,
+ armor_creds=mach_creds,
+ client_policy=policy,
+ client_policy_status=ntstatus.NT_STATUS_INVALID_PARAMETER,
+ event=AuditEvent.KERBEROS_DEVICE_RESTRICTION,
+ reason=AuditReason.DESCRIPTOR_NO_OWNER)
+
+ def test_authn_policy_allowed_from_owner_self(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create an authentication policy that explicitly allows the machine
+ # account for a user. Set the owner to the machine account.
+ allowed = f'O:{mach_creds.get_sid()}D:(A;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_from=allowed)
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy)
+
+ # Show that we can authenticate using an armor ticket.
+ self._get_tgt(client_creds, armor_tgt=mach_tgt)
+
+ self.check_as_log(client_creds,
+ armor_creds=mach_creds,
+ client_policy=policy)
+
+ def test_authn_policy_allowed_from_owner_anon(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create an authentication policy that explicitly allows the machine
+ # account for a user. Set the owner to be anonymous.
+ allowed = 'O:AND:(A;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_from=allowed)
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy)
+
+ # Show that we can authenticate using an armor ticket.
+ self._get_tgt(client_creds, armor_tgt=mach_tgt)
+
+ self.check_as_log(client_creds,
+ armor_creds=mach_creds,
+ client_policy=policy)
+
+ def test_authn_policy_allowed_from_no_fast(self):
+ # Create an authentication policy that restricts authentication.
+ # Include some different TGT lifetimes for testing what gets logged.
+ allowed_from = 'O:SY'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_from=allowed_from,
+ user_tgt_lifetime=115,
+ computer_tgt_lifetime=235,
+ service_tgt_lifetime=355)
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy)
+
+ # Show that we cannot authenticate without using an armor ticket.
+ self._get_tgt(client_creds, expected_error=KDC_ERR_POLICY,
+ expect_status=True,
+ expected_status=ntstatus.NT_STATUS_INVALID_WORKSTATION)
+
+ self.check_as_log(
+ client_creds,
+ client_policy=policy,
+ client_policy_status=ntstatus.NT_STATUS_INVALID_WORKSTATION,
+ event=AuditEvent.KERBEROS_DEVICE_RESTRICTION,
+ reason=AuditReason.FAST_REQUIRED)
+
+ def test_authn_policy_allowed_from_no_fast_negative_lifetime(self):
+ # Create an authentication policy that restricts
+ # authentication. Include some negative TGT lifetimes for testing what
+ # gets logged.
+ allowed_from = 'O:SY'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_from=allowed_from,
+ user_tgt_lifetime=-115,
+ computer_tgt_lifetime=-235,
+ service_tgt_lifetime=-355)
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy)
+
+ # Show that we cannot authenticate without using an armor ticket.
+ self._get_tgt(client_creds, expected_error=KDC_ERR_POLICY,
+ expect_status=True,
+ expected_status=ntstatus.NT_STATUS_INVALID_WORKSTATION)
+
+ self.check_as_log(
+ client_creds,
+ client_policy=policy,
+ client_policy_status=ntstatus.NT_STATUS_INVALID_WORKSTATION,
+ event=AuditEvent.KERBEROS_DEVICE_RESTRICTION,
+ reason=AuditReason.FAST_REQUIRED)
+
+ def test_authn_policy_allowed_from_no_fast_unenforced(self):
+ # Create an unenforced authentication policy that restricts
+ # authentication.
+ allowed_from = 'O:SY'
+ policy = self.create_authn_policy(enforced=False,
+ user_allowed_from=allowed_from)
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy)
+
+ # Show that we don’t get an error when the policy is unenforced.
+ self._get_tgt(client_creds)
+
+ self.check_as_log(
+ client_creds,
+ client_policy=policy,
+ client_policy_status=ntstatus.NT_STATUS_INVALID_WORKSTATION,
+ event=AuditEvent.KERBEROS_DEVICE_RESTRICTION,
+ reason=AuditReason.FAST_REQUIRED)
+
+ def test_authn_policy_allowed_from_user_allow_group_not_a_member(self):
+ samdb = self.get_samdb()
+
+ # Create a new group.
+ group_name = self.get_new_username()
+ group_dn = self.create_group(samdb, group_name)
+ group_sid = self.get_objectSid(samdb, group_dn)
+
+ # Create a machine account with which to perform FAST and which does
+ # not belong to the group.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create an authentication policy that allows accounts belonging to the
+ # group.
+ allowed = f'O:SYD:(A;;CR;;;{group_sid})'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_from=allowed)
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy)
+
+ # Show that we get a policy error, as the machine account does not
+ # belong to the group.
+ self._get_tgt(client_creds, armor_tgt=mach_tgt,
+ expected_error=KDC_ERR_POLICY)
+
+ self.check_as_log(
+ client_creds,
+ armor_creds=mach_creds,
+ client_policy=policy,
+ client_policy_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.KERBEROS_DEVICE_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED,
+ status=ntstatus.NT_STATUS_INVALID_WORKSTATION)
+
+ def test_authn_policy_allowed_from_user_allow_group_member(self):
+ samdb = self.get_samdb()
+
+ # Create a new group.
+ group_name = self.get_new_username()
+ group_dn = self.create_group(samdb, group_name)
+ group_sid = self.get_objectSid(samdb, group_dn)
+
+ # Create a machine account with which to perform FAST that belongs to
+ # the group.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={'member_of': (group_dn,)})
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create an authentication policy that allows accounts belonging to the
+ # group.
+ allowed = f'O:SYD:(A;;CR;;;{group_sid})'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_from=allowed)
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy)
+
+ # Show that we can authenticate using an armor ticket, since the
+ # machine account belongs to the group.
+ self._get_tgt(client_creds, armor_tgt=mach_tgt)
+
+ self.check_as_log(client_creds,
+ armor_creds=mach_creds,
+ client_policy=policy)
+
+ def test_authn_policy_allowed_from_user_allow_domain_local_group(self):
+ samdb = self.get_samdb()
+
+ # Create a new domain-local group.
+ group_name = self.get_new_username()
+ group_dn = self.create_group(samdb, group_name,
+ gtype=GroupType.DOMAIN_LOCAL.value)
+ group_sid = self.get_objectSid(samdb, group_dn)
+
+ # Create a machine account with which to perform FAST that belongs to
+ # the group.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={'member_of': (group_dn,)})
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create an authentication policy that allows accounts belonging to the
+ # group.
+ allowed = f'O:SYD:(A;;CR;;;{group_sid})'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_from=allowed)
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy)
+
+ # Show that the groups in the armor ticket are expanded to include the
+ # domain-local group.
+ self._get_tgt(client_creds, armor_tgt=mach_tgt)
+
+ self.check_as_log(client_creds,
+ armor_creds=mach_creds,
+ client_policy=policy)
+
+ def test_authn_policy_allowed_from_user_allow_asserted_identity(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create an authentication policy that allows accounts with the
+ # Authentication Authority Asserted Identity SID.
+ allowed = (
+ f'O:SYD:(A;;CR;;;'
+ f'{security.SID_AUTHENTICATION_AUTHORITY_ASSERTED_IDENTITY})'
+ )
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_from=allowed)
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy)
+
+ # Show that authentication is allowed.
+ self._get_tgt(client_creds, armor_tgt=mach_tgt)
+
+ self.check_as_log(client_creds,
+ armor_creds=mach_creds,
+ client_policy=policy)
+
+ def test_authn_policy_allowed_from_user_allow_claims_valid(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create an authentication policy that allows accounts with the
+ # Claims Valid SID.
+ allowed = f'O:SYD:(A;;CR;;;{security.SID_CLAIMS_VALID})'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_from=allowed)
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy)
+
+ # Show that authentication is allowed.
+ self._get_tgt(client_creds, armor_tgt=mach_tgt)
+
+ self.check_as_log(client_creds,
+ armor_creds=mach_creds,
+ client_policy=policy)
+
+ def test_authn_policy_allowed_from_user_allow_compounded_auth(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create an authentication policy that allows accounts with the
+ # Compounded Authentication SID.
+ allowed = f'O:SYD:(A;;CR;;;{security.SID_COMPOUNDED_AUTHENTICATION})'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_from=allowed)
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy)
+
+ # Show that authentication is denied.
+ self._get_tgt(client_creds, armor_tgt=mach_tgt,
+ expected_error=KDC_ERR_POLICY)
+
+ self.check_as_log(
+ client_creds,
+ armor_creds=mach_creds,
+ client_policy=policy,
+ client_policy_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.KERBEROS_DEVICE_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED,
+ status=ntstatus.NT_STATUS_INVALID_WORKSTATION)
+
+ def test_authn_policy_allowed_from_user_allow_authenticated_users(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create an authentication policy that allows accounts with the
+ # Authenticated Users SID.
+ allowed = f'O:SYD:(A;;CR;;;{security.SID_NT_AUTHENTICATED_USERS})'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_from=allowed)
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy)
+
+ # Show that authentication is allowed.
+ self._get_tgt(client_creds, armor_tgt=mach_tgt)
+
+ self.check_as_log(client_creds,
+ armor_creds=mach_creds,
+ client_policy=policy)
+
+ def test_authn_policy_allowed_from_user_allow_ntlm_authn(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create an authentication policy that allows accounts with the NTLM
+ # Authentication SID.
+ allowed = f'O:SYD:(A;;CR;;;{security.SID_NT_NTLM_AUTHENTICATION})'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_from=allowed)
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy)
+
+ # Show that authentication is denied.
+ self._get_tgt(client_creds, armor_tgt=mach_tgt,
+ expected_error=KDC_ERR_POLICY)
+
+ self.check_as_log(
+ client_creds,
+ armor_creds=mach_creds,
+ client_policy=policy,
+ client_policy_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.KERBEROS_DEVICE_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED,
+ status=ntstatus.NT_STATUS_INVALID_WORKSTATION)
+
+ def test_authn_policy_allowed_from_user_allow_from_rodc(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
+ allowed_rodc=True)
+ # Modify the TGT to be issued by an RODC.
+ mach_tgt = self.issued_by_rodc(self.get_tgt(mach_creds))
+
+ # Create an authentication policy that explicitly allows the machine
+ # account for a user.
+ allowed = f'O:SYD:(A;;CR;;;{mach_creds.get_sid()})'
+ denied = 'O:SYD:(D;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_from=allowed,
+ service_allowed_from=denied)
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy)
+
+ # Show that we can authenticate using an armor ticket.
+ self._get_tgt(client_creds, armor_tgt=mach_tgt)
+
+ self.check_as_log(client_creds,
+ armor_creds=mach_creds,
+ client_policy=policy)
+
+ def test_authn_policy_allowed_from_user_deny_from_rodc(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
+ allowed_rodc=True)
+ # Modify the TGT to be issued by an RODC.
+ mach_tgt = self.issued_by_rodc(self.get_tgt(mach_creds))
+
+ # Create an authentication policy that explicitly denies the machine
+ # account for a user.
+ allowed = 'O:SYD:(A;;CR;;;WD)'
+ denied = f'O:SYD:(D;;CR;;;{mach_creds.get_sid()})'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_from=denied,
+ service_allowed_from=allowed)
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy)
+
+ # Show that we get a policy error when trying to authenticate.
+ self._get_tgt(client_creds, armor_tgt=mach_tgt,
+ expected_error=KDC_ERR_POLICY)
+
+ self.check_as_log(
+ client_creds,
+ armor_creds=mach_creds,
+ client_policy=policy,
+ client_policy_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.KERBEROS_DEVICE_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED,
+ status=ntstatus.NT_STATUS_INVALID_WORKSTATION)
+
+ def test_authn_policy_allowed_from_service_allow_from_rodc(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
+ allowed_rodc=True)
+ # Modify the TGT to be issued by an RODC.
+ mach_tgt = self.issued_by_rodc(self.get_tgt(mach_creds))
+
+ # Create an authentication policy that explicitly allows the machine
+ # account for a service.
+ allowed = f'O:SYD:(A;;CR;;;{mach_creds.get_sid()})'
+ denied = 'O:SYD:(D;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_from=denied,
+ service_allowed_from=allowed)
+
+ # Create a managed service account with the assigned policy.
+ client_creds = self._get_creds(
+ account_type=self.AccountType.MANAGED_SERVICE,
+ assigned_policy=policy)
+
+ # Show that we can authenticate using an armor ticket.
+ self._get_tgt(client_creds, armor_tgt=mach_tgt)
+
+ self.check_as_log(client_creds,
+ armor_creds=mach_creds,
+ client_policy=policy)
+
+ def test_authn_policy_allowed_from_service_deny_from_rodc(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
+ allowed_rodc=True)
+ # Modify the TGT to be issued by an RODC.
+ mach_tgt = self.issued_by_rodc(self.get_tgt(mach_creds))
+
+ # Create an authentication policy that explicitly denies the machine
+ # account for a service.
+ allowed = 'O:SYD:(A;;CR;;;WD)'
+ denied = f'O:SYD:(D;;CR;;;{mach_creds.get_sid()})'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_from=allowed,
+ service_allowed_from=denied)
+
+ # Create a managed service account with the assigned policy.
+ client_creds = self._get_creds(
+ account_type=self.AccountType.MANAGED_SERVICE,
+ assigned_policy=policy)
+
+ # Show that we get a policy error when trying to authenticate.
+ self._get_tgt(client_creds, armor_tgt=mach_tgt,
+ expected_error=KDC_ERR_POLICY)
+
+ self.check_as_log(
+ client_creds,
+ armor_creds=mach_creds,
+ client_policy=policy,
+ client_policy_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.KERBEROS_DEVICE_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED,
+ status=ntstatus.NT_STATUS_INVALID_WORKSTATION)
+
+ def test_authn_policy_allowed_from_user_allow_group_not_a_member_from_rodc(self):
+ samdb = self.get_samdb()
+
+ # Create a new group.
+ group_name = self.get_new_username()
+ group_dn = self.create_group(samdb, group_name)
+ group_sid = self.get_objectSid(samdb, group_dn)
+
+ # Create a machine account with which to perform FAST and which does
+ # not belong to the group.
+ mach_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
+ allowed_rodc=True)
+ # Modify the TGT to be issued by an RODC.
+ mach_tgt = self.issued_by_rodc(self.get_tgt(mach_creds))
+
+ # Create an authentication policy that allows accounts belonging to the
+ # group.
+ allowed = f'O:SYD:(A;;CR;;;{group_sid})'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_from=allowed)
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy)
+
+ # Show that we get a policy error, as the machine account does not
+ # belong to the group.
+ self._get_tgt(client_creds, armor_tgt=mach_tgt,
+ expected_error=KDC_ERR_POLICY)
+
+ self.check_as_log(
+ client_creds,
+ armor_creds=mach_creds,
+ client_policy=policy,
+ client_policy_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.KERBEROS_DEVICE_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED,
+ status=ntstatus.NT_STATUS_INVALID_WORKSTATION)
+
+ def test_authn_policy_allowed_from_user_allow_group_member_from_rodc(self):
+ samdb = self.get_samdb()
+
+ # Create a new group.
+ group_name = self.get_new_username()
+ group_dn = self.create_group(samdb, group_name)
+ group_sid = self.get_objectSid(samdb, group_dn)
+
+ # Create a machine account with which to perform FAST that belongs to
+ # the group.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={'member_of': (group_dn,),
+ 'allowed_replication_mock': True,
+ 'revealed_to_mock_rodc': True})
+ # Modify the TGT to be issued by an RODC.
+ mach_tgt = self.issued_by_rodc(self.get_tgt(mach_creds))
+
+ # Create an authentication policy that allows accounts belonging to the
+ # group.
+ allowed = f'O:SYD:(A;;CR;;;{group_sid})'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_from=allowed)
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy)
+
+ # Show that we can authenticate using an armor ticket, since the
+ # machine account belongs to the group.
+ self._get_tgt(client_creds, armor_tgt=mach_tgt)
+
+ self.check_as_log(client_creds,
+ armor_creds=mach_creds,
+ client_policy=policy)
+
+ def test_authn_policy_allowed_from_user_allow_domain_local_group_from_rodc(self):
+ samdb = self.get_samdb()
+
+ # Create a new domain-local group.
+ group_name = self.get_new_username()
+ group_dn = self.create_group(samdb, group_name,
+ gtype=GroupType.DOMAIN_LOCAL.value)
+ group_sid = self.get_objectSid(samdb, group_dn)
+
+ # Create a machine account with which to perform FAST that belongs to
+ # the group.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={'member_of': (group_dn,),
+ 'allowed_replication_mock': True,
+ 'revealed_to_mock_rodc': True})
+ # Modify the TGT to be issued by an RODC.
+ mach_tgt = self.issued_by_rodc(self.get_tgt(mach_creds))
+
+ # Create an authentication policy that allows accounts belonging to the
+ # group.
+ allowed = f'O:SYD:(A;;CR;;;{group_sid})'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_from=allowed)
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy)
+
+ # Show that the groups in the armor ticket are expanded to include the
+ # domain-local group.
+ self._get_tgt(client_creds, armor_tgt=mach_tgt)
+
+ self.check_as_log(client_creds,
+ armor_creds=mach_creds,
+ client_policy=policy)
+
+ def test_authn_policy_allowed_from_user_allow_asserted_identity_from_rodc(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
+ allowed_rodc=True)
+ # Modify the TGT to be issued by an RODC.
+ mach_tgt = self.issued_by_rodc(self.get_tgt(mach_creds))
+
+ # Create an authentication policy that allows accounts with the
+ # Authentication Authority Asserted Identity SID.
+ allowed = (
+ f'O:SYD:(A;;CR;;;'
+ f'{security.SID_AUTHENTICATION_AUTHORITY_ASSERTED_IDENTITY})'
+ )
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_from=allowed)
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy)
+
+ # Show that authentication is allowed.
+ self._get_tgt(client_creds, armor_tgt=mach_tgt)
+
+ self.check_as_log(client_creds,
+ armor_creds=mach_creds,
+ client_policy=policy)
+
+ def test_authn_policy_allowed_from_user_allow_claims_valid_from_rodc(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
+ allowed_rodc=True)
+ # Modify the TGT to be issued by an RODC.
+ mach_tgt = self.issued_by_rodc(self.get_tgt(mach_creds))
+
+ # Create an authentication policy that allows accounts with the
+ # Claims Valid SID.
+ allowed = f'O:SYD:(A;;CR;;;{security.SID_CLAIMS_VALID})'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_from=allowed)
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy)
+
+ # Show that authentication is allowed.
+ self._get_tgt(client_creds, armor_tgt=mach_tgt)
+
+ self.check_as_log(client_creds,
+ armor_creds=mach_creds,
+ client_policy=policy)
+
+ def test_authn_policy_allowed_from_user_allow_compounded_authn_from_rodc(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
+ allowed_rodc=True)
+ # Modify the TGT to be issued by an RODC.
+ mach_tgt = self.issued_by_rodc(self.get_tgt(mach_creds))
+
+ # Create an authentication policy that allows accounts with the
+ # Compounded Authentication SID.
+ allowed = f'O:SYD:(A;;CR;;;{security.SID_COMPOUNDED_AUTHENTICATION})'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_from=allowed)
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy)
+
+ # Show that authentication is denied.
+ self._get_tgt(client_creds, armor_tgt=mach_tgt,
+ expected_error=KDC_ERR_POLICY)
+
+ self.check_as_log(
+ client_creds,
+ armor_creds=mach_creds,
+ client_policy=policy,
+ client_policy_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.KERBEROS_DEVICE_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED,
+ status=ntstatus.NT_STATUS_INVALID_WORKSTATION)
+
+ def test_authn_policy_allowed_from_user_allow_authenticated_users_from_rodc(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
+ allowed_rodc=True)
+ # Modify the TGT to be issued by an RODC.
+ mach_tgt = self.issued_by_rodc(self.get_tgt(mach_creds))
+
+ # Create an authentication policy that allows accounts with the
+ # Authenticated Users SID.
+ allowed = f'O:SYD:(A;;CR;;;{security.SID_NT_AUTHENTICATED_USERS})'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_from=allowed)
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy)
+
+ # Show that authentication is allowed.
+ self._get_tgt(client_creds, armor_tgt=mach_tgt)
+
+ self.check_as_log(client_creds,
+ armor_creds=mach_creds,
+ client_policy=policy)
+
+ def test_authn_policy_allowed_from_user_allow_ntlm_authn_from_rodc(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
+ allowed_rodc=True)
+ # Modify the TGT to be issued by an RODC.
+ mach_tgt = self.issued_by_rodc(self.get_tgt(mach_creds))
+
+ # Create an authentication policy that allows accounts with the NTLM
+ # Authentication SID.
+ allowed = f'O:SYD:(A;;CR;;;{security.SID_NT_NTLM_AUTHENTICATION})'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_from=allowed)
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy)
+
+ # Show that authentication is denied.
+ self._get_tgt(client_creds, armor_tgt=mach_tgt,
+ expected_error=KDC_ERR_POLICY)
+
+ self.check_as_log(
+ client_creds,
+ armor_creds=mach_creds,
+ client_policy=policy,
+ client_policy_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.KERBEROS_DEVICE_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED,
+ status=ntstatus.NT_STATUS_INVALID_WORKSTATION)
+
+ def test_authn_policy_allowed_from_user_deny_user(self):
+ samdb = self.get_samdb()
+
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+ mach_sid = mach_creds.get_sid()
+
+ # Create a user account.
+ client_creds = self.get_cached_creds(
+ account_type=self.AccountType.USER,
+ use_cache=False)
+ client_dn = client_creds.get_dn()
+ client_sid = client_creds.get_sid()
+
+ # Create an authentication policy that explicitly allows the machine
+ # account for a user, while denying the user account itself.
+ allowed = f'O:SYD:(A;;CR;;;{mach_sid})(D;;CR;;;{client_sid})'
+ denied = 'O:SYD:(D;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_from=allowed,
+ service_allowed_from=denied)
+
+ # Assign the policy to the user account.
+ self.add_attribute(samdb, str(client_dn),
+ 'msDS-AssignedAuthNPolicy', str(policy.dn))
+
+ # Show that authentication is allowed.
+ self._get_tgt(client_creds, armor_tgt=mach_tgt)
+
+ self.check_as_log(client_creds,
+ armor_creds=mach_creds,
+ client_policy=policy)
+
+ def test_authn_policy_allowed_to_empty(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create a user account.
+ client_creds = self.get_cached_creds(
+ account_type=self.AccountType.USER)
+ tgt = self.get_tgt(client_creds)
+
+ # Create an authentication policy with no DACL in the security
+ # descriptor.
+ allowed_to = 'O:SY'
+ policy = self.create_authn_policy(enforced=True,
+ computer_allowed_to=allowed_to)
+
+ # Create a computer account with the assigned policy.
+ target_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
+ assigned_policy=policy)
+
+ # Show that obtaining a service ticket is allowed.
+ self._tgs_req(tgt, 0, client_creds, target_creds,
+ armor_tgt=mach_tgt)
+
+ self.check_tgs_log(client_creds, target_creds,
+ policy=policy)
+
+ def test_authn_policy_allowed_to_computer_allow(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create a user account.
+ client_creds = self.get_cached_creds(
+ account_type=self.AccountType.USER)
+ tgt = self.get_tgt(client_creds)
+
+ # Create an authentication policy that applies to a computer and
+ # explicitly allows the user account to obtain a service ticket.
+ allowed = f'O:SYD:(A;;CR;;;{client_creds.get_sid()})'
+ denied = 'O:SYD:(D;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_to=denied,
+ computer_allowed_to=allowed,
+ service_allowed_to=denied)
+
+ # Create a computer account with the assigned policy.
+ target_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
+ assigned_policy=policy)
+
+ # Show that obtaining a service ticket is allowed.
+ self._tgs_req(tgt, 0, client_creds, target_creds,
+ armor_tgt=mach_tgt)
+
+ self.check_tgs_log(client_creds, target_creds,
+ policy=policy)
+
+ def test_authn_policy_allowed_to_computer_deny(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create a user account.
+ client_creds = self.get_cached_creds(
+ account_type=self.AccountType.USER)
+ tgt = self.get_tgt(client_creds)
+
+ # Create an authentication policy that applies to a computer and
+ # explicitly denies the user account to obtain a service ticket.
+ denied = f'O:SYD:(D;;CR;;;{client_creds.get_sid()})'
+ allowed = 'O:SYD:(A;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_to=allowed,
+ computer_allowed_to=denied,
+ service_allowed_to=allowed)
+
+ # Create a computer account with the assigned policy.
+ target_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
+ assigned_policy=policy)
+
+ # Show that obtaining a service ticket is denied.
+ self._tgs_req(
+ tgt, KDC_ERR_POLICY, client_creds, target_creds,
+ armor_tgt=mach_tgt,
+ expect_edata=self.expect_padata_outer,
+ # We aren’t particular about whether or not we get an NTSTATUS.
+ expect_status=None,
+ expected_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ check_patypes=False)
+
+ self.check_tgs_log(
+ client_creds, target_creds,
+ policy=policy,
+ status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.KERBEROS_SERVER_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED)
+
+ def test_authn_policy_allowed_to_computer_allow_but_deny_mach(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+ mach_sid = mach_creds.get_sid()
+
+ # Create a user account.
+ client_creds = self.get_cached_creds(
+ account_type=self.AccountType.USER)
+ client_sid = client_creds.get_sid()
+ tgt = self.get_tgt(client_creds)
+
+ # Create an authentication policy that applies to a computer and
+ # explicitly allows the user account to obtain a service ticket, while
+ # explicitly denying the machine account.
+ allowed = f'O:SYD:(A;;CR;;;{client_sid})(D;;CR;;;{mach_sid})'
+ denied = 'O:SYD:(D;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_to=denied,
+ computer_allowed_to=allowed,
+ service_allowed_to=denied)
+
+ # Create a computer account with the assigned policy.
+ target_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
+ assigned_policy=policy)
+
+ # Despite the documentation’s claims that the machine account is also
+ # access-checked, obtaining a service ticket is allowed.
+ self._tgs_req(tgt, 0, client_creds, target_creds,
+ armor_tgt=mach_tgt)
+
+ self.check_tgs_log(client_creds, target_creds, policy=policy)
+
+ def test_authn_policy_allowed_to_computer_allow_mach(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create a user account.
+ client_creds = self.get_cached_creds(
+ account_type=self.AccountType.USER)
+ tgt = self.get_tgt(client_creds)
+
+ # Create an authentication policy that applies to a computer and
+ # explicitly allows the machine account to obtain a service ticket.
+ allowed = f'O:SYD:(A;;CR;;;{mach_creds.get_sid()})'
+ denied = 'O:SYD:(D;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_to=denied,
+ computer_allowed_to=allowed,
+ service_allowed_to=denied)
+
+ # Create a computer account with the assigned policy.
+ target_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
+ assigned_policy=policy)
+
+ # Show that obtaining a service ticket is denied.
+ self._tgs_req(
+ tgt, KDC_ERR_POLICY, client_creds, target_creds,
+ armor_tgt=mach_tgt,
+ expect_edata=self.expect_padata_outer,
+ # We aren’t particular about whether or not we get an NTSTATUS.
+ expect_status=None,
+ expected_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ check_patypes=False)
+
+ self.check_tgs_log(
+ client_creds, target_creds,
+ policy=policy,
+ status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.KERBEROS_SERVER_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED)
+
+ def test_authn_policy_allowed_no_fast(self):
+ # Create a user account.
+ client_creds = self.get_cached_creds(
+ account_type=self.AccountType.USER)
+ tgt = self.get_tgt(client_creds)
+
+ # Create an authentication policy that applies to a computer and
+ # explicitly allows the user account to obtain a service ticket.
+ allowed = f'O:SYD:(A;;CR;;;{client_creds.get_sid()})'
+ denied = 'O:SYD:(D;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_to=denied,
+ computer_allowed_to=allowed,
+ service_allowed_to=denied)
+
+ # Create a computer account with the assigned policy.
+ target_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
+ assigned_policy=policy)
+
+ # Show that obtaining a service ticket is allowed without an armor TGT.
+ self._tgs_req(tgt, 0, client_creds, target_creds)
+
+ self.check_tgs_log(client_creds, target_creds, policy=policy)
+
+ def test_authn_policy_denied_no_fast(self):
+ # Create a user account.
+ client_creds = self.get_cached_creds(
+ account_type=self.AccountType.USER)
+ tgt = self.get_tgt(client_creds)
+
+ # Create an authentication policy that applies to a computer and
+ # explicitly disallows the user account to obtain a service ticket.
+ denied = f'O:SYD:(D;;CR;;;{client_creds.get_sid()})'
+ allowed = 'O:SYD:(A;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_to=allowed,
+ computer_allowed_to=denied,
+ service_allowed_to=allowed)
+
+ # Create a computer account with the assigned policy.
+ target_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
+ assigned_policy=policy)
+
+ # Show that obtaining a service ticket is not allowed.
+ self._tgs_req(
+ tgt, KDC_ERR_POLICY, client_creds, target_creds,
+ expect_edata=self.expect_padata_outer,
+ expect_status=True,
+ expected_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED)
+
+ self.check_tgs_log(
+ client_creds, target_creds,
+ policy=policy,
+ status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.KERBEROS_SERVER_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED)
+
+ def test_authn_policy_allowed_to_computer_allow_asserted_identity(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create a user account.
+ client_creds = self.get_cached_creds(
+ account_type=self.AccountType.USER)
+ tgt = self.get_tgt(client_creds)
+
+ # Create an authentication policy that allows accounts with the
+ # Authentication Authority Asserted Identity SID to obtain a service
+ # ticket.
+ allowed = (
+ f'O:SYD:(A;;CR;;;'
+ f'{security.SID_AUTHENTICATION_AUTHORITY_ASSERTED_IDENTITY})'
+ )
+ denied = 'O:SYD:(D;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_to=denied,
+ computer_allowed_to=allowed,
+ service_allowed_to=denied)
+
+ # Create a computer account with the assigned policy.
+ target_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
+ assigned_policy=policy)
+
+ # Show that obtaining a service ticket is allowed.
+ self._tgs_req(tgt, 0, client_creds, target_creds,
+ armor_tgt=mach_tgt)
+
+ self.check_tgs_log(client_creds, target_creds, policy=policy)
+
+ def test_authn_policy_allowed_to_computer_allow_claims_valid(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create a user account.
+ client_creds = self.get_cached_creds(
+ account_type=self.AccountType.USER)
+ tgt = self.get_tgt(client_creds)
+
+ # Create an authentication policy that allows accounts with the Claims
+ # Valid SID to obtain a service ticket.
+ allowed = f'O:SYD:(A;;CR;;;{security.SID_CLAIMS_VALID})'
+ denied = 'O:SYD:(D;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_to=denied,
+ computer_allowed_to=allowed,
+ service_allowed_to=denied)
+
+ # Create a computer account with the assigned policy.
+ target_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
+ assigned_policy=policy)
+
+ # Show that obtaining a service ticket is allowed.
+ self._tgs_req(tgt, 0, client_creds, target_creds,
+ armor_tgt=mach_tgt)
+
+ self.check_tgs_log(client_creds, target_creds, policy=policy)
+
+ def test_authn_policy_allowed_to_computer_allow_compounded_auth(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create a user account.
+ client_creds = self.get_cached_creds(
+ account_type=self.AccountType.USER)
+ tgt = self.get_tgt(client_creds)
+
+ # Create an authentication policy that allows accounts with the
+ # Compounded Authentication SID to obtain a service ticket.
+ allowed = f'O:SYD:(A;;CR;;;{security.SID_COMPOUNDED_AUTHENTICATION})'
+ denied = 'O:SYD:(D;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_to=denied,
+ computer_allowed_to=allowed,
+ service_allowed_to=denied)
+
+ # Create a computer account with the assigned policy.
+ target_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
+ assigned_policy=policy)
+
+ # Show that obtaining a service ticket is denied.
+ self._tgs_req(
+ tgt, KDC_ERR_POLICY, client_creds, target_creds,
+ armor_tgt=mach_tgt,
+ expect_edata=self.expect_padata_outer,
+ # We aren’t particular about whether or not we get an NTSTATUS.
+ expect_status=None,
+ expected_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ check_patypes=False)
+
+ self.check_tgs_log(
+ client_creds, target_creds,
+ policy=policy,
+ status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.KERBEROS_SERVER_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED)
+
+ def test_authn_policy_allowed_to_computer_allow_authenticated_users(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create a user account.
+ client_creds = self.get_cached_creds(
+ account_type=self.AccountType.USER)
+ tgt = self.get_tgt(client_creds)
+
+ # Create an authentication policy that allows accounts with the
+ # Authenticated Users SID to obtain a service ticket.
+ allowed = f'O:SYD:(A;;CR;;;{security.SID_NT_AUTHENTICATED_USERS})'
+ denied = 'O:SYD:(D;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_to=denied,
+ computer_allowed_to=allowed,
+ service_allowed_to=denied)
+
+ # Create a computer account with the assigned policy.
+ target_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
+ assigned_policy=policy)
+
+ # Show that obtaining a service ticket is allowed.
+ self._tgs_req(tgt, 0, client_creds, target_creds,
+ armor_tgt=mach_tgt)
+
+ self.check_tgs_log(client_creds, target_creds, policy=policy)
+
+ def test_authn_policy_allowed_to_computer_allow_ntlm_authn(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create a user account.
+ client_creds = self.get_cached_creds(
+ account_type=self.AccountType.USER)
+ tgt = self.get_tgt(client_creds)
+
+ # Create an authentication policy that allows accounts with the NTLM
+ # Authentication SID to obtain a service ticket.
+ allowed = f'O:SYD:(A;;CR;;;{security.SID_NT_NTLM_AUTHENTICATION})'
+ denied = 'O:SYD:(D;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_to=denied,
+ computer_allowed_to=allowed,
+ service_allowed_to=denied)
+
+ # Create a computer account with the assigned policy.
+ target_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
+ assigned_policy=policy)
+
+ # Show that obtaining a service ticket is denied.
+ self._tgs_req(
+ tgt, KDC_ERR_POLICY, client_creds, target_creds,
+ armor_tgt=mach_tgt,
+ expect_edata=self.expect_padata_outer,
+ # We aren’t particular about whether or not we get an NTSTATUS.
+ expect_status=None,
+ expected_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ check_patypes=False)
+
+ self.check_tgs_log(
+ client_creds, target_creds,
+ policy=policy,
+ status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.KERBEROS_SERVER_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED)
+
+ def test_authn_policy_allowed_to_no_owner(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create a user account.
+ client_creds = self.get_cached_creds(
+ account_type=self.AccountType.USER)
+ tgt = self.get_tgt(client_creds)
+
+ # Create an authentication policy that applies to a computer and
+ # explicitly allows the user account to obtain a service ticket. Omit
+ # the owner (O:SY) from the SDDL.
+ allowed = f'D:(A;;CR;;;{client_creds.get_sid()})'
+ policy = self.create_authn_policy(enforced=True,
+ computer_allowed_to=allowed)
+
+ # Create a computer account with the assigned policy.
+ target_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
+ assigned_policy=policy)
+
+ # Show that obtaining a service ticket is denied.
+ self._tgs_req(tgt, KDC_ERR_POLICY, client_creds, target_creds,
+ armor_tgt=mach_tgt,
+ expect_edata=self.expect_padata_outer,
+ # We aren’t particular about whether or not we get an
+ # NTSTATUS.
+ expect_status=None,
+ expected_status=ntstatus.NT_STATUS_INVALID_PARAMETER,
+ check_patypes=False)
+
+ self.check_tgs_log(
+ client_creds, target_creds,
+ policy=policy,
+ status=ntstatus.NT_STATUS_INVALID_PARAMETER,
+ event=AuditEvent.KERBEROS_SERVER_RESTRICTION,
+ reason=AuditReason.DESCRIPTOR_NO_OWNER)
+
+ def test_authn_policy_allowed_to_no_owner_unenforced(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create a user account.
+ client_creds = self.get_cached_creds(
+ account_type=self.AccountType.USER)
+ tgt = self.get_tgt(client_creds)
+
+ # Create an unenforced authentication policy that applies to a computer
+ # and explicitly allows the user account to obtain a service
+ # ticket. Omit the owner (O:SY) from the SDDL.
+ allowed = f'D:(A;;CR;;;{client_creds.get_sid()})'
+ policy = self.create_authn_policy(enforced=False,
+ computer_allowed_to=allowed)
+
+ # Create a computer account with the assigned policy.
+ target_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
+ assigned_policy=policy)
+
+ # Show that obtaining a service ticket is allowed.
+ self._tgs_req(tgt, 0, client_creds, target_creds,
+ armor_tgt=mach_tgt)
+
+ self.check_tgs_log(client_creds,
+ target_creds,
+ policy=policy,
+ policy_status=ntstatus.NT_STATUS_INVALID_PARAMETER,
+ event=AuditEvent.KERBEROS_SERVER_RESTRICTION,
+ reason=AuditReason.DESCRIPTOR_NO_OWNER)
+
+ def test_authn_policy_allowed_to_owner_self(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create a user account.
+ client_creds = self.get_cached_creds(
+ account_type=self.AccountType.USER)
+ client_sid = client_creds.get_sid()
+ tgt = self.get_tgt(client_creds)
+
+ # Create an authentication policy that applies to a computer and
+ # explicitly allows the user account to obtain a service ticket. Set
+ # the owner to the user account.
+ allowed = f'O:{client_sid}D:(A;;CR;;;{client_sid})'
+ policy = self.create_authn_policy(enforced=True,
+ computer_allowed_to=allowed)
+
+ # Create a computer account with the assigned policy.
+ target_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
+ assigned_policy=policy)
+
+ # Show that obtaining a service ticket is allowed.
+ self._tgs_req(tgt, 0, client_creds, target_creds,
+ armor_tgt=mach_tgt)
+
+ self.check_tgs_log(client_creds, target_creds, policy=policy)
+
+ def test_authn_policy_allowed_to_owner_anon(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create a user account.
+ client_creds = self.get_cached_creds(
+ account_type=self.AccountType.USER)
+ tgt = self.get_tgt(client_creds)
+
+ # Create an authentication policy that applies to a computer and
+ # explicitly allows the user account to obtain a service ticket. Set
+ # the owner to be anonymous.
+ allowed = f'O:AND:(A;;CR;;;{client_creds.get_sid()})'
+ policy = self.create_authn_policy(enforced=True,
+ computer_allowed_to=allowed)
+
+ # Create a computer account with the assigned policy.
+ target_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
+ assigned_policy=policy)
+
+ # Show that obtaining a service ticket is allowed.
+ self._tgs_req(tgt, 0, client_creds, target_creds,
+ armor_tgt=mach_tgt)
+
+ self.check_tgs_log(client_creds, target_creds, policy=policy)
+
+ def test_authn_policy_allowed_to_user_allow(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create a user account.
+ client_creds = self.get_cached_creds(
+ account_type=self.AccountType.USER)
+ tgt = self.get_tgt(client_creds)
+
+ # Create an authentication policy that applies to a user and explicitly
+ # allows the user account to obtain a service ticket.
+ allowed = f'O:SYD:(A;;CR;;;{client_creds.get_sid()})'
+ denied = 'O:SYD:(D;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_to=allowed,
+ computer_allowed_to=denied,
+ service_allowed_to=denied)
+
+ # Create a user account with the assigned policy.
+ target_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy,
+ spn='host/{account}')
+
+ # Show that obtaining a service ticket is allowed.
+ self._tgs_req(tgt, 0, client_creds, target_creds,
+ armor_tgt=mach_tgt)
+
+ self.check_tgs_log(client_creds, target_creds, policy=policy)
+
+ def test_authn_policy_allowed_to_user_deny(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create a user account.
+ client_creds = self.get_cached_creds(
+ account_type=self.AccountType.USER)
+ tgt = self.get_tgt(client_creds)
+
+ # Create an authentication policy that applies to a user and
+ # explicitly denies the user account to obtain a service ticket.
+ denied = f'O:SYD:(D;;CR;;;{client_creds.get_sid()})'
+ allowed = 'O:SYD:(A;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_to=denied,
+ computer_allowed_to=allowed,
+ service_allowed_to=allowed)
+
+ # Create a user account with the assigned policy.
+ target_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy,
+ spn='host/{account}')
+
+ # Show that obtaining a service ticket is denied.
+ self._tgs_req(
+ tgt, KDC_ERR_POLICY, client_creds, target_creds,
+ armor_tgt=mach_tgt,
+ expect_edata=self.expect_padata_outer,
+ # We aren’t particular about whether or not we get an NTSTATUS.
+ expect_status=None,
+ expected_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ check_patypes=False)
+
+ self.check_tgs_log(
+ client_creds, target_creds,
+ policy=policy,
+ status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.KERBEROS_SERVER_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED)
+
+ def test_authn_policy_allowed_to_service_allow(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create a user account.
+ client_creds = self.get_cached_creds(
+ account_type=self.AccountType.USER)
+ tgt = self.get_tgt(client_creds)
+
+ # Create an authentication policy that applies to a managed service and
+ # explicitly allows the user account to obtain a service ticket.
+ allowed = f'O:SYD:(A;;CR;;;{client_creds.get_sid()})'
+ denied = 'O:SYD:(D;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_to=denied,
+ computer_allowed_to=denied,
+ service_allowed_to=allowed)
+
+ # Create a managed service account with the assigned policy.
+ target_creds = self._get_creds(
+ account_type=self.AccountType.MANAGED_SERVICE,
+ assigned_policy=policy)
+
+ # Show that obtaining a service ticket is allowed.
+ self._tgs_req(tgt, 0, client_creds, target_creds,
+ armor_tgt=mach_tgt)
+
+ self.check_tgs_log(client_creds, target_creds, policy=policy)
+
+ def test_authn_policy_allowed_to_service_deny(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create a user account.
+ client_creds = self.get_cached_creds(
+ account_type=self.AccountType.USER)
+ tgt = self.get_tgt(client_creds)
+
+ # Create an authentication policy that applies to a managed service and
+ # explicitly denies the user account to obtain a service ticket.
+ denied = f'O:SYD:(D;;CR;;;{client_creds.get_sid()})'
+ allowed = 'O:SYD:(A;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_to=allowed,
+ computer_allowed_to=allowed,
+ service_allowed_to=denied)
+
+ # Create a managed service account with the assigned policy.
+ target_creds = self._get_creds(
+ account_type=self.AccountType.MANAGED_SERVICE,
+ assigned_policy=policy)
+
+ # Show that obtaining a service ticket is denied.
+ self._tgs_req(
+ tgt, KDC_ERR_POLICY, client_creds, target_creds,
+ armor_tgt=mach_tgt,
+ expect_edata=self.expect_padata_outer,
+ # We aren’t particular about whether or not we get an NTSTATUS.
+ expect_status=None,
+ expected_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ check_patypes=False)
+
+ self.check_tgs_log(
+ client_creds, target_creds,
+ policy=policy,
+ status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.KERBEROS_SERVER_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED)
+
+ def test_authn_policy_allowed_to_user_allow_from_rodc(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create a user account.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ allowed_rodc=True)
+ # Modify the TGT to be issued by an RODC.
+ tgt = self.issued_by_rodc(self.get_tgt(client_creds))
+
+ # Create an authentication policy that applies to a user and explicitly
+ # allows the user account to obtain a service ticket.
+ allowed = f'O:SYD:(A;;CR;;;{client_creds.get_sid()})'
+ denied = 'O:SYD:(D;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_to=allowed,
+ computer_allowed_to=denied,
+ service_allowed_to=denied)
+
+ # Create a user account with the assigned policy.
+ target_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy,
+ spn='host/{account}')
+
+ # Show that obtaining a service ticket is allowed.
+ self._tgs_req(tgt, 0, client_creds, target_creds,
+ armor_tgt=mach_tgt)
+
+ self.check_tgs_log(client_creds, target_creds, policy=policy)
+
+ def test_authn_policy_allowed_to_user_deny_from_rodc(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create a user account.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ allowed_rodc=True)
+ # Modify the TGT to be issued by an RODC.
+ tgt = self.issued_by_rodc(self.get_tgt(client_creds))
+
+ # Create an authentication policy that applies to a user and
+ # explicitly denies the user account permission to obtain a service ticket.
+ denied = f'O:SYD:(D;;CR;;;{client_creds.get_sid()})'
+ allowed = 'O:SYD:(A;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_to=denied,
+ computer_allowed_to=allowed,
+ service_allowed_to=allowed)
+
+ # Create a user account with the assigned policy.
+ target_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy,
+ spn='host/{account}')
+
+ # Show that obtaining a service ticket is denied.
+ self._tgs_req(
+ tgt, KDC_ERR_POLICY, client_creds, target_creds,
+ armor_tgt=mach_tgt,
+ expect_edata=self.expect_padata_outer,
+ # We aren’t particular about whether or not we get an NTSTATUS.
+ expect_status=None,
+ expected_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED)
+
+ self.check_tgs_log(
+ client_creds, target_creds,
+ policy=policy,
+ status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.KERBEROS_SERVER_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED)
+
+ def test_authn_policy_allowed_to_computer_allow_from_rodc(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create a user account.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ allowed_rodc=True)
+ # Modify the TGT to be issued by an RODC.
+ tgt = self.issued_by_rodc(self.get_tgt(client_creds))
+
+ # Create an authentication policy that applies to a computer and
+ # explicitly allows the user account to obtain a service ticket.
+ allowed = f'O:SYD:(A;;CR;;;{client_creds.get_sid()})'
+ denied = 'O:SYD:(D;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_to=denied,
+ computer_allowed_to=allowed,
+ service_allowed_to=denied)
+
+ # Create a computer account with the assigned policy.
+ target_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
+ assigned_policy=policy)
+
+ # Show that obtaining a service ticket is allowed.
+ self._tgs_req(tgt, 0, client_creds, target_creds,
+ armor_tgt=mach_tgt)
+
+ self.check_tgs_log(client_creds, target_creds, policy=policy)
+
+ def test_authn_policy_allowed_to_computer_deny_from_rodc(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create a user account.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ allowed_rodc=True)
+ # Modify the TGT to be issued by an RODC.
+ tgt = self.issued_by_rodc(self.get_tgt(client_creds))
+
+ # Create an authentication policy that applies to a computer and
+ # explicitly denies the user account permission to obtain a service ticket.
+ denied = f'O:SYD:(D;;CR;;;{client_creds.get_sid()})'
+ allowed = 'O:SYD:(A;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_to=allowed,
+ computer_allowed_to=denied,
+ service_allowed_to=allowed)
+
+ # Create a computer account with the assigned policy.
+ target_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
+ assigned_policy=policy)
+
+ # Show that obtaining a service ticket is denied.
+ self._tgs_req(
+ tgt, KDC_ERR_POLICY, client_creds, target_creds,
+ armor_tgt=mach_tgt,
+ expect_edata=self.expect_padata_outer,
+ # We aren’t particular about whether or not we get an NTSTATUS.
+ expect_status=None,
+ expected_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,)
+
+ self.check_tgs_log(
+ client_creds, target_creds,
+ policy=policy,
+ status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.KERBEROS_SERVER_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED)
+
+ def test_authn_policy_allowed_to_service_allow_from_rodc(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create a user account.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ allowed_rodc=True)
+ # Modify the TGT to be issued by an RODC.
+ tgt = self.issued_by_rodc(self.get_tgt(client_creds))
+
+ # Create an authentication policy that applies to a managed service and
+ # explicitly allows the user account to obtain a service ticket.
+ allowed = f'O:SYD:(A;;CR;;;{client_creds.get_sid()})'
+ denied = 'O:SYD:(D;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_to=denied,
+ computer_allowed_to=denied,
+ service_allowed_to=allowed)
+
+ # Create a managed service account with the assigned policy.
+ target_creds = self._get_creds(
+ account_type=self.AccountType.MANAGED_SERVICE,
+ assigned_policy=policy)
+
+ # Show that obtaining a service ticket is allowed.
+ self._tgs_req(tgt, 0, client_creds, target_creds,
+ armor_tgt=mach_tgt)
+
+ self.check_tgs_log(client_creds, target_creds, policy=policy)
+
+ def test_authn_policy_allowed_to_service_deny_from_rodc(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create a user account.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ allowed_rodc=True)
+ # Modify the TGT to be issued by an RODC.
+ tgt = self.issued_by_rodc(self.get_tgt(client_creds))
+
+ # Create an authentication policy that applies to a managed service and
+ # explicitly denies the user account permission to obtain a service ticket.
+ denied = f'O:SYD:(D;;CR;;;{client_creds.get_sid()})'
+ allowed = 'O:SYD:(A;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_to=allowed,
+ computer_allowed_to=allowed,
+ service_allowed_to=denied)
+
+ # Create a managed service account with the assigned policy.
+ target_creds = self._get_creds(
+ account_type=self.AccountType.MANAGED_SERVICE,
+ assigned_policy=policy)
+
+ # Show that obtaining a service ticket is denied.
+ self._tgs_req(
+ tgt, KDC_ERR_POLICY, client_creds, target_creds,
+ armor_tgt=mach_tgt,
+ expect_edata=self.expect_padata_outer,
+ # We aren’t particular about whether or not we get an NTSTATUS.
+ expect_status=None,
+ expected_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED)
+
+ self.check_tgs_log(
+ client_creds, target_creds,
+ policy=policy,
+ status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.KERBEROS_SERVER_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED)
+
+ def test_authn_policy_allowed_to_user_allow_group_not_a_member(self):
+ samdb = self.get_samdb()
+
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create a new group.
+ group_name = self.get_new_username()
+ group_dn = self.create_group(samdb, group_name)
+ group_sid = self.get_objectSid(samdb, group_dn)
+
+ # Create a user account which does not belong to the group.
+ client_creds = self.get_cached_creds(
+ account_type=self.AccountType.USER)
+ tgt = self.get_tgt(client_creds)
+
+ # Create an authentication policy that allows accounts belonging to the
+ # group.
+ allowed = f'O:SYD:(A;;CR;;;{group_sid})'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_to=allowed)
+
+ # Create a user account with the assigned policy.
+ target_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy,
+ spn='host/{account}')
+
+ # Show that we get a policy error, as the user account does not belong
+ # to the group.
+ self._tgs_req(
+ tgt, KDC_ERR_POLICY, client_creds, target_creds,
+ armor_tgt=mach_tgt,
+ expect_edata=self.expect_padata_outer,
+ # We aren’t particular about whether or not we get an NTSTATUS.
+ expect_status=None,
+ expected_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ check_patypes=False)
+
+ self.check_tgs_log(
+ client_creds, target_creds,
+ policy=policy,
+ status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.KERBEROS_SERVER_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED)
+
+ def test_authn_policy_allowed_to_user_allow_group_member(self):
+ samdb = self.get_samdb()
+
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create a new group.
+ group_name = self.get_new_username()
+ group_dn = self.create_group(samdb, group_name)
+ group_sid = self.get_objectSid(samdb, group_dn)
+
+ # Create a user account that belongs to the group.
+ client_creds = self.get_cached_creds(
+ account_type=self.AccountType.USER,
+ opts={'member_of': (group_dn,)})
+ tgt = self.get_tgt(client_creds)
+
+ # Create an authentication policy that allows accounts belonging to the
+ # group.
+ allowed = f'O:SYD:(A;;CR;;;{group_sid})'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_to=allowed)
+
+ # Create a user account with the assigned policy.
+ target_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy,
+ spn='host/{account}')
+
+ # Show that we can get a service ticket, since the user account belongs
+ # to the group.
+ self._tgs_req(tgt, 0, client_creds, target_creds,
+ armor_tgt=mach_tgt)
+
+ self.check_tgs_log(client_creds, target_creds, policy=policy)
+
+ def test_authn_policy_allowed_to_user_allow_domain_local_group(self):
+ samdb = self.get_samdb()
+
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create a new domain-local group.
+ group_name = self.get_new_username()
+ group_dn = self.create_group(samdb, group_name,
+ gtype=GroupType.DOMAIN_LOCAL.value)
+ group_sid = self.get_objectSid(samdb, group_dn)
+
+ # Create a user account that belongs to the group.
+ client_creds = self.get_cached_creds(
+ account_type=self.AccountType.USER,
+ opts={'member_of': (group_dn,)})
+ tgt = self.get_tgt(client_creds)
+
+ # Create an authentication policy that allows accounts belonging to the
+ # group.
+ allowed = f'O:SYD:(A;;CR;;;{group_sid})'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_to=allowed)
+
+ # Create a user account with the assigned policy.
+ target_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy,
+ spn='host/{account}')
+
+ # Show that the groups in the TGT are expanded to include the
+ # domain-local group.
+ self._tgs_req(tgt, 0, client_creds, target_creds,
+ armor_tgt=mach_tgt)
+
+ self.check_tgs_log(client_creds, target_creds, policy=policy)
+
+ def test_authn_policy_allowed_to_computer_allow_asserted_identity_from_rodc(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create a user account.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ allowed_rodc=True)
+ # Modify the TGT to be issued by an RODC.
+ tgt = self.issued_by_rodc(self.get_tgt(client_creds))
+
+ # Create an authentication policy that allows accounts with the
+ # Authentication Authority Asserted Identity SID to obtain a service
+ # ticket.
+ allowed = (
+ f'O:SYD:(A;;CR;;;'
+ f'{security.SID_AUTHENTICATION_AUTHORITY_ASSERTED_IDENTITY})'
+ )
+ denied = 'O:SYD:(D;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_to=denied,
+ computer_allowed_to=allowed,
+ service_allowed_to=denied)
+
+ # Create a computer account with the assigned policy.
+ target_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
+ assigned_policy=policy)
+
+ # Show that obtaining a service ticket is allowed.
+ self._tgs_req(tgt, 0, client_creds, target_creds,
+ armor_tgt=mach_tgt)
+
+ self.check_tgs_log(client_creds, target_creds, policy=policy)
+
+ def test_authn_policy_allowed_to_computer_allow_claims_valid_from_rodc(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create a user account.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ allowed_rodc=True)
+ # Modify the TGT to be issued by an RODC.
+ tgt = self.issued_by_rodc(self.get_tgt(client_creds))
+
+ # Create an authentication policy that allows accounts with the Claims
+ # Valid SID to obtain a service ticket.
+ allowed = f'O:SYD:(A;;CR;;;{security.SID_CLAIMS_VALID})'
+ denied = 'O:SYD:(D;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_to=denied,
+ computer_allowed_to=allowed,
+ service_allowed_to=denied)
+
+ # Create a computer account with the assigned policy.
+ target_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
+ assigned_policy=policy)
+
+ # Show that obtaining a service ticket is allowed.
+ self._tgs_req(tgt, 0, client_creds, target_creds,
+ armor_tgt=mach_tgt)
+
+ self.check_tgs_log(client_creds, target_creds, policy=policy)
+
+ def test_authn_policy_allowed_to_computer_allow_compounded_authn_from_rodc(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create a user account.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ allowed_rodc=True)
+ # Modify the TGT to be issued by an RODC.
+ tgt = self.issued_by_rodc(self.get_tgt(client_creds))
+
+ # Create an authentication policy that allows accounts with the
+ # Compounded Authentication SID to obtain a service ticket.
+ allowed = f'O:SYD:(A;;CR;;;{security.SID_COMPOUNDED_AUTHENTICATION})'
+ denied = 'O:SYD:(D;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_to=denied,
+ computer_allowed_to=allowed,
+ service_allowed_to=denied)
+
+ # Create a computer account with the assigned policy.
+ target_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
+ assigned_policy=policy)
+
+ # Show that obtaining a service ticket is denied.
+ self._tgs_req(
+ tgt, KDC_ERR_POLICY, client_creds, target_creds,
+ armor_tgt=mach_tgt,
+ expect_edata=self.expect_padata_outer,
+ # We aren’t particular about whether or not we get an NTSTATUS.
+ expect_status=None,
+ expected_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED)
+
+ self.check_tgs_log(
+ client_creds, target_creds,
+ policy=policy,
+ status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.KERBEROS_SERVER_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED)
+
+ def test_authn_policy_allowed_to_computer_allow_authenticated_users_from_rodc(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create a user account.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ allowed_rodc=True)
+ # Modify the TGT to be issued by an RODC.
+ tgt = self.issued_by_rodc(self.get_tgt(client_creds))
+
+ # Create an authentication policy that allows accounts with the
+ # Authenticated Users SID to obtain a service ticket.
+ allowed = f'O:SYD:(A;;CR;;;{security.SID_NT_AUTHENTICATED_USERS})'
+ denied = 'O:SYD:(D;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_to=denied,
+ computer_allowed_to=allowed,
+ service_allowed_to=denied)
+
+ # Create a computer account with the assigned policy.
+ target_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
+ assigned_policy=policy)
+
+ # Show that obtaining a service ticket is allowed.
+ self._tgs_req(tgt, 0, client_creds, target_creds,
+ armor_tgt=mach_tgt)
+
+ self.check_tgs_log(client_creds, target_creds, policy=policy)
+
+ def test_authn_policy_allowed_to_computer_allow_ntlm_authn_from_rodc(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create a user account.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ allowed_rodc=True)
+ # Modify the TGT to be issued by an RODC.
+ tgt = self.issued_by_rodc(self.get_tgt(client_creds))
+
+ # Create an authentication policy that allows accounts with the NTLM
+ # Authentication SID to obtain a service ticket.
+ allowed = f'O:SYD:(A;;CR;;;{security.SID_NT_NTLM_AUTHENTICATION})'
+ denied = 'O:SYD:(D;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_to=denied,
+ computer_allowed_to=allowed,
+ service_allowed_to=denied)
+
+ # Create a computer account with the assigned policy.
+ target_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
+ assigned_policy=policy)
+
+ # Show that obtaining a service ticket is denied.
+ self._tgs_req(
+ tgt, KDC_ERR_POLICY, client_creds, target_creds,
+ armor_tgt=mach_tgt,
+ expect_edata=self.expect_padata_outer,
+ # We aren’t particular about whether or not we get an NTSTATUS.
+ expect_status=None,
+ expected_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED)
+
+ self.check_tgs_log(
+ client_creds, target_creds,
+ policy=policy,
+ status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.KERBEROS_SERVER_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED)
+
+ def test_authn_policy_allowed_to_user_allow_group_not_a_member_from_rodc(self):
+ samdb = self.get_samdb()
+
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create a new group.
+ group_name = self.get_new_username()
+ group_dn = self.create_group(samdb, group_name)
+ group_sid = self.get_objectSid(samdb, group_dn)
+
+ # Create a user account which does not belong to the group.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ allowed_rodc=True)
+ # Modify the TGT to be issued by an RODC.
+ tgt = self.issued_by_rodc(self.get_tgt(client_creds))
+
+ # Create an authentication policy that allows accounts belonging to the
+ # group.
+ allowed = f'O:SYD:(A;;CR;;;{group_sid})'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_to=allowed)
+
+ # Create a user account with the assigned policy.
+ target_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy,
+ spn='host/{account}')
+
+ # Show that we get a policy error, as the user account does not belong
+ # to the group.
+ self._tgs_req(
+ tgt, KDC_ERR_POLICY, client_creds, target_creds,
+ armor_tgt=mach_tgt,
+ expect_edata=self.expect_padata_outer,
+ # We aren’t particular about whether or not we get an NTSTATUS.
+ expect_status=None,
+ expected_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED)
+
+ self.check_tgs_log(
+ client_creds, target_creds,
+ policy=policy,
+ status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.KERBEROS_SERVER_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED)
+
+ def test_authn_policy_allowed_to_user_allow_group_member_from_rodc(self):
+ samdb = self.get_samdb()
+
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create a new group.
+ group_name = self.get_new_username()
+ group_dn = self.create_group(samdb, group_name)
+ group_sid = self.get_objectSid(samdb, group_dn)
+
+ # Create a user account that belongs to the group.
+ client_creds = self.get_cached_creds(
+ account_type=self.AccountType.USER,
+ opts={'member_of': (group_dn,),
+ 'allowed_replication_mock': True,
+ 'revealed_to_mock_rodc': True})
+ # Modify the TGT to be issued by an RODC.
+ tgt = self.issued_by_rodc(self.get_tgt(client_creds))
+
+ # Create an authentication policy that allows accounts belonging to the
+ # group.
+ allowed = f'O:SYD:(A;;CR;;;{group_sid})'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_to=allowed)
+
+ # Create a user account with the assigned policy.
+ target_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy,
+ spn='host/{account}')
+
+ # Show that we can get a service ticket, since the user account belongs
+ # to the group.
+ self._tgs_req(tgt, 0, client_creds, target_creds,
+ armor_tgt=mach_tgt)
+
+ self.check_tgs_log(client_creds, target_creds, policy=policy)
+
+ def test_authn_policy_allowed_to_user_allow_domain_local_group_from_rodc(self):
+ samdb = self.get_samdb()
+
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create a new domain-local group.
+ group_name = self.get_new_username()
+ group_dn = self.create_group(samdb, group_name,
+ gtype=GroupType.DOMAIN_LOCAL.value)
+ group_sid = self.get_objectSid(samdb, group_dn)
+
+ # Create a user account that belongs to the group.
+ client_creds = self.get_cached_creds(
+ account_type=self.AccountType.USER,
+ opts={'member_of': (group_dn,),
+ 'allowed_replication_mock': True,
+ 'revealed_to_mock_rodc': True})
+ # Modify the TGT to be issued by an RODC.
+ tgt = self.issued_by_rodc(self.get_tgt(client_creds))
+
+ # Create an authentication policy that allows accounts belonging to the
+ # group.
+ allowed = f'O:SYD:(A;;CR;;;{group_sid})'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_to=allowed)
+
+ # Create a user account with the assigned policy.
+ target_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy,
+ spn='host/{account}')
+
+ # Show that the groups in the TGT are expanded to include the
+ # domain-local group.
+ self._tgs_req(tgt, 0, client_creds, target_creds,
+ armor_tgt=mach_tgt)
+
+ self.check_tgs_log(client_creds, target_creds, policy=policy)
+
+ def test_authn_policy_allowed_to_computer_allow_to_self(self):
+ samdb = self.get_samdb()
+
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create a computer account.
+ client_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER,
+ use_cache=False)
+ client_dn = client_creds.get_dn()
+ tgt = self.get_tgt(client_creds)
+
+ # Create an authentication policy that applies to a computer and
+ # explicitly allows the account to obtain a service ticket.
+ allowed = f'O:SYD:(A;;CR;;;{client_creds.get_sid()})'
+ denied = 'O:SYD:(D;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_to=denied,
+ computer_allowed_to=allowed,
+ service_allowed_to=denied)
+
+ # Assign the policy to the account.
+ self.add_attribute(samdb, str(client_dn),
+ 'msDS-AssignedAuthNPolicy', str(policy.dn))
+
+ # Show that obtaining a service ticket to ourselves is allowed.
+ self._tgs_req(tgt, 0, client_creds, client_creds,
+ armor_tgt=mach_tgt)
+
+ self.check_tgs_log(client_creds, client_creds, policy=policy)
+
+ def test_authn_policy_allowed_to_computer_deny_to_self(self):
+ samdb = self.get_samdb()
+
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create a computer account.
+ client_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER,
+ use_cache=False)
+ client_dn = client_creds.get_dn()
+ tgt = self.get_tgt(client_creds)
+
+ # Create an authentication policy that applies to a computer and
+ # explicitly denies the account permission to obtain a service ticket.
+ denied = f'O:SYD:(D;;CR;;;{client_creds.get_sid()})'
+ allowed = 'O:SYD:(A;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_to=allowed,
+ computer_allowed_to=denied,
+ service_allowed_to=allowed)
+
+ # Assign the policy to the account.
+ self.add_attribute(samdb, str(client_dn),
+ 'msDS-AssignedAuthNPolicy', str(policy.dn))
+
+ # Show that obtaining a service ticket to ourselves is allowed, despite
+ # the policy disallowing it.
+ self._tgs_req(tgt, 0, client_creds, client_creds,
+ armor_tgt=mach_tgt)
+
+ self.check_tgs_log(client_creds, client_creds, policy=policy)
+
+ def test_authn_policy_allowed_to_computer_allow_to_self_with_self(self):
+ samdb = self.get_samdb()
+
+ # Create a computer account.
+ client_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER,
+ use_cache=False)
+ client_dn = client_creds.get_dn()
+ tgt = self.get_tgt(client_creds)
+
+ # Create an authentication policy that applies to a computer and
+ # explicitly allows the account to obtain a service ticket.
+ allowed = f'O:SYD:(A;;CR;;;{client_creds.get_sid()})'
+ denied = 'O:SYD:(D;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_to=denied,
+ computer_allowed_to=allowed,
+ service_allowed_to=denied)
+
+ # Assign the policy to the account.
+ self.add_attribute(samdb, str(client_dn),
+ 'msDS-AssignedAuthNPolicy', str(policy.dn))
+
+ # Show that obtaining a service ticket to ourselves armored with our
+ # own TGT is allowed.
+ self._tgs_req(tgt, 0, client_creds, client_creds,
+ armor_tgt=tgt)
+
+ self.check_tgs_log(client_creds, client_creds, policy=policy)
+
+ def test_authn_policy_allowed_to_computer_deny_to_self_with_self(self):
+ samdb = self.get_samdb()
+
+ # Create a computer account.
+ client_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER,
+ use_cache=False)
+ client_dn = client_creds.get_dn()
+ tgt = self.get_tgt(client_creds)
+
+ # Create an authentication policy that applies to a computer and
+ # explicitly denies the account permission to obtain a service ticket.
+ denied = f'O:SYD:(D;;CR;;;{client_creds.get_sid()})'
+ allowed = 'O:SYD:(A;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_to=allowed,
+ computer_allowed_to=denied,
+ service_allowed_to=allowed)
+
+ # Assign the policy to the account.
+ self.add_attribute(samdb, str(client_dn),
+ 'msDS-AssignedAuthNPolicy', str(policy.dn))
+
+ # Show that obtaining a service ticket to ourselves armored with our
+ # own TGT is allowed, despite the policy’s disallowing it.
+ self._tgs_req(tgt, 0, client_creds, client_creds,
+ armor_tgt=tgt)
+
+ self.check_tgs_log(client_creds, client_creds, policy=policy)
+
+ def test_authn_policy_allowed_to_user_allow_s4u2self(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create a user account.
+ client_creds = self.get_cached_creds(
+ account_type=self.AccountType.USER)
+ client_cname = self.PrincipalName_create(
+ name_type=NT_PRINCIPAL,
+ names=[client_creds.get_username()])
+ client_realm = client_creds.get_realm()
+
+ # Create an authentication policy that applies to a computer and
+ # explicitly allows the user account to obtain a service ticket.
+ allowed = f'O:SYD:(A;;CR;;;{client_creds.get_sid()})'
+ policy = self.create_authn_policy(enforced=True,
+ computer_allowed_to=allowed)
+
+ # Create a computer account with the assigned policy.
+ target_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
+ assigned_policy=policy)
+ target_tgt = self.get_tgt(target_creds)
+
+ def generate_s4u2self_padata(_kdc_exchange_dict,
+ _callback_dict,
+ req_body):
+ padata = self.PA_S4U2Self_create(
+ name=client_cname,
+ realm=client_realm,
+ tgt_session_key=target_tgt.session_key,
+ ctype=None)
+
+ return [padata], req_body
+
+ # Show that obtaining a service ticket with S4U2Self is allowed.
+ self._tgs_req(target_tgt, 0, target_creds, target_creds,
+ expected_cname=client_cname,
+ generate_fast_padata_fn=generate_s4u2self_padata,
+ armor_tgt=mach_tgt)
+
+ # The policy does not apply for S4U2Self, and thus does not appear in
+ # the logs.
+ self.check_tgs_log(client_creds, target_creds, policy=None)
+
+ def test_authn_policy_allowed_to_user_deny_s4u2self(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create a user account.
+ client_creds = self.get_cached_creds(
+ account_type=self.AccountType.USER)
+ client_cname = self.PrincipalName_create(
+ name_type=NT_PRINCIPAL,
+ names=[client_creds.get_username()])
+ client_realm = client_creds.get_realm()
+
+ # Create an authentication policy that applies to a computer and
+ # explicitly denies the user account permission to obtain a service ticket.
+ denied = f'O:SYD:(D;;CR;;;{client_creds.get_sid()})'
+ policy = self.create_authn_policy(enforced=True,
+ computer_allowed_to=denied)
+
+ # Create a computer account with the assigned policy.
+ target_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
+ assigned_policy=policy)
+ target_tgt = self.get_tgt(target_creds)
+
+ def generate_s4u2self_padata(_kdc_exchange_dict,
+ _callback_dict,
+ req_body):
+ padata = self.PA_S4U2Self_create(
+ name=client_cname,
+ realm=client_realm,
+ tgt_session_key=target_tgt.session_key,
+ ctype=None)
+
+ return [padata], req_body
+
+ # Show that obtaining a service ticket with S4U2Self is allowed,
+ # despite the policy.
+ self._tgs_req(target_tgt, 0, target_creds, target_creds,
+ expected_cname=client_cname,
+ generate_fast_padata_fn=generate_s4u2self_padata,
+ armor_tgt=mach_tgt)
+
+ # The policy does not apply for S4U2Self, and thus does not appear in
+ # the logs.
+ self.check_tgs_log(client_creds, target_creds, policy=None)
+
+ # Obtain a service ticket with S4U2Self and use it to perform constrained
+ # delegation while a policy is in place.
    def test_authn_policy_allowed_to_user_deny_s4u2self_constrained_delegation(self):
        """Obtain a service ticket with S4U2Self and use it for constrained
        delegation while authentication policies are in place.

        The service account’s own policy denies the user, yet the S4U2Self
        step still succeeds (‘allowed-to’ policies are not enforced for
        S4U2Self).  The target account’s policy, which allows the service,
        governs the subsequent S4U2Proxy request and shows up in the audit
        log.
        """
        samdb = self.get_samdb()

        # Create a machine account with which to perform FAST.
        mach_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER)
        mach_tgt = self.get_tgt(mach_creds)

        # Create a user account.
        client_creds = self.get_cached_creds(
            account_type=self.AccountType.USER)
        client_username = client_creds.get_username()
        client_cname = self.PrincipalName_create(
            name_type=NT_PRINCIPAL,
            names=[client_username])
        client_realm = client_creds.get_realm()
        client_sid = client_creds.get_sid()

        # Create a target account.
        target_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER,
            use_cache=False)
        target_spn = target_creds.get_spn()

        # Create an authentication policy that applies to a computer and
        # explicitly denies the user account to obtain a service ticket.
        denied = f'O:SYD:(D;;CR;;;{client_creds.get_sid()})'
        service_policy = self.create_authn_policy(enforced=True,
                                                  computer_allowed_to=denied)

        # Create a computer account with the assigned policy.
        service_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER,
            opts={
                'assigned_policy': str(service_policy.dn),
                # Allow delegation to the target service.
                'delegation_to_spn': target_spn,
                'trusted_to_auth_for_delegation': True,
            })
        service_sid = service_creds.get_sid()
        service_tgt = self.get_tgt(service_creds)

        # Create an authentication policy that applies to a computer and
        # explicitly allows the service account to obtain a service ticket,
        # while denying the user.
        allowed = f'O:SYD:(A;;CR;;;{service_sid})(D;;CR;;;{client_sid})'
        target_policy = self.create_authn_policy(enforced=True,
                                                 computer_allowed_to=allowed)

        # Assign the policy to the target account.
        self.add_attribute(samdb, str(target_creds.get_dn()),
                           'msDS-AssignedAuthNPolicy', str(target_policy.dn))

        def generate_s4u2self_padata(_kdc_exchange_dict,
                                     _callback_dict,
                                     req_body):
            # Build the PA-FOR-USER padata naming the user on whose behalf
            # the service requests a ticket to itself.
            padata = self.PA_S4U2Self_create(
                name=client_cname,
                realm=client_realm,
                tgt_session_key=service_tgt.session_key,
                ctype=None)

            return [padata], req_body

        # Make sure the ticket is forwardable, so it can be used with
        # constrained delegation.
        forwardable_flag = 'forwardable'
        client_tkt_options = str(krb5_asn1.KDCOptions(forwardable_flag))
        expected_flags = krb5_asn1.TicketFlags(forwardable_flag)

        # Show that obtaining a service ticket with S4U2Self is allowed,
        # despite the policy.
        client_service_tkt = self._tgs_req(
            service_tgt, 0, service_creds, service_creds,
            kdc_options=client_tkt_options,
            expected_flags=expected_flags,
            expected_cname=client_cname,
            generate_fast_padata_fn=generate_s4u2self_padata,
            armor_tgt=mach_tgt)

        # The policy does not apply for S4U2Self, and thus does not appear in
        # the logs.
        self.check_tgs_log(client_creds, service_creds, policy=None)

        # Now perform constrained delegation with this service ticket.

        kdc_options = str(krb5_asn1.KDCOptions('cname-in-addl-tkt'))

        target_decryption_key = self.TicketDecryptionKey_from_creds(
            target_creds)
        target_etypes = target_creds.tgs_supported_enctypes

        # Strip any trailing ‘$’ from the service’s account name to form the
        # expected transited-services entry.
        service_name = service_creds.get_username()
        if service_name[-1] == '$':
            service_name = service_name[:-1]
        expected_transited_services = [
            f'host/{service_name}@{service_creds.get_realm()}'
        ]

        # Show that obtaining a service ticket with constrained delegation is
        # allowed.
        self._tgs_req(service_tgt, 0, service_creds, target_creds,
                      armor_tgt=mach_tgt,
                      kdc_options=kdc_options,
                      expected_cname=client_cname,
                      expected_account_name=client_username,
                      additional_ticket=client_service_tkt,
                      decryption_key=target_decryption_key,
                      expected_sid=client_sid,
                      expected_supported_etypes=target_etypes,
                      expected_proxy_target=target_spn,
                      expected_transited_services=expected_transited_services)

        # The target’s policy was checked against the service’s credentials.
        self.check_tgs_log(client_creds, target_creds,
                           policy=target_policy,
                           checked_creds=service_creds)
+
    def test_authn_policy_s4u2self_not_allowed_from(self):
        """Test that a client’s ‘allowed-from’ policy does not restrict
        S4U2Self requests made on the client’s behalf: even a policy denying
        authentication from every device does not block the exchange, and
        nothing is logged for it."""
        # Create a machine account with which to perform FAST.
        mach_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER)
        mach_tgt = self.get_tgt(mach_creds)

        # Create an authentication policy that applies to a user and explicitly
        # denies authentication with any device.
        denied = 'O:SYD:(D;;CR;;;WD)'
        policy = self.create_authn_policy(enforced=True,
                                          user_allowed_from=denied)

        # Create a user account with the assigned policy.
        client_creds = self._get_creds(account_type=self.AccountType.USER,
                                       assigned_policy=policy)
        client_cname = self.PrincipalName_create(
            name_type=NT_PRINCIPAL,
            names=[client_creds.get_username()])
        client_realm = client_creds.get_realm()

        # Create a computer account.
        target_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER)
        target_tgt = self.get_tgt(target_creds)

        def generate_s4u2self_padata(_kdc_exchange_dict,
                                     _callback_dict,
                                     req_body):
            # Build the PA-FOR-USER padata naming the restricted user.
            padata = self.PA_S4U2Self_create(
                name=client_cname,
                realm=client_realm,
                tgt_session_key=target_tgt.session_key,
                ctype=None)

            return [padata], req_body

        # Show that obtaining a service ticket with S4U2Self is allowed,
        # despite the client’s policy.
        self._tgs_req(target_tgt, 0, target_creds, target_creds,
                      expected_cname=client_cname,
                      generate_fast_padata_fn=generate_s4u2self_padata,
                      armor_tgt=mach_tgt)

        # The client’s policy does not apply for S4U2Self, and thus does not
        # appear in the logs.
        self.check_tgs_log(client_creds, target_creds, policy=None)
+
    def test_authn_policy_allowed_to_user_allow_s4u2self_inner_fast(self):
        """Test that the correct Asserted Identity SID is placed into the PAC
        when an S4U2Self requests contains inner FX‐FAST padata."""
        # Create a machine account with which to perform FAST.
        mach_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER)
        mach_tgt = self.get_tgt(mach_creds)

        # Create a user account.
        client_creds = self.get_cached_creds(
            account_type=self.AccountType.USER)
        client_cname = self.PrincipalName_create(
            name_type=NT_PRINCIPAL,
            names=[client_creds.get_username()])
        client_realm = client_creds.get_realm()

        # Create a target account.
        target_creds = self.get_service_creds()
        target_tgt = self.get_tgt(target_creds)

        def generate_s4u2self_padata(_kdc_exchange_dict,
                                     _callback_dict,
                                     req_body):
            s4u2self_padata = self.PA_S4U2Self_create(
                name=client_cname,
                realm=client_realm,
                tgt_session_key=target_tgt.session_key,
                ctype=None)

            # Add empty FX‐FAST padata to the inner request.
            fx_fast_padata = self.PA_DATA_create(PADATA_FX_FAST, b'')

            padata = [s4u2self_padata, fx_fast_padata]

            return padata, req_body

        # Check that the PAC contains the correct groups.
        self._tgs_req(
            target_tgt, 0, target_creds, target_creds,
            expected_cname=client_cname,
            generate_fast_padata_fn=generate_s4u2self_padata,
            armor_tgt=mach_tgt,
            # NOTE(review): the ‘...’ entry presumably permits additional,
            # unchecked groups beyond those listed — confirm against
            # _tgs_req()’s expected_groups handling.
            expected_groups={
                (
                    # Expect to get the Service Asserted Identity SID.
                    security.SID_SERVICE_ASSERTED_IDENTITY,
                    SidType.EXTRA_SID,
                    security.SE_GROUP_DEFAULT_FLAGS,
                ),
                ...,
            },
            unexpected_groups={
                # Expect not to get the Authentication Authority Asserted
                # Identity SID.
                security.SID_AUTHENTICATION_AUTHORITY_ASSERTED_IDENTITY,
            })
+
    def test_authn_policy_allowed_to_user_allow_constrained_delegation(self):
        """Test constrained delegation (S4U2Proxy) to a target account whose
        policy explicitly allows the delegating service (while denying the
        original client).  The delegation succeeds, because the policy is
        checked against the service’s credentials, and the policy appears in
        the audit log."""
        samdb = self.get_samdb()

        # Create a user account and get a forwardable TGT for it.
        client_creds = self.get_cached_creds(
            account_type=self.AccountType.USER)
        client_sid = client_creds.get_sid()

        client_username = client_creds.get_username()
        client_cname = self.PrincipalName_create(name_type=NT_PRINCIPAL,
                                                 names=[client_username])

        client_tkt_options = 'forwardable'
        expected_flags = krb5_asn1.TicketFlags(client_tkt_options)

        client_tgt = self.get_tgt(client_creds,
                                  kdc_options=client_tkt_options,
                                  expected_flags=expected_flags)

        # Create a machine account with which to perform FAST.
        mach_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER)
        mach_tgt = self.get_tgt(mach_creds)

        # Create a target account.
        target_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER,
            use_cache=False)
        target_spn = target_creds.get_spn()

        # Create a service account trusted to delegate to the target.
        service_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER,
            opts={
                'delegation_to_spn': target_spn,
            })
        service_sid = service_creds.get_sid()
        service_tgt = self.get_tgt(service_creds)

        # Create an authentication policy that applies to a computer and
        # explicitly allows the service account to obtain a service ticket,
        # while denying the user.
        allowed = f'O:SYD:(A;;CR;;;{service_sid})(D;;CR;;;{client_sid})'
        policy = self.create_authn_policy(enforced=True,
                                          computer_allowed_to=allowed)

        # Assign the policy to the target account.
        self.add_attribute(samdb, str(target_creds.get_dn()),
                           'msDS-AssignedAuthNPolicy', str(policy.dn))

        # Obtain the client’s ticket to the service, to be passed in the
        # additional-tickets field of the S4U2Proxy request.
        client_service_tkt = self.get_service_ticket(
            client_tgt,
            service_creds,
            kdc_options=client_tkt_options,
            expected_flags=expected_flags)

        kdc_options = str(krb5_asn1.KDCOptions('cname-in-addl-tkt'))

        target_decryption_key = self.TicketDecryptionKey_from_creds(
            target_creds)
        target_etypes = target_creds.tgs_supported_enctypes

        # Strip any trailing ‘$’ from the service’s account name to form the
        # expected transited-services entry.
        service_name = service_creds.get_username()
        if service_name[-1] == '$':
            service_name = service_name[:-1]
        expected_transited_services = [
            f'host/{service_name}@{service_creds.get_realm()}'
        ]

        # Don’t confuse the client’s TGS-REQ to the service, above, with the
        # following constrained delegation request to the service.
        self.discardMessages()

        # Show that obtaining a service ticket with constrained delegation is
        # allowed.
        self._tgs_req(service_tgt, 0, service_creds, target_creds,
                      armor_tgt=mach_tgt,
                      kdc_options=kdc_options,
                      expected_cname=client_cname,
                      expected_account_name=client_username,
                      additional_ticket=client_service_tkt,
                      decryption_key=target_decryption_key,
                      expected_sid=client_sid,
                      expected_supported_etypes=target_etypes,
                      expected_proxy_target=target_spn,
                      expected_transited_services=expected_transited_services)

        # The policy was checked against the service’s credentials.
        self.check_tgs_log(client_creds, target_creds,
                           policy=policy,
                           checked_creds=service_creds)
+
    def test_authn_policy_allowed_to_user_deny_constrained_delegation(self):
        """Test constrained delegation (S4U2Proxy) to a target account whose
        policy explicitly denies the delegating service (while allowing the
        original client).  The request fails with KDC_ERR_POLICY and a
        Kerberos server-restriction event is logged."""
        samdb = self.get_samdb()

        # Create a user account and get a forwardable TGT for it.
        client_creds = self.get_cached_creds(
            account_type=self.AccountType.USER)
        client_sid = client_creds.get_sid()

        client_tkt_options = 'forwardable'
        expected_flags = krb5_asn1.TicketFlags(client_tkt_options)

        client_tgt = self.get_tgt(client_creds,
                                  kdc_options=client_tkt_options,
                                  expected_flags=expected_flags)

        # Create a machine account with which to perform FAST.
        mach_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER)
        mach_tgt = self.get_tgt(mach_creds)

        # Create a target account.
        target_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER,
            use_cache=False)
        target_spn = target_creds.get_spn()

        # Create a service account trusted to delegate to the target.
        service_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER,
            opts={
                'delegation_to_spn': target_spn,
            })
        service_sid = service_creds.get_sid()
        service_tgt = self.get_tgt(service_creds)

        # Create an authentication policy that applies to a computer and
        # explicitly denies the service account to obtain a service ticket,
        # while allowing the user.
        denied = f'O:SYD:(D;;CR;;;{service_sid})(A;;CR;;;{client_sid})'
        policy = self.create_authn_policy(enforced=True,
                                          computer_allowed_to=denied)

        # Assign the policy to the target account.
        self.add_attribute(samdb, str(target_creds.get_dn()),
                           'msDS-AssignedAuthNPolicy', str(policy.dn))

        # Obtain the client’s ticket to the service, to be passed in the
        # additional-tickets field of the S4U2Proxy request.
        client_service_tkt = self.get_service_ticket(
            client_tgt,
            service_creds,
            kdc_options=client_tkt_options,
            expected_flags=expected_flags)

        kdc_options = str(krb5_asn1.KDCOptions('cname-in-addl-tkt'))

        target_decryption_key = self.TicketDecryptionKey_from_creds(
            target_creds)

        # Don’t confuse the client’s TGS-REQ to the service, above, with the
        # following constrained delegation request to the service.
        self.discardMessages()

        # Show that obtaining a service ticket with constrained delegation is
        # not allowed.
        self._tgs_req(
            service_tgt, KDC_ERR_POLICY, service_creds, target_creds,
            armor_tgt=mach_tgt,
            kdc_options=kdc_options,
            additional_ticket=client_service_tkt,
            decryption_key=target_decryption_key,
            expect_edata=self.expect_padata_outer,
            # We aren’t particular about whether or not we get an NTSTATUS.
            expect_status=None,
            expected_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
            check_patypes=False)

        # The denial was checked against the service’s credentials and
        # audited as a Kerberos server restriction.
        self.check_tgs_log(
            service_creds, target_creds,
            policy=policy,
            checked_creds=service_creds,
            status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
            event=AuditEvent.KERBEROS_SERVER_RESTRICTION,
            reason=AuditReason.ACCESS_DENIED)
+
    def test_authn_policy_constrained_delegation_not_allowed_from(self):
        """Test that a client’s ‘allowed-from’ policy does not restrict
        constrained delegation performed on the client’s behalf: even a policy
        denying authentication from every device does not block S4U2Proxy,
        and no policy is recorded in the log."""
        samdb = self.get_samdb()

        # Create a user account and get a forwardable TGT for it.
        client_creds = self.get_cached_creds(
            account_type=self.AccountType.USER,
            use_cache=False)
        client_sid = client_creds.get_sid()

        client_username = client_creds.get_username()
        client_cname = self.PrincipalName_create(name_type=NT_PRINCIPAL,
                                                 names=[client_username])

        client_tkt_options = 'forwardable'
        expected_flags = krb5_asn1.TicketFlags(client_tkt_options)

        client_tgt = self.get_tgt(client_creds,
                                  kdc_options=client_tkt_options,
                                  expected_flags=expected_flags)

        # Create an authentication policy that applies to a user and explicitly
        # denies authentication with any device.
        denied = 'O:SYD:(D;;CR;;;WD)'
        policy = self.create_authn_policy(enforced=True,
                                          user_allowed_from=denied)

        # Assign the policy to the client account.
        self.add_attribute(samdb, str(client_creds.get_dn()),
                           'msDS-AssignedAuthNPolicy', str(policy.dn))

        # Create a machine account with which to perform FAST.
        mach_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER)
        mach_tgt = self.get_tgt(mach_creds)

        # Create a target account.
        target_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER,
            use_cache=False)
        target_spn = target_creds.get_spn()

        # Create a service account trusted to delegate to the target.
        service_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER,
            opts={
                'delegation_to_spn': target_spn,
            })
        service_tgt = self.get_tgt(service_creds)

        # Obtain the client’s ticket to the service, to be passed in the
        # additional-tickets field of the S4U2Proxy request.
        client_service_tkt = self.get_service_ticket(
            client_tgt,
            service_creds,
            kdc_options=client_tkt_options,
            expected_flags=expected_flags)

        kdc_options = str(krb5_asn1.KDCOptions('cname-in-addl-tkt'))

        target_decryption_key = self.TicketDecryptionKey_from_creds(
            target_creds)
        target_etypes = target_creds.tgs_supported_enctypes

        # Strip any trailing ‘$’ from the service’s account name to form the
        # expected transited-services entry.
        service_name = service_creds.get_username()
        if service_name[-1] == '$':
            service_name = service_name[:-1]
        expected_transited_services = [
            f'host/{service_name}@{service_creds.get_realm()}'
        ]

        # Don’t confuse the client’s TGS-REQ to the service, above, with the
        # following constrained delegation request to the service.
        self.discardMessages()

        # Show that obtaining a service ticket with constrained delegation is
        # allowed, despite the client’s policy.
        self._tgs_req(service_tgt, 0, service_creds, target_creds,
                      armor_tgt=mach_tgt,
                      kdc_options=kdc_options,
                      expected_cname=client_cname,
                      expected_account_name=client_username,
                      additional_ticket=client_service_tkt,
                      decryption_key=target_decryption_key,
                      expected_sid=client_sid,
                      expected_supported_etypes=target_etypes,
                      expected_proxy_target=target_spn,
                      expected_transited_services=expected_transited_services)

        # The client’s policy does not appear in the log.
        self.check_tgs_log(client_creds, target_creds,
                           policy=None,
                           checked_creds=service_creds)
+
    def test_authn_policy_rbcd_not_allowed_from(self):
        """Test that a client’s ‘allowed-from’ policy does not restrict
        resource-based constrained delegation (RBCD) performed on the
        client’s behalf: even a policy denying authentication from every
        device does not block the request, and no policy is recorded in the
        log."""
        samdb = self.get_samdb()
        functional_level = self.get_domain_functional_level(samdb)

        # RBCD depends on domain support; skip on older domains.
        if functional_level < dsdb.DS_DOMAIN_FUNCTION_2008:
            self.skipTest('RBCD requires FL2008')

        # Create a user account and get a forwardable TGT for it.
        client_creds = self.get_cached_creds(
            account_type=self.AccountType.USER,
            use_cache=False)
        client_sid = client_creds.get_sid()

        client_username = client_creds.get_username()
        client_cname = self.PrincipalName_create(name_type=NT_PRINCIPAL,
                                                 names=[client_username])

        client_tkt_options = 'forwardable'
        expected_flags = krb5_asn1.TicketFlags(client_tkt_options)

        client_tgt = self.get_tgt(client_creds,
                                  kdc_options=client_tkt_options,
                                  expected_flags=expected_flags)

        # Create an authentication policy that applies to a user and explicitly
        # denies authentication with any device.
        denied = 'O:SYD:(D;;CR;;;WD)'
        policy = self.create_authn_policy(enforced=True,
                                          user_allowed_from=denied)

        # Assign the policy to the client account.
        self.add_attribute(samdb, str(client_creds.get_dn()),
                           'msDS-AssignedAuthNPolicy', str(policy.dn))

        # Create a machine account with which to perform FAST.
        mach_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER)
        mach_tgt = self.get_tgt(mach_creds)

        # Create a service account.
        service_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER,
            opts={'id': 1})
        service_tgt = self.get_tgt(service_creds)

        # Create a target account that allows RBCD from the service account.
        target_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER,
            opts={
                'delegation_from_dn': str(service_creds.get_dn()),
            })

        # Obtain the client’s ticket to the service, to be passed in the
        # additional-tickets field of the RBCD request.
        client_service_tkt = self.get_service_ticket(
            client_tgt,
            service_creds,
            kdc_options=client_tkt_options,
            expected_flags=expected_flags)

        kdc_options = str(krb5_asn1.KDCOptions('cname-in-addl-tkt'))

        target_decryption_key = self.TicketDecryptionKey_from_creds(
            target_creds)
        target_etypes = target_creds.tgs_supported_enctypes

        # Strip any trailing ‘$’ from the service’s account name to form the
        # expected transited-services entry.
        service_name = service_creds.get_username()
        if service_name[-1] == '$':
            service_name = service_name[:-1]
        expected_transited_services = [
            f'host/{service_name}@{service_creds.get_realm()}'
        ]

        # Don’t confuse the client’s TGS-REQ to the service, above, with the
        # following RBCD request to the service.
        self.discardMessages()

        # Show that obtaining a service ticket with RBCD is allowed, despite
        # the client’s policy.
        self._tgs_req(service_tgt, 0, service_creds, target_creds,
                      armor_tgt=mach_tgt,
                      kdc_options=kdc_options,
                      pac_options='1001',  # supports claims, RBCD
                      expected_cname=client_cname,
                      expected_account_name=client_username,
                      additional_ticket=client_service_tkt,
                      decryption_key=target_decryption_key,
                      expected_sid=client_sid,
                      expected_supported_etypes=target_etypes,
                      expected_proxy_target=target_creds.get_spn(),
                      expected_transited_services=expected_transited_services)

        # The client’s policy does not appear in the log.
        self.check_tgs_log(client_creds, target_creds,
                           policy=None,
                           checked_creds=service_creds)
+
    def test_authn_policy_allowed_to_user_allow_constrained_delegation_wrong_sname(self):
        """Test that a constrained delegation (S4U2Proxy) request fails with
        KDC_ERR_BADOPTION when the evidence ticket’s sname does not match the
        delegating service, and that the failure is logged with
        NT_STATUS_UNSUCCESSFUL."""
        # Create a user account and get a forwardable TGT for it.
        client_creds = self.get_cached_creds(
            account_type=self.AccountType.USER,
            use_cache=False)

        client_tkt_options = 'forwardable'
        expected_flags = krb5_asn1.TicketFlags(client_tkt_options)

        client_tgt = self.get_tgt(client_creds,
                                  kdc_options=client_tkt_options,
                                  expected_flags=expected_flags)

        # Create a machine account with which to perform FAST.
        mach_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER)
        mach_tgt = self.get_tgt(mach_creds)

        # Create a target account.
        target_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER,
            opts={'id': 1})
        target_spn = target_creds.get_spn()

        # Create a service account trusted to delegate to the target.
        service_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER,
            opts={'delegation_to_spn': target_spn})
        service_tgt = self.get_tgt(service_creds)

        # ‘fresh=True’ ensures we get a ticket we can safely modify below.
        client_service_tkt = self.get_service_ticket(
            client_tgt,
            service_creds,
            kdc_options=client_tkt_options,
            expected_flags=expected_flags,
            fresh=True)
        # Change the ‘sname’ of the ticket to an incorrect value.
        client_service_tkt.set_sname(self.get_krbtgt_sname())

        kdc_options = str(krb5_asn1.KDCOptions('cname-in-addl-tkt'))

        target_decryption_key = self.TicketDecryptionKey_from_creds(
            target_creds)

        # Don’t confuse the client’s TGS-REQ to the service, above, with the
        # following constrained delegation request to the service.
        self.discardMessages()

        # Show that obtaining a service ticket with constrained delegation
        # fails if the sname doesn’t match.
        self._tgs_req(service_tgt, KDC_ERR_BADOPTION,
                      service_creds, target_creds,
                      armor_tgt=mach_tgt,
                      kdc_options=kdc_options,
                      additional_ticket=client_service_tkt,
                      decryption_key=target_decryption_key,
                      expect_edata=self.expect_padata_outer,
                      check_patypes=False)

        # The failure is logged against the service’s credentials.
        self.check_tgs_log(
            service_creds, target_creds,
            checked_creds=service_creds,
            status=ntstatus.NT_STATUS_UNSUCCESSFUL)
+
    def test_authn_policy_allowed_to_user_allow_rbcd(self):
        """Test resource-based constrained delegation (RBCD) to a target
        account whose policy explicitly allows the delegating service (while
        denying the original client).  The delegation succeeds and the policy
        appears in the audit log."""
        samdb = self.get_samdb()
        functional_level = self.get_domain_functional_level(samdb)

        # RBCD depends on domain support; skip on older domains.
        if functional_level < dsdb.DS_DOMAIN_FUNCTION_2008:
            self.skipTest('RBCD requires FL2008')

        # Create a user account and get a forwardable TGT for it.
        client_creds = self.get_cached_creds(
            account_type=self.AccountType.USER)
        client_sid = client_creds.get_sid()

        client_username = client_creds.get_username()
        client_cname = self.PrincipalName_create(name_type=NT_PRINCIPAL,
                                                 names=[client_username])

        client_tkt_options = 'forwardable'
        expected_flags = krb5_asn1.TicketFlags(client_tkt_options)

        client_tgt = self.get_tgt(client_creds,
                                  kdc_options=client_tkt_options,
                                  expected_flags=expected_flags)

        # Create a machine account with which to perform FAST.
        mach_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER)
        mach_tgt = self.get_tgt(mach_creds)

        # Create a service account.
        service_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER,
            opts={'id': 1})
        service_sid = service_creds.get_sid()
        service_tgt = self.get_tgt(service_creds)

        # Create an authentication policy that applies to a computer and
        # explicitly allows the service account to obtain a service ticket,
        # while denying the user.
        allowed = f'O:SYD:(A;;CR;;;{service_sid})(D;;CR;;;{client_sid})'
        policy = self.create_authn_policy(enforced=True,
                                          computer_allowed_to=allowed)

        # Create a target account with the assigned policy, allowing RBCD
        # from the service account.
        target_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER,
            opts={
                'assigned_policy': str(policy.dn),
                'delegation_from_dn': str(service_creds.get_dn()),
            })

        # Obtain the client’s ticket to the service, to be passed in the
        # additional-tickets field of the RBCD request.
        client_service_tkt = self.get_service_ticket(
            client_tgt,
            service_creds,
            kdc_options=client_tkt_options,
            expected_flags=expected_flags)

        kdc_options = str(krb5_asn1.KDCOptions('cname-in-addl-tkt'))

        target_decryption_key = self.TicketDecryptionKey_from_creds(
            target_creds)
        target_etypes = target_creds.tgs_supported_enctypes

        # Strip any trailing ‘$’ from the service’s account name to form the
        # expected transited-services entry.
        service_name = service_creds.get_username()
        if service_name[-1] == '$':
            service_name = service_name[:-1]
        expected_transited_services = [
            f'host/{service_name}@{service_creds.get_realm()}'
        ]

        # Don’t confuse the client’s TGS-REQ to the service, above, with the
        # following RBCD request to the service.
        self.discardMessages()

        # Show that obtaining a service ticket with RBCD is allowed.
        self._tgs_req(service_tgt, 0, service_creds, target_creds,
                      armor_tgt=mach_tgt,
                      kdc_options=kdc_options,
                      pac_options='1001',  # supports claims, RBCD
                      expected_cname=client_cname,
                      expected_account_name=client_username,
                      additional_ticket=client_service_tkt,
                      decryption_key=target_decryption_key,
                      expected_sid=client_sid,
                      expected_supported_etypes=target_etypes,
                      expected_proxy_target=target_creds.get_spn(),
                      expected_transited_services=expected_transited_services)

        # The policy was checked against the service’s credentials.
        self.check_tgs_log(client_creds, target_creds,
                           policy=policy,
                           checked_creds=service_creds)
+
    def test_authn_policy_allowed_to_user_deny_rbcd(self):
        """Test resource-based constrained delegation (RBCD) to a target
        account whose policy explicitly denies the delegating service (while
        allowing the original client).  The request fails with KDC_ERR_POLICY
        and a Kerberos server-restriction event is logged."""
        samdb = self.get_samdb()
        functional_level = self.get_domain_functional_level(samdb)

        # RBCD depends on domain support; skip on older domains.
        if functional_level < dsdb.DS_DOMAIN_FUNCTION_2008:
            self.skipTest('RBCD requires FL2008')

        # Create a user account and get a forwardable TGT for it.
        client_creds = self.get_cached_creds(
            account_type=self.AccountType.USER)
        client_sid = client_creds.get_sid()

        client_tkt_options = 'forwardable'
        expected_flags = krb5_asn1.TicketFlags(client_tkt_options)

        client_tgt = self.get_tgt(client_creds,
                                  kdc_options=client_tkt_options,
                                  expected_flags=expected_flags)

        # Create a machine account with which to perform FAST.
        mach_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER)
        mach_tgt = self.get_tgt(mach_creds)

        # Create a service account.
        service_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER,
            opts={'id': 1})
        service_sid = service_creds.get_sid()
        service_tgt = self.get_tgt(service_creds)

        # Create an authentication policy that applies to a computer and
        # explicitly denies the service account to obtain a service ticket,
        # while allowing the user.
        denied = f'O:SYD:(D;;CR;;;{service_sid})(A;;CR;;;{client_sid})'
        policy = self.create_authn_policy(enforced=True,
                                          computer_allowed_to=denied)

        # Create a target account with the assigned policy, allowing RBCD
        # from the service account.
        target_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER,
            opts={
                'assigned_policy': str(policy.dn),
                'delegation_from_dn': str(service_creds.get_dn()),
            })

        # Obtain the client’s ticket to the service, to be passed in the
        # additional-tickets field of the RBCD request.
        client_service_tkt = self.get_service_ticket(
            client_tgt,
            service_creds,
            kdc_options=client_tkt_options,
            expected_flags=expected_flags)

        kdc_options = str(krb5_asn1.KDCOptions('cname-in-addl-tkt'))

        target_decryption_key = self.TicketDecryptionKey_from_creds(
            target_creds)

        # Don’t confuse the client’s TGS-REQ to the service, above, with the
        # following RBCD request to the service.
        self.discardMessages()

        # Show that obtaining a service ticket with RBCD is not allowed.
        self._tgs_req(
            service_tgt, KDC_ERR_POLICY, service_creds, target_creds,
            armor_tgt=mach_tgt,
            kdc_options=kdc_options,
            pac_options='1001',  # supports claims, RBCD
            additional_ticket=client_service_tkt,
            decryption_key=target_decryption_key,
            expect_edata=self.expect_padata_outer,
            expected_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
            check_patypes=False)

        # The denial was checked against the service’s credentials and
        # audited as a Kerberos server restriction.
        self.check_tgs_log(
            service_creds, target_creds,
            policy=policy,
            checked_creds=service_creds,
            status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
            event=AuditEvent.KERBEROS_SERVER_RESTRICTION,
            reason=AuditReason.ACCESS_DENIED)
+
    def test_authn_policy_allowed_to_user_allow_rbcd_wrong_sname(self):
        """Test that an RBCD request fails with KDC_ERR_BADOPTION when the
        evidence ticket’s sname does not match the delegating service, and
        that the failure is logged with NT_STATUS_UNSUCCESSFUL."""
        samdb = self.get_samdb()
        functional_level = self.get_domain_functional_level(samdb)

        # RBCD depends on domain support; skip on older domains.
        if functional_level < dsdb.DS_DOMAIN_FUNCTION_2008:
            self.skipTest('RBCD requires FL2008')

        # Create a user account and get a forwardable TGT for it.
        client_creds = self.get_cached_creds(
            account_type=self.AccountType.USER,
            use_cache=False)

        client_tkt_options = 'forwardable'
        expected_flags = krb5_asn1.TicketFlags(client_tkt_options)

        client_tgt = self.get_tgt(client_creds,
                                  kdc_options=client_tkt_options,
                                  expected_flags=expected_flags)

        # Create a machine account with which to perform FAST.
        mach_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER)
        mach_tgt = self.get_tgt(mach_creds)

        # Create a service account.
        service_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER,
            opts={'id': 1})
        service_tgt = self.get_tgt(service_creds)

        # Create a target account that allows RBCD from the service account.
        # (No authentication policy is assigned in this test.)
        target_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER,
            opts={
                'delegation_from_dn': str(service_creds.get_dn()),
            })

        # ‘fresh=True’ ensures we get a ticket we can safely modify below.
        client_service_tkt = self.get_service_ticket(
            client_tgt,
            service_creds,
            kdc_options=client_tkt_options,
            expected_flags=expected_flags,
            fresh=True)
        # Change the ‘sname’ of the ticket to an incorrect value.
        client_service_tkt.set_sname(self.get_krbtgt_sname())

        kdc_options = str(krb5_asn1.KDCOptions('cname-in-addl-tkt'))

        target_decryption_key = self.TicketDecryptionKey_from_creds(
            target_creds)

        # Don’t confuse the client’s TGS-REQ to the service, above, with the
        # following RBCD request to the service.
        self.discardMessages()

        # Show that obtaining a service ticket with RBCD fails if the sname
        # doesn’t match.
        self._tgs_req(service_tgt, KDC_ERR_BADOPTION,
                      service_creds, target_creds,
                      armor_tgt=mach_tgt,
                      kdc_options=kdc_options,
                      pac_options='1001',  # supports claims, RBCD
                      additional_ticket=client_service_tkt,
                      decryption_key=target_decryption_key,
                      expect_edata=self.expect_padata_outer,
                      check_patypes=False)

        # The failure is logged against the service’s credentials.
        self.check_tgs_log(service_creds, target_creds,
                           checked_creds=service_creds,
                           status=ntstatus.NT_STATUS_UNSUCCESSFUL)
+
    def test_authn_policy_allowed_to_user_allow_constrained_delegation_to_self(self):
        """Test constrained delegation from a service to itself, where the
        service’s policy explicitly allows the client account (while denying
        the service itself).  The S4U2Proxy request succeeds and the policy
        appears in the audit log."""
        samdb = self.get_samdb()

        # Create a user account and get a forwardable TGT for it.
        client_creds = self.get_cached_creds(
            account_type=self.AccountType.USER)
        client_sid = client_creds.get_sid()

        client_username = client_creds.get_username()
        client_cname = self.PrincipalName_create(name_type=NT_PRINCIPAL,
                                                 names=[client_username])

        client_tkt_options = 'forwardable'
        expected_flags = krb5_asn1.TicketFlags(client_tkt_options)

        client_tgt = self.get_tgt(client_creds,
                                  kdc_options=client_tkt_options,
                                  expected_flags=expected_flags)

        # Create a machine account with which to perform FAST.
        mach_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER)
        mach_tgt = self.get_tgt(mach_creds)

        # Create a service account.
        service_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER,
            use_cache=False)
        service_dn_str = str(service_creds.get_dn())
        service_spn = service_creds.get_spn()
        service_sid = service_creds.get_sid()
        service_tgt = self.get_tgt(service_creds)

        # Allow delegation to ourselves.
        self.add_attribute(samdb, service_dn_str,
                           'msDS-AllowedToDelegateTo', service_spn)

        # Create an authentication policy that applies to a computer and
        # explicitly allows the client account to obtain a service ticket,
        # while denying the service.
        allowed = f'O:SYD:(A;;CR;;;{client_sid})(D;;CR;;;{service_sid})'
        policy = self.create_authn_policy(enforced=True,
                                          computer_allowed_to=allowed)

        # Assign the policy to the service account.
        self.add_attribute(samdb, service_dn_str,
                           'msDS-AssignedAuthNPolicy', str(policy.dn))

        # Obtain the client’s ticket to the service, to be passed in the
        # additional-tickets field of the S4U2Proxy request.
        client_service_tkt = self.get_service_ticket(
            client_tgt,
            service_creds,
            kdc_options=client_tkt_options,
            expected_flags=expected_flags)

        kdc_options = str(krb5_asn1.KDCOptions('cname-in-addl-tkt'))

        target_decryption_key = self.TicketDecryptionKey_from_creds(
            service_creds)
        target_etypes = service_creds.tgs_supported_enctypes

        # Strip any trailing ‘$’ from the service’s account name to form the
        # expected transited-services entry.
        service_name = service_creds.get_username()
        if service_name[-1] == '$':
            service_name = service_name[:-1]
        expected_transited_services = [
            f'host/{service_name}@{service_creds.get_realm()}'
        ]

        # Don’t confuse the client’s TGS-REQ to the service, above, with the
        # following constrained delegation request to the service.
        self.discardMessages()

        # Show that obtaining a service ticket to ourselves with constrained
        # delegation is allowed.
        self._tgs_req(service_tgt, 0, service_creds, service_creds,
                      armor_tgt=mach_tgt,
                      kdc_options=kdc_options,
                      expected_cname=client_cname,
                      expected_account_name=client_username,
                      additional_ticket=client_service_tkt,
                      decryption_key=target_decryption_key,
                      expected_sid=client_sid,
                      expected_supported_etypes=target_etypes,
                      expected_proxy_target=service_spn,
                      expected_transited_services=expected_transited_services)

        # The policy was checked against the service’s credentials.
        self.check_tgs_log(client_creds, service_creds,
                           policy=policy,
                           checked_creds=service_creds)
+
    def test_authn_policy_allowed_to_user_deny_constrained_delegation_to_self(self):
        """A policy ACE denying the delegating client does not block
        constrained delegation from a service to itself.

        The service delegates to itself via S4U2Proxy; the assigned policy
        explicitly denies the client SID but allows the service SID, and the
        request still succeeds (per the final assertion and the comment
        below), showing the access check is made against the service, not
        the client named in the additional ticket.
        """
        samdb = self.get_samdb()

        client_creds = self.get_cached_creds(
            account_type=self.AccountType.USER)
        client_sid = client_creds.get_sid()

        client_username = client_creds.get_username()
        client_cname = self.PrincipalName_create(name_type=NT_PRINCIPAL,
                                                 names=[client_username])

        # The client’s ticket must be forwardable for constrained delegation.
        client_tkt_options = 'forwardable'
        expected_flags = krb5_asn1.TicketFlags(client_tkt_options)

        client_tgt = self.get_tgt(client_creds,
                                  kdc_options=client_tkt_options,
                                  expected_flags=expected_flags)

        # Create a machine account with which to perform FAST.
        mach_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER)
        mach_tgt = self.get_tgt(mach_creds)

        # Create a service account.
        service_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER,
            use_cache=False)
        service_dn_str = str(service_creds.get_dn())
        service_spn = service_creds.get_spn()
        service_sid = service_creds.get_sid()
        service_tgt = self.get_tgt(service_creds)

        # Allow delegation to ourselves.
        self.add_attribute(samdb, service_dn_str,
                           'msDS-AllowedToDelegateTo', service_spn)

        # Obtain the client’s ticket to the service, to be presented as the
        # additional ticket in the delegation request.
        client_service_tkt = self.get_service_ticket(
            client_tgt,
            service_creds,
            kdc_options=client_tkt_options,
            expected_flags=expected_flags)

        # Create an authentication policy that applies to a computer and
        # explicitly denies the client account to obtain a service ticket,
        # while allowing the service.
        allowed = f'O:SYD:(D;;CR;;;{client_sid})(A;;CR;;;{service_sid})'
        policy = self.create_authn_policy(enforced=True,
                                          computer_allowed_to=allowed)

        # Assign the policy to the service account.
        self.add_attribute(samdb, service_dn_str,
                           'msDS-AssignedAuthNPolicy', str(policy.dn))

        kdc_options = str(krb5_asn1.KDCOptions('cname-in-addl-tkt'))

        target_decryption_key = self.TicketDecryptionKey_from_creds(
            service_creds)
        target_etypes = service_creds.tgs_supported_enctypes

        # The expected transited-services entry is host/<account name with
        # any trailing ‘$’ stripped>@<realm>.
        service_name = service_creds.get_username()
        if service_name[-1] == '$':
            service_name = service_name[:-1]
        expected_transited_services = [
            f'host/{service_name}@{service_creds.get_realm()}'
        ]

        # Don’t confuse the client’s TGS-REQ to the service, above, with the
        # following constrained delegation request to the service.
        self.discardMessages()

        # Show that obtaining a service ticket to ourselves with constrained
        # delegation is allowed, despite the policy’s disallowing it.
        self._tgs_req(service_tgt, 0, service_creds, service_creds,
                      armor_tgt=mach_tgt,
                      kdc_options=kdc_options,
                      expected_cname=client_cname,
                      expected_account_name=client_username,
                      additional_ticket=client_service_tkt,
                      decryption_key=target_decryption_key,
                      expected_sid=client_sid,
                      expected_supported_etypes=target_etypes,
                      expected_proxy_target=service_spn,
                      expected_transited_services=expected_transited_services)

        self.check_tgs_log(client_creds, service_creds,
                           policy=policy,
                           checked_creds=service_creds)
+
    def test_authn_policy_allowed_to_user_not_allowed_constrained_delegation_to_self(self):
        """Without msDS-AllowedToDelegateTo, constrained delegation to
        ourselves fails with KDC_ERR_BADOPTION even though the assigned
        policy would allow the client.

        The failure is a delegation-configuration error, not a policy error,
        so no policy is expected in the audit log.
        """
        samdb = self.get_samdb()

        client_creds = self.get_cached_creds(
            account_type=self.AccountType.USER)
        client_sid = client_creds.get_sid()

        # The client’s ticket must be forwardable for constrained delegation.
        client_tkt_options = 'forwardable'
        expected_flags = krb5_asn1.TicketFlags(client_tkt_options)

        client_tgt = self.get_tgt(client_creds,
                                  kdc_options=client_tkt_options,
                                  expected_flags=expected_flags)

        # Create a machine account with which to perform FAST.
        mach_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER)
        mach_tgt = self.get_tgt(mach_creds)

        # Create a service account.
        service_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER,
            use_cache=False)
        service_dn_str = str(service_creds.get_dn())
        service_sid = service_creds.get_sid()
        service_tgt = self.get_tgt(service_creds)

        # Don’t set msDS-AllowedToDelegateTo.

        # Create an authentication policy that applies to a computer and
        # explicitly allows the client account to obtain a service ticket,
        # while denying the service.
        allowed = f'O:SYD:(A;;CR;;;{client_sid})(D;;CR;;;{service_sid})'
        policy = self.create_authn_policy(enforced=True,
                                          computer_allowed_to=allowed)

        # Assign the policy to the service account.
        self.add_attribute(samdb, service_dn_str,
                           'msDS-AssignedAuthNPolicy', str(policy.dn))

        # Obtain the client’s ticket to the service, to be presented as the
        # additional ticket in the delegation request.
        client_service_tkt = self.get_service_ticket(
            client_tgt,
            service_creds,
            kdc_options=client_tkt_options,
            expected_flags=expected_flags)

        kdc_options = str(krb5_asn1.KDCOptions('cname-in-addl-tkt'))

        target_decryption_key = self.TicketDecryptionKey_from_creds(
            service_creds)

        # Don’t confuse the client’s TGS-REQ to the service, above, with the
        # following constrained delegation request to the service.
        self.discardMessages()

        # Show that obtaining a service ticket to ourselves with constrained
        # delegation is not allowed without msDS-AllowedToDelegateTo.
        self._tgs_req(service_tgt, KDC_ERR_BADOPTION,
                      service_creds, service_creds,
                      armor_tgt=mach_tgt,
                      kdc_options=kdc_options,
                      additional_ticket=client_service_tkt,
                      decryption_key=target_decryption_key,
                      expect_edata=self.expect_padata_outer,
                      check_patypes=False)

        self.check_tgs_log(
            service_creds, service_creds,
            # The failure is not due to a policy error, so no policy appears in
            # the logs.
            policy=None,
            checked_creds=service_creds,
            status=ntstatus.NT_STATUS_UNSUCCESSFUL)
+
    def test_authn_policy_allowed_to_user_allow_rbcd_to_self(self):
        """Resource-based constrained delegation (RBCD) from a service to
        itself succeeds when msDS-AllowedToActOnBehalfOfOtherIdentity permits
        it, with an authentication policy assigned to the service.
        """
        samdb = self.get_samdb()
        functional_level = self.get_domain_functional_level(samdb)

        # RBCD is only available at FL2008 and above.
        if functional_level < dsdb.DS_DOMAIN_FUNCTION_2008:
            self.skipTest('RBCD requires FL2008')

        client_creds = self.get_cached_creds(
            account_type=self.AccountType.USER)
        client_sid = client_creds.get_sid()

        client_username = client_creds.get_username()
        client_cname = self.PrincipalName_create(name_type=NT_PRINCIPAL,
                                                 names=[client_username])

        # The client’s ticket must be forwardable for delegation.
        client_tkt_options = 'forwardable'
        expected_flags = krb5_asn1.TicketFlags(client_tkt_options)

        client_tgt = self.get_tgt(client_creds,
                                  kdc_options=client_tkt_options,
                                  expected_flags=expected_flags)

        # Create a machine account with which to perform FAST.
        mach_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER)
        mach_tgt = self.get_tgt(mach_creds)

        # Create a service account allowed to delegate to itself. We can’t use
        # a more specific ACE containing the account’s SID (obtained
        # post-creation) as Samba (unlike Windows) won’t let us modify
        # msDS-AllowedToActOnBehalfOfOtherIdentity without being System.
        domain_sid = security.dom_sid(samdb.get_domain_sid())
        security_descriptor = security.descriptor.from_sddl(
            'O:BAD:(A;;CR;;;WD)', domain_sid)
        service_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER,
            opts={'delegation_from_dn': ndr_pack(security_descriptor)},
            use_cache=False)
        service_dn_str = str(service_creds.get_dn())
        service_sid = service_creds.get_sid()
        service_tgt = self.get_tgt(service_creds)

        # Create an authentication policy that applies to a computer and
        # explicitly allows the client account to obtain a service ticket,
        # while denying the service.
        allowed = f'O:SYD:(A;;CR;;;{client_sid})(D;;CR;;;{service_sid})'
        policy = self.create_authn_policy(enforced=True,
                                          computer_allowed_to=allowed)

        # Assign the policy to the service account.
        self.add_attribute(samdb, service_dn_str,
                           'msDS-AssignedAuthNPolicy', str(policy.dn))

        # Obtain the client’s ticket to the service, to be presented as the
        # additional ticket in the RBCD request.
        client_service_tkt = self.get_service_ticket(
            client_tgt,
            service_creds,
            kdc_options=client_tkt_options,
            expected_flags=expected_flags)

        kdc_options = str(krb5_asn1.KDCOptions('cname-in-addl-tkt'))

        service_decryption_key = self.TicketDecryptionKey_from_creds(
            service_creds)
        service_etypes = service_creds.tgs_supported_enctypes

        # The expected transited-services entry is host/<account name with
        # any trailing ‘$’ stripped>@<realm>.
        service_name = service_creds.get_username()
        if service_name[-1] == '$':
            service_name = service_name[:-1]
        expected_transited_services = [
            f'host/{service_name}@{service_creds.get_realm()}'
        ]

        # Don’t confuse the client’s TGS-REQ to the service, above, with the
        # following RBCD request to the service.
        self.discardMessages()

        # Show that obtaining a service ticket to ourselves with RBCD is
        # allowed.
        self._tgs_req(service_tgt, 0, service_creds, service_creds,
                      armor_tgt=mach_tgt,
                      kdc_options=kdc_options,
                      pac_options='1001',  # supports claims, RBCD
                      expected_cname=client_cname,
                      expected_account_name=client_username,
                      additional_ticket=client_service_tkt,
                      decryption_key=service_decryption_key,
                      expected_sid=client_sid,
                      expected_supported_etypes=service_etypes,
                      expected_proxy_target=service_creds.get_spn(),
                      expected_transited_services=expected_transited_services)

        self.check_tgs_log(client_creds, service_creds,
                           policy=policy,
                           checked_creds=service_creds)
+
    def test_authn_policy_allowed_to_user_deny_rbcd_to_self(self):
        """A policy ACE denying the delegating client does not block RBCD
        from a service to itself.

        The assigned policy explicitly denies the client SID but allows the
        service SID, and the request still succeeds (per the final assertion
        and the comment below), showing the access check is made against the
        service, not the client named in the additional ticket.
        """
        samdb = self.get_samdb()
        functional_level = self.get_domain_functional_level(samdb)

        # RBCD is only available at FL2008 and above.
        if functional_level < dsdb.DS_DOMAIN_FUNCTION_2008:
            self.skipTest('RBCD requires FL2008')

        client_creds = self.get_cached_creds(
            account_type=self.AccountType.USER)
        client_sid = client_creds.get_sid()

        client_username = client_creds.get_username()
        client_cname = self.PrincipalName_create(name_type=NT_PRINCIPAL,
                                                 names=[client_username])

        # The client’s ticket must be forwardable for delegation.
        client_tkt_options = 'forwardable'
        expected_flags = krb5_asn1.TicketFlags(client_tkt_options)

        client_tgt = self.get_tgt(client_creds,
                                  kdc_options=client_tkt_options,
                                  expected_flags=expected_flags)

        # Create a machine account with which to perform FAST.
        mach_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER)
        mach_tgt = self.get_tgt(mach_creds)

        # Create a service account allowed to delegate to itself. We can’t use
        # a more specific ACE containing the account’s SID (obtained
        # post-creation) as Samba (unlike Windows) won’t let us modify
        # msDS-AllowedToActOnBehalfOfOtherIdentity without being System.
        domain_sid = security.dom_sid(samdb.get_domain_sid())
        security_descriptor = security.descriptor.from_sddl(
            'O:BAD:(A;;CR;;;WD)', domain_sid)
        service_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER,
            opts={'delegation_from_dn': ndr_pack(security_descriptor)},
            use_cache=False)
        service_dn_str = str(service_creds.get_dn())
        service_sid = service_creds.get_sid()
        service_tgt = self.get_tgt(service_creds)

        # Obtain the client’s ticket to the service, to be presented as the
        # additional ticket in the RBCD request.
        client_service_tkt = self.get_service_ticket(
            client_tgt,
            service_creds,
            kdc_options=client_tkt_options,
            expected_flags=expected_flags)

        # Create an authentication policy that applies to a computer and
        # explicitly denies the client account to obtain a service ticket,
        # while allowing the service.
        allowed = f'O:SYD:(D;;CR;;;{client_sid})(A;;CR;;;{service_sid})'
        policy = self.create_authn_policy(enforced=True,
                                          computer_allowed_to=allowed)

        # Assign the policy to the service account.
        self.add_attribute(samdb, service_dn_str,
                           'msDS-AssignedAuthNPolicy', str(policy.dn))

        kdc_options = str(krb5_asn1.KDCOptions('cname-in-addl-tkt'))

        service_decryption_key = self.TicketDecryptionKey_from_creds(
            service_creds)
        service_etypes = service_creds.tgs_supported_enctypes

        # The expected transited-services entry is host/<account name with
        # any trailing ‘$’ stripped>@<realm>.
        service_name = service_creds.get_username()
        if service_name[-1] == '$':
            service_name = service_name[:-1]
        expected_transited_services = [
            f'host/{service_name}@{service_creds.get_realm()}'
        ]

        # Don’t confuse the client’s TGS-REQ to the service, above, with the
        # following RBCD request to the service.
        self.discardMessages()

        # Show that obtaining a service ticket to ourselves with RBCD is
        # allowed, despite the policy’s disallowing it.
        self._tgs_req(service_tgt, 0, service_creds, service_creds,
                      armor_tgt=mach_tgt,
                      kdc_options=kdc_options,
                      pac_options='1001',  # supports claims, RBCD
                      expected_cname=client_cname,
                      expected_account_name=client_username,
                      additional_ticket=client_service_tkt,
                      decryption_key=service_decryption_key,
                      expected_sid=client_sid,
                      expected_supported_etypes=service_etypes,
                      expected_proxy_target=service_creds.get_spn(),
                      expected_transited_services=expected_transited_services)

        self.check_tgs_log(client_creds, service_creds,
                           policy=policy,
                           checked_creds=service_creds)
+
    def test_authn_policy_allowed_to_user_not_allowed_rbcd_to_self(self):
        """Without msDS-AllowedToActOnBehalfOfOtherIdentity, RBCD to
        ourselves fails with KDC_ERR_BADOPTION even though the assigned
        policy would allow the client.

        The failure is a delegation-configuration error, not a policy error,
        so no policy is expected in the audit log.
        """
        samdb = self.get_samdb()
        functional_level = self.get_domain_functional_level(samdb)

        # RBCD is only available at FL2008 and above.
        if functional_level < dsdb.DS_DOMAIN_FUNCTION_2008:
            self.skipTest('RBCD requires FL2008')

        client_creds = self.get_cached_creds(
            account_type=self.AccountType.USER)
        client_sid = client_creds.get_sid()

        # The client’s ticket must be forwardable for delegation.
        client_tkt_options = 'forwardable'
        expected_flags = krb5_asn1.TicketFlags(client_tkt_options)

        client_tgt = self.get_tgt(client_creds,
                                  kdc_options=client_tkt_options,
                                  expected_flags=expected_flags)

        # Create a machine account with which to perform FAST.
        mach_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER)
        mach_tgt = self.get_tgt(mach_creds)

        # Create a service account.
        service_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER,
            use_cache=False)
        service_dn_str = str(service_creds.get_dn())
        service_sid = service_creds.get_sid()
        service_tgt = self.get_tgt(service_creds)

        # Don’t set msDS-AllowedToActOnBehalfOfOtherIdentity.

        # Create an authentication policy that applies to a computer and
        # explicitly allows the client account to obtain a service ticket,
        # while denying the service.
        allowed = f'O:SYD:(A;;CR;;;{client_sid})(D;;CR;;;{service_sid})'
        policy = self.create_authn_policy(enforced=True,
                                          computer_allowed_to=allowed)

        # Assign the policy to the service account.
        self.add_attribute(samdb, service_dn_str,
                           'msDS-AssignedAuthNPolicy', str(policy.dn))

        # Obtain the client’s ticket to the service, to be presented as the
        # additional ticket in the RBCD request.
        client_service_tkt = self.get_service_ticket(
            client_tgt,
            service_creds,
            kdc_options=client_tkt_options,
            expected_flags=expected_flags)

        kdc_options = str(krb5_asn1.KDCOptions('cname-in-addl-tkt'))

        service_decryption_key = self.TicketDecryptionKey_from_creds(
            service_creds)

        # Don’t confuse the client’s TGS-REQ to the service, above, with the
        # following RBCD request to the service.
        self.discardMessages()

        # Show that obtaining a service ticket to ourselves with RBCD
        # is not allowed without msDS-AllowedToActOnBehalfOfOtherIdentity.
        self._tgs_req(service_tgt, KDC_ERR_BADOPTION,
                      service_creds, service_creds,
                      armor_tgt=mach_tgt,
                      kdc_options=kdc_options,
                      pac_options='1001',  # supports claims, RBCD
                      additional_ticket=client_service_tkt,
                      decryption_key=service_decryption_key,
                      expect_edata=self.expect_padata_outer,
                      check_patypes=False)

        self.check_tgs_log(service_creds, service_creds,
                           # The failure is not due to a policy error, so no
                           # policy appears in the logs.
                           policy=None,
                           status=ntstatus.NT_STATUS_UNSUCCESSFUL,
                           checked_creds=service_creds)
+
+ def test_authn_policy_allowed_to_computer_allow_user2user(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ client_creds = self.get_mach_creds()
+ client_tgt = self.get_tgt(client_creds)
+
+ # Create an authentication policy that applies to a computer and
+ # explicitly allows the user account to obtain a service ticket.
+ allowed = f'O:SYD:(A;;CR;;;{client_creds.get_sid()})'
+ policy = self.create_authn_policy(enforced=True,
+ computer_allowed_to=allowed)
+
+ # Create a computer account with the assigned policy.
+ target_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
+ assigned_policy=policy)
+ target_tgt = self._get_tgt(target_creds)
+
+ kdc_options = str(krb5_asn1.KDCOptions('enc-tkt-in-skey'))
+
+ # Show that obtaining a service ticket with user-to-user is allowed.
+ self._tgs_req(client_tgt, 0, client_creds, target_creds,
+ armor_tgt=mach_tgt,
+ kdc_options=kdc_options,
+ additional_ticket=target_tgt)
+
+ self.check_tgs_log(client_creds, target_creds, policy=policy)
+
+ def test_authn_policy_allowed_to_computer_deny_user2user(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ client_creds = self.get_mach_creds()
+ client_tgt = self.get_tgt(client_creds)
+
+ # Create an authentication policy that applies to a computer and
+ # explicitly denies the user account to obtain a service ticket.
+ denied = f'O:SYD:(D;;CR;;;{client_creds.get_sid()})'
+ policy = self.create_authn_policy(enforced=True,
+ computer_allowed_to=denied)
+
+ # Create a computer account with the assigned policy.
+ target_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
+ assigned_policy=policy)
+ target_tgt = self._get_tgt(target_creds)
+
+ kdc_options = str(krb5_asn1.KDCOptions('enc-tkt-in-skey'))
+
+ # Show that obtaining a service ticket with user-to-user is not
+ # allowed.
+ self._tgs_req(
+ client_tgt, KDC_ERR_POLICY, client_creds, target_creds,
+ armor_tgt=mach_tgt,
+ kdc_options=kdc_options,
+ additional_ticket=target_tgt,
+ expect_edata=self.expect_padata_outer,
+ # We aren’t particular about whether or not we get an NTSTATUS.
+ expect_status=None,
+ expected_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ check_patypes=False)
+
+ self.check_tgs_log(
+ client_creds, target_creds,
+ policy=policy,
+ status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.KERBEROS_SERVER_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED)
+
+ def test_authn_policy_allowed_to_user_derived_class_allow(self):
+ samdb = self.get_samdb()
+
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create a user account.
+ client_creds = self.get_cached_creds(
+ account_type=self.AccountType.USER)
+ tgt = self.get_tgt(client_creds)
+
+ # Create an authentication policy that applies to a user and explicitly
+ # allows the user account to obtain a service ticket.
+ allowed = f'O:SYD:(A;;CR;;;{client_creds.get_sid()})'
+ denied = 'O:SYD:(D;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_to=allowed,
+ computer_allowed_to=denied,
+ service_allowed_to=denied)
+
+ # Create a schema class derived from ‘user’.
+ class_id = random.randint(0, 100000000)
+ user_class_cn = f'my-User-Class-{class_id}'
+ user_class = user_class_cn.replace('-', '')
+ class_dn = samdb.get_schema_basedn()
+ class_dn.add_child(f'CN={user_class_cn}')
+ governs_id = f'1.3.6.1.4.1.7165.4.6.2.9.{class_id}'
+
+ samdb.add({
+ 'dn': class_dn,
+ 'objectClass': 'classSchema',
+ 'subClassOf': 'user',
+ 'governsId': governs_id,
+ 'lDAPDisplayName': user_class,
+ })
+
+ # Create an account derived from ‘user’ with the assigned policy.
+ target_name = self.get_new_username()
+ target_creds, target_dn = self.create_account(
+ samdb, target_name,
+ account_type=self.AccountType.USER,
+ spn='host/{account}',
+ additional_details={
+ 'msDS-AssignedAuthNPolicy': str(policy.dn),
+ 'objectClass': user_class,
+ })
+
+ keys = self.get_keys(target_creds)
+ self.creds_set_keys(target_creds, keys)
+
+ # Show that obtaining a service ticket is allowed.
+ self._tgs_req(tgt, 0, client_creds, target_creds,
+ armor_tgt=mach_tgt)
+
+ self.check_tgs_log(client_creds, target_creds, policy=policy)
+
+ def test_authn_policy_allowed_to_computer_derived_class_allow(self):
+ samdb = self.get_samdb()
+
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create a user account.
+ client_creds = self.get_cached_creds(
+ account_type=self.AccountType.USER)
+ tgt = self.get_tgt(client_creds)
+
+ # Create an authentication policy that applies to a computer and
+ # explicitly allows the user account to obtain a service ticket.
+ allowed = f'O:SYD:(A;;CR;;;{client_creds.get_sid()})'
+ denied = 'O:SYD:(D;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_to=denied,
+ computer_allowed_to=allowed,
+ service_allowed_to=denied)
+
+ # Create a schema class derived from ‘computer’.
+ class_id = random.randint(0, 100000000)
+ computer_class_cn = f'my-Computer-Class-{class_id}'
+ computer_class = computer_class_cn.replace('-', '')
+ class_dn = samdb.get_schema_basedn()
+ class_dn.add_child(f'CN={computer_class_cn}')
+ governs_id = f'1.3.6.1.4.1.7165.4.6.2.9.{class_id}'
+
+ samdb.add({
+ 'dn': class_dn,
+ 'objectClass': 'classSchema',
+ 'subClassOf': 'computer',
+ 'governsId': governs_id,
+ 'lDAPDisplayName': computer_class,
+ })
+
+ # Create an account derived from ‘computer’ with the assigned policy.
+ target_name = self.get_new_username()
+ target_creds, target_dn = self.create_account(
+ samdb, target_name,
+ account_type=self.AccountType.COMPUTER,
+ spn=f'host/{target_name}',
+ additional_details={
+ 'msDS-AssignedAuthNPolicy': str(policy.dn),
+ 'objectClass': computer_class,
+ })
+
+ keys = self.get_keys(target_creds)
+ self.creds_set_keys(target_creds, keys)
+
+ # Show that obtaining a service ticket is allowed.
+ self._tgs_req(tgt, 0, client_creds, target_creds,
+ armor_tgt=mach_tgt)
+
+ self.check_tgs_log(client_creds, target_creds, policy=policy)
+
+ def test_authn_policy_allowed_to_service_derived_class_allow(self):
+ samdb = self.get_samdb()
+
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create a user account.
+ client_creds = self.get_cached_creds(
+ account_type=self.AccountType.USER)
+ tgt = self.get_tgt(client_creds)
+
+ # Create an authentication policy that applies to a managed service and
+ # explicitly allows the user account to obtain a service ticket.
+ allowed = f'O:SYD:(A;;CR;;;{client_creds.get_sid()})'
+ denied = 'O:SYD:(D;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_to=denied,
+ computer_allowed_to=denied,
+ service_allowed_to=allowed)
+
+ # Create a schema class derived from ‘msDS-ManagedServiceAccount’.
+ class_id = random.randint(0, 100000000)
+ service_class_cn = f'my-Managed-Service-Class-{class_id}'
+ service_class = service_class_cn.replace('-', '')
+ class_dn = samdb.get_schema_basedn()
+ class_dn.add_child(f'CN={service_class_cn}')
+ governs_id = f'1.3.6.1.4.1.7165.4.6.2.9.{class_id}'
+
+ samdb.add({
+ 'dn': class_dn,
+ 'objectClass': 'classSchema',
+ 'subClassOf': 'msDS-ManagedServiceAccount',
+ 'governsId': governs_id,
+ 'lDAPDisplayName': service_class,
+ })
+
+ # Create an account derived from ‘msDS-ManagedServiceAccount’ with the
+ # assigned policy.
+ target_name = self.get_new_username()
+ target_creds, target_dn = self.create_account(
+ samdb, target_name,
+ account_type=self.AccountType.MANAGED_SERVICE,
+ spn=f'host/{target_name}',
+ additional_details={
+ 'msDS-AssignedAuthNPolicy': str(policy.dn),
+ 'objectClass': service_class,
+ })
+
+ # Show that obtaining a service ticket is allowed.
+ self._tgs_req(tgt, 0, client_creds, target_creds,
+ armor_tgt=mach_tgt)
+
+ self.check_tgs_log(client_creds, target_creds, policy=policy)
+
+ def test_authn_policy_allowed_to_computer_allow_as_req(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create a user account.
+ client_creds = self.get_cached_creds(
+ account_type=self.AccountType.USER)
+
+ # Create an authentication policy that applies to a computer and
+ # explicitly allows the user account to obtain a service ticket.
+ allowed = f'O:SYD:(A;;CR;;;{client_creds.get_sid()})'
+ denied = 'O:SYD:(D;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_to=denied,
+ computer_allowed_to=allowed,
+ service_allowed_to=denied)
+
+ # Create a computer account with the assigned policy.
+ target_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
+ assigned_policy=policy)
+
+ # Show that obtaining a service ticket with an AS-REQ is allowed.
+ self._fast_as_req(client_creds, target_creds, mach_tgt)
+
+ self.check_as_log(client_creds,
+ server_policy=policy)
+
+ def test_authn_policy_allowed_to_computer_deny_as_req(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create a user account.
+ client_creds = self.get_cached_creds(
+ account_type=self.AccountType.USER)
+
+ # Create an authentication policy that applies to a computer and
+ # explicitly denies the user account to obtain a service ticket.
+ denied = f'O:SYD:(D;;CR;;;{client_creds.get_sid()})'
+ allowed = 'O:SYD:(A;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_to=allowed,
+ computer_allowed_to=denied,
+ service_allowed_to=allowed)
+
+ # Create a computer account with the assigned policy.
+ target_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
+ assigned_policy=policy)
+
+ # Show that obtaining a service ticket with an AS-REQ is denied.
+ self._fast_as_req(
+ client_creds, target_creds, mach_tgt,
+ expected_error=KDC_ERR_POLICY,
+ # We aren’t particular about whether or not we get an NTSTATUS.
+ expect_status=None,
+ expected_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED)
+
+ self.check_as_log(
+ client_creds,
+ server_policy=policy,
+ server_policy_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.KERBEROS_SERVER_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED,
+ status=ntstatus.NT_STATUS_INVALID_WORKSTATION)
+
+ def test_authn_policy_allowed_to_user_allow_as_req(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create a user account.
+ client_creds = self.get_cached_creds(
+ account_type=self.AccountType.USER)
+
+ # Create an authentication policy that applies to a user and explicitly
+ # allows the user account to obtain a service ticket.
+ allowed = f'O:SYD:(A;;CR;;;{client_creds.get_sid()})'
+ denied = 'O:SYD:(D;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_to=allowed,
+ computer_allowed_to=denied,
+ service_allowed_to=denied)
+
+ # Create a user account with the assigned policy.
+ target_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy,
+ spn='host/{account}')
+
+ # Show that obtaining a service ticket with an AS-REQ is allowed.
+ self._fast_as_req(client_creds, target_creds, mach_tgt)
+
+ self.check_as_log(client_creds,
+ server_policy=policy)
+
+ def test_authn_policy_allowed_to_user_deny_as_req(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create a user account.
+ client_creds = self.get_cached_creds(
+ account_type=self.AccountType.USER)
+
+ # Create an authentication policy that applies to a user and
+ # explicitly denies the user account to obtain a service ticket.
+ denied = f'O:SYD:(D;;CR;;;{client_creds.get_sid()})'
+ allowed = 'O:SYD:(A;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_to=denied,
+ computer_allowed_to=allowed,
+ service_allowed_to=allowed)
+
+ # Create a user account with the assigned policy.
+ target_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy,
+ spn='host/{account}')
+
+ # Show that obtaining a service ticket with an AS-REQ is denied.
+ self._fast_as_req(
+ client_creds, target_creds, mach_tgt,
+ expected_error=KDC_ERR_POLICY,
+ # We aren’t particular about whether or not we get an NTSTATUS.
+ expect_status=None,
+ expected_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED)
+
+ self.check_as_log(
+ client_creds,
+ server_policy=policy,
+ server_policy_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.KERBEROS_SERVER_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED,
+ status=ntstatus.NT_STATUS_INVALID_WORKSTATION)
+
+ def test_authn_policy_allowed_to_service_allow_as_req(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create a user account.
+ client_creds = self.get_cached_creds(
+ account_type=self.AccountType.USER)
+
+ # Create an authentication policy that applies to a managed service and
+ # explicitly allows the user account to obtain a service ticket.
+ allowed = f'O:SYD:(A;;CR;;;{client_creds.get_sid()})'
+ denied = 'O:SYD:(D;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_to=denied,
+ computer_allowed_to=denied,
+ service_allowed_to=allowed)
+
+ # Create a managed service account with the assigned policy.
+ target_creds = self._get_creds(
+ account_type=self.AccountType.MANAGED_SERVICE,
+ assigned_policy=policy)
+
+ # Show that obtaining a service ticket with an AS-REQ is allowed.
+ self._fast_as_req(client_creds, target_creds, mach_tgt)
+
+ self.check_as_log(client_creds,
+ server_policy=policy)
+
+ def test_authn_policy_allowed_to_service_deny_as_req(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create a user account.
+ client_creds = self.get_cached_creds(
+ account_type=self.AccountType.USER)
+
+ # Create an authentication policy that applies to a managed service and
+ # explicitly denies the user account to obtain a service ticket.
+ denied = f'O:SYD:(D;;CR;;;{client_creds.get_sid()})'
+ allowed = 'O:SYD:(A;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_to=allowed,
+ computer_allowed_to=allowed,
+ service_allowed_to=denied)
+
+ # Create a managed service account with the assigned policy.
+ target_creds = self._get_creds(
+ account_type=self.AccountType.MANAGED_SERVICE,
+ assigned_policy=policy)
+
+ # Show that obtaining a service ticket with an AS-REQ is denied.
+ self._fast_as_req(
+ client_creds, target_creds, mach_tgt,
+ expected_error=KDC_ERR_POLICY,
+ # We aren’t particular about whether or not we get an NTSTATUS.
+ expect_status=None,
+ expected_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED)
+
+ self.check_as_log(
+ client_creds,
+ server_policy=policy,
+ server_policy_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.KERBEROS_SERVER_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED,
+ status=ntstatus.NT_STATUS_INVALID_WORKSTATION)
+
+ def test_authn_policy_allowed_to_computer_allow_as_req_no_fast(self):
+ # Create a user account.
+ client_creds = self.get_cached_creds(
+ account_type=self.AccountType.USER)
+
+ # Create an authentication policy that applies to a computer and
+ # explicitly allows the user account to obtain a service ticket.
+ allowed = f'O:SYD:(A;;CR;;;{client_creds.get_sid()})'
+ denied = 'O:SYD:(D;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_to=denied,
+ computer_allowed_to=allowed,
+ service_allowed_to=denied)
+
+ # Create a computer account with the assigned policy.
+ target_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
+ assigned_policy=policy)
+
+ # Show that obtaining a service ticket with an AS-REQ is allowed.
+ self._as_req(client_creds, 0, target_creds,
+ etype=(kcrypto.Enctype.AES256,))
+
+ self.check_as_log(client_creds,
+ server_policy=policy)
+
+ def test_authn_policy_allowed_to_computer_deny_as_req_no_fast(self):
+ # Create a user account.
+ client_creds = self.get_cached_creds(
+ account_type=self.AccountType.USER)
+
+ # Create an authentication policy that applies to a computer and
+ # explicitly denies the user account to obtain a service ticket.
+ denied = f'O:SYD:(D;;CR;;;{client_creds.get_sid()})'
+ allowed = 'O:SYD:(A;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_to=allowed,
+ computer_allowed_to=denied,
+ service_allowed_to=allowed)
+
+ # Create a computer account with the assigned policy.
+ target_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
+ assigned_policy=policy)
+
+ # Show that obtaining a service ticket with an AS-REQ is denied.
+ self._as_req(client_creds, KDC_ERR_POLICY, target_creds,
+ etype=(kcrypto.Enctype.AES256,))
+
+ self.check_as_log(
+ client_creds,
+ server_policy=policy,
+ server_policy_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.KERBEROS_SERVER_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED,
+ status=ntstatus.NT_STATUS_INVALID_WORKSTATION)
+
+ def test_authn_policy_ntlm_allow_user(self):
+ # Create an authentication policy allowing NTLM authentication for
+ # users.
+ allowed = 'O:SYD:(A;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_ntlm=True,
+ user_allowed_from=allowed,
+ service_allowed_ntlm=False,
+ service_allowed_from=allowed)
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy,
+ ntlm=True)
+
+ # Show that NTLM authentication succeeds.
+ self._connect(client_creds, simple_bind=False)
+
+ self.check_ntlm_log(client_creds,
+ client_policy=policy)
+
+ def test_authn_policy_ntlm_deny_user(self):
+ # Create an authentication policy denying NTLM authentication for
+ # users.
+ allowed = 'O:SYD:(A;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_ntlm=False,
+ user_allowed_from=allowed,
+ service_allowed_ntlm=True,
+ service_allowed_from=allowed)
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy,
+ ntlm=True)
+
+ # Show that NTLM authentication fails.
+ self._connect(client_creds, simple_bind=False,
+ expect_error=f'{HRES_SEC_E_LOGON_DENIED:08X}')
+
+ self.check_ntlm_log(
+ client_creds,
+ client_policy=policy,
+ client_policy_status=ntstatus.NT_STATUS_ACCOUNT_RESTRICTION,
+ event=AuditEvent.NTLM_DEVICE_RESTRICTION)
+
+ def test_authn_policy_ntlm_computer(self):
+ # Create an authentication policy denying NTLM authentication.
+ denied = 'O:SYD:(D;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_ntlm=False,
+ user_allowed_from=denied,
+ service_allowed_ntlm=False,
+ service_allowed_from=denied)
+
+ # Create a computer account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
+ assigned_policy=policy,
+ ntlm=True)
+
+ # Show that NTLM authentication succeeds.
+ self._connect(client_creds, simple_bind=False)
+
+ self.check_ntlm_log(
+ client_creds,
+ client_policy=None) # Client policies don’t apply to computers.
+
+ def test_authn_policy_ntlm_allow_service(self):
+ # Create an authentication policy allowing NTLM authentication for
+ # services.
+ allowed = 'O:SYD:(A;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_ntlm=False,
+ user_allowed_from=allowed,
+ service_allowed_ntlm=True,
+ service_allowed_from=allowed)
+
+ # Create a managed service account with the assigned policy.
+ client_creds = self._get_creds(
+ account_type=self.AccountType.MANAGED_SERVICE,
+ assigned_policy=policy,
+ ntlm=True)
+
+ # Show that NTLM authentication succeeds.
+ self._connect(client_creds, simple_bind=False)
+
+ self.check_ntlm_log(client_creds,
+ client_policy=policy)
+
+ def test_authn_policy_ntlm_deny_service(self):
+ # Create an authentication policy denying NTLM authentication for
+ # services.
+ allowed = 'O:SYD:(A;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_ntlm=True,
+ user_allowed_from=allowed,
+ service_allowed_ntlm=False,
+ service_allowed_from=allowed)
+
+ # Create a managed service account with the assigned policy.
+ client_creds = self._get_creds(
+ account_type=self.AccountType.MANAGED_SERVICE,
+ assigned_policy=policy,
+ ntlm=True)
+
+ # Show that NTLM authentication fails.
+ self._connect(client_creds, simple_bind=False,
+ expect_error=f'{HRES_SEC_E_LOGON_DENIED:08X}')
+
+ self.check_ntlm_log(
+ client_creds,
+ client_policy=policy,
+ client_policy_status=ntstatus.NT_STATUS_ACCOUNT_RESTRICTION,
+ event=AuditEvent.NTLM_DEVICE_RESTRICTION)
+
+ def test_authn_policy_ntlm_deny_no_device_restrictions(self):
+ # Create an authentication policy denying NTLM authentication for
+ # users.
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_ntlm=False,
+ service_allowed_ntlm=True)
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy,
+ ntlm=True)
+
+ # Show that without AllowedToAuthenticateFrom set in the policy, NTLM
+ # authentication succeeds.
+ self._connect(client_creds, simple_bind=False)
+
+ self.check_ntlm_log(client_creds,
+ client_policy=policy)
+
+ def test_authn_policy_simple_bind_allow_user(self):
+ # Create an authentication policy allowing NTLM authentication for
+ # users.
+ allowed = 'O:SYD:(A;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_ntlm=True,
+ user_allowed_from=allowed,
+ service_allowed_ntlm=False,
+ service_allowed_from=allowed)
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy,
+ ntlm=True)
+
+ # Show that a simple bind succeeds.
+ self._connect(client_creds, simple_bind=True)
+
+ self.check_simple_bind_log(client_creds,
+ client_policy=policy)
+
+ def test_authn_policy_simple_bind_deny_user(self):
+ # Create an authentication policy denying NTLM authentication for
+ # users.
+ allowed = 'O:SYD:(A;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_ntlm=False,
+ user_allowed_from=allowed,
+ service_allowed_ntlm=True,
+ service_allowed_from=allowed)
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy,
+ ntlm=True)
+
+ # Show that a simple bind fails.
+ self._connect(client_creds, simple_bind=True,
+ expect_error=f'{HRES_SEC_E_INVALID_TOKEN:08X}')
+
+ self.check_simple_bind_log(
+ client_creds,
+ client_policy=policy,
+ client_policy_status=ntstatus.NT_STATUS_ACCOUNT_RESTRICTION,
+ event=AuditEvent.NTLM_DEVICE_RESTRICTION)
+
+ def test_authn_policy_simple_bind_deny_no_device_restrictions(self):
+ # Create an authentication policy denying NTLM authentication for
+ # users.
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_ntlm=False,
+ service_allowed_ntlm=True)
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy,
+ ntlm=True)
+
+ # Show that without AllowedToAuthenticateFrom set in the policy, a
+ # simple bind succeeds.
+ self._connect(client_creds, simple_bind=True)
+
+ self.check_simple_bind_log(client_creds,
+ client_policy=policy)
+
+ def test_authn_policy_samr_pwd_change_allow_service_allowed_from(self):
+ # Create an authentication policy allowing NTLM authentication for
+ # managed service accounts.
+ allowed = 'O:SYD:(A;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ service_allowed_ntlm=True,
+ service_allowed_from=allowed)
+
+ # Create a managed service account with the assigned policy.
+ client_creds = self._get_creds(
+ account_type=self.AccountType.MANAGED_SERVICE,
+ assigned_policy=policy,
+ ntlm=True)
+
+ # Show that a SAMR password change is allowed.
+ self._test_samr_change_password(client_creds, expect_error=None)
+
+ self.check_samr_pwd_change_log(client_creds,
+ client_policy=policy)
+
+ def test_authn_policy_samr_pwd_change_allow_service_not_allowed_from(self):
+ # Create an authentication policy allowing NTLM authentication for
+ # managed service accounts.
+ denied = 'O:SYD:(D;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ service_allowed_ntlm=True,
+ service_allowed_from=denied)
+
+ # Create a managed service account with the assigned policy.
+ client_creds = self._get_creds(
+ account_type=self.AccountType.MANAGED_SERVICE,
+ assigned_policy=policy,
+ ntlm=True)
+
+ # Show that a SAMR password change is allowed.
+ self._test_samr_change_password(client_creds, expect_error=None)
+
+ self.check_samr_pwd_change_log(client_creds,
+ client_policy=policy)
+
+ def test_authn_policy_samr_pwd_change_allow_service_no_allowed_from(self):
+ # Create an authentication policy allowing NTLM authentication for
+ # managed service accounts.
+ policy = self.create_authn_policy(enforced=True,
+ service_allowed_ntlm=True)
+
+ # Create a managed service account with the assigned policy.
+ client_creds = self._get_creds(
+ account_type=self.AccountType.MANAGED_SERVICE,
+ assigned_policy=policy,
+ ntlm=True)
+
+ # Show that a SAMR password change is allowed.
+ self._test_samr_change_password(client_creds, expect_error=None)
+
+ self.check_samr_pwd_change_log(client_creds,
+ client_policy=policy)
+
+ def test_authn_policy_samr_pwd_change_deny_service_allowed_from(self):
+ # Create an authentication policy denying NTLM authentication for
+ # managed service accounts.
+ allowed = 'O:SYD:(A;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ service_allowed_ntlm=False,
+ service_allowed_from=allowed)
+
+ # Create a managed service account with the assigned policy.
+ client_creds = self._get_creds(
+ account_type=self.AccountType.MANAGED_SERVICE,
+ assigned_policy=policy,
+ ntlm=True)
+
+ # Show that the SAMR connection fails.
+ self._test_samr_change_password(
+ client_creds, expect_error=None,
+ connect_error=ntstatus.NT_STATUS_ACCOUNT_RESTRICTION)
+
+ self.check_samr_pwd_change_log(
+ client_creds,
+ client_policy=policy,
+ client_policy_status=ntstatus.NT_STATUS_ACCOUNT_RESTRICTION,
+ event=AuditEvent.NTLM_DEVICE_RESTRICTION)
+
+ def test_authn_policy_samr_pwd_change_deny_service_not_allowed_from(self):
+ # Create an authentication policy denying NTLM authentication for
+ # managed service accounts.
+ denied = 'O:SYD:(D;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ service_allowed_ntlm=False,
+ service_allowed_from=denied)
+
+ # Create a managed service account with the assigned policy.
+ client_creds = self._get_creds(
+ account_type=self.AccountType.MANAGED_SERVICE,
+ assigned_policy=policy,
+ ntlm=True)
+
+ # Show that the SAMR connection fails.
+ self._test_samr_change_password(
+ client_creds, expect_error=None,
+ connect_error=ntstatus.NT_STATUS_ACCOUNT_RESTRICTION)
+
+ self.check_samr_pwd_change_log(
+ client_creds,
+ client_policy=policy,
+ client_policy_status=ntstatus.NT_STATUS_ACCOUNT_RESTRICTION,
+ event=AuditEvent.NTLM_DEVICE_RESTRICTION)
+
+ def test_authn_policy_samr_pwd_change_deny_service_no_allowed_from(self):
+ # Create an authentication policy denying NTLM authentication for
+ # managed service accounts.
+ policy = self.create_authn_policy(enforced=True,
+ service_allowed_ntlm=False)
+
+ # Create a managed service account with the assigned policy.
+ client_creds = self._get_creds(
+ account_type=self.AccountType.MANAGED_SERVICE,
+ assigned_policy=policy,
+ ntlm=True)
+
+ # Show that a SAMR password change is allowed.
+ self._test_samr_change_password(client_creds, expect_error=None)
+
+ self.check_samr_pwd_change_log(client_creds,
+ client_policy=policy)
+
+ def test_authn_policy_samlogon_allow_user(self):
+ # Create an authentication policy allowing NTLM authentication for
+ # users.
+ allowed = 'O:SYD:(A;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_ntlm=True,
+ user_allowed_from=allowed,
+ service_allowed_ntlm=False,
+ service_allowed_from=allowed)
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy,
+ ntlm=True)
+
+ # Show that a network SamLogon succeeds.
+ self._test_samlogon(creds=client_creds,
+ logon_type=netlogon.NetlogonNetworkInformation)
+
+ self.check_samlogon_network_log(client_creds,
+ client_policy=policy)
+
+ # Show that an interactive SamLogon succeeds. Although MS-APDS doesn’t
+ # state it, AllowedNTLMNetworkAuthentication applies to interactive
+ # logons too.
+ self._test_samlogon(creds=client_creds,
+ logon_type=netlogon.NetlogonInteractiveInformation)
+
+ self.check_samlogon_interactive_log(client_creds,
+ client_policy=policy)
+
+ def test_authn_policy_samlogon_deny_user(self):
+ # Create an authentication policy denying NTLM authentication for
+ # users.
+ allowed = 'O:SYD:(A;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_ntlm=False,
+ user_allowed_from=allowed,
+ service_allowed_ntlm=True,
+ service_allowed_from=allowed)
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy,
+ ntlm=True)
+
+ # Show that a network SamLogon fails.
+ self._test_samlogon(
+ creds=client_creds,
+ logon_type=netlogon.NetlogonNetworkInformation,
+ expect_error=ntstatus.NT_STATUS_ACCOUNT_RESTRICTION)
+
+ self.check_samlogon_network_log(
+ client_creds,
+ client_policy=policy,
+ client_policy_status=ntstatus.NT_STATUS_ACCOUNT_RESTRICTION,
+ event=AuditEvent.NTLM_DEVICE_RESTRICTION)
+
+ # Show that an interactive SamLogon fails.
+ self._test_samlogon(
+ creds=client_creds,
+ logon_type=netlogon.NetlogonInteractiveInformation,
+ expect_error=ntstatus.NT_STATUS_ACCOUNT_RESTRICTION)
+
+ self.check_samlogon_interactive_log(
+ client_creds,
+ client_policy=policy,
+ client_policy_status=ntstatus.NT_STATUS_ACCOUNT_RESTRICTION,
+ event=AuditEvent.NTLM_DEVICE_RESTRICTION)
+
+ def test_authn_policy_samlogon_network_computer(self):
+ # Create an authentication policy denying NTLM authentication.
+ denied = 'O:SYD:(D;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_ntlm=False,
+ user_allowed_from=denied,
+ service_allowed_ntlm=False,
+ service_allowed_from=denied)
+
+ # Create a computer account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
+ assigned_policy=policy,
+ ntlm=True)
+
+ # Show that a network SamLogon succeeds.
+ self._test_samlogon(
+ creds=client_creds,
+ logon_type=netlogon.NetlogonNetworkInformation)
+
+ self.check_samlogon_network_log(
+ client_creds,
+ client_policy=None) # Client policies don’t apply to computers.
+
+ def test_authn_policy_samlogon_interactive_allow_user_allowed_from(self):
+ # Create an authentication policy allowing NTLM authentication for
+ # users.
+ allowed = 'O:SYD:(A;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_ntlm=True,
+ user_allowed_from=allowed)
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy,
+ ntlm=True,
+ cached=False)
+
+ # Show that an interactive SamLogon succeeds. Although MS-APDS doesn’t
+ # state it, AllowedNTLMNetworkAuthentication applies to interactive
+ # logons too.
+ self._test_samlogon(creds=client_creds,
+ logon_type=netlogon.NetlogonInteractiveInformation)
+
+ self.check_samlogon_interactive_log(client_creds,
+ client_policy=policy)
+
+ def test_authn_policy_samlogon_interactive_allow_user_not_allowed_from(self):
+ # Create an authentication policy allowing NTLM authentication for
+ # users.
+ denied = 'O:SYD:(D;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_ntlm=True,
+ user_allowed_from=denied)
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy,
+ ntlm=True,
+ cached=False)
+
+ # Show that an interactive SamLogon succeeds. Although MS-APDS doesn’t
+ # state it, AllowedNTLMNetworkAuthentication applies to interactive
+ # logons too.
+ self._test_samlogon(creds=client_creds,
+ logon_type=netlogon.NetlogonInteractiveInformation)
+
+ self.check_samlogon_interactive_log(client_creds,
+ client_policy=policy)
+
+ def test_authn_policy_samlogon_interactive_allow_user_no_allowed_from(self):
+ # Create an authentication policy allowing NTLM authentication for
+ # users.
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_ntlm=True)
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy,
+ ntlm=True,
+ cached=False)
+
+ # Show that an interactive SamLogon succeeds.
+ self._test_samlogon(creds=client_creds,
+ logon_type=netlogon.NetlogonInteractiveInformation)
+
+ self.check_samlogon_interactive_log(client_creds,
+ client_policy=policy)
+
+ def test_authn_policy_samlogon_interactive_deny_user_allowed_from(self):
+ # Create an authentication policy disallowing NTLM authentication for
+ # users.
+ allowed = 'O:SYD:(A;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_ntlm=False,
+ user_allowed_from=allowed)
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy,
+ ntlm=True,
+ cached=False)
+
+ # Show that an interactive SamLogon fails.
+ self._test_samlogon(
+ creds=client_creds,
+ logon_type=netlogon.NetlogonInteractiveInformation,
+ expect_error=ntstatus.NT_STATUS_ACCOUNT_RESTRICTION)
+
+ self.check_samlogon_interactive_log(
+ client_creds,
+ client_policy=policy,
+ client_policy_status=ntstatus.NT_STATUS_ACCOUNT_RESTRICTION,
+ event=AuditEvent.NTLM_DEVICE_RESTRICTION)
+
+ def test_authn_policy_samlogon_interactive_deny_user_not_allowed_from(self):
+ # Create an authentication policy disallowing NTLM authentication for
+ # users.
+ denied = 'O:SYD:(D;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_ntlm=False,
+ user_allowed_from=denied)
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy,
+ ntlm=True,
+ cached=False)
+
+ # Show that an interactive SamLogon fails.
+ self._test_samlogon(
+ creds=client_creds,
+ logon_type=netlogon.NetlogonInteractiveInformation,
+ expect_error=ntstatus.NT_STATUS_ACCOUNT_RESTRICTION)
+
+ self.check_samlogon_interactive_log(
+ client_creds,
+ client_policy=policy,
+ client_policy_status=ntstatus.NT_STATUS_ACCOUNT_RESTRICTION,
+ event=AuditEvent.NTLM_DEVICE_RESTRICTION)
+
+ def test_authn_policy_samlogon_interactive_deny_user_no_allowed_from(self):
+ # Create an authentication policy disallowing NTLM authentication for
+ # users.
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_ntlm=False)
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy,
+ ntlm=True,
+ cached=False)
+
+ # Show that an interactive SamLogon succeeds.
+ self._test_samlogon(creds=client_creds,
+ logon_type=netlogon.NetlogonInteractiveInformation)
+
+ self.check_samlogon_interactive_log(client_creds,
+ client_policy=policy)
+
+ def test_authn_policy_samlogon_interactive_user_allowed_from(self):
+        # Create an authentication policy that does not specify whether NTLM
+        # authentication is allowed.
+ allowed = 'O:SYD:(A;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_from=allowed)
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy,
+ ntlm=True,
+ cached=False)
+
+ # Show that an interactive SamLogon fails.
+ self._test_samlogon(
+ creds=client_creds,
+ logon_type=netlogon.NetlogonInteractiveInformation,
+ expect_error=ntstatus.NT_STATUS_ACCOUNT_RESTRICTION)
+
+ self.check_samlogon_interactive_log(
+ client_creds,
+ client_policy=policy,
+ client_policy_status=ntstatus.NT_STATUS_ACCOUNT_RESTRICTION,
+ event=AuditEvent.NTLM_DEVICE_RESTRICTION)
+
+ def test_authn_policy_samlogon_network_user_allowed_from(self):
+        # Create an authentication policy that does not specify whether NTLM
+        # authentication is allowed.
+ allowed = 'O:SYD:(A;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_from=allowed)
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy,
+ ntlm=True,
+ cached=False)
+
+ # Show that a network SamLogon fails.
+ self._test_samlogon(
+ creds=client_creds,
+ logon_type=netlogon.NetlogonNetworkInformation,
+ expect_error=ntstatus.NT_STATUS_ACCOUNT_RESTRICTION)
+
+ self.check_samlogon_network_log(
+ client_creds,
+ client_policy=policy,
+ client_policy_status=ntstatus.NT_STATUS_ACCOUNT_RESTRICTION,
+ event=AuditEvent.NTLM_DEVICE_RESTRICTION)
+
+ def test_authn_policy_samlogon_network_allow_service_allowed_from(self):
+ # Create an authentication policy allowing NTLM authentication for
+ # services.
+ allowed = 'O:SYD:(A;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ service_allowed_ntlm=True,
+ service_allowed_from=allowed)
+
+ # Create a managed service account with the assigned policy.
+ client_creds = self._get_creds(
+ account_type=self.AccountType.MANAGED_SERVICE,
+ assigned_policy=policy,
+ ntlm=True,
+ cached=False)
+
+ # Show that a network SamLogon succeeds.
+ self._test_samlogon(creds=client_creds,
+ logon_type=netlogon.NetlogonNetworkInformation)
+
+ self.check_samlogon_network_log(client_creds,
+ client_policy=policy)
+
+ def test_authn_policy_samlogon_network_allow_service_not_allowed_from(self):
+ # Create an authentication policy allowing NTLM authentication for
+ # services.
+ denied = 'O:SYD:(D;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ service_allowed_ntlm=True,
+ service_allowed_from=denied)
+
+ # Create a managed service account with the assigned policy.
+ client_creds = self._get_creds(
+ account_type=self.AccountType.MANAGED_SERVICE,
+ assigned_policy=policy,
+ ntlm=True,
+ cached=False)
+
+ # Show that a network SamLogon succeeds.
+ self._test_samlogon(creds=client_creds,
+ logon_type=netlogon.NetlogonNetworkInformation)
+
+ self.check_samlogon_network_log(client_creds,
+ client_policy=policy)
+
+ def test_authn_policy_samlogon_network_allow_service_no_allowed_from(self):
+ # Create an authentication policy allowing NTLM authentication for
+ # services.
+ policy = self.create_authn_policy(enforced=True,
+ service_allowed_ntlm=True)
+
+ # Create a managed service account with the assigned policy.
+ client_creds = self._get_creds(
+ account_type=self.AccountType.MANAGED_SERVICE,
+ assigned_policy=policy,
+ ntlm=True,
+ cached=False)
+
+ # Show that a network SamLogon succeeds.
+ self._test_samlogon(creds=client_creds,
+ logon_type=netlogon.NetlogonNetworkInformation)
+
+ self.check_samlogon_network_log(client_creds,
+ client_policy=policy)
+
+ def test_authn_policy_samlogon_network_deny_service_allowed_from(self):
+ # Create an authentication policy disallowing NTLM authentication for
+ # services.
+ allowed = 'O:SYD:(A;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ service_allowed_ntlm=False,
+ service_allowed_from=allowed)
+
+ # Create a managed service account with the assigned policy.
+ client_creds = self._get_creds(
+ account_type=self.AccountType.MANAGED_SERVICE,
+ assigned_policy=policy,
+ ntlm=True,
+ cached=False)
+
+ # Show that a network SamLogon fails.
+ self._test_samlogon(
+ creds=client_creds,
+ logon_type=netlogon.NetlogonNetworkInformation,
+ expect_error=ntstatus.NT_STATUS_ACCOUNT_RESTRICTION)
+
+ self.check_samlogon_network_log(
+ client_creds,
+ client_policy=policy,
+ client_policy_status=ntstatus.NT_STATUS_ACCOUNT_RESTRICTION,
+ event=AuditEvent.NTLM_DEVICE_RESTRICTION)
+
+ def test_authn_policy_samlogon_network_deny_service_not_allowed_from(self):
+ # Create an authentication policy disallowing NTLM authentication for
+ # services.
+ denied = 'O:SYD:(D;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ service_allowed_ntlm=False,
+ service_allowed_from=denied)
+
+ # Create a managed service account with the assigned policy.
+ client_creds = self._get_creds(
+ account_type=self.AccountType.MANAGED_SERVICE,
+ assigned_policy=policy,
+ ntlm=True,
+ cached=False)
+
+ # Show that a network SamLogon fails.
+ self._test_samlogon(
+ creds=client_creds,
+ logon_type=netlogon.NetlogonNetworkInformation,
+ expect_error=ntstatus.NT_STATUS_ACCOUNT_RESTRICTION)
+
+ self.check_samlogon_network_log(
+ client_creds,
+ client_policy=policy,
+ client_policy_status=ntstatus.NT_STATUS_ACCOUNT_RESTRICTION,
+ event=AuditEvent.NTLM_DEVICE_RESTRICTION)
+
+ def test_authn_policy_samlogon_network_deny_service_no_allowed_from(self):
+ # Create an authentication policy disallowing NTLM authentication for
+ # services.
+ policy = self.create_authn_policy(enforced=True,
+ service_allowed_ntlm=False)
+
+ # Create a managed service account with the assigned policy.
+ client_creds = self._get_creds(
+ account_type=self.AccountType.MANAGED_SERVICE,
+ assigned_policy=policy,
+ ntlm=True,
+ cached=False)
+
+ # Show that a network SamLogon succeeds.
+ self._test_samlogon(creds=client_creds,
+ logon_type=netlogon.NetlogonNetworkInformation)
+
+ self.check_samlogon_network_log(client_creds,
+ client_policy=policy)
+
+ def test_authn_policy_samlogon_network_allow_service_allowed_from_to_self(self):
+ # Create an authentication policy allowing NTLM authentication for
+ # services.
+ allowed = 'O:SYD:(A;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ service_allowed_ntlm=True,
+ service_allowed_from=allowed)
+
+ # Create a managed service account with the assigned policy.
+ client_creds = self._get_creds(
+ account_type=self.AccountType.MANAGED_SERVICE,
+ assigned_policy=policy,
+ ntlm=True,
+ cached=False)
+
+ # Show that a network SamLogon to ourselves succeeds.
+ self._test_samlogon(creds=client_creds,
+ domain_joined_mach_creds=client_creds,
+ logon_type=netlogon.NetlogonNetworkInformation)
+
+ self.check_samlogon_network_log(client_creds,
+ client_policy=policy,
+ server_policy=policy)
+
+ def test_authn_policy_samlogon_network_allow_service_not_allowed_from_to_self(self):
+ # Create an authentication policy allowing NTLM authentication for
+ # services.
+ denied = 'O:SYD:(D;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ service_allowed_ntlm=True,
+ service_allowed_from=denied)
+
+ # Create a managed service account with the assigned policy.
+ client_creds = self._get_creds(
+ account_type=self.AccountType.MANAGED_SERVICE,
+ assigned_policy=policy,
+ ntlm=True,
+ cached=False)
+
+ # Show that a network SamLogon to ourselves succeeds.
+ self._test_samlogon(creds=client_creds,
+ domain_joined_mach_creds=client_creds,
+ logon_type=netlogon.NetlogonNetworkInformation)
+
+ self.check_samlogon_network_log(client_creds,
+ client_policy=policy,
+ server_policy=policy)
+
+ def test_authn_policy_samlogon_network_allow_service_no_allowed_from_to_self(self):
+ # Create an authentication policy allowing NTLM authentication for
+ # services.
+ policy = self.create_authn_policy(enforced=True,
+ service_allowed_ntlm=True)
+
+ # Create a managed service account with the assigned policy.
+ client_creds = self._get_creds(
+ account_type=self.AccountType.MANAGED_SERVICE,
+ assigned_policy=policy,
+ ntlm=True,
+ cached=False)
+
+ # Show that a network SamLogon to ourselves succeeds.
+ self._test_samlogon(creds=client_creds,
+ domain_joined_mach_creds=client_creds,
+ logon_type=netlogon.NetlogonNetworkInformation)
+
+ self.check_samlogon_network_log(client_creds,
+ client_policy=policy,
+ server_policy=policy)
+
+ def test_authn_policy_samlogon_network_deny_service_allowed_from_to_self(self):
+ # Create an authentication policy disallowing NTLM authentication for
+ # services.
+ allowed = 'O:SYD:(A;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ service_allowed_ntlm=False,
+ service_allowed_from=allowed)
+
+ # Create a managed service account with the assigned policy.
+ client_creds = self._get_creds(
+ account_type=self.AccountType.MANAGED_SERVICE,
+ assigned_policy=policy,
+ ntlm=True,
+ cached=False)
+
+ # Show that a network SamLogon to ourselves fails.
+ self._test_samlogon(
+ creds=client_creds,
+ domain_joined_mach_creds=client_creds,
+ logon_type=netlogon.NetlogonNetworkInformation,
+ expect_error=ntstatus.NT_STATUS_ACCOUNT_RESTRICTION)
+
+ self.check_samlogon_network_log(
+ client_creds,
+ client_policy=policy,
+ server_policy=None, # Only the client policy appears in the logs.
+ client_policy_status=ntstatus.NT_STATUS_ACCOUNT_RESTRICTION,
+ event=AuditEvent.NTLM_DEVICE_RESTRICTION)
+
+ def test_authn_policy_samlogon_network_deny_service_not_allowed_from_to_self(self):
+ # Create an authentication policy disallowing NTLM authentication for
+ # services.
+ denied = 'O:SYD:(D;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ service_allowed_ntlm=False,
+ service_allowed_from=denied)
+
+ # Create a managed service account with the assigned policy.
+ client_creds = self._get_creds(
+ account_type=self.AccountType.MANAGED_SERVICE,
+ assigned_policy=policy,
+ ntlm=True,
+ cached=False)
+
+ # Show that a network SamLogon to ourselves fails.
+ self._test_samlogon(
+ creds=client_creds,
+ domain_joined_mach_creds=client_creds,
+ logon_type=netlogon.NetlogonNetworkInformation,
+ expect_error=ntstatus.NT_STATUS_ACCOUNT_RESTRICTION)
+
+ self.check_samlogon_network_log(
+ client_creds,
+ client_policy=policy,
+ client_policy_status=ntstatus.NT_STATUS_ACCOUNT_RESTRICTION,
+ event=AuditEvent.NTLM_DEVICE_RESTRICTION,
+ server_policy=None) # Only the client policy appears in the logs.
+
+ def test_authn_policy_samlogon_network_deny_service_no_allowed_from_to_self(self):
+ # Create an authentication policy disallowing NTLM authentication for
+ # services.
+ policy = self.create_authn_policy(enforced=True,
+ service_allowed_ntlm=False)
+
+ # Create a managed service account with the assigned policy.
+ client_creds = self._get_creds(
+ account_type=self.AccountType.MANAGED_SERVICE,
+ assigned_policy=policy,
+ ntlm=True,
+ cached=False)
+
+ # Show that a network SamLogon to ourselves succeeds.
+ self._test_samlogon(creds=client_creds,
+ domain_joined_mach_creds=client_creds,
+ logon_type=netlogon.NetlogonNetworkInformation)
+
+ self.check_samlogon_network_log(client_creds,
+ client_policy=policy,
+ server_policy=policy)
+
+ def test_authn_policy_samlogon_interactive_deny_no_device_restrictions(self):
+ # Create an authentication policy denying NTLM authentication for
+ # users.
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_ntlm=False,
+ service_allowed_ntlm=True)
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy,
+ ntlm=True)
+
+ # Show that without AllowedToAuthenticateFrom set in the policy, an
+ # interactive SamLogon succeeds.
+ self._test_samlogon(creds=client_creds,
+ logon_type=netlogon.NetlogonInteractiveInformation)
+
+ self.check_samlogon_interactive_log(client_creds,
+ client_policy=policy)
+
+ def test_authn_policy_samlogon_network_deny_no_device_restrictions(self):
+ # Create an authentication policy denying NTLM authentication for
+ # users.
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_ntlm=False,
+ service_allowed_ntlm=True)
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy,
+ ntlm=True)
+
+ # Show that without AllowedToAuthenticateFrom set in the policy, a
+ # network SamLogon succeeds.
+ self._test_samlogon(creds=client_creds,
+ logon_type=netlogon.NetlogonNetworkInformation)
+
+ self.check_samlogon_network_log(client_creds,
+ client_policy=policy)
+
    def test_samlogon_allowed_to_computer_allow(self):
        """A computer-scoped allow ACE for the client permits SamLogon.

        Both network and interactive logons succeed against a computer whose
        assigned policy explicitly allows this user; the user/service
        allowed-to clauses deny everyone, showing only computer_allowed_to
        is consulted for a computer target.
        """
        # Create a user account.
        client_creds = self._get_creds(account_type=self.AccountType.USER,
                                       ntlm=True)

        # Create an authentication policy that applies to a computer and
        # explicitly allows the user account to obtain a service ticket.
        allowed = f'O:SYD:(A;;CR;;;{client_creds.get_sid()})'
        denied = 'O:SYD:(D;;CR;;;WD)'
        policy = self.create_authn_policy(enforced=True,
                                          user_allowed_to=denied,
                                          computer_allowed_to=allowed,
                                          service_allowed_to=denied)

        # Create a computer account with the assigned policy.
        target_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
                                       assigned_policy=policy)

        # Show that a network SamLogon succeeds.
        self._test_samlogon(creds=client_creds,
                            domain_joined_mach_creds=target_creds,
                            logon_type=netlogon.NetlogonNetworkInformation)

        self.check_samlogon_network_log(client_creds,
                                        server_policy=policy)

        # Show that an interactive SamLogon succeeds.
        self._test_samlogon(creds=client_creds,
                            domain_joined_mach_creds=target_creds,
                            logon_type=netlogon.NetlogonInteractiveInformation)

        self.check_samlogon_interactive_log(client_creds,
                                            server_policy=policy)
+
    def test_samlogon_allowed_to_computer_deny(self):
        """A computer-scoped deny ACE for the client blocks SamLogon.

        Both logon types fail with AUTHENTICATION_FIREWALL_FAILED, and the
        audit log records the server policy with an NTLM_SERVER_RESTRICTION
        event and ACCESS_DENIED reason.
        """
        # Create a user account.
        client_creds = self._get_creds(account_type=self.AccountType.USER,
                                       ntlm=True)

        # Create an authentication policy that applies to a computer and
        # explicitly denies the user account to obtain a service ticket.
        denied = f'O:SYD:(D;;CR;;;{client_creds.get_sid()})'
        allowed = 'O:SYD:(A;;CR;;;WD)'
        policy = self.create_authn_policy(enforced=True,
                                          user_allowed_to=allowed,
                                          computer_allowed_to=denied,
                                          service_allowed_to=allowed)

        # Create a computer account with the assigned policy.
        target_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
                                       assigned_policy=policy)

        # Show that a network SamLogon fails.
        self._test_samlogon(
            creds=client_creds,
            domain_joined_mach_creds=target_creds,
            logon_type=netlogon.NetlogonNetworkInformation,
            expect_error=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED)

        self.check_samlogon_network_log(
            client_creds,
            server_policy=policy,
            server_policy_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
            event=AuditEvent.NTLM_SERVER_RESTRICTION,
            reason=AuditReason.ACCESS_DENIED)

        # Show that an interactive SamLogon fails.
        self._test_samlogon(
            creds=client_creds,
            domain_joined_mach_creds=target_creds,
            logon_type=netlogon.NetlogonInteractiveInformation,
            expect_error=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED)

        self.check_samlogon_interactive_log(
            client_creds,
            server_policy=policy,
            server_policy_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
            event=AuditEvent.NTLM_SERVER_RESTRICTION,
            reason=AuditReason.ACCESS_DENIED)
+
    def test_samlogon_allowed_to_computer_deny_protected(self):
        """Protected-account restriction takes precedence over a server
        policy deny.

        Both logon types fail with ACCOUNT_RESTRICTION (not the policy's
        firewall error), and no server policy is recorded in the audit log.
        """
        # Create a protected user account.
        client_creds = self._get_creds(account_type=self.AccountType.USER,
                                       protected=True,
                                       ntlm=True)

        # Create an authentication policy that applies to a computer and
        # explicitly denies the user account to obtain a service ticket.
        denied = f'O:SYD:(D;;CR;;;{client_creds.get_sid()})'
        allowed = 'O:SYD:(A;;CR;;;WD)'
        policy = self.create_authn_policy(enforced=True,
                                          user_allowed_to=allowed,
                                          computer_allowed_to=denied,
                                          service_allowed_to=allowed)

        # Create a computer account with the assigned policy.
        target_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
                                       assigned_policy=policy)

        # Show that a network SamLogon fails.
        self._test_samlogon(
            creds=client_creds,
            domain_joined_mach_creds=target_creds,
            logon_type=netlogon.NetlogonNetworkInformation,
            expect_error=ntstatus.NT_STATUS_ACCOUNT_RESTRICTION)

        self.check_samlogon_network_log(
            client_creds,
            # The account’s protection takes precedence, and no policy appears
            # in the log.
            server_policy=None,
            status=ntstatus.NT_STATUS_ACCOUNT_RESTRICTION)

        # Show that an interactive SamLogon fails.
        self._test_samlogon(
            creds=client_creds,
            domain_joined_mach_creds=target_creds,
            logon_type=netlogon.NetlogonInteractiveInformation,
            expect_error=ntstatus.NT_STATUS_ACCOUNT_RESTRICTION)

        self.check_samlogon_interactive_log(
            client_creds,
            # The account’s protection takes precedence, and no policy appears
            # in the log.
            server_policy=None,
            status=ntstatus.NT_STATUS_ACCOUNT_RESTRICTION)
+
    def test_samlogon_allowed_to_computer_allow_asserted_identity(self):
        """An allow ACE keyed on the Asserted Identity SID does not admit an
        NTLM SamLogon.

        Both logon types fail with AUTHENTICATION_FIREWALL_FAILED —
        presumably because the Authentication Authority Asserted Identity
        SID is a Kerberos-only SID absent from an NTLM token (confirm
        against the server-side token construction).
        """
        # Create a user account.
        client_creds = self._get_creds(account_type=self.AccountType.USER,
                                       ntlm=True)

        # Create an authentication policy that allows accounts with the
        # Authentication Authority Asserted Identity SID to obtain a service
        # ticket.
        allowed = (
            f'O:SYD:(A;;CR;;;'
            f'{security.SID_AUTHENTICATION_AUTHORITY_ASSERTED_IDENTITY})'
        )
        denied = 'O:SYD:(D;;CR;;;WD)'
        policy = self.create_authn_policy(enforced=True,
                                          user_allowed_to=denied,
                                          computer_allowed_to=allowed,
                                          service_allowed_to=denied)

        # Create a computer account with the assigned policy.
        target_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
                                       assigned_policy=policy)

        # Show that a network SamLogon fails.
        self._test_samlogon(
            creds=client_creds,
            domain_joined_mach_creds=target_creds,
            logon_type=netlogon.NetlogonNetworkInformation,
            expect_error=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED)

        self.check_samlogon_network_log(
            client_creds,
            server_policy=policy,
            server_policy_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
            event=AuditEvent.NTLM_SERVER_RESTRICTION,
            reason=AuditReason.ACCESS_DENIED)

        # Show that an interactive SamLogon fails.
        self._test_samlogon(
            creds=client_creds,
            domain_joined_mach_creds=target_creds,
            logon_type=netlogon.NetlogonInteractiveInformation,
            expect_error=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED)

        self.check_samlogon_interactive_log(
            client_creds,
            server_policy=policy,
            server_policy_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
            event=AuditEvent.NTLM_SERVER_RESTRICTION,
            reason=AuditReason.ACCESS_DENIED)
+
    def test_samlogon_allowed_to_computer_allow_claims_valid(self):
        """An allow ACE keyed on the Claims Valid SID does not admit an NTLM
        SamLogon.

        Both logon types fail with AUTHENTICATION_FIREWALL_FAILED —
        presumably the Claims Valid SID is not present in an NTLM token
        (confirm against the server-side token construction).
        """
        # Create a user account.
        client_creds = self._get_creds(account_type=self.AccountType.USER,
                                       ntlm=True)

        # Create an authentication policy that allows accounts with the Claims
        # Valid SID to obtain a service ticket.
        allowed = f'O:SYD:(A;;CR;;;{security.SID_CLAIMS_VALID})'
        denied = 'O:SYD:(D;;CR;;;WD)'
        policy = self.create_authn_policy(enforced=True,
                                          user_allowed_to=denied,
                                          computer_allowed_to=allowed,
                                          service_allowed_to=denied)

        # Create a computer account with the assigned policy.
        target_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
                                       assigned_policy=policy)

        # Show that a network SamLogon fails.
        self._test_samlogon(
            creds=client_creds,
            domain_joined_mach_creds=target_creds,
            logon_type=netlogon.NetlogonNetworkInformation,
            expect_error=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED)

        self.check_samlogon_network_log(
            client_creds,
            server_policy=policy,
            server_policy_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
            event=AuditEvent.NTLM_SERVER_RESTRICTION,
            reason=AuditReason.ACCESS_DENIED)

        # Show that an interactive SamLogon fails.
        self._test_samlogon(
            creds=client_creds,
            domain_joined_mach_creds=target_creds,
            logon_type=netlogon.NetlogonInteractiveInformation,
            expect_error=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED)

        self.check_samlogon_interactive_log(
            client_creds,
            server_policy=policy,
            server_policy_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
            event=AuditEvent.NTLM_SERVER_RESTRICTION,
            reason=AuditReason.ACCESS_DENIED)
+
    def test_samlogon_allowed_to_computer_allow_compounded_auth(self):
        """An allow ACE keyed on the Compounded Authentication SID does not
        admit an NTLM SamLogon.

        Both logon types fail with AUTHENTICATION_FIREWALL_FAILED —
        presumably the Compounded Authentication SID is Kerberos
        (FAST/compound identity) specific and absent from an NTLM token
        (confirm against the server-side token construction).
        """
        # Create a user account.
        client_creds = self._get_creds(account_type=self.AccountType.USER,
                                       ntlm=True)

        # Create an authentication policy that allows accounts with the
        # Compounded Authentication SID to obtain a service ticket.
        allowed = f'O:SYD:(A;;CR;;;{security.SID_COMPOUNDED_AUTHENTICATION})'
        denied = 'O:SYD:(D;;CR;;;WD)'
        policy = self.create_authn_policy(enforced=True,
                                          user_allowed_to=denied,
                                          computer_allowed_to=allowed,
                                          service_allowed_to=denied)

        # Create a computer account with the assigned policy.
        target_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
                                       assigned_policy=policy)

        # Show that a network SamLogon fails.
        self._test_samlogon(
            creds=client_creds,
            domain_joined_mach_creds=target_creds,
            logon_type=netlogon.NetlogonNetworkInformation,
            expect_error=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED)

        self.check_samlogon_network_log(
            client_creds,
            server_policy=policy,
            server_policy_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
            event=AuditEvent.NTLM_SERVER_RESTRICTION,
            reason=AuditReason.ACCESS_DENIED)

        # Show that an interactive SamLogon fails.
        self._test_samlogon(
            creds=client_creds,
            domain_joined_mach_creds=target_creds,
            logon_type=netlogon.NetlogonInteractiveInformation,
            expect_error=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED)

        self.check_samlogon_interactive_log(
            client_creds,
            server_policy=policy,
            server_policy_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
            event=AuditEvent.NTLM_SERVER_RESTRICTION,
            reason=AuditReason.ACCESS_DENIED)
+
    def test_samlogon_allowed_to_computer_allow_authenticated_users(self):
        """An allow ACE keyed on the Authenticated Users SID admits an NTLM
        SamLogon.

        Unlike the Kerberos-only SIDs tested above, Authenticated Users is
        present in the NTLM token, so both logon types succeed.
        """
        # Create a user account.
        client_creds = self._get_creds(account_type=self.AccountType.USER,
                                       ntlm=True)

        # Create an authentication policy that allows accounts with the
        # Authenticated Users SID to obtain a service ticket.
        allowed = f'O:SYD:(A;;CR;;;{security.SID_NT_AUTHENTICATED_USERS})'
        denied = 'O:SYD:(D;;CR;;;WD)'
        policy = self.create_authn_policy(enforced=True,
                                          user_allowed_to=denied,
                                          computer_allowed_to=allowed,
                                          service_allowed_to=denied)

        # Create a computer account with the assigned policy.
        target_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
                                       assigned_policy=policy)

        # Show that a network SamLogon succeeds.
        self._test_samlogon(creds=client_creds,
                            domain_joined_mach_creds=target_creds,
                            logon_type=netlogon.NetlogonNetworkInformation)

        self.check_samlogon_network_log(client_creds,
                                        server_policy=policy)

        # Show that an interactive SamLogon succeeds.
        self._test_samlogon(creds=client_creds,
                            domain_joined_mach_creds=target_creds,
                            logon_type=netlogon.NetlogonInteractiveInformation)

        self.check_samlogon_interactive_log(client_creds,
                                            server_policy=policy)
+
    def test_samlogon_allowed_to_computer_allow_ntlm_authn(self):
        """An allow ACE keyed on the NTLM Authentication SID does not admit
        an NTLM SamLogon.

        Both logon types fail with AUTHENTICATION_FIREWALL_FAILED —
        NOTE(review): perhaps unexpectedly, given the authentication IS
        NTLM; presumably this SID is not added to the token at this point
        of the evaluation. Confirm against the access-check implementation.
        """
        # Create a user account.
        client_creds = self._get_creds(account_type=self.AccountType.USER,
                                       ntlm=True)

        # Create an authentication policy that allows accounts with the NTLM
        # Authentication SID to obtain a service ticket.
        allowed = f'O:SYD:(A;;CR;;;{security.SID_NT_NTLM_AUTHENTICATION})'
        denied = 'O:SYD:(D;;CR;;;WD)'
        policy = self.create_authn_policy(enforced=True,
                                          user_allowed_to=denied,
                                          computer_allowed_to=allowed,
                                          service_allowed_to=denied)

        # Create a computer account with the assigned policy.
        target_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
                                       assigned_policy=policy)

        # Show that a network SamLogon fails.
        self._test_samlogon(
            creds=client_creds,
            domain_joined_mach_creds=target_creds,
            logon_type=netlogon.NetlogonNetworkInformation,
            expect_error=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED)

        self.check_samlogon_network_log(
            client_creds,
            server_policy=policy,
            server_policy_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
            event=AuditEvent.NTLM_SERVER_RESTRICTION,
            reason=AuditReason.ACCESS_DENIED)

        # Show that an interactive SamLogon fails.
        self._test_samlogon(
            creds=client_creds,
            domain_joined_mach_creds=target_creds,
            logon_type=netlogon.NetlogonInteractiveInformation,
            expect_error=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED)

        self.check_samlogon_interactive_log(
            client_creds,
            server_policy=policy,
            server_policy_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
            event=AuditEvent.NTLM_SERVER_RESTRICTION,
            reason=AuditReason.ACCESS_DENIED)
+
    def test_samlogon_allowed_to_no_owner(self):
        """An enforced policy whose SDDL lacks an owner (no O:SY) is
        rejected.

        Both logon types fail with INVALID_PARAMETER, and the audit log
        records DESCRIPTOR_NO_OWNER as the reason.
        """
        # Create a user account.
        client_creds = self._get_creds(account_type=self.AccountType.USER,
                                       ntlm=True)

        # Create an authentication policy that applies to a computer and
        # explicitly allows the user account to obtain a service ticket. Omit
        # the owner (O:SY) from the SDDL.
        allowed = f'D:(A;;CR;;;{client_creds.get_sid()})'
        policy = self.create_authn_policy(enforced=True,
                                          computer_allowed_to=allowed)

        # Create a computer account with the assigned policy.
        target_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
                                       assigned_policy=policy)

        # Show that a network SamLogon fails.
        self._test_samlogon(
            creds=client_creds,
            domain_joined_mach_creds=target_creds,
            logon_type=netlogon.NetlogonNetworkInformation,
            expect_error=ntstatus.NT_STATUS_INVALID_PARAMETER)

        self.check_samlogon_network_log(
            client_creds,
            server_policy=policy,
            server_policy_status=ntstatus.NT_STATUS_INVALID_PARAMETER,
            event=AuditEvent.NTLM_SERVER_RESTRICTION,
            reason=AuditReason.DESCRIPTOR_NO_OWNER)

        # Show that an interactive SamLogon fails.
        self._test_samlogon(
            creds=client_creds,
            domain_joined_mach_creds=target_creds,
            logon_type=netlogon.NetlogonInteractiveInformation,
            expect_error=ntstatus.NT_STATUS_INVALID_PARAMETER)

        self.check_samlogon_interactive_log(
            client_creds,
            server_policy=policy,
            server_policy_status=ntstatus.NT_STATUS_INVALID_PARAMETER,
            event=AuditEvent.NTLM_SERVER_RESTRICTION,
            reason=AuditReason.DESCRIPTOR_NO_OWNER)
+
    def test_samlogon_allowed_to_no_owner_unenforced(self):
        """An UNenforced policy with ownerless SDDL lets logons succeed but
        is still audited.

        Both logon types succeed, while the audit log records the would-be
        INVALID_PARAMETER / DESCRIPTOR_NO_OWNER outcome — i.e. audit-only
        mode for an unenforced policy.
        """
        # Create a user account.
        client_creds = self._get_creds(account_type=self.AccountType.USER,
                                       ntlm=True)

        # Create an unenforced authentication policy that applies to a computer
        # and explicitly allows the user account to obtain a service
        # ticket. Omit the owner (O:SY) from the SDDL.
        allowed = f'D:(A;;CR;;;{client_creds.get_sid()})'
        policy = self.create_authn_policy(enforced=False,
                                          computer_allowed_to=allowed)

        # Create a computer account with the assigned policy.
        target_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
                                       assigned_policy=policy)

        # Show that a network SamLogon succeeds.
        self._test_samlogon(creds=client_creds,
                            domain_joined_mach_creds=target_creds,
                            logon_type=netlogon.NetlogonNetworkInformation)

        self.check_samlogon_network_log(
            client_creds,
            server_policy=policy,
            server_policy_status=ntstatus.NT_STATUS_INVALID_PARAMETER,
            event=AuditEvent.NTLM_SERVER_RESTRICTION,
            reason=AuditReason.DESCRIPTOR_NO_OWNER)

        # Show that an interactive SamLogon succeeds.
        self._test_samlogon(creds=client_creds,
                            domain_joined_mach_creds=target_creds,
                            logon_type=netlogon.NetlogonInteractiveInformation)

        self.check_samlogon_interactive_log(
            client_creds,
            server_policy=policy,
            server_policy_status=ntstatus.NT_STATUS_INVALID_PARAMETER,
            event=AuditEvent.NTLM_SERVER_RESTRICTION,
            reason=AuditReason.DESCRIPTOR_NO_OWNER)
+
    def test_samlogon_allowed_to_service_allow(self):
        """A service-scoped allow ACE for the client permits SamLogon to a
        managed service account.

        user/computer allowed-to clauses deny everyone, showing only
        service_allowed_to is consulted for a managed-service target.
        """
        # Create a user account.
        client_creds = self._get_creds(account_type=self.AccountType.USER,
                                       ntlm=True)

        # Create an authentication policy that applies to a managed service and
        # explicitly allows the user account to obtain a service ticket.
        allowed = f'O:SYD:(A;;CR;;;{client_creds.get_sid()})'
        denied = 'O:SYD:(D;;CR;;;WD)'
        policy = self.create_authn_policy(enforced=True,
                                          user_allowed_to=denied,
                                          computer_allowed_to=denied,
                                          service_allowed_to=allowed)

        # Create a managed service account with the assigned policy.
        target_creds = self._get_creds(
            account_type=self.AccountType.MANAGED_SERVICE,
            assigned_policy=policy)

        # Show that a network SamLogon succeeds.
        self._test_samlogon(creds=client_creds,
                            domain_joined_mach_creds=target_creds,
                            logon_type=netlogon.NetlogonNetworkInformation)

        self.check_samlogon_network_log(client_creds,
                                        server_policy=policy)

        # Show that an interactive SamLogon succeeds.
        self._test_samlogon(creds=client_creds,
                            domain_joined_mach_creds=target_creds,
                            logon_type=netlogon.NetlogonInteractiveInformation)

        self.check_samlogon_interactive_log(client_creds,
                                            server_policy=policy)
+
    def test_samlogon_allowed_to_service_deny(self):
        """A service-scoped deny ACE for the client blocks SamLogon to a
        managed service account.

        Both logon types fail with AUTHENTICATION_FIREWALL_FAILED and are
        audited as NTLM_SERVER_RESTRICTION / ACCESS_DENIED.
        """
        # Create a user account.
        client_creds = self._get_creds(account_type=self.AccountType.USER,
                                       ntlm=True)

        # Create an authentication policy that applies to a managed service and
        # explicitly denies the user account to obtain a service ticket.
        denied = f'O:SYD:(D;;CR;;;{client_creds.get_sid()})'
        allowed = 'O:SYD:(A;;CR;;;WD)'
        policy = self.create_authn_policy(enforced=True,
                                          user_allowed_to=allowed,
                                          computer_allowed_to=allowed,
                                          service_allowed_to=denied)

        # Create a managed service account with the assigned policy.
        target_creds = self._get_creds(
            account_type=self.AccountType.MANAGED_SERVICE,
            assigned_policy=policy)

        # Show that a network SamLogon fails.
        self._test_samlogon(
            creds=client_creds,
            domain_joined_mach_creds=target_creds,
            logon_type=netlogon.NetlogonNetworkInformation,
            expect_error=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED)

        self.check_samlogon_network_log(
            client_creds,
            server_policy=policy,
            server_policy_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
            event=AuditEvent.NTLM_SERVER_RESTRICTION,
            reason=AuditReason.ACCESS_DENIED)

        # Show that an interactive SamLogon fails.
        self._test_samlogon(
            creds=client_creds,
            domain_joined_mach_creds=target_creds,
            logon_type=netlogon.NetlogonInteractiveInformation,
            expect_error=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED)

        self.check_samlogon_interactive_log(
            client_creds,
            server_policy=policy,
            server_policy_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
            event=AuditEvent.NTLM_SERVER_RESTRICTION,
            reason=AuditReason.ACCESS_DENIED)
+
    def test_samlogon_allowed_to_computer_allow_group_not_a_member(self):
        """A group-keyed allow ACE does not admit a user outside the group.

        Both logon types fail with AUTHENTICATION_FIREWALL_FAILED since the
        user was created without membership in the new group.
        """
        samdb = self.get_samdb()

        # Create a new group.
        group_name = self.get_new_username()
        group_dn = self.create_group(samdb, group_name)
        group_sid = self.get_objectSid(samdb, group_dn)

        # Create a user account which does not belong to the group.
        client_creds = self._get_creds(account_type=self.AccountType.USER,
                                       ntlm=True)

        # Create an authentication policy that allows accounts belonging to the
        # group.
        allowed = f'O:SYD:(A;;CR;;;{group_sid})'
        policy = self.create_authn_policy(enforced=True,
                                          computer_allowed_to=allowed)

        # Create a computer account with the assigned policy.
        target_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
                                       assigned_policy=policy)

        # Show that a network SamLogon fails, as the user account does not
        # belong to the group.
        self._test_samlogon(
            creds=client_creds,
            domain_joined_mach_creds=target_creds,
            logon_type=netlogon.NetlogonNetworkInformation,
            expect_error=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED)

        self.check_samlogon_network_log(
            client_creds,
            server_policy=policy,
            server_policy_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
            event=AuditEvent.NTLM_SERVER_RESTRICTION,
            reason=AuditReason.ACCESS_DENIED)

        # Show that an interactive SamLogon fails, as the user account does not
        # belong to the group.
        self._test_samlogon(
            creds=client_creds,
            domain_joined_mach_creds=target_creds,
            logon_type=netlogon.NetlogonInteractiveInformation,
            expect_error=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED)

        self.check_samlogon_interactive_log(
            client_creds,
            server_policy=policy,
            server_policy_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
            event=AuditEvent.NTLM_SERVER_RESTRICTION,
            reason=AuditReason.ACCESS_DENIED)
+
    def test_samlogon_allowed_to_computer_allow_group_member(self):
        """A group-keyed allow ACE admits a user who is a member of the
        group.

        Counterpart to the not-a-member test: both logon types succeed.
        """
        samdb = self.get_samdb()

        # Create a new group.
        group_name = self.get_new_username()
        group_dn = self.create_group(samdb, group_name)
        group_sid = self.get_objectSid(samdb, group_dn)

        # Create a user account that belongs to the group.
        client_creds = self._get_creds(account_type=self.AccountType.USER,
                                       member_of=group_dn,
                                       ntlm=True)

        # Create an authentication policy that allows accounts belonging to the
        # group.
        allowed = f'O:SYD:(A;;CR;;;{group_sid})'
        policy = self.create_authn_policy(enforced=True,
                                          computer_allowed_to=allowed)

        # Create a computer account with the assigned policy.
        target_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
                                       assigned_policy=policy)

        # Show that a network SamLogon succeeds, since the user account belongs
        # to the group.
        self._test_samlogon(creds=client_creds,
                            domain_joined_mach_creds=target_creds,
                            logon_type=netlogon.NetlogonNetworkInformation)

        self.check_samlogon_network_log(client_creds,
                                        server_policy=policy)

        # Show that an interactive SamLogon succeeds, since the user account
        # belongs to the group.
        self._test_samlogon(creds=client_creds,
                            domain_joined_mach_creds=target_creds,
                            logon_type=netlogon.NetlogonInteractiveInformation)

        self.check_samlogon_interactive_log(client_creds,
                                            server_policy=policy)
+
    def test_samlogon_allowed_to_computer_allow_domain_local_group(self):
        """A domain-local group-keyed allow ACE admits a member of the
        group.

        Same shape as the plain group-member test, but the group is created
        with GroupType.DOMAIN_LOCAL — domain-local membership must also be
        reflected in the NTLM access check.
        """
        samdb = self.get_samdb()

        # Create a new domain-local group.
        group_name = self.get_new_username()
        group_dn = self.create_group(samdb, group_name,
                                     gtype=GroupType.DOMAIN_LOCAL.value)
        group_sid = self.get_objectSid(samdb, group_dn)

        # Create a user account that belongs to the group.
        client_creds = self._get_creds(account_type=self.AccountType.USER,
                                       member_of=group_dn,
                                       ntlm=True)

        # Create an authentication policy that allows accounts belonging to the
        # group.
        allowed = f'O:SYD:(A;;CR;;;{group_sid})'
        policy = self.create_authn_policy(enforced=True,
                                          computer_allowed_to=allowed)

        # Create a computer account with the assigned policy.
        target_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
                                       assigned_policy=policy)

        # Show that a network SamLogon succeeds, since the user account belongs
        # to the group.
        self._test_samlogon(creds=client_creds,
                            domain_joined_mach_creds=target_creds,
                            logon_type=netlogon.NetlogonNetworkInformation)

        self.check_samlogon_network_log(client_creds,
                                        server_policy=policy)

        # Show that an interactive SamLogon succeeds, since the user account
        # belongs to the group.
        self._test_samlogon(creds=client_creds,
                            domain_joined_mach_creds=target_creds,
                            logon_type=netlogon.NetlogonInteractiveInformation)

        self.check_samlogon_interactive_log(client_creds,
                                            server_policy=policy)
+
    def test_samlogon_allowed_to_computer_allow_to_self(self):
        """A computer performing a network SamLogon to itself succeeds when
        its own policy allows it.

        The policy is assigned directly via msDS-AssignedAuthNPolicy, and
        the log check expects no client policy — client policies don't
        apply to computer accounts.
        """
        samdb = self.get_samdb()

        # Create a computer account.  Non-cached, since the policy assignment
        # below mutates the account.
        client_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
                                       ntlm=True,
                                       cached=False)
        client_dn = client_creds.get_dn()

        # Create an authentication policy that applies to a computer and
        # explicitly allows the user account to obtain a service ticket.
        allowed = f'O:SYD:(A;;CR;;;{client_creds.get_sid()})'
        denied = 'O:SYD:(D;;CR;;;WD)'
        policy = self.create_authn_policy(enforced=True,
                                          user_allowed_to=denied,
                                          computer_allowed_to=allowed,
                                          service_allowed_to=denied)

        # Assign the policy to the account.
        self.add_attribute(samdb, str(client_dn),
                           'msDS-AssignedAuthNPolicy', str(policy.dn))

        # Show that a network SamLogon to ourselves succeeds.
        self._test_samlogon(creds=client_creds,
                            domain_joined_mach_creds=client_creds,
                            logon_type=netlogon.NetlogonNetworkInformation)

        self.check_samlogon_network_log(
            client_creds,
            client_policy=None,  # Client policies don’t apply to computers.
            server_policy=policy)
+
    def test_samlogon_allowed_to_computer_deny_to_self(self):
        """A computer performing a network SamLogon to itself is denied by
        its own policy's deny ACE.

        Fails with AUTHENTICATION_FIREWALL_FAILED even though (per the
        in-line comment) the equivalent Kerberos authentication would be
        allowed.
        """
        samdb = self.get_samdb()

        # Create a computer account.  Non-cached, since the policy assignment
        # below mutates the account.
        client_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
                                       ntlm=True,
                                       cached=False)
        client_dn = client_creds.get_dn()

        # Create an authentication policy that applies to a computer and
        # explicitly denies the user account to obtain a service ticket.
        denied = f'O:SYD:(D;;CR;;;{client_creds.get_sid()})'
        allowed = 'O:SYD:(A;;CR;;;WD)'
        policy = self.create_authn_policy(enforced=True,
                                          user_allowed_to=allowed,
                                          computer_allowed_to=denied,
                                          service_allowed_to=allowed)

        # Assign the policy to the account.
        self.add_attribute(samdb, str(client_dn),
                           'msDS-AssignedAuthNPolicy', str(policy.dn))

        # Show that a network SamLogon to ourselves fails, despite
        # authentication being allowed in the Kerberos case.
        self._test_samlogon(
            creds=client_creds,
            domain_joined_mach_creds=client_creds,
            logon_type=netlogon.NetlogonNetworkInformation,
            expect_error=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED)

        self.check_samlogon_network_log(
            client_creds,
            client_policy=None,  # Client policies don’t apply to computers.
            server_policy=policy,
            server_policy_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
            event=AuditEvent.NTLM_SERVER_RESTRICTION,
            reason=AuditReason.ACCESS_DENIED)
+
    def test_samlogon_allowed_to_service_allow_to_self(self):
        """A managed service performing a network SamLogon to itself
        succeeds when its own policy allows it.

        Unlike the computer-to-self case, the log check expects the policy
        on the CLIENT side too — client policies do apply to managed
        service accounts.
        """
        samdb = self.get_samdb()

        # Create a managed service account.  Non-cached, since the policy
        # assignment below mutates the account.
        client_creds = self._get_creds(
            account_type=self.AccountType.MANAGED_SERVICE,
            ntlm=True,
            cached=False)
        client_dn = client_creds.get_dn()

        # Create an authentication policy that applies to a managed service and
        # explicitly allows the user account to obtain a service ticket.
        allowed = f'O:SYD:(A;;CR;;;{client_creds.get_sid()})'
        denied = 'O:SYD:(D;;CR;;;WD)'
        policy = self.create_authn_policy(enforced=True,
                                          user_allowed_to=denied,
                                          computer_allowed_to=denied,
                                          service_allowed_to=allowed)

        # Assign the policy to the account.
        self.add_attribute(samdb, str(client_dn),
                           'msDS-AssignedAuthNPolicy', str(policy.dn))

        # Show that a network SamLogon to ourselves succeeds.
        self._test_samlogon(creds=client_creds,
                            domain_joined_mach_creds=client_creds,
                            logon_type=netlogon.NetlogonNetworkInformation)

        self.check_samlogon_network_log(client_creds,
                                        client_policy=policy,
                                        server_policy=policy)
+
    def test_samlogon_allowed_to_service_deny_to_self(self):
        """A managed service performing a network SamLogon to itself is
        denied by its own policy's deny ACE.

        Fails with AUTHENTICATION_FIREWALL_FAILED; both client and server
        policies appear in the audit record.
        """
        samdb = self.get_samdb()

        # Create a managed service account.  Non-cached, since the policy
        # assignment below mutates the account.
        client_creds = self._get_creds(
            account_type=self.AccountType.MANAGED_SERVICE,
            ntlm=True,
            cached=False)
        client_dn = client_creds.get_dn()

        # Create an authentication policy that applies to a managed service and
        # explicitly denies the user account to obtain a service ticket.
        denied = f'O:SYD:(D;;CR;;;{client_creds.get_sid()})'
        allowed = 'O:SYD:(A;;CR;;;WD)'
        policy = self.create_authn_policy(enforced=True,
                                          user_allowed_to=allowed,
                                          computer_allowed_to=allowed,
                                          service_allowed_to=denied)

        # Assign the policy to the account.
        self.add_attribute(samdb, str(client_dn),
                           'msDS-AssignedAuthNPolicy', str(policy.dn))

        # Show that a network SamLogon to ourselves fails, despite
        # authentication being allowed in the Kerberos case.
        self._test_samlogon(
            creds=client_creds,
            domain_joined_mach_creds=client_creds,
            logon_type=netlogon.NetlogonNetworkInformation,
            expect_error=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED)

        self.check_samlogon_network_log(
            client_creds,
            client_policy=policy,
            server_policy=policy,
            server_policy_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
            event=AuditEvent.NTLM_SERVER_RESTRICTION,
            reason=AuditReason.ACCESS_DENIED)
+
    def test_samlogon_allowed_to_computer_derived_class_allow(self):
        """computer_allowed_to applies to accounts of a schema class DERIVED
        from ‘computer’, not just to literal computer objects.

        A throwaway subclass of ‘computer’ is added to the schema (random
        suffix avoids collisions across runs; the OID lives under Samba's
        1.3.6.1.4.1.7165 arc), an account of that class gets the policy,
        and both logon types succeed.
        """
        samdb = self.get_samdb()

        # Create a user account.
        client_creds = self._get_creds(account_type=self.AccountType.USER,
                                       ntlm=True)

        # Create an authentication policy that applies to a computer and
        # explicitly allows the user account to obtain a service ticket.
        allowed = f'O:SYD:(A;;CR;;;{client_creds.get_sid()})'
        denied = 'O:SYD:(D;;CR;;;WD)'
        policy = self.create_authn_policy(enforced=True,
                                          user_allowed_to=denied,
                                          computer_allowed_to=allowed,
                                          service_allowed_to=denied)

        # Create a schema class derived from ‘computer’.
        class_id = random.randint(0, 100000000)
        computer_class_cn = f'my-Computer-Class-{class_id}'
        computer_class = computer_class_cn.replace('-', '')
        class_dn = samdb.get_schema_basedn()
        class_dn.add_child(f'CN={computer_class_cn}')
        governs_id = f'1.3.6.1.4.1.7165.4.6.2.9.{class_id}'

        samdb.add({
            'dn': class_dn,
            'objectClass': 'classSchema',
            'subClassOf': 'computer',
            'governsId': governs_id,
            'lDAPDisplayName': computer_class,
        })

        # Create an account derived from ‘computer’ with the assigned policy.
        target_name = self.get_new_username()
        target_creds, target_dn = self.create_account(
            samdb, target_name,
            account_type=self.AccountType.COMPUTER,
            spn=f'host/{target_name}',
            additional_details={
                'msDS-AssignedAuthNPolicy': str(policy.dn),
                'objectClass': computer_class,
            })

        # Fetch and install the account's Kerberos keys on the credentials.
        keys = self.get_keys(target_creds)
        self.creds_set_keys(target_creds, keys)

        # Show that a network SamLogon succeeds.
        self._test_samlogon(creds=client_creds,
                            domain_joined_mach_creds=target_creds,
                            logon_type=netlogon.NetlogonNetworkInformation)

        self.check_samlogon_network_log(client_creds,
                                        server_policy=policy)

        # Show that an interactive SamLogon succeeds.
        self._test_samlogon(creds=client_creds,
                            domain_joined_mach_creds=target_creds,
                            logon_type=netlogon.NetlogonInteractiveInformation)

        self.check_samlogon_interactive_log(client_creds,
                                            server_policy=policy)
+
    def test_samlogon_allowed_to_service_derived_class_allow(self):
        """service_allowed_to applies to accounts of a schema class DERIVED
        from ‘msDS-ManagedServiceAccount’.

        Mirrors the derived-computer-class test for the managed-service
        branch; both logon types succeed.  NOTE(review): unlike that test,
        no get_keys/creds_set_keys step follows account creation here —
        confirm it is not needed for this account type.
        """
        samdb = self.get_samdb()

        # Create a user account.
        client_creds = self._get_creds(account_type=self.AccountType.USER,
                                       ntlm=True)

        # Create an authentication policy that applies to a managed service and
        # explicitly allows the user account to obtain a service ticket.
        allowed = f'O:SYD:(A;;CR;;;{client_creds.get_sid()})'
        denied = 'O:SYD:(D;;CR;;;WD)'
        policy = self.create_authn_policy(enforced=True,
                                          user_allowed_to=denied,
                                          computer_allowed_to=denied,
                                          service_allowed_to=allowed)

        # Create a schema class derived from ‘msDS-ManagedServiceAccount’.
        class_id = random.randint(0, 100000000)
        service_class_cn = f'my-Managed-Service-Class-{class_id}'
        service_class = service_class_cn.replace('-', '')
        class_dn = samdb.get_schema_basedn()
        class_dn.add_child(f'CN={service_class_cn}')
        governs_id = f'1.3.6.1.4.1.7165.4.6.2.9.{class_id}'

        samdb.add({
            'dn': class_dn,
            'objectClass': 'classSchema',
            'subClassOf': 'msDS-ManagedServiceAccount',
            'governsId': governs_id,
            'lDAPDisplayName': service_class,
        })

        # Create an account derived from ‘msDS-ManagedServiceAccount’ with the
        # assigned policy.
        target_name = self.get_new_username()
        target_creds, target_dn = self.create_account(
            samdb, target_name,
            account_type=self.AccountType.MANAGED_SERVICE,
            spn=f'host/{target_name}',
            additional_details={
                'msDS-AssignedAuthNPolicy': str(policy.dn),
                'objectClass': service_class,
            })

        # Show that a network SamLogon succeeds.
        self._test_samlogon(creds=client_creds,
                            domain_joined_mach_creds=target_creds,
                            logon_type=netlogon.NetlogonNetworkInformation)

        self.check_samlogon_network_log(client_creds,
                                        server_policy=policy)

        # Show that an interactive SamLogon succeeds.
        self._test_samlogon(creds=client_creds,
                            domain_joined_mach_creds=target_creds,
                            logon_type=netlogon.NetlogonInteractiveInformation)

        self.check_samlogon_interactive_log(client_creds,
                                            server_policy=policy)
+
    def test_samlogon_bad_pwd_client_policy(self):
        """With a wrong password AND a client device restriction, the
        device-restriction error wins.

        Both logon types fail with ACCOUNT_RESTRICTION (not WRONG_PASSWORD)
        and are audited as NTLM_DEVICE_RESTRICTION — the client policy is
        evaluated before the password.  A non-cached account is used so
        failed attempts can't lock an account shared with other tests.
        """
        # Create an authentication policy with device restrictions for users.
        allowed = 'O:SY'
        policy = self.create_authn_policy(enforced=True,
                                          user_allowed_from=allowed)

        # Create a user account with the assigned policy. Use a non-cached
        # account so that it is not locked out for other tests.
        client_creds = self._get_creds(account_type=self.AccountType.USER,
                                       assigned_policy=policy,
                                       ntlm=True,
                                       cached=False)

        # Set a wrong password.
        client_creds.set_password('wrong password')

        # Show that a network SamLogon fails.
        self._test_samlogon(
            creds=client_creds,
            logon_type=netlogon.NetlogonNetworkInformation,
            expect_error=ntstatus.NT_STATUS_ACCOUNT_RESTRICTION)

        self.check_samlogon_network_log(
            client_creds,
            client_policy=policy,
            client_policy_status=ntstatus.NT_STATUS_ACCOUNT_RESTRICTION,
            event=AuditEvent.NTLM_DEVICE_RESTRICTION)

        # Show that an interactive SamLogon fails.
        self._test_samlogon(
            creds=client_creds,
            logon_type=netlogon.NetlogonInteractiveInformation,
            expect_error=ntstatus.NT_STATUS_ACCOUNT_RESTRICTION)

        self.check_samlogon_interactive_log(
            client_creds,
            client_policy=policy,
            client_policy_status=ntstatus.NT_STATUS_ACCOUNT_RESTRICTION,
            event=AuditEvent.NTLM_DEVICE_RESTRICTION)
+
+ def test_samlogon_bad_pwd_server_policy(self):
+ # Create a user account. Use a non-cached account so that it is not
+ # locked out for other tests.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ ntlm=True,
+ cached=False)
+
+ # Create an authentication policy that applies to a computer and
+ # explicitly denies the user account to obtain a service ticket.
+ denied = f'O:SYD:(D;;CR;;;{client_creds.get_sid()})'
+ allowed = 'O:SYD:(A;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_to=allowed,
+ computer_allowed_to=denied,
+ service_allowed_to=allowed)
+
+ # Create a computer account with the assigned policy.
+ target_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
+ assigned_policy=policy)
+
+ # Set a wrong password.
+ client_creds.set_password('wrong password')
+
+ # Show that a network SamLogon fails.
+ self._test_samlogon(
+ creds=client_creds,
+ domain_joined_mach_creds=target_creds,
+ logon_type=netlogon.NetlogonNetworkInformation,
+ expect_error=ntstatus.NT_STATUS_WRONG_PASSWORD)
+
+ self.check_samlogon_network_log(
+ client_creds,
+ # The bad password failure takes precedence, and no policy appears
+ # in the log.
+ server_policy=None,
+ status=ntstatus.NT_STATUS_WRONG_PASSWORD)
+
+ # Show that an interactive SamLogon fails.
+ self._test_samlogon(
+ creds=client_creds,
+ domain_joined_mach_creds=target_creds,
+ logon_type=netlogon.NetlogonInteractiveInformation,
+ expect_error=ntstatus.NT_STATUS_WRONG_PASSWORD)
+
+ self.check_samlogon_interactive_log(
+ client_creds,
+ # The bad password failure takes precedence, and no policy appears
+ # in the log.
+ server_policy=None,
+ status=ntstatus.NT_STATUS_WRONG_PASSWORD)
+
+ def test_samlogon_bad_pwd_client_and_server_policy(self):
+ # Create an authentication policy with device restrictions for users.
+ allowed = 'O:SY'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_from=allowed)
+
+ # Create a user account with the assigned policy. Use a non-cached
+ # account so that it is not locked out for other tests.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy,
+ ntlm=True,
+ cached=False)
+
+ # Create an authentication policy that applies to a computer and
+ # explicitly denies the user account to obtain a service ticket.
+ denied = f'O:SYD:(D;;CR;;;{client_creds.get_sid()})'
+ allowed = 'O:SYD:(A;;CR;;;WD)'
+ server_policy = self.create_authn_policy(enforced=True,
+ user_allowed_to=allowed,
+ computer_allowed_to=denied,
+ service_allowed_to=allowed)
+
+ # Create a computer account with the assigned policy.
+ target_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
+ assigned_policy=server_policy)
+
+ # Set a wrong password.
+ client_creds.set_password('wrong password')
+
+ # Show that a network SamLogon fails.
+ self._test_samlogon(
+ creds=client_creds,
+ domain_joined_mach_creds=target_creds,
+ logon_type=netlogon.NetlogonNetworkInformation,
+ expect_error=ntstatus.NT_STATUS_ACCOUNT_RESTRICTION)
+
+ self.check_samlogon_network_log(
+ client_creds,
+ client_policy=policy,
+ client_policy_status=ntstatus.NT_STATUS_ACCOUNT_RESTRICTION,
+ event=AuditEvent.NTLM_DEVICE_RESTRICTION)
+
+ # Show that an interactive SamLogon fails.
+ self._test_samlogon(
+ creds=client_creds,
+ domain_joined_mach_creds=target_creds,
+ logon_type=netlogon.NetlogonInteractiveInformation,
+ expect_error=ntstatus.NT_STATUS_ACCOUNT_RESTRICTION)
+
+ self.check_samlogon_interactive_log(
+ client_creds,
+ client_policy=policy,
+ client_policy_status=ntstatus.NT_STATUS_ACCOUNT_RESTRICTION,
+ event=AuditEvent.NTLM_DEVICE_RESTRICTION)
+
+
+if __name__ == '__main__':
+ global_asn1_print = False
+ global_hexdump = False
+ import unittest
+ unittest.main()
diff --git a/python/samba/tests/krb5/claims_in_pac.py b/python/samba/tests/krb5/claims_in_pac.py
new file mode 100755
index 0000000..a5db7ba
--- /dev/null
+++ b/python/samba/tests/krb5/claims_in_pac.py
@@ -0,0 +1,490 @@
+#!/usr/bin/env python3
+# Unix SMB/CIFS implementation.
+# Copyright (C) Andrew Bartlett 2023
+# Copyright (C) Stefan Metzmacher 2020
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import sys
+import os
+
+sys.path.insert(0, 'bin/python')
+os.environ['PYTHONUNBUFFERED'] = '1'
+
+from samba.dcerpc import krb5pac, claims
+from samba.ndr import ndr_pack, ndr_unpack
+from samba.tests import TestCase
+
+class PacClaimsTests(TestCase):
+
+ pac_data_uncompressed = bytes.fromhex(
+ '08000000000000000100000000020000880000000000000006000000100000008802'
+ '000000000000070000001000000098020000000000000a00000020000000a8020000'
+ '000000000c000000b8000000c8020000000000000d00000090010000800300000000'
+ '000011000000080000001005000000000000120000001c0000001805000000000000'
+ '01100800ccccccccf001000000000000000002000000000000000000ffffffffffff'
+ 'ff7fffffffffffffff7f6ebd4f913c60d9016e7db9bb0561d9016e3da9863d81d901'
+ '16001600040002000000000008000200000000000c00020000000000100002000000'
+ '000014000200000000001800020000000000ae04000001020000010000001c000200'
+ '20000000000000000000000000000000000000001e002000200002000a000c002400'
+ '02002800020000000000000000001000000000000000000000000000000000000000'
+ '000000000000000000000000020000002c0002000000000000000000000000000b00'
+ '0000000000000b000000370032003000660064003300630033005f00310030000000'
+ '00000000000000000000000000000000000000000000000000000000000000000000'
+ '00000000000000000000000000000000000000000000000000000100000001020000'
+ '0700000010000000000000000f000000410042004100520054004c00450054002d00'
+ '440043002d00570049004e000000060000000000000005000000570049004e003200'
+ '32000000040000000104000000000005150000003f1ba8749a54499be10ea4590200'
+ '00003000020007000000340002000700000005000000010500000000000515000000'
+ '000000000000000000000000f1010000010000000101000000000012010000000000'
+ '000010000000d89573aeb6f036c4ca5f5412100000008ada43082e7dfccb7587a478'
+ '8097ee903c60d9011600370032003000660064003300630033005f00310030003a00'
+ '18002200580003000000160080001c00980000000000370032003000660064003300'
+ '630033005f00310030004000770069006e00320032002e006500780061006d007000'
+ '6c0065002e0063006f006d00000000000000570049004e00320032002e0045005800'
+ '41004d0050004c0045002e0043004f004d0000000000000037003200300066006400'
+ '3300630033005f003100300000000105000000000005150000003f1ba8749a54499b'
+ 'e10ea459ae0400000000000001100800cccccccc8001000000000000000002005801'
+ '00000400020000000000580100000000000000000000000000005801000001100800'
+ 'cccccccc480100000000000000000200010000000400020000000000000000000000'
+ '000001000000010000000300000008000200030000000c0002000600060001000000'
+ '10000200140002000300030003000000180002002800020002000200040000002c00'
+ '02000b000000000000000b000000370032003000660064003300630033005f003900'
+ '00000000010000000000000001000000000000000b000000000000000b0000003700'
+ '32003000660064003300630033005f00370000000000030000001c00020020000200'
+ '2400020004000000000000000400000066006f006f00000004000000000000000400'
+ '00006200610072000000040000000000000004000000620061007a0000000b000000'
+ '000000000b000000370032003000660064003300630033005f003800000000000400'
+ '000009000a0000000000070001000000000006000100000000000000010000000000'
+ '0000000002000000010000000105000000000005150000003f1ba8749a54499be10e'
+ 'a459ae04000000000000'
+ )
+
+ pac_data_compressed = bytes.fromhex(
+ '080000000000000001000000f8010000880000000000000006000000100000008002'
+ '000000000000070000001000000090020000000000000a0000001e000000a0020000'
+ '000000000c000000b0000000c0020000000000000d00000060020000700300000000'
+ '00001100000008000000d005000000000000120000001c000000d805000000000000'
+ '01100800cccccccce801000000000000000002000000000000000000ffffffffffff'
+ 'ff7fffffffffffffff7f50b330913c60d90150739abb0561d90150338a863d81d901'
+ '14001400040002000000000008000200000000000c00020000000000100002000000'
+ '000014000200000000001800020000000000ad04000001020000010000001c000200'
+ '20000000000000000000000000000000000000001e002000200002000a000c002400'
+ '02002800020000000000000000001000000000000000000000000000000000000000'
+ '000000000000000000000000020000002c0002000000000000000000000000000a00'
+ '0000000000000a000000370032003000660064003300630033005f00360000000000'
+ '00000000000000000000000000000000000000000000000000000000000000000000'
+ '00000000000000000000000000000000000000000000010000000102000007000000'
+ '10000000000000000f000000410042004100520054004c00450054002d0044004300'
+ '2d00570049004e000000060000000000000005000000570049004e00320032000000'
+ '040000000104000000000005150000003f1ba8749a54499be10ea459020000003000'
+ '02000700000034000200070000000500000001050000000000051500000000000000'
+ '0000000000000000f10100000100000001010000000000120100000010000000ace7'
+ 'b599ff30aa486b52983210000000b50e9bea014545c97eca0b978097ee903c60d901'
+ '1400370032003000660064003300630033005f003600000038001800220050000300'
+ '0000140078001c00900000000000370032003000660064003300630033005f003600'
+ '4000770069006e00320032002e006500780061006d0070006c0065002e0063006f00'
+ '6d00570049004e00320032002e004500580041004d0050004c0045002e0043004f00'
+ '4d00000000000000370032003000660064003300630033005f003600000000000105'
+ '000000000005150000003f1ba8749a54499be10ea459ad0400000000000001100800'
+ 'cccccccc500200000000000000000200290200000400020004000000282000000000'
+ '00000000000000000000290200007377878887880888070008000780080006000700'
+ '07000708877707800800880088700700080008080000800000000080707877877700'
+ '76770867868788000000000000000000000000000000000000000000000000000000'
+ '00000000000000000000000000000800000000000000000000000000000000000000'
+ '00000000000077000800800000008700000000000000850700000000000074768000'
+ '00000000750587000800000066078000000080706677880080008060878708000000'
+ '00800080000000000080000000000000000000000000000000000000000000000000'
+ '6080080000000070000000000000000000000000000000000000000000000000fd74'
+ 'eaf001add6213aecf4346587eec48c323e3e1a5a32042eecf243669a581e383d2940'
+ 'e80e383c294463b8c0b49024f1def20df819586b086cd2ab98700923386674845663'
+ 'ef57e91718110c1ad4c0ac88912126d2180545e98670ea2aa002052aa54189cc318d'
+ '26c46b667f18b6876262a9a4985ecdf76e5161033fd457ba020075360c837aaa3aa8'
+ '2749ee8152420999b553c60195be5e5c35c4330557538772972a7d527aeca1fc6b29'
+ '51ca254ac83960272a930f3194892d4729eff48e48ccfb929329ff501c356c0e8ed1'
+ '8471ec70986c31da86a8090b4022c1db257514fdba4347532146648d4f99f9065e0d'
+ '9a0d90d80f38389c39cb9ebe6d4e5e681e5a8a5418f591f1dbb7594a3f2aa3220ced'
+ '1cd18cb49cffcc2ff18eef6caf443663640c56640000120000000200000001000000'
+ '0105000000000005150000003f1ba8749a54499be10ea459ad04000000000000'
+ )
+
+ pac_data_int64_claim = bytes.fromhex(
+ '080000000000000001000000f0010000880000000000000006000000100000007802'
+ '000000000000070000001000000088020000000000000a0000001a00000098020000'
+ '000000000c00000088000000b8020000000000000d000000d0000000400300000000'
+ '000011000000080000001004000000000000120000001c0000001804000000000000'
+ '01100800cccccccce001000000000000000002000000000000000000ffffffffffff'
+ 'ff7fffffffffffffff7f52a2a6d607cfd90152621001d1cfd901522200cc08f0d901'
+ '10001000040002000000000008000200000000000c00020000000000100002000000'
+ '0000140002000000000018000200000000004362000001020000010000001c000200'
+ '200000000000000000000000000000000000000014001600200002000e0010002400'
+ '02002800020000000000000000001000000000000000000000000000000000000000'
+ '000000000000000000000000020000002c0002000000000000000000000000000800'
+ '0000000000000800000075007300650072006e0061006d0065000000000000000000'
+ '00000000000000000000000000000000000000000000000000000000000000000000'
+ '0000000000000000000000000000000000000100000001020000070000000b000000'
+ '000000000a000000570049004e0032004b00310039002d0044004300080000000000'
+ '0000070000006500780061006d0070006c0065000000040000000104000000000005'
+ '15000000bcfb8bf5af39e9b21f9b5fcd020000003000020007000000340002000700'
+ '000005000000010500000000000515000000000000000000000000000000f1010000'
+ '010000000101000000000012010000000000000010000000147a8762afe3366b316c'
+ '936410000000e05a433ae9271bcc603d933480353ad607cfd9011000750073006500'
+ '72006e0061006d006500000000000000280018001600400003000000100058001c00'
+ '68000000000075007300650072006e0061006d00650040006500780061006d007000'
+ '6c0065002e0063006f006d004500580041004d0050004c0045002e0043004f004d00'
+ '000075007300650072006e0061006d006500010500000000000515000000bcfb8bf5'
+ 'af39e9b21f9b5fcd436200000000000001100800ccccccccc0000000000000000000'
+ '02009800000004000200000000009800000000000000000000000000000098000000'
+ '01100800cccccccc8800000000000000000002000100000004000200000000000000'
+ '00000000000001000000010000000100000008000200010000000c00020001000100'
+ '05000000100002000800000000000000080000006100200063006c00610069006d00'
+ '0000050000000000000003000000000000002a0000000000000019fcffffffffffff'
+ 'e803000000000000204e000000000000000000000200000001000000010500000000'
+ '000515000000bcfb8bf5af39e9b21f9b5fcd4362000000000000'
+ )
+
+ def test_unpack_raw(self):
+ pac_unpacked_raw = ndr_unpack(krb5pac.PAC_DATA_RAW, self.pac_data_uncompressed)
+ self.assertEqual(pac_unpacked_raw.num_buffers, 8)
+ self.assertEqual(pac_unpacked_raw.version, 0)
+
+ def confirm_uncompressed_claims(self, claim_metadata):
+ self.assertEqual(claim_metadata.uncompressed_claims_set_size,
+ 344)
+ claims_set = claim_metadata.claims_set.claims.claims
+ self.assertEqual(claims_set.claims_array_count,
+ 1)
+ claim_arrays = claims_set.claims_arrays
+ self.assertEqual(claim_arrays[0].claims_source_type,
+ claims.CLAIMS_SOURCE_TYPE_AD)
+ self.assertEqual(claim_arrays[0].claims_count,
+ 3)
+ claim_entries = claim_arrays[0].claim_entries
+ self.assertEqual(claim_entries[0].id,
+ '720fd3c3_9')
+ self.assertEqual(claim_entries[0].type,
+ claims.CLAIM_TYPE_BOOLEAN)
+ self.assertEqual(claim_entries[0].values.value_count,
+ 1)
+ self.assertEqual(claim_entries[0].values.values[0],
+ 1)
+
+ self.assertEqual(claim_entries[1].id,
+ '720fd3c3_7')
+ self.assertEqual(claim_entries[1].type,
+ claims.CLAIM_TYPE_STRING)
+ self.assertEqual(claim_entries[1].values.value_count,
+ 3)
+ self.assertEqual(claim_entries[1].values.values[0],
+ "foo")
+ self.assertEqual(claim_entries[1].values.values[1],
+ "bar")
+ self.assertEqual(claim_entries[1].values.values[2],
+ "baz")
+
+ self.assertEqual(claim_entries[2].id,
+ '720fd3c3_8')
+ self.assertEqual(claim_entries[2].type,
+ claims.CLAIM_TYPE_UINT64)
+ self.assertEqual(claim_entries[2].values.value_count,
+ 4)
+ self.assertEqual(claim_entries[2].values.values[0],
+ 655369)
+ self.assertEqual(claim_entries[2].values.values[1],
+ 65543)
+ self.assertEqual(claim_entries[2].values.values[2],
+ 65542)
+ self.assertEqual(claim_entries[2].values.values[3],
+ 65536)
+
+ def test_unpack_claims_pac_uncompressed(self):
+ pac = ndr_unpack(krb5pac.PAC_DATA, self.pac_data_uncompressed)
+
+ self.assertEqual(pac.num_buffers, 8)
+ self.assertEqual(pac.version, 0)
+ self.assertEqual(pac.buffers[0].type, krb5pac.PAC_TYPE_LOGON_INFO)
+ self.assertEqual(pac.buffers[0].info.info.info3.base.account_name.string, "720fd3c3_10")
+
+ self.assertEqual(pac.buffers[5].type, krb5pac.PAC_TYPE_CLIENT_CLAIMS_INFO)
+ self.assertIsNotNone(pac.buffers[5].info.remaining)
+
+ client_claims = ndr_unpack(claims.CLAIMS_SET_METADATA_NDR, pac.buffers[5].info.remaining)
+ claim_metadata = client_claims.claims.metadata
+
+ self.assertEqual(pac.buffers[6].type, krb5pac.PAC_TYPE_ATTRIBUTES_INFO)
+ self.assertEqual(pac.buffers[7].type, krb5pac.PAC_TYPE_REQUESTER_SID)
+
+ self.assertEqual(claim_metadata.compression_format,
+ claims.CLAIMS_COMPRESSION_FORMAT_NONE)
+ self.confirm_uncompressed_claims(claim_metadata)
+
+ def confirm_compressed_claims(self, claim_metadata):
+ self.assertEqual(claim_metadata.uncompressed_claims_set_size,
+ 8232)
+ claims_set = claim_metadata.claims_set.claims.claims
+ self.assertEqual(claims_set.claims_array_count,
+ 1)
+ claim_arrays = claims_set.claims_arrays
+ self.assertEqual(claim_arrays[0].claims_source_type,
+ claims.CLAIMS_SOURCE_TYPE_AD)
+ self.assertEqual(claim_arrays[0].claims_count,
+ 5)
+ claim_entries = claim_arrays[0].claim_entries
+ self.assertEqual(claim_entries[0].id,
+ '720fd3c3_4')
+ self.assertEqual(claim_entries[0].type,
+ claims.CLAIM_TYPE_BOOLEAN)
+ self.assertEqual(claim_entries[0].values.value_count,
+ 1)
+ self.assertEqual(claim_entries[0].values.values[0],
+ 1)
+
+ self.assertEqual(claim_entries[1].id,
+ '720fd3c3_0')
+ self.assertEqual(claim_entries[1].type,
+ claims.CLAIM_TYPE_STRING)
+ self.assertEqual(claim_entries[1].values.value_count,
+ 4)
+ self.assertEqual(claim_entries[1].values.values[0],
+ "A first value.")
+ self.assertEqual(claim_entries[1].values.values[1],
+ "A second value.")
+ self.assertEqual(claim_entries[1].values.values[2],
+ "A third value.")
+
+ self.assertEqual(claim_entries[2].id,
+ '720fd3c3_1')
+ self.assertEqual(claim_entries[2].type,
+ claims.CLAIM_TYPE_STRING)
+ self.assertEqual(claim_entries[2].values.value_count,
+ 3)
+ self.assertEqual(claim_entries[2].values.values[0],
+ "DC=win22,DC=example,DC=com")
+ self.assertEqual(claim_entries[2].values.values[1],
+ "CN=Users,DC=win22,DC=example,DC=com")
+ self.assertEqual(claim_entries[2].values.values[2],
+ "CN=Computers,DC=win22,DC=example,DC=com")
+
+ self.assertEqual(claim_entries[3].id,
+ '720fd3c3_2')
+ self.assertEqual(claim_entries[3].type,
+ claims.CLAIM_TYPE_UINT64)
+ self.assertEqual(claim_entries[3].values.value_count,
+ 4)
+ self.assertEqual(claim_entries[3].values.values[0],
+ 655369)
+ self.assertEqual(claim_entries[3].values.values[1],
+ 65543)
+ self.assertEqual(claim_entries[3].values.values[2],
+ 65542)
+ self.assertEqual(claim_entries[3].values.values[3],
+ 65536)
+
+ def test_unpack_claims_pac_compressed(self):
+ pac = ndr_unpack(krb5pac.PAC_DATA, self.pac_data_compressed)
+
+ self.assertEqual(pac.num_buffers, 8)
+ self.assertEqual(pac.version, 0)
+ self.assertEqual(pac.buffers[0].type, krb5pac.PAC_TYPE_LOGON_INFO)
+ self.assertEqual(pac.buffers[0].info.info.info3.base.account_name.string, "720fd3c3_6")
+
+ self.assertEqual(pac.buffers[5].type, krb5pac.PAC_TYPE_CLIENT_CLAIMS_INFO)
+ self.assertIsNotNone(pac.buffers[5].info.remaining)
+
+ client_claims = ndr_unpack(claims.CLAIMS_SET_METADATA_NDR, pac.buffers[5].info.remaining)
+ claim_metadata = client_claims.claims.metadata
+
+ self.assertEqual(pac.buffers[6].type, krb5pac.PAC_TYPE_ATTRIBUTES_INFO)
+ self.assertEqual(pac.buffers[7].type, krb5pac.PAC_TYPE_REQUESTER_SID)
+
+ self.assertEqual(claim_metadata.compression_format,
+ claims.CLAIMS_COMPRESSION_FORMAT_XPRESS_HUFF)
+ self.assertEqual(claim_metadata.claims_set_size,
+ 553)
+ self.confirm_compressed_claims(claim_metadata)
+
+ def test_repack_claims_pac_uncompressed(self):
+ pac = ndr_unpack(krb5pac.PAC_DATA, self.pac_data_uncompressed)
+ client_claims = ndr_unpack(claims.CLAIMS_SET_METADATA_NDR, pac.buffers[5].info.remaining)
+ client_claims_bytes1 = ndr_pack(client_claims)
+ client_claims2 = ndr_unpack(claims.CLAIMS_SET_METADATA_NDR, client_claims_bytes1)
+ client_claims_bytes2 = ndr_pack(client_claims2)
+ self.assertEqual(client_claims_bytes1, client_claims_bytes2)
+
+ claim_metadata = client_claims2.claims.metadata
+ self.assertEqual(claim_metadata.compression_format,
+ claims.CLAIMS_COMPRESSION_FORMAT_NONE)
+ self.confirm_uncompressed_claims(claim_metadata)
+
+ def test_repack_claims_pac_compressed(self):
+ pac = ndr_unpack(krb5pac.PAC_DATA, self.pac_data_compressed)
+ client_claims = ndr_unpack(claims.CLAIMS_SET_METADATA_NDR, pac.buffers[5].info.remaining)
+ client_claims_bytes1 = ndr_pack(client_claims)
+ client_claims2 = ndr_unpack(claims.CLAIMS_SET_METADATA_NDR, client_claims_bytes1)
+ client_claims_bytes2 = ndr_pack(client_claims2)
+ self.assertEqual(client_claims_bytes1, client_claims_bytes2)
+
+ # This confirms that after compression and decompression, we
+ # still get the values we expect
+ claim_metadata = client_claims2.claims.metadata
+ self.assertEqual(claim_metadata.compression_format,
+ claims.CLAIMS_COMPRESSION_FORMAT_XPRESS_HUFF)
+ self.assertEqual(claim_metadata.claims_set_size,
+ 585)
+ self.confirm_compressed_claims(claim_metadata)
+
+ def test_repack_claims_pac_uncompressed_set_compressed(self):
+ pac = ndr_unpack(krb5pac.PAC_DATA, self.pac_data_uncompressed)
+ client_claims = ndr_unpack(claims.CLAIMS_SET_METADATA_NDR, pac.buffers[5].info.remaining)
+ client_claims.claims.metadata.compression_format = claims.CLAIMS_COMPRESSION_FORMAT_XPRESS_HUFF
+ client_claims_bytes1 = ndr_pack(client_claims)
+ client_claims2 = ndr_unpack(claims.CLAIMS_SET_METADATA_NDR, client_claims_bytes1)
+
+        # Confirm that, despite setting FORMAT_XPRESS_HUFF, compression is never attempted
+ self.assertEqual(client_claims2.claims.metadata.uncompressed_claims_set_size,
+ 344)
+ self.assertEqual(client_claims2.claims.metadata.claims_set_size,
+ 344)
+ self.assertEqual(client_claims2.claims.metadata.compression_format,
+ claims.CLAIMS_COMPRESSION_FORMAT_NONE)
+
+ # Confirm we match the originally uncompressed sample
+ claim_metadata = client_claims2.claims.metadata
+ self.confirm_uncompressed_claims(claim_metadata)
+
+ # Finally confirm a re-pack gets identical bytes
+ client_claims_bytes2 = ndr_pack(client_claims2)
+ self.assertEqual(client_claims_bytes1, client_claims_bytes2)
+
+
+ def test_repack_claims_pac_compressed_set_uncompressed(self):
+ pac = ndr_unpack(krb5pac.PAC_DATA, self.pac_data_compressed)
+ client_claims = ndr_unpack(claims.CLAIMS_SET_METADATA_NDR, pac.buffers[5].info.remaining)
+ client_claims.claims.metadata.compression_format = claims.CLAIMS_COMPRESSION_FORMAT_NONE
+ client_claims_bytes1 = ndr_pack(client_claims)
+ client_claims2 = ndr_unpack(claims.CLAIMS_SET_METADATA_NDR, client_claims_bytes1)
+
+        # Confirm that, by setting FORMAT_NONE, compression is never attempted
+ self.assertEqual(client_claims2.claims.metadata.uncompressed_claims_set_size,
+ 8232)
+ self.assertEqual(client_claims2.claims.metadata.claims_set_size,
+ 8232)
+ self.assertEqual(client_claims2.claims.metadata.compression_format,
+ claims.CLAIMS_COMPRESSION_FORMAT_NONE)
+
+        # This confirms that after pack and unpack, despite being
+        # larger than the compression minimum, we still get the data
+        # and the values we expect for the originally-compressed data
+ claim_metadata = client_claims2.claims.metadata
+ self.confirm_compressed_claims(claim_metadata)
+
+ # Finally confirm a re-pack gets identical bytes
+ client_claims_bytes2 = ndr_pack(client_claims2)
+ self.assertEqual(client_claims_bytes1, client_claims_bytes2)
+
+ def test_repack_claims_pac_uncompressed_uninit_lengths(self):
+ pac = ndr_unpack(krb5pac.PAC_DATA, self.pac_data_uncompressed)
+ client_claims = ndr_unpack(claims.CLAIMS_SET_METADATA_NDR, pac.buffers[5].info.remaining)
+ # This matches what we expect the KDC to do, which is to ask for compression always
+ client_claims.claims.metadata.compression_format = claims.CLAIMS_COMPRESSION_FORMAT_XPRESS_HUFF
+ client_claims.claims.metadata.uncompressed_claims_set_size = 0
+ client_claims.claims.metadata.claims_set_size = 0
+
+ client_claims_bytes1 = ndr_pack(client_claims)
+ client_claims2 = ndr_unpack(claims.CLAIMS_SET_METADATA_NDR, client_claims_bytes1)
+
+ # Confirm that the NDR code did not compress and sent FORMAT_NONE on the wire
+ self.assertEqual(client_claims2.claims.metadata.uncompressed_claims_set_size,
+ 344)
+ self.assertEqual(client_claims2.claims.metadata.claims_set_size,
+ 344)
+ self.assertEqual(client_claims2.claims.metadata.compression_format,
+ claims.CLAIMS_COMPRESSION_FORMAT_NONE)
+
+ claim_metadata = client_claims2.claims.metadata
+ self.confirm_uncompressed_claims(claim_metadata)
+
+ # Finally confirm a re-pack gets identical bytes
+ client_claims_bytes2 = ndr_pack(client_claims2)
+ self.assertEqual(client_claims_bytes1, client_claims_bytes2)
+
+ def test_repack_claims_pac_compressed_uninit_lengths(self):
+ pac = ndr_unpack(krb5pac.PAC_DATA, self.pac_data_compressed)
+ client_claims = ndr_unpack(claims.CLAIMS_SET_METADATA_NDR, pac.buffers[5].info.remaining)
+ client_claims.claims.metadata.compression_format = claims.CLAIMS_COMPRESSION_FORMAT_XPRESS_HUFF
+ client_claims.claims.metadata.uncompressed_claims_set_size = 0
+ client_claims.claims.metadata.claims_set_size = 0
+
+ client_claims_bytes1 = ndr_pack(client_claims)
+ client_claims2 = ndr_unpack(claims.CLAIMS_SET_METADATA_NDR, client_claims_bytes1)
+
+ # Confirm that despite no lengths being set, the data is compressed correctly
+ self.assertEqual(client_claims2.claims.metadata.uncompressed_claims_set_size,
+ 8232)
+ self.assertEqual(client_claims2.claims.metadata.claims_set_size,
+ 585)
+ self.assertEqual(client_claims2.claims.metadata.compression_format,
+ claims.CLAIMS_COMPRESSION_FORMAT_XPRESS_HUFF)
+
+ claim_metadata = client_claims2.claims.metadata
+ self.confirm_compressed_claims(claim_metadata)
+
+ # Finally confirm a re-pack gets identical bytes
+ client_claims_bytes2 = ndr_pack(client_claims2)
+ self.assertEqual(client_claims_bytes1, client_claims_bytes2)
+
+ def test_pac_int64_claims(self):
+ """Test that we can parse a PAC containing INT64 claims."""
+
+ # Decode the PAC.
+ pac = ndr_unpack(krb5pac.PAC_DATA, self.pac_data_int64_claim)
+
+ # Get the PAC buffer which contains the client claims.
+ self.assertEqual(8, pac.num_buffers)
+ client_claims_buf = pac.buffers[5]
+ self.assertEqual(krb5pac.PAC_TYPE_CLIENT_CLAIMS_INFO,
+ client_claims_buf.type)
+
+ # Ensure that we can decode the client claims.
+ client_claims_data = client_claims_buf.info.remaining
+ client_claims = ndr_unpack(claims.CLAIMS_SET_METADATA_NDR,
+ client_claims_data)
+
+ claims_set = client_claims.claims.metadata.claims_set.claims.claims
+
+ # We should find a single claims array, …
+ self.assertEqual(1, claims_set.claims_array_count)
+ claims_array = claims_set.claims_arrays[0]
+ self.assertEqual(claims.CLAIMS_SOURCE_TYPE_AD,
+ claims_array.claims_source_type)
+
+ # …containing our INT64 claim.
+ self.assertEqual(1, claims_array.claims_count)
+ claim_entry = claims_array.claim_entries[0]
+ self.assertEqual('a claim', claim_entry.id)
+ self.assertEqual(claims.CLAIM_TYPE_INT64, claim_entry.type)
+
+ # Ensure that the values have been decoded correctly.
+ self.assertEqual([3, 42, -999, 1000, 20000], claim_entry.values.values)
+
+ # Re-encode the claims buffer and ensure that the result is identical
+ # to the original encoded claims produced by Windows.
+ client_claims_packed = ndr_pack(client_claims)
+ self.assertEqual(client_claims_data, client_claims_packed)
+
+
+if __name__ == '__main__':
+ import unittest
+ unittest.main()
diff --git a/python/samba/tests/krb5/claims_tests.py b/python/samba/tests/krb5/claims_tests.py
new file mode 100755
index 0000000..074147e
--- /dev/null
+++ b/python/samba/tests/krb5/claims_tests.py
@@ -0,0 +1,2032 @@
+#!/usr/bin/env python3
+# Unix SMB/CIFS implementation.
+# Copyright (C) Stefan Metzmacher 2020
+# Copyright (C) Catalyst.Net Ltd 2022
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import sys
+import os
+
+sys.path.insert(0, 'bin/python')
+os.environ['PYTHONUNBUFFERED'] = '1'
+
+import re
+import ldb
+
+from samba.dcerpc import claims, krb5pac, security
+from samba.ndr import ndr_pack
+
+from samba.tests import DynamicTestCase, env_get_var_value
+from samba.tests.krb5 import kcrypto
+from samba.tests.krb5.kcrypto import Enctype
+from samba.tests.krb5.kdc_base_test import GroupType, KDCBaseTest, Principal
+from samba.tests.krb5.raw_testcase import Krb5EncryptionKey, RawKerberosTest
+from samba.tests.krb5.rfc4120_constants import (
+ AES256_CTS_HMAC_SHA1_96,
+ ARCFOUR_HMAC_MD5,
+ KRB_TGS_REP,
+ NT_PRINCIPAL,
+)
+import samba.tests.krb5.rfc4120_pyasn1 as krb5_asn1
+
+SidType = RawKerberosTest.SidType
+
+global_asn1_print = False
+global_hexdump = False
+
+
+class UnorderedList(tuple):
+ def __eq__(self, other):
+ if not isinstance(other, UnorderedList):
+ raise AssertionError('unexpected comparison attempt')
+ return sorted(self) == sorted(other)
+
+ def __hash__(self):
+ return hash(tuple(sorted(self)))
+
+
+@DynamicTestCase
+class ClaimsTests(KDCBaseTest):
+ # Placeholder objects that represent accounts undergoing testing.
+ user = object()
+ mach = object()
+
+ # Constants for group SID attributes.
+ default_attrs = security.SE_GROUP_DEFAULT_FLAGS
+ resource_attrs = default_attrs | security.SE_GROUP_RESOURCE
+
+ asserted_identity = security.SID_AUTHENTICATION_AUTHORITY_ASSERTED_IDENTITY
+ compounded_auth = security.SID_COMPOUNDED_AUTHENTICATION
+
+ @classmethod
+ def setUpClass(cls):
+ super().setUpClass()
+
+ cls._search_iterator = None
+
+ def setUp(self):
+ super().setUp()
+ self.do_asn1_print = global_asn1_print
+ self.do_hexdump = global_hexdump
+
+ def get_sample_dn(self):
+ if self._search_iterator is None:
+ samdb = self.get_samdb()
+ type(self)._search_iterator = samdb.search_iterator()
+
+ return str(next(self._search_iterator).dn)
+
+ def get_binary_dn(self):
+ return 'B:8:01010101:' + self.get_sample_dn()
+
+ def setup_claims(self, all_claims):
+ expected_claims = {}
+ unexpected_claims = set()
+
+ details = {}
+ mod_msg = ldb.Message()
+ security_desc = None
+
+ for claim in all_claims:
+ # Make a copy to avoid modifying the original.
+ claim = dict(claim)
+
+ claim_id = self.get_new_username()
+
+ expected = claim.pop('expected', False)
+ expected_values = claim.pop('expected_values', None)
+ if not expected:
+ self.assertIsNone(expected_values,
+ 'claim not expected, '
+ 'but expected values provided')
+
+ values = claim.pop('values', None)
+ if values is not None:
+ def get_placeholder(val):
+ if val is self.sample_dn:
+ return self.get_sample_dn()
+ elif val is self.binary_dn:
+ return self.get_binary_dn()
+ else:
+ return val
+
+ def ldb_transform(val):
+ if val is True:
+ return 'TRUE'
+ elif val is False:
+ return 'FALSE'
+ elif isinstance(val, int):
+ return str(val)
+ else:
+ return val
+
+ values_type = type(values)
+ values = values_type(map(get_placeholder, values))
+ transformed_values = values_type(map(ldb_transform, values))
+
+ attribute = claim['attribute']
+ if attribute in details:
+ self.assertEqual(details[attribute], transformed_values,
+ 'conflicting values set for attribute')
+ details[attribute] = transformed_values
+
+ readable = claim.pop('readable', True)
+ if not readable:
+ if security_desc is None:
+ security_desc = security.descriptor()
+
+ # Deny all read property access to the attribute.
+ ace = security.ace()
+ ace.type = security.SEC_ACE_TYPE_ACCESS_DENIED_OBJECT
+ ace.access_mask = security.SEC_ADS_READ_PROP
+ ace.trustee = security.dom_sid(security.SID_WORLD)
+ ace.object.flags |= security.SEC_ACE_OBJECT_TYPE_PRESENT
+ ace.object.type = self.get_schema_id_guid_from_attribute(
+ attribute)
+
+ security_desc.dacl_add(ace)
+
+ if expected_values is None:
+ expected_values = values
+
+ mod_values = claim.pop('mod_values', None)
+ if mod_values is not None:
+ flag = (ldb.FLAG_MOD_REPLACE
+ if values is not None else ldb.FLAG_MOD_ADD)
+ mod_msg[attribute] = ldb.MessageElement(mod_values,
+ flag,
+ attribute)
+
+ if expected:
+ self.assertIsNotNone(expected_values,
+ 'expected claim, but no value(s) set')
+ value_type = claim['value_type']
+
+ expected_claims[claim_id] = {
+ 'source_type': claims.CLAIMS_SOURCE_TYPE_AD,
+ 'type': value_type,
+ 'values': expected_values,
+ }
+ else:
+ unexpected_claims.add(claim_id)
+
+ self.create_claim(claim_id, **claim)
+
+ if security_desc is not None:
+ self.assertNotIn('nTSecurityDescriptor', details)
+ details['nTSecurityDescriptor'] = ndr_pack(security_desc)
+
+ return details, mod_msg, expected_claims, unexpected_claims
+
+ def modify_pac_remove_client_claims(self, pac):
+ pac_buffers = pac.buffers
+ for pac_buffer in pac_buffers:
+ if pac_buffer.type == krb5pac.PAC_TYPE_CLIENT_CLAIMS_INFO:
+ pac.num_buffers -= 1
+ pac_buffers.remove(pac_buffer)
+
+ break
+ else:
+ self.fail('expected client claims in PAC')
+
+ pac.buffers = pac_buffers
+
+ return pac
+
+ def remove_client_claims(self, ticket):
+ return self.modified_ticket(
+ ticket,
+ modify_pac_fn=self.modify_pac_remove_client_claims,
+ checksum_keys=self.get_krbtgt_checksum_key())
+
+ def remove_client_claims_tgt_from_rodc(self, ticket):
+ rodc_krbtgt_creds = self.get_mock_rodc_krbtgt_creds()
+ rodc_krbtgt_key = self.TicketDecryptionKey_from_creds(
+ rodc_krbtgt_creds)
+
+ checksum_keys = {
+ krb5pac.PAC_TYPE_KDC_CHECKSUM: rodc_krbtgt_key
+ }
+
+ return self.modified_ticket(
+ ticket,
+ new_ticket_key=rodc_krbtgt_key,
+ modify_pac_fn=self.modify_pac_remove_client_claims,
+ checksum_keys=checksum_keys)
+
+ def test_tgs_claims(self):
+ self.run_tgs_test(remove_claims=False, to_krbtgt=False)
+
+ def test_tgs_claims_remove_claims(self):
+ self.run_tgs_test(remove_claims=True, to_krbtgt=False)
+
+ def test_tgs_claims_to_krbtgt(self):
+ self.run_tgs_test(remove_claims=False, to_krbtgt=True)
+
+ def test_tgs_claims_remove_claims_to_krbtgt(self):
+ self.run_tgs_test(remove_claims=True, to_krbtgt=True)
+
+ def test_delegation_claims(self):
+ self.run_delegation_test(remove_claims=False)
+
+ def test_delegation_claims_remove_claims(self):
+ self.run_delegation_test(remove_claims=True)
+
+ def test_rodc_issued_claims_modify(self):
+ self.run_rodc_tgs_test(remove_claims=False, delete_claim=False)
+
+ def test_rodc_issued_claims_delete(self):
+ self.run_rodc_tgs_test(remove_claims=False, delete_claim=True)
+
+ def test_rodc_issued_claims_remove_claims_modify(self):
+ self.run_rodc_tgs_test(remove_claims=True, delete_claim=False)
+
+ def test_rodc_issued_claims_remove_claims_delete(self):
+ self.run_rodc_tgs_test(remove_claims=True, delete_claim=True)
+
+ def test_rodc_issued_device_claims_modify(self):
+ self.run_device_rodc_tgs_test(remove_claims=False, delete_claim=False)
+
+ def test_rodc_issued_device_claims_delete(self):
+ self.run_device_rodc_tgs_test(remove_claims=False, delete_claim=True)
+
+ def test_rodc_issued_device_claims_remove_claims_modify(self):
+ self.run_device_rodc_tgs_test(remove_claims=True, delete_claim=False)
+
+ def test_rodc_issued_device_claims_remove_claims_delete(self):
+ self.run_device_rodc_tgs_test(remove_claims=True, delete_claim=True)
+
+ # Create a user account with an applicable claim for the 'middleName'
+ # attribute. After obtaining a TGT, from which we optionally remove the
+ # claims, change the middleName attribute values for the account in the
+ # database to a different value. By which we may observe, when examining
+ # the reply to our following Kerberos TGS request, whether the claims
+ # contained therein are taken directly from the ticket, or obtained fresh
+ # from the database.
+ def run_tgs_test(self, remove_claims, to_krbtgt):
+ samdb = self.get_samdb()
+ user_creds, user_dn = self.create_account(samdb,
+ self.get_new_username(),
+ additional_details={
+ 'middleName': 'foo',
+ })
+
+ claim_id = self.get_new_username()
+ self.create_claim(claim_id,
+ enabled=True,
+ attribute='middleName',
+ single_valued=True,
+ source_type='AD',
+ for_classes=['user'],
+ value_type=claims.CLAIM_TYPE_STRING)
+
+ expected_claims = {
+ claim_id: {
+ 'source_type': claims.CLAIMS_SOURCE_TYPE_AD,
+ 'type': claims.CLAIM_TYPE_STRING,
+ 'values': ('foo',),
+ },
+ }
+
+ # Get a TGT for the user.
+ tgt = self.get_tgt(user_creds, expect_pac=True,
+ expect_client_claims=True,
+ expected_client_claims=expected_claims)
+
+ if remove_claims:
+ tgt = self.remove_client_claims(tgt)
+
+ # Change the value of the attribute used for the claim.
+ msg = ldb.Message(ldb.Dn(samdb, user_dn))
+ msg['middleName'] = ldb.MessageElement('bar',
+ ldb.FLAG_MOD_REPLACE,
+ 'middleName')
+ samdb.modify(msg)
+
+ if to_krbtgt:
+ target_creds = self.get_krbtgt_creds()
+ sname = self.get_krbtgt_sname()
+ else:
+ target_creds = self.get_service_creds()
+ sname = None
+
+ # Get a service ticket for the user. The claim value should not have
+ # changed, indicating that the client claims are propagated straight
+ # through.
+ self.get_service_ticket(
+ tgt, target_creds,
+ sname=sname,
+ expect_pac=True,
+ expect_client_claims=not remove_claims,
+ expected_client_claims=(expected_claims
+ if not remove_claims else None))
+
+ # Perform a test similar to that preceding. This time, create both a user
+ # and a computer account, each having an applicable claim. After obtaining
+ # tickets, from which the claims are optionally removed, change the claim
+ # attribute of each account to a different value. Then perform constrained
+ # delegation with the user's service ticket, verifying that the user's
+ # claims are carried into the resulting ticket.
+ def run_delegation_test(self, remove_claims):
+ service_creds = self.get_service_creds()
+ service_spn = service_creds.get_spn()
+
+ user_name = self.get_new_username()
+ mach_name = self.get_new_username()
+
+ samdb = self.get_samdb()
+ user_creds, user_dn = self.create_account(
+ samdb,
+ user_name,
+ self.AccountType.USER,
+ additional_details={
+ 'middleName': 'user_old',
+ })
+ mach_creds, mach_dn = self.create_account(
+ samdb,
+ mach_name,
+ self.AccountType.COMPUTER,
+ spn=f'host/{mach_name}',
+ additional_details={
+ 'middleName': 'mach_old',
+ 'msDS-AllowedToDelegateTo': service_spn,
+ })
+
+ claim_id = self.get_new_username()
+ self.create_claim(claim_id,
+ enabled=True,
+ attribute='middleName',
+ single_valued=True,
+ source_type='AD',
+ for_classes=['user', 'computer'],
+ value_type=claims.CLAIM_TYPE_STRING)
+
+ options = 'forwardable'
+ expected_flags = krb5_asn1.TicketFlags(options)
+
+ expected_claims_user = {
+ claim_id: {
+ 'source_type': claims.CLAIMS_SOURCE_TYPE_AD,
+ 'type': claims.CLAIM_TYPE_STRING,
+ 'values': ('user_old',),
+ },
+ }
+ expected_claims_mach = {
+ claim_id: {
+ 'source_type': claims.CLAIMS_SOURCE_TYPE_AD,
+ 'type': claims.CLAIM_TYPE_STRING,
+ 'values': ('mach_old',),
+ },
+ }
+
+ user_tgt = self.get_tgt(user_creds,
+ kdc_options=options,
+ expect_pac=True,
+ expected_flags=expected_flags,
+ expect_client_claims=True,
+ expected_client_claims=expected_claims_user)
+ user_ticket = self.get_service_ticket(
+ user_tgt,
+ mach_creds,
+ kdc_options=options,
+ expect_pac=True,
+ expected_flags=expected_flags,
+ expect_client_claims=True,
+ expected_client_claims=expected_claims_user)
+
+ mach_tgt = self.get_tgt(mach_creds,
+ expect_pac=True,
+ expect_client_claims=True,
+ expected_client_claims=expected_claims_mach)
+
+ if remove_claims:
+ user_ticket = self.remove_client_claims(user_ticket)
+ mach_tgt = self.remove_client_claims(mach_tgt)
+
+ # Change the value of the attribute used for the user claim.
+ msg = ldb.Message(ldb.Dn(samdb, user_dn))
+ msg['middleName'] = ldb.MessageElement('user_new',
+ ldb.FLAG_MOD_REPLACE,
+ 'middleName')
+ samdb.modify(msg)
+
+ # Change the value of the attribute used for the machine claim.
+ msg = ldb.Message(ldb.Dn(samdb, mach_dn))
+ msg['middleName'] = ldb.MessageElement('mach_new',
+ ldb.FLAG_MOD_REPLACE,
+ 'middleName')
+ samdb.modify(msg)
+
+ additional_tickets = [user_ticket.ticket]
+ options = str(krb5_asn1.KDCOptions('cname-in-addl-tkt'))
+
+ user_realm = user_creds.get_realm()
+ user_cname = self.PrincipalName_create(name_type=NT_PRINCIPAL,
+ names=[user_name])
+
+ user_sid = user_creds.get_sid()
+
+ mach_realm = mach_creds.get_realm()
+
+ service_name = service_creds.get_username()[:-1]
+ service_realm = service_creds.get_realm()
+ service_sname = self.PrincipalName_create(name_type=NT_PRINCIPAL,
+ names=['host', service_name])
+ service_decryption_key = self.TicketDecryptionKey_from_creds(
+ service_creds)
+ service_etypes = service_creds.tgs_supported_enctypes
+
+ expected_proxy_target = service_creds.get_spn()
+ expected_transited_services = [f'host/{mach_name}@{mach_realm}']
+
+ authenticator_subkey = self.RandomKey(Enctype.AES256)
+
+ etypes = (AES256_CTS_HMAC_SHA1_96, ARCFOUR_HMAC_MD5)
+
+ # The user's claims are propagated into the new ticket, while the
+ # machine's claims are dispensed with.
+ expected_claims = expected_claims_user if not remove_claims else None
+
+ # Perform constrained delegation.
+ kdc_exchange_dict = self.tgs_exchange_dict(
+ creds=user_creds,
+ expected_crealm=user_realm,
+ expected_cname=user_cname,
+ expected_srealm=service_realm,
+ expected_sname=service_sname,
+ expected_account_name=user_name,
+ expected_sid=user_sid,
+ expected_supported_etypes=service_etypes,
+ ticket_decryption_key=service_decryption_key,
+ check_rep_fn=self.generic_check_kdc_rep,
+ check_kdc_private_fn=self.generic_check_kdc_private,
+ tgt=mach_tgt,
+ authenticator_subkey=authenticator_subkey,
+ kdc_options=options,
+ expected_proxy_target=expected_proxy_target,
+ expected_transited_services=expected_transited_services,
+ expect_client_claims=not remove_claims,
+ expected_client_claims=expected_claims,
+ expect_device_claims=False,
+ expect_pac=True)
+
+ rep = self._generic_kdc_exchange(kdc_exchange_dict,
+ cname=None,
+ realm=service_realm,
+ sname=service_sname,
+ etypes=etypes,
+ additional_tickets=additional_tickets)
+ self.check_reply(rep, KRB_TGS_REP)
+
+ def run_rodc_tgs_test(self, remove_claims, delete_claim):
+ samdb = self.get_samdb()
+ # Create a user account permitted to replicate to the RODC.
+ user_creds = self.get_cached_creds(
+ account_type=self.AccountType.USER,
+ opts={
+ # Set the value of the claim attribute.
+ 'additional_details': (('middleName', 'foo'),),
+ 'allowed_replication_mock': True,
+ 'revealed_to_mock_rodc': True,
+ },
+ use_cache=False)
+ user_dn = user_creds.get_dn()
+
+ # Create a claim that applies to the user.
+ claim_id = self.get_new_username()
+ self.create_claim(claim_id,
+ enabled=True,
+ attribute='middleName',
+ single_valued=True,
+ source_type='AD',
+ for_classes=['user'],
+ value_type=claims.CLAIM_TYPE_STRING)
+
+ expected_claims = {
+ claim_id: {
+ 'source_type': claims.CLAIMS_SOURCE_TYPE_AD,
+ 'type': claims.CLAIM_TYPE_STRING,
+ 'values': ('foo',),
+ },
+ }
+
+ # Get a TGT for the user.
+ tgt = self.get_tgt(user_creds, expect_pac=True,
+ expect_client_claims=True,
+ expected_client_claims=expected_claims)
+
+ # Modify the TGT to be issued by an RODC. Optionally remove the client
+ # claims.
+ if remove_claims:
+ tgt = self.remove_client_claims_tgt_from_rodc(tgt)
+ else:
+ tgt = self.issued_by_rodc(tgt)
+
+ # Modify or delete the value of the attribute used for the claim. Modify
+ # our test expectations accordingly.
+ msg = ldb.Message(user_dn)
+ if delete_claim:
+ msg['middleName'] = ldb.MessageElement([],
+ ldb.FLAG_MOD_DELETE,
+ 'middleName')
+ expected_claims = None
+ unexpected_claims = {claim_id}
+ else:
+ msg['middleName'] = ldb.MessageElement('bar',
+ ldb.FLAG_MOD_REPLACE,
+ 'middleName')
+ expected_claims = {
+ claim_id: {
+ 'source_type': claims.CLAIMS_SOURCE_TYPE_AD,
+ 'type': claims.CLAIM_TYPE_STRING,
+ 'values': ('bar',),
+ },
+ }
+ unexpected_claims = None
+ samdb.modify(msg)
+
+ target_creds = self.get_service_creds()
+
+ # Get a service ticket for the user. The claim value should have
+ # changed, indicating that the client claims have been regenerated or
+ # removed, depending on whether the corresponding attribute is still
+ # present on the account.
+ self.get_service_ticket(
+ tgt, target_creds,
+ expect_pac=True,
+ # Expect the CLIENT_CLAIMS_INFO PAC buffer. It may be empty.
+ expect_client_claims=True,
+ expected_client_claims=expected_claims,
+ unexpected_client_claims=unexpected_claims)
+
+ def run_device_rodc_tgs_test(self, remove_claims, delete_claim):
+ samdb = self.get_samdb()
+
+ # Create the user account.
+ user_creds = self.get_cached_creds(
+ account_type=self.AccountType.USER)
+ user_name = user_creds.get_username()
+
+ # Create a machine account permitted to replicate to the RODC.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={
+ # Set the value of the claim attribute.
+ 'additional_details': (('middleName', 'foo'),),
+ 'allowed_replication_mock': True,
+ 'revealed_to_mock_rodc': True,
+ },
+ use_cache=False)
+ mach_dn = mach_creds.get_dn()
+
+ # Create a claim that applies to the computer.
+ claim_id = self.get_new_username()
+ self.create_claim(claim_id,
+ enabled=True,
+ attribute='middleName',
+ single_valued=True,
+ source_type='AD',
+ for_classes=['computer'],
+ value_type=claims.CLAIM_TYPE_STRING)
+
+ expected_claims = {
+ claim_id: {
+ 'source_type': claims.CLAIMS_SOURCE_TYPE_AD,
+ 'type': claims.CLAIM_TYPE_STRING,
+ 'values': ('foo',),
+ },
+ }
+
+ # Get a TGT for the user.
+ user_tgt = self.get_tgt(user_creds)
+
+ # Get a TGT for the computer.
+ mach_tgt = self.get_tgt(mach_creds, expect_pac=True,
+ expect_client_claims=True,
+ expected_client_claims=expected_claims)
+
+ # Modify the computer's TGT to be issued by an RODC. Optionally remove
+ # the client claims.
+ if remove_claims:
+ mach_tgt = self.remove_client_claims_tgt_from_rodc(mach_tgt)
+ else:
+ mach_tgt = self.issued_by_rodc(mach_tgt)
+
+ # Modify or delete the value of the attribute used for the claim. Modify
+ # our test expectations accordingly.
+ msg = ldb.Message(mach_dn)
+ if delete_claim:
+ msg['middleName'] = ldb.MessageElement([],
+ ldb.FLAG_MOD_DELETE,
+ 'middleName')
+ expected_claims = None
+ unexpected_claims = {claim_id}
+ else:
+ msg['middleName'] = ldb.MessageElement('bar',
+ ldb.FLAG_MOD_REPLACE,
+ 'middleName')
+ expected_claims = {
+ claim_id: {
+ 'source_type': claims.CLAIMS_SOURCE_TYPE_AD,
+ 'type': claims.CLAIM_TYPE_STRING,
+ 'values': ('bar',),
+ },
+ }
+ unexpected_claims = None
+ samdb.modify(msg)
+
+ subkey = self.RandomKey(user_tgt.session_key.etype)
+
+ armor_subkey = self.RandomKey(subkey.etype)
+ explicit_armor_key = self.generate_armor_key(armor_subkey,
+ mach_tgt.session_key)
+ armor_key = kcrypto.cf2(explicit_armor_key.key,
+ subkey.key,
+ b'explicitarmor',
+ b'tgsarmor')
+ armor_key = Krb5EncryptionKey(armor_key, None)
+
+ target_creds = self.get_service_creds()
+ target_name = target_creds.get_username()
+ if target_name[-1] == '$':
+ target_name = target_name[:-1]
+
+ sname = self.PrincipalName_create(name_type=NT_PRINCIPAL,
+ names=['host', target_name])
+ srealm = target_creds.get_realm()
+
+ decryption_key = self.TicketDecryptionKey_from_creds(
+ target_creds)
+
+ target_supported_etypes = target_creds.tgs_supported_enctypes
+
+ etypes = (AES256_CTS_HMAC_SHA1_96, ARCFOUR_HMAC_MD5)
+
+ kdc_options = '0'
+ pac_options = '1' # claims support
+
+ # Perform a TGS-REQ for the user. The device claim value should have
+ # changed, indicating that the computer's client claims have been
+ # regenerated or removed, depending on whether the corresponding
+ # attribute is still present on the account.
+
+ kdc_exchange_dict = self.tgs_exchange_dict(
+ creds=user_creds,
+ expected_crealm=user_tgt.crealm,
+ expected_cname=user_tgt.cname,
+ expected_srealm=srealm,
+ expected_sname=sname,
+ expected_account_name=user_name,
+ ticket_decryption_key=decryption_key,
+ generate_fast_fn=self.generate_simple_fast,
+ generate_fast_armor_fn=self.generate_ap_req,
+ check_rep_fn=self.generic_check_kdc_rep,
+ check_kdc_private_fn=self.generic_check_kdc_private,
+ tgt=user_tgt,
+ armor_key=armor_key,
+ armor_tgt=mach_tgt,
+ armor_subkey=armor_subkey,
+ pac_options=pac_options,
+ authenticator_subkey=subkey,
+ kdc_options=kdc_options,
+ expect_pac=True,
+ expected_supported_etypes=target_supported_etypes,
+ # Expect the DEVICE_CLAIMS_INFO PAC buffer. It may be empty.
+ expect_device_claims=True,
+ expected_device_claims=expected_claims,
+ unexpected_device_claims=unexpected_claims)
+
+ rep = self._generic_kdc_exchange(kdc_exchange_dict,
+ cname=None,
+ realm=srealm,
+ sname=sname,
+ etypes=etypes)
+ self.check_reply(rep, KRB_TGS_REP)
+
+ @classmethod
+ def setUpDynamicTestCases(cls):
+ FILTER = env_get_var_value('FILTER', allow_missing=True)
+ for case in cls.cases:
+ name = case.pop('name')
+ name = re.sub(r'\W+', '_', name)
+ if FILTER and not re.search(FILTER, name):
+ continue
+
+ # Run tests making requests both to the krbtgt and to our own
+ # account.
+ cls.generate_dynamic_test('test_claims', name,
+ dict(case), False)
+ cls.generate_dynamic_test('test_claims', name + '_to_self',
+ dict(case), True)
+
+ for case in cls.device_claims_cases:
+ name = case.pop('test')
+ name = re.sub(r'\W+', '_', name)
+ if FILTER and not re.search(FILTER, name):
+ continue
+
+ cls.generate_dynamic_test('test_device_claims', name,
+ dict(case))
+
+ def _test_claims_with_args(self, case, to_self):
+ account_class = case.pop('class')
+ if account_class == 'user':
+ account_type = self.AccountType.USER
+ elif account_class == 'computer':
+ account_type = self.AccountType.COMPUTER
+ else:
+ self.fail(f'Unknown class "{account_class}"')
+
+ all_claims = case.pop('claims')
+ (details, mod_msg,
+ expected_claims,
+ unexpected_claims) = self.setup_claims(all_claims)
+ self.assertFalse(mod_msg,
+ 'mid-test modifications not supported in this test')
+ creds = self.get_cached_creds(
+ account_type=account_type,
+ opts={
+ 'additional_details': self.freeze(details),
+ })
+
+ # Whether to specify claims support in PA-PAC-OPTIONS.
+ pac_options_claims = case.pop('pac-options:claims-support', None)
+
+ self.assertFalse(case, 'unexpected parameters in testcase')
+
+ if pac_options_claims is None:
+ pac_options_claims = True
+
+ if to_self:
+ service_creds = self.get_service_creds()
+ sname = self.PrincipalName_create(
+ name_type=NT_PRINCIPAL,
+ names=[service_creds.get_username()])
+ ticket_etype = Enctype.RC4
+ else:
+ service_creds = None
+ sname = None
+ ticket_etype = None
+
+ if pac_options_claims:
+ pac_options = '1' # claims support
+ else:
+ pac_options = '0' # no claims support
+
+ self.get_tgt(creds,
+ sname=sname,
+ target_creds=service_creds,
+ ticket_etype=ticket_etype,
+ pac_options=pac_options,
+ expect_pac=True,
+ expect_client_claims=True,
+ expected_client_claims=expected_claims or None,
+ unexpected_client_claims=unexpected_claims or None)
+
+ sample_dn = object()
+ binary_dn = object()
+ security_descriptor = (b'\x01\x00\x04\x95\x14\x00\x00\x00\x00\x00\x00\x00'
+ b'\x00\x00\x00\x00$\x00\x00\x00\x01\x02\x00\x00\x00'
+ b'\x00\x00\x05 \x00\x00\x00 \x02\x00\x00\x04\x00'
+ b'\x1c\x00\x01\x00\x00\x00\x00\x1f\x14\x00\xff\x01'
+ b'\x0f\xf0\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00'
+ b'\x00\x00')
+
+ cases = [
+ {
+ 'name': 'no claims',
+ 'claims': [],
+ 'class': 'user',
+ },
+ {
+ 'name': 'simple AD-sourced claim',
+ 'claims': [
+ {
+ # 2.5.5.12
+ 'enabled': True,
+ 'attribute': 'carLicense',
+ 'single_valued': True,
+ 'source_type': 'AD',
+ 'for_classes': ['user'],
+ 'value_type': claims.CLAIM_TYPE_STRING,
+ 'values': ('foo',),
+ 'expected': True,
+ },
+ ],
+ 'class': 'user',
+ },
+ {
+ 'name': 'no claims support in pac options',
+ 'claims': [
+ {
+ # 2.5.5.12
+ 'enabled': True,
+ 'attribute': 'carLicense',
+ 'single_valued': True,
+ 'source_type': 'AD',
+ 'for_classes': ['user'],
+ 'value_type': claims.CLAIM_TYPE_STRING,
+ 'values': ('foo',),
+ # We still get claims in the PAC even if we don't specify
+ # claims support in PA-PAC-OPTIONS.
+ 'expected': True,
+ },
+ ],
+ 'class': 'user',
+ 'pac-options:claims-support': False,
+ },
+ {
+ 'name': 'deny RP',
+ 'claims': [
+ {
+ # 2.5.5.12
+ 'enabled': True,
+ 'attribute': 'carLicense',
+ 'single_valued': True,
+ 'source_type': 'AD',
+ 'for_classes': ['user'],
+ 'value_type': claims.CLAIM_TYPE_STRING,
+ 'values': ('foo',),
+ # Deny read access to the attribute. It still shows up in
+ # the claim.
+ 'readable': False,
+ 'expected': True,
+ },
+ ],
+ 'class': 'user',
+ },
+ {
+ # Note: The order of these DNs may differ on Windows.
+ 'name': 'dn string syntax',
+ 'claims': [
+ {
+ # 2.5.5.1
+ 'enabled': True,
+ 'attribute': 'msDS-AuthenticatedAtDC',
+ 'single_valued': True,
+ 'source_type': 'AD',
+ 'for_classes': ['user'],
+ 'value_type': claims.CLAIM_TYPE_STRING,
+ 'values': UnorderedList([sample_dn, sample_dn, sample_dn]),
+ 'expected': True,
+ },
+ ],
+ 'class': 'user',
+ },
+ {
+ 'name': 'dn string syntax, wrong value type',
+ 'claims': [
+ {
+ # 2.5.5.1
+ 'enabled': True,
+ 'attribute': 'msDS-AuthenticatedAtDC',
+ 'single_valued': True,
+ 'source_type': 'AD',
+ 'for_classes': ['user'],
+ 'value_type': claims.CLAIM_TYPE_BOOLEAN,
+ 'values': UnorderedList([sample_dn, sample_dn, sample_dn]),
+ },
+ ],
+ 'class': 'user',
+ },
+ {
+ 'name': 'oid syntax',
+ 'claims': [
+ {
+ # 2.5.5.2
+ 'enabled': True,
+ 'attribute': 'objectClass',
+ 'single_valued': True,
+ 'source_type': 'AD',
+ 'for_classes': ['user'],
+ 'value_type': claims.CLAIM_TYPE_UINT64,
+ 'expected_values': [655369, 65543, 65542, 65536],
+ 'expected': True,
+ },
+ ],
+ 'class': 'user',
+ },
+ {
+ 'name': 'oid syntax 2',
+ 'claims': [
+ {
+ # 2.5.5.2
+ 'enabled': True,
+ 'attribute': 'objectClass',
+ 'single_valued': True,
+ 'source_type': 'AD',
+ 'for_classes': ['computer'],
+ 'value_type': claims.CLAIM_TYPE_UINT64,
+ 'expected_values': [196638, 655369, 65543, 65542, 65536],
+ 'expected': True,
+ },
+ ],
+ 'class': 'computer',
+ },
+ {
+ 'name': 'oid syntax, wrong value type',
+ 'claims': [
+ {
+ # 2.5.5.2
+ 'enabled': True,
+ 'attribute': 'objectClass',
+ 'single_valued': True,
+ 'source_type': 'AD',
+ 'for_classes': ['user'],
+ 'value_type': claims.CLAIM_TYPE_INT64,
+ },
+ ],
+ 'class': 'user',
+ },
+ {
+ 'name': 'boolean syntax, true',
+ 'claims': [
+ {
+ # 2.5.5.8
+ 'enabled': True,
+ 'attribute': 'msTSAllowLogon',
+ 'single_valued': True,
+ 'source_type': 'AD',
+ 'for_classes': ['user'],
+ 'value_type': claims.CLAIM_TYPE_BOOLEAN,
+ 'values': (True,),
+ 'expected': True,
+ },
+ ],
+ 'class': 'user',
+ },
+ {
+ 'name': 'boolean syntax, false',
+ 'claims': [
+ {
+ # 2.5.5.8
+ 'enabled': True,
+ 'attribute': 'msTSAllowLogon',
+ 'single_valued': True,
+ 'source_type': 'AD',
+ 'for_classes': ['user'],
+ 'value_type': claims.CLAIM_TYPE_BOOLEAN,
+ 'values': (False,),
+ 'expected': True,
+ },
+ ],
+ 'class': 'user',
+ },
+ {
+ 'name': 'boolean syntax, wrong value type',
+ 'claims': [
+ {
+ # 2.5.5.8
+ 'enabled': True,
+ 'attribute': 'msTSAllowLogon',
+ 'single_valued': True,
+ 'source_type': 'AD',
+ 'for_classes': ['user'],
+ 'value_type': claims.CLAIM_TYPE_STRING,
+ 'values': (True,),
+ },
+ ],
+ 'class': 'user',
+ },
+ {
+ 'name': 'integer syntax',
+ 'claims': [
+ {
+ # 2.5.5.9
+ 'enabled': True,
+ 'attribute': 'localeID',
+ 'single_valued': True,
+ 'source_type': 'AD',
+ 'for_classes': ['user'],
+ 'value_type': claims.CLAIM_TYPE_INT64,
+ 'values': (3, 42, -999, 1000, 20000),
+ 'expected': True,
+ },
+ ],
+ 'class': 'user',
+ },
+ {
+ 'name': 'integer syntax, duplicate claim',
+ 'claims': [
+ {
+ # 2.5.5.9
+ 'enabled': True,
+ 'attribute': 'localeID',
+ 'single_valued': True,
+ 'source_type': 'AD',
+ 'for_classes': ['user'],
+ 'value_type': claims.CLAIM_TYPE_INT64,
+ 'values': (3, 42, -999, 1000, 20000),
+ 'expected': True,
+ },
+ ] * 2, # Create two integer syntax claims.
+ 'class': 'user',
+ },
+ {
+ 'name': 'integer syntax, wrong value type',
+ 'claims': [
+ {
+ # 2.5.5.9
+ 'enabled': True,
+ 'attribute': 'localeID',
+ 'single_valued': True,
+ 'source_type': 'AD',
+ 'for_classes': ['user'],
+ 'value_type': claims.CLAIM_TYPE_UINT64,
+ 'values': (3, 42, -999, 1000),
+ },
+ ],
+ 'class': 'user',
+ },
+ {
+ 'name': 'security descriptor syntax',
+ 'claims': [
+ {
+ # 2.5.5.15
+ 'enabled': True,
+ 'attribute': 'msDS-AllowedToActOnBehalfOfOtherIdentity',
+ 'single_valued': True,
+ 'source_type': 'AD',
+ 'for_classes': ['computer'],
+ 'value_type': claims.CLAIM_TYPE_STRING,
+ 'values': (security_descriptor,),
+ 'expected_values': (
+ 'O:BAD:PARAI(A;OICINPIOID;CCDCLCSWRPWPDTLOCRSDRCWDWOGAGXGWGR;;;S-1-0-0)',
+ ),
+ 'expected': True,
+ },
+ ],
+ 'class': 'computer',
+ },
+ {
+ 'name': 'security descriptor syntax, wrong value type',
+ 'claims': [
+ {
+ # 2.5.5.15
+ 'enabled': True,
+ 'attribute': 'msDS-AllowedToActOnBehalfOfOtherIdentity',
+ 'single_valued': True,
+ 'source_type': 'AD',
+ 'for_classes': ['computer'],
+ 'value_type': claims.CLAIM_TYPE_UINT64,
+ 'values': (security_descriptor,),
+ },
+ ],
+ 'class': 'computer',
+ },
+ {
+ 'name': 'case insensitive string syntax (invalid)',
+ 'claims': [
+ {
+ # 2.5.5.4
+ 'enabled': True,
+ 'attribute': 'networkAddress',
+ 'single_valued': True,
+ 'source_type': 'AD',
+ 'for_classes': ['user'],
+ 'value_type': claims.CLAIM_TYPE_STRING,
+ 'values': ('foo', 'bar'),
+ },
+ ],
+ 'class': 'user',
+ },
+ {
+ 'name': 'printable string syntax (invalid)',
+ 'claims': [
+ {
+ # 2.5.5.5
+ 'enabled': True,
+ 'attribute': 'displayNamePrintable',
+ 'single_valued': True,
+ 'source_type': 'AD',
+ 'for_classes': ['user'],
+ 'value_type': claims.CLAIM_TYPE_STRING,
+ 'values': ('foo',),
+ },
+ ],
+ 'class': 'user',
+ },
+ {
+ 'name': 'numeric string syntax (invalid)',
+ 'claims': [
+ {
+ # 2.5.5.6
+ 'enabled': True,
+ 'attribute': 'internationalISDNNumber',
+ 'single_valued': True,
+ 'source_type': 'AD',
+ 'for_classes': ['user'],
+ 'value_type': claims.CLAIM_TYPE_STRING,
+ 'values': ('foo', 'bar'),
+ },
+ ],
+ 'class': 'user',
+ },
+ {
+ 'name': 'dn binary syntax (invalid)',
+ 'claims': [
+ {
+ # 2.5.5.7
+ 'enabled': True,
+ 'attribute': 'msDS-RevealedUsers',
+ 'single_valued': True,
+ 'source_type': 'AD',
+ 'for_classes': ['user'],
+ 'value_type': claims.CLAIM_TYPE_STRING,
+ 'values': (binary_dn, binary_dn, binary_dn),
+ },
+ ],
+ 'class': 'computer',
+ },
+ {
+ 'name': 'octet string syntax (invalid)',
+ 'claims': [
+ {
+ # 2.5.5.10
+ 'enabled': True,
+ 'attribute': 'jpegPhoto',
+ 'single_valued': True,
+ 'source_type': 'AD',
+ 'for_classes': ['user'],
+ 'value_type': claims.CLAIM_TYPE_STRING,
+ 'values': ('foo', 'bar'),
+ },
+ ],
+ 'class': 'user',
+ },
+ {
+ 'name': 'utc time syntax (invalid)',
+ 'claims': [
+ {
+ # 2.5.5.11
+ 'enabled': True,
+ 'attribute': 'msTSExpireDate2',
+ 'single_valued': True,
+ 'source_type': 'AD',
+ 'for_classes': ['user'],
+ 'value_type': claims.CLAIM_TYPE_STRING,
+ 'values': ('19700101000000.0Z',),
+ },
+ ],
+ 'class': 'user',
+ },
+ {
+ 'name': 'access point syntax (invalid)',
+ 'claims': [
+ {
+ # 2.5.5.17
+ 'enabled': True,
+ 'attribute': 'mS-DS-CreatorSID',
+ 'single_valued': True,
+ 'source_type': 'AD',
+ 'for_classes': ['user'],
+ 'value_type': claims.CLAIM_TYPE_STRING,
+ },
+ ],
+ 'class': 'user',
+ },
+ {
+ 'name': 'no value set',
+ 'claims': [
+ {
+ # 2.5.5.12
+ 'enabled': True,
+ 'attribute': 'carLicense',
+ 'single_valued': True,
+ 'source_type': 'AD',
+ 'for_classes': ['user'],
+ 'value_type': claims.CLAIM_TYPE_STRING,
+ },
+ ],
+ 'class': 'user',
+ },
+ {
+ 'name': 'multi-valued claim',
+ 'claims': [
+ {
+ # 2.5.5.12
+ 'enabled': True,
+ 'attribute': 'carLicense',
+ 'single_valued': True,
+ 'source_type': 'AD',
+ 'for_classes': ['user'],
+ 'value_type': claims.CLAIM_TYPE_STRING,
+ 'values': ('foo', 'bar', 'baz'),
+ 'expected': True,
+ },
+ ],
+ 'class': 'user',
+ },
+ {
+ 'name': 'missing attribute',
+ 'claims': [
+ {
+ 'enabled': True,
+ 'single_valued': True,
+ 'source_type': 'AD',
+ 'for_classes': ['user'],
+ 'value_type': claims.CLAIM_TYPE_STRING,
+ },
+ ],
+ 'class': 'user',
+ },
+ {
+ 'name': 'invalid attribute',
+ 'claims': [
+ {
+ # 2.5.5.10
+ 'enabled': True,
+ 'attribute': 'unicodePwd',
+ 'single_valued': True,
+ 'source_type': 'AD',
+ 'for_classes': ['user'],
+ 'value_type': claims.CLAIM_TYPE_STRING,
+ },
+ ],
+ 'class': 'user',
+ },
+ {
+ 'name': 'incorrect value type',
+ 'claims': [
+ {
+ # 2.5.5.12
+ 'enabled': True,
+ 'attribute': 'carLicense',
+ 'single_valued': True,
+ 'source_type': 'AD',
+ 'for_classes': ['user'],
+ 'value_type': claims.CLAIM_TYPE_INT64,
+ 'values': ('foo',),
+ },
+ ],
+ 'class': 'user',
+ },
+ {
+ 'name': 'invalid value type',
+ 'claims': [
+ {
+ # 2.5.5.12
+ 'enabled': True,
+ 'attribute': 'carLicense',
+ 'single_valued': True,
+ 'source_type': 'AD',
+ 'for_classes': ['user'],
+ 'value_type': 0,
+ 'values': ('foo',),
+ },
+ ],
+ 'class': 'user',
+ },
+ {
+ 'name': 'missing value type',
+ 'claims': [
+ {
+ # 2.5.5.12
+ 'enabled': True,
+ 'attribute': 'carLicense',
+ 'single_valued': True,
+ 'source_type': 'AD',
+ 'for_classes': ['user'],
+ 'values': ('foo',),
+ },
+ ],
+ 'class': 'user',
+ },
+ {
+ 'name': 'string syntax, duplicate claim',
+ 'claims': [
+ {
+ # 2.5.5.12
+ 'enabled': True,
+ 'attribute': 'carLicense',
+ 'single_valued': True,
+ 'source_type': 'AD',
+ 'for_classes': ['user'],
+ 'value_type': claims.CLAIM_TYPE_STRING,
+ 'values': ('foo',),
+ 'expected': True,
+ },
+ ] * 2, # Create two string syntax claims.
+ 'class': 'user',
+ },
+ {
+ 'name': 'multiple claims',
+ 'claims': [
+ {
+ # 2.5.5.12
+ 'enabled': True,
+ 'attribute': 'carLicense',
+ 'single_valued': True,
+ 'source_type': 'AD',
+ 'for_classes': ['user'],
+ 'value_type': claims.CLAIM_TYPE_STRING,
+ 'values': ('foo', 'bar', 'baz'),
+ 'expected': True,
+ },
+ {
+ # 2.5.5.8
+ 'enabled': True,
+ 'attribute': 'msTSAllowLogon',
+ 'single_valued': True,
+ 'source_type': 'AD',
+ 'for_classes': ['user'],
+ 'value_type': claims.CLAIM_TYPE_BOOLEAN,
+ 'values': (True,),
+ 'expected': True,
+ },
+ ],
+ 'class': 'user',
+ },
+ {
+ 'name': 'case difference for source type',
+ 'claims': [
+ {
+ # 2.5.5.12
+ 'enabled': True,
+ 'attribute': 'carLicense',
+ 'single_valued': True,
+ 'source_type': 'ad',
+ 'for_classes': ['user'],
+ 'value_type': claims.CLAIM_TYPE_STRING,
+ 'values': ('foo',),
+ 'expected': True,
+ },
+ ],
+ 'class': 'user',
+ },
+ {
+ 'name': 'unhandled source type',
+ 'claims': [
+ {
+ # 2.5.5.12
+ 'enabled': True,
+ 'attribute': 'carLicense',
+ 'single_valued': True,
+ 'source_type': '<unknown>',
+ 'for_classes': ['user'],
+ 'value_type': claims.CLAIM_TYPE_STRING,
+ 'values': ('foo',),
+ },
+ ],
+ 'class': 'user',
+ },
+ {
+ 'name': 'disabled claim',
+ 'claims': [
+ {
+ # 2.5.5.12
+ 'enabled': False,
+ 'attribute': 'carLicense',
+ 'single_valued': True,
+ 'source_type': 'AD',
+ 'for_classes': ['user'],
+ 'value_type': claims.CLAIM_TYPE_STRING,
+ 'values': ('foo',),
+ },
+ ],
+ 'class': 'user',
+ },
+ {
+ 'name': 'not enabled claim',
+ 'claims': [
+ {
+ # 2.5.5.12
+ 'attribute': 'carLicense',
+ 'single_valued': True,
+ 'source_type': 'AD',
+ 'for_classes': ['user'],
+ 'value_type': claims.CLAIM_TYPE_STRING,
+ 'values': ('foo',),
+ },
+ ],
+ 'class': 'user',
+ },
+ {
+ 'name': 'not applicable to any class',
+ 'claims': [
+ {
+ # 2.5.5.12
+ 'enabled': True,
+ 'attribute': 'carLicense',
+ 'single_valued': True,
+ 'source_type': 'AD',
+ 'value_type': claims.CLAIM_TYPE_STRING,
+ 'values': ('foo',),
+ },
+ ],
+ 'class': 'user',
+ },
+ {
+ 'name': 'not applicable to class',
+ 'claims': [
+ {
+ # 2.5.5.12
+ 'enabled': True,
+ 'attribute': 'carLicense',
+ 'single_valued': True,
+ 'source_type': 'AD',
+ 'for_classes': ['user'],
+ 'value_type': claims.CLAIM_TYPE_STRING,
+ 'values': ('foo',),
+ },
+ ],
+ 'class': 'computer',
+ },
+ {
+ 'name': 'applicable to class',
+ 'claims': [
+ {
+ # 2.5.5.12
+ 'enabled': True,
+ 'attribute': 'carLicense',
+ 'single_valued': True,
+ 'source_type': 'AD',
+ 'for_classes': ['user', 'computer'],
+ 'value_type': claims.CLAIM_TYPE_STRING,
+ 'values': ('foo',),
+ 'expected': True,
+ },
+ ],
+ 'class': 'computer',
+ },
+ {
+ 'name': 'applicable to base class',
+ 'claims': [
+ {
+ # 2.5.5.12
+ 'enabled': True,
+ 'attribute': 'carLicense',
+ 'single_valued': True,
+ 'source_type': 'AD',
+ 'for_classes': ['top'],
+ 'value_type': claims.CLAIM_TYPE_STRING,
+ 'values': ('foo',),
+ },
+ ],
+ 'class': 'user',
+ },
+ {
+ 'name': 'applicable to base class 2',
+ 'claims': [
+ {
+ # 2.5.5.12
+ 'enabled': True,
+ 'attribute': 'carLicense',
+ 'single_valued': True,
+ 'source_type': 'AD',
+ 'for_classes': ['organizationalPerson'],
+ 'value_type': claims.CLAIM_TYPE_STRING,
+ 'values': ('foo',),
+ },
+ ],
+ 'class': 'user',
+ },
+ {
+ 'name': 'large compressed claim',
+ 'claims': [
+ {
+ # 2.5.5.12
+ 'enabled': True,
+ 'attribute': 'carLicense',
+ 'single_valued': True,
+ 'source_type': 'AD',
+ 'for_classes': ['user'],
+ 'value_type': claims.CLAIM_TYPE_STRING,
+ # a large value that should cause the claim to be
+ # compressed.
+ 'values': ('a' * 10000,),
+ 'expected': True,
+ },
+ ],
+ 'class': 'user',
+ },
+ ]
+
    def _test_device_claims_with_args(self, case):
        """Run one data-driven device-claims testcase.

        *case* is a dict of named parameters (see ``device_claims_cases``);
        each parameter is consumed with ``pop`` so that any unrecognised
        leftover key fails the test.  The method creates a user and a fresh
        computer account, obtains TGTs for both (checking the expected SIDs
        and claims), optionally rewrites the machine PAC, and finally performs
        a FAST-armored TGS-REQ whose reply PAC is checked against the
        expected user/device groups and claims.
        """
        # The group arrangement for the test.
        group_setup = case.pop('groups')

        # Groups that should be the primary group for the user and machine
        # respectively.
        primary_group = case.pop('primary_group', None)
        mach_primary_group = case.pop('mach:primary_group', None)

        # Whether the TGS-REQ should be directed to the krbtgt.
        tgs_to_krbtgt = case.pop('tgs:to_krbtgt', None)

        # Whether the target server of the TGS-REQ should support compound
        # identity or resource SID compression.
        tgs_compound_id = case.pop('tgs:compound_id', None)
        tgs_compression = case.pop('tgs:compression', None)

        # Optional SIDs to replace those in the machine account PAC prior to a
        # TGS-REQ.
        tgs_mach_sids = case.pop('tgs:mach:sids', None)

        # Optional machine SID to replace that in the PAC prior to a TGS-REQ.
        tgs_mach_sid = case.pop('tgs:mach_sid', None)

        # User flags that may be set or reset in the PAC prior to a TGS-REQ.
        tgs_mach_set_user_flags = case.pop('tgs:mach:set_user_flags', None)
        tgs_mach_reset_user_flags = case.pop('tgs:mach:reset_user_flags', None)

        # The SIDs we expect to see in the PAC after a AS-REQ or a TGS-REQ.
        as_expected = case.pop('as:expected', None)
        as_mach_expected = case.pop('as:mach:expected', None)
        tgs_expected = case.pop('tgs:expected', None)
        tgs_device_expected = case.pop('tgs:device:expected', None)

        # Whether to specify claims support in PA-PAC-OPTIONS.
        pac_options_claims = case.pop('pac-options:claims-support', None)

        all_claims = case.pop('claims')

        # There should be no parameters remaining in the testcase.
        self.assertFalse(case, 'unexpected parameters in testcase')

        if as_expected is None:
            self.assertIsNotNone(tgs_expected,
                                 'no set of expected SIDs is provided')

        if as_mach_expected is None:
            self.assertIsNotNone(tgs_expected,
                                 'no set of expected machine SIDs is provided')

        if tgs_to_krbtgt is None:
            tgs_to_krbtgt = False

        if tgs_compound_id is None and not tgs_to_krbtgt:
            # Assume the service supports compound identity by default.
            tgs_compound_id = True

        if tgs_to_krbtgt:
            self.assertIsNone(tgs_device_expected,
                              'device SIDs are not added for a krbtgt request')

            self.assertIsNotNone(tgs_expected,
                                 'no set of expected TGS SIDs is provided')

        if tgs_mach_sid is not None:
            self.assertIsNotNone(tgs_mach_sids,
                                 'specified TGS-REQ mach SID, but no '
                                 'accompanying machine SIDs provided')

        if tgs_mach_set_user_flags is None:
            tgs_mach_set_user_flags = 0
        else:
            self.assertIsNotNone(tgs_mach_sids,
                                 'specified TGS-REQ set user flags, but no '
                                 'accompanying machine SIDs provided')

        if tgs_mach_reset_user_flags is None:
            tgs_mach_reset_user_flags = 0
        else:
            self.assertIsNotNone(tgs_mach_sids,
                                 'specified TGS-REQ reset user flags, but no '
                                 'accompanying machine SIDs provided')

        if pac_options_claims is None:
            pac_options_claims = True

        # setup_claims() returns the account details needed for the claims,
        # an (optional) modify message to change claim attributes later, and
        # the claim sets expected/not expected in the PAC.
        (details, mod_msg,
         expected_claims,
         unexpected_claims) = self.setup_claims(all_claims)

        samdb = self.get_samdb()

        domain_sid = samdb.get_domain_sid()

        user_creds = self.get_cached_creds(
            account_type=self.AccountType.USER)
        user_dn = user_creds.get_dn()
        user_sid = user_creds.get_sid()

        # Create a fresh computer account carrying the claim attributes.
        mach_name = self.get_new_username()
        mach_creds, mach_dn_str = self.create_account(
            samdb,
            mach_name,
            account_type=self.AccountType.COMPUTER,
            additional_details=details)
        mach_dn = ldb.Dn(samdb, mach_dn_str)
        mach_sid = mach_creds.get_sid()

        user_principal = Principal(user_dn, user_sid)
        mach_principal = Principal(mach_dn, mach_sid)
        preexisting_groups = {
            self.user: user_principal,
            self.mach: mach_principal,
        }
        primary_groups = {}
        if primary_group is not None:
            primary_groups[user_principal] = primary_group
        if mach_primary_group is not None:
            primary_groups[mach_principal] = mach_primary_group
        groups = self.setup_groups(samdb,
                                   preexisting_groups,
                                   group_setup,
                                   primary_groups)
        del group_setup

        tgs_user_sid = user_sid
        tgs_user_domain_sid, tgs_user_rid = tgs_user_sid.rsplit('-', 1)

        # The testcase may name a group as the machine SID; resolve it to the
        # SID of the group actually created.
        if tgs_mach_sid is None:
            tgs_mach_sid = mach_sid
        elif tgs_mach_sid in groups:
            tgs_mach_sid = groups[tgs_mach_sid].sid

        tgs_mach_domain_sid, tgs_mach_rid = tgs_mach_sid.rsplit('-', 1)

        # Map testcase placeholder names to the real SIDs.
        expected_groups = self.map_sids(as_expected, groups,
                                        domain_sid)
        mach_expected_groups = self.map_sids(as_mach_expected, groups,
                                             domain_sid)
        tgs_mach_sids_mapped = self.map_sids(tgs_mach_sids, groups,
                                             tgs_mach_domain_sid)
        tgs_expected_mapped = self.map_sids(tgs_expected, groups,
                                            tgs_user_domain_sid)
        tgs_device_expected_mapped = self.map_sids(tgs_device_expected, groups,
                                                   tgs_mach_domain_sid)

        user_tgt = self.get_tgt(user_creds, expected_groups=expected_groups)

        # Get a TGT for the computer.
        mach_tgt = self.get_tgt(mach_creds, expect_pac=True,
                                expected_groups=mach_expected_groups,
                                expect_client_claims=True,
                                expected_client_claims=expected_claims,
                                unexpected_client_claims=unexpected_claims)

        if tgs_mach_sids is not None:
            # Replace the SIDs in the PAC with the ones provided by the test.
            mach_tgt = self.ticket_with_sids(mach_tgt,
                                             tgs_mach_sids_mapped,
                                             tgs_mach_domain_sid,
                                             tgs_mach_rid,
                                             set_user_flags=tgs_mach_set_user_flags,
                                             reset_user_flags=tgs_mach_reset_user_flags)

        if mod_msg:
            self.assertFalse(tgs_to_krbtgt,
                             'device claims are omitted for a krbtgt request, '
                             'so specifying mod_values is probably a mistake!')

            # Change the value of attributes used for claims.  The TGS claims
            # should nevertheless reflect the values at AS-REQ time.
            mod_msg.dn = mach_dn
            samdb.modify(mod_msg)

        domain_sid = samdb.get_domain_sid()

        # Build the FAST armor key from a fresh subkey and the machine TGT's
        # session key.
        subkey = self.RandomKey(user_tgt.session_key.etype)

        armor_subkey = self.RandomKey(subkey.etype)
        explicit_armor_key = self.generate_armor_key(armor_subkey,
                                                     mach_tgt.session_key)
        armor_key = kcrypto.cf2(explicit_armor_key.key,
                                subkey.key,
                                b'explicitarmor',
                                b'tgsarmor')
        armor_key = Krb5EncryptionKey(armor_key, None)

        target_creds, sname = self.get_target(
            to_krbtgt=tgs_to_krbtgt,
            compound_id=tgs_compound_id,
            compression=tgs_compression)
        srealm = target_creds.get_realm()

        decryption_key = self.TicketDecryptionKey_from_creds(
            target_creds)

        etypes = (AES256_CTS_HMAC_SHA1_96, ARCFOUR_HMAC_MD5)

        kdc_options = '0'
        if pac_options_claims:
            pac_options = '1'  # claims support
        else:
            pac_options = '0'  # no claims support

        requester_sid = None
        if tgs_to_krbtgt:
            requester_sid = user_sid

        if not tgs_compound_id:
            # Without compound identity no device claims appear in the reply.
            expected_claims = None
            unexpected_claims = None

        # Get a service ticket for the user, using the computer's TGT as an
        # armor TGT. The claim value should not have changed.

        kdc_exchange_dict = self.tgs_exchange_dict(
            creds=user_creds,
            expected_crealm=user_tgt.crealm,
            expected_cname=user_tgt.cname,
            expected_srealm=srealm,
            expected_sname=sname,
            ticket_decryption_key=decryption_key,
            generate_fast_fn=self.generate_simple_fast,
            generate_fast_armor_fn=self.generate_ap_req,
            check_rep_fn=self.generic_check_kdc_rep,
            check_kdc_private_fn=self.generic_check_kdc_private,
            tgt=user_tgt,
            armor_key=armor_key,
            armor_tgt=mach_tgt,
            armor_subkey=armor_subkey,
            pac_options=pac_options,
            authenticator_subkey=subkey,
            kdc_options=kdc_options,
            expect_pac=True,
            expect_pac_attrs=tgs_to_krbtgt,
            expect_pac_attrs_pac_request=tgs_to_krbtgt,
            expected_sid=tgs_user_sid,
            expected_requester_sid=requester_sid,
            expected_domain_sid=tgs_user_domain_sid,
            expected_device_domain_sid=tgs_mach_domain_sid,
            expected_groups=tgs_expected_mapped,
            unexpected_groups=None,
            expect_client_claims=True,
            expected_client_claims=None,
            expect_device_info=bool(tgs_compound_id),
            expected_device_groups=tgs_device_expected_mapped,
            expect_device_claims=bool(tgs_compound_id),
            expected_device_claims=expected_claims,
            unexpected_device_claims=unexpected_claims)

        rep = self._generic_kdc_exchange(kdc_exchange_dict,
                                         cname=None,
                                         realm=srealm,
                                         sname=sname,
                                         etypes=etypes)
        self.check_reply(rep, KRB_TGS_REP)
+
    # Data-driven testcases consumed by _test_device_claims_with_args().
    # Each dict's keys are the parameters that method pops; presumably the
    # names mach/asserted_identity/default_attrs/resource_attrs/GroupType are
    # class- or module-level constants defined earlier in this file — TODO
    # confirm against the full file.
    device_claims_cases = [
        {
            # Make a TGS request containing claims, but omit the Claims Valid
            # SID.
            'test': 'device to service no claims valid sid',
            'groups': {
                # Some groups to test how the device info is generated.
                'foo': (GroupType.DOMAIN_LOCAL, {mach}),
                'bar': (GroupType.DOMAIN_LOCAL, {mach}),
            },
            'claims': [
                {
                    # 2.5.5.10
                    'enabled': True,
                    'attribute': 'middleName',
                    'single_valued': True,
                    'source_type': 'AD',
                    'for_classes': ['computer'],
                    'value_type': claims.CLAIM_TYPE_STRING,
                    'values': ('foo',),
                    'expected': True,
                    'mod_values': ['bar'],
                },
            ],
            'as:expected': {
                (asserted_identity, SidType.EXTRA_SID, default_attrs),
                (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
                (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
                (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
            },
            'as:mach:expected': {
                (asserted_identity, SidType.EXTRA_SID, default_attrs),
                (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
                (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.PRIMARY_GID, None),
                (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
            },
            'tgs:mach:sids': {
                (asserted_identity, SidType.EXTRA_SID, default_attrs),
                (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
                (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.PRIMARY_GID, None),
                # Omit the Claims Valid SID, and verify that this doesn't
                # affect the propagation of claims into the final ticket.

                # Some extra SIDs to show how they are propagated into the
                # final ticket.
                ('S-1-5-22-1-2-3-4', SidType.EXTRA_SID, default_attrs),
                ('S-1-5-22-1-2-3-5', SidType.EXTRA_SID, default_attrs),
            },
            'tgs:to_krbtgt': False,
            'tgs:expected': {
                (security.SID_AUTHENTICATION_AUTHORITY_ASSERTED_IDENTITY, SidType.EXTRA_SID, default_attrs),
                (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
                (security.SID_COMPOUNDED_AUTHENTICATION, SidType.EXTRA_SID, default_attrs),
                (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
                (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
            },
            'tgs:device:expected': {
                (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
                (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.PRIMARY_GID, None),
                (asserted_identity, SidType.EXTRA_SID, default_attrs),
                ('S-1-5-22-1-2-3-4', SidType.EXTRA_SID, default_attrs),
                ('S-1-5-22-1-2-3-5', SidType.EXTRA_SID, default_attrs),
                frozenset([
                    ('foo', SidType.RESOURCE_SID, resource_attrs),
                    ('bar', SidType.RESOURCE_SID, resource_attrs),
                ]),
            },
        },
        {
            # Make a TGS request containing claims to a service that lacks
            # support for compound identity. The claims are not propagated to
            # the final ticket.
            'test': 'device to service no compound id',
            'groups': {
                'foo': (GroupType.DOMAIN_LOCAL, {mach}),
                'bar': (GroupType.DOMAIN_LOCAL, {mach}),
            },
            'claims': [
                {
                    # 2.5.5.10
                    'enabled': True,
                    'attribute': 'middleName',
                    'single_valued': True,
                    'source_type': 'AD',
                    'for_classes': ['computer'],
                    'value_type': claims.CLAIM_TYPE_STRING,
                    'values': ('foo',),
                    'expected': True,
                    'mod_values': ['bar'],
                },
            ],
            'as:expected': {
                (asserted_identity, SidType.EXTRA_SID, default_attrs),
                (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
                (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
                (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
            },
            'as:mach:expected': {
                (asserted_identity, SidType.EXTRA_SID, default_attrs),
                (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
                (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.PRIMARY_GID, None),
                (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
            },
            'tgs:to_krbtgt': False,
            # Compound identity is unsupported.
            'tgs:compound_id': False,
            'tgs:expected': {
                (security.SID_AUTHENTICATION_AUTHORITY_ASSERTED_IDENTITY, SidType.EXTRA_SID, default_attrs),
                (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
                # The Compounded Authentication SID should not be present.
                (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
                (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
            },
        },
        {
            # Make a TGS request containing claims to a service, but don't
            # specify support for claims in PA-PAC-OPTIONS. We still expect the
            # final PAC to contain claims.
            'test': 'device to service no claims support in pac options',
            'groups': {
                'foo': (GroupType.DOMAIN_LOCAL, {mach}),
                'bar': (GroupType.DOMAIN_LOCAL, {mach}),
            },
            'claims': [
                {
                    # 2.5.5.10
                    'enabled': True,
                    'attribute': 'middleName',
                    'single_valued': True,
                    'source_type': 'AD',
                    'for_classes': ['computer'],
                    'value_type': claims.CLAIM_TYPE_STRING,
                    'values': ('foo',),
                    'expected': True,
                    'mod_values': ['bar'],
                },
            ],
            'as:expected': {
                (asserted_identity, SidType.EXTRA_SID, default_attrs),
                (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
                (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
                (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
            },
            'as:mach:expected': {
                (asserted_identity, SidType.EXTRA_SID, default_attrs),
                (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
                (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.PRIMARY_GID, None),
                (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
            },
            'tgs:to_krbtgt': False,
            # Claims are unsupported.
            'pac-options:claims-support': False,
            'tgs:expected': {
                (security.SID_AUTHENTICATION_AUTHORITY_ASSERTED_IDENTITY, SidType.EXTRA_SID, default_attrs),
                (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
                (security.SID_COMPOUNDED_AUTHENTICATION, SidType.EXTRA_SID, default_attrs),
                (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
                (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
            },
            'tgs:device:expected': {
                (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
                (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.PRIMARY_GID, None),
                frozenset([
                    ('foo', SidType.RESOURCE_SID, resource_attrs),
                    ('bar', SidType.RESOURCE_SID, resource_attrs),
                ]),
                (asserted_identity, SidType.EXTRA_SID, default_attrs),
                frozenset([(security.SID_CLAIMS_VALID, SidType.RESOURCE_SID, default_attrs)]),
            },
        },
    ]
+
    def test_auth_silo_claim(self):
        # Happy path: an enforced silo with the user assigned and added as a
        # member should yield the silo claim (all defaults).
        self.run_auth_silo_claim_test()
+
+ def test_auth_silo_claim_unenforced(self):
+ # The claim is not present if the silo is unenforced.
+ self.run_auth_silo_claim_test(enforced=False,
+ expect_claim=False)
+
+ def test_auth_silo_claim_not_a_member(self):
+ # The claim is not present if the user is not a member of the silo.
+ self.run_auth_silo_claim_test(add_to_silo=False,
+ expect_claim=False)
+
+ def test_auth_silo_claim_unassigned(self):
+ # The claim is not present if the user is not assigned to the silo.
+ self.run_auth_silo_claim_test(assigned=False,
+ expect_claim=False)
+
+ def test_auth_silo_claim_assigned_to_wrong_dn(self):
+ samdb = self.get_samdb()
+
+ # The claim is not present if the user is assigned to some other DN.
+ self.run_auth_silo_claim_test(assigned=self.get_server_dn(samdb),
+ expect_claim=False)
+
+ def run_auth_silo_claim_test(self, *,
+ enforced=True,
+ add_to_silo=True,
+ assigned=True,
+ expect_claim=True):
+ # Create a new authentication silo.
+ silo = self.create_authn_silo(enforced=enforced)
+
+ account_options = None
+ if assigned is not False:
+ if assigned is True:
+ assigned = silo.dn
+
+ account_options = {
+ 'additional_details': self.freeze({
+ # The user is assigned to the authentication silo we just
+ # created, or to some DN specified by a test.
+ 'msDS-AssignedAuthNPolicySilo': str(assigned),
+ }),
+ }
+
+ # Create the user account.
+ creds = self.get_cached_creds(
+ account_type=self.AccountType.USER,
+ opts=account_options)
+
+ if add_to_silo:
+ # Add the account to the silo.
+ self.add_to_group(str(creds.get_dn()),
+ silo.dn,
+ 'msDS-AuthNPolicySiloMembers',
+ expect_attr=False)
+
+ claim_id = self.create_authn_silo_claim_id()
+
+ if expect_claim:
+ expected_claims = {
+ claim_id: {
+ 'source_type': claims.CLAIMS_SOURCE_TYPE_AD,
+ 'type': claims.CLAIM_TYPE_STRING,
+ # Expect a claim containing the name of the silo.
+ 'values': (silo.name,),
+ },
+ }
+ unexpected_claims = None
+ else:
+ expected_claims = None
+ unexpected_claims = {claim_id}
+
+ # Get a TGT and check whether the claim is present or missing.
+ self.get_tgt(creds,
+ expect_pac=True,
+ expect_client_claims=True,
+ expected_client_claims=expected_claims,
+ unexpected_client_claims=unexpected_claims)
+
+
if __name__ == '__main__':
    # Flip these to True to enable ASN.1 pretty-printing and hex dumps when
    # running this file directly.
    global_asn1_print = False
    global_hexdump = False
    import unittest
    unittest.main()
diff --git a/python/samba/tests/krb5/compatability_tests.py b/python/samba/tests/krb5/compatability_tests.py
new file mode 100755
index 0000000..e1ebe18
--- /dev/null
+++ b/python/samba/tests/krb5/compatability_tests.py
@@ -0,0 +1,227 @@
+#!/usr/bin/env python3
+# Unix SMB/CIFS implementation.
+# Copyright (C) Stefan Metzmacher 2020
+# Copyright (C) Catalyst.Net Ltd 2020
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import sys
+import os
+
+sys.path.insert(0, "bin/python")
+os.environ["PYTHONUNBUFFERED"] = "1"
+
+from samba.tests.krb5.kdc_base_test import KDCBaseTest
+import samba.tests.krb5.rfc4120_pyasn1 as krb5_asn1
+from samba.tests.krb5.rfc4120_constants import (
+ AES128_CTS_HMAC_SHA1_96,
+ AES256_CTS_HMAC_SHA1_96,
+ ARCFOUR_HMAC_MD5,
+ KDC_ERR_PREAUTH_REQUIRED,
+ KRB_AS_REP,
+ KRB_ERROR,
+ KU_AS_REP_ENC_PART,
+ KU_PA_ENC_TIMESTAMP,
+ PADATA_ENC_TIMESTAMP,
+ PADATA_ETYPE_INFO2,
+ NT_PRINCIPAL,
+ NT_SRV_INST,
+)
+
# Debugging toggles; may be overridden in the __main__ block below.
global_asn1_print = False
global_hexdump = False

# DER application tags for the AS-REP encrypted part: Heimdal uses
# EncASRepPart ([APPLICATION 25] -> 0x79), while MIT reuses the
# EncTGSRepPart tag ([APPLICATION 26] -> 0x7A).
HEIMDAL_ENC_AS_REP_PART_TYPE_TAG = 0x79
# MIT uses the EncTGSRepPart tag for the EncASRepPart
MIT_ENC_AS_REP_PART_TYPE_TAG = 0x7A

# Ticket flag bit 15 (enc-pa-rep), RFC 6806 section 11.
ENC_PA_REP_FLAG = 0x00010000
+
+
class CompatabilityTests(KDCBaseTest):
    """Probe KDC behaviours in which MIT and Heimdal implementations differ.

    MIT and Heimdal disagree on several wire-visible details: the ASN.1
    application tag used for the AS-REP encrypted part, whether the optional
    kvno field appears in EncryptedData, and whether ARCFOUR-HMAC-MD5
    ETYPE-INFO2 entries carry a salt.  Each mit/heimdal pair of tests below
    asserts one side of such a difference, so against any given KDC only one
    of the pair can pass; the test environment selects which one is expected.
    """

    def setUp(self):
        super().setUp()
        # Debugging toggles, settable from the __main__ block.
        self.do_asn1_print = global_asn1_print
        self.do_hexdump = global_hexdump

    def test_mit_EncASRepPart_tag(self):
        """MIT tags the AS-REP enc-part with the EncTGSRepPart tag (0x7A)."""
        creds = self.get_user_creds()
        (enc, _) = self.as_req(creds)
        self.assertEqual(MIT_ENC_AS_REP_PART_TYPE_TAG, enc[0])

    def test_heimdal_EncASRepPart_tag(self):
        """Heimdal tags the AS-REP enc-part with the EncASRepPart tag (0x79)."""
        creds = self.get_user_creds()
        (enc, _) = self.as_req(creds)
        self.assertEqual(HEIMDAL_ENC_AS_REP_PART_TYPE_TAG, enc[0])

    def test_mit_EncryptedData_kvno(self):
        """MIT omits the optional kvno field from the AS-REP EncryptedData."""
        creds = self.get_user_creds()
        (_, enc) = self.as_req(creds)
        if 'kvno' in enc:
            self.fail("kvno present in EncryptedData")

    def test_heimdal_EncryptedData_kvno(self):
        """Heimdal includes the kvno field in the AS-REP EncryptedData."""
        creds = self.get_user_creds()
        (_, enc) = self.as_req(creds)
        if 'kvno' not in enc:
            self.fail("kvno absent in EncryptedData")

    def test_mit_EncASRepPart_FAST_support(self):
        """MIT advertises FAST support via the enc-pa-rep ticket flag."""
        creds = self.get_user_creds()
        (enc, _) = self.as_req(creds)
        self.assertEqual(MIT_ENC_AS_REP_PART_TYPE_TAG, enc[0])
        as_rep = self.der_decode(enc, asn1Spec=krb5_asn1.EncTGSRepPart())
        # The decoded flags are a string of binary digits.
        flags = int(as_rep['flags'], base=2)
        # MIT sets enc-pa-rep, flag bit 15
        # RFC 6806 11. Negotiation of FAST and Detecting Modified Requests
        self.assertTrue(ENC_PA_REP_FLAG & flags)

    def test_heimdal_and_windows_EncASRepPart_FAST_support(self):
        """Heimdal and Windows advertise FAST support via enc-pa-rep."""
        creds = self.get_user_creds()
        (enc, _) = self.as_req(creds)
        self.assertEqual(HEIMDAL_ENC_AS_REP_PART_TYPE_TAG, enc[0])
        as_rep = self.der_decode(enc, asn1Spec=krb5_asn1.EncASRepPart())
        # The decoded flags are a string of binary digits.  (A redundant
        # intermediate assignment of the raw value was removed.)
        flags = int(as_rep['flags'], base=2)
        # Heimdal and Windows do set enc-pa-rep, flag bit 15
        # RFC 6806 11. Negotiation of FAST and Detecting Modified Requests
        self.assertTrue(ENC_PA_REP_FLAG & flags)

    def test_mit_arcfour_salt(self):
        """MIT populates the ETYPE-INFO2 salt for ARCFOUR-HMAC-MD5."""
        creds = self.get_user_creds()
        etypes = (ARCFOUR_HMAC_MD5,)
        (rep, *_) = self.as_pre_auth_req(creds, etypes)
        self.check_preauth_rep(rep)
        etype_info2 = self.get_etype_info2(rep)
        if 'salt' not in etype_info2[0]:
            self.fail(
                "(MIT) Salt not populated for ARCFOUR_HMAC_MD5 encryption")

    def test_heimdal_arcfour_salt(self):
        """Heimdal leaves the ETYPE-INFO2 salt empty for ARCFOUR-HMAC-MD5."""
        creds = self.get_user_creds()
        etypes = (ARCFOUR_HMAC_MD5,)
        (rep, *_) = self.as_pre_auth_req(creds, etypes)
        self.check_preauth_rep(rep)
        etype_info2 = self.get_etype_info2(rep)
        if 'salt' in etype_info2[0]:
            self.fail(
                "(Heimdal) Salt populated for ARCFOUR_HMAC_MD5 encryption")

    def as_pre_auth_req(self, creds, etypes):
        """Send an AS-REQ without pre-authentication data.

        Returns (reply, cname, sname, realm, till) so callers can reuse the
        request parameters for a follow-up pre-authenticated AS-REQ.
        """
        user = creds.get_username()
        realm = creds.get_realm()

        cname = self.PrincipalName_create(
            name_type=NT_PRINCIPAL,
            names=[user])
        sname = self.PrincipalName_create(
            name_type=NT_SRV_INST,
            names=["krbtgt", realm])

        # Request a ticket valid for ten hours.
        till = self.get_KerberosTime(offset=36000)

        kdc_options = krb5_asn1.KDCOptions('forwardable')
        padata = None

        req = self.AS_REQ_create(padata=padata,
                                 kdc_options=str(kdc_options),
                                 cname=cname,
                                 realm=realm,
                                 sname=sname,
                                 from_time=None,
                                 till_time=till,
                                 renew_time=None,
                                 nonce=0x7fffffff,
                                 etypes=etypes,
                                 addresses=None,
                                 additional_tickets=None)
        rep = self.send_recv_transaction(req)

        return (rep, cname, sname, realm, till)

    def check_preauth_rep(self, rep):
        """Assert that *rep* is the expected PREAUTH_REQUIRED KRB-ERROR."""
        self.assertIsNotNone(rep)
        self.assertEqual(rep['msg-type'], KRB_ERROR)
        self.assertEqual(rep['error-code'], KDC_ERR_PREAUTH_REQUIRED)

    def get_etype_info2(self, rep):
        """Extract the decoded ETYPE-INFO2 sequence from a KRB-ERROR reply.

        Fails the test with a clear message if the reply's e-data carries no
        PADATA-ETYPE-INFO2 entry (previously this surfaced as an
        UnboundLocalError).
        """
        rep_padata = self.der_decode(
            rep['e-data'],
            asn1Spec=krb5_asn1.METHOD_DATA())

        for pa in rep_padata:
            if pa['padata-type'] == PADATA_ETYPE_INFO2:
                etype_info2 = pa['padata-value']
                break
        else:
            self.fail("no PADATA-ETYPE-INFO2 entry in KRB-ERROR e-data")

        etype_info2 = self.der_decode(
            etype_info2,
            asn1Spec=krb5_asn1.ETYPE_INFO2())
        return etype_info2

    def as_req(self, creds):
        """Perform a full AS exchange with encrypted-timestamp pre-auth.

        Returns (decrypted enc-part bytes, raw enc-part element); the first
        byte of the decrypted data is the DER application tag the tag tests
        inspect.
        """
        etypes = (
            AES256_CTS_HMAC_SHA1_96,
            AES128_CTS_HMAC_SHA1_96,
            ARCFOUR_HMAC_MD5)
        (rep, cname, sname, realm, till) = self.as_pre_auth_req(creds, etypes)
        self.check_preauth_rep(rep)

        etype_info2 = self.get_etype_info2(rep)
        key = self.PasswordKey_from_etype_info2(creds, etype_info2[0])

        # Build the PA-ENC-TIMESTAMP pre-authentication entry.
        (patime, pausec) = self.get_KerberosTimeWithUsec()
        pa_ts = self.PA_ENC_TS_ENC_create(patime, pausec)
        pa_ts = self.der_encode(pa_ts, asn1Spec=krb5_asn1.PA_ENC_TS_ENC())

        pa_ts = self.EncryptedData_create(key, KU_PA_ENC_TIMESTAMP, pa_ts)
        pa_ts = self.der_encode(pa_ts, asn1Spec=krb5_asn1.EncryptedData())

        pa_ts = self.PA_DATA_create(PADATA_ENC_TIMESTAMP, pa_ts)

        kdc_options = krb5_asn1.KDCOptions('forwardable')
        padata = [pa_ts]

        req = self.AS_REQ_create(padata=padata,
                                 kdc_options=str(kdc_options),
                                 cname=cname,
                                 realm=realm,
                                 sname=sname,
                                 from_time=None,
                                 till_time=till,
                                 renew_time=None,
                                 nonce=0x7fffffff,
                                 etypes=etypes,
                                 addresses=None,
                                 additional_tickets=None)
        rep = self.send_recv_transaction(req)
        self.assertIsNotNone(rep)

        msg_type = rep['msg-type']
        self.assertEqual(msg_type, KRB_AS_REP)

        enc_part = rep['enc-part']
        enc_as_rep_part = key.decrypt(
            KU_AS_REP_ENC_PART, enc_part['cipher'])
        return (enc_as_rep_part, enc_part)
+
+
+if __name__ == "__main__":
+ global_asn1_print = False
+ global_hexdump = False
+ import unittest
+ unittest.main()
diff --git a/python/samba/tests/krb5/conditional_ace_tests.py b/python/samba/tests/krb5/conditional_ace_tests.py
new file mode 100755
index 0000000..f8dc0ef
--- /dev/null
+++ b/python/samba/tests/krb5/conditional_ace_tests.py
@@ -0,0 +1,5588 @@
+#!/usr/bin/env python3
+# Unix SMB/CIFS implementation.
+# Copyright (C) Stefan Metzmacher 2020
+# Copyright (C) Catalyst.Net Ltd 2023
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import sys
+import os
+
+sys.path.insert(0, 'bin/python')
+os.environ['PYTHONUNBUFFERED'] = '1'
+
+from collections import OrderedDict
+from functools import partial
+import re
+from string import Formatter
+
+import ldb
+
+from samba import dsdb, ntstatus
+from samba.dcerpc import claims, krb5pac, netlogon, security
+from samba.ndr import ndr_pack, ndr_unpack
+from samba.sd_utils import escaped_claim_id
+
+from samba.tests import DynamicTestCase, env_get_var_value
+from samba.tests.krb5.authn_policy_tests import (
+ AuditEvent,
+ AuditReason,
+ AuthnPolicyBaseTests,
+)
+from samba.tests.krb5.raw_testcase import RawKerberosTest
+from samba.tests.krb5.rfc4120_constants import (
+ KDC_ERR_BADOPTION,
+ KDC_ERR_GENERIC,
+ KDC_ERR_POLICY,
+ NT_PRINCIPAL,
+)
+import samba.tests.krb5.rfc4120_pyasn1 as krb5_asn1
+
+# Convenience alias for the SID-type enumeration used by these tests.
+SidType = RawKerberosTest.SidType
+
+# Module-wide debugging switches, copied into each test in setUp().
+global_asn1_print = False
+global_hexdump = False
+
+
+# When used as a test outcome, indicates that the test can cause a Windows
+# server to crash, and is to be run with caution.
+CRASHES_WINDOWS = object()
+
+
+class ConditionalAceBaseTests(AuthnPolicyBaseTests):
+    """Shared fixtures for conditional-ACE authentication policy tests.
+
+    Groups, silos, claims and the various machine/user accounts used by the
+    concrete test classes are created lazily — once per class — on first
+    setUp() invocation.
+    """
+
+    # Constants for group SID attributes.
+    default_attrs = security.SE_GROUP_DEFAULT_FLAGS
+    resource_attrs = default_attrs | security.SE_GROUP_RESOURCE
+
+    aa_asserted_identity = (
+        security.SID_AUTHENTICATION_AUTHORITY_ASSERTED_IDENTITY)
+    service_asserted_identity = security.SID_SERVICE_ASSERTED_IDENTITY
+
+    @classmethod
+    def setUpClass(cls):
+        super().setUpClass()
+
+        # Flag noting whether the shared fixtures below have been created
+        # yet; the work happens in setUp() on first use.
+        cls._setup = False
+
+    def setUp(self):
+        """Copy debugging switches and lazily create the shared fixtures."""
+        super().setUp()
+        self.do_asn1_print = global_asn1_print
+        self.do_hexdump = global_hexdump
+
+        if not self._setup:
+            samdb = self.get_samdb()
+            # Fixtures are stored on the class so they are created only once
+            # and shared by every test in the class.
+            cls = type(self)
+
+            # Create a machine account with which to perform FAST.
+            cls._mach_creds = self.get_cached_creds(
+                account_type=self.AccountType.COMPUTER)
+
+            # Create an account with which to perform SamLogon.
+            cls._mach_creds_ntlm = self._get_creds(
+                account_type=self.AccountType.USER,
+                ntlm=True)
+
+            # Create some new groups.
+
+            group0_name = self.get_new_username()
+            group0_dn = self.create_group(samdb, group0_name)
+            cls._group0_sid = self.get_objectSid(samdb, group0_dn)
+
+            group1_name = self.get_new_username()
+            group1_dn = self.create_group(samdb, group1_name)
+            cls._group1_sid = self.get_objectSid(samdb, group1_dn)
+
+            # Create machine accounts with which to perform FAST that belong to
+            # various arrangements of the groups.
+
+            cls._member_of_both_creds = self.get_cached_creds(
+                account_type=self.AccountType.COMPUTER,
+                opts={'member_of': (group0_dn, group1_dn)})
+
+            cls._member_of_one_creds = self.get_cached_creds(
+                account_type=self.AccountType.COMPUTER,
+                opts={'member_of': (group1_dn,)})
+
+            cls._member_of_both_creds_ntlm = self.get_cached_creds(
+                account_type=self.AccountType.USER,
+                opts={
+                    'member_of': (group0_dn, group1_dn),
+                    'kerberos_enabled': False,
+                })
+
+            # Create some authentication silos.
+            cls._unenforced_silo = self.create_authn_silo(enforced=False)
+            cls._enforced_silo = self.create_authn_silo(enforced=True)
+
+            # Create machine accounts with which to perform FAST that belong to
+            # the respective silos.
+
+            cls._member_of_unenforced_silo = self._get_creds(
+                account_type=self.AccountType.COMPUTER,
+                assigned_silo=self._unenforced_silo,
+                cached=True)
+            self.add_to_group(str(self._member_of_unenforced_silo.get_dn()),
+                              self._unenforced_silo.dn,
+                              'msDS-AuthNPolicySiloMembers',
+                              expect_attr=False)
+
+            cls._member_of_enforced_silo = self._get_creds(
+                account_type=self.AccountType.COMPUTER,
+                assigned_silo=self._enforced_silo,
+                cached=True)
+            self.add_to_group(str(self._member_of_enforced_silo.get_dn()),
+                              self._enforced_silo.dn,
+                              'msDS-AuthNPolicySiloMembers',
+                              expect_attr=False)
+
+            cls._member_of_enforced_silo_ntlm = self._get_creds(
+                account_type=self.AccountType.USER,
+                assigned_silo=self._enforced_silo,
+                ntlm=True,
+                cached=True)
+            self.add_to_group(str(self._member_of_enforced_silo_ntlm.get_dn()),
+                              self._enforced_silo.dn,
+                              'msDS-AuthNPolicySiloMembers',
+                              expect_attr=False)
+
+            # Create a couple of multi‐valued string claims for testing claim
+            # value comparisons.
+
+            cls.claim0_attr = 'carLicense'
+            cls.claim0_id = self.get_new_username()
+            self.create_claim(cls.claim0_id,
+                              enabled=True,
+                              attribute=cls.claim0_attr,
+                              single_valued=False,
+                              source_type='AD',
+                              for_classes=['computer', 'user'],
+                              value_type=claims.CLAIM_TYPE_STRING)
+
+            cls.claim1_attr = 'departmentNumber'
+            cls.claim1_id = self.get_new_username()
+            self.create_claim(cls.claim1_id,
+                              enabled=True,
+                              attribute=cls.claim1_attr,
+                              single_valued=False,
+                              source_type='AD',
+                              for_classes=['computer', 'user'],
+                              value_type=claims.CLAIM_TYPE_STRING)
+
+            cls._setup = True
+
+    # For debugging purposes. Prints out the SDDL representation of
+    # authentication policy conditions set by the Windows GUI.
+    def _print_authn_policy_sddl(self, policy_id):
+        """Print the SDDL of each allowed-to/from attribute of a policy."""
+        policy_dn = self.get_authn_policies_dn()
+        policy_dn.add_child(f'CN={policy_id}')
+
+        attrs = [
+            'msDS-ComputerAllowedToAuthenticateTo',
+            'msDS-ServiceAllowedToAuthenticateFrom',
+            'msDS-ServiceAllowedToAuthenticateTo',
+            'msDS-UserAllowedToAuthenticateFrom',
+            'msDS-UserAllowedToAuthenticateTo',
+        ]
+
+        samdb = self.get_samdb()
+        res = samdb.search(policy_dn, scope=ldb.SCOPE_BASE, attrs=attrs)
+        self.assertEqual(1, len(res),
+                         f'Authentication policy {policy_id} not found')
+        result = res[0]
+
+        def print_sddl(attr):
+            # Absent attributes are simply skipped.
+            sd = result.get(attr, idx=0)
+            if sd is None:
+                return
+
+            sec_desc = ndr_unpack(security.descriptor, sd)
+            print(f'{attr}: {sec_desc.as_sddl()}')
+
+        for attr in attrs:
+            print_sddl(attr)
+
+    def sddl_array_from_sids(self, sids):
+        """Return an SDDL composite literal, e.g. '{SID(…), SID(…)}'.
+
+        :param sids: an iterable of (sid, type, attrs) tuples; only the sid
+            component is used.
+        """
+        def sddl_from_sid_entry(sid_entry):
+            sid, _, _ = sid_entry
+            return f'SID({sid})'
+
+        return f"{{{', '.join(map(sddl_from_sid_entry, sids))}}}"
+
+    def allow_if(self, condition):
+        """Wrap *condition* in a descriptor granting access when it holds."""
+        return f'O:SYD:(XA;;CR;;;WD;({condition}))'
+
+
+@DynamicTestCase
+class ConditionalAceTests(ConditionalAceBaseTests):
+    @classmethod
+    def setUpDynamicTestCases(cls):
+        """Generate dynamic tests from the class's table-driven case lists.
+
+        Test names are derived by substituting a readable phrase for each
+        conditional-ACE operator, then sanitising and truncating the result.
+        An optional FILTER environment variable (a regex) restricts which
+        cases are generated.
+        """
+        FILTER = env_get_var_value('FILTER', allow_missing=True)
+
+        # These operators are arranged so that each operator precedes its own
+        # affixes.
+        op_names = OrderedDict([
+            ('!=', 'does not equal'),
+            ('!', 'not'),
+            ('&&', 'and'),
+            ('<=', 'is less than or equals'),
+            ('<', 'is less than'),
+            ('==', 'equals'),
+            ('>=', 'exceeds or equals'),
+            ('>', 'exceeds'),
+            ('Not_Any_of', 'matches none of'),
+            ('Any_of', 'matches any of'),
+            ('Not_Contains', 'does not contain'),
+            ('Contains', 'contains'),
+            ('Not_Member_of_Any', 'the user belongs to none of'),
+            ('Not_Device_Member_of_Any', 'the device belongs to none of'),  # TODO: no test for this yet
+            ('Device_Member_of_Any', 'the device belongs to any of'),  # TODO: no test for this yet
+            ('Not_Device_Member_of', 'the device does not belong to'),  # TODO: no test for this yet
+            ('Device_Member_of', 'the device belongs to'),
+            ('Not_Exists', 'there does not exist'),
+            ('Exists', 'there exists'),
+            ('Member_of_Any', 'the user belongs to any of'),
+            ('Not_Member_of', 'the user does not belong to'),
+            ('Member_of', 'the user belongs to'),
+            ('||', 'or'),
+        ])
+
+        # This is a safety measure to ensure correct ordering of op_names
+        keys = list(op_names.keys())
+        for i in range(len(keys)):
+            for j in range(i + 1, len(keys)):
+                if keys[i] in keys[j]:
+                    raise AssertionError((keys[i], keys[j]))
+
+        for case in cls.pac_claim_cases:
+            # Cases may optionally carry a claim map as a fourth element.
+            if len(case) == 3:
+                pac_claims, expression, outcome = case
+                claim_map = None
+            elif len(case) == 4:
+                pac_claims, expression, claim_map, outcome = case
+            else:
+                raise AssertionError(
+                    f'found {len(case)} items in case, expected 3–4')
+
+            # Replace each operator with its readable phrase.
+            expression_name = expression
+            for op, op_name in op_names.items():
+                expression_name = expression_name.replace(op, op_name)
+
+            name = f'{pac_claims}_{expression_name}'
+
+            if claim_map is not None:
+                name += f'_{claim_map}'
+
+            # Sanitise, and truncate over-long names while recording how
+            # many characters were dropped.
+            name = re.sub(r'\W+', '_', name)
+            if len(name) > 150:
+                name = f'{name[:125]}+{len(name) - 125}‐more'
+
+            if FILTER and not re.search(FILTER, name):
+                continue
+
+            cls.generate_dynamic_test('test_pac_claim_cmp', name,
+                                      pac_claims, expression, claim_map,
+                                      outcome)
+
+        for case in cls.claim_against_claim_cases:
+            lhs, op, rhs, outcome = case
+            op_name = op_names[op]
+
+            name = f'{lhs}_{op_name}_{rhs}'
+
+            name = re.sub(r'\W+', '_', name)
+            if FILTER and not re.search(FILTER, name):
+                continue
+
+            cls.generate_dynamic_test('test_cmp', name,
+                                      lhs, op, rhs, outcome)
+
+        for case in cls.claim_against_literal_cases:
+            lhs, op, rhs, outcome = case
+            op_name = op_names[op]
+
+            name = f'{lhs}_{op_name}_literal_{rhs}'
+
+            name = re.sub(r'\W+', '_', name)
+            if FILTER and not re.search(FILTER, name):
+                continue
+
+            # The trailing True marks the right-hand side as a literal.
+            cls.generate_dynamic_test('test_cmp', name,
+                                      lhs, op, rhs, outcome, True)
+
+    def test_allowed_from_member_of_each(self):
+        """Member_of admits only armor accounts in every listed group."""
+        # Create an authentication policy that allows accounts belonging to
+        # both groups.
+        policy = self.create_authn_policy(
+            enforced=True,
+            user_allowed_from=(
+                f'O:SYD:(XA;;CR;;;WD;(Member_of '
+                f'{{SID({self._group0_sid}), SID({self._group1_sid})}}))'),
+        )
+
+        # Create a user account with the assigned policy.
+        client_creds = self._get_creds(account_type=self.AccountType.USER,
+                                       assigned_policy=policy)
+
+        # Show that we get a policy error if the machine account does not
+        # belong to both groups.
+        armor_tgt = self.get_tgt(self._member_of_one_creds)
+        self._get_tgt(client_creds, armor_tgt=armor_tgt,
+                      expected_error=KDC_ERR_POLICY)
+
+        # Otherwise, authentication should succeed.
+        armor_tgt = self.get_tgt(self._member_of_both_creds)
+        self._get_tgt(client_creds, armor_tgt=armor_tgt,
+                      expected_error=0)
+
+ def test_allowed_from_member_of_any(self):
+ # Create an authentication policy that allows accounts belonging to
+ # either group.
+ policy = self.create_authn_policy(
+ enforced=True,
+ user_allowed_from=(
+ f'O:SYD:(XA;;CR;;;WD;(Member_of_Any '
+ f'{{SID({self._group0_sid}), SID({self._group1_sid})}}))'),
+ )
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy)
+
+ # Show that we get a policy error if the machine account belongs to
+ # neither group.
+ armor_tgt = self.get_tgt(self._mach_creds)
+ self._get_tgt(client_creds, armor_tgt=armor_tgt,
+ expected_error=KDC_ERR_POLICY)
+
+ # Otherwise, authentication should succeed.
+ armor_tgt = self.get_tgt(self._member_of_one_creds)
+ self._get_tgt(client_creds, armor_tgt=armor_tgt,
+ expected_error=0)
+
+ def test_allowed_from_not_member_of_each(self):
+ # Create an authentication policy that allows accounts not belonging to
+ # both groups.
+ policy = self.create_authn_policy(
+ enforced=True,
+ user_allowed_from=(
+ f'O:SYD:(XA;;CR;;;WD;(Not_Member_of '
+ f'{{SID({self._group0_sid}), SID({self._group1_sid})}}))'),
+ )
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy)
+
+ # Show that we get a policy error if the machine account belongs to
+ # both groups.
+ armor_tgt = self.get_tgt(self._member_of_both_creds)
+ self._get_tgt(client_creds, armor_tgt=armor_tgt,
+ expected_error=KDC_ERR_POLICY)
+
+ # Otherwise, authentication should succeed.
+ armor_tgt = self.get_tgt(self._member_of_one_creds)
+ self._get_tgt(client_creds, armor_tgt=armor_tgt,
+ expected_error=0)
+
+    def test_allowed_from_not_member_of_any(self):
+        """Not_Member_of_Any admits only accounts in none of the groups."""
+        # Create an authentication policy that allows accounts belonging to
+        # neither group.
+        policy = self.create_authn_policy(
+            enforced=True,
+            user_allowed_from=(
+                f'O:SYD:(XA;;CR;;;WD;(Not_Member_of_Any '
+                f'{{SID({self._group0_sid}), SID({self._group1_sid})}}))'),
+        )
+
+        # Create a user account with the assigned policy.
+        client_creds = self._get_creds(account_type=self.AccountType.USER,
+                                       assigned_policy=policy)
+
+        # Show that we get a policy error if the machine account belongs to one
+        # of the groups.
+        armor_tgt = self.get_tgt(self._member_of_one_creds)
+        self._get_tgt(client_creds, armor_tgt=armor_tgt,
+                      expected_error=KDC_ERR_POLICY)
+
+        # Otherwise, authentication should succeed.
+        armor_tgt = self.get_tgt(self._mach_creds)
+        self._get_tgt(client_creds, armor_tgt=armor_tgt,
+                      expected_error=0)
+
+    def test_allowed_from_member_of_each_deny(self):
+        """A deny (XD) Member_of ACE refuses accounts in both groups."""
+        # Create an authentication policy that denies accounts belonging to
+        # both groups, and allows other accounts.
+        policy = self.create_authn_policy(
+            enforced=True,
+            user_allowed_from=(
+                f'O:SYD:(XD;;CR;;;WD;(Member_of '
+                f'{{SID({self._group0_sid}), SID({self._group1_sid})}}))'
+                f'(A;;CR;;;WD)'),
+        )
+
+        # Create a user account with the assigned policy.
+        client_creds = self._get_creds(account_type=self.AccountType.USER,
+                                       assigned_policy=policy)
+
+        # Show that we get a policy error if the machine account belongs to
+        # both groups.
+        armor_tgt = self.get_tgt(self._member_of_both_creds)
+        self._get_tgt(client_creds, armor_tgt=armor_tgt,
+                      expected_error=KDC_ERR_POLICY)
+
+        # Otherwise, authentication should succeed.
+        armor_tgt = self.get_tgt(self._member_of_one_creds)
+        self._get_tgt(client_creds, armor_tgt=armor_tgt,
+                      expected_error=0)
+
+    def test_allowed_from_member_of_any_deny(self):
+        """A deny (XD) Member_of_Any ACE refuses accounts in either group."""
+        # Create an authentication policy that denies accounts belonging to
+        # either group, and allows other accounts.
+        policy = self.create_authn_policy(
+            enforced=True,
+            user_allowed_from=(
+                f'O:SYD:(XD;;CR;;;WD;(Member_of_Any '
+                f'{{SID({self._group0_sid}), SID({self._group1_sid})}}))'
+                f'(A;;CR;;;WD)'),
+        )
+
+        # Create a user account with the assigned policy.
+        client_creds = self._get_creds(account_type=self.AccountType.USER,
+                                       assigned_policy=policy)
+
+        # Show that we get a policy error if the machine account belongs to
+        # either group.
+        armor_tgt = self.get_tgt(self._member_of_one_creds)
+        self._get_tgt(client_creds, armor_tgt=armor_tgt,
+                      expected_error=KDC_ERR_POLICY)
+
+        # Otherwise, authentication should succeed.
+        armor_tgt = self.get_tgt(self._mach_creds)
+        self._get_tgt(client_creds, armor_tgt=armor_tgt,
+                      expected_error=0)
+
+    def test_allowed_from_not_member_of_each_deny(self):
+        """A deny (XD) Not_Member_of ACE refuses accounts not in both groups."""
+        # Create an authentication policy that denies accounts not belonging to
+        # both groups, and allows other accounts.
+        policy = self.create_authn_policy(
+            enforced=True,
+            user_allowed_from=(
+                f'O:SYD:(XD;;CR;;;WD;(Not_Member_of '
+                f'{{SID({self._group0_sid}), SID({self._group1_sid})}}))'
+                f'(A;;CR;;;WD)'),
+        )
+
+        # Create a user account with the assigned policy.
+        client_creds = self._get_creds(account_type=self.AccountType.USER,
+                                       assigned_policy=policy)
+
+        # Show that we get a policy error if the machine account doesn’t belong
+        # to both groups.
+        armor_tgt = self.get_tgt(self._member_of_one_creds)
+        self._get_tgt(client_creds, armor_tgt=armor_tgt,
+                      expected_error=KDC_ERR_POLICY)
+
+        # Otherwise, authentication should succeed.
+        armor_tgt = self.get_tgt(self._member_of_both_creds)
+        self._get_tgt(client_creds, armor_tgt=armor_tgt,
+                      expected_error=0)
+
+    def test_allowed_from_not_member_of_any_deny(self):
+        """A deny (XD) Not_Member_of_Any ACE refuses accounts in no group."""
+        # Create an authentication policy that denies accounts belonging to
+        # neither group, and allows other accounts.
+        policy = self.create_authn_policy(
+            enforced=True,
+            user_allowed_from=(
+                f'O:SYD:(XD;;CR;;;WD;(Not_Member_of_Any '
+                f'{{SID({self._group0_sid}), SID({self._group1_sid})}}))'
+                f'(A;;CR;;;WD)'),
+        )
+
+        # Create a user account with the assigned policy.
+        client_creds = self._get_creds(account_type=self.AccountType.USER,
+                                       assigned_policy=policy)
+
+        # Show that we get a policy error if the machine account belongs to
+        # neither group.
+        armor_tgt = self.get_tgt(self._mach_creds)
+        self._get_tgt(client_creds, armor_tgt=armor_tgt,
+                      expected_error=KDC_ERR_POLICY)
+
+        # Otherwise, authentication should succeed.
+        armor_tgt = self.get_tgt(self._member_of_one_creds)
+        self._get_tgt(client_creds, armor_tgt=armor_tgt,
+                      expected_error=0)
+
+    def test_allowed_from_unenforced_silo_equals(self):
+        """Matching an unenforced silo by claim never allows access."""
+        # Create an authentication policy that allows accounts belonging to the
+        # unenforced silo.
+        policy = self.create_authn_policy(
+            enforced=True,
+            user_allowed_from=(
+                f'O:SYD:(XA;;CR;;;WD;'
+                f'(@User.ad://ext/AuthenticationSilo == '
+                f'"{self._unenforced_silo}"))'),
+        )
+
+        # Create a user account with the assigned policy.
+        client_creds = self._get_creds(account_type=self.AccountType.USER,
+                                       assigned_policy=policy)
+
+        # As the policy is unenforced, the ‘ad://ext/AuthenticationSilo’ claim
+        # will not be present in the TGT, and the ACE will never allow access.
+
+        armor_tgt = self.get_tgt(self._mach_creds)
+        self._get_tgt(client_creds, armor_tgt=armor_tgt,
+                      expected_error=KDC_ERR_POLICY)
+
+        armor_tgt = self.get_tgt(self._member_of_unenforced_silo)
+        self._get_tgt(client_creds, armor_tgt=armor_tgt,
+                      expected_error=KDC_ERR_POLICY)
+
+        armor_tgt = self.get_tgt(self._member_of_enforced_silo)
+        self._get_tgt(client_creds, armor_tgt=armor_tgt,
+                      expected_error=KDC_ERR_POLICY)
+
+    def test_allowed_from_enforced_silo_equals(self):
+        """Only members of the enforced silo satisfy the silo-claim check."""
+        # Create an authentication policy that allows accounts belonging to the
+        # enforced silo.
+        policy = self.create_authn_policy(
+            enforced=True,
+            user_allowed_from=(
+                f'O:SYD:(XA;;CR;;;WD;'
+                f'(@User.ad://ext/AuthenticationSilo == '
+                f'"{self._enforced_silo}"))'),
+        )
+
+        # Create a user account with the assigned policy.
+        client_creds = self._get_creds(account_type=self.AccountType.USER,
+                                       assigned_policy=policy)
+
+        # Show that we get a policy error if the machine account does not
+        # belong to the silo.
+        armor_tgt = self.get_tgt(self._mach_creds)
+        self._get_tgt(client_creds, armor_tgt=armor_tgt,
+                      expected_error=KDC_ERR_POLICY)
+
+        armor_tgt = self.get_tgt(self._member_of_unenforced_silo)
+        self._get_tgt(client_creds, armor_tgt=armor_tgt,
+                      expected_error=KDC_ERR_POLICY)
+
+        # Otherwise, authentication should succeed.
+        armor_tgt = self.get_tgt(self._member_of_enforced_silo)
+        self._get_tgt(client_creds, armor_tgt=armor_tgt,
+                      expected_error=0)
+
+    def test_allowed_from_unenforced_silo_not_equals(self):
+        """A != check on the unenforced silo admits enforced-silo members."""
+        # Create an authentication policy that allows accounts not belonging to
+        # the unenforced silo.
+        policy = self.create_authn_policy(
+            enforced=True,
+            user_allowed_from=(
+                f'O:SYD:(XA;;CR;;;WD;'
+                f'(@User.ad://ext/AuthenticationSilo != '
+                f'"{self._unenforced_silo}"))'),
+        )
+
+        # Create a user account with the assigned policy.
+        client_creds = self._get_creds(account_type=self.AccountType.USER,
+                                       assigned_policy=policy)
+
+        # Show that authentication fails unless the account belongs to a silo
+        # other than the unenforced silo.
+
+        armor_tgt = self.get_tgt(self._mach_creds)
+        self._get_tgt(client_creds, armor_tgt=armor_tgt,
+                      expected_error=KDC_ERR_POLICY)
+
+        armor_tgt = self.get_tgt(self._member_of_unenforced_silo)
+        self._get_tgt(client_creds, armor_tgt=armor_tgt,
+                      expected_error=KDC_ERR_POLICY)
+
+        armor_tgt = self.get_tgt(self._member_of_enforced_silo)
+        self._get_tgt(client_creds, armor_tgt=armor_tgt,
+                      expected_error=0)
+
+    def test_allowed_from_enforced_silo_not_equals(self):
+        """A != check on the enforced silo admits no available account."""
+        # Create an authentication policy that allows accounts not belonging to
+        # the enforced silo.
+        policy = self.create_authn_policy(
+            enforced=True,
+            user_allowed_from=(
+                f'O:SYD:(XA;;CR;;;WD;'
+                f'(@User.ad://ext/AuthenticationSilo != '
+                f'"{self._enforced_silo}"))'),
+        )
+
+        # Create a user account with the assigned policy.
+        client_creds = self._get_creds(account_type=self.AccountType.USER,
+                                       assigned_policy=policy)
+
+        # Show that authentication always fails, as none of the machine
+        # accounts belong to a silo that is not the enforced one. (The
+        # unenforced silo doesn’t count, as it will never appear in a claim.)
+
+        armor_tgt = self.get_tgt(self._mach_creds)
+        self._get_tgt(client_creds, armor_tgt=armor_tgt,
+                      expected_error=KDC_ERR_POLICY)
+
+        armor_tgt = self.get_tgt(self._member_of_unenforced_silo)
+        self._get_tgt(client_creds, armor_tgt=armor_tgt,
+                      expected_error=KDC_ERR_POLICY)
+
+        armor_tgt = self.get_tgt(self._member_of_enforced_silo)
+        self._get_tgt(client_creds, armor_tgt=armor_tgt,
+                      expected_error=KDC_ERR_POLICY)
+
+    def test_allowed_from_unenforced_silo_equals_deny(self):
+        """Denying the unenforced silo still requires a real silo claim."""
+        # Create an authentication policy that denies accounts belonging to the
+        # unenforced silo, and allows other accounts.
+        policy = self.create_authn_policy(
+            enforced=True,
+            user_allowed_from=(
+                f'O:SYD:(XD;;CR;;;WD;'
+                f'(@User.ad://ext/AuthenticationSilo == '
+                f'"{self._unenforced_silo}"))'
+                f'(A;;CR;;;WD)'),
+        )
+
+        # Create a user account with the assigned policy.
+        client_creds = self._get_creds(account_type=self.AccountType.USER,
+                                       assigned_policy=policy)
+
+        # Show that authentication fails unless the account belongs to a silo
+        # other than the unenforced silo.
+
+        armor_tgt = self.get_tgt(self._mach_creds)
+        self._get_tgt(client_creds, armor_tgt=armor_tgt,
+                      expected_error=KDC_ERR_POLICY)
+
+        armor_tgt = self.get_tgt(self._member_of_unenforced_silo)
+        self._get_tgt(client_creds, armor_tgt=armor_tgt,
+                      expected_error=KDC_ERR_POLICY)
+
+        armor_tgt = self.get_tgt(self._member_of_enforced_silo)
+        self._get_tgt(client_creds, armor_tgt=armor_tgt,
+                      expected_error=0)
+
+    def test_allowed_from_enforced_silo_equals_deny(self):
+        """Denying the enforced silo rejects every available account."""
+        # Create an authentication policy that denies accounts belonging to the
+        # enforced silo, and allows other accounts.
+        policy = self.create_authn_policy(
+            enforced=True,
+            user_allowed_from=(
+                f'O:SYD:(XD;;CR;;;WD;'
+                f'(@User.ad://ext/AuthenticationSilo == '
+                f'"{self._enforced_silo}"))'
+                f'(A;;CR;;;WD)'),
+        )
+
+        # Create a user account with the assigned policy.
+        client_creds = self._get_creds(account_type=self.AccountType.USER,
+                                       assigned_policy=policy)
+
+        # Show that authentication always fails, as none of the machine
+        # accounts belong to a silo that is not the enforced one. (The
+        # unenforced silo doesn’t count, as it will never appear in a claim.)
+
+        armor_tgt = self.get_tgt(self._mach_creds)
+        self._get_tgt(client_creds, armor_tgt=armor_tgt,
+                      expected_error=KDC_ERR_POLICY)
+
+        armor_tgt = self.get_tgt(self._member_of_unenforced_silo)
+        self._get_tgt(client_creds, armor_tgt=armor_tgt,
+                      expected_error=KDC_ERR_POLICY)
+
+        armor_tgt = self.get_tgt(self._member_of_enforced_silo)
+        self._get_tgt(client_creds, armor_tgt=armor_tgt,
+                      expected_error=KDC_ERR_POLICY)
+
+    def test_allowed_from_unenforced_silo_not_equals_deny(self):
+        """Denying != unenforced-silo rejects everyone (claim never present)."""
+        # Create an authentication policy that denies accounts not belonging to
+        # the unenforced silo, and allows other accounts.
+        policy = self.create_authn_policy(
+            enforced=True,
+            user_allowed_from=(
+                f'O:SYD:(XD;;CR;;;WD;'
+                f'(@User.ad://ext/AuthenticationSilo != '
+                f'"{self._unenforced_silo}"))'
+                f'(A;;CR;;;WD)'),
+        )
+
+        # Create a user account with the assigned policy.
+        client_creds = self._get_creds(account_type=self.AccountType.USER,
+                                       assigned_policy=policy)
+
+        # Show that authentication always fails, as the unenforced silo will
+        # never appear in a claim.
+
+        armor_tgt = self.get_tgt(self._mach_creds)
+        self._get_tgt(client_creds, armor_tgt=armor_tgt,
+                      expected_error=KDC_ERR_POLICY)
+
+        armor_tgt = self.get_tgt(self._member_of_unenforced_silo)
+        self._get_tgt(client_creds, armor_tgt=armor_tgt,
+                      expected_error=KDC_ERR_POLICY)
+
+        armor_tgt = self.get_tgt(self._member_of_enforced_silo)
+        self._get_tgt(client_creds, armor_tgt=armor_tgt,
+                      expected_error=KDC_ERR_POLICY)
+
+    def test_allowed_from_enforced_silo_not_equals_deny(self):
+        """Denying != enforced-silo admits only enforced-silo members."""
+        # Create an authentication policy that denies accounts not belonging to
+        # the enforced silo, and allows other accounts.
+        policy = self.create_authn_policy(
+            enforced=True,
+            user_allowed_from=(
+                f'O:SYD:(XD;;CR;;;WD;'
+                f'(@User.ad://ext/AuthenticationSilo != '
+                f'"{self._enforced_silo}"))'
+                f'(A;;CR;;;WD)'),
+        )
+
+        # Create a user account with the assigned policy.
+        client_creds = self._get_creds(account_type=self.AccountType.USER,
+                                       assigned_policy=policy)
+
+        # Show that authentication fails unless the account belongs to the
+        # enforced silo.
+
+        armor_tgt = self.get_tgt(self._mach_creds)
+        self._get_tgt(client_creds, armor_tgt=armor_tgt,
+                      expected_error=KDC_ERR_POLICY)
+
+        armor_tgt = self.get_tgt(self._member_of_unenforced_silo)
+        self._get_tgt(client_creds, armor_tgt=armor_tgt,
+                      expected_error=KDC_ERR_POLICY)
+
+        armor_tgt = self.get_tgt(self._member_of_enforced_silo)
+        self._get_tgt(client_creds, armor_tgt=armor_tgt,
+                      expected_error=0)
+
+    def test_allowed_from_claim_equals_claim(self):
+        """Access is granted only when two AD claims have equal values."""
+        # Create a couple of claim types.
+
+        claim0_id = self.get_new_username()
+        self.create_claim(claim0_id,
+                          enabled=True,
+                          attribute='carLicense',
+                          single_valued=True,
+                          source_type='AD',
+                          for_classes=['computer'],
+                          value_type=claims.CLAIM_TYPE_STRING)
+
+        claim1_id = self.get_new_username()
+        self.create_claim(claim1_id,
+                          enabled=True,
+                          attribute='comment',
+                          single_valued=True,
+                          source_type='AD',
+                          for_classes=['computer'],
+                          value_type=claims.CLAIM_TYPE_STRING)
+
+        # Create an authentication policy that allows accounts having the two
+        # claims be equal.
+        policy = self.create_authn_policy(
+            enforced=True,
+            user_allowed_from=(
+                f'O:SYD:(XA;;CR;;;WD;'
+                f'(@User.{claim0_id} == @User.{claim1_id}))'),
+        )
+
+        # Create a user account with the assigned policy.
+        client_creds = self._get_creds(account_type=self.AccountType.USER,
+                                       assigned_policy=policy)
+
+        # A machine account carrying neither claim is refused.
+        armor_tgt = self.get_tgt(self._mach_creds)
+        self._get_tgt(client_creds, armor_tgt=armor_tgt,
+                      expected_error=KDC_ERR_POLICY)
+
+        # Give a machine account identical values for both underlying
+        # attributes so that the two claims compare equal.
+        mach_creds = self.get_cached_creds(
+            account_type=self.AccountType.COMPUTER,
+            opts={
+                'additional_details': (
+                    ('carLicense', 'foo'),
+                    ('comment', 'foo'),
+                ),
+            })
+        armor_tgt = self.get_tgt(
+            mach_creds,
+            expect_client_claims=True,
+            expected_client_claims={
+                claim0_id: {
+                    'source_type': claims.CLAIMS_SOURCE_TYPE_AD,
+                    'type': claims.CLAIM_TYPE_STRING,
+                    'values': ('foo',),
+                },
+                claim1_id: {
+                    'source_type': claims.CLAIMS_SOURCE_TYPE_AD,
+                    'type': claims.CLAIM_TYPE_STRING,
+                    'values': ('foo',),
+                },
+            })
+        self._get_tgt(client_creds, armor_tgt=armor_tgt,
+                      expected_error=0)
+
+    def test_allowed_to_client_equals(self):
+        """Service tickets require the client to hold a given claim value."""
+        client_claim_attr = 'carLicense'
+        client_claim_value = 'foo bar'
+        client_claim_values = client_claim_value,
+
+        client_claim_id = self.get_new_username()
+        self.create_claim(client_claim_id,
+                          enabled=True,
+                          attribute=client_claim_attr,
+                          single_valued=True,
+                          source_type='AD',
+                          for_classes=['user'],
+                          value_type=claims.CLAIM_TYPE_STRING)
+
+        # Create an authentication policy that allows authorization if the
+        # client has a particular claim value.
+        policy = self.create_authn_policy(
+            enforced=True,
+            computer_allowed_to=(
+                f'O:SYD:(XA;;CR;;;WD;'
+                f'((@User.{client_claim_id} == "{client_claim_value}")))'),
+        )
+
+        # Create a computer account with the assigned policy.
+        target_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
+                                       assigned_policy=policy)
+
+        armor_tgt = self.get_tgt(self._mach_creds)
+
+        # Create a user account without the claim value.
+        client_creds = self.get_cached_creds(
+            account_type=self.AccountType.USER)
+        tgt = self.get_tgt(client_creds)
+        # Show that obtaining a service ticket is denied.
+        self._tgs_req(
+            tgt, KDC_ERR_POLICY, client_creds, target_creds,
+            armor_tgt=armor_tgt,
+            expect_edata=self.expect_padata_outer,
+            # We aren’t particular about whether or not we get an NTSTATUS.
+            expect_status=None,
+            expected_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+            check_patypes=False)
+
+        # Create a user account with the claim value.
+        client_creds = self.get_cached_creds(
+            account_type=self.AccountType.USER,
+            opts={
+                'additional_details': (
+                    (client_claim_attr, client_claim_values),
+                ),
+            })
+        tgt = self.get_tgt(
+            client_creds,
+            expect_client_claims=True,
+            expected_client_claims={
+                client_claim_id: {
+                    'source_type': claims.CLAIMS_SOURCE_TYPE_AD,
+                    'type': claims.CLAIM_TYPE_STRING,
+                    'values': client_claim_values,
+                },
+            })
+        # Show that obtaining a service ticket is allowed.
+        self._tgs_req(tgt, 0, client_creds, target_creds,
+                      armor_tgt=armor_tgt)
+
+    def test_allowed_to_device_equals(self):
+        """Service tickets require the armor device to hold a claim value."""
+        device_claim_attr = 'carLicense'
+        device_claim_value = 'bar'
+        device_claim_values = device_claim_value,
+
+        device_claim_id = self.get_new_username()
+        self.create_claim(device_claim_id,
+                          enabled=True,
+                          attribute=device_claim_attr,
+                          single_valued=True,
+                          source_type='AD',
+                          for_classes=['computer'],
+                          value_type=claims.CLAIM_TYPE_STRING)
+
+        # Create a user account.
+        client_creds = self.get_cached_creds(
+            account_type=self.AccountType.USER)
+        tgt = self.get_tgt(client_creds)
+
+        # Create an authentication policy that allows authorization if the
+        # device has a particular claim value.
+        policy = self.create_authn_policy(
+            enforced=True,
+            computer_allowed_to=(
+                f'O:SYD:(XA;;CR;;;WD;'
+                f'(@Device.{device_claim_id} == "{device_claim_value}"))'),
+        )
+
+        # Create a computer account with the assigned policy.
+        target_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
+                                       assigned_policy=policy)
+
+        armor_tgt = self.get_tgt(self._mach_creds)
+        # Show that obtaining a service ticket is denied when the claim value
+        # is not present.
+        self._tgs_req(
+            tgt, KDC_ERR_POLICY, client_creds, target_creds,
+            armor_tgt=armor_tgt,
+            expect_edata=self.expect_padata_outer,
+            # We aren’t particular about whether or not we get an NTSTATUS.
+            expect_status=None,
+            expected_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+            check_patypes=False)
+
+        # Arm a machine account that carries the claim value.
+        mach_creds = self.get_cached_creds(
+            account_type=self.AccountType.COMPUTER,
+            opts={
+                'additional_details': (
+                    (device_claim_attr, device_claim_values),
+                ),
+            })
+        armor_tgt = self.get_tgt(
+            mach_creds,
+            expect_client_claims=True,
+            expected_client_claims={
+                device_claim_id: {
+                    'source_type': claims.CLAIMS_SOURCE_TYPE_AD,
+                    'type': claims.CLAIM_TYPE_STRING,
+                    'values': device_claim_values,
+                },
+            })
+        # Show that obtaining a service ticket is allowed when the claim value
+        # is present.
+        self._tgs_req(tgt, 0, client_creds, target_creds,
+                      armor_tgt=armor_tgt)
+
    # Cases for comparing one claim against another, driven through
    # _test_cmp_with_args(). Each entry is
    # (claim0 values, operator, claim1 values, expected outcome); the
    # outcome is True, False, or None — None meaning the expression
    # evaluates to ‘unknown’, so authentication fails against both the
    # allowing and the denying policy.
    claim_against_claim_cases = [
        # If either side is missing, the result is unknown.
        ((), '==', (), None),
        ((), '!=', (), None),
        ('a', '==', (), None),
        ((), '==', 'b', None),
        # Straightforward equality and inequality checks work.
        ('foo', '==', 'foo', True),
        ('foo', '==', 'bar', False),
        ('foo', '!=', 'foo', False),
        ('foo', '!=', 'bar', True),
        # We can perform less‐than and greater‐than operations.
        ('cat', '<', 'dog', True),
        ('cat', '<=', 'dog', True),
        ('cat', '>', 'dog', False),
        ('cat', '>=', 'dog', False),
        ('foo', '<=', 'foo', True),
        ('foo', '>=', 'foo', True),
        ('foo', '<', 'foo bar', True),
        ('foo bar', '>', 'foo', True),
        # String comparison is case‐insensitive, as the following cases
        # demonstrate.
        ('foo bar', '==', 'Foo BAR', True),
        ('foo bar', '==', 'FOO BAR', True),
        ('ćàț', '==', 'ĆÀȚ', True),
        ('ḽ', '==', 'Ḽ', True),
        ('ⅸ', '==', 'Ⅸ', True),
        ('ꙭ', '==', 'Ꙭ', True),
        ('ⱦ', '==', 'Ⱦ', True),  # Lowercased variant added in Unicode 5.0.
        ('ԛԣ', '==', 'ԚԢ', True),  # All added in Unicode 5.1.
        ('foo', '<', 'foo', True),
        ('ćàș', '<', 'ĆÀȚ', True),
        ('cat', '<', 'ćàț', True),
        # This is done by converting to UPPER CASE. Hence, both ‘A’ (U+41) and
        # ‘a’ (U+61) compare less than ‘_’ (U+5F).
        ('A', '<', '_', True),
        ('a', '<', '_', True),
        # But not all uppercased/lowercased pairs are considered to be equal in
        # this way.
        ('ß', '<', 'ẞ', True),
        ('ß', '>', 'SS', True),
        ('ⳬ', '>', 'Ⳬ', True),  # Added in Unicode 5.2.
        ('ʞ', '<', 'Ʞ', True),  # Uppercased variant added in Unicode 6.0.
        ('ʞ', '<', 'ʟ', True),  # U+029E < U+029F < U+A7B0 (upper variant, Ʞ)
        ('ꞧ', '>', 'Ꞧ', True),  # Added in Unicode 6.0.
        ('ɜ', '<', 'Ɜ', True),  # Uppercased variant added in Unicode 7.0.
        #
        # Strings are compared as UTF‐16 code units, rather than as Unicode
        # codepoints. So while you might expect ‘𐀀’ (U+10000) to compare
        # greater than ‘豈’ (U+F900), it is actually considered to be the
        # *smaller* of the pair. That is because it is encoded as a sequence of
        # two code units, 0xd800 and 0xdc00, which combination compares less
        # than the single code unit 0xf900.
        ('ퟻ', '<', '𐀀', True),
        ('𐀀', '<', '豈', True),
        ('ퟻ', '<', '豈', True),
        # Composites can be compared.
        (('foo', 'bar'), '==', ('foo', 'bar'), True),
        (('foo', 'bar'), '==', ('foo', 'baz'), False),
        # The individual components don’t have to match in case.
        (('foo', 'bar'), '==', ('FOO', 'BAR'), True),
        # Nor must they match in order.
        (('foo', 'bar'), '==', ('bar', 'foo'), True),
        # Composites of different lengths compare unequal.
        (('foo', 'bar'), '!=', 'foo', True),
        (('foo', 'bar'), '!=', ('foo', 'bar', 'baz'), True),
        # But composites don’t have a defined ordering, and aren’t considered
        # greater or lesser than one another.
        (('foo', 'bar'), '<', ('foo', 'bar'), None),
        (('foo', 'bar'), '<=', ('foo', 'bar'), None),
        (('foo', 'bar'), '>', ('foo', 'bar', 'baz'), None),
        (('foo', 'bar'), '>=', ('foo', 'bar', 'baz'), None),
        # We can test for containment.
        (('foo', 'bar'), 'Contains', ('FOO'), True),
        (('foo', 'bar'), 'Contains', ('foo', 'bar'), True),
        (('foo', 'bar'), 'Not_Contains', ('foo', 'bar'), False),
        (('foo', 'bar'), 'Contains', ('foo', 'bar', 'baz'), False),
        (('foo', 'bar'), 'Not_Contains', ('foo', 'bar', 'baz'), True),
        # We can test whether the operands have any elements in common.
        ('foo', 'Any_of', 'foo', True),
        (('foo', 'bar'), 'Any_of', 'BAR', True),
        (('foo', 'bar'), 'Any_of', 'baz', False),
        (('foo', 'bar'), 'Not_Any_of', 'baz', True),
        (('foo', 'bar'), 'Any_of', ('bar', 'baz'), True),
        (('foo', 'bar'), 'Not_Any_of', ('bar', 'baz'), False),
    ]
+
    # Cases for comparing a claim against an SDDL literal, driven through
    # _test_cmp_with_args() with rhs_is_literal=True. The literal is first
    # passed through str.format(), so ‘{{’ and ‘}}’ are escapes producing the
    # literal braces of an SDDL composite. Outcomes are as for
    # claim_against_claim_cases above.
    claim_against_literal_cases = [
        # String comparisons also work against literals.
        ('foo bar', '==', '"foo bar"', True),
        # Composites can be compared with literals.
        ((), '==', '{{}}', None),
        ('foo', '!=', '{{}}', True),
        ('bar', '==', '{{"bar"}}', True),
        (('apple', 'banana'), '==', '{{"APPLE", "BANANA"}}', True),
        (('apple', 'banana'), '==', '{{"BANANA", "APPLE"}}', True),
        (('apple', 'banana'), '==', '{{"apple", "banana", "apple"}}', False),
        # We can test for containment.
        ((), 'Contains', '{{}}', False),
        ((), 'Not_Contains', '{{}}', True),
        ((), 'Contains', '{{"foo"}}', None),
        ((), 'Not_Contains', '{{"foo", "bar"}}', None),
        ('foo', 'Contains', '{{}}', False),
        ('bar', 'Contains', '{{"bar"}}', True),
        (('foo', 'bar'), 'Contains', '{{"foo", "bar"}}', True),
        (('foo', 'bar'), 'Contains', '{{"foo", "bar", "baz"}}', False),
        # The right‐hand side of Contains or Not_Contains does not have to be a
        # composite.
        ('foo', 'Contains', '"foo"', True),
        (('foo', 'bar'), 'Not_Contains', '"foo"', False),
        # It’s fine if the right‐hand side contains duplicate elements.
        (('foo', 'bar'), 'Contains', '{{"foo", "bar", "bar"}}', True),
        # We can test whether the operands have any elements in common.
        ((), 'Any_of', '{{}}', None),
        ((), 'Not_Any_of', '{{}}', None),
        ('foo', 'Any_of', '{{}}', False),
        ('foo', 'Not_Any_of', '{{}}', True),
        ('bar', 'Any_of', '{{"bar"}}', True),
        (('foo', 'bar'), 'Any_of', '{{"bar", "baz"}}', True),
        (('foo', 'bar'), 'Any_of', '{{"baz"}}', False),
        # The right‐hand side of Any_of or Not_Any_of must be a composite.
        ('foo', 'Any_of', '"foo"', None),
        (('foo', 'bar'), 'Not_Any_of', '"baz"', None),
        # A string won’t compare equal to a numeric literal.
        ('42', '==', '"42"', True),
        ('42', '==', '42', None),
        # Nor can composites that mismatch in type be compared.
        (('123', '456'), '==', '{{"123", "456"}}', True),
        (('654', '321'), '==', '{{654, 321}}', None),
        (('foo', 'bar'), 'Contains', '{{1, 2, 3}}', None),
    ]
+
+ def _test_cmp_with_args(self, lhs, op, rhs, outcome, rhs_is_literal=False):
+ # Construct a conditional ACE expression that evaluates to True if the
+ # two claim values are equal.
+ if rhs_is_literal:
+ self.assertIsInstance(rhs, str)
+ rhs = rhs.format(self=self)
+ expression = f'(@User.{self.claim0_id} {op} {rhs})'
+ else:
+ expression = f'(@User.{self.claim0_id} {op} @User.{self.claim1_id})'
+
+ # Create an authentication policy that will allow authentication when
+ # the expression is true, and a second that will deny authentication in
+ # the same circumstance. By observing the results of authenticating
+ # against each of these policies in turn, we can determine whether the
+ # expression evaluates to a True, False, or Unknown value.
+
+ allowed_sddl = f'O:SYD:(XA;;CR;;;WD;{expression})'
+ denied_sddl = f'O:SYD:(XD;;CR;;;WD;{expression})(A;;CR;;;WD)'
+
+ allowed_policy = self.create_authn_policy(
+ enforced=True,
+ user_allowed_from=allowed_sddl)
+ denied_policy = self.create_authn_policy(
+ enforced=True,
+ user_allowed_from=denied_sddl)
+
+ # Create a user account assigned to each policy.
+ allowed_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=allowed_policy)
+ denied_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=denied_policy)
+
+ additional_details = ()
+ if lhs:
+ additional_details += ((self.claim0_attr, lhs),)
+ if rhs and not rhs_is_literal:
+ additional_details += ((self.claim1_attr, rhs),)
+
+ # Create a computer account with the provided attribute values.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={'additional_details': additional_details})
+
+ def expected_values(val):
+ if isinstance(val, (str, bytes)):
+ return val,
+
+ return val
+
+ expected_client_claims = {}
+ if lhs:
+ expected_client_claims[self.claim0_id] = {
+ 'source_type': claims.CLAIMS_SOURCE_TYPE_AD,
+ 'type': claims.CLAIM_TYPE_STRING,
+ 'values': expected_values(lhs),
+ }
+ if rhs and not rhs_is_literal:
+ expected_client_claims[self.claim1_id] = {
+ 'source_type': claims.CLAIMS_SOURCE_TYPE_AD,
+ 'type': claims.CLAIM_TYPE_STRING,
+ 'values': expected_values(rhs),
+ }
+
+ # Fetch the computer account’s TGT, and ensure it contains the claims.
+ armor_tgt = self.get_tgt(
+ mach_creds,
+ expect_client_claims=bool(expected_client_claims) or None,
+ expected_client_claims=expected_client_claims)
+
+ # The first or the second authentication request is expected to succeed
+ # if the outcome is True or False, respectively. An Unknown outcome,
+ # represented by None, will result in a policy error in either case.
+ allowed_error = 0 if outcome is True else KDC_ERR_POLICY
+ denied_error = 0 if outcome is False else KDC_ERR_POLICY
+
+ # Attempt to authenticate and ensure that we observe the expected
+ # results.
+ self._get_tgt(allowed_creds, armor_tgt=armor_tgt,
+ expected_error=allowed_error)
+ self._get_tgt(denied_creds, armor_tgt=armor_tgt,
+ expected_error=denied_error)
+
    # Cases for _test_pac_claim_cmp_with_args(): conditional ACE expressions
    # evaluated against claims injected directly into the armor TGT’s PAC.
    # Each entry is (pac_claims, expression[, claim_map], outcome), where
    # pac_claims is a sequence of
    # (claims source type, [(name, claim type, values), …]); ‘{name}’ fields
    # in the expression become @User.<claim id> references; and outcome is
    # True/False/None, a KDC error code, or an (outcome, CRASHES_WINDOWS)
    # pair for cases known to crash Windows servers.
    pac_claim_cases = [
        # Test a very simple expression with various claims.
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{non_empty_string}', claims.CLAIM_TYPE_STRING, ['foo bar']),
            ]),
        ], '{non_empty_string}', True),
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{zero_uint}', claims.CLAIM_TYPE_UINT64, [0]),
            ]),
        ], '{zero_uint}', False),
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{nonzero_uint}', claims.CLAIM_TYPE_UINT64, [1]),
            ]),
        ], '{nonzero_uint}', True),
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{zero_uints}', claims.CLAIM_TYPE_UINT64, [0, 0]),
            ]),
        ], '{zero_uints}', KDC_ERR_GENERIC),
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{zero_and_one_uint}', claims.CLAIM_TYPE_UINT64, [0, 1]),
            ]),
        ], '{zero_and_one_uint}', True),
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{one_and_zero_uint}', claims.CLAIM_TYPE_UINT64, [1, 0]),
            ]),
        ], '{one_and_zero_uint}', True),
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{zero_int}', claims.CLAIM_TYPE_INT64, [0]),
            ]),
        ], '{zero_int}', False),
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{nonzero_int}', claims.CLAIM_TYPE_INT64, [1]),
            ]),
        ], '{nonzero_int}', True),
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{zero_ints}', claims.CLAIM_TYPE_INT64, [0, 0]),
            ]),
        ], '{zero_ints}', KDC_ERR_GENERIC),
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{zero_and_one_int}', claims.CLAIM_TYPE_INT64, [0, 1]),
            ]),
        ], '{zero_and_one_int}', True),
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{one_and_zero_int}', claims.CLAIM_TYPE_INT64, [1, 0]),
            ]),
        ], '{one_and_zero_int}', True),
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{false_boolean}', claims.CLAIM_TYPE_BOOLEAN, [0]),
            ]),
        ], '{false_boolean}', False),
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{true_boolean}', claims.CLAIM_TYPE_BOOLEAN, [1]),
            ]),
        ], '{true_boolean}', True),
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{false_booleans}', claims.CLAIM_TYPE_BOOLEAN, [0, 0]),
            ]),
        ], '{false_booleans}', KDC_ERR_GENERIC),
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{false_and_true_boolean}', claims.CLAIM_TYPE_BOOLEAN, [0, 1]),
            ]),
        ], '{false_and_true_boolean}', True),
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{true_and_false_boolean}', claims.CLAIM_TYPE_BOOLEAN, [1, 0]),
            ]),
        ], '{true_and_false_boolean}', True),
        # Test a basic comparison against a literal.
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{a}', claims.CLAIM_TYPE_STRING, ['foo bar']),
            ]),
        ], '{a} == "foo bar"', True),
        # Claims can be compared against one another.
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{a}', claims.CLAIM_TYPE_STRING, ['foo bar']),
                ('{b}', claims.CLAIM_TYPE_STRING, ['FOO BAR']),
            ]),
        ], '{a} == {b}', True),
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{b}', claims.CLAIM_TYPE_STRING, ['FOO', 'BAR', 'BAZ']),
                ('{a}', claims.CLAIM_TYPE_STRING, ['foo', 'bar', 'baz']),
            ]),
        ], '{a} != {b}', False),
        # Certificate claims are also valid.
        ([
            (claims.CLAIMS_SOURCE_TYPE_CERTIFICATE, [
                ('{a}', claims.CLAIM_TYPE_STRING, ['foo']),
            ]),
        ], '{a} == "foo"', True),
        # Other claim source types are ignored.
        ([
            (0, [
                ('{a}', claims.CLAIM_TYPE_STRING, ['foo']),
            ]),
        ], '{a} == "foo"', None),
        ([
            (3, [
                ('{a}', claims.CLAIM_TYPE_STRING, ['foo']),
            ]),
        ], '{a} == "foo"', None),
        # If multiple claims have the same ID, the *last* one takes precedence.
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{a}', claims.CLAIM_TYPE_STRING, ['this is not the value…']),
                ('{a}', claims.CLAIM_TYPE_STRING, ['…nor is this…']),
            ]),
            (claims.CLAIMS_SOURCE_TYPE_CERTIFICATE, [
                ('{a}', claims.CLAIM_TYPE_STRING, ['…and this isn’t either.']),
            ]),
            (claims.CLAIMS_SOURCE_TYPE_CERTIFICATE, [
                ('{a}', claims.CLAIM_TYPE_STRING, ['here’s the actual value!']),
            ]),
            (3, [
                ('{a}', claims.CLAIM_TYPE_STRING, ['this is a red herring.']),
            ]),
        ], '{a} == "here’s the actual value!"', True),
        # Claim values can be empty.
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{empty_claim_string}', claims.CLAIM_TYPE_STRING, []),
            ]),
        ], '{empty_claim_string} != "foo bar"', None),
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{empty_claim_boolean}', claims.CLAIM_TYPE_BOOLEAN, []),
            ]),
        ], 'Exists {empty_claim_boolean}', None),
        # Test unsigned integer equality.
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{a}', claims.CLAIM_TYPE_UINT64, [42]),
            ]),
        ], '{a} == 42', True),
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{a}', claims.CLAIM_TYPE_UINT64, [0]),
            ]),
        ], '{a} == 3', False),
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{a}', claims.CLAIM_TYPE_UINT64, [1, 2, 3]),
            ]),
        ], '{a} == {{1, 2, 3}}', True),
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{a}', claims.CLAIM_TYPE_UINT64, [4, 5, 6]),
            ]),
        ], '{a} != {{1, 2, 3}}', True),
        # Test unsigned integer comparison. Ensure we don’t run into any
        # integer overflow issues.
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{a}', claims.CLAIM_TYPE_UINT64, [1 << 32]),
            ]),
        ], '{a} > 0', True),
        # Test signed integer comparisons.
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{a}', claims.CLAIM_TYPE_INT64, [42]),
            ]),
        ], '{a} == 42', True),
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{a}', claims.CLAIM_TYPE_INT64, [42 << 32]),
            ]),
        ], f'{{a}} == {42 << 32}', True),
        # Test boolean claims. Be careful! Windows will *crash* if you send it
        # claims that aren’t real booleans (not 0 or 1). I doubt Microsoft will
        # consider this a security issue though.
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{a}', claims.CLAIM_TYPE_BOOLEAN, [2]),
                ('{b}', claims.CLAIM_TYPE_BOOLEAN, [3]),
            ]),
        ], '{a} == {b}', (None, CRASHES_WINDOWS)),
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{a}', claims.CLAIM_TYPE_BOOLEAN, [1]),
                ('{b}', claims.CLAIM_TYPE_BOOLEAN, [1]),
            ]),
        ], '{a} == {b}', True),
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{a}', claims.CLAIM_TYPE_BOOLEAN, [1]),
            ]),
        ], '{a} == 42', None),
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{a}', claims.CLAIM_TYPE_BOOLEAN, [1]),
                ('{b}', claims.CLAIM_TYPE_BOOLEAN, [1]),
            ]),
        ], '{a} && {b}', True),
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{a}', claims.CLAIM_TYPE_BOOLEAN, [0]),
                ('{b}', claims.CLAIM_TYPE_BOOLEAN, [1]),
            ]),
        ], '{a} && {b}', False),
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{a}', claims.CLAIM_TYPE_BOOLEAN, [0]),
                ('{b}', claims.CLAIM_TYPE_BOOLEAN, [0]),
            ]),
        ], '{a} && {b}', False),
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{a}', claims.CLAIM_TYPE_BOOLEAN, [1]),
                ('{b}', claims.CLAIM_TYPE_BOOLEAN, [1]),
            ]),
        ], '{a} || {b}', True),
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{a}', claims.CLAIM_TYPE_BOOLEAN, [1]),
                ('{b}', claims.CLAIM_TYPE_BOOLEAN, [0]),
            ]),
        ], '{a} || {b}', True),
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{a}', claims.CLAIM_TYPE_BOOLEAN, [0]),
                ('{b}', claims.CLAIM_TYPE_BOOLEAN, [0]),
            ]),
        ], '{a} || {b}', False),
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{a}', claims.CLAIM_TYPE_BOOLEAN, [0]),
            ]),
        ], '!({a})', True),
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{a}', claims.CLAIM_TYPE_BOOLEAN, [0]),
            ]),
        ], '!(!(!(!({a}))))', False),
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{a}', claims.CLAIM_TYPE_BOOLEAN, [0]),
            ]),
        ], '!({a} && {a})', True),
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{a}', claims.CLAIM_TYPE_BOOLEAN, [1]),
                ('{b}', claims.CLAIM_TYPE_BOOLEAN, [0]),
            ]),
        ], '{a} && !({b} || {b})', True),
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{a}', claims.CLAIM_TYPE_BOOLEAN, [0]),
            ]),
        ], '!({a}) || !({a})', True),
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{a}', claims.CLAIM_TYPE_BOOLEAN, [1]),
                ('{b}', claims.CLAIM_TYPE_BOOLEAN, [0]),
            ]),
        ], '{a} && !({b})', None),
        # Expressions containing the ‘not’ operator are occasionally evaluated
        # inconsistently, as evidenced here. ‘a || !a’ evaluates to ‘unknown’…
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{a}', claims.CLAIM_TYPE_BOOLEAN, [1]),
            ]),
        ], '{a} || !({a})', None),
        # …but ‘!a || a’ — the same expression, just with the operands switched
        # round — evaluates to ‘true’.
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{a}', claims.CLAIM_TYPE_BOOLEAN, [1]),
            ]),
        ], '!({a}) || {a}', True),
        # This inconsistency is not observed with other boolean expressions,
        # such as ‘a || a’.
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{a}', claims.CLAIM_TYPE_BOOLEAN, [1]),
            ]),
        ], '{a} || ({a} || {a})', True),
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{b}', claims.CLAIM_TYPE_BOOLEAN, [1]),
            ]),
        ], '({b} || {b}) || {b}', True),
        # Test a very large claim. Much larger than this, and
        # conditional_ace_encode_binary() will refuse to encode the conditions.
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{large_claim}', claims.CLAIM_TYPE_STRING, ['z' * 4900]),
            ]),
        ], f'{{large_claim}} == "{"z" * 4900}"', True),
        # Test an even larger claim. Windows does not appear to like receiving
        # a claim this large.
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{larger_claim}', claims.CLAIM_TYPE_STRING, ['z' * 100000]),
            ]),
        ], '{larger_claim} > "z"', (True, CRASHES_WINDOWS)),
        # Test a great number of claims. Windows does not appear to like
        # receiving this many claims.
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{many_claims}', claims.CLAIM_TYPE_UINT64,
                 list(range(0, 100000))),
            ]),
        ], '{many_claims} Any_of "99999"', (True, CRASHES_WINDOWS)),
        # Test a claim with a very long name. Much larger than this, and
        # conditional_ace_encode_binary() will refuse to encode the conditions.
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{long_name}', claims.CLAIM_TYPE_STRING, ['a']),
            ]),
        ], '{long_name} == "a"', {'long_name': 'z' * 4900}, True),
        # Test attribute name escaping.
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{escaped_claim}', claims.CLAIM_TYPE_STRING, ['claim value']),
            ]),
        ], '{escaped_claim} == "claim value"',
            {'escaped_claim': '(:foo:! /&/ :bar:!)'}, True),
        # Test a claim whose name consists entirely of dots.
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{dotty_claim}', claims.CLAIM_TYPE_STRING, ['a']),
            ]),
        ], '{dotty_claim} == "a"', {'dotty_claim': '...'}, True),
        # Test a claim whose name consists of the first thousand non‐zero
        # Unicode codepoints.
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{1000_unicode}', claims.CLAIM_TYPE_STRING, ['a']),
            ]),
        ], '{1000_unicode} == "a"',
            {'1000_unicode': ''.join(map(chr, range(1, 1001)))}, True),
        # Test a claim whose name consists of some higher Unicode codepoints,
        # including non‐BMP ones.
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{higher_unicode}', claims.CLAIM_TYPE_STRING, ['a']),
            ]),
        ], '{higher_unicode} == "a"',
            {'higher_unicode': ''.join(map(chr, range(0xfe00, 0x10800)))}, True),
        # Duplicate claim values are not allowed…
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{a}', claims.CLAIM_TYPE_INT64, [42, 42, 42]),
            ]),
        ], '{a} == {a}', KDC_ERR_GENERIC),
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{a}', claims.CLAIM_TYPE_UINT64, [42, 42]),
            ]),
        ], '{a} == {a}', KDC_ERR_GENERIC),
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{a}', claims.CLAIM_TYPE_STRING, ['foo', 'foo']),
            ]),
        ], '{a} == {a}', KDC_ERR_GENERIC),
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{a}', claims.CLAIM_TYPE_STRING, ['FOO', 'foo']),
            ]),
        ], '{a} == {a}', KDC_ERR_GENERIC),
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{a}', claims.CLAIM_TYPE_BOOLEAN, [0, 0]),
            ]),
        ], '{a} == {a}', KDC_ERR_GENERIC),
        # …but it’s OK if duplicate values are spread across multiple claim
        # entries.
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{dup}', claims.CLAIM_TYPE_STRING, ['foo']),
                ('{dup}', claims.CLAIM_TYPE_STRING, ['foo']),
            ]),
            (claims.CLAIMS_SOURCE_TYPE_CERTIFICATE, [
                ('{dup}', claims.CLAIM_TYPE_UINT64, [42]),
                ('{dup}', claims.CLAIM_TYPE_UINT64, [42]),
            ]),
            (claims.CLAIMS_SOURCE_TYPE_CERTIFICATE, [
                ('{dup}', claims.CLAIM_TYPE_STRING, ['foo']),
                ('{dup}', claims.CLAIM_TYPE_STRING, ['foo']),
                ('{dup}', claims.CLAIM_TYPE_STRING, ['foo', 'bar']),
                ('{dup}', claims.CLAIM_TYPE_STRING, ['foo', 'bar']),
            ]),
        ], '{dup} == {dup}', True),
        # Test invalid claim types. Be careful! Windows will *crash* if you
        # send it invalid claim types. I doubt Microsoft will consider this a
        # security issue though.
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{invalid_sid}', 5, []),
            ]),
        ], '{invalid_sid} == {invalid_sid}', (None, CRASHES_WINDOWS)),
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{invalid_octet_string}', 16, []),
            ]),
        ], '{invalid_octet_string} == {invalid_octet_string}', (None, CRASHES_WINDOWS)),
        # Sending an empty string will crash Windows.
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{empty_string}', claims.CLAIM_TYPE_STRING, ['']),
            ]),
        ], '{empty_string}', (None, CRASHES_WINDOWS)),
        # But sending empty arrays is OK.
        ([
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                ('{empty_array}', claims.CLAIM_TYPE_INT64, []),
                ('{empty_array}', claims.CLAIM_TYPE_UINT64, []),
                ('{empty_array}', claims.CLAIM_TYPE_BOOLEAN, []),
                ('{empty_array}', claims.CLAIM_TYPE_STRING, []),
            ]),
        ], '{empty_array}', None),
    ]
+
+ def _test_pac_claim_cmp_with_args(self,
+ pac_claims,
+ expression,
+ claim_map,
+ outcome):
+ self.assertIsInstance(expression, str)
+
+ try:
+ outcome, crashes_windows = outcome
+ self.assertIs(crashes_windows, CRASHES_WINDOWS)
+ if not self.crash_windows:
+ self.skipTest('test crashes Windows servers')
+ except TypeError:
+ self.assertIsNot(outcome, CRASHES_WINDOWS)
+
+ if claim_map is None:
+ claim_map = {}
+
+ claim_ids = {}
+
+ def get_claim_id(claim_name):
+ claim = claim_ids.get(claim_name)
+ if claim is None:
+ claim = claim_map.pop(claim_name, None)
+ if claim is None:
+ claim = self.get_new_username()
+
+ claim_ids[claim_name] = claim
+
+ return claim
+
+ def formatted_claim_expression(expr):
+ formatter = Formatter()
+ result = []
+
+ for literal_text, field_name, format_spec, conversion in (
+ formatter.parse(expr)):
+ self.assertFalse(format_spec,
+ f'format specifier ({format_spec}) should '
+ f'not be specified')
+ self.assertFalse(conversion,
+ f'conversion ({conversion}) should not be '
+ 'specified')
+
+ result.append(literal_text)
+
+ if field_name is not None:
+ self.assertTrue(field_name,
+ 'a field name should be specified')
+
+ claim_id = get_claim_id(field_name)
+ claim_id = escaped_claim_id(claim_id)
+ result.append(f'@User.{claim_id}')
+
+ return ''.join(result)
+
+ # Construct the conditional ACE expression.
+ expression = formatted_claim_expression(expression)
+
+ self.assertFalse(claim_map, 'unused claim mapping(s) remain')
+
+ # Create an authentication policy that will allow authentication when
+ # the expression is true, and a second that will deny authentication in
+ # the same circumstance. By observing the results of authenticating
+ # against each of these policies in turn, we can determine whether the
+ # expression evaluates to a True, False, or Unknown value.
+
+ allowed_sddl = f'O:SYD:(XA;;CR;;;WD;({expression}))'
+ denied_sddl = f'O:SYD:(XD;;CR;;;WD;({expression}))(A;;CR;;;WD)'
+
+ allowed_policy = self.create_authn_policy(
+ enforced=True,
+ user_allowed_from=allowed_sddl)
+ denied_policy = self.create_authn_policy(
+ enforced=True,
+ user_allowed_from=denied_sddl)
+
+ # Create a user account assigned to each policy.
+ allowed_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=allowed_policy)
+ denied_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=denied_policy)
+
+ # Create a computer account.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+
+ def expected_values(val):
+ if isinstance(val, (str, bytes)):
+ return val,
+
+ return val
+
+ # Fetch the computer account’s TGT.
+ armor_tgt = self.get_tgt(mach_creds)
+
+ if pac_claims:
+ # Replace the claims in the PAC with our own.
+ armor_tgt = self.modified_ticket(
+ armor_tgt,
+ modify_pac_fn=partial(self.set_pac_claims,
+ client_claims=pac_claims,
+ claim_ids=claim_ids),
+ checksum_keys=self.get_krbtgt_checksum_key())
+
+ # The first or the second authentication request is expected to succeed
+ # if the outcome is True or False, respectively. An Unknown outcome,
+ # represented by None, will result in a policy error in either case.
+ if outcome is True:
+ allowed_error, denied_error = 0, KDC_ERR_POLICY
+ elif outcome is False:
+ allowed_error, denied_error = KDC_ERR_POLICY, 0
+ elif outcome is None:
+ allowed_error, denied_error = KDC_ERR_POLICY, KDC_ERR_POLICY
+ else:
+ allowed_error, denied_error = outcome, outcome
+
+ # Attempt to authenticate and ensure that we observe the expected
+ # results.
+ self._get_tgt(allowed_creds, armor_tgt=armor_tgt,
+ expected_error=allowed_error)
+ self._get_tgt(denied_creds, armor_tgt=armor_tgt,
+ expected_error=denied_error)
+
+ def test_rbcd_without_aa_asserted_identity(self):
+ service_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ }
+
+ self._rbcd(f'Member_of SID({self.aa_asserted_identity})',
+ service_sids=service_sids,
+ code=KDC_ERR_BADOPTION,
+ status=ntstatus.NT_STATUS_UNSUCCESSFUL,
+ edata=self.expect_padata_outer)
+
+ self._rbcd(target_policy=f'Member_of SID({self.aa_asserted_identity})',
+ service_sids=service_sids,
+ code=KDC_ERR_POLICY,
+ status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.KERBEROS_SERVER_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED,
+ edata=self.expect_padata_outer)
+
+ def test_rbcd_with_aa_asserted_identity(self):
+ service_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (self.aa_asserted_identity, SidType.EXTRA_SID, self.default_attrs),
+ }
+
+ expected_groups = service_sids | {
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, self.default_attrs),
+ }
+
+ self._rbcd(f'Member_of SID({self.aa_asserted_identity})',
+ service_sids=service_sids,
+ expected_groups=expected_groups)
+
+ self._rbcd(target_policy=f'Member_of SID({self.aa_asserted_identity})',
+ service_sids=service_sids,
+ expected_groups=expected_groups)
+
+ def test_rbcd_without_service_asserted_identity(self):
+ service_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ }
+
+ self._rbcd(f'Member_of SID({self.service_asserted_identity})',
+ service_sids=service_sids,
+ code=KDC_ERR_BADOPTION,
+ status=ntstatus.NT_STATUS_UNSUCCESSFUL,
+ edata=self.expect_padata_outer)
+
+ self._rbcd(target_policy=f'Member_of SID({self.service_asserted_identity})',
+ service_sids=service_sids,
+ code=KDC_ERR_POLICY,
+ status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.KERBEROS_SERVER_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED,
+ edata=self.expect_padata_outer)
+
+ def test_rbcd_with_service_asserted_identity(self):
+ service_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (self.service_asserted_identity, SidType.EXTRA_SID, self.default_attrs),
+ }
+
+ expected_groups = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ # The Application Authority Asserted Identity SID has replaced the
+ # Service Asserted Identity SID.
+ (self.aa_asserted_identity, SidType.EXTRA_SID, self.default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, self.default_attrs),
+ }
+
+ self._rbcd(f'Member_of SID({self.service_asserted_identity})',
+ service_sids=service_sids,
+ expected_groups=expected_groups)
+
+ self._rbcd(target_policy=f'Member_of SID({self.service_asserted_identity})',
+ service_sids=service_sids,
+ expected_groups=expected_groups)
+
+ def test_rbcd_without_claims_valid(self):
+ service_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ }
+
+ self._rbcd(f'Member_of SID({security.SID_CLAIMS_VALID})',
+ service_sids=service_sids,
+ code=KDC_ERR_BADOPTION,
+ status=ntstatus.NT_STATUS_UNSUCCESSFUL,
+ edata=self.expect_padata_outer)
+
+ self._rbcd(target_policy=f'Member_of SID({security.SID_CLAIMS_VALID})',
+ service_sids=service_sids,
+ code=KDC_ERR_POLICY,
+ status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.KERBEROS_SERVER_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED,
+ edata=self.expect_padata_outer)
+
+ def test_rbcd_with_claims_valid(self):
+ service_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, self.default_attrs),
+ }
+
+ expected_groups = service_sids | {
+ (self.aa_asserted_identity, SidType.EXTRA_SID, self.default_attrs),
+ }
+
+ self._rbcd(f'Member_of SID({security.SID_CLAIMS_VALID})',
+ service_sids=service_sids,
+ expected_groups=expected_groups)
+
+ self._rbcd(target_policy=f'Member_of SID({security.SID_CLAIMS_VALID})',
+ service_sids=service_sids,
+ expected_groups=expected_groups)
+
+ def test_rbcd_without_compounded_authentication(self):
+ service_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ }
+
+ self._rbcd(f'Member_of SID({security.SID_COMPOUNDED_AUTHENTICATION})',
+ service_sids=service_sids,
+ code=KDC_ERR_BADOPTION,
+ status=ntstatus.NT_STATUS_UNSUCCESSFUL,
+ edata=self.expect_padata_outer)
+
+ self._rbcd(target_policy=f'Member_of SID({security.SID_COMPOUNDED_AUTHENTICATION})',
+ service_sids=service_sids,
+ code=KDC_ERR_POLICY,
+ status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.KERBEROS_SERVER_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED,
+ edata=self.expect_padata_outer)
+
+ def test_rbcd_with_compounded_authentication(self):
+ service_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (security.SID_COMPOUNDED_AUTHENTICATION, SidType.EXTRA_SID, self.default_attrs),
+ }
+
+ expected_groups = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (self.aa_asserted_identity, SidType.EXTRA_SID, self.default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, self.default_attrs),
+ }
+
+ self._rbcd(f'Member_of SID({security.SID_COMPOUNDED_AUTHENTICATION})',
+ service_sids=service_sids,
+ expected_groups=expected_groups)
+
+ self._rbcd(target_policy=f'Member_of SID({security.SID_COMPOUNDED_AUTHENTICATION})',
+ service_sids=service_sids,
+ expected_groups=expected_groups)
+
+ # --- RBCD tests: special SIDs in the *client* (delegated user) PAC. ---
+ # Each scenario is checked twice: once with the conditional ACE placed in
+ # the target's msDS-AllowedToActOnBehalfOfOtherIdentity security
+ # descriptor (positional argument to _rbcd()), and once in the target's
+ # assigned authentication policy (target_policy=).  When the check is
+ # expected to fail, the two placements produce different errors:
+ # KDC_ERR_BADOPTION / NT_STATUS_UNSUCCESSFUL for the RBCD descriptor vs
+ # KDC_ERR_POLICY / NT_STATUS_AUTHENTICATION_FIREWALL_FAILED (with a
+ # KERBEROS_SERVER_RESTRICTION audit event) for the policy.
+
+ # Succeeds even though the client PAC lacks the AA asserted-identity SID
+ # — presumably the KDC adds that SID itself during the exchange; TODO
+ # confirm against the KDC implementation.
+ def test_rbcd_client_without_aa_asserted_identity(self):
+ client_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ }
+
+ self._rbcd(f'Member_of SID({self.aa_asserted_identity})',
+ client_sids=client_sids)
+
+ self._rbcd(target_policy=f'Member_of SID({self.aa_asserted_identity})',
+ client_sids=client_sids)
+
+ # With the SID already present, success is expected and the resulting
+ # PAC groups match the input exactly (expected_groups=client_sids).
+ def test_rbcd_client_with_aa_asserted_identity(self):
+ client_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (self.aa_asserted_identity, SidType.EXTRA_SID, self.default_attrs),
+ }
+
+ self._rbcd(f'Member_of SID({self.aa_asserted_identity})',
+ client_sids=client_sids,
+ expected_groups=client_sids)
+
+ self._rbcd(target_policy=f'Member_of SID({self.aa_asserted_identity})',
+ client_sids=client_sids,
+ expected_groups=client_sids)
+
+ # Unlike the AA asserted identity, the *service* asserted-identity SID is
+ # not supplied by the KDC here: requiring it when absent fails.
+ def test_rbcd_client_without_service_asserted_identity(self):
+ client_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ }
+
+ self._rbcd(f'Member_of SID({self.service_asserted_identity})',
+ client_sids=client_sids,
+ code=KDC_ERR_BADOPTION,
+ status=ntstatus.NT_STATUS_UNSUCCESSFUL,
+ edata=self.expect_padata_outer)
+
+ self._rbcd(target_policy=f'Member_of SID({self.service_asserted_identity})',
+ client_sids=client_sids,
+ code=KDC_ERR_POLICY,
+ status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.KERBEROS_SERVER_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED,
+ edata=self.expect_padata_outer)
+
+ # Negative-condition (Not_Member_of) variant: the SID is present in the
+ # PAC, yet the check still succeeds — note the condition is inverted.
+ def test_rbcd_client_with_service_asserted_identity(self):
+ client_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (self.service_asserted_identity, SidType.EXTRA_SID, self.default_attrs),
+ }
+
+ self._rbcd(f'Not_Member_of SID({self.service_asserted_identity})',
+ client_sids=client_sids,
+ expected_groups=client_sids)
+
+ self._rbcd(target_policy=f'Not_Member_of SID({self.service_asserted_identity})',
+ client_sids=client_sids,
+ expected_groups=client_sids)
+
+ # Requiring the Claims Valid SID succeeds even when it is missing from
+ # the supplied PAC — presumably added by the KDC; TODO confirm.
+ def test_rbcd_client_without_claims_valid(self):
+ client_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ }
+
+ self._rbcd(f'Member_of SID({security.SID_CLAIMS_VALID})',
+ client_sids=client_sids)
+
+ self._rbcd(target_policy=f'Member_of SID({security.SID_CLAIMS_VALID})',
+ client_sids=client_sids)
+
+ def test_rbcd_client_with_claims_valid(self):
+ client_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, self.default_attrs),
+ }
+
+ self._rbcd(f'Member_of SID({security.SID_CLAIMS_VALID})',
+ client_sids=client_sids,
+ expected_groups=client_sids)
+
+ self._rbcd(target_policy=f'Member_of SID({security.SID_CLAIMS_VALID})',
+ client_sids=client_sids,
+ expected_groups=client_sids)
+
+ # The Compounded Authentication SID is not added for the client here:
+ # requiring it when absent fails with the placement-specific errors.
+ def test_rbcd_client_without_compounded_authentication(self):
+ client_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ }
+
+ self._rbcd(f'Member_of SID({security.SID_COMPOUNDED_AUTHENTICATION})',
+ client_sids=client_sids,
+ code=KDC_ERR_BADOPTION,
+ status=ntstatus.NT_STATUS_UNSUCCESSFUL,
+ edata=self.expect_padata_outer)
+
+ self._rbcd(target_policy=f'Member_of SID({security.SID_COMPOUNDED_AUTHENTICATION})',
+ client_sids=client_sids,
+ code=KDC_ERR_POLICY,
+ status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.KERBEROS_SERVER_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED,
+ edata=self.expect_padata_outer)
+
+ def test_rbcd_client_with_compounded_authentication(self):
+ client_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (security.SID_COMPOUNDED_AUTHENTICATION, SidType.EXTRA_SID, self.default_attrs),
+ }
+
+ self._rbcd(f'Not_Member_of SID({security.SID_COMPOUNDED_AUTHENTICATION})',
+ client_sids=client_sids,
+ expected_groups=client_sids)
+
+ self._rbcd(target_policy=f'Not_Member_of SID({security.SID_COMPOUNDED_AUTHENTICATION})',
+ client_sids=client_sids,
+ expected_groups=client_sids)
+
+ # --- RBCD tests: special SIDs in the *device* (FAST armor) PAC. ---
+ # These mirror the client-PAC tests above but use Device_Member_of /
+ # Not_Device_Member_of, which evaluate the armor machine's PAC
+ # (device_sids=) rather than the delegated client's.  Note the asymmetry
+ # with the client tests: Device_Member_of on an absent SID *fails* —
+ # the KDC evidently does not augment the device PAC the way it
+ # presumably does the client PAC (TODO confirm).
+
+ def test_rbcd_device_without_aa_asserted_identity(self):
+ device_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ }
+
+ self._rbcd(f'Device_Member_of SID({self.aa_asserted_identity})',
+ device_sids=device_sids,
+ code=KDC_ERR_BADOPTION,
+ status=ntstatus.NT_STATUS_UNSUCCESSFUL,
+ edata=self.expect_padata_outer)
+
+ self._rbcd(target_policy=f'Device_Member_of SID({self.aa_asserted_identity})',
+ device_sids=device_sids,
+ code=KDC_ERR_POLICY,
+ status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.KERBEROS_SERVER_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED,
+ edata=self.expect_padata_outer)
+
+ # Inverted condition on the same absent SID: succeeds.
+ def test_rbcd_device_without_aa_asserted_identity_not_memberof(self):
+ device_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ }
+
+ self._rbcd(f'Not_Device_Member_of SID({self.aa_asserted_identity})',
+ device_sids=device_sids)
+
+ self._rbcd(target_policy=f'Not_Device_Member_of SID({self.aa_asserted_identity})',
+ device_sids=device_sids)
+
+ def test_rbcd_device_with_aa_asserted_identity(self):
+ device_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (self.aa_asserted_identity, SidType.EXTRA_SID, self.default_attrs),
+ }
+
+ self._rbcd(f'Device_Member_of SID({self.aa_asserted_identity})',
+ device_sids=device_sids)
+
+ self._rbcd(target_policy=f'Device_Member_of SID({self.aa_asserted_identity})',
+ device_sids=device_sids)
+
+ def test_rbcd_device_without_service_asserted_identity(self):
+ device_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ }
+
+ self._rbcd(f'Device_Member_of SID({self.service_asserted_identity})',
+ device_sids=device_sids,
+ code=KDC_ERR_BADOPTION,
+ status=ntstatus.NT_STATUS_UNSUCCESSFUL,
+ edata=self.expect_padata_outer)
+
+ self._rbcd(target_policy=f'Device_Member_of SID({self.service_asserted_identity})',
+ device_sids=device_sids,
+ code=KDC_ERR_POLICY,
+ status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.KERBEROS_SERVER_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED,
+ edata=self.expect_padata_outer)
+
+ def test_rbcd_device_with_service_asserted_identity(self):
+ device_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (self.service_asserted_identity, SidType.EXTRA_SID, self.default_attrs),
+ }
+
+ self._rbcd(f'Device_Member_of SID({self.service_asserted_identity})',
+ device_sids=device_sids)
+
+ self._rbcd(target_policy=f'Device_Member_of SID({self.service_asserted_identity})',
+ device_sids=device_sids)
+
+ def test_rbcd_device_without_claims_valid(self):
+ device_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ }
+
+ self._rbcd(f'Device_Member_of SID({security.SID_CLAIMS_VALID})',
+ device_sids=device_sids,
+ code=KDC_ERR_BADOPTION,
+ status=ntstatus.NT_STATUS_UNSUCCESSFUL,
+ edata=self.expect_padata_outer)
+
+ self._rbcd(target_policy=f'Device_Member_of SID({security.SID_CLAIMS_VALID})',
+ device_sids=device_sids,
+ code=KDC_ERR_POLICY,
+ status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.KERBEROS_SERVER_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED,
+ edata=self.expect_padata_outer)
+
+ def test_rbcd_device_with_claims_valid(self):
+ device_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, self.default_attrs),
+ }
+
+ self._rbcd(f'Device_Member_of SID({security.SID_CLAIMS_VALID})',
+ device_sids=device_sids)
+
+ self._rbcd(target_policy=f'Device_Member_of SID({security.SID_CLAIMS_VALID})',
+ device_sids=device_sids)
+
+ def test_rbcd_device_without_compounded_authentication(self):
+ device_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ }
+
+ self._rbcd(f'Device_Member_of SID({security.SID_COMPOUNDED_AUTHENTICATION})',
+ device_sids=device_sids,
+ code=KDC_ERR_BADOPTION,
+ status=ntstatus.NT_STATUS_UNSUCCESSFUL,
+ edata=self.expect_padata_outer)
+
+ self._rbcd(target_policy=f'Device_Member_of SID({security.SID_COMPOUNDED_AUTHENTICATION})',
+ device_sids=device_sids,
+ code=KDC_ERR_POLICY,
+ status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.KERBEROS_SERVER_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED,
+ edata=self.expect_padata_outer)
+
+ def test_rbcd_device_with_compounded_authentication(self):
+ device_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (security.SID_COMPOUNDED_AUTHENTICATION, SidType.EXTRA_SID, self.default_attrs),
+ }
+
+ self._rbcd(f'Device_Member_of SID({security.SID_COMPOUNDED_AUTHENTICATION})',
+ device_sids=device_sids)
+
+ self._rbcd(target_policy=f'Device_Member_of SID({security.SID_COMPOUNDED_AUTHENTICATION})',
+ device_sids=device_sids)
+
+ # --- RBCD with RODC-issued tickets, in every client/service/device
+ # combination. ---
+ # '{service_sid}' is a placeholder substituted by _rbcd() via
+ # str.format() with the impersonating service's actual SID, so the
+ # condition always matches.  Any combination involving an RODC-issued
+ # *device* (armor) ticket is tagged with CRASHES_WINDOWS: the test is
+ # skipped unless self.crash_windows is set (see the code= handling at
+ # the top of _rbcd()).
+
+ # Baseline: plain RBCD, everything issued by the main krbtgt.
+ def test_rbcd(self):
+ self._rbcd('Member_of SID({service_sid})')
+
+ def test_rbcd_device_from_rodc(self):
+ self._rbcd('Member_of SID({service_sid})',
+ device_from_rodc=True,
+ code=(0, CRASHES_WINDOWS))
+
+ def test_rbcd_service_from_rodc(self):
+ self._rbcd('Member_of SID({service_sid})',
+ service_from_rodc=True)
+
+ def test_rbcd_device_and_service_from_rodc(self):
+ self._rbcd('Member_of SID({service_sid})',
+ service_from_rodc=True,
+ device_from_rodc=True,
+ code=(0, CRASHES_WINDOWS))
+
+ def test_rbcd_client_from_rodc(self):
+ self._rbcd('Member_of SID({service_sid})',
+ client_from_rodc=True)
+
+ def test_rbcd_client_and_device_from_rodc(self):
+ self._rbcd('Member_of SID({service_sid})',
+ client_from_rodc=True,
+ device_from_rodc=True,
+ code=(0, CRASHES_WINDOWS))
+
+ def test_rbcd_client_and_service_from_rodc(self):
+ self._rbcd('Member_of SID({service_sid})',
+ client_from_rodc=True,
+ service_from_rodc=True)
+
+ def test_rbcd_all_from_rodc(self):
+ self._rbcd('Member_of SID({service_sid})',
+ client_from_rodc=True,
+ service_from_rodc=True,
+ device_from_rodc=True,
+ code=(0, CRASHES_WINDOWS))
+
+ # --- Well-known-SID membership of the delegating proxy and the device,
+ # as seen by RBCD conditional ACEs. ---
+ # Each test asserts whether a given well-known SID is treated as a group
+ # the principal belongs to: expected members are SID_WORLD,
+ # SID_NT_AUTHENTICATED_USERS, the AA asserted identity and
+ # SID_CLAIMS_VALID; expected non-members are SID_NT_NETWORK,
+ # SID_SERVICE_ASSERTED_IDENTITY and SID_COMPOUNDED_AUTHENTICATION.
+
+ def test_delegating_proxy_in_world_group_rbcd(self):
+ self._check_delegating_proxy_in_group_rbcd(security.SID_WORLD)
+
+ def test_delegating_proxy_in_network_group_rbcd(self):
+ self._check_delegating_proxy_not_in_group_rbcd(security.SID_NT_NETWORK)
+
+ def test_delegating_proxy_in_authenticated_users_rbcd(self):
+ self._check_delegating_proxy_in_group_rbcd(
+ security.SID_NT_AUTHENTICATED_USERS)
+
+ def test_delegating_proxy_in_aa_asserted_identity_rbcd(self):
+ self._check_delegating_proxy_in_group_rbcd(
+ security.SID_AUTHENTICATION_AUTHORITY_ASSERTED_IDENTITY)
+
+ def test_delegating_proxy_in_service_asserted_identity_rbcd(self):
+ self._check_delegating_proxy_not_in_group_rbcd(
+ security.SID_SERVICE_ASSERTED_IDENTITY)
+
+ def test_delegating_proxy_in_compounded_authentication_rbcd(self):
+ self._check_delegating_proxy_not_in_group_rbcd(
+ security.SID_COMPOUNDED_AUTHENTICATION)
+
+ def test_delegating_proxy_in_claims_valid_rbcd(self):
+ self._check_delegating_proxy_in_group_rbcd(security.SID_CLAIMS_VALID)
+
+ def test_device_in_world_group_rbcd(self):
+ self._check_device_in_group_rbcd(security.SID_WORLD)
+
+ def test_device_in_network_group_rbcd(self):
+ self._check_device_not_in_group_rbcd(security.SID_NT_NETWORK)
+
+ def test_device_in_authenticated_users_rbcd(self):
+ self._check_device_in_group_rbcd(security.SID_NT_AUTHENTICATED_USERS)
+
+ def test_device_in_aa_asserted_identity_rbcd(self):
+ self._check_device_in_group_rbcd(
+ security.SID_AUTHENTICATION_AUTHORITY_ASSERTED_IDENTITY)
+
+ def test_device_in_service_asserted_identity_rbcd(self):
+ self._check_device_not_in_group_rbcd(
+ security.SID_SERVICE_ASSERTED_IDENTITY)
+
+ def test_device_in_compounded_authentication_rbcd(self):
+ self._check_device_not_in_group_rbcd(
+ security.SID_COMPOUNDED_AUTHENTICATION)
+
+ def test_device_in_claims_valid_rbcd(self):
+ self._check_device_in_group_rbcd(security.SID_CLAIMS_VALID)
+
+ # Thin wrappers mapping the four test flavours onto the single
+ # parameterised checker below.
+ def _check_delegating_proxy_in_group_rbcd(self, group):
+ self._check_membership_rbcd(group, expect_in_group=True)
+
+ def _check_delegating_proxy_not_in_group_rbcd(self, group):
+ self._check_membership_rbcd(group, expect_in_group=False)
+
+ def _check_device_in_group_rbcd(self, group):
+ self._check_membership_rbcd(group, expect_in_group=True, device=True)
+
+ def _check_device_not_in_group_rbcd(self, group):
+ self._check_membership_rbcd(group, expect_in_group=False, device=True)
+
+ def _check_membership_rbcd(self,
+ group,
+ *,
+ expect_in_group,
+ device=False):
+ """Test that authentication succeeds or fails when the delegating proxy
+ is required to belong to a certain group.
+
+ The check is run twice against two freshly created target accounts:
+ one whose RBCD descriptor requires membership of *group*
+ ('Member_of' / 'Device_Member_of') and one that requires
+ NON-membership ('Not_Member_of' / 'Not_Device_Member_of').  Exactly
+ one of the two S4U2Proxy requests is expected to succeed, depending
+ on *expect_in_group*.
+
+ :param group: SID string to test membership of.
+ :param expect_in_group: whether the principal is expected to be
+ treated as a member of *group*.
+ :param device: if True, test the device (FAST armor) PAC via
+ Device_Member_of instead of the delegating proxy's PAC.
+ """
+
+ sddl_op = 'Device_Member_of' if device else 'Member_of'
+
+ samdb = self.get_samdb()
+ functional_level = self.get_domain_functional_level(samdb)
+
+ if functional_level < dsdb.DS_DOMAIN_FUNCTION_2008:
+ self.skipTest('RBCD requires FL2008')
+
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={'id': 'device'})
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create a user account.
+ client_creds = self._get_creds(account_type=self.AccountType.USER)
+
+ # The client TGT must be forwardable for S4U2Proxy.
+ client_tkt_options = 'forwardable'
+ expected_flags = krb5_asn1.TicketFlags(client_tkt_options)
+ client_tgt = self.get_tgt(client_creds,
+ kdc_options=client_tkt_options,
+ expected_flags=expected_flags)
+
+ client_sid = client_creds.get_sid()
+
+ client_username = client_creds.get_username()
+ client_cname = self.PrincipalName_create(name_type=NT_PRINCIPAL,
+ names=[client_username])
+
+ # The delegating proxy: the service that will forward the client's
+ # ticket to the target.
+ service_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={'id': 'service'})
+ service_tgt = self.get_tgt(service_creds)
+
+ # The client's ticket to the service, later presented as the
+ # additional ticket in the S4U2Proxy request.
+ client_service_tkt = self.get_service_ticket(
+ client_tgt,
+ service_creds,
+ kdc_options=client_tkt_options,
+ expected_flags=expected_flags)
+
+ domain_sid_str = samdb.get_domain_sid()
+ domain_sid = security.dom_sid(domain_sid_str)
+
+ # Require the principal to belong to a certain group.
+ in_group_sddl = self.allow_if(f'{sddl_op} {{SID({group})}}')
+ in_group_descriptor = security.descriptor.from_sddl(in_group_sddl,
+ domain_sid)
+
+ # Create a target account that allows RBCD if the principal belongs to
+ # the group.
+ in_group_target_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={
+ 'additional_details': (
+ ('msDS-AllowedToActOnBehalfOfOtherIdentity',
+ ndr_pack(in_group_descriptor)),
+ ),
+ })
+
+ # S4U2Proxy requires the additional-ticket KDC option.
+ kdc_options = str(krb5_asn1.KDCOptions('cname-in-addl-tkt'))
+
+ in_group_target_key = self.TicketDecryptionKey_from_creds(
+ in_group_target_creds)
+ in_group_target_etypes = in_group_target_creds.tgs_supported_enctypes
+
+ # Strip the trailing '$' from the machine account name to build the
+ # expected transited-services entry.
+ service_name = service_creds.get_username()
+ if service_name[-1] == '$':
+ service_name = service_name[:-1]
+ expected_transited_services = [
+ f'host/{service_name}@{service_creds.get_realm()}'
+ ]
+
+ pac_options = '1001' # supports claims, RBCD
+
+ # (code, status, expect_edata) triples for the two possible outcomes.
+ success_result = 0, None, None
+ failure_result = (
+ KDC_ERR_BADOPTION,
+ ntstatus.NT_STATUS_UNSUCCESSFUL,
+ self.expect_padata_outer,
+ )
+
+ code, status, expect_edata = (success_result if expect_in_group
+ else failure_result)
+
+ # Test whether obtaining a service ticket with RBCD is allowed.
+ self._tgs_req(service_tgt,
+ code,
+ service_creds,
+ in_group_target_creds,
+ armor_tgt=mach_tgt,
+ kdc_options=kdc_options,
+ pac_options=pac_options,
+ expected_cname=client_cname,
+ expected_account_name=client_username,
+ additional_ticket=client_service_tkt,
+ decryption_key=in_group_target_key,
+ expected_sid=client_sid,
+ expected_supported_etypes=in_group_target_etypes,
+ expected_proxy_target=in_group_target_creds.get_spn(),
+ expected_transited_services=expected_transited_services,
+ expected_status=status,
+ expect_edata=expect_edata)
+
+ # On failure the log records the forwarding service as the client.
+ effective_client_creds = service_creds if code else client_creds
+ self.check_tgs_log(effective_client_creds, in_group_target_creds,
+ checked_creds=service_creds,
+ status=status)
+
+ # Require the principal not to belong to a certain group.
+ not_in_group_sddl = self.allow_if(f'Not_{sddl_op} {{SID({group})}}')
+ not_in_group_descriptor = security.descriptor.from_sddl(
+ not_in_group_sddl, domain_sid)
+
+ # Create a target account that allows RBCD if the principal does not
+ # belong to the group.
+ not_in_group_target_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={
+ 'additional_details': (
+ ('msDS-AllowedToActOnBehalfOfOtherIdentity',
+ ndr_pack(not_in_group_descriptor)),
+ ),
+ })
+
+ not_in_group_target_key = self.TicketDecryptionKey_from_creds(
+ not_in_group_target_creds)
+ not_in_group_target_etypes = (
+ not_in_group_target_creds.tgs_supported_enctypes)
+
+ # Inverted expectation for the Not_Member_of descriptor.
+ code, status, expect_edata = (failure_result if expect_in_group
+ else success_result)
+
+ # Test whether obtaining a service ticket with RBCD is allowed.
+ self._tgs_req(service_tgt,
+ code,
+ service_creds,
+ not_in_group_target_creds,
+ armor_tgt=mach_tgt,
+ kdc_options=kdc_options,
+ pac_options=pac_options,
+ expected_cname=client_cname,
+ expected_account_name=client_username,
+ additional_ticket=client_service_tkt,
+ decryption_key=not_in_group_target_key,
+ expected_sid=client_sid,
+ expected_supported_etypes=not_in_group_target_etypes,
+ expected_proxy_target=not_in_group_target_creds.get_spn(),
+ expected_transited_services=expected_transited_services,
+ expected_status=status,
+ expect_edata=expect_edata)
+
+ effective_client_creds = service_creds if code else client_creds
+ self.check_tgs_log(effective_client_creds, not_in_group_target_creds,
+ checked_creds=service_creds,
+ status=status)
+
+ # Core driver for all RBCD (resource-based constrained delegation,
+ # S4U2Proxy) tests above.  Sets up client, impersonating service, armor
+ # device and target accounts, optionally rewrites each PAC, performs the
+ # constrained-delegation TGS-REQ and checks both the KDC response and
+ # the audit log.
+ def _rbcd(self,
+ rbcd_expression=None,
+ *,
+ code=0,
+ status=None,
+ event=AuditEvent.OK,
+ reason=AuditReason.NONE,
+ edata=False,
+ target_policy=None,
+ client_from_rodc=False,
+ service_from_rodc=False,
+ device_from_rodc=False,
+ client_sids=None,
+ client_claims=None,
+ service_sids=None,
+ service_claims=None,
+ device_sids=None,
+ device_claims=None,
+ expected_groups=None,
+ expected_claims=None):
+ # code may be a plain error code, or a (code, CRASHES_WINDOWS) pair
+ # marking a scenario known to crash Windows servers; such tests are
+ # skipped unless self.crash_windows is set.  Unpacking a plain int
+ # raises TypeError, which selects the non-crashing path.
+ try:
+ code, crashes_windows = code
+ self.assertIs(crashes_windows, CRASHES_WINDOWS)
+ if not self.crash_windows:
+ self.skipTest('test crashes Windows servers')
+ except TypeError:
+ self.assertIsNot(code, CRASHES_WINDOWS)
+
+ samdb = self.get_samdb()
+ functional_level = self.get_domain_functional_level(samdb)
+
+ if functional_level < dsdb.DS_DOMAIN_FUNCTION_2008:
+ self.skipTest('RBCD requires FL2008')
+
+ domain_sid_str = samdb.get_domain_sid()
+ domain_sid = security.dom_sid(domain_sid_str)
+
+ # The user being delegated.  If client_from_rodc, mark the account
+ # as replicable to (and revealed to) the mock RODC.
+ client_creds = self.get_cached_creds(
+ account_type=self.AccountType.USER,
+ opts={
+ 'allowed_replication_mock': client_from_rodc,
+ 'revealed_to_mock_rodc': client_from_rodc,
+ })
+ client_sid = client_creds.get_sid()
+
+ client_username = client_creds.get_username()
+ client_cname = self.PrincipalName_create(name_type=NT_PRINCIPAL,
+ names=[client_username])
+
+ # The client TGT must be forwardable for S4U2Proxy.
+ client_tkt_options = 'forwardable'
+ expected_flags = krb5_asn1.TicketFlags(client_tkt_options)
+
+ checksum_key = self.get_krbtgt_checksum_key()
+
+ # Only fetch RODC krbtgt material when some ticket is RODC-issued;
+ # rodc_checksum_key is otherwise deliberately left unbound.
+ if client_from_rodc or service_from_rodc or device_from_rodc:
+ rodc_krbtgt_creds = self.get_mock_rodc_krbtgt_creds()
+ rodc_krbtgt_key = self.TicketDecryptionKey_from_creds(rodc_krbtgt_creds)
+ rodc_checksum_key = {
+ krb5pac.PAC_TYPE_KDC_CHECKSUM: rodc_krbtgt_key,
+ }
+
+ client_tgt = self.get_tgt(client_creds,
+ kdc_options=client_tkt_options,
+ expected_flags=expected_flags)
+
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={
+ 'allowed_replication_mock': device_from_rodc,
+ 'revealed_to_mock_rodc': device_from_rodc,
+ })
+ mach_tgt = self.get_tgt(mach_creds)
+ # Rewrite the device PAC with the requested SIDs/claims and, for the
+ # RODC case, re-key and re-checksum the armor TGT.
+ device_modify_pac_fn = []
+ if device_sids is not None:
+ device_modify_pac_fn.append(partial(self.set_pac_sids,
+ new_sids=device_sids))
+ if device_claims is not None:
+ device_modify_pac_fn.append(partial(self.set_pac_claims,
+ client_claims=device_claims))
+ mach_tgt = self.modified_ticket(
+ mach_tgt,
+ modify_pac_fn=device_modify_pac_fn,
+ new_ticket_key=rodc_krbtgt_key if device_from_rodc else None,
+ checksum_keys=rodc_checksum_key if device_from_rodc else checksum_key)
+
+ # The impersonating service that requests the delegated ticket.
+ service_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={
+ 'id': 1,
+ 'allowed_replication_mock': service_from_rodc,
+ 'revealed_to_mock_rodc': service_from_rodc,
+ })
+ service_tgt = self.get_tgt(service_creds)
+
+ # Same PAC-rewriting treatment for the service TGT.
+ service_modify_pac_fn = []
+ if service_sids is not None:
+ service_modify_pac_fn.append(partial(self.set_pac_sids,
+ new_sids=service_sids))
+ if service_claims is not None:
+ service_modify_pac_fn.append(partial(self.set_pac_claims,
+ client_claims=service_claims))
+ service_tgt = self.modified_ticket(
+ service_tgt,
+ modify_pac_fn=service_modify_pac_fn,
+ new_ticket_key=rodc_krbtgt_key if service_from_rodc else None,
+ checksum_keys=rodc_checksum_key if service_from_rodc else checksum_key)
+
+ # Optional authentication policy assigned to the target account.
+ # '{service_sid}' placeholders in the expression are substituted
+ # with the impersonating service's SID.
+ if target_policy is None:
+ policy = None
+ assigned_policy = None
+ else:
+ sddl = f'O:SYD:(XA;;CR;;;WD;({target_policy.format(service_sid=service_creds.get_sid())}))'
+ policy = self.create_authn_policy(enforced=True,
+ computer_allowed_to=sddl)
+ assigned_policy = str(policy.dn)
+
+ # RBCD security descriptor: a conditional ACE when an expression is
+ # given, otherwise an unconditional allow-everyone ACE.
+ if rbcd_expression is not None:
+ sddl = f'O:SYD:(XA;;CR;;;WD;({rbcd_expression.format(service_sid=service_creds.get_sid())}))'
+ else:
+ sddl = 'O:SYD:(A;;CR;;;WD)'
+ descriptor = security.descriptor.from_sddl(sddl, domain_sid)
+ descriptor = ndr_pack(descriptor)
+
+ # Create a target account with the assigned policy.
+ target_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={
+ 'assigned_policy': assigned_policy,
+ 'additional_details': (
+ ('msDS-AllowedToActOnBehalfOfOtherIdentity', descriptor),
+ ),
+ })
+
+ # The client's ticket to the service, presented as the additional
+ # ticket in the S4U2Proxy request, with its PAC optionally rewritten.
+ client_service_tkt = self.get_service_ticket(
+ client_tgt,
+ service_creds,
+ kdc_options=client_tkt_options,
+ expected_flags=expected_flags)
+ client_modify_pac_fn = []
+ if client_sids is not None:
+ client_modify_pac_fn.append(partial(self.set_pac_sids,
+ new_sids=client_sids))
+ if client_claims is not None:
+ client_modify_pac_fn.append(partial(self.set_pac_claims,
+ client_claims=client_claims))
+ client_service_tkt = self.modified_ticket(client_service_tkt,
+ modify_pac_fn=client_modify_pac_fn,
+ checksum_keys=rodc_checksum_key if client_from_rodc else checksum_key)
+
+ # S4U2Proxy requires the additional-ticket KDC option.
+ kdc_options = str(krb5_asn1.KDCOptions('cname-in-addl-tkt'))
+
+ target_decryption_key = self.TicketDecryptionKey_from_creds(
+ target_creds)
+ target_etypes = target_creds.tgs_supported_enctypes
+
+ # Strip the trailing '$' from the machine account name to build the
+ # expected transited-services entry.
+ service_name = service_creds.get_username()
+ if service_name[-1] == '$':
+ service_name = service_name[:-1]
+ expected_transited_services = [
+ f'host/{service_name}@{service_creds.get_realm()}'
+ ]
+
+ # Resolve placeholder SIDs in the expectation against this domain.
+ expected_groups = self.map_sids(expected_groups, None, domain_sid_str)
+
+ # Show that obtaining a service ticket with RBCD is allowed.
+ self._tgs_req(service_tgt, code, service_creds, target_creds,
+ armor_tgt=mach_tgt,
+ kdc_options=kdc_options,
+ pac_options='1001', # supports claims, RBCD
+ expected_cname=client_cname,
+ expected_account_name=client_username,
+ additional_ticket=client_service_tkt,
+ decryption_key=target_decryption_key,
+ expected_sid=client_sid,
+ expected_groups=expected_groups,
+ expect_client_claims=bool(expected_claims) or None,
+ expected_client_claims=expected_claims,
+ expected_supported_etypes=target_etypes,
+ expected_proxy_target=target_creds.get_spn(),
+ expected_transited_services=expected_transited_services,
+ expected_status=status,
+ expect_edata=edata)
+
+ # On failure the log records the forwarding service as the client.
+ if code:
+ effective_client_creds = service_creds
+ else:
+ effective_client_creds = client_creds
+
+ self.check_tgs_log(effective_client_creds, target_creds,
+ policy=policy,
+ checked_creds=service_creds,
+ status=status,
+ event=event,
+ reason=reason)
+
+ # --- Plain TGS-REQ tests: which special SIDs the KDC preserves, adds or
+ # strips when re-issuing a PAC. ---
+ # These drive the _tgs() helper (defined elsewhere in this file) with a
+ # rewritten client PAC.  use_fast=False means no armor; client_from_rodc /
+ # device_from_rodc select RODC-issued tickets, and scenarios with an
+ # RODC-issued device ticket are tagged CRASHES_WINDOWS (skipped unless
+ # self.crash_windows is set).
+
+ def test_tgs_claims_valid_missing(self):
+ """Test that the Claims Valid SID is not added to the PAC when
+ performing a TGS‐REQ."""
+ client_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (self.aa_asserted_identity, SidType.EXTRA_SID, self.default_attrs),
+ }
+
+ self._tgs(use_fast=False,
+ client_sids=client_sids,
+ expected_groups=client_sids)
+
+ def test_tgs_claims_valid_missing_from_rodc(self):
+ """Test that the Claims Valid SID *is* added to the PAC when
+ performing a TGS‐REQ with an RODC‐issued TGT."""
+ client_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (self.aa_asserted_identity, SidType.EXTRA_SID, self.default_attrs),
+ }
+
+ # Same SIDs as supplied, plus the KDC-added Claims Valid SID.
+ expected_groups = client_sids | {
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, self.default_attrs),
+ }
+
+ self._tgs(use_fast=False,
+ client_from_rodc=True,
+ client_sids=client_sids,
+ expected_groups=expected_groups)
+
+ def test_tgs_aa_asserted_identity(self):
+ """Test performing a TGS‐REQ with the Authentication Identity Asserted
+ Identity SID present."""
+ client_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (self.aa_asserted_identity, SidType.EXTRA_SID, self.default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, self.default_attrs),
+ }
+
+ self._tgs(use_fast=False,
+ client_sids=client_sids,
+ expected_groups=client_sids)
+
+ def test_tgs_aa_asserted_identity_no_attrs(self):
+ """Test performing a TGS‐REQ with the Authentication Identity Asserted
+ Identity SID present, albeit without any attributes."""
+ client_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ # Put the Asserted Identity SID in the PAC without any flags set.
+ (self.aa_asserted_identity, SidType.EXTRA_SID, 0),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, self.default_attrs),
+ }
+
+ self._tgs(use_fast=False,
+ client_sids=client_sids,
+ expected_groups=client_sids)
+
+ def test_tgs_aa_asserted_identity_from_rodc(self):
+ """Test that the Authentication Identity Asserted Identity SID in an
+ RODC‐issued PAC is preserved when performing a TGS‐REQ."""
+ client_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (self.aa_asserted_identity, SidType.EXTRA_SID, self.default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, self.default_attrs),
+ }
+
+ self._tgs(use_fast=False,
+ client_from_rodc=True,
+ client_sids=client_sids,
+ expected_groups=client_sids)
+
+ def test_tgs_aa_asserted_identity_from_rodc_no_attrs_from_rodc(self):
+ """Test that the Authentication Identity Asserted Identity SID without
+ attributes in an RODC‐issued PAC is preserved when performing a
+ TGS‐REQ."""
+ client_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ # Put the Asserted Identity SID in the PAC without any flags set.
+ (self.aa_asserted_identity, SidType.EXTRA_SID, 0),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, self.default_attrs),
+ }
+
+ expected_groups = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ # The SID in the resulting PAC has the default attributes.
+ (self.aa_asserted_identity, SidType.EXTRA_SID, self.default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, self.default_attrs),
+ }
+
+ self._tgs(use_fast=False,
+ client_from_rodc=True,
+ client_sids=client_sids,
+ expected_groups=expected_groups)
+
+ def test_tgs_compound_authentication(self):
+ """Test performing a TGS‐REQ with the Compounded Authentication SID
+ present."""
+ client_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, self.default_attrs),
+ (security.SID_COMPOUNDED_AUTHENTICATION, SidType.EXTRA_SID, self.default_attrs),
+ }
+
+ self._tgs(use_fast=False,
+ client_sids=client_sids,
+ expected_groups=client_sids)
+
+ def test_tgs_compound_authentication_from_rodc(self):
+ """Test that the Compounded Authentication SID in an
+ RODC‐issued PAC is not preserved when performing a TGS‐REQ."""
+ client_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, self.default_attrs),
+ (security.SID_COMPOUNDED_AUTHENTICATION, SidType.EXTRA_SID, self.default_attrs),
+ }
+
+ # The Compounded Authentication SID is absent from the result.
+ expected_groups = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, self.default_attrs),
+ }
+
+ self._tgs(use_fast=False,
+ client_from_rodc=True,
+ client_sids=client_sids,
+ expected_groups=expected_groups)
+
+ def test_tgs_asserted_identity_missing(self):
+ """Test that the Authentication Identity Asserted Identity SID is not
+ added to the PAC when performing a TGS‐REQ."""
+ client_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, self.default_attrs),
+ }
+
+ self._tgs(use_fast=False,
+ client_sids=client_sids,
+ expected_groups=client_sids)
+
+ def test_tgs_asserted_identity_missing_from_rodc(self):
+ """Test that the Authentication Identity Asserted Identity SID is not
+ added to an RODC‐issued PAC when performing a TGS‐REQ."""
+ client_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, self.default_attrs),
+ }
+
+ self._tgs(use_fast=False,
+ client_from_rodc=True,
+ client_sids=client_sids,
+ expected_groups=client_sids)
+
+ def test_tgs_service_asserted_identity(self):
+ """Test performing a TGS‐REQ with the Service Asserted Identity SID
+ present."""
+ client_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (self.service_asserted_identity, SidType.EXTRA_SID, self.default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, self.default_attrs),
+ }
+
+ self._tgs(use_fast=False,
+ client_sids=client_sids,
+ expected_groups=client_sids)
+
+ def test_tgs_service_asserted_identity_from_rodc(self):
+ """Test that the Service Asserted Identity SID in an
+ RODC‐issued PAC is not preserved when performing a TGS‐REQ."""
+ client_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (self.service_asserted_identity, SidType.EXTRA_SID, self.default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, self.default_attrs),
+ }
+
+ expected_groups = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ # Don’t expect the Service Asserted Identity SID.
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, self.default_attrs),
+ }
+
+ self._tgs(use_fast=False,
+ client_from_rodc=True,
+ client_sids=client_sids,
+ expected_groups=expected_groups)
+
+ # The following tests pass an SDDL expression as the first positional
+ # argument to _tgs(), constraining the TGS-REQ by an authentication
+ # policy; requiring the absent AA asserted-identity SID fails with
+ # KDC_ERR_POLICY / NT_STATUS_AUTHENTICATION_FIREWALL_FAILED.
+ def test_tgs_without_aa_asserted_identity(self):
+ client_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ }
+
+ self._tgs(f'Member_of SID({self.aa_asserted_identity})',
+ client_sids=client_sids,
+ code=KDC_ERR_POLICY,
+ status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.KERBEROS_SERVER_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED,
+ edata=self.expect_padata_outer)
+
+ def test_tgs_without_aa_asserted_identity_client_from_rodc(self):
+ client_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ }
+
+ self._tgs(f'Member_of SID({self.aa_asserted_identity})',
+ client_from_rodc=True,
+ client_sids=client_sids,
+ code=KDC_ERR_POLICY,
+ status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.KERBEROS_SERVER_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED,
+ edata=self.expect_padata_outer)
+
+ def test_tgs_without_aa_asserted_identity_device_from_rodc(self):
+ client_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ }
+
+ self._tgs(f'Member_of SID({self.aa_asserted_identity})',
+ device_from_rodc=True,
+ client_sids=client_sids,
+ code=(KDC_ERR_POLICY, CRASHES_WINDOWS),
+ status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.KERBEROS_SERVER_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED,
+ edata=self.expect_padata_outer)
+
+ def test_tgs_without_aa_asserted_identity_both_from_rodc(self):
+ client_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ }
+
+ self._tgs(f'Member_of SID({self.aa_asserted_identity})',
+ client_from_rodc=True,
+ device_from_rodc=True,
+ client_sids=client_sids,
+ code=(KDC_ERR_POLICY, CRASHES_WINDOWS),
+ status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.KERBEROS_SERVER_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED,
+ edata=self.expect_padata_outer)
+
+ def test_tgs_with_aa_asserted_identity(self):
+ client_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (self.aa_asserted_identity, SidType.EXTRA_SID, self.default_attrs),
+ }
+
+ self._tgs(f'Member_of SID({self.aa_asserted_identity})',
+ client_sids=client_sids,
+ expected_groups=client_sids)
+
+ def test_tgs_with_aa_asserted_identity_client_from_rodc(self):
+ client_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (self.aa_asserted_identity, SidType.EXTRA_SID, self.default_attrs),
+ }
+
+ # The RODC path also adds the Claims Valid SID to the result.
+ expected_groups = client_sids | {
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, self.default_attrs),
+ }
+
+ self._tgs(f'Member_of SID({self.aa_asserted_identity})',
+ client_from_rodc=True,
+ client_sids=client_sids,
+ expected_groups=expected_groups)
+
+ def test_tgs_with_aa_asserted_identity_device_from_rodc(self):
+ client_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (self.aa_asserted_identity, SidType.EXTRA_SID, self.default_attrs),
+ }
+
+ self._tgs(f'Member_of SID({self.aa_asserted_identity})',
+ device_from_rodc=True,
+ client_sids=client_sids,
+ expected_groups=client_sids,
+ code=(0, CRASHES_WINDOWS))
+
+ def test_tgs_with_aa_asserted_identity_both_from_rodc(self):
+ client_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (self.aa_asserted_identity, SidType.EXTRA_SID, self.default_attrs),
+ }
+
+ expected_groups = client_sids | {
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, self.default_attrs),
+ }
+
+ self._tgs(f'Member_of SID({self.aa_asserted_identity})',
+ client_from_rodc=True,
+ device_from_rodc=True,
+ client_sids=client_sids,
+ expected_groups=expected_groups,
+ code=(0, CRASHES_WINDOWS))
+
+ def test_tgs_without_service_asserted_identity(self):
+ client_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ }
+
+ self._tgs(f'Member_of SID({self.service_asserted_identity})',
+ client_sids=client_sids,
+ code=KDC_ERR_POLICY,
+ status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.KERBEROS_SERVER_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED,
+ edata=self.expect_padata_outer)
+
+ def test_tgs_without_service_asserted_identity_client_from_rodc(self):
+ client_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ }
+
+ self._tgs(f'Member_of SID({self.service_asserted_identity})',
+ client_from_rodc=True,
+ client_sids=client_sids,
+ code=KDC_ERR_POLICY,
+ status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.KERBEROS_SERVER_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED,
+ edata=self.expect_padata_outer)
+
+ def test_tgs_without_service_asserted_identity_device_from_rodc(self):
+ client_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ }
+
+ self._tgs(f'Member_of SID({self.service_asserted_identity})',
+ device_from_rodc=True,
+ client_sids=client_sids,
+ code=(KDC_ERR_POLICY, CRASHES_WINDOWS),
+ status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.KERBEROS_SERVER_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED,
+ edata=self.expect_padata_outer)
+
+ def test_tgs_without_service_asserted_identity_both_from_rodc(self):
+ client_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ }
+
+ self._tgs(f'Member_of SID({self.service_asserted_identity})',
+ client_from_rodc=True,
+ device_from_rodc=True,
+ client_sids=client_sids,
+ code=(KDC_ERR_POLICY, CRASHES_WINDOWS),
+ status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.KERBEROS_SERVER_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED,
+ edata=self.expect_padata_outer)
+
+ def test_tgs_with_service_asserted_identity(self):
+ client_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (self.service_asserted_identity, SidType.EXTRA_SID, self.default_attrs),
+ }
+
+ self._tgs(f'Member_of SID({self.service_asserted_identity})',
+ client_sids=client_sids,
+ expected_groups=client_sids)
+
+ def test_tgs_with_service_asserted_identity_client_from_rodc(self):
+ client_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (self.service_asserted_identity, SidType.EXTRA_SID, self.default_attrs),
+ }
+
+ self._tgs(f'Member_of SID({self.service_asserted_identity})',
+ client_from_rodc=True,
+ client_sids=client_sids,
+ code=KDC_ERR_POLICY,
+ status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.KERBEROS_SERVER_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED,
+ edata=self.expect_padata_outer)
+
+ def test_tgs_with_service_asserted_identity_device_from_rodc(self):
+ client_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (self.service_asserted_identity, SidType.EXTRA_SID, self.default_attrs),
+ }
+
+ self._tgs(f'Member_of SID({self.service_asserted_identity})',
+ device_from_rodc=True,
+ client_sids=client_sids,
+ expected_groups=client_sids,
+ code=(0, CRASHES_WINDOWS))
+
+ def test_tgs_with_service_asserted_identity_both_from_rodc(self):
+ client_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (self.service_asserted_identity, SidType.EXTRA_SID, self.default_attrs),
+ }
+
+ self._tgs(f'Member_of SID({self.service_asserted_identity})',
+ client_from_rodc=True,
+ device_from_rodc=True,
+ client_sids=client_sids,
+ code=(KDC_ERR_POLICY, CRASHES_WINDOWS),
+ status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.KERBEROS_SERVER_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED,
+ edata=self.expect_padata_outer)
+
+ def test_tgs_without_claims_valid(self):
+ client_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ }
+
+ self._tgs(f'Member_of SID({security.SID_CLAIMS_VALID})',
+ client_sids=client_sids,
+ code=KDC_ERR_POLICY,
+ status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.KERBEROS_SERVER_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED,
+ edata=self.expect_padata_outer)
+
+ def test_tgs_without_claims_valid_client_from_rodc(self):
+ client_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ }
+
+ expected_groups = client_sids | {
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, self.default_attrs),
+ }
+
+ self._tgs(f'Member_of SID({security.SID_CLAIMS_VALID})',
+ client_from_rodc=True,
+ client_sids=client_sids,
+ expected_groups=expected_groups)
+
+ def test_tgs_without_claims_valid_device_from_rodc(self):
+ client_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ }
+
+ self._tgs(f'Member_of SID({security.SID_CLAIMS_VALID})',
+ device_from_rodc=True,
+ client_sids=client_sids,
+ code=(KDC_ERR_POLICY, CRASHES_WINDOWS),
+ status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.KERBEROS_SERVER_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED,
+ edata=self.expect_padata_outer)
+
+ def test_tgs_without_claims_valid_both_from_rodc(self):
+ client_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ }
+
+ expected_groups = client_sids | {
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, self.default_attrs),
+ }
+
+ self._tgs(f'Member_of SID({security.SID_CLAIMS_VALID})',
+ client_from_rodc=True,
+ device_from_rodc=True,
+ client_sids=client_sids,
+ expected_groups=expected_groups,
+ code=(0, CRASHES_WINDOWS))
+
+ def test_tgs_with_claims_valid(self):
+ client_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, self.default_attrs),
+ }
+
+ self._tgs(f'Member_of SID({security.SID_CLAIMS_VALID})',
+ client_sids=client_sids,
+ expected_groups=client_sids)
+
+ def test_tgs_with_claims_valid_client_from_rodc(self):
+ client_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, self.default_attrs),
+ }
+
+ self._tgs(f'Member_of SID({security.SID_CLAIMS_VALID})',
+ client_from_rodc=True,
+ client_sids=client_sids,
+ expected_groups=client_sids)
+
+ def test_tgs_with_claims_valid_device_from_rodc(self):
+ client_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, self.default_attrs),
+ }
+
+ self._tgs(f'Member_of SID({security.SID_CLAIMS_VALID})',
+ device_from_rodc=True,
+ client_sids=client_sids,
+ expected_groups=client_sids,
+ code=(0, CRASHES_WINDOWS))
+
+ def test_tgs_with_claims_valid_both_from_rodc(self):
+ client_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, self.default_attrs),
+ }
+
+ self._tgs(f'Member_of SID({security.SID_CLAIMS_VALID})',
+ client_from_rodc=True,
+ device_from_rodc=True,
+ client_sids=client_sids,
+ expected_groups=client_sids,
+ code=(0, CRASHES_WINDOWS))
+
    def _tgs(self,
             target_policy=None,
             *,
             code=0,
             event=AuditEvent.OK,
             reason=AuditReason.NONE,
             status=None,
             edata=False,
             use_fast=True,
             client_from_rodc=None,
             device_from_rodc=None,
             client_sids=None,
             client_claims=None,
             device_sids=None,
             device_claims=None,
             expected_groups=None,
             expected_claims=None):
        """Perform a TGS-REQ against a freshly created target computer
        account and check both the KDC result and the audit log.

        :param target_policy: conditional-ACE fragment inserted into the
            target account's authentication policy; '{client_sid}' is
            substituted with the client's SID. None means no policy.
        :param code: expected Kerberos error (0 for success), or a 2-tuple
            ``(code, CRASHES_WINDOWS)`` marking a request known to crash
            Windows servers.
        :param event: expected audit event in the TGS log.
        :param reason: expected audit reason in the TGS log.
        :param status: expected NT status for the request and the log.
        :param edata: expectation passed through as expect_edata.
        :param use_fast: whether to armor the request with a machine TGT.
        :param client_from_rodc: if True, the client TGT is (re-)signed with
            the mock RODC krbtgt key.
        :param device_from_rodc: if True, the armor TGT is (re-)signed with
            the mock RODC krbtgt key; requires use_fast.
        :param client_sids: if not None, replace the SIDs in the client PAC.
        :param client_claims: if not None, replace the claims in the client
            PAC.
        :param device_sids: if not None, replace the SIDs in the armor PAC.
        :param device_claims: if not None, replace the claims in the armor
            PAC.
        :param expected_groups: groups expected in the service ticket PAC.
        :param expected_claims: client claims expected in the service ticket
            PAC.
        """
        # ``code`` may be a (code, CRASHES_WINDOWS) pair: unpack it and skip
        # the test unless we are explicitly allowed to crash Windows.
        try:
            code, crashes_windows = code
            self.assertIs(crashes_windows, CRASHES_WINDOWS)
            if not self.crash_windows:
                self.skipTest('test crashes Windows servers')
        except TypeError:
            # A bare code: it must not be the CRASHES_WINDOWS marker alone.
            self.assertIsNot(code, CRASHES_WINDOWS)

        if not use_fast:
            # Without FAST there is no armor ticket, so no device parameters
            # may be supplied.
            self.assertIsNone(device_from_rodc)
            self.assertIsNone(device_sids)
            self.assertIsNone(device_claims)

        if client_from_rodc is None:
            client_from_rodc = False

        if device_from_rodc is None:
            device_from_rodc = False

        # The client account must be revealed to the mock RODC if its TGT is
        # to be issued by it.
        client_creds = self.get_cached_creds(
            account_type=self.AccountType.USER,
            opts={
                'allowed_replication_mock': client_from_rodc,
                'revealed_to_mock_rodc': client_from_rodc,
            })
        client_sid = client_creds.get_sid()

        client_username = client_creds.get_username()
        client_cname = self.PrincipalName_create(name_type=NT_PRINCIPAL,
                                                 names=[client_username])

        client_tkt_options = 'forwardable'
        expected_flags = krb5_asn1.TicketFlags(client_tkt_options)

        checksum_key = self.get_krbtgt_checksum_key()

        # Only fetch the mock RODC krbtgt key when some ticket must be
        # re-signed with it.
        if client_from_rodc or device_from_rodc:
            rodc_krbtgt_creds = self.get_mock_rodc_krbtgt_creds()
            rodc_krbtgt_key = self.TicketDecryptionKey_from_creds(rodc_krbtgt_creds)
            rodc_checksum_key = {
                krb5pac.PAC_TYPE_KDC_CHECKSUM: rodc_krbtgt_key,
            }

        client_tgt = self.get_tgt(client_creds,
                                  kdc_options=client_tkt_options,
                                  expected_flags=expected_flags)

        # Rewrite the client TGT: optionally replace its PAC SIDs and/or
        # claims, and re-sign it with the RODC key if the client is meant to
        # come from the RODC.
        client_modify_pac_fn = []
        if client_sids is not None:
            client_modify_pac_fn.append(partial(self.set_pac_sids,
                                                new_sids=client_sids))
        if client_claims is not None:
            client_modify_pac_fn.append(partial(self.set_pac_claims,
                                                client_claims=client_claims))
        client_tgt = self.modified_ticket(
            client_tgt,
            modify_pac_fn=client_modify_pac_fn,
            new_ticket_key=rodc_krbtgt_key if client_from_rodc else None,
            checksum_keys=rodc_checksum_key if client_from_rodc else checksum_key)

        if use_fast:
            # Create a machine account with which to perform FAST.
            mach_creds = self.get_cached_creds(
                account_type=self.AccountType.COMPUTER,
                opts={
                    'allowed_replication_mock': device_from_rodc,
                    'revealed_to_mock_rodc': device_from_rodc,
                })
            mach_tgt = self.get_tgt(mach_creds)
            # Rewrite the armor TGT in the same way as the client TGT.
            device_modify_pac_fn = []
            if device_sids is not None:
                device_modify_pac_fn.append(partial(self.set_pac_sids,
                                                    new_sids=device_sids))
            if device_claims is not None:
                device_modify_pac_fn.append(partial(self.set_pac_claims,
                                                    client_claims=device_claims))
            mach_tgt = self.modified_ticket(
                mach_tgt,
                modify_pac_fn=device_modify_pac_fn,
                new_ticket_key=rodc_krbtgt_key if device_from_rodc else None,
                checksum_keys=rodc_checksum_key if device_from_rodc else checksum_key)
        else:
            mach_tgt = None

        if target_policy is None:
            policy = None
            assigned_policy = None
        else:
            # Embed the caller-supplied ACE condition in a computer-allowed-to
            # policy; '{client_sid}' in the condition is replaced with the
            # client's actual SID.
            sddl = f'O:SYD:(XA;;CR;;;WD;({target_policy.format(client_sid=client_creds.get_sid())}))'
            policy = self.create_authn_policy(enforced=True,
                                              computer_allowed_to=sddl)
            assigned_policy = str(policy.dn)

        # Create a target account with the assigned policy.
        target_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER,
            opts={'assigned_policy': assigned_policy})

        target_decryption_key = self.TicketDecryptionKey_from_creds(
            target_creds)
        target_etypes = target_creds.tgs_supported_enctypes

        samdb = self.get_samdb()
        domain_sid_str = samdb.get_domain_sid()

        expected_groups = self.map_sids(expected_groups, None, domain_sid_str)

        # Perform the TGS-REQ and check that the result (success or the
        # expected error) and the resulting PAC match expectations.
        self._tgs_req(client_tgt, code, client_creds, target_creds,
                      armor_tgt=mach_tgt,
                      expected_cname=client_cname,
                      expected_account_name=client_username,
                      decryption_key=target_decryption_key,
                      expected_sid=client_sid,
                      expected_groups=expected_groups,
                      expect_client_claims=bool(expected_claims) or None,
                      expected_client_claims=expected_claims,
                      expected_supported_etypes=target_etypes,
                      expected_status=status,
                      expect_edata=edata)

        self.check_tgs_log(client_creds, target_creds,
                           policy=policy,
                           checked_creds=client_creds,
                           status=status,
                           event=event,
                           reason=reason)
+
+ def test_conditional_ace_allowed_from_user_allow(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create an authentication policy that explicitly allows the machine
+ # account for a user.
+ allowed = (f'O:SYD:(XA;;CR;;;{mach_creds.get_sid()};'
+ f'(Member_of SID({mach_creds.get_sid()})))')
+ denied = 'O:SYD:(D;;CR;;;WD)'
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_from=allowed,
+ service_allowed_from=denied)
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy)
+
+ # Show that authentication succeeds.
+ self._get_tgt(client_creds, armor_tgt=mach_tgt,
+ expected_error=0)
+
+ self.check_as_log(
+ client_creds,
+ armor_creds=mach_creds,
+ client_policy=policy)
+
+ def test_conditional_ace_allowed_from_user_deny(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create an authentication policy that explicitly denies the machine
+ # account for a user.
+ allowed = 'O:SYD:(A;;CR;;;WD)'
+ denied = (f'O:SYD:(XD;;CR;;;{mach_creds.get_sid()};'
+ f'(Member_of SID({mach_creds.get_sid()})))'
+ f'(A;;CR;;;WD)')
+ policy = self.create_authn_policy(enforced=True,
+ user_allowed_from=denied,
+ service_allowed_from=allowed)
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=policy)
+
+ # Show that we get a policy error when trying to authenticate.
+ self._get_tgt(client_creds, armor_tgt=mach_tgt,
+ expected_error=KDC_ERR_POLICY)
+
+ self.check_as_log(
+ client_creds,
+ armor_creds=mach_creds,
+ client_policy=policy,
+ client_policy_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.KERBEROS_DEVICE_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED,
+ status=ntstatus.NT_STATUS_INVALID_WORKSTATION)
+
+
+class DeviceRestrictionTests(ConditionalAceBaseTests):
+ def test_pac_groups_not_present(self):
+ """Test that authentication fails if the device does not belong to some
+ required groups.
+ """
+
+ required_sids = {
+ ('S-1-2-3-4', SidType.EXTRA_SID, self.default_attrs),
+ ('S-1-9-8-7', SidType.EXTRA_SID, self.default_attrs),
+ }
+
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={'id': 'device'})
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create an authentication policy that requires the device to belong to
+ # certain groups.
+ client_policy_sddl = self.allow_if(
+ f'Member_of {self.sddl_array_from_sids(required_sids)}')
+ client_policy = self.create_authn_policy(
+ enforced=True, user_allowed_from=client_policy_sddl)
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=client_policy)
+
+ # Show that authentication fails.
+ self._armored_as_req(client_creds,
+ self.get_krbtgt_creds(),
+ mach_tgt,
+ expected_error=KDC_ERR_POLICY)
+
+ self.check_as_log(
+ client_creds,
+ armor_creds=mach_creds,
+ client_policy=client_policy,
+ client_policy_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.KERBEROS_DEVICE_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED,
+ status=ntstatus.NT_STATUS_INVALID_WORKSTATION)
+
+ def test_pac_groups_present(self):
+ """Test that authentication succeeds if the device belongs to some
+ required groups.
+ """
+
+ required_sids = {
+ ('S-1-2-3-4', SidType.EXTRA_SID, self.default_attrs),
+ ('S-1-9-8-7', SidType.EXTRA_SID, self.default_attrs),
+ }
+
+ device_sids = required_sids | {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ }
+
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={'id': 'device'})
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Add the required groups to the machine account’s TGT.
+ mach_tgt = self.modified_ticket(
+ mach_tgt,
+ modify_pac_fn=partial(self.set_pac_sids,
+ new_sids=device_sids),
+ checksum_keys=self.get_krbtgt_checksum_key())
+
+ # Create an authentication policy that requires the device to belong to
+ # certain groups.
+ client_policy_sddl = self.allow_if(
+ f'Member_of {self.sddl_array_from_sids(required_sids)}')
+ client_policy = self.create_authn_policy(
+ enforced=True, user_allowed_from=client_policy_sddl)
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=client_policy)
+
+ # Show that authentication succeeds.
+ self._armored_as_req(client_creds,
+ self.get_krbtgt_creds(),
+ mach_tgt)
+
+ self.check_as_log(client_creds,
+ armor_creds=mach_creds,
+ client_policy=client_policy)
+
    def test_pac_resource_groups_present(self):
        """Test that authentication fails even if the device belongs to the
        required groups, when those groups are carried as resource SIDs:
        resource groups do not satisfy a Member_of device restriction.
        """

        required_sids = {
            ('S-1-2-3-4', SidType.RESOURCE_SID, self.resource_attrs),
            ('S-1-2-3-5', SidType.RESOURCE_SID, self.resource_attrs),
            ('S-1-2-3-6', SidType.RESOURCE_SID, self.resource_attrs),
        }

        device_sids = required_sids | {
            (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
            (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
        }

        # Create a machine account with which to perform FAST.
        mach_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER,
            opts={'id': 'device'})
        mach_tgt = self.get_tgt(mach_creds)

        # Add the required groups to the machine account’s TGT.
        mach_tgt = self.modified_ticket(
            mach_tgt,
            modify_pac_fn=partial(self.set_pac_sids,
                                  new_sids=device_sids),
            checksum_keys=self.get_krbtgt_checksum_key())

        # Create an authentication policy that requires the device to belong to
        # certain groups.
        client_policy_sddl = self.allow_if(
            f'Member_of {self.sddl_array_from_sids(required_sids)}')
        client_policy = self.create_authn_policy(
            enforced=True, user_allowed_from=client_policy_sddl)

        # Create a user account with the assigned policy.
        client_creds = self._get_creds(account_type=self.AccountType.USER,
                                       assigned_policy=client_policy)

        # Show that authentication fails.
        self._armored_as_req(client_creds,
                             self.get_krbtgt_creds(),
                             mach_tgt,
                             expected_error=KDC_ERR_POLICY)

        self.check_as_log(
            client_creds,
            armor_creds=mach_creds,
            client_policy=client_policy,
            client_policy_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
            event=AuditEvent.KERBEROS_DEVICE_RESTRICTION,
            reason=AuditReason.ACCESS_DENIED,
            status=ntstatus.NT_STATUS_INVALID_WORKSTATION)
+
    def test_pac_resource_groups_present_to_service_sid_compression(self):
        """Test that authentication fails even if the device belongs to the
        required resource groups, when the request is made to a service that
        supports SID compression: resource groups do not satisfy a Member_of
        device restriction.
        """

        required_sids = {
            ('S-1-2-3-4', SidType.RESOURCE_SID, self.resource_attrs),
            ('S-1-2-3-5', SidType.RESOURCE_SID, self.resource_attrs),
            ('S-1-2-3-6', SidType.RESOURCE_SID, self.resource_attrs),
        }

        device_sids = required_sids | {
            (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
            (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
        }

        # Create a machine account with which to perform FAST.
        mach_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER,
            opts={'id': 'device'})
        mach_tgt = self.get_tgt(mach_creds)

        # Add the required groups to the machine account’s TGT.
        mach_tgt = self.modified_ticket(
            mach_tgt,
            modify_pac_fn=partial(self.set_pac_sids,
                                  new_sids=device_sids),
            checksum_keys=self.get_krbtgt_checksum_key())

        # Create an authentication policy that requires the device to belong to
        # certain groups.
        client_policy_sddl = self.allow_if(
            f'Member_of {self.sddl_array_from_sids(required_sids)}')
        client_policy = self.create_authn_policy(
            enforced=True, user_allowed_from=client_policy_sddl)

        # Create a user account with the assigned policy.
        client_creds = self._get_creds(account_type=self.AccountType.USER,
                                       assigned_policy=client_policy)

        # The target service; by default it supports SID compression.
        target_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER,
            opts={'id': 'target'})

        # Show that authentication fails.
        self._armored_as_req(client_creds,
                             target_creds,
                             mach_tgt,
                             expected_error=KDC_ERR_POLICY)

        self.check_as_log(
            client_creds,
            armor_creds=mach_creds,
            client_policy=client_policy,
            client_policy_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
            event=AuditEvent.KERBEROS_DEVICE_RESTRICTION,
            reason=AuditReason.ACCESS_DENIED,
            status=ntstatus.NT_STATUS_INVALID_WORKSTATION)
+
    def test_pac_resource_groups_present_to_service_no_sid_compression(self):
        """Test that authentication fails even if the device belongs to the
        required resource groups, when the request is made to a service that
        does not support SID compression: resource groups do not satisfy a
        Member_of device restriction.
        """

        required_sids = {
            ('S-1-2-3-4', SidType.RESOURCE_SID, self.resource_attrs),
            ('S-1-2-3-5', SidType.RESOURCE_SID, self.resource_attrs),
            ('S-1-2-3-6', SidType.RESOURCE_SID, self.resource_attrs),
        }

        device_sids = required_sids | {
            (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
            (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
        }

        # Create a machine account with which to perform FAST.
        mach_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER,
            opts={'id': 'device'})
        mach_tgt = self.get_tgt(mach_creds)

        # Add the required groups to the machine account’s TGT.
        mach_tgt = self.modified_ticket(
            mach_tgt,
            modify_pac_fn=partial(self.set_pac_sids,
                                  new_sids=device_sids),
            checksum_keys=self.get_krbtgt_checksum_key())

        # Create an authentication policy that requires the device to belong to
        # certain groups.
        client_policy_sddl = self.allow_if(
            f'Member_of {self.sddl_array_from_sids(required_sids)}')
        client_policy = self.create_authn_policy(
            enforced=True, user_allowed_from=client_policy_sddl)

        # Create a user account with the assigned policy.
        client_creds = self._get_creds(account_type=self.AccountType.USER,
                                       assigned_policy=client_policy)

        # A target service explicitly marked as not supporting SID
        # compression.
        target_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER,
            opts={
                'id': 'target',
                'supported_enctypes': (
                    security.KERB_ENCTYPE_RC4_HMAC_MD5) | (
                        security.KERB_ENCTYPE_AES256_CTS_HMAC_SHA1_96_SK),
                'sid_compression_support': False,
            })

        # Show that authentication fails.
        self._armored_as_req(client_creds,
                             target_creds,
                             mach_tgt,
                             expected_error=KDC_ERR_POLICY)

        self.check_as_log(
            client_creds,
            armor_creds=mach_creds,
            client_policy=client_policy,
            client_policy_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
            event=AuditEvent.KERBEROS_DEVICE_RESTRICTION,
            reason=AuditReason.ACCESS_DENIED,
            status=ntstatus.NT_STATUS_INVALID_WORKSTATION)
+
+ def test_pac_well_known_groups_not_present(self):
+ """Test that authentication fails if the device does not belong to one
+ or more required well‐known groups.
+ """
+
+ required_sids = {
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, self.default_attrs),
+ (security.SID_COMPOUNDED_AUTHENTICATION, SidType.EXTRA_SID, self.default_attrs),
+ (self.aa_asserted_identity, SidType.EXTRA_SID, self.default_attrs),
+ (self.service_asserted_identity, SidType.EXTRA_SID, self.default_attrs),
+ }
+
+ device_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ }
+
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={'id': 'device'})
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Modify the machine account’s TGT to contain only the SID of the
+ # machine account’s primary group.
+ mach_tgt = self.modified_ticket(
+ mach_tgt,
+ modify_pac_fn=partial(self.set_pac_sids,
+ new_sids=device_sids),
+ checksum_keys=self.get_krbtgt_checksum_key())
+
+ # Create an authentication policy that requires the device to belong to
+ # certain groups.
+ client_policy_sddl = self.allow_if(
+ f'Member_of_any {self.sddl_array_from_sids(required_sids)}')
+ client_policy = self.create_authn_policy(
+ enforced=True, user_allowed_from=client_policy_sddl)
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=client_policy)
+
+ # Show that authentication fails.
+ self._armored_as_req(client_creds,
+ self.get_krbtgt_creds(),
+ mach_tgt,
+ expected_error=KDC_ERR_POLICY)
+
+ self.check_as_log(
+ client_creds,
+ armor_creds=mach_creds,
+ client_policy=client_policy,
+ client_policy_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.KERBEROS_DEVICE_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED,
+ status=ntstatus.NT_STATUS_INVALID_WORKSTATION)
+
+ def test_pac_device_info(self):
+ """Test the groups of the client and the device after performing a
+ FAST‐armored AS‐REQ.
+ """
+
+ device_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ }
+
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={'id': 'device'})
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Add the required groups to the machine account’s TGT.
+ mach_tgt = self.modified_ticket(
+ mach_tgt,
+ modify_pac_fn=partial(self.set_pac_sids,
+ new_sids=device_sids),
+ checksum_keys=self.get_krbtgt_checksum_key())
+
+ # Create a user account.
+ client_creds = self._get_creds(account_type=self.AccountType.USER)
+
+ target_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={'id': 'target'})
+
+ expected_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ # The client’s groups are to include the Asserted Identity and
+ # Claims Valid SIDs.
+ (self.aa_asserted_identity, SidType.EXTRA_SID, self.default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, self.default_attrs),
+ }
+
+ samdb = self.get_samdb()
+ domain_sid_str = samdb.get_domain_sid()
+
+ expected_sids = self.map_sids(expected_sids, None, domain_sid_str)
+
+ # Show that authentication succeeds. Check that the groups in the PAC
+ # are as expected.
+ self._armored_as_req(client_creds,
+ target_creds,
+ mach_tgt,
+ expected_groups=expected_sids,
+ expect_device_info=False,
+ expected_device_groups=None)
+
+ self.check_as_log(
+ client_creds,
+ armor_creds=mach_creds)
+
+ def test_pac_claims_not_present(self):
+ """Test that authentication fails if the device does not have a
+ required claim.
+ """
+
+ claim_id = 'the name of the claim'
+ claim_value = 'the value of the claim'
+
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={'id': 'device'})
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create an authentication policy that requires the device to have a
+ # certain claim.
+ client_policy_sddl = self.allow_if(
+ f'@User.{escaped_claim_id(claim_id)} == "{claim_value}"')
+ client_policy = self.create_authn_policy(
+ enforced=True, user_allowed_from=client_policy_sddl)
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=client_policy)
+
+ # Show that authentication fails.
+ self._armored_as_req(client_creds,
+ self.get_krbtgt_creds(),
+ mach_tgt,
+ expected_error=KDC_ERR_POLICY)
+
+ self.check_as_log(
+ client_creds,
+ armor_creds=mach_creds,
+ client_policy=client_policy,
+ client_policy_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.KERBEROS_DEVICE_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED,
+ status=ntstatus.NT_STATUS_INVALID_WORKSTATION)
+
+ def test_pac_claims_present(self):
+ """Test that authentication succeeds if the device has a required
+ claim.
+ """
+
+ claim_id = 'the name of the claim'
+ claim_value = 'the value of the claim'
+
+ pac_claims = [
+ (claims.CLAIMS_SOURCE_TYPE_AD, [
+ (claim_id, claims.CLAIM_TYPE_STRING, [claim_value]),
+ ]),
+ ]
+
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={'id': 'device'})
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Add the required claim to the machine account’s TGT.
+ mach_tgt = self.modified_ticket(
+ mach_tgt,
+ modify_pac_fn=partial(self.set_pac_claims,
+ client_claims=pac_claims),
+ checksum_keys=self.get_krbtgt_checksum_key())
+
+ # Create an authentication policy that requires the device to have a
+ # certain claim.
+ client_policy_sddl = self.allow_if(
+ f'@User.{escaped_claim_id(claim_id)} == "{claim_value}"')
+ client_policy = self.create_authn_policy(
+ enforced=True, user_allowed_from=client_policy_sddl)
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=client_policy)
+
+ # Show that authentication succeeds.
+ self._armored_as_req(client_creds,
+ self.get_krbtgt_creds(),
+ mach_tgt)
+
+ self.check_as_log(client_creds,
+ armor_creds=mach_creds,
+ client_policy=client_policy)
+
+ def test_pac_claims_invalid(self):
+ """Test that authentication fails if the device’s required claim is not
+ valid.
+ """
+
+ claim_id = 'the name of the claim'
+ claim_value = 'the value of the claim'
+
+ pac_claims = [
+ (claims.CLAIMS_SOURCE_TYPE_AD, [
+ (claim_id, claims.CLAIM_TYPE_STRING, [claim_value]),
+ ]),
+ ]
+
+ # The device’s SIDs do not include the Claims Valid SID.
+ device_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ }
+
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={'id': 'device'})
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Add the SIDs and the required claim to the machine account’s TGT.
+ mach_tgt = self.modified_ticket(
+ mach_tgt,
+ modify_pac_fn=[
+ partial(self.set_pac_claims, client_claims=pac_claims),
+ partial(self.set_pac_sids, new_sids=device_sids)],
+ checksum_keys=self.get_krbtgt_checksum_key())
+
+ # Create an authentication policy that requires the device to have a
+ # certain claim.
+ client_policy_sddl = self.allow_if(
+ f'@User.{escaped_claim_id(claim_id)} == "{claim_value}"')
+ client_policy = self.create_authn_policy(
+ enforced=True, user_allowed_from=client_policy_sddl)
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=client_policy)
+
+ # Show that authentication fails.
+ self._armored_as_req(client_creds,
+ self.get_krbtgt_creds(),
+ mach_tgt,
+ expected_error=KDC_ERR_POLICY)
+
+ self.check_as_log(
+ client_creds,
+ armor_creds=mach_creds,
+ client_policy=client_policy,
+ client_policy_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.KERBEROS_DEVICE_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED,
+ status=ntstatus.NT_STATUS_INVALID_WORKSTATION)
+
    # Each of the following tests checks whether AS-REQ authentication is
    # allowed or denied when the armor device is required to be (or not to
    # be) a member of one particular well-known group or SID.

    def test_device_in_world_group(self):
        self._check_device_in_group(security.SID_WORLD)

    def test_device_in_network_group(self):
        self._check_device_not_in_group(security.SID_NT_NETWORK)

    def test_device_in_authenticated_users(self):
        self._check_device_in_group(security.SID_NT_AUTHENTICATED_USERS)

    def test_device_in_aa_asserted_identity(self):
        self._check_device_in_group(
            security.SID_AUTHENTICATION_AUTHORITY_ASSERTED_IDENTITY)

    def test_device_in_service_asserted_identity(self):
        self._check_device_not_in_group(security.SID_SERVICE_ASSERTED_IDENTITY)

    def test_device_in_compounded_authentication(self):
        self._check_device_not_in_group(security.SID_COMPOUNDED_AUTHENTICATION)

    def test_device_in_claims_valid(self):
        self._check_device_in_group(security.SID_CLAIMS_VALID)

    # Convenience wrappers around _check_device_membership().

    def _check_device_in_group(self, group):
        self._check_device_membership(group, expect_in_group=True)

    def _check_device_not_in_group(self, group):
        self._check_device_membership(group, expect_in_group=False)
+
+ def _check_device_membership(self, group, *, expect_in_group):
+ """Test that authentication succeeds or fails when the device is
+ required to belong to a certain group.
+ """
+
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={'id': 'device'})
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create an authentication policy that requires the device to belong to
+ # a certain group.
+ in_group_sddl = self.allow_if(f'Member_of {{SID({group})}}')
+ in_group_policy = self.create_authn_policy(
+ enforced=True, user_allowed_from=in_group_sddl)
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=in_group_policy)
+
+ krbtgt_creds = self.get_krbtgt_creds()
+
+ # Test whether authentication succeeds or fails.
+ self._armored_as_req(
+ client_creds,
+ krbtgt_creds,
+ mach_tgt,
+ expected_error=0 if expect_in_group else KDC_ERR_POLICY)
+
+ policy_success_args = {}
+ policy_failure_args = {
+ 'client_policy_status': ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ 'event': AuditEvent.KERBEROS_DEVICE_RESTRICTION,
+ 'reason': AuditReason.ACCESS_DENIED,
+ 'status': ntstatus.NT_STATUS_INVALID_WORKSTATION,
+ }
+
+ self.check_as_log(client_creds,
+ armor_creds=mach_creds,
+ client_policy=in_group_policy,
+ **(policy_success_args if expect_in_group
+ else policy_failure_args))
+
+ # Create an authentication policy that requires the device not to belong
+ # to the group.
+ not_in_group_sddl = self.allow_if(f'Not_Member_of {{SID({group})}}')
+ not_in_group_policy = self.create_authn_policy(
+ enforced=True, user_allowed_from=not_in_group_sddl)
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=not_in_group_policy)
+
+ # Test whether authentication succeeds or fails.
+ self._armored_as_req(
+ client_creds,
+ krbtgt_creds,
+ mach_tgt,
+ expected_error=KDC_ERR_POLICY if expect_in_group else 0)
+
+ self.check_as_log(client_creds,
+ armor_creds=mach_creds,
+ client_policy=not_in_group_policy,
+ **(policy_failure_args if expect_in_group
+ else policy_success_args))
+
+
+class TgsReqServicePolicyTests(ConditionalAceBaseTests):
+ def test_pac_groups_not_present(self):
+ """Test that authorization succeeds if the client does not belong to
+ some required groups.
+ """
+
+ required_sids = {
+ ('S-1-2-3-4', SidType.EXTRA_SID, self.default_attrs),
+ ('S-1-9-8-7', SidType.EXTRA_SID, self.default_attrs),
+ }
+
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={'id': 'device'})
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create a user account.
+ client_creds = self._get_creds(account_type=self.AccountType.USER)
+ client_tgt = self.get_tgt(client_creds)
+
+ # Create an authentication policy that requires the client to belong to
+ # certain groups.
+ target_policy_sddl = self.allow_if(
+ f'Member_of {self.sddl_array_from_sids(required_sids)}')
+ target_policy = self.create_authn_policy(
+ enforced=True, computer_allowed_to=target_policy_sddl)
+
+ # Create a target account with the assigned policy.
+ target_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
+ assigned_policy=target_policy)
+
+ # Show that authorization fails.
+ self._tgs_req(
+ client_tgt, KDC_ERR_POLICY, client_creds, target_creds,
+ armor_tgt=mach_tgt,
+ expect_edata=self.expect_padata_outer,
+ # We aren’t particular about whether or not we get an NTSTATUS.
+ expect_status=None,
+ expected_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED)
+
+ self.check_tgs_log(
+ client_creds, target_creds,
+ policy=target_policy,
+ status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.KERBEROS_SERVER_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED)
+
+ def test_pac_groups_present(self):
+ """Test that authorization succeeds if the client belongs to some
+ required groups.
+ """
+
+ required_sids = {
+ ('S-1-2-3-4', SidType.EXTRA_SID, self.default_attrs),
+ ('S-1-9-8-7', SidType.EXTRA_SID, self.default_attrs),
+ }
+
+ client_sids = required_sids | {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ }
+
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={'id': 'device'})
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create a user account.
+ client_creds = self._get_creds(account_type=self.AccountType.USER)
+ client_tgt = self.get_tgt(client_creds)
+
+ # Add the required groups to the client’s TGT.
+ client_tgt = self.modified_ticket(
+ client_tgt,
+ modify_pac_fn=partial(self.set_pac_sids,
+ new_sids=client_sids),
+ checksum_keys=self.get_krbtgt_checksum_key())
+
+ # Create an authentication policy that requires the client to belong to
+ # certain groups.
+ target_policy_sddl = self.allow_if(
+ f'Member_of {self.sddl_array_from_sids(required_sids)}')
+ target_policy = self.create_authn_policy(
+ enforced=True, computer_allowed_to=target_policy_sddl)
+
+ # Create a target account with the assigned policy.
+ target_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
+ assigned_policy=target_policy)
+
+ # Show that authorization succeeds.
+ self._tgs_req(client_tgt, 0, client_creds, target_creds, armor_tgt=mach_tgt)
+
+ self.check_tgs_log(client_creds, target_creds,
+ policy=target_policy)
+
+ def test_pac_resource_groups_present_to_service_sid_compression(self):
+ """Test that authorization succeeds if the client belongs to some
+ required resource groups, and the request is to a service that supports
+ SID compression.
+ """
+
+ required_sids = {
+ ('S-1-2-3-4', SidType.RESOURCE_SID, self.resource_attrs),
+ ('S-1-2-3-5', SidType.RESOURCE_SID, self.resource_attrs),
+ ('S-1-2-3-6', SidType.RESOURCE_SID, self.resource_attrs),
+ }
+
+ client_sids = required_sids | {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ }
+
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={'id': 'device'})
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create a user account.
+ client_creds = self._get_creds(account_type=self.AccountType.USER)
+ client_tgt = self.get_tgt(client_creds)
+
+ # Add the required groups to the client’s TGT.
+ client_tgt = self.modified_ticket(
+ client_tgt,
+ modify_pac_fn=partial(self.set_pac_sids,
+ new_sids=client_sids),
+ checksum_keys=self.get_krbtgt_checksum_key())
+
+ # Create an authentication policy that requires the client to belong to
+ # certain groups.
+ target_policy_sddl = self.allow_if(
+ f'Member_of {self.sddl_array_from_sids(required_sids)}')
+ target_policy = self.create_authn_policy(
+ enforced=True, computer_allowed_to=target_policy_sddl)
+
+ # Create a target account with the assigned policy.
+ target_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
+ assigned_policy=target_policy)
+
+ # Show that authorization fails.
+ self._tgs_req(
+ client_tgt, KDC_ERR_POLICY, client_creds, target_creds,
+ armor_tgt=mach_tgt,
+ expect_edata=self.expect_padata_outer,
+ # We aren’t particular about whether or not we get an NTSTATUS.
+ expect_status=None,
+ expected_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED)
+
+ self.check_tgs_log(
+ client_creds, target_creds,
+ policy=target_policy,
+ status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.KERBEROS_SERVER_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED)
+
+ def test_pac_resource_groups_present_to_service_no_sid_compression(self):
+ """Test that authorization succeeds if the client belongs to some
+ required resource groups, and the request is to a service that does not
+ support SID compression.
+ """
+
+ required_sids = {
+ ('S-1-2-3-4', SidType.RESOURCE_SID, self.resource_attrs),
+ ('S-1-2-3-5', SidType.RESOURCE_SID, self.resource_attrs),
+ ('S-1-2-3-6', SidType.RESOURCE_SID, self.resource_attrs),
+ }
+
+ client_sids = required_sids | {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ }
+
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={'id': 'device'})
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create a user account.
+ client_creds = self._get_creds(account_type=self.AccountType.USER)
+ client_tgt = self.get_tgt(client_creds)
+
+ # Add the required groups to the client’s TGT.
+ client_tgt = self.modified_ticket(
+ client_tgt,
+ modify_pac_fn=partial(self.set_pac_sids,
+ new_sids=client_sids),
+ checksum_keys=self.get_krbtgt_checksum_key())
+
+ # Create an authentication policy that requires the client to belong to
+ # certain groups.
+ target_policy_sddl = self.allow_if(
+ f'Member_of {self.sddl_array_from_sids(required_sids)}')
+ target_policy = self.create_authn_policy(
+ enforced=True, computer_allowed_to=target_policy_sddl)
+
+ # Create a target account with the assigned policy.
+ target_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
+ assigned_policy=target_policy,
+ additional_details={
+ 'msDS-SupportedEncryptionTypes': str((
+ security.KERB_ENCTYPE_RC4_HMAC_MD5) | (
+ security.KERB_ENCTYPE_AES256_CTS_HMAC_SHA1_96_SK) | (
+ security.KERB_ENCTYPE_RESOURCE_SID_COMPRESSION_DISABLED))})
+
+ # Show that authorization fails.
+ self._tgs_req(
+ client_tgt, KDC_ERR_POLICY, client_creds, target_creds,
+ armor_tgt=mach_tgt,
+ expect_edata=self.expect_padata_outer,
+ # We aren’t particular about whether or not we get an NTSTATUS.
+ expect_status=None,
+ expected_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED)
+
+ self.check_tgs_log(
+ client_creds, target_creds,
+ policy=target_policy,
+ status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.KERBEROS_SERVER_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED)
+
    def test_pac_well_known_groups_not_present(self):
        """Test that authorization fails if the client does not belong to one
        or more required well‐known groups.
        """

        # Well-known SIDs of which the policy will require membership of at
        # least one (Member_of_any below).
        required_sids = {
            (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, self.default_attrs),
            (security.SID_COMPOUNDED_AUTHENTICATION, SidType.EXTRA_SID, self.default_attrs),
            (self.aa_asserted_identity, SidType.EXTRA_SID, self.default_attrs),
            (self.service_asserted_identity, SidType.EXTRA_SID, self.default_attrs),
        }

        # The SIDs the client's TGT will be reduced to — none of the
        # required ones.
        client_sids = {
            (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
            (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
        }

        # Create a machine account with which to perform FAST.
        mach_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER,
            opts={'id': 'device'})
        mach_tgt = self.get_tgt(mach_creds)

        # Create a user account.
        client_creds = self._get_creds(account_type=self.AccountType.USER)
        client_tgt = self.get_tgt(client_creds)

        # Modify the client’s TGT to contain only the SID of the client’s
        # primary group.
        client_tgt = self.modified_ticket(
            client_tgt,
            modify_pac_fn=partial(self.set_pac_sids,
                                  new_sids=client_sids),
            checksum_keys=self.get_krbtgt_checksum_key())

        # Create an authentication policy that requires the client to belong
        # to certain groups.
        target_policy_sddl = self.allow_if(
            f'Member_of_any {self.sddl_array_from_sids(required_sids)}')
        target_policy = self.create_authn_policy(
            enforced=True, computer_allowed_to=target_policy_sddl)

        # Create a target account with the assigned policy.
        target_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
                                       assigned_policy=target_policy)

        # Show that authorization fails.
        self._tgs_req(
            client_tgt, KDC_ERR_POLICY, client_creds, target_creds,
            armor_tgt=mach_tgt,
            expect_edata=self.expect_padata_outer,
            # We aren’t particular about whether or not we get an NTSTATUS.
            expect_status=None,
            expected_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED)

        self.check_tgs_log(
            client_creds, target_creds,
            policy=target_policy,
            status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
            event=AuditEvent.KERBEROS_SERVER_RESTRICTION,
            reason=AuditReason.ACCESS_DENIED)
+
    # The following tests are thin wrappers that exercise
    # _run_pac_device_info_test() with every combination of its keyword
    # parameters: a device-membership target policy, an RODC-issued client
    # TGT, lack of Compound Identity support on the target, a device TGT
    # without the Claims Valid SID, and pre-existing device claims and/or
    # device info already present in the client’s TGT.

    def test_pac_device_info(self):
        self._run_pac_device_info_test()

    def test_pac_device_info_target_policy(self):
        target_policy = self.allow_if('Device_Member_of {{SID({device_0})}}')
        self._run_pac_device_info_test(target_policy=target_policy)

    def test_pac_device_info_rodc_issued(self):
        self._run_pac_device_info_test(rodc_issued=True)

    def test_pac_device_info_existing_device_info(self):
        self._run_pac_device_info_test(existing_device_info=True)

    def test_pac_device_info_existing_device_info_target_policy(self):
        target_policy = self.allow_if('Device_Member_of {{SID({device_0})}}')
        self._run_pac_device_info_test(target_policy=target_policy,
                                       existing_device_info=True)

    def test_pac_device_info_existing_device_info_rodc_issued(self):
        self._run_pac_device_info_test(rodc_issued=True,
                                       existing_device_info=True)

    def test_pac_device_info_existing_device_claims(self):
        self._run_pac_device_info_test(existing_device_claims=True)

    def test_pac_device_info_existing_device_claims_target_policy(self):
        target_policy = self.allow_if('Device_Member_of {{SID({device_0})}}')
        self._run_pac_device_info_test(target_policy=target_policy,
                                       existing_device_claims=True)

    def test_pac_device_info_existing_device_claims_rodc_issued(self):
        self._run_pac_device_info_test(rodc_issued=True,
                                       existing_device_claims=True)

    def test_pac_device_info_existing_device_info_and_claims(self):
        self._run_pac_device_info_test(existing_device_claims=True,
                                       existing_device_info=True)

    def test_pac_device_info_existing_device_info_and_claims_target_policy(self):
        target_policy = self.allow_if('Device_Member_of {{SID({device_0})}}')
        self._run_pac_device_info_test(target_policy=target_policy,
                                       existing_device_claims=True,
                                       existing_device_info=True)

    def test_pac_device_info_existing_device_info_and_claims_rodc_issued(self):
        self._run_pac_device_info_test(rodc_issued=True,
                                       existing_device_claims=True,
                                       existing_device_info=True)

    def test_pac_device_info_no_compound_id_support(self):
        self._run_pac_device_info_test(compound_id_support=False)

    def test_pac_device_info_no_compound_id_support_target_policy(self):
        target_policy = self.allow_if('Device_Member_of {{SID({device_0})}}')
        self._run_pac_device_info_test(target_policy=target_policy,
                                       compound_id_support=False)

    def test_pac_device_info_no_compound_id_support_rodc_issued(self):
        self._run_pac_device_info_test(rodc_issued=True,
                                       compound_id_support=False)

    def test_pac_device_info_no_compound_id_support_existing_device_info(self):
        self._run_pac_device_info_test(compound_id_support=False,
                                       existing_device_info=True)

    def test_pac_device_info_no_compound_id_support_existing_device_info_target_policy(self):
        target_policy = self.allow_if('Device_Member_of {{SID({device_0})}}')
        self._run_pac_device_info_test(target_policy=target_policy,
                                       compound_id_support=False,
                                       existing_device_info=True)

    def test_pac_device_info_no_compound_id_support_existing_device_info_rodc_issued(self):
        self._run_pac_device_info_test(rodc_issued=True,
                                       compound_id_support=False,
                                       existing_device_info=True)

    def test_pac_device_info_no_compound_id_support_existing_device_claims(self):
        self._run_pac_device_info_test(compound_id_support=False,
                                       existing_device_claims=True)

    def test_pac_device_info_no_compound_id_support_existing_device_claims_target_policy(self):
        target_policy = self.allow_if('Device_Member_of {{SID({device_0})}}')
        self._run_pac_device_info_test(target_policy=target_policy,
                                       compound_id_support=False,
                                       existing_device_claims=True)

    def test_pac_device_info_no_compound_id_support_existing_device_claims_rodc_issued(self):
        self._run_pac_device_info_test(rodc_issued=True,
                                       compound_id_support=False,
                                       existing_device_claims=True)

    def test_pac_device_info_no_compound_id_support_existing_device_info_and_claims(self):
        self._run_pac_device_info_test(compound_id_support=False,
                                       existing_device_claims=True,
                                       existing_device_info=True)

    def test_pac_device_info_no_compound_id_support_existing_device_info_and_claims_target_policy(self):
        target_policy = self.allow_if('Device_Member_of {{SID({device_0})}}')
        self._run_pac_device_info_test(target_policy=target_policy,
                                       compound_id_support=False,
                                       existing_device_claims=True,
                                       existing_device_info=True)

    def test_pac_device_info_no_compound_id_support_existing_device_info_and_claims_rodc_issued(self):
        self._run_pac_device_info_test(rodc_issued=True,
                                       compound_id_support=False,
                                       existing_device_claims=True,
                                       existing_device_info=True)

    def test_pac_device_info_no_compound_id_support_no_claims_valid_existing_device_info(self):
        self._run_pac_device_info_test(device_claims_valid=False,
                                       compound_id_support=False,
                                       existing_device_info=True)

    def test_pac_device_info_no_compound_id_support_no_claims_valid_existing_device_info_target_policy(self):
        target_policy = self.allow_if('Device_Member_of {{SID({device_0})}}')
        self._run_pac_device_info_test(target_policy=target_policy,
                                       device_claims_valid=False,
                                       compound_id_support=False,
                                       existing_device_info=True)

    def test_pac_device_info_no_compound_id_support_no_claims_valid_existing_device_info_rodc_issued(self):
        self._run_pac_device_info_test(rodc_issued=True,
                                       device_claims_valid=False,
                                       compound_id_support=False,
                                       existing_device_info=True)

    def test_pac_device_info_no_compound_id_support_no_claims_valid_existing_device_claims(self):
        self._run_pac_device_info_test(device_claims_valid=False,
                                       compound_id_support=False,
                                       existing_device_claims=True)

    def test_pac_device_info_no_compound_id_support_no_claims_valid_existing_device_claims_target_policy(self):
        target_policy = self.allow_if('Device_Member_of {{SID({device_0})}}')
        self._run_pac_device_info_test(target_policy=target_policy,
                                       device_claims_valid=False,
                                       compound_id_support=False,
                                       existing_device_claims=True)

    def test_pac_device_info_no_compound_id_support_no_claims_valid_existing_device_claims_rodc_issued(self):
        self._run_pac_device_info_test(rodc_issued=True,
                                       device_claims_valid=False,
                                       compound_id_support=False,
                                       existing_device_claims=True)

    def test_pac_device_info_no_compound_id_support_no_claims_valid_existing_device_info_and_claims(self):
        self._run_pac_device_info_test(device_claims_valid=False,
                                       compound_id_support=False,
                                       existing_device_claims=True,
                                       existing_device_info=True)

    def test_pac_device_info_no_compound_id_support_no_claims_valid_existing_device_info_and_claims_target_policy(self):
        target_policy = self.allow_if('Device_Member_of {{SID({device_0})}}')
        self._run_pac_device_info_test(target_policy=target_policy,
                                       device_claims_valid=False,
                                       compound_id_support=False,
                                       existing_device_claims=True,
                                       existing_device_info=True)

    def test_pac_device_info_no_compound_id_support_no_claims_valid_existing_device_info_and_claims_rodc_issued(self):
        self._run_pac_device_info_test(rodc_issued=True,
                                       device_claims_valid=False,
                                       compound_id_support=False,
                                       existing_device_claims=True,
                                       existing_device_info=True)

    def test_pac_device_info_no_claims_valid(self):
        self._run_pac_device_info_test(device_claims_valid=False)

    def test_pac_device_info_no_claims_valid_target_policy(self):
        target_policy = self.allow_if('Device_Member_of {{SID({device_0})}}')
        self._run_pac_device_info_test(target_policy=target_policy,
                                       device_claims_valid=False)

    def test_pac_device_info_no_claims_valid_rodc_issued(self):
        self._run_pac_device_info_test(rodc_issued=True,
                                       device_claims_valid=False)

    def test_pac_device_info_no_claims_valid_existing_device_info(self):
        self._run_pac_device_info_test(device_claims_valid=False,
                                       existing_device_info=True)

    def test_pac_device_info_no_claims_valid_existing_device_info_target_policy(self):
        target_policy = self.allow_if('Device_Member_of {{SID({device_0})}}')
        self._run_pac_device_info_test(target_policy=target_policy,
                                       device_claims_valid=False,
                                       existing_device_info=True)

    def test_pac_device_info_no_claims_valid_existing_device_info_rodc_issued(self):
        self._run_pac_device_info_test(rodc_issued=True,
                                       device_claims_valid=False,
                                       existing_device_info=True)

    def test_pac_device_info_no_claims_valid_existing_device_claims(self):
        self._run_pac_device_info_test(device_claims_valid=False,
                                       existing_device_claims=True)

    def test_pac_device_info_no_claims_valid_existing_device_claims_target_policy(self):
        target_policy = self.allow_if('Device_Member_of {{SID({device_0})}}')
        self._run_pac_device_info_test(target_policy=target_policy,
                                       device_claims_valid=False,
                                       existing_device_claims=True)

    def test_pac_device_info_no_claims_valid_existing_device_claims_rodc_issued(self):
        self._run_pac_device_info_test(rodc_issued=True,
                                       device_claims_valid=False,
                                       existing_device_claims=True)

    def test_pac_device_info_no_claims_valid_existing_device_info_and_claims(self):
        self._run_pac_device_info_test(device_claims_valid=False,
                                       existing_device_claims=True,
                                       existing_device_info=True)

    def test_pac_device_info_no_claims_valid_existing_device_info_and_claims_target_policy(self):
        target_policy = self.allow_if('Device_Member_of {{SID({device_0})}}')
        self._run_pac_device_info_test(target_policy=target_policy,
                                       device_claims_valid=False,
                                       existing_device_claims=True,
                                       existing_device_info=True)

    def test_pac_device_info_no_claims_valid_existing_device_info_and_claims_rodc_issued(self):
        self._run_pac_device_info_test(rodc_issued=True,
                                       device_claims_valid=False,
                                       existing_device_claims=True,
                                       existing_device_info=True)
+
    def _run_pac_device_info_test(self, *,
                                  target_policy=None,
                                  rodc_issued=False,
                                  compound_id_support=True,
                                  device_claims_valid=True,
                                  existing_device_claims=False,
                                  existing_device_info=False):
        """Test the groups of the client and the device after performing a
        FAST‐armored TGS‐REQ.

        :param target_policy: optional SDDL template for the target account's
            authentication policy; '{device_0}'-style placeholders are
            substituted from the policy_sids mapping with str.format_map().
        :param rodc_issued: if true, the client's TGT is re-encrypted and
            re-signed with the mock RODC krbtgt key.
        :param compound_id_support: whether the target account is created as
            supporting Compound Identity.
        :param device_claims_valid: whether the device's TGT carries the
            Claims Valid SID.
        :param existing_device_claims: whether the client's TGT already
            contains device claims before the request.
        :param existing_device_info: whether the client's TGT already
            contains device info (device SIDs) before the request.
        """

        client_claim_id = 'the name of the client’s client claim'
        client_claim_value = 'the value of the client’s client claim'

        client_claims = [
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                (client_claim_id, claims.CLAIM_TYPE_STRING, [client_claim_value]),
            ]),
        ]

        # Client claims are only expected when the ticket is not RODC-issued.
        if not rodc_issued:
            expected_client_claims = {
                client_claim_id: {
                    'source_type': claims.CLAIMS_SOURCE_TYPE_AD,
                    'type': claims.CLAIM_TYPE_STRING,
                    'values': (client_claim_value,),
                },
            }
        else:
            expected_client_claims = None

        device_claim_id = 'the name of the device’s client claim'
        device_claim_value = 'the value of the device’s client claim'

        device_claims = [
            (claims.CLAIMS_SOURCE_TYPE_AD, [
                (device_claim_id, claims.CLAIM_TYPE_STRING, [device_claim_value]),
            ]),
        ]

        existing_claim_id = 'the name of an existing device claim'
        existing_claim_value = 'the value of an existing device claim'

        existing_claims = [
            (claims.CLAIMS_SOURCE_TYPE_CERTIFICATE, [
                (existing_claim_id, claims.CLAIM_TYPE_STRING, [existing_claim_value]),
            ]),
        ]

        # Work out which device claims should come back: none for an
        # RODC-issued ticket; the pre-existing ones if the ticket already
        # carried both device info and device claims; the device's own
        # claims only when Compound Identity is supported and nothing
        # pre-existing is in the way.
        if rodc_issued:
            expected_device_claims = None
        elif existing_device_info and existing_device_claims:
            expected_device_claims = {
                existing_claim_id: {
                    'source_type': claims.CLAIMS_SOURCE_TYPE_CERTIFICATE,
                    'type': claims.CLAIM_TYPE_STRING,
                    'values': (existing_claim_value,),
                },
            }
        elif compound_id_support and not existing_device_info and not existing_device_claims:
            expected_device_claims = {
                device_claim_id: {
                    'source_type': claims.CLAIMS_SOURCE_TYPE_AD,
                    'type': claims.CLAIM_TYPE_STRING,
                    'values': (device_claim_value,),
                },
            }
        else:
            expected_device_claims = None

        # Create a machine account with which to perform FAST.
        mach_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER,
            opts={'id': 'device'})
        mach_tgt = self.get_tgt(mach_creds)

        client_sids = {
            (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
            (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
            # This to ensure we have EXTRA_SIDS set already, as
            # windows won't set that flag otherwise when adding one
            # more
            ('S-1-2-3-4', SidType.EXTRA_SID, self.default_attrs),
        }

        device_sid_0 = 'S-1-3-4-5'
        device_sid_1 = 'S-1-4-5-6'

        # Mapping used to fill the placeholders in a target_policy template.
        policy_sids = {
            'device_0': device_sid_0,
            'device_1': device_sid_1,
        }

        device_sids = {
            (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
            (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
            (device_sid_0, SidType.EXTRA_SID, self.resource_attrs),
            (device_sid_1, SidType.EXTRA_SID, self.resource_attrs),
        }

        if device_claims_valid:
            device_sids.add((security.SID_CLAIMS_VALID, SidType.EXTRA_SID, self.default_attrs))

        checksum_key = self.get_krbtgt_checksum_key()

        # Replace the SIDs in the machine account’s TGT with the device SIDs
        # above, and insert the device claims.
        mach_tgt = self.modified_ticket(
            mach_tgt,
            modify_pac_fn=[
                partial(self.set_pac_sids,
                        new_sids=device_sids),
                partial(self.set_pac_claims, client_claims=device_claims),
            ],
            checksum_keys=checksum_key)

        # Create a user account.
        client_creds = self.get_cached_creds(
            account_type=self.AccountType.USER,
            opts={
                'allowed_replication_mock': rodc_issued,
                'revealed_to_mock_rodc': rodc_issued,
            })
        client_tgt = self.get_tgt(client_creds)

        client_modify_pac_fns = [
            partial(self.set_pac_sids,
                    new_sids=client_sids),
            partial(self.set_pac_claims, client_claims=client_claims),
        ]

        if existing_device_claims:
            client_modify_pac_fns.append(
                partial(self.set_pac_claims, device_claims=existing_claims))
        if existing_device_info:
            # These are different from the SIDs in the device’s TGT.
            existing_sid_0 = 'S-1-7-8-9'
            existing_sid_1 = 'S-1-9-8-7'

            policy_sids.update({
                'existing_0': existing_sid_0,
                'existing_1': existing_sid_1,
            })

            existing_sids = {
                (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
                (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
                (existing_sid_0, SidType.EXTRA_SID, self.resource_attrs),
                (existing_sid_1, SidType.EXTRA_SID, self.resource_attrs),
            }

            client_modify_pac_fns.append(partial(
                self.set_pac_device_sids, new_sids=existing_sids, user_rid=mach_creds.get_rid()))

        if rodc_issued:
            rodc_krbtgt_creds = self.get_mock_rodc_krbtgt_creds()
            rodc_krbtgt_key = self.TicketDecryptionKey_from_creds(rodc_krbtgt_creds)
            rodc_checksum_key = {
                krb5pac.PAC_TYPE_KDC_CHECKSUM: rodc_krbtgt_key,
            }

        # Replace the SIDs and claims in the client’s TGT, re-keying it with
        # the RODC krbtgt key if the ticket is to appear RODC-issued.
        client_tgt = self.modified_ticket(
            client_tgt,
            modify_pac_fn=client_modify_pac_fns,
            new_ticket_key=rodc_krbtgt_key if rodc_issued else None,
            checksum_keys=rodc_checksum_key if rodc_issued else checksum_key)

        if target_policy is None:
            policy = None
            assigned_policy = None
        else:
            # Fill in the SID placeholders of the policy template.
            policy = self.create_authn_policy(
                enforced=True,
                computer_allowed_to=target_policy.format_map(policy_sids))
            assigned_policy = str(policy.dn)

        target_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER,
            opts={
                'supported_enctypes':
                    security.KERB_ENCTYPE_RC4_HMAC_MD5
                    | security.KERB_ENCTYPE_AES256_CTS_HMAC_SHA1_96,
                # Indicate that Compound Identity is supported.
                'compound_id_support': compound_id_support,
                'assigned_policy': assigned_policy,
            })

        expected_sids = {
            (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
            (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
            # The client’s groups are not to include the Asserted Identity and
            # Claims Valid SIDs.
        }
        if rodc_issued:
            expected_sids.add((security.SID_CLAIMS_VALID, SidType.EXTRA_SID, self.default_attrs))
        else:
            expected_sids.add(('S-1-2-3-4', SidType.EXTRA_SID, self.default_attrs))

        if rodc_issued:
            expected_device_sids = None
        elif existing_device_info:
            expected_device_sids = {
                (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
                (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
                ('S-1-7-8-9', SidType.EXTRA_SID, self.resource_attrs),
                ('S-1-9-8-7', SidType.EXTRA_SID, self.resource_attrs),
            }
        elif compound_id_support and not existing_device_claims:
            expected_sids.add((security.SID_COMPOUNDED_AUTHENTICATION, SidType.EXTRA_SID, self.default_attrs))

            expected_device_sids = {
                (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
                (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
                ('S-1-3-4-5', SidType.EXTRA_SID, self.resource_attrs),
                ('S-1-4-5-6', SidType.EXTRA_SID, self.resource_attrs),
            }

            if device_claims_valid:
                # NOTE(review): the Claims Valid SID is expected wrapped in a
                # frozenset here — presumably matching how resource-group
                # SIDs are grouped in device info; confirm against
                # set_pac_device_sids and the device-groups checking code.
                expected_device_sids.add(frozenset([(security.SID_CLAIMS_VALID, SidType.RESOURCE_SID, self.default_attrs)]))
        else:
            expected_device_sids = None

        samdb = self.get_samdb()
        domain_sid_str = samdb.get_domain_sid()

        expected_sids = self.map_sids(expected_sids, None, domain_sid_str)
        # The device SIDs will be put into the PAC unmodified.
        expected_device_sids = self.map_sids(expected_device_sids, None, domain_sid_str)

        # Show that authorization succeeds.
        self._tgs_req(client_tgt, 0, client_creds, target_creds, armor_tgt=mach_tgt,
                      expected_groups=expected_sids,
                      expect_device_info=bool(expected_device_sids),
                      expected_device_domain_sid=domain_sid_str,
                      expected_device_groups=expected_device_sids,
                      expect_client_claims=True,
                      expected_client_claims=expected_client_claims,
                      expect_device_claims=bool(expected_device_claims),
                      expected_device_claims=expected_device_claims)

        self.check_tgs_log(client_creds, target_creds, policy=policy)
+
+ def test_pac_extra_sids_behaviour(self):
+ """Test the groups of the client and the device after performing a
+ FAST‐armored TGS‐REQ.
+ """
+
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={'id': 'device'})
+ mach_tgt = self.get_tgt(mach_creds)
+
+ client_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ }
+
+ # Create a user account.
+ client_creds = self._get_creds(account_type=self.AccountType.USER)
+ client_tgt = self.get_tgt(client_creds)
+
+ # Modify the client’s TGT to contain only the SID of the client’s
+ # primary group.
+ client_tgt = self.modified_ticket(
+ client_tgt,
+ modify_pac_fn=partial(self.set_pac_sids,
+ new_sids=client_sids),
+ checksum_keys=self.get_krbtgt_checksum_key())
+
+ # Indicate that Compound Identity is supported.
+ target_creds, _ = self.get_target(to_krbtgt=False, compound_id=True)
+
+ expected_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (security.SID_COMPOUNDED_AUTHENTICATION, SidType.EXTRA_SID, self.default_attrs)
+ # The client’s groups are not to include the Asserted Identity and
+ # Claims Valid SIDs.
+ }
+
+ samdb = self.get_samdb()
+ domain_sid_str = samdb.get_domain_sid()
+
+ expected_sids = self.map_sids(expected_sids, None, domain_sid_str)
+
+ # Show that authorization succeeds.
+ self._tgs_req(client_tgt, 0, client_creds, target_creds, armor_tgt=mach_tgt,
+ expected_groups=expected_sids)
+
+ self.check_tgs_log(client_creds, target_creds)
+
+ def test_pac_claims_not_present(self):
+        """Test that authentication fails if the client does not have a
+        required claim.
+ """
+
+ claim_id = 'the name of the claim'
+ claim_value = 'the value of the claim'
+
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={'id': 'device'})
+ mach_tgt = self.get_tgt(mach_creds)
+
+        # Create an authentication policy that requires the client to have a
+        # certain claim.
+ target_policy_sddl = self.allow_if(
+ f'@User.{escaped_claim_id(claim_id)} == "{claim_value}"')
+ target_policy = self.create_authn_policy(
+ enforced=True, computer_allowed_to=target_policy_sddl)
+
+ # Create a user account.
+ client_creds = self._get_creds(account_type=self.AccountType.USER)
+ client_tgt = self.get_tgt(client_creds)
+
+ # Create a target account with the assigned policy.
+ target_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
+ assigned_policy=target_policy)
+
+ # Show that authorization fails.
+ self._tgs_req(
+ client_tgt, KDC_ERR_POLICY, client_creds, target_creds,
+ armor_tgt=mach_tgt,
+ expect_edata=self.expect_padata_outer,
+ # We aren’t particular about whether or not we get an NTSTATUS.
+ expect_status=None,
+ expected_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED)
+
+ self.check_tgs_log(
+ client_creds,
+ target_creds,
+ policy=target_policy,
+ status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.KERBEROS_SERVER_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED)
+
+ def test_pac_claims_present(self):
+ """Test that authentication succeeds if the user has a required
+ claim.
+ """
+
+ claim_id = 'the name of the claim'
+ claim_value = 'the value of the claim'
+
+ pac_claims = [
+ (claims.CLAIMS_SOURCE_TYPE_AD, [
+ (claim_id, claims.CLAIM_TYPE_STRING, [claim_value]),
+ ]),
+ ]
+
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={'id': 'device'})
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create an authentication policy that requires the user to have a
+ # certain claim.
+ target_policy_sddl = self.allow_if(
+ f'@User.{escaped_claim_id(claim_id)} == "{claim_value}"')
+ target_policy = self.create_authn_policy(
+ enforced=True, computer_allowed_to=target_policy_sddl)
+
+ # Create a user account.
+ client_creds = self._get_creds(account_type=self.AccountType.USER)
+ client_tgt = self.get_tgt(client_creds)
+
+ # Add the required claim to the client’s TGT.
+ client_tgt = self.modified_ticket(
+ client_tgt,
+ modify_pac_fn=partial(self.set_pac_claims,
+ client_claims=pac_claims),
+ checksum_keys=self.get_krbtgt_checksum_key())
+
+ # Create a target account with the assigned policy.
+ target_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
+ assigned_policy=target_policy)
+
+ # Show that authorization succeeds.
+ self._tgs_req(client_tgt, 0, client_creds, target_creds, armor_tgt=mach_tgt)
+
+ self.check_tgs_log(client_creds, target_creds,
+ policy=target_policy)
+
+ def test_pac_claims_invalid(self):
+        """Test that authentication fails if the client’s required claim is not
+        valid.
+ """
+
+ claim_id = 'the name of the claim'
+ claim_value = 'the value of the claim'
+
+ pac_claims = [
+ (claims.CLAIMS_SOURCE_TYPE_AD, [
+ (claim_id, claims.CLAIM_TYPE_STRING, [claim_value]),
+ ]),
+ ]
+
+        # The client’s SIDs do not include the Claims Valid SID.
+ device_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ }
+
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={'id': 'device'})
+ mach_tgt = self.get_tgt(mach_creds)
+
+        # Create an authentication policy that requires the client to have a
+        # certain claim.
+ target_policy_sddl = self.allow_if(
+ f'@User.{escaped_claim_id(claim_id)} == "{claim_value}"')
+ target_policy = self.create_authn_policy(
+ enforced=True, computer_allowed_to=target_policy_sddl)
+
+ # Create a user account.
+ client_creds = self._get_creds(account_type=self.AccountType.USER)
+ client_tgt = self.get_tgt(client_creds)
+
+ # Add the SIDs and the required claim to the client’s TGT.
+ client_tgt = self.modified_ticket(
+ client_tgt,
+ modify_pac_fn=[
+ partial(self.set_pac_claims, client_claims=pac_claims),
+ partial(self.set_pac_sids, new_sids=device_sids)],
+ checksum_keys=self.get_krbtgt_checksum_key())
+
+ # Create a target account with the assigned policy.
+ target_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
+ assigned_policy=target_policy)
+
+ # Show that authorization fails.
+ self._tgs_req(
+ client_tgt, KDC_ERR_POLICY, client_creds, target_creds,
+ armor_tgt=mach_tgt,
+ expect_edata=self.expect_padata_outer,
+ # We aren’t particular about whether or not we get an NTSTATUS.
+ expect_status=None,
+ expected_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED)
+
+ self.check_tgs_log(
+ client_creds,
+ target_creds,
+ policy=target_policy,
+ status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.KERBEROS_SERVER_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED)
+
+ def test_pac_device_claims_not_present(self):
+ """Test that authorization fails if the device does not have a
+ required claim.
+ """
+
+ claim_id = 'the name of the claim'
+ claim_value = 'the value of the claim'
+
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={'id': 'device'})
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create an authentication policy that requires the device to have a
+ # certain device claim.
+ target_policy_sddl = self.allow_if(
+ f'@Device.{escaped_claim_id(claim_id)} == "{claim_value}"')
+ target_policy = self.create_authn_policy(
+ enforced=True, computer_allowed_to=target_policy_sddl)
+
+ # Create a user account.
+ client_creds = self._get_creds(account_type=self.AccountType.USER)
+ client_tgt = self.get_tgt(client_creds)
+
+ # Create a target account with the assigned policy.
+ target_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
+ assigned_policy=target_policy)
+
+ # Show that authorization fails.
+ self._tgs_req(
+ client_tgt, KDC_ERR_POLICY, client_creds, target_creds,
+ armor_tgt=mach_tgt,
+ expect_edata=self.expect_padata_outer,
+ # We aren’t particular about whether or not we get an NTSTATUS.
+ expect_status=None,
+ expected_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED)
+
+ self.check_tgs_log(
+ client_creds,
+ target_creds,
+ policy=target_policy,
+ status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.KERBEROS_SERVER_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED)
+
+ def test_pac_device_claims_present(self):
+ """Test that authorization succeeds if the device has a required claim.
+ """
+
+ claim_id = 'the name of the claim'
+ claim_value = 'the value of the claim'
+
+ pac_claims = [
+ (claims.CLAIMS_SOURCE_TYPE_AD, [
+ (claim_id, claims.CLAIM_TYPE_STRING, [claim_value]),
+ ]),
+ ]
+
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={'id': 'device'})
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Add the required claim to the machine account’s TGT.
+ mach_tgt = self.modified_ticket(
+ mach_tgt,
+ modify_pac_fn=partial(self.set_pac_claims,
+ client_claims=pac_claims),
+ checksum_keys=self.get_krbtgt_checksum_key())
+
+ # Create an authentication policy that requires the device to have a
+ # certain device claim.
+ target_policy_sddl = self.allow_if(
+ f'@Device.{escaped_claim_id(claim_id)} == "{claim_value}"')
+ target_policy = self.create_authn_policy(
+ enforced=True, computer_allowed_to=target_policy_sddl)
+
+ # Create a user account.
+ client_creds = self._get_creds(account_type=self.AccountType.USER)
+ client_tgt = self.get_tgt(client_creds)
+
+ # Create a target account with the assigned policy.
+ target_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
+ assigned_policy=target_policy)
+
+ # Show that authorization succeeds.
+ self._tgs_req(client_tgt, 0, client_creds, target_creds, armor_tgt=mach_tgt)
+
+ self.check_tgs_log(client_creds, target_creds,
+ policy=target_policy)
+
+ def test_pac_device_claims_invalid(self):
+ """Test that authorization fails if the device’s required claim is not
+ valid.
+ """
+
+ claim_id = 'the name of the claim'
+ claim_value = 'the value of the claim'
+
+ pac_claims = [
+ (claims.CLAIMS_SOURCE_TYPE_AD, [
+ (claim_id, claims.CLAIM_TYPE_STRING, [claim_value]),
+ ]),
+ ]
+
+ # The device’s SIDs do not include the Claims Valid SID.
+ device_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ }
+
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={'id': 'device'})
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Add the SIDs and the required claim to the machine account’s TGT.
+ mach_tgt = self.modified_ticket(
+ mach_tgt,
+ modify_pac_fn=[
+ partial(self.set_pac_claims, client_claims=pac_claims),
+ partial(self.set_pac_sids, new_sids=device_sids)],
+ checksum_keys=self.get_krbtgt_checksum_key())
+
+ # Create an authentication policy that requires the device to have a
+ # certain claim.
+ target_policy_sddl = self.allow_if(
+ f'@Device.{escaped_claim_id(claim_id)} == "{claim_value}"')
+ target_policy = self.create_authn_policy(
+ enforced=True, computer_allowed_to=target_policy_sddl)
+
+ # Create a user account.
+ client_creds = self._get_creds(account_type=self.AccountType.USER)
+ client_tgt = self.get_tgt(client_creds)
+
+ # Create a target account with the assigned policy.
+ target_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
+ assigned_policy=target_policy)
+
+ # Show that authorization fails.
+ self._tgs_req(
+ client_tgt, KDC_ERR_POLICY, client_creds, target_creds,
+ armor_tgt=mach_tgt,
+ expect_edata=self.expect_padata_outer,
+ # We aren’t particular about whether or not we get an NTSTATUS.
+ expect_status=None,
+ expected_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED)
+
+ self.check_tgs_log(
+ client_creds,
+ target_creds,
+ policy=target_policy,
+ status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.KERBEROS_SERVER_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED)
+
+ def test_pac_device_claims_invalid_no_attrs(self):
+        """Test that authorization succeeds even if the device’s Claims Valid
+        SID has no attributes.
+ """
+
+ claim_id = 'the name of the claim'
+ claim_value = 'the value of the claim'
+
+ pac_claims = [
+ (claims.CLAIMS_SOURCE_TYPE_AD, [
+ (claim_id, claims.CLAIM_TYPE_STRING, [claim_value]),
+ ]),
+ ]
+
+ device_sids = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ # The device’s SIDs include the Claims Valid SID, but it has no
+ # attributes.
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, 0),
+ }
+
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={'id': 'device'})
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Add the SIDs and the required claim to the machine account’s TGT.
+ mach_tgt = self.modified_ticket(
+ mach_tgt,
+ modify_pac_fn=[
+ partial(self.set_pac_claims, client_claims=pac_claims),
+ partial(self.set_pac_sids, new_sids=device_sids)],
+ checksum_keys=self.get_krbtgt_checksum_key())
+
+ # Create an authentication policy that requires the device to have a
+ # certain claim.
+ target_policy_sddl = self.allow_if(
+ f'@Device.{escaped_claim_id(claim_id)} == "{claim_value}"')
+ target_policy = self.create_authn_policy(
+ enforced=True, computer_allowed_to=target_policy_sddl)
+
+ # Create a user account.
+ client_creds = self._get_creds(account_type=self.AccountType.USER)
+ client_tgt = self.get_tgt(client_creds)
+
+ # Create a target account with the assigned policy.
+ target_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
+ assigned_policy=target_policy)
+
+ # Show that authorization succeeds.
+ self._tgs_req(client_tgt, 0, client_creds, target_creds, armor_tgt=mach_tgt)
+
+ self.check_tgs_log(client_creds, target_creds,
+ policy=target_policy)
+
+ def test_simple_as_req_client_and_target_policy(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create an authentication policy that explicitly allows the machine
+ # account for a user.
+ client_policy_sddl = f'O:SYD:(XA;;CR;;;{mach_creds.get_sid()};(Member_of {{SID({mach_creds.get_sid()}), SID({mach_creds.get_sid()})}}))'
+ client_policy = self.create_authn_policy(enforced=True,
+ user_allowed_from=client_policy_sddl)
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=client_policy)
+
+ # Create an authentication policy that applies to a computer and
+ # explicitly allows the user account to obtain a service ticket.
+ target_policy_sddl = f'O:SYD:(XA;;CR;;;{client_creds.get_sid()};(Member_of SID({client_creds.get_sid()})))'
+ target_policy = self.create_authn_policy(enforced=True,
+ computer_allowed_to=target_policy_sddl)
+
+ # Create a computer account with the assigned policy.
+ target_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
+ assigned_policy=target_policy)
+
+ expected_groups = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (security.SID_AUTHENTICATION_AUTHORITY_ASSERTED_IDENTITY, SidType.EXTRA_SID, self.default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, self.default_attrs),
+ }
+
+ # Show that obtaining a service ticket with an AS‐REQ is allowed.
+ self._armored_as_req(client_creds,
+ target_creds,
+ mach_tgt,
+ expected_groups=expected_groups)
+
+ self.check_as_log(client_creds,
+ armor_creds=mach_creds,
+ client_policy=client_policy,
+ server_policy=target_policy)
+
+ def test_device_in_world_group(self):
+ self._check_device_in_group(security.SID_WORLD)
+
+ def test_device_in_network_group(self):
+ self._check_device_not_in_group(security.SID_NT_NETWORK)
+
+ def test_device_in_authenticated_users(self):
+ self._check_device_in_group(security.SID_NT_AUTHENTICATED_USERS)
+
+ def test_device_in_aa_asserted_identity(self):
+ self._check_device_in_group(
+ security.SID_AUTHENTICATION_AUTHORITY_ASSERTED_IDENTITY)
+
+ def test_device_in_service_asserted_identity(self):
+ self._check_device_not_in_group(security.SID_SERVICE_ASSERTED_IDENTITY)
+
+ def test_device_in_compounded_authentication(self):
+ self._check_device_not_in_group(security.SID_COMPOUNDED_AUTHENTICATION)
+
+ def test_device_in_claims_valid(self):
+ self._check_device_in_group(security.SID_CLAIMS_VALID)
+
+ def _check_device_in_group(self, group):
+ self._check_device_membership(group, expect_in_group=True)
+
+ def _check_device_not_in_group(self, group):
+ self._check_device_membership(group, expect_in_group=False)
+
+ def _check_device_membership(self, group, *, expect_in_group):
+ """Test that authentication succeeds or fails when the device is
+ required to belong to a certain group.
+ """
+
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={'id': 'device'})
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create an authentication policy that requires the device to belong to
+ # a certain group.
+ in_group_sddl = self.allow_if(f'Device_Member_of {{SID({group})}}')
+ in_group_policy = self.create_authn_policy(
+ enforced=True, computer_allowed_to=in_group_sddl)
+
+ # Create a user account.
+ client_creds = self._get_creds(account_type=self.AccountType.USER)
+ client_tgt = self.get_tgt(client_creds)
+
+ # Create a target account with the assigned policy.
+ target_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
+ assigned_policy=in_group_policy)
+
+ tgs_success_args = {}
+ tgs_failure_args = {
+ 'expect_edata': self.expect_padata_outer,
+ # We aren’t particular about whether or not we get an NTSTATUS.
+ 'expect_status': None,
+ 'expected_status': ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ }
+
+ # Test whether authorization succeeds or fails.
+ self._tgs_req(client_tgt,
+ 0 if expect_in_group else KDC_ERR_POLICY,
+ client_creds,
+ target_creds,
+ armor_tgt=mach_tgt,
+ **(tgs_success_args if expect_in_group
+ else tgs_failure_args))
+
+ policy_success_args = {}
+ policy_failure_args = {
+ 'status': ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ 'event': AuditEvent.KERBEROS_SERVER_RESTRICTION,
+ 'reason': AuditReason.ACCESS_DENIED,
+ }
+
+ self.check_tgs_log(client_creds, target_creds,
+ policy=in_group_policy,
+ **(policy_success_args if expect_in_group
+ else policy_failure_args))
+
+ # Create an authentication policy that requires the device not to belong
+ # to the group.
+ not_in_group_sddl = self.allow_if(
+ f'Not_Device_Member_of {{SID({group})}}')
+ not_in_group_policy = self.create_authn_policy(
+ enforced=True, computer_allowed_to=not_in_group_sddl)
+
+ # Create a target account with the assigned policy.
+ target_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
+ assigned_policy=not_in_group_policy)
+
+ # Test whether authorization succeeds or fails.
+ self._tgs_req(client_tgt,
+ KDC_ERR_POLICY if expect_in_group else 0,
+ client_creds,
+ target_creds,
+ armor_tgt=mach_tgt,
+ **(tgs_failure_args if expect_in_group
+ else tgs_success_args))
+
+ self.check_tgs_log(client_creds, target_creds,
+ policy=not_in_group_policy,
+ **(policy_failure_args if expect_in_group
+ else policy_success_args))
+
+ def test_simple_as_req_client_policy_only(self):
+ # Create a machine account with which to perform FAST.
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER)
+ mach_tgt = self.get_tgt(mach_creds)
+
+ # Create an authentication policy that explicitly allows the machine
+ # account for a user.
+ client_policy_sddl = f'O:SYD:(XA;;CR;;;{mach_creds.get_sid()};(Member_of SID({mach_creds.get_sid()})))'
+ client_policy = self.create_authn_policy(enforced=True,
+ user_allowed_from=client_policy_sddl)
+
+ # Create a user account with the assigned policy.
+ client_creds = self._get_creds(account_type=self.AccountType.USER,
+ assigned_policy=client_policy)
+
+ expected_groups = {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, self.default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (security.SID_AUTHENTICATION_AUTHORITY_ASSERTED_IDENTITY, SidType.EXTRA_SID, self.default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, self.default_attrs),
+ }
+
+ # Show that obtaining a service ticket with an AS‐REQ is allowed.
+ self._armored_as_req(client_creds,
+ self.get_krbtgt_creds(),
+ mach_tgt,
+ expected_groups=expected_groups)
+
+ self.check_as_log(client_creds,
+ armor_creds=mach_creds,
+ client_policy=client_policy)
+
+
+class SamLogonTests(ConditionalAceBaseTests):
+ # These tests show that although conditional ACEs work with SamLogon,
+ # claims do not appear to be used at all.
+
+ def test_samlogon_allowed_to_computer_member_of(self):
+ # Create an authentication policy that applies to a computer and
+ # requires that the account should belong to both groups.
+ allowed = (f'O:SYD:(XA;;CR;;;WD;(Member_of '
+ f'{{SID({self._group0_sid}), SID({self._group1_sid})}}))')
+ policy = self.create_authn_policy(enforced=True,
+ computer_allowed_to=allowed)
+
+ # Create a computer account with the assigned policy.
+ target_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
+ assigned_policy=policy)
+
+ # When the account is a member of both groups, network SamLogon
+ # succeeds.
+ self._test_samlogon(creds=self._member_of_both_creds_ntlm,
+ domain_joined_mach_creds=target_creds,
+ logon_type=netlogon.NetlogonNetworkInformation)
+
+ self.check_samlogon_network_log(self._member_of_both_creds_ntlm,
+ server_policy=policy)
+
+ # Interactive SamLogon also succeeds.
+ self._test_samlogon(creds=self._member_of_both_creds_ntlm,
+ domain_joined_mach_creds=target_creds,
+ logon_type=netlogon.NetlogonInteractiveInformation)
+
+ self.check_samlogon_interactive_log(self._member_of_both_creds_ntlm,
+ server_policy=policy)
+
+ # When the account is a member of neither group, network SamLogon
+ # fails.
+ self._test_samlogon(
+ creds=self._mach_creds_ntlm,
+ domain_joined_mach_creds=target_creds,
+ logon_type=netlogon.NetlogonNetworkInformation,
+ expect_error=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED)
+
+ self.check_samlogon_network_log(
+ self._mach_creds_ntlm,
+ server_policy=policy,
+ server_policy_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.NTLM_SERVER_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED)
+
+ # Interactive SamLogon also fails.
+ self._test_samlogon(
+ creds=self._mach_creds_ntlm,
+ domain_joined_mach_creds=target_creds,
+ logon_type=netlogon.NetlogonInteractiveInformation,
+ expect_error=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED)
+
+ self.check_samlogon_interactive_log(
+ self._mach_creds_ntlm,
+ server_policy=policy,
+ server_policy_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.NTLM_SERVER_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED)
+
+ def test_samlogon_allowed_to_service_member_of(self):
+ # Create an authentication policy that applies to a managed service and
+ # requires that the account should belong to both groups.
+ allowed = (f'O:SYD:(XA;;CR;;;WD;(Member_of '
+ f'{{SID({self._group0_sid}), SID({self._group1_sid})}}))')
+ policy = self.create_authn_policy(enforced=True,
+ service_allowed_to=allowed)
+
+ # Create a managed service account with the assigned policy.
+ target_creds = self._get_creds(
+ account_type=self.AccountType.MANAGED_SERVICE,
+ assigned_policy=policy)
+
+ # When the account is a member of both groups, network SamLogon
+ # succeeds.
+ self._test_samlogon(creds=self._member_of_both_creds_ntlm,
+ domain_joined_mach_creds=target_creds,
+ logon_type=netlogon.NetlogonNetworkInformation)
+
+ self.check_samlogon_network_log(self._member_of_both_creds_ntlm,
+ server_policy=policy)
+
+ # Interactive SamLogon also succeeds.
+ self._test_samlogon(creds=self._member_of_both_creds_ntlm,
+ domain_joined_mach_creds=target_creds,
+ logon_type=netlogon.NetlogonInteractiveInformation)
+
+ self.check_samlogon_interactive_log(self._member_of_both_creds_ntlm,
+ server_policy=policy)
+
+ # When the account is a member of neither group, network SamLogon
+ # fails.
+ self._test_samlogon(
+ creds=self._mach_creds_ntlm,
+ domain_joined_mach_creds=target_creds,
+ logon_type=netlogon.NetlogonNetworkInformation,
+ expect_error=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED)
+
+ self.check_samlogon_network_log(
+ self._mach_creds_ntlm,
+ server_policy=policy,
+ server_policy_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.NTLM_SERVER_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED)
+
+ # Interactive SamLogon also fails.
+ self._test_samlogon(
+ creds=self._mach_creds_ntlm,
+ domain_joined_mach_creds=target_creds,
+ logon_type=netlogon.NetlogonInteractiveInformation,
+ expect_error=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED)
+
+ self.check_samlogon_interactive_log(
+ self._mach_creds_ntlm,
+ server_policy=policy,
+ server_policy_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.NTLM_SERVER_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED)
+
+ def test_samlogon_allowed_to_computer_silo(self):
+ # Create an authentication policy that applies to a computer and
+ # requires that the account belong to the enforced silo.
+ allowed = (f'O:SYD:(XA;;CR;;;WD;'
+ f'(@User.ad://ext/AuthenticationSilo == '
+ f'"{self._enforced_silo}"))')
+ policy = self.create_authn_policy(enforced=True,
+ computer_allowed_to=allowed)
+
+ # Create a computer account with the assigned policy.
+ target_creds = self._get_creds(account_type=self.AccountType.COMPUTER,
+ assigned_policy=policy)
+
+ # Even though the account is a member of the silo, its claims are
+ # ignored, and network SamLogon fails.
+ self._test_samlogon(
+ creds=self._member_of_enforced_silo_ntlm,
+ domain_joined_mach_creds=target_creds,
+ logon_type=netlogon.NetlogonNetworkInformation,
+ expect_error=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED)
+
+ self.check_samlogon_network_log(
+ self._member_of_enforced_silo_ntlm,
+ server_policy=policy,
+ server_policy_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.NTLM_SERVER_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED)
+
+ # Interactive SamLogon also fails.
+ self._test_samlogon(
+ creds=self._member_of_enforced_silo_ntlm,
+ domain_joined_mach_creds=target_creds,
+ logon_type=netlogon.NetlogonInteractiveInformation,
+ expect_error=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED)
+
+ self.check_samlogon_interactive_log(
+ self._member_of_enforced_silo_ntlm,
+ server_policy=policy,
+ server_policy_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.NTLM_SERVER_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED)
+
+ def test_samlogon_allowed_to_service_silo(self):
+ # Create an authentication policy that applies to a managed service and
+ # requires that the account belong to the enforced silo.
+ allowed = (f'O:SYD:(XA;;CR;;;WD;'
+ f'(@User.ad://ext/AuthenticationSilo == '
+ f'"{self._enforced_silo}"))')
+ policy = self.create_authn_policy(enforced=True,
+ service_allowed_to=allowed)
+
+ # Create a managed service account with the assigned policy.
+ target_creds = self._get_creds(
+ account_type=self.AccountType.MANAGED_SERVICE,
+ assigned_policy=policy)
+
+ # Even though the account is a member of the silo, its claims are
+ # ignored, and network SamLogon fails.
+ self._test_samlogon(
+ creds=self._member_of_enforced_silo_ntlm,
+ domain_joined_mach_creds=target_creds,
+ logon_type=netlogon.NetlogonNetworkInformation,
+ expect_error=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED)
+
+ self.check_samlogon_network_log(
+ self._member_of_enforced_silo_ntlm,
+ server_policy=policy,
+ server_policy_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.NTLM_SERVER_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED)
+
+ # Interactive SamLogon also fails.
+ self._test_samlogon(
+ creds=self._member_of_enforced_silo_ntlm,
+ domain_joined_mach_creds=target_creds,
+ logon_type=netlogon.NetlogonInteractiveInformation,
+ expect_error=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED)
+
+ self.check_samlogon_interactive_log(
+ self._member_of_enforced_silo_ntlm,
+ server_policy=policy,
+ server_policy_status=ntstatus.NT_STATUS_AUTHENTICATION_FIREWALL_FAILED,
+ event=AuditEvent.NTLM_SERVER_RESTRICTION,
+ reason=AuditReason.ACCESS_DENIED)
+
+
+if __name__ == '__main__':
+ global_asn1_print = False
+ global_hexdump = False
+ import unittest
+ unittest.main()
diff --git a/python/samba/tests/krb5/device_tests.py b/python/samba/tests/krb5/device_tests.py
new file mode 100755
index 0000000..ec2fce6
--- /dev/null
+++ b/python/samba/tests/krb5/device_tests.py
@@ -0,0 +1,2211 @@
+#!/usr/bin/env python3
+# Unix SMB/CIFS implementation.
+# Copyright (C) Stefan Metzmacher 2020
+# Copyright (C) Catalyst.Net Ltd 2022
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import sys
+import os
+
+sys.path.insert(0, 'bin/python')
+os.environ['PYTHONUNBUFFERED'] = '1'
+
+import random
+import re
+
+from samba.dcerpc import netlogon, security
+from samba.tests import DynamicTestCase, env_get_var_value
+from samba.tests.krb5 import kcrypto
+from samba.tests.krb5.kdc_base_test import GroupType, KDCBaseTest, Principal
+from samba.tests.krb5.raw_testcase import Krb5EncryptionKey, RawKerberosTest
+from samba.tests.krb5.rfc4120_constants import (
+ AES256_CTS_HMAC_SHA1_96,
+ ARCFOUR_HMAC_MD5,
+ KRB_TGS_REP,
+)
+
+SidType = RawKerberosTest.SidType
+
+global_asn1_print = False
+global_hexdump = False
+
+
+@DynamicTestCase
+class DeviceTests(KDCBaseTest):
+ # Placeholder objects that represent accounts undergoing testing.
+ user = object()
+ mach = object()
+ trust_user = object()
+ trust_mach = object()
+
+ # Constants for group SID attributes.
+ default_attrs = security.SE_GROUP_DEFAULT_FLAGS
+ resource_attrs = default_attrs | security.SE_GROUP_RESOURCE
+
+ asserted_identity = security.SID_AUTHENTICATION_AUTHORITY_ASSERTED_IDENTITY
+ compounded_auth = security.SID_COMPOUNDED_AUTHENTICATION
+
+ user_trust_domain = 'S-1-5-21-123-456-111'
+ mach_trust_domain = 'S-1-5-21-123-456-222'
+
+ def setUp(self):  # Propagate module-level debug flags to each test case.
+ super().setUp()
+ self.do_asn1_print = global_asn1_print  # Pretty-print ASN.1 if enabled.
+ self.do_hexdump = global_hexdump  # Hex-dump raw traffic if enabled.
+
+ # Some general information on how Windows handles device info:
+
+ # All the SIDs in the computer's info3.sids end up in device.domain_groups
+ # (if they are in any domain), or in device.sids (if they are not). Even if
+ # netlogon.NETLOGON_EXTRA_SIDS is not set.
+
+ # The remainder of the SIDs in device.domain_groups come from an LDAP
+ # search of the computer's domain-local groups.
+
+ # None of the SIDs in the computer's logon_info.resource_groups.groups go
+ # anywhere. Even if netlogon.NETLOGON_RESOURCE_GROUPS is set.
+
+ # In summary:
+ # info3.base.groups => device.groups
+ # info3.sids => device.sids (if not in a domain)
+ # info3.sids => device.domain_groups (if in a domain)
+ # searched-for domain-local groups => device.domain_groups
+
+ # These searched-for domain-local groups are based on _all_ the groups in
+ # info3.base.groups and info3.sids. So if the account is no longer a member
+ # of a (universal or global) group that belongs to a domain-local group,
+ # but has that universal or global group in info3.base.groups or
+ # info3.sids, then the domain-local group will still get added to the
+ # PAC. But the resource groups don't affect this (presumably, they are
+ # being filtered out). Also, those groups the search is based on do not go
+ # in themselves, even if they are domain-local groups.
+
+ cases = [
+ {
+ # Make a TGS request to the krbtgt.
+ 'test': 'basic to krbtgt',
+ 'as:expected': {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'as:mach:expected': {
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ # Indicate this request is to the krbtgt.
+ 'tgs:to_krbtgt': True,
+ 'tgs:expected': {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ },
+ {
+ # Make a TGS request to a service that supports SID compression.
+ 'test': 'device to service compressed',
+ 'as:expected': {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'as:mach:expected': {
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:to_krbtgt': False,
+ 'tgs:compression': True,
+ 'tgs:expected': {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ # The compounded authentication SID indicates that we used FAST
+ # with a device's TGT.
+ (compounded_auth, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:device:expected': {
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ frozenset([(security.SID_CLAIMS_VALID, SidType.RESOURCE_SID, default_attrs)]),
+ },
+ },
+ {
+ # Make a TGS request to a service that lacks support for SID
+ # compression.
+ 'test': 'device to service uncompressed',
+ 'as:expected': {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'as:mach:expected': {
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:to_krbtgt': False,
+ # SID compression is unsupported.
+ 'tgs:compression': False,
+ # There is no change in the reply PAC.
+ 'tgs:expected': {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (compounded_auth, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:device:expected': {
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ frozenset([(security.SID_CLAIMS_VALID, SidType.RESOURCE_SID, default_attrs)]),
+ },
+ },
+ {
+ # Make a TGS request to a service that lacks support for compound
+ # identity.
+ 'test': 'device to service no compound id',
+ 'as:expected': {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'as:mach:expected': {
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:to_krbtgt': False,
+ # Compound identity is unsupported.
+ 'tgs:compound_id': False,
+ 'tgs:expected': {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ # The Compounded Authentication SID should not be present.
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ },
+ {
+ 'test': 'universal groups to krbtgt',
+ 'groups': {
+ # The user and computer each belong to a couple of universal
+ # groups.
+ 'group0': (GroupType.UNIVERSAL, {'group1'}),
+ 'group1': (GroupType.UNIVERSAL, {user}),
+ 'group2': (GroupType.UNIVERSAL, {'group3'}),
+ 'group3': (GroupType.UNIVERSAL, {mach}),
+ },
+ 'as:expected': {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ # The user's groups appear in the PAC of the TGT.
+ ('group0', SidType.BASE_SID, default_attrs),
+ ('group1', SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'as:mach:expected': {
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.PRIMARY_GID, None),
+ # So too for the computer's groups.
+ ('group2', SidType.BASE_SID, default_attrs),
+ ('group3', SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:to_krbtgt': True,
+ 'tgs:expected': {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ # The user's groups appear in the TGS reply PAC.
+ ('group0', SidType.BASE_SID, default_attrs),
+ ('group1', SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ },
+ {
+ 'test': 'universal groups to service',
+ 'groups': {
+ 'group0': (GroupType.UNIVERSAL, {'group1'}),
+ 'group1': (GroupType.UNIVERSAL, {user}),
+ 'group2': (GroupType.UNIVERSAL, {'group3'}),
+ 'group3': (GroupType.UNIVERSAL, {mach}),
+ },
+ 'as:expected': {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ ('group0', SidType.BASE_SID, default_attrs),
+ ('group1', SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'as:mach:expected': {
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.PRIMARY_GID, None),
+ ('group2', SidType.BASE_SID, default_attrs),
+ ('group3', SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:to_krbtgt': False,
+ 'tgs:expected': {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ ('group0', SidType.BASE_SID, default_attrs),
+ ('group1', SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (compounded_auth, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:device:expected': {
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.PRIMARY_GID, None),
+ # The computer's groups appear in the device info structure of
+ # the TGS reply PAC.
+ ('group2', SidType.BASE_SID, default_attrs),
+ ('group3', SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ frozenset([(security.SID_CLAIMS_VALID, SidType.RESOURCE_SID, default_attrs)]),
+ },
+ },
+ {
+ 'test': 'domain-local groups to krbtgt',
+ 'groups': {
+ # The user and computer each belong to a couple of domain-local
+ # groups.
+ 'group0': (GroupType.DOMAIN_LOCAL, {'group1'}),
+ 'group1': (GroupType.DOMAIN_LOCAL, {user}),
+ 'group2': (GroupType.DOMAIN_LOCAL, {'group3'}),
+ 'group3': (GroupType.DOMAIN_LOCAL, {mach}),
+ },
+ 'as:expected': {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ # The user's domain-local group memberships do not appear.
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'as:mach:expected': {
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.PRIMARY_GID, None),
+ # Nor do the computer's.
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:to_krbtgt': True,
+ 'tgs:expected': {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ # The user's groups do not appear in the TGS reply PAC.
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ },
+ {
+ 'test': 'domain-local groups to service compressed',
+ 'groups': {
+ 'group0': (GroupType.DOMAIN_LOCAL, {'group1'}),
+ 'group1': (GroupType.DOMAIN_LOCAL, {user}),
+ 'group2': (GroupType.DOMAIN_LOCAL, {'group3'}),
+ 'group3': (GroupType.DOMAIN_LOCAL, {mach}),
+ },
+ 'as:expected': {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'as:mach:expected': {
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:to_krbtgt': False,
+ 'tgs:compression': True,
+ 'tgs:expected': {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ # These groups appear as resource SIDs.
+ ('group0', SidType.RESOURCE_SID, resource_attrs),
+ ('group1', SidType.RESOURCE_SID, resource_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (compounded_auth, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:device:expected': {
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.PRIMARY_GID, None),
+ # The computer's groups appear together as resource SIDs.
+ frozenset([
+ ('group2', SidType.RESOURCE_SID, resource_attrs),
+ ('group3', SidType.RESOURCE_SID, resource_attrs),
+ ]),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ frozenset([(security.SID_CLAIMS_VALID, SidType.RESOURCE_SID, default_attrs)]),
+ },
+ },
+ {
+ 'test': 'domain-local groups to service uncompressed',
+ 'groups': {
+ 'group0': (GroupType.DOMAIN_LOCAL, {'group1'}),
+ 'group1': (GroupType.DOMAIN_LOCAL, {user}),
+ 'group2': (GroupType.DOMAIN_LOCAL, {'group3'}),
+ 'group3': (GroupType.DOMAIN_LOCAL, {mach}),
+ },
+ 'as:expected': {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'as:mach:expected': {
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:to_krbtgt': False,
+ 'tgs:compression': False,
+ 'tgs:expected': {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ # The user's groups now appear as extra SIDs.
+ ('group0', SidType.EXTRA_SID, resource_attrs),
+ ('group1', SidType.EXTRA_SID, resource_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (compounded_auth, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:device:expected': {
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.PRIMARY_GID, None),
+ # The computer's groups are still resource SIDs.
+ frozenset([
+ ('group2', SidType.RESOURCE_SID, resource_attrs),
+ ('group3', SidType.RESOURCE_SID, resource_attrs),
+ ]),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ frozenset([(security.SID_CLAIMS_VALID, SidType.RESOURCE_SID, default_attrs)]),
+ },
+ },
+ # Test what happens if the computer is removed from a group prior to
+ # the TGS request.
+ {
+ 'test': 'remove transitive domain-local groups to krbtgt',
+ 'groups': {
+ # The computer is transitively a member of a couple of
+ # domain-local groups...
+ 'dom-local-outer-0': (GroupType.DOMAIN_LOCAL, {'dom-local-inner'}),
+ 'dom-local-outer-1': (GroupType.DOMAIN_LOCAL, {'universal-inner'}),
+ # ...via another domain-local group and a universal group.
+ 'dom-local-inner': (GroupType.DOMAIN_LOCAL, {mach}),
+ 'universal-inner': (GroupType.UNIVERSAL, {mach}),
+ },
+ # Just prior to the TGS request, the computer is removed from both
+ # inner groups. Domain-local groups will have not been added to the
+ # PAC at this point.
+ 'tgs:mach:removed': {
+ 'dom-local-inner',
+ 'universal-inner',
+ },
+ 'as:mach:expected': {
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.PRIMARY_GID, None),
+ # Only the universal group appears in the PAC.
+ ('universal-inner', SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:to_krbtgt': True,
+ 'tgs:expected': {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ },
+ {
+ 'test': 'remove transitive domain-local groups to service compressed',
+ 'groups': {
+ 'dom-local-outer-0': (GroupType.DOMAIN_LOCAL, {'dom-local-inner'}),
+ 'dom-local-outer-1': (GroupType.DOMAIN_LOCAL, {'universal-inner'}),
+ 'dom-local-inner': (GroupType.DOMAIN_LOCAL, {mach}),
+ 'universal-inner': (GroupType.UNIVERSAL, {mach}),
+ },
+ 'tgs:mach:removed': {
+ 'dom-local-inner',
+ 'universal-inner',
+ },
+ 'as:mach:expected': {
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.PRIMARY_GID, None),
+ ('universal-inner', SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:to_krbtgt': False,
+ 'tgs:compression': True,
+ 'tgs:expected': {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (compounded_auth, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:device:expected': {
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.PRIMARY_GID, None),
+ # The universal group appears in the device info...
+ ('universal-inner', SidType.BASE_SID, default_attrs),
+ # ...along with the second domain-local group, even though the
+ # computer no longer belongs to it.
+ frozenset([
+ ('dom-local-outer-1', SidType.RESOURCE_SID, resource_attrs),
+ ]),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ frozenset([(security.SID_CLAIMS_VALID, SidType.RESOURCE_SID, default_attrs)]),
+ },
+ },
+ {
+ 'test': 'remove transitive domain-local groups to service uncompressed',
+ 'groups': {
+ 'dom-local-outer-0': (GroupType.DOMAIN_LOCAL, {'dom-local-inner'}),
+ 'dom-local-outer-1': (GroupType.DOMAIN_LOCAL, {'universal-inner'}),
+ 'dom-local-inner': (GroupType.DOMAIN_LOCAL, {mach}),
+ 'universal-inner': (GroupType.UNIVERSAL, {mach}),
+ },
+ 'tgs:mach:removed': {
+ 'dom-local-inner',
+ 'universal-inner',
+ },
+ 'as:mach:expected': {
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.PRIMARY_GID, None),
+ ('universal-inner', SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:to_krbtgt': False,
+ 'tgs:compression': False,
+ 'tgs:expected': {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (compounded_auth, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:device:expected': {
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.PRIMARY_GID, None),
+ ('universal-inner', SidType.BASE_SID, default_attrs),
+ frozenset([
+ ('dom-local-outer-1', SidType.RESOURCE_SID, resource_attrs),
+ ]),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ frozenset([(security.SID_CLAIMS_VALID, SidType.RESOURCE_SID, default_attrs)]),
+ },
+ },
+ # Test what happens if the computer is added to a group prior to the
+ # TGS request.
+ {
+ 'test': 'add transitive domain-local groups to krbtgt',
+ 'groups': {
+ # We create a pair of groups, to be used presently.
+ 'dom-local-outer': (GroupType.DOMAIN_LOCAL, {'universal-inner'}),
+ 'universal-inner': (GroupType.UNIVERSAL, {}),
+ },
+ # Just prior to the TGS request, the computer is added to the inner
+ # group.
+ 'tgs:mach:added': {
+ 'universal-inner',
+ },
+ 'as:mach:expected': {
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:to_krbtgt': True,
+ 'tgs:expected': {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ },
+ {
+ 'test': 'add transitive domain-local groups to service compressed',
+ 'groups': {
+ 'dom-local-outer': (GroupType.DOMAIN_LOCAL, {'universal-inner'}),
+ 'universal-inner': (GroupType.UNIVERSAL, {}),
+ },
+ 'tgs:mach:added': {
+ 'universal-inner',
+ },
+ 'as:mach:expected': {
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:to_krbtgt': False,
+ 'tgs:compression': True,
+ 'tgs:expected': {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (compounded_auth, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:device:expected': {
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.PRIMARY_GID, None),
+ # The computer was not a member of the universal group at the
+ # time of obtaining a TGT, and said group did not make it into
+ # the PAC. Group expansion is only concerned with domain-local
+ # groups, none of which the machine currently belongs
+ # to. Therefore, neither group is present in the device info
+ # structure.
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ frozenset([(security.SID_CLAIMS_VALID, SidType.RESOURCE_SID, default_attrs)]),
+ },
+ },
+ {
+ 'test': 'add transitive domain-local groups to service uncompressed',
+ 'groups': {
+ 'dom-local-outer': (GroupType.DOMAIN_LOCAL, {'universal-inner'}),
+ 'universal-inner': (GroupType.UNIVERSAL, {}),
+ },
+ 'tgs:mach:added': {
+ 'universal-inner',
+ },
+ 'as:mach:expected': {
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:to_krbtgt': False,
+ 'tgs:compression': False,
+ 'tgs:expected': {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (compounded_auth, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:device:expected': {
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ frozenset([(security.SID_CLAIMS_VALID, SidType.RESOURCE_SID, default_attrs)]),
+ },
+ },
+ # Simulate a machine ticket coming in over a trust.
+ {
+ 'test': 'from trust domain-local groups to service compressed',
+ 'groups': {
+ # The machine belongs to a couple of domain-local groups in our
+ # domain.
+ 'foo': (GroupType.DOMAIN_LOCAL, {trust_mach}),
+ 'bar': (GroupType.DOMAIN_LOCAL, {'foo'}),
+ },
+ 'tgs:to_krbtgt': False,
+ 'tgs:compression': True,
+ # The machine SID is from a different domain.
+ 'tgs:mach_sid': trust_mach,
+ 'tgs:mach:sids': {
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ # This dummy resource SID comes from the trusted domain.
+ (f'{mach_trust_domain}-333', SidType.RESOURCE_SID, resource_attrs),
+ },
+ 'tgs:expected': {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (compounded_auth, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:device:expected': {
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.PRIMARY_GID, None),
+ # The domain-local groups end up in the device info.
+ frozenset([
+ ('foo', SidType.RESOURCE_SID, resource_attrs),
+ ('bar', SidType.RESOURCE_SID, resource_attrs),
+ ]),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ frozenset([(security.SID_CLAIMS_VALID, SidType.RESOURCE_SID, default_attrs)]),
+ },
+ },
+ {
+ 'test': 'from trust domain-local groups to service uncompressed',
+ 'groups': {
+ 'foo': (GroupType.DOMAIN_LOCAL, {trust_mach}),
+ 'bar': (GroupType.DOMAIN_LOCAL, {'foo'}),
+ },
+ 'tgs:to_krbtgt': False,
+ 'tgs:compression': False,
+ 'tgs:mach_sid': trust_mach,
+ 'tgs:mach:sids': {
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ (f'{mach_trust_domain}-333', SidType.RESOURCE_SID, resource_attrs),
+ },
+ 'tgs:expected': {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (compounded_auth, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:device:expected': {
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.PRIMARY_GID, None),
+ frozenset([
+ ('foo', SidType.RESOURCE_SID, resource_attrs),
+ ('bar', SidType.RESOURCE_SID, resource_attrs),
+ ]),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ frozenset([(security.SID_CLAIMS_VALID, SidType.RESOURCE_SID, default_attrs)]),
+ },
+ },
+ # Simulate the user ticket coming in over a trust.
+ {
+ 'test': 'user from trust domain-local groups to krbtgt',
+ 'groups': {
+ # The user belongs to a couple of domain-local groups in our
+ # domain.
+ 'group0': (GroupType.DOMAIN_LOCAL, {trust_user}),
+ 'group1': (GroupType.DOMAIN_LOCAL, {'group0'}),
+ },
+ 'tgs:to_krbtgt': True,
+ # Both SIDs are from a different domain.
+ 'tgs:user_sid': trust_user,
+ 'tgs:user:sids': {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ # This dummy resource SID comes from the trusted domain.
+ (f'{mach_trust_domain}-333', SidType.RESOURCE_SID, resource_attrs),
+ },
+ 'tgs:expected': {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ # The dummy resource SID remains in the PAC.
+ (f'{mach_trust_domain}-333', SidType.RESOURCE_SID, resource_attrs),
+ },
+ },
+ {
+ 'test': 'user from trust domain-local groups to service compressed',
+ 'groups': {
+ 'group0': (GroupType.DOMAIN_LOCAL, {trust_user}),
+ 'group1': (GroupType.DOMAIN_LOCAL, {'group0'}),
+ },
+ 'tgs:to_krbtgt': False,
+ 'tgs:compression': True,
+ 'tgs:user_sid': trust_user,
+ 'tgs:user:sids': {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ # This dummy resource SID comes from the trusted domain.
+ (f'{mach_trust_domain}-333', SidType.RESOURCE_SID, resource_attrs),
+ },
+ 'tgs:expected': {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (compounded_auth, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ ('group0', SidType.RESOURCE_SID, resource_attrs),
+ ('group1', SidType.RESOURCE_SID, resource_attrs),
+ },
+ 'tgs:device:expected': {
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ frozenset([(security.SID_CLAIMS_VALID, SidType.RESOURCE_SID, default_attrs)]),
+ },
+ },
+ {
+ 'test': 'user from trust domain-local groups to service uncompressed',
+ 'groups': {
+ 'group0': (GroupType.DOMAIN_LOCAL, {trust_user}),
+ 'group1': (GroupType.DOMAIN_LOCAL, {'group0'}),
+ },
+ 'tgs:to_krbtgt': False,
+ 'tgs:compression': False,
+ 'tgs:user_sid': trust_user,
+ 'tgs:user:sids': {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ # This dummy resource SID comes from the trusted domain.
+ (f'{mach_trust_domain}-333', SidType.RESOURCE_SID, resource_attrs),
+ },
+ 'tgs:expected': {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (compounded_auth, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ ('group0', SidType.EXTRA_SID, resource_attrs),
+ ('group1', SidType.EXTRA_SID, resource_attrs),
+ },
+ 'tgs:device:expected': {
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ frozenset([(security.SID_CLAIMS_VALID, SidType.RESOURCE_SID, default_attrs)]),
+ },
+ },
+ # Simulate both tickets coming in over a trust.
+ {
+ 'test': 'both from trust domain-local groups to krbtgt',
+ 'groups': {
+ # The user and machine each belong to a couple of domain-local
+ # groups in our domain.
+ 'group0': (GroupType.DOMAIN_LOCAL, {trust_user}),
+ 'group1': (GroupType.DOMAIN_LOCAL, {'group0'}),
+ 'group2': (GroupType.DOMAIN_LOCAL, {trust_mach}),
+ 'group3': (GroupType.DOMAIN_LOCAL, {'group2'}),
+ },
+ 'tgs:to_krbtgt': True,
+ # Both SIDs are from a different domain.
+ 'tgs:user_sid': trust_user,
+ 'tgs:mach_sid': trust_mach,
+ 'tgs:user:sids': {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ # This dummy resource SID comes from the trusted domain.
+ (f'{mach_trust_domain}-333', SidType.RESOURCE_SID, resource_attrs),
+ },
+ 'tgs:mach:sids': {
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ # This dummy resource SID comes from the trusted domain.
+ (f'{mach_trust_domain}-444', SidType.RESOURCE_SID, resource_attrs),
+ },
+ 'tgs:expected': {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ # The dummy resource SID remains in the PAC.
+ (f'{mach_trust_domain}-333', SidType.RESOURCE_SID, resource_attrs),
+ },
+ },
+ {
+ 'test': 'both from trust domain-local groups to service compressed',
+ 'groups': {
+ # The user and machine each belong to a couple of
+ # domain-local groups in our domain.
+ 'group0': (GroupType.DOMAIN_LOCAL, {trust_user}),
+ 'group1': (GroupType.DOMAIN_LOCAL, {'group0'}),
+ 'group2': (GroupType.DOMAIN_LOCAL, {trust_mach}),
+ 'group3': (GroupType.DOMAIN_LOCAL, {'group2'}),
+ },
+ 'tgs:to_krbtgt': False,
+ 'tgs:compression': True,
+ 'tgs:user_sid': trust_user,
+ 'tgs:mach_sid': trust_mach,
+ 'tgs:user:sids': {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ # This dummy resource SID comes from the trusted domain.
+ (f'{mach_trust_domain}-333', SidType.RESOURCE_SID, resource_attrs),
+ },
+ 'tgs:mach:sids': {
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ # This dummy resource SID comes from the trusted domain.
+ (f'{mach_trust_domain}-444', SidType.RESOURCE_SID, resource_attrs),
+ },
+ 'tgs:expected': {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (compounded_auth, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ ('group0', SidType.RESOURCE_SID, resource_attrs),
+ ('group1', SidType.RESOURCE_SID, resource_attrs),
+ },
+ 'tgs:device:expected': {
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.PRIMARY_GID, None),
+ # The domain-local groups end up in the device info.
+ frozenset([
+ ('group2', SidType.RESOURCE_SID, resource_attrs),
+ ('group3', SidType.RESOURCE_SID, resource_attrs),
+ ]),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ frozenset([(security.SID_CLAIMS_VALID, SidType.RESOURCE_SID, default_attrs)]),
+ },
+ },
+ {
+ 'test': 'both from trust domain-local groups to service uncompressed',
+ 'groups': {
+ 'group0': (GroupType.DOMAIN_LOCAL, {trust_user}),
+ 'group1': (GroupType.DOMAIN_LOCAL, {'group0'}),
+ 'group2': (GroupType.DOMAIN_LOCAL, {trust_mach}),
+ 'group3': (GroupType.DOMAIN_LOCAL, {'group2'}),
+ },
+ 'tgs:to_krbtgt': False,
+ 'tgs:compression': False,
+ 'tgs:user_sid': trust_user,
+ 'tgs:mach_sid': trust_mach,
+ 'tgs:user:sids': {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ # This dummy resource SID comes from the trusted domain.
+ (f'{mach_trust_domain}-333', SidType.RESOURCE_SID, resource_attrs),
+ },
+ 'tgs:mach:sids': {
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ # This dummy resource SID comes from the trusted domain.
+ (f'{mach_trust_domain}-444', SidType.RESOURCE_SID, resource_attrs),
+ },
+ 'tgs:expected': {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (compounded_auth, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ ('group0', SidType.EXTRA_SID, resource_attrs),
+ ('group1', SidType.EXTRA_SID, resource_attrs),
+ },
+ 'tgs:device:expected': {
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.PRIMARY_GID, None),
+ frozenset([
+ ('group2', SidType.RESOURCE_SID, resource_attrs),
+ ('group3', SidType.RESOURCE_SID, resource_attrs),
+ ]),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ frozenset([(security.SID_CLAIMS_VALID, SidType.RESOURCE_SID, default_attrs)]),
+ },
+ },
+ # Test how resource SIDs are propagated into the device info structure.
+ {
+ 'test': 'mach resource sids',
+ 'tgs:mach:sids': {
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ # Of these SIDs, the Base SIDs and Extra SIDs are all
+ # propagated into the device info structure, regardless of
+ # their attrs, while the Resource SIDs are all dropped.
+ (123, SidType.BASE_SID, default_attrs),
+ (333, SidType.BASE_SID, default_attrs),
+ (333, SidType.BASE_SID, resource_attrs),
+ (1000, SidType.BASE_SID, resource_attrs),
+ (497, SidType.EXTRA_SID, resource_attrs), # the Claims Valid RID.
+ (333, SidType.RESOURCE_SID, default_attrs),
+ (498, SidType.RESOURCE_SID, resource_attrs),
+ (99999, SidType.RESOURCE_SID, default_attrs),
+ (12345678, SidType.RESOURCE_SID, resource_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:to_krbtgt': False,
+ 'tgs:expected': {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (compounded_auth, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:device:expected': {
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.PRIMARY_GID, None),
+ (123, SidType.BASE_SID, default_attrs),
+ (333, SidType.BASE_SID, default_attrs),
+ (333, SidType.BASE_SID, resource_attrs),
+ (1000, SidType.BASE_SID, resource_attrs),
+ frozenset({
+ (497, SidType.RESOURCE_SID, resource_attrs),
+ }),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ frozenset([(security.SID_CLAIMS_VALID, SidType.RESOURCE_SID, default_attrs)]),
+ },
+ },
+ # Add a Base SID to the user's PAC, and confirm it is propagated into
+ # the PAC of the service ticket.
+ {
+ 'test': 'base sid to krbtgt',
+ 'tgs:user:sids': {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (123, SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:to_krbtgt': True,
+ 'tgs:expected': {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (123, SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ },
+ {
+ 'test': 'base sid to service',
+ 'tgs:user:sids': {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (123, SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:to_krbtgt': False,
+ 'tgs:expected': {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (123, SidType.BASE_SID, default_attrs),
+ (compounded_auth, SidType.EXTRA_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:device:expected': {
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ frozenset([(security.SID_CLAIMS_VALID, SidType.RESOURCE_SID, default_attrs)]),
+ },
+ },
+ # Add a Base SID with resource attrs to the user's PAC, and confirm it
+ # is propagated into the PAC of the service ticket.
+ {
+ 'test': 'base sid resource attrs to krbtgt',
+ 'tgs:user:sids': {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (123, SidType.BASE_SID, resource_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:to_krbtgt': True,
+ 'tgs:expected': {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (123, SidType.BASE_SID, resource_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ },
+ {
+ 'test': 'base sid resource attrs to service',
+ 'tgs:user:sids': {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (123, SidType.BASE_SID, resource_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:to_krbtgt': False,
+ 'tgs:expected': {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (123, SidType.BASE_SID, resource_attrs),
+ (compounded_auth, SidType.EXTRA_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:device:expected': {
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ frozenset([(security.SID_CLAIMS_VALID, SidType.RESOURCE_SID, default_attrs)]),
+ },
+ },
+ # Add a couple of Extra SIDs to the user's PAC, and confirm they are
+ # propagated into the PAC of the service ticket.
+ {
+ 'test': 'extra sids to krbtgt',
+ 'tgs:user:sids': {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ ('S-1-5-2-3-4', SidType.EXTRA_SID, default_attrs),
+ ('S-1-5-2-3-5', SidType.EXTRA_SID, resource_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:to_krbtgt': True,
+ 'tgs:expected': {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ ('S-1-5-2-3-4', SidType.EXTRA_SID, default_attrs),
+ ('S-1-5-2-3-5', SidType.EXTRA_SID, resource_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ },
+ {
+ 'test': 'extra sids to service',
+ 'tgs:user:sids': {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ ('S-1-5-2-3-4', SidType.EXTRA_SID, default_attrs),
+ ('S-1-5-2-3-5', SidType.EXTRA_SID, resource_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:to_krbtgt': False,
+ 'tgs:expected': {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ ('S-1-5-2-3-4', SidType.EXTRA_SID, default_attrs),
+ ('S-1-5-2-3-5', SidType.EXTRA_SID, resource_attrs),
+ (compounded_auth, SidType.EXTRA_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:device:expected': {
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ frozenset([(security.SID_CLAIMS_VALID, SidType.RESOURCE_SID, default_attrs)]),
+ },
+ },
+ # Test what happens if we remove the CLAIMS_VALID and ASSERTED_IDENTITY
+ # SIDs from either of the PACs, so we can see at what point these SIDs
+ # are added.
+ {
+ 'test': 'removed special sids to krbtgt',
+ 'tgs:user:sids': {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ ('S-1-5-2-3-4', SidType.EXTRA_SID, default_attrs),
+ # We don't specify asserted identity or claims valid SIDs for
+ # the user...
+ },
+ 'tgs:mach:sids': {
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.PRIMARY_GID, None),
+ # ...nor for the computer.
+ },
+ 'tgs:to_krbtgt': True,
+ 'tgs:expected': {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ ('S-1-5-2-3-4', SidType.EXTRA_SID, default_attrs),
+ # They don't show up in the service ticket.
+ },
+ },
+ {
+ 'test': 'removed special sids to service',
+ 'tgs:user:sids': {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ ('S-1-5-2-3-4', SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:mach:sids': {
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.PRIMARY_GID, None),
+ },
+ 'tgs:to_krbtgt': False,
+ 'tgs:expected': {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ ('S-1-5-2-3-4', SidType.EXTRA_SID, default_attrs),
+ (compounded_auth, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:device:expected': {
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.PRIMARY_GID, None),
+ # These special SIDs don't show up in the device info either.
+ },
+ },
+ # Test a group being the primary one for the user and machine.
+ {
+ 'test': 'primary universal to krbtgt',
+ 'groups': {
+ 'primary-user': (GroupType.UNIVERSAL, {user}),
+ 'primary-mach': (GroupType.UNIVERSAL, {mach}),
+ },
+ # Set these groups as the account's primary groups.
+ 'primary_group': 'primary-user',
+ 'mach:primary_group': 'primary-mach',
+ 'as:expected': {
+ # They appear in the PAC as normal.
+ ('primary-user', SidType.BASE_SID, default_attrs),
+ ('primary-user', SidType.PRIMARY_GID, None),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'as:mach:expected': {
+ ('primary-mach', SidType.BASE_SID, default_attrs),
+ ('primary-mach', SidType.PRIMARY_GID, None),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:to_krbtgt': True,
+ 'tgs:expected': {
+ ('primary-user', SidType.BASE_SID, default_attrs),
+ ('primary-user', SidType.PRIMARY_GID, None),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ },
+ {
+ 'test': 'primary universal to service compressed',
+ 'groups': {
+ 'primary-user': (GroupType.UNIVERSAL, {user}),
+ 'primary-mach': (GroupType.UNIVERSAL, {mach}),
+ },
+ 'primary_group': 'primary-user',
+ 'mach:primary_group': 'primary-mach',
+ 'as:expected': {
+ ('primary-user', SidType.BASE_SID, default_attrs),
+ ('primary-user', SidType.PRIMARY_GID, None),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'as:mach:expected': {
+ ('primary-mach', SidType.BASE_SID, default_attrs),
+ ('primary-mach', SidType.PRIMARY_GID, None),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:to_krbtgt': False,
+ 'tgs:compression': True,
+ 'tgs:expected': {
+ ('primary-user', SidType.BASE_SID, default_attrs),
+ ('primary-user', SidType.PRIMARY_GID, None),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (compounded_auth, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:device:expected': {
+ ('primary-mach', SidType.BASE_SID, default_attrs),
+ ('primary-mach', SidType.PRIMARY_GID, None),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ frozenset([(security.SID_CLAIMS_VALID, SidType.RESOURCE_SID, default_attrs)]),
+ },
+ },
+ {
+ 'test': 'primary universal to service uncompressed',
+ 'groups': {
+ 'primary-user': (GroupType.UNIVERSAL, {user}),
+ 'primary-mach': (GroupType.UNIVERSAL, {mach}),
+ },
+ 'primary_group': 'primary-user',
+ 'mach:primary_group': 'primary-mach',
+ 'as:expected': {
+ ('primary-user', SidType.BASE_SID, default_attrs),
+ ('primary-user', SidType.PRIMARY_GID, None),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'as:mach:expected': {
+ ('primary-mach', SidType.BASE_SID, default_attrs),
+ ('primary-mach', SidType.PRIMARY_GID, None),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:to_krbtgt': False,
+ # SID compression is unsupported.
+ 'tgs:compression': False,
+ 'tgs:expected': {
+ ('primary-user', SidType.BASE_SID, default_attrs),
+ ('primary-user', SidType.PRIMARY_GID, None),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (compounded_auth, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:device:expected': {
+ ('primary-mach', SidType.BASE_SID, default_attrs),
+ ('primary-mach', SidType.PRIMARY_GID, None),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ frozenset([(security.SID_CLAIMS_VALID, SidType.RESOURCE_SID, default_attrs)]),
+ },
+ },
+ # Test domain-local primary groups.
+ {
+ 'test': 'primary domain-local to krbtgt',
+ 'groups': {
+ 'primary-user': (GroupType.DOMAIN_LOCAL, {user}),
+ 'primary-mach': (GroupType.DOMAIN_LOCAL, {mach}),
+ },
+ # Though Windows normally disallows setting domain-local groups
+ # as primary groups, Samba does not.
+ 'primary_group': 'primary-user',
+ 'mach:primary_group': 'primary-mach',
+ 'as:expected': {
+ # The domain-local groups appear as our primary GIDs, but do
+ # not appear in the base SIDs.
+ ('primary-user', SidType.PRIMARY_GID, None),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'as:mach:expected': {
+ ('primary-mach', SidType.PRIMARY_GID, None),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:to_krbtgt': True,
+ 'tgs:expected': {
+ ('primary-user', SidType.PRIMARY_GID, None),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ },
+ {
+ 'test': 'primary domain-local to service compressed',
+ 'groups': {
+ 'primary-user': (GroupType.DOMAIN_LOCAL, {user}),
+ 'primary-mach': (GroupType.DOMAIN_LOCAL, {mach}),
+ },
+ 'primary_group': 'primary-user',
+ 'mach:primary_group': 'primary-mach',
+ 'as:expected': {
+ ('primary-user', SidType.PRIMARY_GID, None),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'as:mach:expected': {
+ ('primary-mach', SidType.PRIMARY_GID, None),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:to_krbtgt': False,
+ 'tgs:compression': True,
+ 'tgs:expected': {
+ ('primary-user', SidType.PRIMARY_GID, None),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (compounded_auth, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:device:expected': {
+ ('primary-mach', SidType.PRIMARY_GID, None),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ frozenset([(security.SID_CLAIMS_VALID, SidType.RESOURCE_SID, default_attrs)]),
+ },
+ },
+ {
+ 'test': 'primary domain-local to service uncompressed',
+ 'groups': {
+ 'primary-user': (GroupType.DOMAIN_LOCAL, {user}),
+ 'primary-mach': (GroupType.DOMAIN_LOCAL, {mach}),
+ },
+ 'primary_group': 'primary-user',
+ 'mach:primary_group': 'primary-mach',
+ 'as:expected': {
+ ('primary-user', SidType.PRIMARY_GID, None),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'as:mach:expected': {
+ ('primary-mach', SidType.PRIMARY_GID, None),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:to_krbtgt': False,
+ # SID compression is unsupported.
+ 'tgs:compression': False,
+ 'tgs:expected': {
+ ('primary-user', SidType.PRIMARY_GID, None),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (compounded_auth, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:device:expected': {
+ ('primary-mach', SidType.PRIMARY_GID, None),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ frozenset([(security.SID_CLAIMS_VALID, SidType.RESOURCE_SID, default_attrs)]),
+ },
+ },
+ # Test the scenario where we belong to a now-domain-local group, and
+ # possess an old TGT issued when the group was still our primary one.
+ {
+ 'test': 'old primary domain-local to krbtgt',
+ 'groups': {
+ # Domain-local groups to which the accounts belong.
+ 'primary-user': (GroupType.DOMAIN_LOCAL, {user}),
+ 'primary-mach': (GroupType.DOMAIN_LOCAL, {mach}),
+ },
+ 'tgs:user:sids': {
+ # In the PACs, the groups have the attributes of an ordinary
+ # group...
+ ('primary-user', SidType.BASE_SID, default_attrs),
+ # ...and remain our primary ones.
+ ('primary-user', SidType.PRIMARY_GID, None),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:mach:sids': {
+ ('primary-mach', SidType.BASE_SID, default_attrs),
+ ('primary-mach', SidType.PRIMARY_GID, None),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:to_krbtgt': True,
+ 'tgs:expected': {
+ # The groups don't change.
+ ('primary-user', SidType.BASE_SID, default_attrs),
+ ('primary-user', SidType.PRIMARY_GID, None),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ },
+ {
+ 'test': 'old primary domain-local to service compressed',
+ 'groups': {
+ 'primary-user': (GroupType.DOMAIN_LOCAL, {user}),
+ 'primary-mach': (GroupType.DOMAIN_LOCAL, {mach}),
+ },
+ 'tgs:user:sids': {
+ ('primary-user', SidType.BASE_SID, default_attrs),
+ ('primary-user', SidType.PRIMARY_GID, None),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:mach:sids': {
+ ('primary-mach', SidType.BASE_SID, default_attrs),
+ ('primary-mach', SidType.PRIMARY_GID, None),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:to_krbtgt': False,
+ 'tgs:compression': True,
+ 'tgs:expected': {
+ ('primary-user', SidType.BASE_SID, default_attrs),
+ ('primary-user', SidType.PRIMARY_GID, None),
+ # The groups are added a second time to the PAC, now as
+ # resource groups.
+ ('primary-user', SidType.RESOURCE_SID, resource_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (compounded_auth, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:device:expected': {
+ ('primary-mach', SidType.BASE_SID, default_attrs),
+ ('primary-mach', SidType.PRIMARY_GID, None),
+ frozenset([('primary-mach', SidType.RESOURCE_SID, resource_attrs)]),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ frozenset([(security.SID_CLAIMS_VALID, SidType.RESOURCE_SID, default_attrs)]),
+ },
+ },
+ {
+ 'test': 'old primary domain-local to service uncompressed',
+ 'groups': {
+ 'primary-user': (GroupType.DOMAIN_LOCAL, {user}),
+ 'primary-mach': (GroupType.DOMAIN_LOCAL, {mach}),
+ },
+ 'tgs:user:sids': {
+ ('primary-user', SidType.BASE_SID, default_attrs),
+ ('primary-user', SidType.PRIMARY_GID, None),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:mach:sids': {
+ ('primary-mach', SidType.BASE_SID, default_attrs),
+ ('primary-mach', SidType.PRIMARY_GID, None),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:to_krbtgt': False,
+ # SID compression is unsupported.
+ 'tgs:compression': False,
+ 'tgs:expected': {
+ ('primary-user', SidType.BASE_SID, default_attrs),
+ ('primary-user', SidType.PRIMARY_GID, None),
+ # This time, the group is added to Extra SIDs.
+ ('primary-user', SidType.EXTRA_SID, resource_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (compounded_auth, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:device:expected': {
+ ('primary-mach', SidType.BASE_SID, default_attrs),
+ ('primary-mach', SidType.PRIMARY_GID, None),
+ frozenset([('primary-mach', SidType.RESOURCE_SID, resource_attrs)]),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ frozenset([(security.SID_CLAIMS_VALID, SidType.RESOURCE_SID, default_attrs)]),
+ },
+ },
+ # Test the scenario where each account possesses an old TGT issued when
+ # a now-domain-local group was still its primary one. The accounts no
+ # longer belong to those groups, which themselves belong to other
+ # domain-local groups.
+ {
+ 'test': 'old primary domain-local transitive to krbtgt',
+ 'groups': {
+ 'user-outer': (GroupType.DOMAIN_LOCAL, {'user-inner'}),
+ 'user-inner': (GroupType.DOMAIN_LOCAL, {}),
+ 'mach-outer': (GroupType.DOMAIN_LOCAL, {'mach-inner'}),
+ 'mach-inner': (GroupType.DOMAIN_LOCAL, {}),
+ },
+ 'tgs:user:sids': {
+ # In the PACs, the groups have the attributes of an ordinary
+ # group...
+ ('user-inner', SidType.BASE_SID, default_attrs),
+ # ...and remain our primary ones.
+ ('user-inner', SidType.PRIMARY_GID, None),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:mach:sids': {
+ ('mach-inner', SidType.BASE_SID, default_attrs),
+ ('mach-inner', SidType.PRIMARY_GID, None),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:to_krbtgt': True,
+ 'tgs:expected': {
+ # The groups don't change.
+ ('user-inner', SidType.BASE_SID, default_attrs),
+ ('user-inner', SidType.PRIMARY_GID, None),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ },
+ {
+ 'test': 'old primary domain-local transitive to service compressed',
+ 'groups': {
+ 'user-outer': (GroupType.DOMAIN_LOCAL, {'user-inner'}),
+ 'user-inner': (GroupType.DOMAIN_LOCAL, {}),
+ 'mach-outer': (GroupType.DOMAIN_LOCAL, {'mach-inner'}),
+ 'mach-inner': (GroupType.DOMAIN_LOCAL, {}),
+ },
+ 'tgs:user:sids': {
+ ('user-inner', SidType.BASE_SID, default_attrs),
+ ('user-inner', SidType.PRIMARY_GID, None),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:mach:sids': {
+ ('mach-inner', SidType.BASE_SID, default_attrs),
+ ('mach-inner', SidType.PRIMARY_GID, None),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:to_krbtgt': False,
+ 'tgs:compression': True,
+ 'tgs:expected': {
+ ('user-inner', SidType.BASE_SID, default_attrs),
+ ('user-inner', SidType.PRIMARY_GID, None),
+            # The second-degree (transitively reached) resource groups are
+            # added to the PAC as resource groups.
+ ('user-outer', SidType.RESOURCE_SID, resource_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (compounded_auth, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:device:expected': {
+ ('mach-inner', SidType.BASE_SID, default_attrs),
+ ('mach-inner', SidType.PRIMARY_GID, None),
+ frozenset([('mach-outer', SidType.RESOURCE_SID, resource_attrs)]),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ frozenset([(security.SID_CLAIMS_VALID, SidType.RESOURCE_SID, default_attrs)]),
+ },
+ },
+ {
+ 'test': 'old primary domain-local transitive to service uncompressed',
+ 'groups': {
+ 'user-outer': (GroupType.DOMAIN_LOCAL, {'user-inner'}),
+ 'user-inner': (GroupType.DOMAIN_LOCAL, {}),
+ 'mach-outer': (GroupType.DOMAIN_LOCAL, {'mach-inner'}),
+ 'mach-inner': (GroupType.DOMAIN_LOCAL, {}),
+ },
+ 'tgs:user:sids': {
+ ('user-inner', SidType.BASE_SID, default_attrs),
+ ('user-inner', SidType.PRIMARY_GID, None),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:mach:sids': {
+ ('mach-inner', SidType.BASE_SID, default_attrs),
+ ('mach-inner', SidType.PRIMARY_GID, None),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:to_krbtgt': False,
+ # SID compression is unsupported.
+ 'tgs:compression': False,
+ 'tgs:expected': {
+ ('user-inner', SidType.BASE_SID, default_attrs),
+ ('user-inner', SidType.PRIMARY_GID, None),
+ # This time, the group is added to Extra SIDs.
+ ('user-outer', SidType.EXTRA_SID, resource_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (compounded_auth, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:device:expected': {
+ ('mach-inner', SidType.BASE_SID, default_attrs),
+ ('mach-inner', SidType.PRIMARY_GID, None),
+ frozenset([('mach-outer', SidType.RESOURCE_SID, resource_attrs)]),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ frozenset([(security.SID_CLAIMS_VALID, SidType.RESOURCE_SID, default_attrs)]),
+ },
+ },
+ # Test how the various categories of SIDs are propagated into the
+ # device info structure.
+ {
+ 'test': 'device info sid grouping',
+ 'tgs:mach:sids': {
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.PRIMARY_GID, None),
+ # These base SIDs are simply propagated into the device info,
+ # irrespective of whatever attributes they have.
+ (1, SidType.BASE_SID, default_attrs),
+ (2, SidType.BASE_SID, 12345),
+ # Extra SIDs not from a domain are also propagated.
+ ('S-1-5-2-3-4', SidType.EXTRA_SID, 789),
+ ('S-1-5-20', SidType.EXTRA_SID, 999),
+ ('S-1-5-21', SidType.EXTRA_SID, 999),
+ ('S-1-6-0', SidType.EXTRA_SID, 999),
+ ('S-1-6-2-3-4', SidType.EXTRA_SID, 789),
+ # Extra SIDs from our own domain are collated into a group.
+ (3, SidType.EXTRA_SID, default_attrs),
+ (4, SidType.EXTRA_SID, 12345),
+ # Extra SIDs from other domains are collated into separate groups.
+ ('S-1-5-21-0-0-0-490', SidType.EXTRA_SID, 5),
+ ('S-1-5-21-0-0-0-491', SidType.EXTRA_SID, 6),
+ ('S-1-5-21-0-0-1-492', SidType.EXTRA_SID, 7),
+ ('S-1-5-21-0-0-1-493', SidType.EXTRA_SID, 8),
+ ('S-1-5-21-0-0-1-494', SidType.EXTRA_SID, 9),
+ # A non-domain SID (too few subauths), ...
+ ('S-1-5-21-242424-12345-2', SidType.EXTRA_SID, 1111111111),
+ # ... a domain SID, ...
+ ('S-1-5-21-242424-12345-321321-2', SidType.EXTRA_SID, 1111111111),
+ # ... and a non-domain SID (too many subauths).
+ ('S-1-5-21-242424-12345-321321-654321-2', SidType.EXTRA_SID, default_attrs),
+ # Special SIDs.
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:to_krbtgt': False,
+ 'tgs:expected': {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (compounded_auth, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:device:expected': {
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.PRIMARY_GID, None),
+ # Base SIDs.
+ (1, SidType.BASE_SID, default_attrs),
+ (2, SidType.BASE_SID, 12345),
+ # Extra SIDs from other domains.
+ ('S-1-5-2-3-4', SidType.EXTRA_SID, 789),
+ ('S-1-5-20', SidType.EXTRA_SID, 999),
+ ('S-1-5-21', SidType.EXTRA_SID, 999),
+ ('S-1-6-0', SidType.EXTRA_SID, 999),
+ ('S-1-6-2-3-4', SidType.EXTRA_SID, 789),
+ # Extra SIDs from our own domain.
+ frozenset({
+ (3, SidType.RESOURCE_SID, default_attrs),
+ (4, SidType.RESOURCE_SID, 12345),
+ }),
+ # Extra SIDs from other domains.
+ frozenset({
+ ('S-1-5-21-0-0-0-490', SidType.RESOURCE_SID, 5),
+ ('S-1-5-21-0-0-0-491', SidType.RESOURCE_SID, 6),
+ # These SIDs end up placed with the CLAIMS_VALID SID.
+ (security.SID_CLAIMS_VALID, SidType.RESOURCE_SID, default_attrs),
+ }),
+ frozenset({
+ ('S-1-5-21-0-0-1-492', SidType.RESOURCE_SID, 7),
+ ('S-1-5-21-0-0-1-493', SidType.RESOURCE_SID, 8),
+ ('S-1-5-21-0-0-1-494', SidType.RESOURCE_SID, 9),
+ }),
+ # Non-domain SID.
+ ('S-1-5-21-242424-12345-2', SidType.EXTRA_SID, 1111111111),
+ # Domain SID.
+ frozenset({
+ ('S-1-5-21-242424-12345-321321-2', SidType.RESOURCE_SID, 1111111111),
+ }),
+ # Non-domain SID.
+ ('S-1-5-21-242424-12345-321321-654321-2', SidType.EXTRA_SID, default_attrs),
+ # Special SIDs.
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ },
+ },
+ {
+ # Test RODC-issued device claims.
+ 'test': 'rodc-issued device claims attack',
+ 'groups': {
+ # A couple of groups to which the machine belongs.
+ 'dom-local': (GroupType.DOMAIN_LOCAL, {mach}),
+ 'universal': (GroupType.UNIVERSAL, {mach}),
+ },
+ 'as:expected': {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:mach:sids': {
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ # Try to sneak a few extra SIDs into the machine's RODC-issued
+ # PAC.
+ (security.BUILTIN_RID_ADMINISTRATORS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_ENTERPRISE_READONLY_DCS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_KRBTGT, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_CERT_ADMINS, SidType.RESOURCE_SID, resource_attrs),
+ (security.SID_NT_SYSTEM, SidType.EXTRA_SID, default_attrs),
+ # Don't include the groups of which the machine is a member.
+ },
+ # The armor ticket was issued by an RODC.
+ 'tgs:mach:from_rodc': True,
+ 'tgs:to_krbtgt': False,
+ 'tgs:compression': True,
+ 'tgs:expected': {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (compounded_auth, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:device:expected': {
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.PRIMARY_GID, None),
+ # The machine's groups are now included.
+ ('universal', SidType.BASE_SID, default_attrs),
+ frozenset([
+ ('dom-local', SidType.RESOURCE_SID, resource_attrs),
+ # Note that we're not considered a "member" of 'Allowed
+ # RODC Password Replication Group'.
+ ]),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ frozenset([(security.SID_CLAIMS_VALID, SidType.RESOURCE_SID, default_attrs)]),
+ # The device groups should have been regenerated, our extra
+ # SIDs removed, and our elevation of privilege attack foiled.
+ },
+ },
+ {
+ 'test': 'rodc-issued without claims valid',
+ 'as:expected': {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:mach:sids': {
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ # The Claims Valid SID is missing.
+ },
+ # The armor ticket was issued by an RODC.
+ 'tgs:mach:from_rodc': True,
+ 'tgs:to_krbtgt': False,
+ 'tgs:compression': True,
+ 'tgs:expected': {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (compounded_auth, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:device:expected': {
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ # The Claims Valid SID is still added to the device info.
+ frozenset([(security.SID_CLAIMS_VALID, SidType.RESOURCE_SID, default_attrs)]),
+ },
+ },
+ {
+ 'test': 'rodc-issued without asserted identity',
+ 'as:expected': {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:mach:sids': {
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.PRIMARY_GID, None),
+ # The Asserted Identity SID is missing.
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ # The armor ticket was issued by an RODC.
+ 'tgs:mach:from_rodc': True,
+ 'tgs:to_krbtgt': False,
+ 'tgs:compression': True,
+ 'tgs:expected': {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (compounded_auth, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:device:expected': {
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.PRIMARY_GID, None),
+ # The Asserted Identity SID is not added to the device info.
+ frozenset([(security.SID_CLAIMS_VALID, SidType.RESOURCE_SID, default_attrs)]),
+ },
+ },
+ {
+ 'test': 'rodc-issued asserted identity without attributes',
+ 'as:expected': {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:mach:sids': {
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.PRIMARY_GID, None),
+ # The Asserted Identity SID has no attributes set.
+ (asserted_identity, SidType.EXTRA_SID, 0),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ # The armor ticket was issued by an RODC.
+ 'tgs:mach:from_rodc': True,
+ 'tgs:to_krbtgt': False,
+ 'tgs:compression': True,
+ 'tgs:expected': {
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (compounded_auth, SidType.EXTRA_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:device:expected': {
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_DOMAIN_MEMBERS, SidType.PRIMARY_GID, None),
+ # The Asserted Identity SID appears in the device info with its
+ # attributes as normal.
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ frozenset([(security.SID_CLAIMS_VALID, SidType.RESOURCE_SID, default_attrs)]),
+ },
+ },
+ ]
+
+    @classmethod
+    def setUpDynamicTestCases(cls):
+        """Generate one test method per entry in ``cls.cases``.
+
+        Honours two environment variables:
+
+        FILTER       -- a regex; only cases whose normalised name matches it
+                        are generated.
+        SKIP_INVALID -- if set, cases marked 'configuration_invalid' are
+                        skipped entirely.
+        """
+        FILTER = env_get_var_value('FILTER', allow_missing=True)
+        SKIP_INVALID = env_get_var_value('SKIP_INVALID', allow_missing=True)
+
+        for case in cls.cases:
+            invalid = case.pop('configuration_invalid', False)
+            if SKIP_INVALID and invalid:
+                # Some group setups are invalid on Windows, so we allow them to
+                # be skipped.
+                continue
+            name = case.pop('test')
+            # Normalise the case name into a valid Python identifier fragment.
+            name = re.sub(r'\W+', '_', name)
+            if FILTER and not re.search(FILTER, name):
+                continue
+
+            # Pass a copy, since the generated test consumes (pops) the
+            # parameters and must not mutate the shared case definition.
+            cls.generate_dynamic_test('test_device_info', name,
+                                      dict(case))
+
+    def _test_device_info_with_args(self, case):
+        """Run one dynamically generated device-info test case.
+
+        *case* is a dict of parameters (one entry of ``cls.cases``) that is
+        consumed key by key; any leftover key is reported as an error.  The
+        test optionally sets up a group arrangement, obtains TGTs for a user
+        and a machine account, optionally rewrites the SIDs or user flags in
+        either PAC, and then performs a FAST-armored TGS-REQ with the user's
+        TGT (armored by the machine's TGT), checking the resulting PAC
+        against the expected user and device groups.
+        """
+        # The group arrangement for the test.
+        group_setup = case.pop('groups', None)
+
+        # Groups that should be the primary group for the user and machine
+        # respectively.
+        primary_group = case.pop('primary_group', None)
+        mach_primary_group = case.pop('mach:primary_group', None)
+
+        # Whether the TGS-REQ should be directed to the krbtgt.
+        tgs_to_krbtgt = case.pop('tgs:to_krbtgt', None)
+
+        # Whether the target server of the TGS-REQ should support compound
+        # identity or resource SID compression.
+        tgs_compound_id = case.pop('tgs:compound_id', None)
+        tgs_compression = case.pop('tgs:compression', None)
+
+        # Optional SIDs to replace those in the PACs prior to a TGS-REQ.
+        tgs_user_sids = case.pop('tgs:user:sids', None)
+        tgs_mach_sids = case.pop('tgs:mach:sids', None)
+
+        # Whether the machine's TGT should be issued by an RODC.
+        tgs_mach_from_rodc = case.pop('tgs:mach:from_rodc', None)
+
+        # Optional groups which the machine is added to or removed from prior
+        # to a TGS-REQ, to test how the groups in the device PAC are expanded.
+        tgs_mach_added = case.pop('tgs:mach:added', None)
+        tgs_mach_removed = case.pop('tgs:mach:removed', None)
+
+        # Optional account SIDs to replace those in the PACs prior to a
+        # TGS-REQ.
+        tgs_user_sid = case.pop('tgs:user_sid', None)
+        tgs_mach_sid = case.pop('tgs:mach_sid', None)
+
+        # User flags that may be set or reset in the PAC prior to a TGS-REQ.
+        tgs_mach_set_user_flags = case.pop('tgs:mach:set_user_flags', None)
+        tgs_mach_reset_user_flags = case.pop('tgs:mach:reset_user_flags', None)
+
+        # The SIDs we expect to see in the PAC after an AS-REQ or a TGS-REQ.
+        as_expected = case.pop('as:expected', None)
+        as_mach_expected = case.pop('as:mach:expected', None)
+        tgs_expected = case.pop('tgs:expected', None)
+        tgs_device_expected = case.pop('tgs:device:expected', None)
+
+        # There should be no parameters remaining in the testcase.
+        self.assertFalse(case, 'unexpected parameters in testcase')
+
+        # Sanity-check the parameter combinations before doing any work.
+        if as_expected is None:
+            self.assertIsNotNone(tgs_expected,
+                                 'no set of expected SIDs is provided')
+
+        if as_mach_expected is None:
+            self.assertIsNotNone(tgs_expected,
+                                 'no set of expected machine SIDs is provided')
+
+        if tgs_to_krbtgt is None:
+            tgs_to_krbtgt = False
+
+        if tgs_compound_id is None and not tgs_to_krbtgt:
+            # Assume the service supports compound identity by default.
+            tgs_compound_id = True
+
+        if tgs_to_krbtgt:
+            self.assertIsNone(tgs_device_expected,
+                              'device SIDs are not added for a krbtgt request')
+
+            self.assertIsNotNone(tgs_expected,
+                                 'no set of expected TGS SIDs is provided')
+
+        if tgs_user_sid is not None:
+            self.assertIsNotNone(tgs_user_sids,
+                                 'specified TGS-REQ user SID, but no '
+                                 'accompanying user SIDs provided')
+
+        if tgs_mach_sid is not None:
+            self.assertIsNotNone(tgs_mach_sids,
+                                 'specified TGS-REQ mach SID, but no '
+                                 'accompanying machine SIDs provided')
+
+        if tgs_mach_set_user_flags is None:
+            tgs_mach_set_user_flags = 0
+        else:
+            self.assertIsNotNone(tgs_mach_sids,
+                                 'specified TGS-REQ set user flags, but no '
+                                 'accompanying machine SIDs provided')
+
+        if tgs_mach_reset_user_flags is None:
+            tgs_mach_reset_user_flags = 0
+        else:
+            self.assertIsNotNone(tgs_mach_sids,
+                                 'specified TGS-REQ reset user flags, but no '
+                                 'accompanying machine SIDs provided')
+
+        if tgs_mach_from_rodc is None:
+            tgs_mach_from_rodc = False
+
+        # Accounts may only come from the credentials cache if the test will
+        # not modify their group memberships.
+        user_use_cache = not group_setup and (
+            not primary_group)
+        mach_use_cache = not group_setup and (
+            not mach_primary_group) and (
+            not tgs_mach_added) and (
+            not tgs_mach_removed)
+
+        samdb = self.get_samdb()
+
+        domain_sid = samdb.get_domain_sid()
+
+        # Create the user account. It needs to be freshly created rather than
+        # cached if there is a possibility of adding it to one or more groups.
+        user_creds = self.get_cached_creds(
+            account_type=self.AccountType.USER,
+            use_cache=user_use_cache)
+        user_dn = user_creds.get_dn()
+        user_sid = user_creds.get_sid()
+        user_name = user_creds.get_username()
+
+        # Pick random RIDs for SIDs in the user and machine trust domains.
+        trust_user_rid = random.randint(2000, 0xfffffffe)
+        trust_user_sid = f'{self.user_trust_domain}-{trust_user_rid}'
+
+        trust_mach_rid = random.randint(2000, 0xfffffffe)
+        trust_mach_sid = f'{self.mach_trust_domain}-{trust_mach_rid}'
+
+        # Create the machine account. It needs to be freshly created rather
+        # than cached if there is a possibility of adding it to one or more
+        # groups.
+        if tgs_mach_from_rodc:
+            # If the machine's TGT is to be issued by an RODC, ensure the
+            # machine account is allowed to replicate to an RODC.
+            mach_opts = {
+                'allowed_replication_mock': True,
+                'revealed_to_mock_rodc': True,
+            }
+        else:
+            mach_opts = None
+        mach_creds = self.get_cached_creds(
+            account_type=self.AccountType.COMPUTER,
+            opts=mach_opts,
+            use_cache=mach_use_cache)
+        mach_dn = mach_creds.get_dn()
+        mach_dn_str = str(mach_dn)
+        mach_sid = mach_creds.get_sid()
+
+        # Register the well-known principals so the group setup and the SID
+        # mapping below can refer to them symbolically.
+        user_principal = Principal(user_dn, user_sid)
+        mach_principal = Principal(mach_dn, mach_sid)
+        trust_user_principal = Principal(None, trust_user_sid)
+        trust_mach_principal = Principal(None, trust_mach_sid)
+        preexisting_groups = {
+            self.user: user_principal,
+            self.mach: mach_principal,
+            self.trust_user: trust_user_principal,
+            self.trust_mach: trust_mach_principal,
+        }
+        primary_groups = {}
+        if primary_group is not None:
+            primary_groups[user_principal] = primary_group
+        if mach_primary_group is not None:
+            primary_groups[mach_principal] = mach_primary_group
+        groups = self.setup_groups(samdb,
+                                   preexisting_groups,
+                                   group_setup,
+                                   primary_groups)
+        del group_setup
+
+        # Resolve symbolic account SIDs from the test data to real SIDs.
+        if tgs_user_sid is None:
+            tgs_user_sid = user_sid
+        elif tgs_user_sid in groups:
+            tgs_user_sid = groups[tgs_user_sid].sid
+
+        tgs_user_domain_sid, tgs_user_rid = tgs_user_sid.rsplit('-', 1)
+
+        if tgs_mach_sid is None:
+            tgs_mach_sid = mach_sid
+        elif tgs_mach_sid in groups:
+            tgs_mach_sid = groups[tgs_mach_sid].sid
+
+        tgs_mach_domain_sid, tgs_mach_rid = tgs_mach_sid.rsplit('-', 1)
+
+        # Resolve the symbolic SIDs in the test parameters against the groups
+        # created above and the relevant domain SID.
+        expected_groups = self.map_sids(as_expected, groups,
+                                        domain_sid)
+        mach_expected_groups = self.map_sids(as_mach_expected, groups,
+                                             domain_sid)
+        tgs_user_sids_mapped = self.map_sids(tgs_user_sids, groups,
+                                             tgs_user_domain_sid)
+        tgs_mach_sids_mapped = self.map_sids(tgs_mach_sids, groups,
+                                             tgs_mach_domain_sid)
+        tgs_expected_mapped = self.map_sids(tgs_expected, groups,
+                                            tgs_user_domain_sid)
+        tgs_device_expected_mapped = self.map_sids(tgs_device_expected, groups,
+                                                   tgs_mach_domain_sid)
+
+        user_tgt = self.get_tgt(user_creds,
+                                expected_groups=expected_groups,
+                                unexpected_groups=None)
+
+        mach_tgt = self.get_tgt(mach_creds,
+                                expected_groups=mach_expected_groups,
+                                unexpected_groups=None)
+
+        if tgs_user_sids is not None:
+            # Replace the SIDs in the user's PAC with the ones provided by the
+            # test.
+            user_tgt = self.ticket_with_sids(user_tgt,
+                                             tgs_user_sids_mapped,
+                                             tgs_user_domain_sid,
+                                             tgs_user_rid)
+
+        if tgs_mach_sids is not None:
+            # Replace the SIDs in the machine's PAC with the ones provided by
+            # the test.
+            mach_tgt = self.ticket_with_sids(mach_tgt,
+                                             tgs_mach_sids_mapped,
+                                             tgs_mach_domain_sid,
+                                             tgs_mach_rid,
+                                             set_user_flags=tgs_mach_set_user_flags,
+                                             reset_user_flags=tgs_mach_reset_user_flags,
+                                             from_rodc=tgs_mach_from_rodc)
+        elif tgs_mach_from_rodc:
+            mach_tgt = self.issued_by_rodc(mach_tgt)
+
+        # Adjust the machine's group memberships after its TGT was obtained,
+        # so the TGT's PAC no longer reflects the directory state.
+        if tgs_mach_removed is not None:
+            for removed in tgs_mach_removed:
+                group_dn = self.map_to_dn(removed, groups, domain_sid=None)
+                self.remove_from_group(mach_dn, group_dn)
+
+        if tgs_mach_added is not None:
+            for added in tgs_mach_added:
+                group_dn = self.map_to_dn(added, groups, domain_sid=None)
+                self.add_to_group(mach_dn_str, group_dn, 'member',
+                                  expect_attr=False)
+
+        # Derive the FAST armor key: KRB-FX-CF2 of the explicit armor key
+        # (armor subkey + machine TGT session key) with the authenticator
+        # subkey, using the 'explicitarmor'/'tgsarmor' labels.
+        subkey = self.RandomKey(user_tgt.session_key.etype)
+
+        armor_subkey = self.RandomKey(subkey.etype)
+        explicit_armor_key = self.generate_armor_key(armor_subkey,
+                                                     mach_tgt.session_key)
+        armor_key = kcrypto.cf2(explicit_armor_key.key,
+                                subkey.key,
+                                b'explicitarmor',
+                                b'tgsarmor')
+        armor_key = Krb5EncryptionKey(armor_key, None)
+
+        target_creds, sname = self.get_target(
+            to_krbtgt=tgs_to_krbtgt,
+            compound_id=tgs_compound_id,
+            compression=tgs_compression)
+        srealm = target_creds.get_realm()
+
+        decryption_key = self.TicketDecryptionKey_from_creds(
+            target_creds)
+
+        target_supported_etypes = target_creds.tgs_supported_enctypes
+
+        etypes = (AES256_CTS_HMAC_SHA1_96, ARCFOUR_HMAC_MD5)
+
+        kdc_options = '0'
+        pac_options = '1' # claims support
+
+        # A requester-SID PAC buffer is only expected on tickets issued by
+        # the krbtgt.
+        requester_sid = None
+        if tgs_to_krbtgt:
+            requester_sid = user_sid
+
+        expect_resource_groups_flag = None
+        if tgs_mach_reset_user_flags & netlogon.NETLOGON_RESOURCE_GROUPS:
+            expect_resource_groups_flag = False
+        elif tgs_mach_set_user_flags & netlogon.NETLOGON_RESOURCE_GROUPS:
+            expect_resource_groups_flag = True
+
+        # Perform a TGS-REQ with the user account.
+
+        kdc_exchange_dict = self.tgs_exchange_dict(
+            creds=user_creds,
+            expected_crealm=user_tgt.crealm,
+            expected_cname=user_tgt.cname,
+            expected_srealm=srealm,
+            expected_sname=sname,
+            expected_account_name=user_name,
+            ticket_decryption_key=decryption_key,
+            generate_fast_fn=self.generate_simple_fast,
+            generate_fast_armor_fn=self.generate_ap_req,
+            check_rep_fn=self.generic_check_kdc_rep,
+            check_kdc_private_fn=self.generic_check_kdc_private,
+            tgt=user_tgt,
+            armor_key=armor_key,
+            armor_tgt=mach_tgt,
+            armor_subkey=armor_subkey,
+            pac_options=pac_options,
+            authenticator_subkey=subkey,
+            kdc_options=kdc_options,
+            expect_pac=True,
+            expect_pac_attrs=tgs_to_krbtgt,
+            expect_pac_attrs_pac_request=tgs_to_krbtgt,
+            expected_sid=tgs_user_sid,
+            expected_requester_sid=requester_sid,
+            expected_domain_sid=tgs_user_domain_sid,
+            expected_device_domain_sid=tgs_mach_domain_sid,
+            expected_supported_etypes=target_supported_etypes,
+            expect_resource_groups_flag=expect_resource_groups_flag,
+            expected_groups=tgs_expected_mapped,
+            expect_device_info=bool(tgs_compound_id),
+            expected_device_groups=tgs_device_expected_mapped)
+
+        rep = self._generic_kdc_exchange(kdc_exchange_dict,
+                                         cname=None,
+                                         realm=srealm,
+                                         sname=sname,
+                                         etypes=etypes)
+        self.check_reply(rep, KRB_TGS_REP)
+
+
+if __name__ == '__main__':
+    # Debug-output toggles; presumably read by the test classes' setUp() —
+    # confirm against the class definitions above.
+    global_asn1_print = False
+    global_hexdump = False
+    import unittest
+    unittest.main()
diff --git a/python/samba/tests/krb5/etype_tests.py b/python/samba/tests/krb5/etype_tests.py
new file mode 100755
index 0000000..7ac76f9
--- /dev/null
+++ b/python/samba/tests/krb5/etype_tests.py
@@ -0,0 +1,597 @@
+#!/usr/bin/env python3
+# Unix SMB/CIFS implementation.
+# Copyright (C) Stefan Metzmacher 2020
+# Copyright (C) 2022 Catalyst.Net Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import sys
+import os
+
+sys.path.insert(0, "bin/python")
+os.environ["PYTHONUNBUFFERED"] = "1"
+
+import itertools
+
+from samba.dcerpc import security
+
+from samba.tests import DynamicTestCase
+from samba.tests.krb5.kdc_tgs_tests import KdcTgsBaseTests
+from samba.tests.krb5.raw_testcase import KerberosCredentials
+from samba.tests.krb5.rfc4120_constants import (
+ AES128_CTS_HMAC_SHA1_96,
+ AES256_CTS_HMAC_SHA1_96,
+ ARCFOUR_HMAC_MD5,
+ KDC_ERR_ETYPE_NOSUPP,
+)
+import samba.tests.krb5.rfc4120_pyasn1 as krb5_asn1
+
+
+# Debug-output toggles, read by EtypeTests.setUp() below.
+global_asn1_print = False
+global_hexdump = False
+
+# Shorthand names for the msDS-SupportedEncryptionTypes bit flags.
+des_bits = security.KERB_ENCTYPE_DES_CBC_MD5 | security.KERB_ENCTYPE_DES_CBC_CRC
+rc4_bit = security.KERB_ENCTYPE_RC4_HMAC_MD5
+aes128_bit = security.KERB_ENCTYPE_AES128_CTS_HMAC_SHA1_96
+aes256_bit = security.KERB_ENCTYPE_AES256_CTS_HMAC_SHA1_96
+aes256_sk_bit = security.KERB_ENCTYPE_AES256_CTS_HMAC_SHA1_96_SK
+fast_bit = security.KERB_ENCTYPE_FAST_SUPPORTED
+
+# Bits naming actual encryption types.
+etype_bits = rc4_bit | aes128_bit | aes256_bit
+# Bits that do not name an encryption type (AES256 session-key policy and
+# FAST support).
+extra_bits = aes256_sk_bit | fast_bit
+
+
+@DynamicTestCase
+class EtypeTests(KdcTgsBaseTests):
+    def setUp(self):
+        super().setUp()
+        # Debug-output toggles for the raw test machinery.
+        self.do_asn1_print = global_asn1_print
+        self.do_hexdump = global_hexdump
+
+        # Determine the default supported enctypes: prefer the explicitly
+        # configured default etypes, otherwise consult smb.conf ('kdc default
+        # domain supported enctypes'), falling back to RC4 plus the AES256
+        # session-key bit when that setting is 0.
+        self.default_supported_enctypes = self.default_etypes
+        if self.default_supported_enctypes is None:
+            lp = self.get_lp()
+            self.default_supported_enctypes = lp.get(
+                'kdc default domain supported enctypes')
+            if self.default_supported_enctypes == 0:
+                self.default_supported_enctypes = rc4_bit | aes256_sk_bit
+
+    def _server_creds(self, supported=None, force_nt4_hash=False,
+                      account_type=None):
+        """Return (cached) credentials for a target server account.
+
+        :param supported: value for the account's
+            msDS-SupportedEncryptionTypes (the 'supported_enctypes' option).
+        :param force_nt4_hash: passed through as the 'force_nt4_hash' account
+            option — presumably limits the stored keys to the NT hash (RC4);
+            confirm against get_cached_creds().
+        :param account_type: account type to create; defaults to a computer
+            (member) account.
+        """
+        if account_type is None:
+            account_type= self.AccountType.COMPUTER
+        return self.get_cached_creds(
+            account_type=account_type,
+            opts={
+                'supported_enctypes': supported,
+                'force_nt4_hash': force_nt4_hash,
+            })
+
+    def only_non_etype_bits_set(self, bits):
+        """Return true if *bits* sets at least one non-etype bit (AES256-SK
+        or FAST) and none of the RC4/AES128/AES256 etype bits.
+
+        A None value yields False.
+        """
+        return bits is not None and (
+            bits & extra_bits and
+            not (bits & etype_bits))
+
+    @classmethod
+    def setUpDynamicTestCases(cls):
+        """Generate AS and TGS etype tests for every combination of requested
+        etypes, server-supported etype bits, account type, and stored key
+        type."""
+        all_etypes = (AES256_CTS_HMAC_SHA1_96,
+                      AES128_CTS_HMAC_SHA1_96,
+                      ARCFOUR_HMAC_MD5)
+
+        # An iterator yielding all permutations consisting of at least one
+        # etype.
+        requested_etype_cases = itertools.chain.from_iterable(
+            itertools.permutations(all_etypes, x)
+            for x in range(1, len(all_etypes) + 1))
+
+        # Some combinations of msDS-SupportedEncryptionTypes bits to be set on
+        # the target server.
+        supported_etype_cases = (
+            # Not set.
+            None,
+            # Every possible combination of RC4, AES128, AES256, and AES256-SK.
+            0,
+            rc4_bit,
+            aes256_sk_bit,
+            aes256_sk_bit | rc4_bit,
+            aes256_bit,
+            aes256_bit | rc4_bit,
+            aes256_bit | aes256_sk_bit,
+            aes256_bit | aes256_sk_bit | rc4_bit,
+            aes128_bit,
+            aes128_bit | rc4_bit,
+            aes128_bit | aes256_sk_bit,
+            aes128_bit | aes256_sk_bit | rc4_bit,
+            aes128_bit | aes256_bit,
+            aes128_bit | aes256_bit | rc4_bit,
+            aes128_bit | aes256_bit | aes256_sk_bit,
+            aes128_bit | aes256_bit | aes256_sk_bit | rc4_bit,
+            # Some combinations with an extra bit (the FAST-supported bit) set.
+            fast_bit,
+            fast_bit | rc4_bit,
+            fast_bit | aes256_sk_bit,
+            fast_bit | aes256_bit,
+        )
+
+        for _requested_etypes in requested_etype_cases:
+            # Turn the tuple repr into a test-name fragment: commas become
+            # underscores; parentheses and spaces are dropped.
+            _s = str(_requested_etypes)
+            _t = _s.maketrans(",", "_", "( )")
+            requested_etypes = _s.translate(_t)
+
+            for _supported_etypes in supported_etype_cases:
+                if _supported_etypes is None:
+                    supported_etypes = "None"
+                else:
+                    supported_etypes = f'0x{_supported_etypes:X}'
+
+                # Target either a member computer or a DC (server) account.
+                for account_type in ["member", "dc"]:
+                    if account_type == "dc":
+                        _account_type = cls.AccountType.SERVER
+                    elif account_type == "member":
+                        _account_type = cls.AccountType.COMPUTER
+
+                    # Either AES+RC4 keys are stored, or the NT4 hash only.
+                    for stored_type in ["aes_rc4", "rc4_only"]:
+                        if stored_type == "aes_rc4":
+                            force_nt4_hash = False
+                        elif stored_type == "rc4_only":
+                            force_nt4_hash = True
+
+                        tname = (f'{supported_etypes}_supported_'
+                                 f'{requested_etypes}_requested_'
+                                 f'{account_type}_account_'
+                                 f'stored_{stored_type}')
+                        targs = _supported_etypes, _requested_etypes, _account_type, force_nt4_hash
+                        cls.generate_dynamic_test('test_etype_as', tname, *targs)
+                        cls.generate_dynamic_test('test_etype_tgs', tname, *targs)
+
+    def _test_etype_as_with_args(self, supported_bits, requested_etypes, account_type, force_nt4_hash):
+        """Perform an AS-REQ and verify the resulting ticket and session-key
+        enctypes (or the expected ETYPE_NOSUPP error) for the given
+        combination of server-supported bits, requested etypes, account type,
+        and stored key type."""
+        # The ticket will be encrypted with the strongest enctype for which the
+        # server explicitly declares support, falling back to RC4 if the server
+        # has no declared supported encryption types. The enctype of the
+        # session key is the first enctype listed in the request that the
+        # server supports, taking the AES-SK bit as an indication of support
+        # for both AES types.
+
+        # If none of the enctypes in the request are supported by the target
+        # server, implicitly or explicitly, return ETYPE_NOSUPP.
+
+        expected_error = 0
+
+        if not supported_bits:
+            # If msDS-SupportedEncryptionTypes is missing or set to zero, the
+            # default value, provided by smb.conf, is assumed.
+            supported_bits = self.default_supported_enctypes
+
+        # If msDS-SupportedEncryptionTypes specifies only non-etype bits, we
+        # expect an error.
+        if self.only_non_etype_bits_set(supported_bits):
+            expected_error = KDC_ERR_ETYPE_NOSUPP
+
+        # 'virtual_bits' models the etypes the server effectively supports,
+        # as opposed to those it explicitly declares.
+        virtual_bits = supported_bits
+
+        if self.forced_rc4 and not (virtual_bits & rc4_bit):
+            # If our fallback smb.conf option is set, force in RC4 support.
+            virtual_bits |= rc4_bit
+
+        if force_nt4_hash and not (virtual_bits & rc4_bit):
+            # An account stored with only the NT4 hash implies RC4 support.
+            virtual_bits |= rc4_bit
+
+        if virtual_bits & aes256_sk_bit:
+            # If strong session keys are enabled, force in the AES bits.
+            virtual_bits |= aes256_bit | aes128_bit
+
+        if account_type == self.AccountType.SERVER:
+            # DC accounts are treated as supporting every etype, so no error
+            # is expected.
+            virtual_bits |= etype_bits
+            expected_error = 0
+
+        virtual_etypes = KerberosCredentials.bits_to_etypes(virtual_bits)
+
+        # The enctype of the session key is the first listed in the request
+        # that the server supports, implicitly or explicitly.
+        for requested_etype in requested_etypes:
+            if requested_etype in virtual_etypes:
+                expected_session_etype = requested_etype
+                break
+        else:
+            # If there is no such enctype, expect an error.
+            expected_error = KDC_ERR_ETYPE_NOSUPP
+
+        # Get the credentials of the client and server accounts.
+        creds = self.get_client_creds()
+        target_creds = self._server_creds(supported=supported_bits,
+                                          account_type=account_type,
+                                          force_nt4_hash=force_nt4_hash)
+        if account_type == self.AccountType.SERVER:
+            target_supported_etypes = target_creds.tgs_supported_enctypes
+            target_supported_etypes |= des_bits
+            target_supported_etypes |= etype_bits
+            target_creds.set_tgs_supported_enctypes(target_supported_etypes)
+            supported_bits |= (target_supported_etypes & etype_bits)
+
+        # We expect the ticket etype to be the strongest the server claims to
+        # support, with a fallback to RC4.
+        expected_etype = ARCFOUR_HMAC_MD5
+        if not force_nt4_hash and supported_bits is not None:
+            if supported_bits & aes256_bit:
+                expected_etype = AES256_CTS_HMAC_SHA1_96
+            elif supported_bits & aes128_bit:
+                expected_etype = AES128_CTS_HMAC_SHA1_96
+
+        # Perform the AS-REQ.
+        ticket = self._as_req(creds, expected_error=expected_error,
+                              target_creds=target_creds,
+                              etype=requested_etypes,
+                              expected_ticket_etype=expected_etype)
+        if expected_error:
+            # There's no more to check. Return.
+            return
+
+        # Check the etypes of the ticket and session key.
+        self.assertEqual(expected_etype, ticket.decryption_key.etype)
+        self.assertEqual(expected_session_etype, ticket.session_key.etype)
+
+    def _test_etype_tgs_with_args(self, supported_bits, requested_etypes, account_type, force_nt4_hash):
+        # TGS-REQ analogue of _test_etype_as_with_args(): the negotiation
+        # logic below is duplicated from that driver, but the request is made
+        # with a TGT via _tgs_req() rather than via _as_req(). Parameters have
+        # the same meaning as in the AS-REQ driver.
+        expected_error = 0
+
+        if not supported_bits:
+            # If msDS-SupportedEncryptionTypes is missing or set to zero, the
+            # default value, provided by smb.conf, is assumed.
+            supported_bits = self.default_supported_enctypes
+
+        # If msDS-SupportedEncryptionTypes specifies only non-etype bits, we
+        # expect an error.
+        if self.only_non_etype_bits_set(supported_bits):
+            expected_error = KDC_ERR_ETYPE_NOSUPP
+
+        # virtual_bits models what the KDC effectively supports, which may be
+        # wider than what the account explicitly declares.
+        virtual_bits = supported_bits
+
+        if self.forced_rc4 and not (virtual_bits & rc4_bit):
+            # If our fallback smb.conf option is set, force in RC4 support.
+            virtual_bits |= rc4_bit
+
+        if force_nt4_hash and not (virtual_bits & rc4_bit):
+            virtual_bits |= rc4_bit
+
+        if virtual_bits & aes256_sk_bit:
+            # If strong session keys are enabled, force in the AES bits.
+            virtual_bits |= aes256_bit | aes128_bit
+
+        if account_type == self.AccountType.SERVER:
+            # Server accounts are treated as supporting every etype, so no
+            # error is expected for them.
+            virtual_bits |= etype_bits
+            expected_error = 0
+
+        virtual_etypes = KerberosCredentials.bits_to_etypes(virtual_bits)
+
+        # The enctype of the session key is the first listed in the request
+        # that the server supports, implicitly or explicitly.
+        for requested_etype in requested_etypes:
+            if requested_etype in virtual_etypes:
+                expected_session_etype = requested_etype
+                break
+        else:
+            # If there is no such enctype, expect an error.
+            expected_error = KDC_ERR_ETYPE_NOSUPP
+
+        # Get the credentials of the client and server accounts.
+        creds = self.get_client_creds()
+        tgt = self.get_tgt(creds)
+        target_creds = self._server_creds(supported=supported_bits,
+                                          account_type=account_type,
+                                          force_nt4_hash=force_nt4_hash)
+        if account_type == self.AccountType.SERVER:
+            target_supported_etypes = target_creds.tgs_supported_enctypes
+            target_supported_etypes |= des_bits
+            target_supported_etypes |= etype_bits
+            target_creds.set_tgs_supported_enctypes(target_supported_etypes)
+            supported_bits |= (target_supported_etypes & etype_bits)
+
+        # We expect the ticket etype to be the strongest the server claims to
+        # support, with a fallback to RC4.
+        expected_etype = ARCFOUR_HMAC_MD5
+        if not force_nt4_hash and supported_bits is not None:
+            if supported_bits & aes256_bit:
+                expected_etype = AES256_CTS_HMAC_SHA1_96
+            elif supported_bits & aes128_bit:
+                expected_etype = AES128_CTS_HMAC_SHA1_96
+
+        # Perform the TGS-REQ.
+        ticket = self._tgs_req(tgt, expected_error=expected_error,
+                               creds=creds, target_creds=target_creds,
+                               kdc_options=str(krb5_asn1.KDCOptions('canonicalize')),
+                               expected_supported_etypes=target_creds.tgs_supported_enctypes,
+                               expected_ticket_etype=expected_etype,
+                               etypes=requested_etypes)
+        if expected_error:
+            # There's no more to check. Return.
+            return
+
+        # Check the etypes of the ticket and session key.
+        self.assertEqual(expected_etype, ticket.decryption_key.etype)
+        self.assertEqual(expected_session_etype, ticket.session_key.etype)
+
+    # Perform an AS-REQ for a service ticket, specifying AES, when the target
+    # service only supports AES. The resulting ticket should be encrypted with
+    # AES, with an AES session key.
+    def test_as_aes_supported_aes_requested(self):
+        creds = self.get_client_creds()
+        target_creds = self._server_creds(supported=aes256_bit)
+
+        # Straightforward match: request AES256 from an AES256-only service.
+        ticket = self._as_req(creds, expected_error=0,
+                              target_creds=target_creds,
+                              etype=(AES256_CTS_HMAC_SHA1_96,))
+
+        self.assertEqual(AES256_CTS_HMAC_SHA1_96, ticket.decryption_key.etype)
+        self.assertEqual(AES256_CTS_HMAC_SHA1_96, ticket.session_key.etype)
+
+    # Perform an AS-REQ for a service ticket, specifying RC4, when the target
+    # service only supports AES. The request should fail with an error.
+    def test_as_aes_supported_rc4_requested(self):
+        creds = self.get_client_creds()
+        target_creds = self._server_creds(supported=aes256_bit)
+
+        # With the RC4-fallback smb.conf option set, the request succeeds and
+        # yields an RC4 session key; otherwise it fails with ETYPE_NOSUPP.
+        if self.forced_rc4:
+            expected_error = 0
+            expected_session_etype = ARCFOUR_HMAC_MD5
+        else:
+            expected_error = KDC_ERR_ETYPE_NOSUPP
+            expected_session_etype = AES256_CTS_HMAC_SHA1_96
+
+        ticket = self._as_req(creds, expected_error=expected_error,
+                              target_creds=target_creds,
+                              etype=(ARCFOUR_HMAC_MD5,))
+
+        if not self.forced_rc4:
+            # An error was expected, so there is no ticket to inspect.
+            return
+
+        self.assertEqual(AES256_CTS_HMAC_SHA1_96, ticket.decryption_key.etype)
+        self.assertEqual(expected_session_etype, ticket.session_key.etype)
+
+    # Perform an AS-REQ for a service ticket, specifying AES, when the target
+    # service only supports AES, and supports AES256 session keys. The
+    # resulting ticket should be encrypted with AES, with an AES session key.
+    def test_as_aes_supported_aes_session_aes_requested(self):
+        creds = self.get_client_creds()
+        # aes256_sk_bit: the account additionally advertises AES256 session
+        # key support.
+        target_creds = self._server_creds(supported=aes256_bit | aes256_sk_bit)
+
+        ticket = self._as_req(creds, expected_error=0,
+                              target_creds=target_creds,
+                              etype=(AES256_CTS_HMAC_SHA1_96,))
+
+        self.assertEqual(AES256_CTS_HMAC_SHA1_96, ticket.decryption_key.etype)
+        self.assertEqual(AES256_CTS_HMAC_SHA1_96, ticket.session_key.etype)
+
+    # Perform an AS-REQ for a service ticket, specifying RC4, when the target
+    # service only supports AES, and supports AES256 session keys. The request
+    # should fail with an error.
+    def test_as_aes_supported_aes_session_rc4_requested(self):
+        creds = self.get_client_creds()
+        target_creds = self._server_creds(supported=aes256_bit | aes256_sk_bit)
+
+        # With the RC4-fallback smb.conf option set, the request succeeds and
+        # yields an RC4 session key; otherwise it fails with ETYPE_NOSUPP.
+        if self.forced_rc4:
+            expected_error = 0
+            expected_session_etype = ARCFOUR_HMAC_MD5
+        else:
+            expected_error = KDC_ERR_ETYPE_NOSUPP
+            expected_session_etype = AES256_CTS_HMAC_SHA1_96
+
+        ticket = self._as_req(creds, expected_error=expected_error,
+                              target_creds=target_creds,
+                              etype=(ARCFOUR_HMAC_MD5,))
+
+        if not self.forced_rc4:
+            # An error was expected, so there is no ticket to inspect.
+            return
+
+        self.assertEqual(AES256_CTS_HMAC_SHA1_96, ticket.decryption_key.etype)
+        self.assertEqual(expected_session_etype, ticket.session_key.etype)
+
+    # Perform an AS-REQ for a service ticket, specifying AES, when the target
+    # service only supports RC4. The request should fail with an error.
+    def test_as_rc4_supported_aes_requested(self):
+        creds = self.get_client_creds()
+        target_creds = self._server_creds(supported=rc4_bit)
+
+        # No ticket is returned: requesting AES from an RC4-only service must
+        # fail with ETYPE_NOSUPP.
+        self._as_req(creds, expected_error=KDC_ERR_ETYPE_NOSUPP,
+                     target_creds=target_creds,
+                     etype=(AES256_CTS_HMAC_SHA1_96,))
+
+    # Perform an AS-REQ for a service ticket, specifying RC4, when the target
+    # service only supports RC4. The resulting ticket should be encrypted with
+    # RC4, with an RC4 session key.
+    def test_as_rc4_supported_rc4_requested(self):
+        creds = self.get_client_creds()
+        target_creds = self._server_creds(supported=rc4_bit)
+
+        # Straightforward match: request RC4 from an RC4-only service.
+        ticket = self._as_req(creds, expected_error=0,
+                              target_creds=target_creds,
+                              etype=(ARCFOUR_HMAC_MD5,))
+
+        self.assertEqual(ARCFOUR_HMAC_MD5, ticket.decryption_key.etype)
+        self.assertEqual(ARCFOUR_HMAC_MD5, ticket.session_key.etype)
+
+    # Perform an AS-REQ for a service ticket, specifying AES, when the target
+    # service only supports RC4, but supports AES256 session keys. The
+    # resulting ticket should be encrypted with RC4, with an AES256 session
+    # key.
+    def test_as_rc4_supported_aes_session_aes_requested(self):
+        creds = self.get_client_creds()
+        target_creds = self._server_creds(supported=rc4_bit | aes256_sk_bit)
+
+        ticket = self._as_req(creds, expected_error=0,
+                              target_creds=target_creds,
+                              etype=(AES256_CTS_HMAC_SHA1_96,))
+
+        # The ticket etype follows the declared etype bits (RC4 only), while
+        # the session key honours the AES256 session-key bit.
+        self.assertEqual(ARCFOUR_HMAC_MD5, ticket.decryption_key.etype)
+        self.assertEqual(AES256_CTS_HMAC_SHA1_96, ticket.session_key.etype)
+
+    # Perform an AS-REQ for a service ticket, specifying RC4, when the target
+    # service only supports RC4, but supports AES256 session keys. The
+    # resulting ticket should be encrypted with RC4, with an RC4 session key.
+    def test_as_rc4_supported_aes_session_rc4_requested(self):
+        creds = self.get_client_creds()
+        target_creds = self._server_creds(supported=rc4_bit | aes256_sk_bit)
+
+        # The AES256 session-key bit does not override the client's request:
+        # RC4 was the only requested etype, so both keys are RC4.
+        ticket = self._as_req(creds, expected_error=0,
+                              target_creds=target_creds,
+                              etype=(ARCFOUR_HMAC_MD5,))
+
+        self.assertEqual(ARCFOUR_HMAC_MD5, ticket.decryption_key.etype)
+        self.assertEqual(ARCFOUR_HMAC_MD5, ticket.session_key.etype)
+
+    # Perform a TGS-REQ for a service ticket, specifying AES, when the target
+    # service only supports AES. The resulting ticket should be encrypted with
+    # AES, with an AES session key.
+    def test_tgs_aes_supported_aes_requested(self):
+        creds = self.get_client_creds()
+        tgt = self.get_tgt(creds)
+
+        target_creds = self._server_creds(supported=aes256_bit)
+
+        # Straightforward match: request AES256 from an AES256-only service.
+        ticket = self._tgs_req(tgt, expected_error=0,
+                               creds=creds, target_creds=target_creds,
+                               etypes=(AES256_CTS_HMAC_SHA1_96,))
+
+        self.assertEqual(AES256_CTS_HMAC_SHA1_96, ticket.decryption_key.etype)
+        self.assertEqual(AES256_CTS_HMAC_SHA1_96, ticket.session_key.etype)
+
+    # Perform a TGS-REQ for a service ticket, specifying RC4, when the target
+    # service only supports AES. The request should fail with an error.
+    def test_tgs_aes_supported_rc4_requested(self):
+        creds = self.get_client_creds()
+        tgt = self.get_tgt(creds)
+
+        target_creds = self._server_creds(supported=aes256_bit)
+
+        # With the RC4-fallback smb.conf option set, the request succeeds;
+        # otherwise it fails with ETYPE_NOSUPP.
+        if self.forced_rc4:
+            expected_error = 0
+        else:
+            expected_error = KDC_ERR_ETYPE_NOSUPP
+
+        ticket = self._tgs_req(tgt, expected_error=expected_error,
+                               creds=creds, target_creds=target_creds,
+                               etypes=(ARCFOUR_HMAC_MD5,))
+
+        if not self.forced_rc4:
+            # An error was expected, so there is no ticket to inspect.
+            return
+
+        self.assertEqual(AES256_CTS_HMAC_SHA1_96, ticket.decryption_key.etype)
+        self.assertEqual(ARCFOUR_HMAC_MD5, ticket.session_key.etype)
+
+    # Perform a TGS-REQ for a service ticket, specifying AES, when the target
+    # service only supports AES, and supports AES256 session keys. The
+    # resulting ticket should be encrypted with AES, with an AES session key.
+    def test_tgs_aes_supported_aes_session_aes_requested(self):
+        creds = self.get_client_creds()
+        tgt = self.get_tgt(creds)
+
+        # aes256_sk_bit: the account additionally advertises AES256 session
+        # key support.
+        target_creds = self._server_creds(supported=aes256_bit | aes256_sk_bit)
+
+        ticket = self._tgs_req(tgt, expected_error=0,
+                               creds=creds, target_creds=target_creds,
+                               etypes=(AES256_CTS_HMAC_SHA1_96,))
+
+        self.assertEqual(AES256_CTS_HMAC_SHA1_96, ticket.decryption_key.etype)
+        self.assertEqual(AES256_CTS_HMAC_SHA1_96, ticket.session_key.etype)
+
+    # Perform a TGS-REQ for a service ticket, specifying RC4, when the target
+    # service only supports AES, and supports AES256 session keys. The request
+    # should fail with an error.
+    def test_tgs_aes_supported_aes_session_rc4_requested(self):
+        creds = self.get_client_creds()
+        tgt = self.get_tgt(creds)
+
+        target_creds = self._server_creds(supported=aes256_bit | aes256_sk_bit)
+
+        # With the RC4-fallback smb.conf option set, the request succeeds;
+        # otherwise it fails with ETYPE_NOSUPP.
+        if self.forced_rc4:
+            expected_error = 0
+        else:
+            expected_error = KDC_ERR_ETYPE_NOSUPP
+
+        ticket = self._tgs_req(tgt, expected_error=expected_error,
+                               creds=creds, target_creds=target_creds,
+                               etypes=(ARCFOUR_HMAC_MD5,))
+
+        if not self.forced_rc4:
+            # An error was expected, so there is no ticket to inspect.
+            return
+
+        self.assertEqual(AES256_CTS_HMAC_SHA1_96, ticket.decryption_key.etype)
+        self.assertEqual(ARCFOUR_HMAC_MD5, ticket.session_key.etype)
+
+    # Perform a TGS-REQ for a service ticket, specifying AES, when the target
+    # service only supports RC4. The request should fail with an error.
+    def test_tgs_rc4_supported_aes_requested(self):
+        creds = self.get_client_creds()
+        tgt = self.get_tgt(creds)
+
+        target_creds = self._server_creds(supported=rc4_bit)
+
+        # No ticket is returned: requesting AES from an RC4-only service must
+        # fail with ETYPE_NOSUPP.
+        self._tgs_req(tgt, expected_error=KDC_ERR_ETYPE_NOSUPP,
+                      creds=creds, target_creds=target_creds,
+                      etypes=(AES256_CTS_HMAC_SHA1_96,))
+
+    # Perform a TGS-REQ for a service ticket, specifying RC4, when the target
+    # service only supports RC4. The resulting ticket should be encrypted with
+    # RC4, with an RC4 session key.
+    def test_tgs_rc4_supported_rc4_requested(self):
+        creds = self.get_client_creds()
+        tgt = self.get_tgt(creds)
+
+        target_creds = self._server_creds(supported=rc4_bit)
+
+        # Straightforward match: request RC4 from an RC4-only service.
+        ticket = self._tgs_req(tgt, expected_error=0,
+                               creds=creds, target_creds=target_creds,
+                               etypes=(ARCFOUR_HMAC_MD5,))
+
+        self.assertEqual(ARCFOUR_HMAC_MD5, ticket.decryption_key.etype)
+        self.assertEqual(ARCFOUR_HMAC_MD5, ticket.session_key.etype)
+
+    # Perform a TGS-REQ for a service ticket, specifying AES, when the target
+    # service only supports RC4, but supports AES256 session keys. The
+    # resulting ticket should be encrypted with RC4, with an AES256 session
+    # key.
+    def test_tgs_rc4_supported_aes_session_aes_requested(self):
+        creds = self.get_client_creds()
+        tgt = self.get_tgt(creds)
+
+        target_creds = self._server_creds(supported=rc4_bit | aes256_sk_bit)
+
+        ticket = self._tgs_req(tgt, expected_error=0,
+                               creds=creds, target_creds=target_creds,
+                               etypes=(AES256_CTS_HMAC_SHA1_96,))
+
+        # The ticket etype follows the declared etype bits (RC4 only), while
+        # the session key honours the AES256 session-key bit.
+        self.assertEqual(ARCFOUR_HMAC_MD5, ticket.decryption_key.etype)
+        self.assertEqual(AES256_CTS_HMAC_SHA1_96, ticket.session_key.etype)
+
+    # Perform a TGS-REQ for a service ticket, specifying RC4, when the target
+    # service only supports RC4, but supports AES256 session keys. The
+    # resulting ticket should be encrypted with RC4, with an RC4 session key.
+    def test_tgs_rc4_supported_aes_session_rc4_requested(self):
+        creds = self.get_client_creds()
+        tgt = self.get_tgt(creds)
+
+        target_creds = self._server_creds(supported=rc4_bit | aes256_sk_bit)
+
+        # The AES256 session-key bit does not override the client's request:
+        # RC4 was the only requested etype, so both keys are RC4.
+        ticket = self._tgs_req(tgt, expected_error=0,
+                               creds=creds, target_creds=target_creds,
+                               etypes=(ARCFOUR_HMAC_MD5,))
+
+        self.assertEqual(ARCFOUR_HMAC_MD5, ticket.decryption_key.etype)
+        self.assertEqual(ARCFOUR_HMAC_MD5, ticket.session_key.etype)
+
+
+# Allow running this test file directly as a standalone script.
+if __name__ == "__main__":
+    # NOTE(review): these assignments rebind the module-level debug flags
+    # before unittest collects the tests; presumably they mirror the values
+    # already set at import time — confirm they are still needed.
+    global_asn1_print = False
+    global_hexdump = False
+    import unittest
+    unittest.main()
diff --git a/python/samba/tests/krb5/fast_tests.py b/python/samba/tests/krb5/fast_tests.py
new file mode 100755
index 0000000..3feafc2
--- /dev/null
+++ b/python/samba/tests/krb5/fast_tests.py
@@ -0,0 +1,2108 @@
+#!/usr/bin/env python3
+# Unix SMB/CIFS implementation.
+# Copyright (C) Stefan Metzmacher 2020
+# Copyright (C) 2020 Catalyst.Net Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import sys
+import os
+
+sys.path.insert(0, "bin/python")
+os.environ["PYTHONUNBUFFERED"] = "1"
+
+import functools
+import collections
+
+import ldb
+
+from samba.dcerpc import krb5pac, security
+from samba.tests.krb5.raw_testcase import Krb5EncryptionKey, ZeroedChecksumKey
+from samba.tests.krb5.kdc_base_test import KDCBaseTest
+from samba.tests.krb5.rfc4120_constants import (
+ AD_FX_FAST_ARMOR,
+ AD_FX_FAST_USED,
+ AES256_CTS_HMAC_SHA1_96,
+ ARCFOUR_HMAC_MD5,
+ FX_FAST_ARMOR_AP_REQUEST,
+ KDC_ERR_BAD_INTEGRITY,
+ KDC_ERR_ETYPE_NOSUPP,
+ KDC_ERR_GENERIC,
+ KDC_ERR_S_PRINCIPAL_UNKNOWN,
+ KDC_ERR_MODIFIED,
+ KDC_ERR_NOT_US,
+ KDC_ERR_POLICY,
+ KDC_ERR_PREAUTH_FAILED,
+ KDC_ERR_PREAUTH_REQUIRED,
+ KDC_ERR_SKEW,
+ KDC_ERR_UNKNOWN_CRITICAL_FAST_OPTIONS,
+ KRB_AS_REP,
+ KRB_TGS_REP,
+ KU_TGS_REQ_AUTH_DAT_SESSION,
+ KU_TGS_REQ_AUTH_DAT_SUBKEY,
+ NT_PRINCIPAL,
+ NT_SRV_HST,
+ NT_SRV_INST,
+ PADATA_FX_COOKIE,
+ PADATA_FX_FAST,
+ PADATA_REQ_ENC_PA_REP,
+)
+import samba.tests.krb5.rfc4120_pyasn1 as krb5_asn1
+import samba.tests.krb5.kcrypto as kcrypto
+
+global_asn1_print = False
+global_hexdump = False
+
+
+class FAST_Tests(KDCBaseTest):
+    def setUp(self):
+        # Propagate the module-level debug flags onto this test instance so
+        # the raw-packet helpers can honour them.
+        super().setUp()
+        self.do_asn1_print = global_asn1_print
+        self.do_hexdump = global_hexdump
+
+    def test_simple(self):
+        # Plain (non-FAST) AS exchange: the first request draws
+        # PREAUTH_REQUIRED; retrying with an encrypted timestamp succeeds.
+        self._run_test_sequence([
+            {
+                'rep_type': KRB_AS_REP,
+                'expected_error_mode': KDC_ERR_PREAUTH_REQUIRED,
+                'use_fast': False
+            },
+            {
+                'rep_type': KRB_AS_REP,
+                'expected_error_mode': 0,
+                'use_fast': False,
+                'gen_padata_fn': self.generate_enc_timestamp_padata
+            }
+        ])
+
+    def test_simple_as_req_self(self):
+        # Plain AS exchange where a computer account requests a ticket to
+        # itself ('as_req_self').
+        self._run_test_sequence([
+            {
+                'rep_type': KRB_AS_REP,
+                'expected_error_mode': KDC_ERR_PREAUTH_REQUIRED,
+                'use_fast': False,
+                'as_req_self': True
+            },
+            {
+                'rep_type': KRB_AS_REP,
+                'expected_error_mode': 0,
+                'use_fast': False,
+                'gen_padata_fn': self.generate_enc_timestamp_padata,
+                'as_req_self': True
+            }
+        ], client_account=self.AccountType.COMPUTER)
+
+    def test_simple_as_req_self_no_auth_data(self):
+        # AS-REQ to self for a computer account marked with
+        # 'no_auth_data_required': a PAC is still expected in the reply.
+        self._run_test_sequence(
+            [
+                {
+                    'rep_type': KRB_AS_REP,
+                    'expected_error_mode': KDC_ERR_PREAUTH_REQUIRED,
+                    'use_fast': False,
+                    'as_req_self': True
+                },
+                {
+                    'rep_type': KRB_AS_REP,
+                    'expected_error_mode': 0,
+                    'use_fast': False,
+                    'gen_padata_fn': self.generate_enc_timestamp_padata,
+                    'as_req_self': True,
+                    'expect_pac': True
+                }
+            ],
+            client_account=self.AccountType.COMPUTER,
+            client_opts={'no_auth_data_required': True})
+
+    def test_simple_as_req_self_pac_request_false(self):
+        # AS-REQ to self with an explicit PA-PAC-REQUEST of false: no PAC is
+        # expected in the resulting ticket.
+        self._run_test_sequence([
+            {
+                'rep_type': KRB_AS_REP,
+                'expected_error_mode': KDC_ERR_PREAUTH_REQUIRED,
+                'use_fast': False,
+                'as_req_self': True
+            },
+            {
+                'rep_type': KRB_AS_REP,
+                'expected_error_mode': 0,
+                'use_fast': False,
+                'gen_padata_fn': self.generate_enc_timestamp_padata,
+                'as_req_self': True,
+                'pac_request': False,
+                'expect_pac': False
+            }
+        ], client_account=self.AccountType.COMPUTER)
+
+    def test_simple_as_req_self_pac_request_none(self):
+        # AS-REQ to self with no PA-PAC-REQUEST at all: a PAC is expected by
+        # default.
+        self._run_test_sequence([
+            {
+                'rep_type': KRB_AS_REP,
+                'expected_error_mode': KDC_ERR_PREAUTH_REQUIRED,
+                'use_fast': False,
+                'as_req_self': True
+            },
+            {
+                'rep_type': KRB_AS_REP,
+                'expected_error_mode': 0,
+                'use_fast': False,
+                'gen_padata_fn': self.generate_enc_timestamp_padata,
+                'as_req_self': True,
+                'pac_request': None,
+                'expect_pac': True
+            }
+        ], client_account=self.AccountType.COMPUTER)
+
+    def test_simple_as_req_self_pac_request_true(self):
+        # AS-REQ to self with an explicit PA-PAC-REQUEST of true: a PAC is
+        # expected in the resulting ticket.
+        self._run_test_sequence([
+            {
+                'rep_type': KRB_AS_REP,
+                'expected_error_mode': KDC_ERR_PREAUTH_REQUIRED,
+                'use_fast': False,
+                'as_req_self': True
+            },
+            {
+                'rep_type': KRB_AS_REP,
+                'expected_error_mode': 0,
+                'use_fast': False,
+                'gen_padata_fn': self.generate_enc_timestamp_padata,
+                'as_req_self': True,
+                'pac_request': True,
+                'expect_pac': True
+            }
+        ], client_account=self.AccountType.COMPUTER)
+
+    def test_simple_tgs(self):
+        # Plain (non-FAST) TGS exchange using a user TGT; expected to succeed.
+        self._run_test_sequence([
+            {
+                'rep_type': KRB_TGS_REP,
+                'expected_error_mode': 0,
+                'use_fast': False,
+                'gen_tgt_fn': self.get_user_tgt
+            }
+        ])
+
+    def test_fast_rodc_issued_armor(self):
+        # FAST AS exchange armored with a machine TGT issued by a (mock)
+        # RODC.
+        self._run_test_sequence([
+            {
+                'rep_type': KRB_AS_REP,
+                'expected_error_mode': KDC_ERR_PREAUTH_REQUIRED,
+                'use_fast': True,
+                'fast_armor': FX_FAST_ARMOR_AP_REQUEST,
+                'gen_armor_tgt_fn': self.get_rodc_issued_mach_tgt,
+            },
+            {
+                'rep_type': KRB_AS_REP,
+                # Test that RODC-issued armor tickets are permitted.
+                'expected_error_mode': 0,
+                'use_fast': True,
+                'gen_padata_fn': self.generate_enc_challenge_padata,
+                'fast_armor': FX_FAST_ARMOR_AP_REQUEST,
+                'gen_armor_tgt_fn': self.get_rodc_issued_mach_tgt,
+            }
+        ],
+            armor_opts={
+                'allowed_replication_mock': True,
+                'revealed_to_mock_rodc': True,
+            })
+
+    def test_fast_tgs_rodc_issued_armor(self):
+        # FAST TGS exchange armored with a machine TGT issued by a (mock)
+        # RODC.
+        self._run_test_sequence([
+            {
+                'rep_type': KRB_TGS_REP,
+                # NOTE(review): the original comment here said RODC-issued
+                # armor tickets are "not permitted", but expected_error_mode
+                # is 0, i.e. the exchange is expected to succeed — confirm
+                # which is intended.
+                'expected_error_mode': 0,
+                'use_fast': True,
+                'gen_tgt_fn': self.get_user_tgt,
+                'gen_armor_tgt_fn': self.get_rodc_issued_mach_tgt,
+                'fast_armor': FX_FAST_ARMOR_AP_REQUEST,
+            }
+        ],
+            armor_opts={
+                'allowed_replication_mock': True,
+                'revealed_to_mock_rodc': True,
+            })
+
+    def test_simple_enc_pa_rep(self):
+        # Plain AS exchange requesting PA-REQ-ENC-PA-REP; the reply should
+        # carry the enc-pa-rep ticket flag.
+        self._run_test_sequence([
+            {
+                'rep_type': KRB_AS_REP,
+                'expected_error_mode': KDC_ERR_PREAUTH_REQUIRED,
+                'use_fast': False
+            },
+            {
+                'rep_type': KRB_AS_REP,
+                'expected_error_mode': 0,
+                'use_fast': False,
+                'gen_padata_fn': self.generate_enc_pa_rep_timestamp_padata,
+                'expected_flags': 'enc-pa-rep'
+            }
+        ])
+
+    # Currently we only send PADATA-REQ-ENC-PA-REP for AS-REQ requests.
+    def test_simple_tgs_enc_pa_rep(self):
+        # TGS exchange with PA-REQ-ENC-PA-REP padata; the enc-pa-rep flag is
+        # still expected on the resulting ticket.
+        self._run_test_sequence([
+            {
+                'rep_type': KRB_TGS_REP,
+                'expected_error_mode': 0,
+                'use_fast': False,
+                'gen_tgt_fn': self.get_user_tgt,
+                'gen_padata_fn': self.generate_enc_pa_rep_padata,
+                'expected_flags': 'enc-pa-rep'
+            }
+        ])
+
+    def test_simple_no_sname(self):
+        # Plain AS-REQ with no sname: expect GENERIC or S_PRINCIPAL_UNKNOWN,
+        # with the krbtgt sname filled in on the error reply.
+        expected_sname = self.get_krbtgt_sname()
+
+        self._run_test_sequence([
+            {
+                'rep_type': KRB_AS_REP,
+                'expected_error_mode': (KDC_ERR_GENERIC, KDC_ERR_S_PRINCIPAL_UNKNOWN),
+                'use_fast': False,
+                'sname': None,
+                'expected_sname': expected_sname,
+                'expect_edata': False
+            }
+        ])
+
+    def test_simple_tgs_no_sname(self):
+        # Plain TGS-REQ with no sname: expect GENERIC or S_PRINCIPAL_UNKNOWN,
+        # with the krbtgt sname filled in on the error reply.
+        expected_sname = self.get_krbtgt_sname()
+
+        self._run_test_sequence([
+            {
+                'rep_type': KRB_TGS_REP,
+                'expected_error_mode': (KDC_ERR_GENERIC, KDC_ERR_S_PRINCIPAL_UNKNOWN),
+                'use_fast': False,
+                'gen_tgt_fn': self.get_user_tgt,
+                'sname': None,
+                'expected_sname': expected_sname,
+                'expect_edata': False
+            }
+        ])
+
+    def test_fast_no_sname(self):
+        # FAST AS-REQ with no sname in the outer request: same errors as the
+        # plain variant, but edata checking is relaxed.
+        expected_sname = self.get_krbtgt_sname()
+
+        self._run_test_sequence([
+            {
+                'rep_type': KRB_AS_REP,
+                'expected_error_mode': (KDC_ERR_GENERIC,
+                                        KDC_ERR_S_PRINCIPAL_UNKNOWN),
+                'use_fast': True,
+                'fast_armor': FX_FAST_ARMOR_AP_REQUEST,
+                'gen_armor_tgt_fn': self.get_mach_tgt,
+                'sname': None,
+                'expected_sname': expected_sname,
+                'strict_edata_checking': False
+            }
+        ])
+
+    def test_fast_tgs_no_sname(self):
+        # FAST TGS-REQ with no sname: same errors as the plain variant, with
+        # relaxed edata checking.
+        expected_sname = self.get_krbtgt_sname()
+
+        self._run_test_sequence([
+            {
+                'rep_type': KRB_TGS_REP,
+                'expected_error_mode': (KDC_ERR_GENERIC, KDC_ERR_S_PRINCIPAL_UNKNOWN),
+                'use_fast': True,
+                'gen_tgt_fn': self.get_user_tgt,
+                'fast_armor': None,
+                'sname': None,
+                'expected_sname': expected_sname,
+                'strict_edata_checking': False
+            }
+        ])
+
+    def test_fast_inner_no_sname(self):
+        # FAST AS-REQ omitting sname only in the *inner* (FAST-protected)
+        # request body; the inner sname should be ignored by the KDC.
+        expected_sname = self.get_krbtgt_sname()
+
+        self._run_test_sequence([
+            {
+                'rep_type': KRB_AS_REP,
+                'expected_error_mode': (KDC_ERR_GENERIC,
+                                        KDC_ERR_S_PRINCIPAL_UNKNOWN),
+                'use_fast': True,
+                'fast_armor': FX_FAST_ARMOR_AP_REQUEST,
+                'gen_armor_tgt_fn': self.get_mach_tgt,
+                'inner_req': {
+                    'sname': None  # should be ignored
+                },
+                'expected_sname': expected_sname,
+                'strict_edata_checking': False
+            }
+        ])
+
+    def test_fast_tgs_inner_no_sname(self):
+        # FAST TGS-REQ omitting sname only in the inner request body; the
+        # inner sname should be ignored by the KDC.
+        expected_sname = self.get_krbtgt_sname()
+
+        self._run_test_sequence([
+            {
+                'rep_type': KRB_TGS_REP,
+                'expected_error_mode': (KDC_ERR_GENERIC,
+                                        KDC_ERR_S_PRINCIPAL_UNKNOWN),
+                'use_fast': True,
+                'gen_tgt_fn': self.get_user_tgt,
+                'fast_armor': None,
+                'inner_req': {
+                    'sname': None  # should be ignored
+                },
+                'expected_sname': expected_sname,
+                'strict_edata_checking': False
+            }
+        ])
+
+    def test_simple_tgs_wrong_principal(self):
+        # NOTE(review): despite the name, this expects success (0) — a TGS
+        # exchange driven with a *machine* TGT rather than the user TGT is
+        # apparently accepted; confirm this is the intended behaviour being
+        # pinned down.
+        self._run_test_sequence([
+            {
+                'rep_type': KRB_TGS_REP,
+                'expected_error_mode': 0,
+                'use_fast': False,
+                'gen_tgt_fn': self.get_mach_tgt
+            }
+        ])
+
+    def test_simple_tgs_service_ticket(self):
+        # Using a user's *service ticket* (not a TGT) in a TGS-REQ must fail
+        # with NOT_US or POLICY.
+        self._run_test_sequence([
+            {
+                'rep_type': KRB_TGS_REP,
+                'expected_error_mode': (KDC_ERR_NOT_US,
+                                        KDC_ERR_POLICY),
+                'use_fast': False,
+                'gen_tgt_fn': self.get_user_service_ticket,
+                'expect_edata': False
+            }
+        ])
+
+    def test_simple_tgs_service_ticket_mach(self):
+        # As above, but with a machine account's service ticket: must fail
+        # with NOT_US or POLICY.
+        self._run_test_sequence([
+            {
+                'rep_type': KRB_TGS_REP,
+                'expected_error_mode': (KDC_ERR_NOT_US,
+                                        KDC_ERR_POLICY),
+                'use_fast': False,
+                'gen_tgt_fn': self.get_mach_service_ticket,
+                'expect_edata': False
+            }
+        ])
+
+    def test_fast_no_claims(self):
+        # FAST AS exchange with PA-PAC-OPTIONS cleared ('0', i.e. no claims);
+        # still expected to succeed via the encrypted challenge.
+        self._run_test_sequence([
+            {
+                'rep_type': KRB_AS_REP,
+                'expected_error_mode': KDC_ERR_PREAUTH_REQUIRED,
+                'use_fast': True,
+                'fast_armor': FX_FAST_ARMOR_AP_REQUEST,
+                'gen_armor_tgt_fn': self.get_mach_tgt,
+                'pac_options': '0'
+            },
+            {
+                'rep_type': KRB_AS_REP,
+                'expected_error_mode': 0,
+                'use_fast': True,
+                'gen_padata_fn': self.generate_enc_challenge_padata,
+                'fast_armor': FX_FAST_ARMOR_AP_REQUEST,
+                'gen_armor_tgt_fn': self.get_mach_tgt,
+                'pac_options': '0'
+            }
+        ])
+
+    def test_fast_tgs_no_claims(self):
+        # FAST TGS exchange with PA-PAC-OPTIONS cleared; expected to succeed.
+        self._run_test_sequence([
+            {
+                'rep_type': KRB_TGS_REP,
+                'expected_error_mode': 0,
+                'use_fast': True,
+                'gen_tgt_fn': self.get_user_tgt,
+                'fast_armor': None,
+                'pac_options': '0'
+            }
+        ])
+
+    def test_fast_no_claims_or_canon(self):
+        # FAST AS exchange with both PA-PAC-OPTIONS and KDC options cleared
+        # (no claims, no canonicalization); still expected to succeed.
+        self._run_test_sequence([
+            {
+                'rep_type': KRB_AS_REP,
+                'expected_error_mode': KDC_ERR_PREAUTH_REQUIRED,
+                'use_fast': True,
+                'fast_armor': FX_FAST_ARMOR_AP_REQUEST,
+                'gen_armor_tgt_fn': self.get_mach_tgt,
+                'pac_options': '0',
+                'kdc_options': '0'
+            },
+            {
+                'rep_type': KRB_AS_REP,
+                'expected_error_mode': 0,
+                'use_fast': True,
+                'gen_padata_fn': self.generate_enc_challenge_padata,
+                'fast_armor': FX_FAST_ARMOR_AP_REQUEST,
+                'gen_armor_tgt_fn': self.get_mach_tgt,
+                'pac_options': '0',
+                'kdc_options': '0'
+            }
+        ])
+
+    def test_fast_tgs_no_claims_or_canon(self):
+        # FAST TGS exchange with PA-PAC-OPTIONS and KDC options cleared;
+        # expected to succeed.
+        self._run_test_sequence([
+            {
+                'rep_type': KRB_TGS_REP,
+                'expected_error_mode': 0,
+                'use_fast': True,
+                'gen_tgt_fn': self.get_user_tgt,
+                'fast_armor': None,
+                'pac_options': '0',
+                'kdc_options': '0'
+            }
+        ])
+
+    def test_fast_no_canon(self):
+        # FAST AS exchange with the canonicalize KDC option cleared; still
+        # expected to succeed via the encrypted challenge.
+        self._run_test_sequence([
+            {
+                'rep_type': KRB_AS_REP,
+                'expected_error_mode': KDC_ERR_PREAUTH_REQUIRED,
+                'use_fast': True,
+                'fast_armor': FX_FAST_ARMOR_AP_REQUEST,
+                'gen_armor_tgt_fn': self.get_mach_tgt,
+                'kdc_options': '0'
+            },
+            {
+                'rep_type': KRB_AS_REP,
+                'expected_error_mode': 0,
+                'use_fast': True,
+                'gen_padata_fn': self.generate_enc_challenge_padata,
+                'fast_armor': FX_FAST_ARMOR_AP_REQUEST,
+                'gen_armor_tgt_fn': self.get_mach_tgt,
+                'kdc_options': '0'
+            }
+        ])
+
+    def test_fast_tgs_no_canon(self):
+        # FAST TGS exchange with the canonicalize KDC option cleared;
+        # expected to succeed.
+        self._run_test_sequence([
+            {
+                'rep_type': KRB_TGS_REP,
+                'expected_error_mode': 0,
+                'use_fast': True,
+                'gen_tgt_fn': self.get_user_tgt,
+                'fast_armor': None,
+                'kdc_options': '0'
+            }
+        ])
+
+    def test_simple_tgs_no_etypes(self):
+        # Plain TGS-REQ with an empty etype list must fail with
+        # ETYPE_NOSUPP.
+        self._run_test_sequence([
+            {
+                'rep_type': KRB_TGS_REP,
+                'expected_error_mode': KDC_ERR_ETYPE_NOSUPP,
+                'use_fast': False,
+                'gen_tgt_fn': self.get_mach_tgt,
+                'etypes': (),
+                'expect_edata': False
+            }
+        ])
+
+    def test_fast_tgs_no_etypes(self):
+        # FAST TGS-REQ with an empty etype list must fail with ETYPE_NOSUPP;
+        # edata checking is relaxed under FAST.
+        self._run_test_sequence([
+            {
+                'rep_type': KRB_TGS_REP,
+                'expected_error_mode': KDC_ERR_ETYPE_NOSUPP,
+                'use_fast': True,
+                'gen_tgt_fn': self.get_mach_tgt,
+                'fast_armor': None,
+                'etypes': (),
+                'strict_edata_checking': False
+            }
+        ])
+
+    def test_simple_no_etypes(self):
+        # Plain AS-REQ with an empty etype list must fail with ETYPE_NOSUPP.
+        self._run_test_sequence([
+            {
+                'rep_type': KRB_AS_REP,
+                'expected_error_mode': KDC_ERR_ETYPE_NOSUPP,
+                'use_fast': False,
+                'etypes': ()
+            }
+        ])
+
+    def test_simple_fast_no_etypes(self):
+        # FAST AS-REQ with an empty etype list must fail with ETYPE_NOSUPP;
+        # edata checking is relaxed under FAST.
+        self._run_test_sequence([
+            {
+                'rep_type': KRB_AS_REP,
+                'expected_error_mode': KDC_ERR_ETYPE_NOSUPP,
+                'use_fast': True,
+                'fast_armor': FX_FAST_ARMOR_AP_REQUEST,
+                'gen_armor_tgt_fn': self.get_mach_tgt,
+                'etypes': (),
+                'strict_edata_checking': False
+            }
+        ])
+
+    def test_empty_fast(self):
+        # Add an empty PA-FX-FAST in the initial AS-REQ. This should get
+        # rejected with a Generic error.
+        self._run_test_sequence([
+            {
+                'rep_type': KRB_AS_REP,
+                'expected_error_mode': (KDC_ERR_GENERIC,
+                                        KDC_ERR_PREAUTH_FAILED),
+                'use_fast': True,
+                'gen_fast_fn': self.generate_empty_fast,
+                'fast_armor': None,
+                'gen_armor_tgt_fn': self.get_mach_tgt,
+                'expect_edata': False
+            }
+        ])
+
+    # Expected to fail against Windows - Windows does not produce an error.
+    def test_fast_unknown_critical_option(self):
+        # Setting an unsupported critical FAST option should draw
+        # UNKNOWN_CRITICAL_FAST_OPTIONS (per RFC 6113; Windows differs).
+        self._run_test_sequence([
+            {
+                'rep_type': KRB_AS_REP,
+                'expected_error_mode': KDC_ERR_PREAUTH_REQUIRED,
+                'use_fast': True,
+                'fast_armor': FX_FAST_ARMOR_AP_REQUEST,
+                'gen_armor_tgt_fn': self.get_mach_tgt
+            },
+            {
+                'rep_type': KRB_AS_REP,
+                'expected_error_mode': KDC_ERR_UNKNOWN_CRITICAL_FAST_OPTIONS,
+                'use_fast': True,
+                'gen_padata_fn': self.generate_enc_challenge_padata,
+                'fast_options': '001',  # unsupported critical option
+                'fast_armor': FX_FAST_ARMOR_AP_REQUEST,
+                'gen_armor_tgt_fn': self.get_mach_tgt
+            }
+        ])
+
+    def test_unarmored_as_req(self):
+        # A FAST AS-REQ with no armor at all must be rejected.
+        self._run_test_sequence([
+            {
+                'rep_type': KRB_AS_REP,
+                'expected_error_mode': (KDC_ERR_GENERIC,
+                                        KDC_ERR_PREAUTH_FAILED),
+                'use_fast': True,
+                'fast_armor': None,  # no armor,
+                'gen_armor_tgt_fn': self.get_mach_tgt,
+                'expect_edata': False
+            }
+        ])
+
+    def test_fast_invalid_armor_type(self):
+        # An armor type of 0 is invalid (only FX_FAST_ARMOR_AP_REQUEST, 1, is
+        # defined) and must draw PREAUTH_FAILED.
+        self._run_test_sequence([
+            {
+                'rep_type': KRB_AS_REP,
+                'expected_error_mode': KDC_ERR_PREAUTH_FAILED,
+                'use_fast': True,
+                'fast_armor': 0,  # invalid armor type
+                'gen_armor_tgt_fn': self.get_mach_tgt
+            }
+        ])
+
+    def test_fast_invalid_armor_type2(self):
+        # An armor type of 2 is likewise undefined and must draw
+        # PREAUTH_FAILED.
+        self._run_test_sequence([
+            {
+                'rep_type': KRB_AS_REP,
+                'expected_error_mode': KDC_ERR_PREAUTH_FAILED,
+                'use_fast': True,
+                'fast_armor': 2,  # invalid armor type
+                'gen_armor_tgt_fn': self.get_mach_tgt
+            }
+        ])
+
+    def test_fast_encrypted_challenge(self):
+        # Standard FAST AS exchange: PREAUTH_REQUIRED, then success with an
+        # encrypted-challenge padata under AP-REQ armor.
+        self._run_test_sequence([
+            {
+                'rep_type': KRB_AS_REP,
+                'expected_error_mode': KDC_ERR_PREAUTH_REQUIRED,
+                'use_fast': True,
+                'fast_armor': FX_FAST_ARMOR_AP_REQUEST,
+                'gen_armor_tgt_fn': self.get_mach_tgt
+            },
+            {
+                'rep_type': KRB_AS_REP,
+                'expected_error_mode': 0,
+                'use_fast': True,
+                'gen_padata_fn': self.generate_enc_challenge_padata,
+                'fast_armor': FX_FAST_ARMOR_AP_REQUEST,
+                'gen_armor_tgt_fn': self.get_mach_tgt
+            }
+        ])
+
+    def test_fast_encrypted_challenge_as_req_self(self):
+        # FAST AS exchange where a computer account requests a ticket to
+        # itself.
+        self._run_test_sequence([
+            {
+                'rep_type': KRB_AS_REP,
+                'expected_error_mode': KDC_ERR_PREAUTH_REQUIRED,
+                'use_fast': True,
+                'fast_armor': FX_FAST_ARMOR_AP_REQUEST,
+                'gen_armor_tgt_fn': self.get_mach_tgt,
+                'as_req_self': True
+            },
+            {
+                'rep_type': KRB_AS_REP,
+                'expected_error_mode': 0,
+                'use_fast': True,
+                'gen_padata_fn': self.generate_enc_challenge_padata,
+                'fast_armor': FX_FAST_ARMOR_AP_REQUEST,
+                'gen_armor_tgt_fn': self.get_mach_tgt,
+                'as_req_self': True
+            }
+        ], client_account=self.AccountType.COMPUTER)
+
+    def test_fast_encrypted_challenge_wrong_key(self):
+        # An encrypted challenge generated with the wrong client key must be
+        # rejected with PREAUTH_FAILED.
+        self._run_test_sequence([
+            {
+                'rep_type': KRB_AS_REP,
+                'expected_error_mode': KDC_ERR_PREAUTH_REQUIRED,
+                'use_fast': True,
+                'fast_armor': FX_FAST_ARMOR_AP_REQUEST,
+                'gen_armor_tgt_fn': self.get_mach_tgt
+            },
+            {
+                'rep_type': KRB_AS_REP,
+                'expected_error_mode': KDC_ERR_PREAUTH_FAILED,
+                'use_fast': True,
+                'gen_padata_fn': self.generate_enc_challenge_padata_wrong_key,
+                'fast_armor': FX_FAST_ARMOR_AP_REQUEST,
+                'gen_armor_tgt_fn': self.get_mach_tgt
+            }
+        ])
+
+    def test_fast_encrypted_challenge_wrong_key_kdc(self):
+        # An encrypted challenge generated with the wrong KDC-side key must
+        # also be rejected with PREAUTH_FAILED.
+        self._run_test_sequence([
+            {
+                'rep_type': KRB_AS_REP,
+                'expected_error_mode': KDC_ERR_PREAUTH_REQUIRED,
+                'use_fast': True,
+                'fast_armor': FX_FAST_ARMOR_AP_REQUEST,
+                'gen_armor_tgt_fn': self.get_mach_tgt
+            },
+            {
+                'rep_type': KRB_AS_REP,
+                'expected_error_mode': KDC_ERR_PREAUTH_FAILED,
+                'use_fast': True,
+                'gen_padata_fn':
+                    self.generate_enc_challenge_padata_wrong_key_kdc,
+                'fast_armor': FX_FAST_ARMOR_AP_REQUEST,
+                'gen_armor_tgt_fn': self.get_mach_tgt
+            }
+        ])
+
+ def test_fast_encrypted_challenge_no_fast(self):
+ self._run_test_sequence([
+ {
+ 'rep_type': KRB_AS_REP,
+ 'expected_error_mode': KDC_ERR_PREAUTH_REQUIRED,
+ 'use_fast': False
+ },
+ {
+ 'rep_type': KRB_AS_REP,
+ 'expected_error_mode': (KDC_ERR_PREAUTH_FAILED,
+ KDC_ERR_PREAUTH_REQUIRED),
+ 'use_fast': False,
+ 'gen_padata_fn': self.generate_enc_challenge_padata_wrong_key
+ }
+ ])
+
    # Expected to fail against Windows - Windows does not produce an error.
    def test_fast_encrypted_challenge_clock_skew(self):
        # The KDC is supposed to confirm that the timestamp is within its
        # current clock skew, and return KRB_APP_ERR_SKEW if it is not (RFC6113
        # 5.4.6). However, this test fails against Windows, which accepts a
        # skewed timestamp in the encrypted challenge.
        self._run_test_sequence([
            {
                'rep_type': KRB_AS_REP,
                'expected_error_mode': KDC_ERR_PREAUTH_REQUIRED,
                'use_fast': True,
                'fast_armor': FX_FAST_ARMOR_AP_REQUEST,
                'gen_armor_tgt_fn': self.get_mach_tgt
            },
            {
                'rep_type': KRB_AS_REP,
                'expected_error_mode': KDC_ERR_SKEW,
                'use_fast': True,
                # Skew the challenge timestamp by 10000 seconds.
                'gen_padata_fn': functools.partial(
                    self.generate_enc_challenge_padata,
                    skew=10000),
                'fast_armor': FX_FAST_ARMOR_AP_REQUEST,
                'gen_armor_tgt_fn': self.get_mach_tgt
            }
        ])
+
    def test_fast_invalid_tgt(self):
        # The armor ticket 'sname' field is required to identify the target
        # realm TGS (RFC6113 5.4.1.1). However, this test fails against
        # Windows, which will still accept a service ticket identifying a
        # different server principal.
        self._run_test_sequence([
            {
                'rep_type': KRB_AS_REP,
                'expected_error_mode': (KDC_ERR_POLICY,
                                        KDC_ERR_S_PRINCIPAL_UNKNOWN),
                'use_fast': True,
                'fast_armor': FX_FAST_ARMOR_AP_REQUEST,
                'gen_armor_tgt_fn': self.get_user_service_ticket
                # ticket not identifying TGS of current
                # realm
            }
        ])

    # Similarly, this test fails against Windows, which accepts a service
    # ticket identifying a different server principal.
    def test_fast_invalid_tgt_mach(self):
        # As test_fast_invalid_tgt, but armoring with a machine service
        # ticket instead of a user service ticket.
        self._run_test_sequence([
            {
                'rep_type': KRB_AS_REP,
                'expected_error_mode': (KDC_ERR_POLICY,
                                        KDC_ERR_S_PRINCIPAL_UNKNOWN),
                'use_fast': True,
                'fast_armor': FX_FAST_ARMOR_AP_REQUEST,
                'gen_armor_tgt_fn': self.get_mach_service_ticket
                # ticket not identifying TGS of current
                # realm
            }
        ])

    def test_fast_invalid_checksum_tgt(self):
        # The armor ticket 'sname' field is required to identify the target
        # realm TGS (RFC6113 5.4.1.1). However, this test fails against
        # Windows, which will still accept a service ticket identifying a
        # different server principal even if the ticket checksum is invalid.
        self._run_test_sequence([
            {
                'rep_type': KRB_AS_REP,
                'expected_error_mode': (KDC_ERR_POLICY,
                                        KDC_ERR_S_PRINCIPAL_UNKNOWN),
                'use_fast': True,
                'fast_armor': FX_FAST_ARMOR_AP_REQUEST,
                'gen_armor_tgt_fn': self.get_service_ticket_invalid_checksum
            }
        ])
+
    def test_fast_enc_timestamp(self):
        # Provide ENC-TIMESTAMP as FAST padata when we should be providing
        # ENCRYPTED-CHALLENGE - ensure that we get PREAUTH_REQUIRED.
        self._run_test_sequence([
            {
                'rep_type': KRB_AS_REP,
                'expected_error_mode': KDC_ERR_PREAUTH_REQUIRED,
                'use_fast': True,
                'fast_armor': FX_FAST_ARMOR_AP_REQUEST,
                'gen_armor_tgt_fn': self.get_mach_tgt
            },
            {
                'rep_type': KRB_AS_REP,
                'expected_error_mode': (KDC_ERR_PREAUTH_REQUIRED,
                                        KDC_ERR_POLICY),
                'use_fast': True,
                'gen_padata_fn': self.generate_enc_timestamp_padata,
                'fast_armor': FX_FAST_ARMOR_AP_REQUEST,
                'gen_armor_tgt_fn': self.get_mach_tgt
            }
        ])

    def test_fast(self):
        # Baseline FAST AS-REQ: PREAUTH_REQUIRED first, then success with
        # encrypted-challenge padata under AP-REQ armor.
        self._run_test_sequence([
            {
                'rep_type': KRB_AS_REP,
                'expected_error_mode': KDC_ERR_PREAUTH_REQUIRED,
                'use_fast': True,
                'fast_armor': FX_FAST_ARMOR_AP_REQUEST,
                'gen_armor_tgt_fn': self.get_mach_tgt
            },
            {
                'rep_type': KRB_AS_REP,
                'expected_error_mode': 0,
                'use_fast': True,
                'gen_padata_fn': self.generate_enc_challenge_padata,
                'fast_armor': FX_FAST_ARMOR_AP_REQUEST,
                'gen_armor_tgt_fn': self.get_mach_tgt
            }
        ])
+
    def test_fast_tgs(self):
        # FAST TGS-REQ with implicit armor (no explicit armor ticket).
        self._run_test_sequence([
            {
                'rep_type': KRB_TGS_REP,
                'expected_error_mode': 0,
                'use_fast': True,
                'gen_tgt_fn': self.get_user_tgt,
                'fast_armor': None
            }
        ])

    def test_fast_tgs_armor(self):
        # FAST TGS-REQ with explicit AP-REQ armor from the machine TGT.
        self._run_test_sequence([
            {
                'rep_type': KRB_TGS_REP,
                'expected_error_mode': 0,
                'use_fast': True,
                'gen_tgt_fn': self.get_user_tgt,
                'gen_armor_tgt_fn': self.get_mach_tgt,
                'fast_armor': FX_FAST_ARMOR_AP_REQUEST
            }
        ])

    def test_fast_session_key(self):
        # Ensure that specified APOptions are ignored.
        self._run_test_sequence([
            {
                'rep_type': KRB_AS_REP,
                'expected_error_mode': KDC_ERR_PREAUTH_REQUIRED,
                'use_fast': True,
                'fast_armor': FX_FAST_ARMOR_AP_REQUEST,
                'gen_armor_tgt_fn': self.get_mach_tgt,
                'fast_ap_options': str(krb5_asn1.APOptions('use-session-key'))
            },
            {
                'rep_type': KRB_AS_REP,
                'expected_error_mode': 0,
                'use_fast': True,
                'gen_padata_fn': self.generate_enc_challenge_padata,
                'fast_armor': FX_FAST_ARMOR_AP_REQUEST,
                'gen_armor_tgt_fn': self.get_mach_tgt,
                'fast_ap_options': str(krb5_asn1.APOptions('use-session-key'))
            }
        ])

    def test_fast_tgs_armor_session_key(self):
        # Ensure that specified APOptions are ignored.
        self._run_test_sequence([
            {
                'rep_type': KRB_TGS_REP,
                'expected_error_mode': 0,
                'use_fast': True,
                'gen_tgt_fn': self.get_user_tgt,
                'gen_armor_tgt_fn': self.get_mach_tgt,
                'fast_armor': FX_FAST_ARMOR_AP_REQUEST,
                'fast_ap_options': str(krb5_asn1.APOptions('use-session-key'))
            }
        ])
+
    def test_fast_enc_pa_rep(self):
        # Request PA-REQ-ENC-PA-REP in an AS-REQ and expect the resulting
        # ticket to carry the 'enc-pa-rep' flag.
        self._run_test_sequence([
            {
                'rep_type': KRB_AS_REP,
                'expected_error_mode': KDC_ERR_PREAUTH_REQUIRED,
                'use_fast': True,
                'fast_armor': FX_FAST_ARMOR_AP_REQUEST,
                'gen_armor_tgt_fn': self.get_mach_tgt,
                'expected_flags': 'enc-pa-rep'
            },
            {
                'rep_type': KRB_AS_REP,
                'expected_error_mode': 0,
                'use_fast': True,
                'gen_padata_fn': self.generate_enc_pa_rep_challenge_padata,
                'fast_armor': FX_FAST_ARMOR_AP_REQUEST,
                'gen_armor_tgt_fn': self.get_mach_tgt,
                'expected_flags': 'enc-pa-rep'
            }
        ])

    # Currently we only send PADATA-REQ-ENC-PA-REP for AS-REQ requests.
    def test_fast_tgs_enc_pa_rep(self):
        self._run_test_sequence([
            {
                'rep_type': KRB_TGS_REP,
                'expected_error_mode': 0,
                'use_fast': True,
                'gen_tgt_fn': self.get_user_tgt,
                'fast_armor': None,
                'gen_padata_fn': self.generate_enc_pa_rep_padata,
                'expected_flags': 'enc-pa-rep'
            }
        ])

    # Currently we only send PADATA-REQ-ENC-PA-REP for AS-REQ requests.
    def test_fast_tgs_armor_enc_pa_rep(self):
        self._run_test_sequence([
            {
                'rep_type': KRB_TGS_REP,
                'expected_error_mode': 0,
                'use_fast': True,
                'gen_tgt_fn': self.get_user_tgt,
                'gen_armor_tgt_fn': self.get_mach_tgt,
                'fast_armor': FX_FAST_ARMOR_AP_REQUEST,
                'gen_padata_fn': self.generate_enc_pa_rep_padata,
                'expected_flags': 'enc-pa-rep'
            }
        ])
+
    # The following tests each override one field of the outer (unprotected)
    # FAST request via 'outer_req'; the KDC should honour the inner request
    # and ignore the overridden outer field.

    def test_fast_outer_wrong_realm(self):
        self._run_test_sequence([
            {
                'rep_type': KRB_AS_REP,
                'expected_error_mode': KDC_ERR_PREAUTH_REQUIRED,
                'use_fast': True,
                'fast_armor': FX_FAST_ARMOR_AP_REQUEST,
                'gen_armor_tgt_fn': self.get_mach_tgt,
                'outer_req': {
                    'realm': 'TEST'  # should be ignored
                }
            },
            {
                'rep_type': KRB_AS_REP,
                'expected_error_mode': 0,
                'use_fast': True,
                'gen_padata_fn': self.generate_enc_challenge_padata,
                'fast_armor': FX_FAST_ARMOR_AP_REQUEST,
                'gen_armor_tgt_fn': self.get_mach_tgt,
                'outer_req': {
                    'realm': 'TEST'  # should be ignored
                }
            }
        ])

    def test_fast_tgs_outer_wrong_realm(self):
        self._run_test_sequence([
            {
                'rep_type': KRB_TGS_REP,
                'expected_error_mode': 0,
                'use_fast': True,
                'gen_tgt_fn': self.get_user_tgt,
                'fast_armor': None,
                'outer_req': {
                    'realm': 'TEST'  # should be ignored
                }
            }
        ])

    def test_fast_outer_wrong_nonce(self):
        self._run_test_sequence([
            {
                'rep_type': KRB_AS_REP,
                'expected_error_mode': KDC_ERR_PREAUTH_REQUIRED,
                'use_fast': True,
                'fast_armor': FX_FAST_ARMOR_AP_REQUEST,
                'gen_armor_tgt_fn': self.get_mach_tgt,
                'outer_req': {
                    'nonce': '123'  # should be ignored
                }
            },
            {
                'rep_type': KRB_AS_REP,
                'expected_error_mode': 0,
                'use_fast': True,
                'gen_padata_fn': self.generate_enc_challenge_padata,
                'fast_armor': FX_FAST_ARMOR_AP_REQUEST,
                'gen_armor_tgt_fn': self.get_mach_tgt,
                'outer_req': {
                    'nonce': '123'  # should be ignored
                }
            }
        ])

    def test_fast_tgs_outer_wrong_nonce(self):
        self._run_test_sequence([
            {
                'rep_type': KRB_TGS_REP,
                'expected_error_mode': 0,
                'use_fast': True,
                'gen_tgt_fn': self.get_user_tgt,
                'fast_armor': None,
                'outer_req': {
                    'nonce': '123'  # should be ignored
                }
            }
        ])

    def test_fast_outer_wrong_flags(self):
        self._run_test_sequence([
            {
                'rep_type': KRB_AS_REP,
                'expected_error_mode': KDC_ERR_PREAUTH_REQUIRED,
                'use_fast': True,
                'fast_armor': FX_FAST_ARMOR_AP_REQUEST,
                'gen_armor_tgt_fn': self.get_mach_tgt,
                'outer_req': {
                    'kdc-options': '11111111111111111'  # should be ignored
                }
            },
            {
                'rep_type': KRB_AS_REP,
                'expected_error_mode': 0,
                'use_fast': True,
                'gen_padata_fn': self.generate_enc_challenge_padata,
                'fast_armor': FX_FAST_ARMOR_AP_REQUEST,
                'gen_armor_tgt_fn': self.get_mach_tgt,
                'outer_req': {
                    'kdc-options': '11111111111111111'  # should be ignored
                }
            }
        ])

    def test_fast_tgs_outer_wrong_flags(self):
        self._run_test_sequence([
            {
                'rep_type': KRB_TGS_REP,
                'expected_error_mode': 0,
                'use_fast': True,
                'gen_tgt_fn': self.get_user_tgt,
                'fast_armor': None,
                'outer_req': {
                    'kdc-options': '11111111111111111'  # should be ignored
                }
            }
        ])

    def test_fast_outer_no_sname(self):
        self._run_test_sequence([
            {
                'rep_type': KRB_AS_REP,
                'expected_error_mode': KDC_ERR_PREAUTH_REQUIRED,
                'use_fast': True,
                'fast_armor': FX_FAST_ARMOR_AP_REQUEST,
                'gen_armor_tgt_fn': self.get_mach_tgt,
                'outer_req': {
                    'sname': None  # should be ignored
                }
            },
            {
                'rep_type': KRB_AS_REP,
                'expected_error_mode': 0,
                'use_fast': True,
                'gen_padata_fn': self.generate_enc_challenge_padata,
                'fast_armor': FX_FAST_ARMOR_AP_REQUEST,
                'gen_armor_tgt_fn': self.get_mach_tgt,
                'outer_req': {
                    'sname': None  # should be ignored
                }
            }
        ])

    def test_fast_tgs_outer_no_sname(self):
        self._run_test_sequence([
            {
                'rep_type': KRB_TGS_REP,
                'expected_error_mode': 0,
                'use_fast': True,
                'gen_tgt_fn': self.get_user_tgt,
                'fast_armor': None,
                'outer_req': {
                    'sname': None  # should be ignored
                }
            }
        ])

    def test_fast_outer_wrong_till(self):
        self._run_test_sequence([
            {
                'rep_type': KRB_AS_REP,
                'expected_error_mode': KDC_ERR_PREAUTH_REQUIRED,
                'use_fast': True,
                'fast_armor': FX_FAST_ARMOR_AP_REQUEST,
                'gen_armor_tgt_fn': self.get_mach_tgt,
                'outer_req': {
                    'till': '15000101000000Z'  # should be ignored
                }
            },
            {
                'rep_type': KRB_AS_REP,
                'expected_error_mode': 0,
                'use_fast': True,
                'gen_padata_fn': self.generate_enc_challenge_padata,
                'fast_armor': FX_FAST_ARMOR_AP_REQUEST,
                'gen_armor_tgt_fn': self.get_mach_tgt,
                'outer_req': {
                    'till': '15000101000000Z'  # should be ignored
                }
            }
        ])

    def test_fast_tgs_outer_wrong_till(self):
        self._run_test_sequence([
            {
                'rep_type': KRB_TGS_REP,
                'expected_error_mode': 0,
                'use_fast': True,
                'gen_tgt_fn': self.get_user_tgt,
                'fast_armor': None,
                'outer_req': {
                    'till': '15000101000000Z'  # should be ignored
                }
            }
        ])
+
    def test_fast_authdata_fast_used(self):
        # AD-fx-fast-used in the authenticator is acceptable when the request
        # actually uses FAST.
        self._run_test_sequence([
            {
                'rep_type': KRB_TGS_REP,
                'expected_error_mode': 0,
                'use_fast': True,
                'gen_authdata_fn': self.generate_fast_used_auth_data,
                'gen_tgt_fn': self.get_user_tgt,
                'fast_armor': None
            }
        ])

    def test_fast_authdata_fast_not_used(self):
        # The AD-fx-fast-used authdata type can be included in the
        # authenticator or the TGT authentication data to indicate that FAST
        # must be used. The KDC must return KRB_APP_ERR_MODIFIED if it receives
        # this authdata type in a request not using FAST (RFC6113 5.4.2).
        self._run_test_sequence([
            # This request works without FAST.
            {
                'rep_type': KRB_TGS_REP,
                'expected_error_mode': 0,
                'use_fast': False,
                'gen_tgt_fn': self.get_user_tgt
            },
            # Add the 'FAST used' auth data and it now fails.
            {
                'rep_type': KRB_TGS_REP,
                'expected_error_mode': (KDC_ERR_MODIFIED,
                                        KDC_ERR_GENERIC),
                'use_fast': False,
                'gen_authdata_fn': self.generate_fast_used_auth_data,
                'gen_tgt_fn': self.get_user_tgt,
                'expect_edata': False
            }
        ])

    def test_fast_ad_fx_fast_armor(self):
        expected_sname = self.get_krbtgt_sname()

        # If the authenticator or TGT authentication data contains the
        # AD-fx-fast-armor authdata type, the KDC must reject the request
        # (RFC6113 5.4.1.1).
        self._run_test_sequence([
            # This request works.
            {
                'rep_type': KRB_TGS_REP,
                'expected_error_mode': 0,
                'use_fast': True,
                'gen_tgt_fn': self.get_user_tgt,
                'fast_armor': None
            },
            # Add the 'FAST armor' auth data and it now fails.
            {
                'rep_type': KRB_TGS_REP,
                'expected_error_mode': (KDC_ERR_GENERIC,
                                        KDC_ERR_BAD_INTEGRITY),
                'use_fast': True,
                'gen_authdata_fn': self.generate_fast_armor_auth_data,
                'gen_tgt_fn': self.get_user_tgt,
                'fast_armor': None,
                'expected_sname': expected_sname,
                'expect_edata': False
            }
        ])
+
    def test_fast_ad_fx_fast_armor2(self):
        # Show that we can still use the AD-fx-fast-armor authorization data in
        # FAST armor tickets.
        self._run_test_sequence([
            {
                'rep_type': KRB_AS_REP,
                'expected_error_mode': KDC_ERR_PREAUTH_REQUIRED,
                'use_fast': True,
                'fast_armor': FX_FAST_ARMOR_AP_REQUEST,
                'gen_armor_tgt_fn': self.get_mach_tgt
            },
            {
                'rep_type': KRB_AS_REP,
                'expected_error_mode': 0,
                'use_fast': True,
                'gen_padata_fn': self.generate_enc_challenge_padata,
                'gen_authdata_fn': self.generate_fast_armor_auth_data,
                # include the auth data in the FAST armor.
                'fast_armor': FX_FAST_ARMOR_AP_REQUEST,
                'gen_armor_tgt_fn': self.get_mach_tgt
            }
        ])

    def test_fast_ad_fx_fast_armor_ticket(self):
        expected_sname = self.get_krbtgt_sname()

        # If the authenticator or TGT authentication data contains the
        # AD-fx-fast-armor authdata type, the KDC must reject the request
        # (RFC6113 5.4.2).
        self._run_test_sequence([
            # This request works.
            {
                'rep_type': KRB_TGS_REP,
                'expected_error_mode': 0,
                'use_fast': True,
                'gen_tgt_fn': self.get_user_tgt,
                'fast_armor': None
            },
            # Add AD-fx-fast-armor authdata element to user TGT. This request
            # fails.
            {
                'rep_type': KRB_TGS_REP,
                'expected_error_mode': (KDC_ERR_GENERIC,
                                        KDC_ERR_BAD_INTEGRITY),
                'use_fast': True,
                'gen_tgt_fn': self.gen_tgt_fast_armor_auth_data,
                'fast_armor': None,
                'expected_sname': expected_sname,
                'expect_edata': False
            }
        ])

    def test_fast_ad_fx_fast_armor_enc_auth_data(self):
        # If the authenticator or TGT authentication data contains the
        # AD-fx-fast-armor authdata type, the KDC must reject the request
        # (RFC6113 5.4.2). However, the KDC should not reject a request that
        # contains this authdata type in enc-authorization-data.
        self._run_test_sequence([
            # This request works.
            {
                'rep_type': KRB_TGS_REP,
                'expected_error_mode': 0,
                'use_fast': True,
                'gen_tgt_fn': self.get_user_tgt,
                'fast_armor': None
            },
            # Add AD-fx-fast-armor authdata element to
            # enc-authorization-data. This request also works.
            {
                'rep_type': KRB_TGS_REP,
                'expected_error_mode': 0,
                'use_fast': True,
                'gen_enc_authdata_fn': self.generate_fast_armor_auth_data,
                'gen_tgt_fn': self.get_user_tgt,
                'fast_armor': None
            }
        ])

    def test_fast_ad_fx_fast_armor_ticket2(self):
        self._run_test_sequence([
            # Show that we can still use the modified ticket as armor.
            {
                'rep_type': KRB_AS_REP,
                'expected_error_mode': KDC_ERR_PREAUTH_REQUIRED,
                'use_fast': True,
                'fast_armor': FX_FAST_ARMOR_AP_REQUEST,
                'gen_armor_tgt_fn': self.get_mach_tgt
            },
            {
                'rep_type': KRB_AS_REP,
                'expected_error_mode': 0,
                'use_fast': True,
                'gen_padata_fn': self.generate_enc_challenge_padata,
                'fast_armor': FX_FAST_ARMOR_AP_REQUEST,
                'gen_armor_tgt_fn': self.gen_tgt_fast_armor_auth_data
            }
        ])
+
    def test_fast_tgs_service_ticket(self):
        # Try to use a non-TGT ticket to establish an armor key, which fails
        # (RFC6113 5.4.2).
        self._run_test_sequence([
            {
                'rep_type': KRB_TGS_REP,
                'expected_error_mode': (KDC_ERR_NOT_US,
                                        KDC_ERR_POLICY),
                'use_fast': True,
                'gen_tgt_fn': self.get_user_service_ticket,  # fails
                'fast_armor': None
            }
        ])

    def test_fast_tgs_service_ticket_mach(self):
        # As above, but with a machine service ticket rather than a user one.
        self._run_test_sequence([
            {
                'rep_type': KRB_TGS_REP,
                'expected_error_mode': (KDC_ERR_NOT_US,  # fails
                                        KDC_ERR_POLICY),
                'use_fast': True,
                'gen_tgt_fn': self.get_mach_service_ticket,
                'fast_armor': None
            }
        ])

    def test_simple_tgs_no_subkey(self):
        # Without FAST, omitting the authenticator subkey is acceptable.
        self._run_test_sequence([
            {
                'rep_type': KRB_TGS_REP,
                'expected_error_mode': 0,
                'use_fast': False,
                'gen_tgt_fn': self.get_user_tgt,
                'include_subkey': False
            }
        ])

    def test_fast_tgs_no_subkey(self):
        expected_sname = self.get_krbtgt_sname()

        # Show that omitting the subkey in the TGS-REQ authenticator fails
        # (RFC6113 5.4.2).
        self._run_test_sequence([
            {
                'rep_type': KRB_TGS_REP,
                'expected_error_mode': (KDC_ERR_GENERIC,
                                        KDC_ERR_PREAUTH_FAILED),
                'use_fast': True,
                'gen_tgt_fn': self.get_user_tgt,
                'fast_armor': None,
                'include_subkey': False,
                'expected_sname': expected_sname,
                'expect_edata': False
            }
        ])
+
    def test_fast_hide_client_names(self):
        # With the FAST 'hide-client-names' option, the reply is expected to
        # identify the client anonymously ('expected_anon').
        self._run_test_sequence([
            {
                'rep_type': KRB_AS_REP,
                'expected_error_mode': KDC_ERR_PREAUTH_REQUIRED,
                'use_fast': True,
                'fast_armor': FX_FAST_ARMOR_AP_REQUEST,
                'gen_armor_tgt_fn': self.get_mach_tgt,
                'fast_options': str(krb5_asn1.FastOptions(
                    'hide-client-names')),
                'expected_anon': True
            },
            {
                'rep_type': KRB_AS_REP,
                'expected_error_mode': 0,
                'use_fast': True,
                'gen_padata_fn': self.generate_enc_challenge_padata,
                'fast_armor': FX_FAST_ARMOR_AP_REQUEST,
                'gen_armor_tgt_fn': self.get_mach_tgt,
                'fast_options': str(krb5_asn1.FastOptions(
                    'hide-client-names')),
                'expected_anon': True
            }
        ])

    def test_fast_tgs_hide_client_names(self):
        # 'hide-client-names' for a TGS-REQ.
        self._run_test_sequence([
            {
                'rep_type': KRB_TGS_REP,
                'expected_error_mode': 0,
                'use_fast': True,
                'gen_tgt_fn': self.get_user_tgt,
                'fast_armor': None,
                'fast_options': str(krb5_asn1.FastOptions(
                    'hide-client-names')),
                'expected_anon': True
            }
        ])

    def test_fast_encrypted_challenge_replay(self):
        # The KDC is supposed to check that encrypted challenges are not
        # replays (RFC6113 5.4.6), but timestamps may be reused; an encrypted
        # challenge is only considered a replay if the ciphertext is identical
        # to a previous challenge. Windows does not perform this check.

        self._run_test_sequence([
            {
                'rep_type': KRB_AS_REP,
                'expected_error_mode': KDC_ERR_PREAUTH_REQUIRED,
                'use_fast': True,
                'fast_armor': FX_FAST_ARMOR_AP_REQUEST,
                'gen_armor_tgt_fn': self.get_mach_tgt
            },
            {
                'rep_type': KRB_AS_REP,
                'expected_error_mode': 0,
                'use_fast': True,
                'gen_padata_fn': self.generate_enc_challenge_padata_replay,
                'fast_armor': FX_FAST_ARMOR_AP_REQUEST,
                'gen_armor_tgt_fn': self.get_mach_tgt,
                # Send the identical padata twice.
                'repeat': 2
            }
        ])
+
    def test_fx_cookie_fast(self):
        """Test that the FAST cookie is present and that its value is as
        expected when FAST is used."""
        kdc_exchange_dict = self._run_test_sequence([
            {
                'rep_type': KRB_AS_REP,
                'expected_error_mode': KDC_ERR_PREAUTH_REQUIRED,
                'use_fast': True,
                'fast_armor': FX_FAST_ARMOR_AP_REQUEST,
                'gen_armor_tgt_fn': self.get_mach_tgt
            },
        ])

        cookie = kdc_exchange_dict.get('fast_cookie')
        self.assertEqual(b'Microsoft', cookie)

    def test_fx_cookie_no_fast(self):
        """Test that the FAST cookie is present and that its value is as
        expected when FAST is not used."""
        kdc_exchange_dict = self._run_test_sequence([
            {
                'rep_type': KRB_AS_REP,
                'expected_error_mode': KDC_ERR_PREAUTH_REQUIRED,
                'use_fast': False
            },
        ])

        cookie = kdc_exchange_dict.get('fast_cookie')
        # NOTE(review): the non-FAST cookie ends with a NUL byte, unlike the
        # FAST one above — presumably matching observed KDC behaviour.
        self.assertEqual(b'Microsof\x00', cookie)

    def test_unsolicited_fx_cookie_preauth(self):
        """Test sending an unsolicited FX-COOKIE in an AS-REQ without
        pre-authentication data."""

        # Include a FAST cookie.
        fast_cookie = self.create_fast_cookie('Samba-Test')

        kdc_exchange_dict = self._run_test_sequence([
            {
                'rep_type': KRB_AS_REP,
                'expected_error_mode': KDC_ERR_PREAUTH_REQUIRED,
                'use_fast': True,
                'fast_armor': FX_FAST_ARMOR_AP_REQUEST,
                'gen_armor_tgt_fn': self.get_mach_tgt,
                'fast_cookie': fast_cookie,
            },
        ])

        got_cookie = kdc_exchange_dict.get('fast_cookie')
        self.assertEqual(b'Microsoft', got_cookie)

    def test_unsolicited_fx_cookie_fast(self):
        """Test sending an unsolicited FX-COOKIE in an AS-REQ with
        pre-authentication data."""

        # Include a FAST cookie.
        fast_cookie = self.create_fast_cookie('Samba-Test')

        kdc_exchange_dict = self._run_test_sequence([
            {
                'rep_type': KRB_AS_REP,
                'expected_error_mode': KDC_ERR_PREAUTH_REQUIRED,
                'use_fast': True,
                'fast_armor': FX_FAST_ARMOR_AP_REQUEST,
                'gen_armor_tgt_fn': self.get_mach_tgt,
            },
            {
                'rep_type': KRB_AS_REP,
                'expected_error_mode': 0,
                'use_fast': True,
                'gen_padata_fn': self.generate_enc_challenge_padata,
                'fast_armor': FX_FAST_ARMOR_AP_REQUEST,
                'gen_armor_tgt_fn': self.get_mach_tgt,
                'fast_cookie': fast_cookie,
            }
        ])

        got_cookie = kdc_exchange_dict.get('fast_cookie')
        # No cookie is returned on the successful exchange.
        self.assertIsNone(got_cookie)
+
+ def generate_enc_timestamp_padata(self,
+ kdc_exchange_dict,
+ callback_dict,
+ req_body):
+ key = kdc_exchange_dict['preauth_key']
+
+ padata = self.get_enc_timestamp_pa_data_from_key(key)
+ return [padata], req_body
+
    def generate_enc_challenge_padata(self,
                                      kdc_exchange_dict,
                                      callback_dict,
                                      req_body,
                                      skew=0):
        # Build PA-ENCRYPTED-CHALLENGE padata using the client challenge key
        # derived from the armor key and the pre-authentication key.
        # 'skew' (seconds) offsets the challenge timestamp, e.g. for the
        # clock-skew test.
        armor_key = kdc_exchange_dict['armor_key']
        key = kdc_exchange_dict['preauth_key']

        client_challenge_key = (
            self.generate_client_challenge_key(armor_key, key))
        padata = self.get_challenge_pa_data(client_challenge_key, skew=skew)
        return [padata], req_body
+
    def generate_enc_challenge_padata_wrong_key_kdc(self,
                                                    kdc_exchange_dict,
                                                    callback_dict,
                                                    req_body):
        # Deliberately encrypt the challenge with the KDC challenge key
        # instead of the client challenge key, so the KDC should reject it.
        armor_key = kdc_exchange_dict['armor_key']
        key = kdc_exchange_dict['preauth_key']

        kdc_challenge_key = (
            self.generate_kdc_challenge_key(armor_key, key))
        padata = self.get_challenge_pa_data(kdc_challenge_key)
        return [padata], req_body
+
+ def generate_enc_challenge_padata_wrong_key(self,
+ kdc_exchange_dict,
+ callback_dict,
+ req_body):
+ key = kdc_exchange_dict['preauth_key']
+
+ padata = self.get_challenge_pa_data(key)
+ return [padata], req_body
+
    def generate_enc_challenge_padata_replay(self,
                                             kdc_exchange_dict,
                                             callback_dict,
                                             req_body):
        # Return the identical padata on every call: generate the challenge
        # once, cache it in callback_dict, and replay the cached value on
        # subsequent invocations (used with 'repeat' to test replay handling).
        padata = callback_dict.get('replay_padata')

        if padata is None:
            armor_key = kdc_exchange_dict['armor_key']
            key = kdc_exchange_dict['preauth_key']

            client_challenge_key = (
                self.generate_client_challenge_key(armor_key, key))
            padata = self.get_challenge_pa_data(client_challenge_key)
            callback_dict['replay_padata'] = padata

        return [padata], req_body
+
+ def generate_empty_fast(self,
+ _kdc_exchange_dict,
+ _callback_dict,
+ _req_body,
+ _fast_padata,
+ _fast_armor,
+ _checksum,
+ _fast_options=''):
+ fast_padata = self.PA_DATA_create(PADATA_FX_FAST, b'')
+
+ return fast_padata
+
+ def _run_test_sequence(self, test_sequence,
+ client_account=KDCBaseTest.AccountType.USER,
+ client_opts=None,
+ armor_opts=None):
+ if self.strict_checking:
+ self.check_kdc_fast_support()
+
+ kdc_options_default = str(krb5_asn1.KDCOptions('forwardable,'
+ 'canonicalize'))
+
+ client_creds = self.get_cached_creds(account_type=client_account,
+ opts=client_opts)
+ target_creds = self.get_service_creds()
+ krbtgt_creds = self.get_krbtgt_creds()
+
+ client_username = client_creds.get_username()
+ client_realm = client_creds.get_realm()
+ client_cname = self.PrincipalName_create(name_type=NT_PRINCIPAL,
+ names=[client_username])
+
+ krbtgt_username = krbtgt_creds.get_username()
+ krbtgt_realm = krbtgt_creds.get_realm()
+ krbtgt_sname = self.PrincipalName_create(
+ name_type=NT_SRV_INST, names=[krbtgt_username, krbtgt_realm])
+ krbtgt_decryption_key = self.TicketDecryptionKey_from_creds(
+ krbtgt_creds)
+ krbtgt_etypes = krbtgt_creds.tgs_supported_enctypes
+
+ target_username = target_creds.get_username()[:-1]
+ target_realm = target_creds.get_realm()
+ target_service = 'host'
+ target_sname = self.PrincipalName_create(
+ name_type=NT_SRV_HST, names=[target_service, target_username])
+ target_decryption_key = self.TicketDecryptionKey_from_creds(
+ target_creds)
+ target_etypes = target_creds.tgs_supported_enctypes
+
+ client_decryption_key = self.TicketDecryptionKey_from_creds(
+ client_creds)
+ client_etypes = client_creds.tgs_supported_enctypes
+
+ fast_cookie = None
+ preauth_etype_info2 = None
+
+ for kdc_dict in test_sequence:
+ rep_type = kdc_dict.pop('rep_type')
+ self.assertIn(rep_type, (KRB_AS_REP, KRB_TGS_REP))
+
+ expected_error_mode = kdc_dict.pop('expected_error_mode')
+ if expected_error_mode == 0:
+ expected_error_mode = ()
+ elif not isinstance(expected_error_mode, collections.abc.Container):
+ expected_error_mode = (expected_error_mode,)
+ for error in expected_error_mode:
+ self.assertIn(error, range(240))
+
+ use_fast = kdc_dict.pop('use_fast')
+ self.assertIs(type(use_fast), bool)
+
+ if use_fast:
+ self.assertIn('fast_armor', kdc_dict)
+ fast_armor_type = kdc_dict.pop('fast_armor')
+
+ if fast_armor_type is not None:
+ self.assertIn('gen_armor_tgt_fn', kdc_dict)
+ elif KDC_ERR_GENERIC not in expected_error_mode:
+ self.assertNotIn('gen_armor_tgt_fn', kdc_dict)
+
+ gen_armor_tgt_fn = kdc_dict.pop('gen_armor_tgt_fn', None)
+ if gen_armor_tgt_fn is not None:
+ armor_tgt = gen_armor_tgt_fn(armor_opts)
+ else:
+ armor_tgt = None
+
+ fast_options = kdc_dict.pop('fast_options', '')
+ else:
+ fast_armor_type = None
+ armor_tgt = None
+
+ self.assertNotIn('fast_options', kdc_dict)
+ fast_options = None
+
+ if rep_type == KRB_TGS_REP:
+ gen_tgt_fn = kdc_dict.pop('gen_tgt_fn')
+ tgt = gen_tgt_fn(opts=client_opts)
+ else:
+ self.assertNotIn('gen_tgt_fn', kdc_dict)
+ tgt = None
+
+ if len(expected_error_mode) != 0:
+ check_error_fn = self.generic_check_kdc_error
+ check_rep_fn = None
+ else:
+ check_error_fn = None
+ check_rep_fn = self.generic_check_kdc_rep
+
+ etypes = kdc_dict.pop('etypes', (AES256_CTS_HMAC_SHA1_96,
+ ARCFOUR_HMAC_MD5))
+
+ cname = client_cname if rep_type == KRB_AS_REP else None
+ crealm = client_realm
+
+ as_req_self = kdc_dict.pop('as_req_self', False)
+ if as_req_self:
+ self.assertEqual(KRB_AS_REP, rep_type)
+
+ if 'sname' in kdc_dict:
+ sname = kdc_dict.pop('sname')
+ else:
+ if as_req_self:
+ sname = client_cname
+ elif rep_type == KRB_AS_REP:
+ sname = krbtgt_sname
+ else: # KRB_TGS_REP
+ sname = target_sname
+
+ if rep_type == KRB_AS_REP:
+ srealm = krbtgt_realm
+ else: # KRB_TGS_REP
+ srealm = target_realm
+
+ if rep_type == KRB_TGS_REP:
+ tgt_cname = tgt.cname
+ else:
+ tgt_cname = client_cname
+
+ expect_edata = kdc_dict.pop('expect_edata', None)
+ if expect_edata is not None:
+ self.assertTrue(expected_error_mode)
+
+ expected_cname = kdc_dict.pop('expected_cname', tgt_cname)
+ expected_anon = kdc_dict.pop('expected_anon',
+ False)
+ expected_crealm = kdc_dict.pop('expected_crealm', client_realm)
+ expected_sname = kdc_dict.pop('expected_sname', sname)
+ expected_srealm = kdc_dict.pop('expected_srealm', srealm)
+
+ expected_salt = client_creds.get_salt()
+
+ authenticator_subkey = self.RandomKey(kcrypto.Enctype.AES256)
+ if rep_type == KRB_AS_REP:
+ if use_fast:
+ armor_key = self.generate_armor_key(authenticator_subkey,
+ armor_tgt.session_key)
+ armor_subkey = authenticator_subkey
+ else:
+ armor_key = None
+ armor_subkey = authenticator_subkey
+ else: # KRB_TGS_REP
+ if fast_armor_type is not None:
+ armor_subkey = self.RandomKey(kcrypto.Enctype.AES256)
+ explicit_armor_key = self.generate_armor_key(
+ armor_subkey,
+ armor_tgt.session_key)
+ armor_key = kcrypto.cf2(explicit_armor_key.key,
+ authenticator_subkey.key,
+ b'explicitarmor',
+ b'tgsarmor')
+ armor_key = Krb5EncryptionKey(armor_key, None)
+ else:
+ armor_key = self.generate_armor_key(authenticator_subkey,
+ tgt.session_key)
+ armor_subkey = authenticator_subkey
+
+ if not kdc_dict.pop('include_subkey', True):
+ authenticator_subkey = None
+
+ if use_fast:
+ generate_fast_fn = kdc_dict.pop('gen_fast_fn', None)
+ if generate_fast_fn is None:
+ generate_fast_fn = functools.partial(
+ self.generate_simple_fast,
+ fast_options=fast_options)
+ else:
+ generate_fast_fn = None
+
+ generate_fast_armor_fn = (
+ self.generate_ap_req
+ if fast_armor_type is not None
+ else None)
+
+ def _generate_padata_copy(_kdc_exchange_dict,
+ _callback_dict,
+ req_body,
+ padata):
+ return list(padata), req_body
+
+ pac_request = kdc_dict.pop('pac_request', None)
+ expect_pac = kdc_dict.pop('expect_pac', True)
+
+ pac_options = kdc_dict.pop('pac_options', '1') # claims support
+
+ kdc_options = kdc_dict.pop('kdc_options', kdc_options_default)
+
+ gen_padata_fn = kdc_dict.pop('gen_padata_fn', None)
+
+ if rep_type == KRB_AS_REP and gen_padata_fn is not None:
+ self.assertIsNotNone(preauth_etype_info2)
+
+ preauth_key = self.PasswordKey_from_etype_info2(
+ client_creds,
+ preauth_etype_info2[0],
+ client_creds.get_kvno())
+ else:
+ preauth_key = None
+
+ if use_fast:
+ try:
+ fast_cookie = kdc_dict.pop('fast_cookie')
+ except KeyError:
+ pass
+
+ generate_fast_padata_fn = gen_padata_fn
+ generate_padata_fn = (functools.partial(_generate_padata_copy,
+ padata=[fast_cookie])
+ if fast_cookie is not None else None)
+ else:
+ generate_fast_padata_fn = None
+ generate_padata_fn = gen_padata_fn
+
+ gen_authdata_fn = kdc_dict.pop('gen_authdata_fn', None)
+ if gen_authdata_fn is not None:
+ auth_data = [gen_authdata_fn()]
+ else:
+ auth_data = None
+
+ gen_enc_authdata_fn = kdc_dict.pop('gen_enc_authdata_fn', None)
+ if gen_enc_authdata_fn is not None:
+ enc_auth_data = [gen_enc_authdata_fn()]
+
+ enc_auth_data_key = authenticator_subkey
+ enc_auth_data_usage = KU_TGS_REQ_AUTH_DAT_SUBKEY
+ if enc_auth_data_key is None:
+ enc_auth_data_key = tgt.session_key
+ enc_auth_data_usage = KU_TGS_REQ_AUTH_DAT_SESSION
+ else:
+ enc_auth_data = None
+
+ enc_auth_data_key = None
+ enc_auth_data_usage = None
+
+ if not use_fast:
+ self.assertNotIn('inner_req', kdc_dict)
+ self.assertNotIn('outer_req', kdc_dict)
+ inner_req = kdc_dict.pop('inner_req', None)
+ outer_req = kdc_dict.pop('outer_req', None)
+
+ expected_flags = kdc_dict.pop('expected_flags', None)
+ if expected_flags is not None:
+ expected_flags = krb5_asn1.TicketFlags(expected_flags)
+ unexpected_flags = kdc_dict.pop('unexpected_flags', None)
+ if unexpected_flags is not None:
+ unexpected_flags = krb5_asn1.TicketFlags(unexpected_flags)
+
+ fast_ap_options = kdc_dict.pop('fast_ap_options', None)
+
+ strict_edata_checking = kdc_dict.pop('strict_edata_checking', True)
+
+ if rep_type == KRB_AS_REP:
+ if as_req_self:
+ expected_supported_etypes = client_etypes
+ decryption_key = client_decryption_key
+ else:
+ expected_supported_etypes = krbtgt_etypes
+ decryption_key = krbtgt_decryption_key
+
+ kdc_exchange_dict = self.as_exchange_dict(
+ creds=client_creds,
+ expected_crealm=expected_crealm,
+ expected_cname=expected_cname,
+ expected_anon=expected_anon,
+ expected_srealm=expected_srealm,
+ expected_sname=expected_sname,
+ expected_supported_etypes=expected_supported_etypes,
+ expected_flags=expected_flags,
+ unexpected_flags=unexpected_flags,
+ ticket_decryption_key=decryption_key,
+ generate_fast_fn=generate_fast_fn,
+ generate_fast_armor_fn=generate_fast_armor_fn,
+ generate_fast_padata_fn=generate_fast_padata_fn,
+ fast_armor_type=fast_armor_type,
+ generate_padata_fn=generate_padata_fn,
+ check_error_fn=check_error_fn,
+ check_rep_fn=check_rep_fn,
+ check_kdc_private_fn=self.generic_check_kdc_private,
+ callback_dict={},
+ expected_error_mode=expected_error_mode,
+ expected_salt=expected_salt,
+ authenticator_subkey=authenticator_subkey,
+ preauth_key=preauth_key,
+ auth_data=auth_data,
+ armor_key=armor_key,
+ armor_tgt=armor_tgt,
+ armor_subkey=armor_subkey,
+ kdc_options=kdc_options,
+ inner_req=inner_req,
+ outer_req=outer_req,
+ expect_pac=expect_pac,
+ pac_request=pac_request,
+ pac_options=pac_options,
+ fast_ap_options=fast_ap_options,
+ strict_edata_checking=strict_edata_checking,
+ expect_edata=expect_edata)
+ else: # KRB_TGS_REP
+ kdc_exchange_dict = self.tgs_exchange_dict(
+ creds=client_creds,
+ expected_crealm=expected_crealm,
+ expected_cname=expected_cname,
+ expected_anon=expected_anon,
+ expected_srealm=expected_srealm,
+ expected_sname=expected_sname,
+ expected_supported_etypes=target_etypes,
+ expected_flags=expected_flags,
+ unexpected_flags=unexpected_flags,
+ ticket_decryption_key=target_decryption_key,
+ generate_fast_fn=generate_fast_fn,
+ generate_fast_armor_fn=generate_fast_armor_fn,
+ generate_fast_padata_fn=generate_fast_padata_fn,
+ fast_armor_type=fast_armor_type,
+ generate_padata_fn=generate_padata_fn,
+ check_error_fn=check_error_fn,
+ check_rep_fn=check_rep_fn,
+ check_kdc_private_fn=self.generic_check_kdc_private,
+ expected_error_mode=expected_error_mode,
+ callback_dict={},
+ tgt=tgt,
+ armor_key=armor_key,
+ armor_tgt=armor_tgt,
+ armor_subkey=armor_subkey,
+ authenticator_subkey=authenticator_subkey,
+ auth_data=auth_data,
+ body_checksum_type=None,
+ kdc_options=kdc_options,
+ inner_req=inner_req,
+ outer_req=outer_req,
+ expect_pac=expect_pac,
+ pac_request=pac_request,
+ pac_options=pac_options,
+ fast_ap_options=fast_ap_options,
+ strict_edata_checking=strict_edata_checking,
+ expect_edata=expect_edata)
+
+ repeat = kdc_dict.pop('repeat', 1)
+ for _ in range(repeat):
+ rep = self._generic_kdc_exchange(
+ kdc_exchange_dict,
+ cname=cname,
+ realm=crealm,
+ sname=sname,
+ etypes=etypes,
+ EncAuthorizationData=enc_auth_data,
+ EncAuthorizationData_key=enc_auth_data_key,
+ EncAuthorizationData_usage=enc_auth_data_usage)
+ if len(expected_error_mode) == 0:
+ self.check_reply(rep, rep_type)
+
+ fast_cookie = None
+ preauth_etype_info2 = None
+
+ # Check whether the ticket contains a PAC.
+ ticket = kdc_exchange_dict['rep_ticket_creds']
+ pac = self.get_ticket_pac(ticket, expect_pac=expect_pac)
+ if expect_pac:
+ self.assertIsNotNone(pac)
+ else:
+ self.assertIsNone(pac)
+ else:
+ self.check_error_rep(rep, expected_error_mode)
+
+ if 'fast_cookie' in kdc_exchange_dict:
+ fast_cookie = self.create_fast_cookie(
+ kdc_exchange_dict['fast_cookie'])
+ else:
+ fast_cookie = None
+
+ if KDC_ERR_PREAUTH_REQUIRED in expected_error_mode:
+ preauth_etype_info2 = (
+ kdc_exchange_dict['preauth_etype_info2'])
+ else:
+ preauth_etype_info2 = None
+
+ # Ensure we used all the parameters given to us.
+ self.assertEqual({}, kdc_dict)
+
+ return kdc_exchange_dict
+
+ def generate_enc_pa_rep_padata(self,
+ kdc_exchange_dict,
+ callback_dict,
+ req_body):
+ padata = self.PA_DATA_create(PADATA_REQ_ENC_PA_REP, b'')
+
+ return [padata], req_body
+
+ def generate_enc_pa_rep_challenge_padata(self,
+ kdc_exchange_dict,
+ callback_dict,
+ req_body):
+ padata, req_body = self.generate_enc_challenge_padata(kdc_exchange_dict,
+ callback_dict,
+ req_body)
+
+ padata.append(self.PA_DATA_create(PADATA_REQ_ENC_PA_REP, b''))
+
+ return padata, req_body
+
+ def generate_enc_pa_rep_timestamp_padata(self,
+ kdc_exchange_dict,
+ callback_dict,
+ req_body):
+ padata, req_body = self.generate_enc_timestamp_padata(kdc_exchange_dict,
+ callback_dict,
+ req_body)
+
+ padata.append(self.PA_DATA_create(PADATA_REQ_ENC_PA_REP, b''))
+
+ return padata, req_body
+
+ def generate_fast_armor_auth_data(self):
+ auth_data = self.AuthorizationData_create(AD_FX_FAST_ARMOR, b'')
+
+ return auth_data
+
+ def generate_fast_used_auth_data(self):
+ auth_data = self.AuthorizationData_create(AD_FX_FAST_USED, b'')
+
+ return auth_data
+
+    def gen_tgt_fast_armor_auth_data(self, opts):
+        """Return a user TGT whose encrypted part has had an
+        AD-fx-fast-armor authorization-data element appended.
+
+        :param opts: account-creation options passed through to
+            get_user_tgt().
+        """
+        user_tgt = self.get_user_tgt(opts)
+
+        auth_data = self.generate_fast_armor_auth_data()
+
+        def modify_fn(enc_part):
+            # Append the AD-fx-fast-armor element to the ticket's existing
+            # authorization data.
+            enc_part['authorization-data'].append(auth_data)
+
+            return enc_part
+
+        # Re-sign the modified ticket with the krbtgt checksum key so it
+        # still appears genuine.
+        checksum_keys = self.get_krbtgt_checksum_key()
+
+        # Use our modified TGT to replace the one in the request.
+        return self.modified_ticket(user_tgt,
+                                    modify_fn=modify_fn,
+                                    checksum_keys=checksum_keys)
+
+ def create_fast_cookie(self, cookie):
+ self.assertIsNotNone(cookie)
+ if self.strict_checking:
+ self.assertNotEqual(0, len(cookie))
+
+ return self.PA_DATA_create(PADATA_FX_COOKIE, cookie)
+
+    def check_kdc_fast_support(self):
+        # Check that the KDC supports FAST
+
+        samdb = self.get_samdb()
+
+        # Build the krbtgt account's SID from the domain SID and the
+        # well-known krbtgt RID.
+        krbtgt_rid = security.DOMAIN_RID_KRBTGT
+        krbtgt_sid = '%s-%d' % (samdb.get_domain_sid(), krbtgt_rid)
+
+        # Look up the krbtgt account's supported encryption types.
+        res = samdb.search(base='<SID=%s>' % krbtgt_sid,
+                           scope=ldb.SCOPE_BASE,
+                           attrs=['msDS-SupportedEncryptionTypes'])
+
+        krbtgt_etypes = int(res[0]['msDS-SupportedEncryptionTypes'][0])
+
+        # FAST, compound identity and claims support must all be
+        # advertised in the krbtgt account's supported encryption types.
+        self.assertTrue(
+            security.KERB_ENCTYPE_FAST_SUPPORTED & krbtgt_etypes)
+        self.assertTrue(
+            security.KERB_ENCTYPE_COMPOUND_IDENTITY_SUPPORTED & krbtgt_etypes)
+        self.assertTrue(
+            security.KERB_ENCTYPE_CLAIMS_SUPPORTED & krbtgt_etypes)
+
+ def get_mach_tgt(self, opts):
+ if opts is None:
+ opts = {}
+ mach_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={
+ **opts,
+ 'fast_support': True,
+ 'claims_support': True,
+ 'compound_id_support': True,
+ 'supported_enctypes': (
+ security.KERB_ENCTYPE_RC4_HMAC_MD5 |
+ security.KERB_ENCTYPE_AES256_CTS_HMAC_SHA1_96_SK
+ ),
+ })
+ return self.get_tgt(mach_creds)
+
+ def get_rodc_issued_mach_tgt(self, opts):
+ return self.issued_by_rodc(self.get_mach_tgt(opts))
+
+ def get_user_tgt(self, opts):
+ user_creds = self.get_cached_creds(
+ account_type=self.AccountType.USER,
+ opts=opts)
+ return self.get_tgt(user_creds)
+
+ def get_user_service_ticket(self, opts):
+ user_tgt = self.get_user_tgt(opts)
+ service_creds = self.get_service_creds()
+ return self.get_service_ticket(user_tgt, service_creds)
+
+ def get_mach_service_ticket(self, opts):
+ mach_tgt = self.get_mach_tgt(opts)
+ service_creds = self.get_service_creds()
+ return self.get_service_ticket(mach_tgt, service_creds)
+
+    def get_service_ticket_invalid_checksum(self, opts):
+        """Return a user service ticket whose PAC ticket checksum has been
+        recomputed with a ZeroedChecksumKey, so that the ticket checksum
+        no longer verifies against the real krbtgt key."""
+        ticket = self.get_user_service_ticket(opts)
+
+        krbtgt_creds = self.get_krbtgt_creds()
+        krbtgt_key = self.TicketDecryptionKey_from_creds(krbtgt_creds)
+
+        # NOTE(review): ZeroedChecksumKey presumably produces a zeroed
+        # (hence invalid) checksum — its behavior is defined elsewhere.
+        zeroed_key = ZeroedChecksumKey(krbtgt_key.key,
+                                       krbtgt_key.kvno)
+
+        # Server and KDC checksums keep their genuine keys; only the
+        # ticket checksum gets the zeroed key.
+        server_key = ticket.decryption_key
+        checksum_keys = {
+            krb5pac.PAC_TYPE_SRV_CHECKSUM: server_key,
+            krb5pac.PAC_TYPE_KDC_CHECKSUM: krbtgt_key,
+            krb5pac.PAC_TYPE_TICKET_CHECKSUM: zeroed_key,
+        }
+
+        return self.modified_ticket(
+            ticket,
+            checksum_keys=checksum_keys,
+            include_checksums={krb5pac.PAC_TYPE_TICKET_CHECKSUM: True})
+
+
+if __name__ == "__main__":
+    # Debugging flags consumed by the test cases' setUp() — keep disabled
+    # for normal runs.
+    global_asn1_print = False
+    global_hexdump = False
+    import unittest
+    unittest.main()
diff --git a/python/samba/tests/krb5/gkdi_tests.py b/python/samba/tests/krb5/gkdi_tests.py
new file mode 100755
index 0000000..58a65c4
--- /dev/null
+++ b/python/samba/tests/krb5/gkdi_tests.py
@@ -0,0 +1,745 @@
+#!/usr/bin/env python3
+# Unix SMB/CIFS implementation.
+# Copyright (C) Catalyst.Net Ltd 2023
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+#
+
+import sys
+import os
+
+sys.path.insert(0, "bin/python")
+os.environ["PYTHONUNBUFFERED"] = "1"
+
+import secrets
+
+from typing import ClassVar, Optional
+
+from samba.dcerpc import gkdi, misc
+from samba.gkdi import (
+ Algorithm,
+ Gkid,
+ KEY_CYCLE_DURATION,
+ KEY_LEN_BYTES,
+ MAX_CLOCK_SKEW,
+ NtTime,
+ NtTimeDelta,
+ SeedKeyPair,
+)
+from samba.hresult import HRES_E_INVALIDARG, HRES_NTE_BAD_KEY, HRES_NTE_NO_KEY
+from samba.nt_time import timedelta_from_nt_time_delta
+
+from samba.tests.gkdi import GetKeyError, GkdiBaseTest, ROOT_KEY_START_TIME
+from samba.tests.krb5.kdc_base_test import KDCBaseTest
+
+
+class GkdiKdcBaseTest(GkdiBaseTest, KDCBaseTest):
+    """Common helpers for GKDI tests that need both local key derivation
+    and the GKDI RPC interface."""
+
+    def new_root_key(self, *args, **kwargs) -> misc.GUID:
+        """Create a root key in the domain and return its GUID.
+
+        Extra arguments are forwarded to create_root_key().
+        """
+        samdb = self.get_samdb()
+        domain_dn = self.get_server_dn(samdb)
+        return self.create_root_key(samdb, domain_dn, *args, **kwargs)
+
+    def gkdi_conn(self) -> gkdi.gkdi:
+        """Return a GKDI RPC connection authenticated as a server account."""
+        # The seed keys used by Group Managed Service Accounts correspond to the
+        # Enterprise DCs SID (S-1-5-9); as such they can be retrieved only by
+        # server accounts.
+        return self.gkdi_connect(
+            self.dc_host,
+            self.get_lp(),
+            self.get_cached_creds(account_type=self.AccountType.SERVER),
+        )
+
+    def check_rpc_get_key(
+        self, root_key_id: Optional[misc.GUID], gkid: Gkid
+    ) -> SeedKeyPair:
+        """Fetch a key over RPC and assert that it matches the key derived
+        locally for the same root key and GKID.
+
+        :param root_key_id: root key to use, or None to let the server
+            choose one.
+        :param gkid: the GKID of the requested key.
+        :return: the key pair obtained over RPC.
+        """
+        got_key_pair = self.rpc_get_key(
+            self.gkdi_conn(), self.gmsa_sd, root_key_id, gkid
+        )
+        # When the server chose the root key, reuse its choice as a hint
+        # for the local derivation so both sides use the same root key.
+        expected_key_pair = self.get_key(
+            self.get_samdb(),
+            self.gmsa_sd,
+            root_key_id,
+            gkid,
+            root_key_id_hint=got_key_pair.root_key_id if root_key_id is None else None,
+        )
+        self.assertEqual(
+            got_key_pair.root_key_id,
+            expected_key_pair.root_key_id,
+            "root key IDs must match",
+        )
+        self.assertEqual(got_key_pair, expected_key_pair, "key pairs must match")
+
+        return got_key_pair
+
+
+class GkdiExplicitRootKeyTests(GkdiKdcBaseTest):
+    """Tests that pass an explicit root key ID when requesting GKDI keys,
+    covering hash-algorithm selection and root-key validity errors."""
+
+    def test_current_l0_idx(self):
+        """Request a key with the current L0 index. This index is regularly
+        incremented every 427 days or so."""
+        root_key_id = self.new_root_key()
+
+        # It actually doesn’t matter what we specify for the L1 and L2 indices.
+        # We’ll get the same result regardless — they just cannot specify a key
+        # from the future.
+        current_gkid = self.current_gkid()
+        key = self.check_rpc_get_key(root_key_id, current_gkid)
+
+        self.assertEqual(current_gkid, key.gkid)
+        self.assertEqual(root_key_id, key.root_key_id)
+
+    def test_previous_l0_idx(self):
+        """Request a key with a previous L0 index."""
+        root_key_id = self.new_root_key(use_start_time=ROOT_KEY_START_TIME)
+
+        # It actually doesn’t matter what we specify for the L1 and L2 indices.
+        # We’ll get the same result regardless.
+        previous_l0_idx = self.current_gkid().l0_idx - 1
+        key = self.check_rpc_get_key(root_key_id, Gkid(previous_l0_idx, 0, 0))
+
+        # Expect to get an L1 seed key.
+        self.assertIsNotNone(key.l1_key)
+        self.assertIsNone(key.l2_key)
+        self.assertEqual(Gkid(previous_l0_idx, 31, 31), key.gkid)
+        self.assertEqual(root_key_id, key.root_key_id)
+
+    def test_algorithm_sha1(self):
+        """Test with the SHA1 algorithm."""
+        key = self.check_rpc_get_key(
+            self.new_root_key(hash_algorithm=Algorithm.SHA1),
+            self.current_gkid(),
+        )
+        self.assertIs(Algorithm.SHA1, key.hash_algorithm)
+
+    def test_algorithm_sha256(self):
+        """Test with the SHA256 algorithm."""
+        key = self.check_rpc_get_key(
+            self.new_root_key(hash_algorithm=Algorithm.SHA256),
+            self.current_gkid(),
+        )
+        self.assertIs(Algorithm.SHA256, key.hash_algorithm)
+
+    def test_algorithm_sha384(self):
+        """Test with the SHA384 algorithm."""
+        key = self.check_rpc_get_key(
+            self.new_root_key(hash_algorithm=Algorithm.SHA384),
+            self.current_gkid(),
+        )
+        self.assertIs(Algorithm.SHA384, key.hash_algorithm)
+
+    def test_algorithm_sha512(self):
+        """Test with the SHA512 algorithm."""
+        key = self.check_rpc_get_key(
+            self.new_root_key(hash_algorithm=Algorithm.SHA512),
+            self.current_gkid(),
+        )
+        self.assertIs(Algorithm.SHA512, key.hash_algorithm)
+
+    def test_algorithm_none(self):
+        """Test without a specified algorithm."""
+        key = self.check_rpc_get_key(
+            self.new_root_key(hash_algorithm=None),
+            self.current_gkid(),
+        )
+        # SHA256 is the default when no algorithm is specified.
+        self.assertIs(Algorithm.SHA256, key.hash_algorithm)
+
+    def test_future_key(self):
+        """Try to request a key from the future."""
+        root_key_id = self.new_root_key(use_start_time=ROOT_KEY_START_TIME)
+
+        # A GKID one key cycle (plus clock skew) ahead of now is
+        # unambiguously in the future.
+        future_gkid = self.current_gkid(
+            offset=timedelta_from_nt_time_delta(
+                NtTimeDelta(KEY_CYCLE_DURATION + MAX_CLOCK_SKEW)
+            )
+        )
+
+        with self.assertRaises(GetKeyError) as err:
+            self.get_key(self.get_samdb(), self.gmsa_sd, root_key_id, future_gkid)
+
+        self.assertEqual(
+            HRES_E_INVALIDARG,
+            err.exception.args[0],
+            "requesting a key from the future should fail with INVALID_PARAMETER",
+        )
+
+        with self.assertRaises(GetKeyError) as rpc_err:
+            self.rpc_get_key(self.gkdi_conn(), self.gmsa_sd, root_key_id, future_gkid)
+
+        self.assertEqual(
+            HRES_E_INVALIDARG,
+            rpc_err.exception.args[0],
+            "requesting a key from the future should fail with INVALID_PARAMETER",
+        )
+
+    def test_root_key_use_start_time_zero(self):
+        """Attempt to use a root key with an effective time of zero."""
+        root_key_id = self.new_root_key(use_start_time=NtTime(0))
+
+        gkid = self.current_gkid()
+
+        with self.assertRaises(GetKeyError) as err:
+            self.get_key(self.get_samdb(), self.gmsa_sd, root_key_id, gkid)
+
+        self.assertEqual(
+            HRES_NTE_BAD_KEY,
+            err.exception.args[0],
+            "using a root key with an effective time of zero should fail with BAD_KEY",
+        )
+
+        with self.assertRaises(GetKeyError) as rpc_err:
+            self.rpc_get_key(self.gkdi_conn(), self.gmsa_sd, root_key_id, gkid)
+
+        self.assertEqual(
+            HRES_NTE_BAD_KEY,
+            rpc_err.exception.args[0],
+            "using a root key with an effective time of zero should fail with BAD_KEY",
+        )
+
+    def test_root_key_use_start_time_too_low(self):
+        """Attempt to use a root key with an effective time set too low."""
+        root_key_id = self.new_root_key(use_start_time=NtTime(ROOT_KEY_START_TIME - 1))
+
+        gkid = self.current_gkid()
+
+        with self.assertRaises(GetKeyError) as err:
+            self.get_key(self.get_samdb(), self.gmsa_sd, root_key_id, gkid)
+
+        self.assertEqual(
+            HRES_E_INVALIDARG,
+            err.exception.args[0],
+            "using a root key with too low effective time should fail with"
+            " INVALID_PARAMETER",
+        )
+
+        with self.assertRaises(GetKeyError) as rpc_err:
+            self.rpc_get_key(self.gkdi_conn(), self.gmsa_sd, root_key_id, gkid)
+
+        self.assertEqual(
+            HRES_E_INVALIDARG,
+            rpc_err.exception.args[0],
+            "using a root key with too low effective time should fail with"
+            " INVALID_PARAMETER",
+        )
+
+    def test_before_valid(self):
+        """Attempt to use a key before it is valid."""
+        gkid = self.current_gkid()
+        # The latest start time a root key may have and still cover this
+        # GKID.
+        valid_start_time = NtTime(
+            gkid.start_nt_time() + KEY_CYCLE_DURATION + MAX_CLOCK_SKEW
+        )
+
+        # Using a valid root key is allowed.
+        valid_root_key_id = self.new_root_key(use_start_time=valid_start_time)
+        self.check_rpc_get_key(valid_root_key_id, gkid)
+
+        # But attempting to use a root key that is not yet valid will result in
+        # an INVALID_PARAMETER error.
+        invalid_root_key_id = self.new_root_key(use_start_time=valid_start_time + 1)
+
+        with self.assertRaises(GetKeyError) as err:
+            self.get_key(self.get_samdb(), self.gmsa_sd, invalid_root_key_id, gkid)
+
+        self.assertEqual(
+            HRES_E_INVALIDARG,
+            err.exception.args[0],
+            "using a key before it is valid should fail with INVALID_PARAMETER",
+        )
+
+        with self.assertRaises(GetKeyError) as rpc_err:
+            self.rpc_get_key(self.gkdi_conn(), self.gmsa_sd, invalid_root_key_id, gkid)
+
+        self.assertEqual(
+            HRES_E_INVALIDARG,
+            rpc_err.exception.args[0],
+            "using a key before it is valid should fail with INVALID_PARAMETER",
+        )
+
+    def test_non_existent_root_key(self):
+        """Attempt to use a non‐existent root key."""
+        # A random GUID will not correspond to any existing root key.
+        root_key_id = misc.GUID(secrets.token_bytes(16))
+
+        gkid = self.current_gkid()
+
+        with self.assertRaises(GetKeyError) as err:
+            self.get_key(self.get_samdb(), self.gmsa_sd, root_key_id, gkid)
+
+        self.assertEqual(
+            HRES_NTE_NO_KEY,
+            err.exception.args[0],
+            "using a non‐existent root key should fail with NO_KEY",
+        )
+
+        with self.assertRaises(GetKeyError) as rpc_err:
+            self.rpc_get_key(self.gkdi_conn(), self.gmsa_sd, root_key_id, gkid)
+
+        self.assertEqual(
+            HRES_NTE_NO_KEY,
+            rpc_err.exception.args[0],
+            "using a non‐existent root key should fail with NO_KEY",
+        )
+
+    def test_root_key_wrong_length(self):
+        """Attempt to use a root key that is the wrong length."""
+        # Create a root key with only half the expected number of bytes.
+        root_key_id = self.new_root_key(data=bytes(KEY_LEN_BYTES // 2))
+
+        gkid = self.current_gkid()
+
+        with self.assertRaises(GetKeyError) as err:
+            self.get_key(self.get_samdb(), self.gmsa_sd, root_key_id, gkid)
+
+        self.assertEqual(
+            HRES_NTE_BAD_KEY,
+            err.exception.args[0],
+            "using a root key that is the wrong length should fail with BAD_KEY",
+        )
+
+        with self.assertRaises(GetKeyError) as rpc_err:
+            self.rpc_get_key(self.gkdi_conn(), self.gmsa_sd, root_key_id, gkid)
+
+        self.assertEqual(
+            HRES_NTE_BAD_KEY,
+            rpc_err.exception.args[0],
+            "using a root key that is the wrong length should fail with BAD_KEY",
+        )
+
+
+class GkdiImplicitRootKeyTests(GkdiKdcBaseTest):
+    """Tests that let the server choose the root key (no explicit root key
+    ID is supplied)."""
+
+    # Shared root key created lazily on first setUp(); None until then.
+    _root_key: ClassVar[Optional[misc.GUID]]
+
+    @classmethod
+    def setUpClass(cls) -> None:
+        super().setUpClass()
+
+        cls._root_key = None
+
+    def setUp(self) -> None:
+        super().setUp()
+
+        if self._root_key is None:
+            # GKDI requires a root key to operate. Creating a root key here
+            # saves creating one before every test.
+            #
+            # We cannot delete this key after the tests have run, as Windows
+            # might have decided to cache it to be used in subsequent runs. It
+            # will keep a root key cached even if the corresponding AD object
+            # has been deleted, leading to various problems later.
+            cls = type(self)
+            cls._root_key = self.new_root_key(use_start_time=ROOT_KEY_START_TIME)
+
+    def test_l1_seed_key(self):
+        """Request a key and expect to receive an L1 seed key."""
+        gkid = Gkid(300, 0, 31)
+        key = self.check_rpc_get_key(None, gkid)
+
+        # Expect to get an L1 seed key.
+        self.assertIsNotNone(key.l1_key)
+        self.assertIsNone(key.l2_key)
+        self.assertEqual(gkid, key.gkid)
+
+    def test_l2_seed_key(self):
+        """Request a key and expect to receive an L2 seed key."""
+        gkid = Gkid(300, 0, 0)
+        key = self.check_rpc_get_key(None, gkid)
+
+        # Expect to get an L2 seed key.
+        self.assertIsNone(key.l1_key)
+        self.assertIsNotNone(key.l2_key)
+        self.assertEqual(gkid, key.gkid)
+
+    def test_both_seed_keys(self):
+        """Request a key and expect to receive L1 and L2 seed keys."""
+        gkid = Gkid(300, 1, 0)
+        key = self.check_rpc_get_key(None, gkid)
+
+        # Expect to get both L1 and L2 seed keys.
+        self.assertIsNotNone(key.l1_key)
+        self.assertIsNotNone(key.l2_key)
+        self.assertEqual(gkid, key.gkid)
+
+    def test_both_seed_keys_no_hint(self):
+        """Request a key, but don’t specify ‘root_key_id_hint’."""
+        gkid = Gkid(300, 1, 0)
+        key = self.get_key(
+            self.get_samdb(),
+            self.gmsa_sd,
+            None,
+            gkid,
+        )
+
+        # Expect to get both L1 and L2 seed keys.
+        self.assertIsNotNone(key.l1_key)
+        self.assertIsNotNone(key.l2_key)
+        self.assertEqual(gkid, key.gkid)
+
+    def test_request_l0_seed_key(self):
+        """Attempt to request an L0 seed key."""
+        gkid = Gkid.l0_seed_key(300)
+
+        with self.assertRaises(GetKeyError) as err:
+            self.get_key(self.get_samdb(), self.gmsa_sd, None, gkid)
+
+        self.assertEqual(
+            HRES_E_INVALIDARG,
+            err.exception.args[0],
+            "requesting an L0 seed key should fail with INVALID_PARAMETER",
+        )
+
+        with self.assertRaises(GetKeyError) as rpc_err:
+            self.rpc_get_key(self.gkdi_conn(), self.gmsa_sd, None, gkid)
+
+        self.assertEqual(
+            HRES_E_INVALIDARG,
+            rpc_err.exception.args[0],
+            "requesting an L0 seed key should fail with INVALID_PARAMETER",
+        )
+
+    def test_request_l1_seed_key(self):
+        """Attempt to request an L1 seed key."""
+        gkid = Gkid.l1_seed_key(300, 0)
+
+        with self.assertRaises(GetKeyError) as err:
+            self.get_key(self.get_samdb(), self.gmsa_sd, None, gkid)
+
+        self.assertEqual(
+            HRES_E_INVALIDARG,
+            err.exception.args[0],
+            "requesting an L1 seed key should fail with INVALID_PARAMETER",
+        )
+
+        with self.assertRaises(GetKeyError) as rpc_err:
+            self.rpc_get_key(self.gkdi_conn(), self.gmsa_sd, None, gkid)
+
+        self.assertEqual(
+            HRES_E_INVALIDARG,
+            rpc_err.exception.args[0],
+            "requesting an L1 seed key should fail with INVALID_PARAMETER",
+        )
+
+    def test_request_default_seed_key(self):
+        """Try to make a request with the default GKID."""
+        gkid = Gkid.default()
+
+        # Our local implementation does not implement the default GKID.
+        self.assertRaises(
+            NotImplementedError,
+            self.get_key,
+            self.get_samdb(),
+            self.gmsa_sd,
+            None,
+            gkid,
+        )
+
+        # Over RPC, the same request is expected to succeed (no error is
+        # asserted here).
+        self.rpc_get_key(self.gkdi_conn(), self.gmsa_sd, None, gkid)
+
+
+class GkdiSelfTests(GkdiKdcBaseTest):
+    """Self‐tests of the local key-derivation code against fixed root-key
+    data and expected derived-key values (known-answer tests)."""
+
+    def test_current_l0_idx_l1_seed_key(self):
+        """Request a key with the current L0 index, expecting to receive an L1
+        seed key."""
+        root_key_id = self.new_root_key(
+            use_start_time=ROOT_KEY_START_TIME,
+            hash_algorithm=Algorithm.SHA512,
+            guid=misc.GUID("89f70521-9d66-441f-c314-1b462f9b1052"),
+            data=bytes.fromhex(
+                "a6ef87dbbbf86b6bbe55750b941f13ca99efe5185e2e2bded5b838d8a0e77647"
+                "0537e68cae45a7a0f4b1d6c9bf5494c3f879e172e326557cdbb6a56e8799a722"
+            ),
+        )
+
+        current_gkid = Gkid(255, 24, 31)
+        key = self.get_key(
+            self.get_samdb(),
+            self.gmsa_sd,
+            root_key_id,
+            Gkid(255, 2, 5),
+            current_gkid=current_gkid,
+        )
+
+        # Expect to get an L1 seed key.
+        self.assertEqual(current_gkid, key.gkid)
+        self.assertEqual(root_key_id, key.root_key_id)
+        self.assertEqual(Algorithm.SHA512, key.hash_algorithm)
+        self.assertEqual(
+            bytes.fromhex(
+                "bd538a073490f3cf9451c933025de9b22c97eaddaffa94b379e2b919a4bed147"
+                "5bc67f6a9175b139c69204c57d4300a0141ffe34d12ced84614593b1aa13af1c"
+            ),
+            key.l1_key,
+        )
+        self.assertIsNone(key.l2_key)
+
+    def test_current_l0_idx_l2_seed_key(self):
+        """Request a key with the current L0 index, expecting to receive an L2
+        seed key."""
+        root_key_id = self.new_root_key(
+            use_start_time=ROOT_KEY_START_TIME,
+            hash_algorithm=Algorithm.SHA512,
+            guid=misc.GUID("1a3d6c30-aa81-cb7f-d3fe-80775d135dfe"),
+            data=bytes.fromhex(
+                "dfd95be3153a0805c65694e7d284aace5ab0aa493350025eb8dbc6df0b4e9256"
+                "fb4cbfbe6237ce3732694e2608760076b67082d39abd3c0fedba1b8873645064"
+            ),
+        )
+
+        current_gkid = Gkid(321, 0, 12)
+        key = self.get_key(
+            self.get_samdb(),
+            self.gmsa_sd,
+            root_key_id,
+            Gkid(321, 0, 1),
+            current_gkid=current_gkid,
+        )
+
+        # Expect to get an L2 seed key.
+        self.assertEqual(current_gkid, key.gkid)
+        self.assertEqual(root_key_id, key.root_key_id)
+        self.assertEqual(Algorithm.SHA512, key.hash_algorithm)
+        self.assertIsNone(key.l1_key)
+        self.assertEqual(
+            bytes.fromhex(
+                "bbbd9376cd16c247ed40f5912d1908218c08f0915bae02fe02cbfb3753bde406"
+                "f9c553acd95143cf63906a0440e3cf237d2335ae4e4b9cd2d946a71351ebcb7b"
+            ),
+            key.l2_key,
+        )
+
+    def test_current_l0_idx_both_seed_keys(self):
+        """Request a key with the current L0 index, expecting to receive L1 and
+        L2 seed keys."""
+        root_key_id = self.new_root_key(
+            use_start_time=ROOT_KEY_START_TIME,
+            hash_algorithm=Algorithm.SHA512,
+            guid=misc.GUID("09de0b38-c743-7abf-44ea-7a3c3e404314"),
+            data=bytes.fromhex(
+                "d5912d0eb3bd60e1371b1e525dd83be7fc5baf77018b0dba6bd948b7a98ebe5a"
+                "f37674332506a46c52c108a62f2a3e89251ad1bde6d539004679c0658853bb68"
+            ),
+        )
+
+        current_gkid = Gkid(123, 21, 0)
+        key = self.get_key(
+            self.get_samdb(),
+            self.gmsa_sd,
+            root_key_id,
+            Gkid(123, 2, 1),
+            current_gkid=current_gkid,
+        )
+
+        # Expect to get both L1 and L2 seed keys.
+        self.assertEqual(current_gkid, key.gkid)
+        self.assertEqual(root_key_id, key.root_key_id)
+        self.assertEqual(Algorithm.SHA512, key.hash_algorithm)
+        self.assertEqual(
+            bytes.fromhex(
+                "b1f7c5896e7dc791d9c0aaf8ca7dbab8c172a4f8b873db488a3c4cbd0f559b11"
+                "52ffba39d4aff2d9e8aada90b27a3c94a5af996f4b8f584a4f37ccab4d505d3d"
+            ),
+            key.l1_key,
+        )
+        self.assertEqual(
+            bytes.fromhex(
+                "133c9bbd20d9227aeb38dfcd3be6bcbfc5983ba37202088ff5c8a70511214506"
+                "a69c195a8807cd844bcb955e9569c8e4d197759f28577cc126d15f16a7da4ee0"
+            ),
+            key.l2_key,
+        )
+
+    def test_previous_l0_idx(self):
+        """Request a key with a previous L0 index."""
+        root_key_id = self.new_root_key(
+            use_start_time=ROOT_KEY_START_TIME,
+            hash_algorithm=Algorithm.SHA512,
+            guid=misc.GUID("27136e8f-e093-6fe3-e57f-1d915b102e1c"),
+            data=bytes.fromhex(
+                "b41118c60a19cafa5ecf858d1a2a2216527b2daedf386e9d599e42a46add6c7d"
+                "c93868619761c880ff3674a77c6e5fbf3434d130a9727bb2cd2a2557bdcfc752"
+            ),
+        )
+
+        key = self.get_key(
+            self.get_samdb(),
+            self.gmsa_sd,
+            root_key_id,
+            Gkid(100, 20, 30),
+            current_gkid=Gkid(101, 2, 3),
+        )
+
+        # Expect to get an L1 seed key.
+        self.assertEqual(Gkid(100, 31, 31), key.gkid)
+        self.assertEqual(root_key_id, key.root_key_id)
+        self.assertEqual(Algorithm.SHA512, key.hash_algorithm)
+        self.assertEqual(
+            bytes.fromhex(
+                "935cbdc06198eb28fa44b8d8278f51072c4613999236585041ede8e72d02fe95"
+                "e3454f046382cbc0a700779b79474dd7e080509d76302d2937407e96e3d3d022"
+            ),
+            key.l1_key,
+        )
+        self.assertIsNone(key.l2_key)
+
+    def test_sha1(self):
+        """Request a key derived with SHA1."""
+        root_key_id = self.new_root_key(
+            use_start_time=ROOT_KEY_START_TIME,
+            hash_algorithm=Algorithm.SHA1,
+            guid=misc.GUID("970abad6-fe55-073a-caf1-b801d3f26bd3"),
+            data=bytes.fromhex(
+                "3bed03bf0fb7d4013149154f24ca2d59b98db6d588cb1f54eca083855e25eb28"
+                "d3562a01adc78c4b70e0b72a59515863e7732b853fba02dd7646e63108441211"
+            ),
+        )
+
+        current_gkid = Gkid(1, 2, 3)
+        key = self.get_key(
+            self.get_samdb(),
+            self.gmsa_sd,
+            root_key_id,
+            Gkid(1, 1, 1),
+            current_gkid=current_gkid,
+        )
+
+        # Expect to get both L1 and L2 seed keys.
+        self.assertEqual(current_gkid, key.gkid)
+        self.assertEqual(root_key_id, key.root_key_id)
+        self.assertEqual(Algorithm.SHA1, key.hash_algorithm)
+        self.assertEqual(
+            bytes.fromhex(
+                "576cb68f2e52eb739f817b488c3590d86f1c2c365f3fc9201d9c7fee7494853d"
+                "58746ee13e48f18aa6fa69f7157de3d07de34e13836792b7c088ffb6914a89c2"
+            ),
+            key.l1_key,
+        )
+        self.assertEqual(
+            bytes.fromhex(
+                "3ffb825adaf116b6533207d568a30ed3d3f21c68840941c9456684f9afa11b05"
+                "6e0c59391b4d88c495d984c3d680029cc5c594630f34179119c1c5acaae5e90e"
+            ),
+            key.l2_key,
+        )
+
+    def test_sha256(self):
+        """Request a key derived with SHA256."""
+        root_key_id = self.new_root_key(
+            use_start_time=ROOT_KEY_START_TIME,
+            hash_algorithm=Algorithm.SHA256,
+            guid=misc.GUID("45e26207-ed33-dcd5-925a-518a0deef69e"),
+            data=bytes.fromhex(
+                "28b5b6503d3c1d24814de781bb7bfce3ef69eed1ce4809372bee2c506270c5f0"
+                "b5c6df597472623f256c86daa0991e8a11a1705f21b2cfdc0bb9db4ba23246a2"
+            ),
+        )
+
+        current_gkid = Gkid(222, 22, 22)
+        key = self.get_key(
+            self.get_samdb(),
+            self.gmsa_sd,
+            root_key_id,
+            Gkid(222, 11, 0),
+            current_gkid=current_gkid,
+        )
+
+        # Expect to get both L1 and L2 seed keys.
+        self.assertEqual(current_gkid, key.gkid)
+        self.assertEqual(root_key_id, key.root_key_id)
+        self.assertEqual(Algorithm.SHA256, key.hash_algorithm)
+        self.assertEqual(
+            bytes.fromhex(
+                "57aced6e75f83f3af4f879b38b60f090b42e4bfa022fae3e6fd94280b469b0ec"
+                "15d8b853a870b5fbdf28708cce19273b74a573acbe0deda8ef515db4691e2dcb"
+            ),
+            key.l1_key,
+        )
+        self.assertEqual(
+            bytes.fromhex(
+                "752a0879ae2424c0504c7493599f13e588e1bbdc252f83325ad5b1fb91c24c89"
+                "01d440f3ff9ffba59fcd65bb975732d9f383dd50b898174bb9393e383d25d540"
+            ),
+            key.l2_key,
+        )
+
+    def test_sha384(self):
+        """Request a key derived with SHA384."""
+        root_key_id = self.new_root_key(
+            use_start_time=ROOT_KEY_START_TIME,
+            hash_algorithm=Algorithm.SHA384,
+            guid=misc.GUID("66e6d9f7-4924-f3fc-fe34-605634d42ebd"),
+            data=bytes.fromhex(
+                "23e5ba86cbd88f7b432ee66dbb03bf4eebf401cbfc3df735d4d728b503c87f84"
+                "3207c6f6153f190dfe85a86cb8d8b74df13b25305981be8d7e29c96ee54c9630"
+            ),
+        )
+
+        current_gkid = Gkid(287, 28, 27)
+        key = self.get_key(
+            self.get_samdb(),
+            self.gmsa_sd,
+            root_key_id,
+            Gkid(287, 8, 7),
+            current_gkid=current_gkid,
+        )
+
+        # Expect to get both L1 and L2 seed keys.
+        self.assertEqual(current_gkid, key.gkid)
+        self.assertEqual(root_key_id, key.root_key_id)
+        self.assertEqual(Algorithm.SHA384, key.hash_algorithm)
+        self.assertEqual(
+            bytes.fromhex(
+                "fabadd7a9a63df57d6832df7a735aebb6e181888b2eaf301a2e4ff9a70246d38"
+                "ab1d2416325bf3eb726a0267bab4bd950c7291f05ea5f17197ece56992af3eb8"
+            ),
+            key.l1_key,
+        )
+        self.assertEqual(
+            bytes.fromhex(
+                "ec1c65634b5694818e1d341da9996db8f2a1ef6a2c776a7126a7ebd18b37a073"
+                "afdac44c41b167b14e4b872d485bbb6d7b70964215d0e84a2ff142a9d943f205"
+            ),
+            key.l2_key,
+        )
+
+    def test_derive_key_exact(self):
+        """Derive a key at an exact GKID."""
+        root_key_id = self.new_root_key(
+            use_start_time=ROOT_KEY_START_TIME,
+            hash_algorithm=Algorithm.SHA512,
+            guid=misc.GUID("d95fb06f-5a9c-1829-e20d-27f3f2ecfbeb"),
+            data=bytes.fromhex(
+                "489f3531c537774d432d6b97e3bc1f43d2e8c6dc17eb0e4fd9a0870d2f1ebf92"
+                "e2496668a8b5bd11aea2d32d0aab716f48fe569f5c9b50ff3f9bf5deaea572fb"
+            ),
+        )
+
+        gkid = Gkid(333, 22, 11)
+        key = self.get_key_exact(
+            self.get_samdb(),
+            self.gmsa_sd,
+            root_key_id,
+            gkid,
+            current_gkid=self.current_gkid(),
+        )
+
+        self.assertEqual(gkid, key.gkid)
+        self.assertEqual(root_key_id, key.root_key_id)
+        self.assertEqual(Algorithm.SHA512, key.hash_algorithm)
+        self.assertEqual(
+            bytes.fromhex(
+                "d6ab3b14f4f4c8908aa3464011b39f10a8bfadb9974af90f7d9a9fede2fdc6e5"
+                "f68a628ec00f9994a3abd8a52ae9e2db4f68e83648311e9d7765f2535515b5e2"
+            ),
+            key.key,
+        )
+
+
+if __name__ == "__main__":
+    # Run the GKDI tests directly as a script.
+    import unittest
+
+    unittest.main()
diff --git a/python/samba/tests/krb5/group_tests.py b/python/samba/tests/krb5/group_tests.py
new file mode 100755
index 0000000..1214fa2
--- /dev/null
+++ b/python/samba/tests/krb5/group_tests.py
@@ -0,0 +1,1967 @@
+#!/usr/bin/env python3
+# Unix SMB/CIFS implementation.
+# Copyright (C) Stefan Metzmacher 2020
+# Copyright (C) Catalyst.Net Ltd 2022
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import sys
+import os
+
+sys.path.insert(0, 'bin/python')
+os.environ['PYTHONUNBUFFERED'] = '1'
+
+import random
+import re
+
+import ldb
+
+from samba import werror
+from samba.dcerpc import netlogon, security
+from samba.tests import DynamicTestCase, env_get_var_value
+from samba.tests.krb5 import kcrypto
+from samba.tests.krb5.kdc_base_test import GroupType, KDCBaseTest, Principal
+from samba.tests.krb5.raw_testcase import RawKerberosTest
+from samba.tests.krb5.rfc4120_constants import (
+ KRB_TGS_REP,
+ NT_PRINCIPAL,
+)
+
+SidType = RawKerberosTest.SidType
+
+global_asn1_print = False
+global_hexdump = False
+
+
+@DynamicTestCase
+class GroupTests(KDCBaseTest):
+ # Placeholder objects that represent the user account undergoing testing.
+ user = object()
+ trust_user = object()
+
+ # Constants for group SID attributes.
+ default_attrs = security.SE_GROUP_DEFAULT_FLAGS
+ resource_attrs = default_attrs | security.SE_GROUP_RESOURCE
+
+ asserted_identity = security.SID_AUTHENTICATION_AUTHORITY_ASSERTED_IDENTITY
+
+ trust_domain = 'S-1-5-21-123-456-789'
+
+    def setUp(self):
+        """Apply the module-level ASN.1-print and hexdump debug settings."""
+        super().setUp()
+        self.do_asn1_print = global_asn1_print
+        self.do_hexdump = global_hexdump
+
+    @classmethod
+    def setUpDynamicTestCases(cls):
+        """Generate one test_group_* method per entry in cls.cases.
+
+        The FILTER environment variable, if set, is a regular expression
+        limiting which cases are generated (matched against the sanitised
+        case name).  SKIP_INVALID, if set, skips cases marked with
+        'configuration_invalid'.
+        """
+        FILTER = env_get_var_value('FILTER', allow_missing=True)
+        SKIP_INVALID = env_get_var_value('SKIP_INVALID', allow_missing=True)
+
+        for case in cls.cases:
+            invalid = case.pop('configuration_invalid', False)
+            if SKIP_INVALID and invalid:
+                # Some group setups are invalid on Windows, so we allow them to
+                # be skipped.
+                continue
+            name = case.pop('test')
+            # Make the case name usable as part of a Python identifier.
+            name = re.sub(r'\W+', '_', name)
+            if FILTER and not re.search(FILTER, name):
+                continue
+
+            cls.generate_dynamic_test('test_group', name,
+                                      dict(case))
+
+    def test_set_universal_primary_group(self):
+        """A universal group can be set as a user's primary group."""
+        samdb = self.get_samdb()
+
+        # Create a universal group.
+        universal_dn = self.create_group(samdb,
+                                         self.get_new_username(),
+                                         gtype=GroupType.UNIVERSAL.value)
+
+        # Get the SID of the universal group.
+        universal_sid = self.get_objectSid(samdb, universal_dn)
+
+        # Create a user account belonging to the group.
+        creds = self.get_cached_creds(
+            account_type=self.AccountType.USER,
+            opts={
+                'member_of': (
+                    universal_dn,
+                ),
+                'kerberos_enabled': False,
+            },
+            use_cache=False)
+
+        # Set the user's primary group. This is expected to succeed.
+        self.set_primary_group(samdb, creds.get_dn(), universal_sid)
+
+    def test_set_domain_local_primary_group(self):
+        """Setting a domain-local group as a primary group is rejected."""
+        samdb = self.get_samdb()
+
+        # Create a domain-local group.
+        domain_local_dn = self.create_group(samdb,
+                                            self.get_new_username(),
+                                            gtype=GroupType.DOMAIN_LOCAL.value)
+
+        # Get the SID of the domain-local group.
+        domain_local_sid = self.get_objectSid(samdb, domain_local_dn)
+
+        # Create a user account belonging to the group.
+        creds = self.get_cached_creds(
+            account_type=self.AccountType.USER,
+            opts={
+                'member_of': (
+                    domain_local_dn,
+                ),
+                'kerberos_enabled': False,
+            },
+            use_cache=False)
+
+        # Setting the user's primary group fails with
+        # unwillingToPerform / WERR_MEMBER_NOT_IN_GROUP.
+        self.set_primary_group(
+            samdb, creds.get_dn(), domain_local_sid,
+            expected_error=ldb.ERR_UNWILLING_TO_PERFORM,
+            expected_werror=werror.WERR_MEMBER_NOT_IN_GROUP)
+
+    def test_change_universal_primary_group_to_global(self):
+        """A user's universal primary group can be changed to global scope."""
+        samdb = self.get_samdb()
+
+        # Create a universal group.
+        universal_dn = self.create_group(samdb,
+                                         self.get_new_username(),
+                                         gtype=GroupType.UNIVERSAL.value)
+
+        # Get the SID of the universal group.
+        universal_sid = self.get_objectSid(samdb, universal_dn)
+
+        # Create a user account belonging to the group.
+        creds = self.get_cached_creds(
+            account_type=self.AccountType.USER,
+            opts={
+                'member_of': (
+                    universal_dn,
+                ),
+                'kerberos_enabled': False,
+            },
+            use_cache=False)
+
+        # Set the user's primary group.
+        self.set_primary_group(samdb, creds.get_dn(), universal_sid)
+
+        # Change the group to a global group. This is expected to succeed
+        # even though the group is the user's primary group.
+        self.set_group_type(samdb,
+                            ldb.Dn(samdb, universal_dn),
+                            GroupType.GLOBAL)
+
+    def test_change_universal_primary_group_to_domain_local(self):
+        """A user's universal primary group can become domain-local."""
+        samdb = self.get_samdb()
+
+        # Create a universal group.
+        universal_dn = self.create_group(samdb,
+                                         self.get_new_username(),
+                                         gtype=GroupType.UNIVERSAL.value)
+
+        # Get the SID of the universal group.
+        universal_sid = self.get_objectSid(samdb, universal_dn)
+
+        # Create a user account belonging to the group.
+        creds = self.get_cached_creds(
+            account_type=self.AccountType.USER,
+            opts={
+                'member_of': (
+                    universal_dn,
+                ),
+                'kerberos_enabled': False,
+            },
+            use_cache=False)
+
+        # Set the user's primary group.
+        self.set_primary_group(samdb, creds.get_dn(), universal_sid)
+
+        # Change the group to a domain-local group. This works, even though the
+        # group is still the user's primary group.
+        self.set_group_type(samdb,
+                            ldb.Dn(samdb, universal_dn),
+                            GroupType.DOMAIN_LOCAL)
+
+    # Check the groups in a SamInfo structure returned by SamLogon.
+    def test_samlogon_SamInfo(self):
+        """Interactive SamLogon at validation level SamInfo (netr_SamInfo2).
+
+        Expect Domain Users and the universal group among the base RIDs,
+        the EXTRA_SIDS flag unset, and the domain-local group absent.
+        """
+        samdb = self.get_samdb()
+
+        # Create a universal and a domain-local group.
+        universal_dn = self.create_group(samdb,
+                                         self.get_new_username(),
+                                         gtype=GroupType.UNIVERSAL.value)
+        domain_local_dn = self.create_group(samdb,
+                                            self.get_new_username(),
+                                            gtype=GroupType.DOMAIN_LOCAL.value)
+
+        # Create a user account belonging to both groups.
+        creds = self.get_cached_creds(
+            account_type=self.AccountType.USER,
+            opts={
+                'member_of': (
+                    universal_dn,
+                    domain_local_dn,
+                ),
+                'kerberos_enabled': False,
+            })
+
+        # Get the SID and RID of the user account.
+        user_sid = creds.get_sid()
+        user_rid = int(user_sid.rsplit('-', 1)[1])
+
+        # Get the SID and RID of the universal group.
+        universal_sid = self.get_objectSid(samdb, universal_dn)
+        universal_rid = int(universal_sid.rsplit('-', 1)[1])
+
+        # We don't expect the EXTRA_SIDS flag to be set.
+        unexpected_flags = netlogon.NETLOGON_EXTRA_SIDS
+
+        # Do a SamLogon call and check we get back the right structure.
+        interactive = netlogon.NetlogonInteractiveInformation
+        level = netlogon.NetlogonValidationSamInfo
+        validation = self._test_samlogon(creds=creds,
+                                         logon_type=interactive,
+                                         validation_level=level)
+        self.assertIsInstance(validation, netlogon.netr_SamInfo2)
+
+        base = validation.base
+
+        # Check some properties of the base structure.
+        self.assertEqual(user_rid, base.rid)
+        self.assertEqual(security.DOMAIN_RID_USERS, base.primary_gid)
+        self.assertEqual(samdb.get_domain_sid(), str(base.domain_sid))
+        self.assertFalse(unexpected_flags & base.user_flags,
+                         f'0x{unexpected_flags:x} unexpectedly set in '
+                         f'user_flags (0x{base.user_flags:x})')
+
+        # Check we have two groups in the base.
+        self.assertEqual(2, base.groups.count)
+
+        rids = base.groups.rids
+
+        # The first group should be Domain Users.
+        self.assertEqual(security.DOMAIN_RID_USERS, rids[0].rid)
+        self.assertEqual(self.default_attrs, rids[0].attributes)
+
+        # The second should be our universal group.
+        self.assertEqual(universal_rid, rids[1].rid)
+        self.assertEqual(self.default_attrs, rids[1].attributes)
+
+        # The domain-local group is nowhere to be found.
+
+    # Check the groups in a SamInfo2 structure returned by SamLogon.
+    def test_samlogon_SamInfo2(self):
+        """Interactive SamLogon at validation level SamInfo2 (netr_SamInfo3).
+
+        Expect Domain Users and the universal group among the base RIDs,
+        the EXTRA_SIDS flag set, and the domain-local group delivered in
+        the extra SIDs array with resource attributes.
+        """
+        samdb = self.get_samdb()
+
+        # Create a universal and a domain-local group.
+        universal_dn = self.create_group(samdb,
+                                         self.get_new_username(),
+                                         gtype=GroupType.UNIVERSAL.value)
+        domain_local_dn = self.create_group(samdb,
+                                            self.get_new_username(),
+                                            gtype=GroupType.DOMAIN_LOCAL.value)
+
+        # Create a user account belonging to both groups.
+        creds = self.get_cached_creds(
+            account_type=self.AccountType.USER,
+            opts={
+                'member_of': (
+                    universal_dn,
+                    domain_local_dn,
+                ),
+                'kerberos_enabled': False,
+            })
+
+        # Get the SID and RID of the user account.
+        user_sid = creds.get_sid()
+        user_rid = int(user_sid.rsplit('-', 1)[1])
+
+        # Get the SID and RID of the universal group.
+        universal_sid = self.get_objectSid(samdb, universal_dn)
+        universal_rid = int(universal_sid.rsplit('-', 1)[1])
+
+        # Get the SID of the domain-local group.
+        domain_local_sid = self.get_objectSid(samdb, domain_local_dn)
+
+        # We expect the EXTRA_SIDS flag to be set.
+        expected_flags = netlogon.NETLOGON_EXTRA_SIDS
+
+        # Do a SamLogon call and check we get back the right structure.
+        interactive = netlogon.NetlogonInteractiveInformation
+        level = netlogon.NetlogonValidationSamInfo2
+        validation = self._test_samlogon(creds=creds,
+                                         logon_type=interactive,
+                                         validation_level=level)
+        self.assertIsInstance(validation, netlogon.netr_SamInfo3)
+
+        base = validation.base
+
+        # Check some properties of the base structure.
+        self.assertEqual(user_rid, base.rid)
+        self.assertEqual(security.DOMAIN_RID_USERS, base.primary_gid)
+        self.assertEqual(samdb.get_domain_sid(), str(base.domain_sid))
+        self.assertTrue(expected_flags & base.user_flags,
+                        f'0x{expected_flags:x} unexpectedly reset in '
+                        f'user_flags (0x{base.user_flags:x})')
+
+        # Check we have two groups in the base.
+        self.assertEqual(2, base.groups.count)
+
+        rids = base.groups.rids
+
+        # The first group should be Domain Users.
+        self.assertEqual(security.DOMAIN_RID_USERS, rids[0].rid)
+        self.assertEqual(self.default_attrs, rids[0].attributes)
+
+        # The second should be our universal group.
+        self.assertEqual(universal_rid, rids[1].rid)
+        self.assertEqual(self.default_attrs, rids[1].attributes)
+
+        # Check that we have one group in the SIDs array.
+        self.assertEqual(1, validation.sidcount)
+
+        sids = validation.sids
+
+        # That group should be our domain-local group.
+        self.assertEqual(domain_local_sid, str(sids[0].sid))
+        self.assertEqual(self.resource_attrs, sids[0].attributes)
+
+    # Check the groups in a SamInfo4 structure returned by SamLogon.
+    def test_samlogon_SamInfo4(self):
+        """Interactive SamLogon at validation level SamInfo4 (netr_SamInfo6).
+
+        Expect the same group results as the SamInfo2 level: Domain Users
+        and the universal group among the base RIDs, the EXTRA_SIDS flag
+        set, and the domain-local group in the extra SIDs array with
+        resource attributes.
+        """
+        samdb = self.get_samdb()
+
+        # Create a universal and a domain-local group.
+        universal_dn = self.create_group(samdb,
+                                         self.get_new_username(),
+                                         gtype=GroupType.UNIVERSAL.value)
+        domain_local_dn = self.create_group(samdb,
+                                            self.get_new_username(),
+                                            gtype=GroupType.DOMAIN_LOCAL.value)
+
+        # Create a user account belonging to both groups.
+        creds = self.get_cached_creds(
+            account_type=self.AccountType.USER,
+            opts={
+                'member_of': (
+                    universal_dn,
+                    domain_local_dn,
+                ),
+                'kerberos_enabled': False,
+            })
+
+        # Get the SID and RID of the user account.
+        user_sid = creds.get_sid()
+        user_rid = int(user_sid.rsplit('-', 1)[1])
+
+        # Get the SID and RID of the universal group.
+        universal_sid = self.get_objectSid(samdb, universal_dn)
+        universal_rid = int(universal_sid.rsplit('-', 1)[1])
+
+        # Get the SID of the domain-local group.
+        domain_local_sid = self.get_objectSid(samdb, domain_local_dn)
+
+        # We expect the EXTRA_SIDS flag to be set.
+        expected_flags = netlogon.NETLOGON_EXTRA_SIDS
+
+        # Do a SamLogon call and check we get back the right structure.
+        interactive = netlogon.NetlogonInteractiveInformation
+        level = netlogon.NetlogonValidationSamInfo4
+        validation = self._test_samlogon(creds=creds,
+                                         logon_type=interactive,
+                                         validation_level=level)
+        self.assertIsInstance(validation, netlogon.netr_SamInfo6)
+
+        base = validation.base
+
+        # Check some properties of the base structure.
+        self.assertEqual(user_rid, base.rid)
+        self.assertEqual(security.DOMAIN_RID_USERS, base.primary_gid)
+        self.assertEqual(samdb.get_domain_sid(), str(base.domain_sid))
+        self.assertTrue(expected_flags & base.user_flags,
+                        f'0x{expected_flags:x} unexpectedly reset in '
+                        f'user_flags (0x{base.user_flags:x})')
+
+        # Check we have two groups in the base.
+        self.assertEqual(2, base.groups.count)
+
+        rids = base.groups.rids
+
+        # The first group should be Domain Users.
+        self.assertEqual(security.DOMAIN_RID_USERS, rids[0].rid)
+        self.assertEqual(self.default_attrs, rids[0].attributes)
+
+        # The second should be our universal group.
+        self.assertEqual(universal_rid, rids[1].rid)
+        self.assertEqual(self.default_attrs, rids[1].attributes)
+
+        # Check that we have one group in the SIDs array.
+        self.assertEqual(1, validation.sidcount)
+
+        sids = validation.sids
+
+        # That group should be our domain-local group.
+        self.assertEqual(domain_local_sid, str(sids[0].sid))
+        self.assertEqual(self.resource_attrs, sids[0].attributes)
+
+ # A list of test cases.
+ cases = [
+ # AS-REQ tests.
+ {
+ 'test': 'universal; as-req to krbtgt',
+ 'groups': {
+ # A Universal group containing the user.
+ 'foo': (GroupType.UNIVERSAL, {user}),
+ },
+ # Make an AS-REQ to the krbtgt with the user's account.
+ 'as:to_krbtgt': True,
+ 'as:expected': {
+ # Ignoring the user ID, or base RID, expect the PAC to contain
+ # precisely the following SIDS in any order:
+ ('foo', SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ },
+ {
+ 'test': 'universal; as-req to service',
+ 'groups': {
+ 'foo': (GroupType.UNIVERSAL, {user}),
+ },
+ # The same again, but this time perform the AS-REQ to a service.
+ 'as:to_krbtgt': False,
+ 'as:expected': {
+ ('foo', SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ },
+ {
+ 'test': 'global; as-req to krbtgt',
+ 'groups': {
+ # The behaviour should be the same with a Global group.
+ 'foo': (GroupType.GLOBAL, {user}),
+ },
+ 'as:to_krbtgt': True,
+ 'as:expected': {
+ ('foo', SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ },
+ {
+ 'test': 'global; as-req to service',
+ 'groups': {
+ 'foo': (GroupType.GLOBAL, {user}),
+ },
+ 'as:to_krbtgt': False,
+ 'as:expected': {
+ ('foo', SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ },
+ {
+ 'test': 'domain-local; as-req to krbtgt',
+ 'groups': {
+ # A Domain-local group containing the user.
+ 'foo': (GroupType.DOMAIN_LOCAL, {user}),
+ },
+ 'as:to_krbtgt': True,
+ 'as:expected': {
+ # A TGT will not contain domain-local groups the user belongs
+ # to.
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ },
+ {
+ 'test': 'domain-local; compression; as-req to service',
+ 'groups': {
+ 'foo': (GroupType.DOMAIN_LOCAL, {user}),
+ },
+ 'as:to_krbtgt': False,
+ 'as:expected': {
+ # However, a service ticket will include domain-local
+ # groups. The account supports SID compression, so they are
+ # added as resource SIDs.
+ ('foo', SidType.RESOURCE_SID, resource_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ },
+ {
+ 'test': 'domain-local; no compression; as-req to service',
+ 'groups': {
+ 'foo': (GroupType.DOMAIN_LOCAL, {user}),
+ },
+ 'as:to_krbtgt': False,
+ # This time, the target account disclaims support for SID
+ # compression.
+ 'as:compression': False,
+ 'as:expected': {
+ # The SIDs in the PAC are the same, except the group SID is
+ # placed in Extra SIDs, not Resource SIDs.
+ ('foo', SidType.EXTRA_SID, resource_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ },
+ {
+ 'test': 'nested domain-local; as-req to krbtgt',
+ 'groups': {
+ # A Universal group containing a Domain-local group containing
+ # the user.
+ 'universal': (GroupType.UNIVERSAL, {'dom-local'}),
+ 'dom-local': (GroupType.DOMAIN_LOCAL, {user}),
+ },
+ # It is not possible in Windows for a Universal group to contain a
+ # Domain-local group without exploiting bugs. This flag provides a
+ # convenient means by which these tests can be skipped.
+ 'configuration_invalid': True,
+ 'as:to_krbtgt': True,
+ 'as:expected': {
+ # While Windows would exclude the universal group from the PAC,
+ # expecting its inclusion is more sensible on the whole.
+ ('universal', SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ },
+ {
+ 'test': 'nested domain-local; compression; as-req to service',
+ 'groups': {
+ 'universal': (GroupType.UNIVERSAL, {'dom-local'}),
+ 'dom-local': (GroupType.DOMAIN_LOCAL, {user}),
+ },
+ 'configuration_invalid': True,
+ 'as:to_krbtgt': False,
+ 'as:expected': {
+ # A service ticket is expected to include both SIDs.
+ ('universal', SidType.BASE_SID, default_attrs),
+ ('dom-local', SidType.RESOURCE_SID, resource_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ },
+ {
+ 'test': 'nested domain-local; no compression; as-req to service',
+ 'groups': {
+ 'universal': (GroupType.UNIVERSAL, {'dom-local'}),
+ 'dom-local': (GroupType.DOMAIN_LOCAL, {user}),
+ },
+ 'configuration_invalid': True,
+ 'as:to_krbtgt': False,
+ 'as:compression': False,
+ 'as:expected': {
+ # As before, but disclaiming SID compression support, so the
+ # domain-local SID goes in Extra SIDs.
+ ('universal', SidType.BASE_SID, default_attrs),
+ ('dom-local', SidType.EXTRA_SID, resource_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ },
+ {
+ 'test': 'nested universal; as-req to krbtgt',
+ 'groups': {
+ # A similar scenario, except flipped around: a Domain-local
+ # group containing a Universal group containing the user.
+ 'dom-local': (GroupType.DOMAIN_LOCAL, {'universal'}),
+ 'universal': (GroupType.UNIVERSAL, {user}),
+ },
+ 'as:to_krbtgt': True,
+ 'as:expected': {
+ # Expect the Universal group's inclusion in the PAC.
+ ('universal', SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ },
+ {
+ 'test': 'nested universal; compression; as-req to service',
+ 'groups': {
+ 'dom-local': (GroupType.DOMAIN_LOCAL, {'universal'}),
+ 'universal': (GroupType.UNIVERSAL, {user}),
+ },
+ 'as:to_krbtgt': False,
+ 'as:expected': {
+ # Expect a service ticket to contain both SIDs.
+ ('universal', SidType.BASE_SID, default_attrs),
+ ('dom-local', SidType.RESOURCE_SID, resource_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ },
+ {
+ 'test': 'nested universal; no compression; as-req to service',
+ 'groups': {
+ 'dom-local': (GroupType.DOMAIN_LOCAL, {'universal'}),
+ 'universal': (GroupType.UNIVERSAL, {user}),
+ },
+ 'as:to_krbtgt': False,
+ 'as:compression': False,
+ 'as:expected': {
+ # As before, but disclaiming SID compression support, so the
+ # domain-local SID goes in Extra SIDs.
+ ('universal', SidType.BASE_SID, default_attrs),
+ ('dom-local', SidType.EXTRA_SID, resource_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ },
+ # TGS-REQ tests.
+ {
+ 'test': 'tgs-req to krbtgt',
+ 'groups': {
+ # A Universal group containing the user.
+ 'foo': (GroupType.UNIVERSAL, {user}),
+ },
+ 'as:to_krbtgt': True,
+ 'as:expected': {
+ ('foo', SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ # Make a TGS-REQ to the krbtgt with the user's account.
+ 'tgs:to_krbtgt': True,
+ 'tgs:expected': {
+ # Expect the same results as with an AS-REQ.
+ ('foo', SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ },
+ {
+ 'test': 'tgs-req to service',
+ 'groups': {
+ # A Universal group containing the user.
+ 'foo': (GroupType.UNIVERSAL, {user}),
+ },
+ 'as:to_krbtgt': True,
+ 'as:expected': {
+ ('foo', SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ # Make a TGS-REQ to a service with the user's account.
+ 'tgs:to_krbtgt': False,
+ 'tgs:expected': {
+ ('foo', SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ },
+ {
+ 'test': 'domain-local; tgs-req to krbtgt',
+ 'groups': {
+ # A Domain-local group containing the user.
+ 'foo': (GroupType.UNIVERSAL, {user}),
+ },
+ 'as:to_krbtgt': True,
+ 'as:expected': {
+ ('foo', SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:to_krbtgt': True,
+ 'tgs:expected': {
+ # Expect the same results as with an AS-REQ.
+ ('foo', SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ },
+ {
+ 'test': 'domain-local; compression; tgs-req to service',
+ 'groups': {
+ # A Domain-local group containing the user.
+ 'foo': (GroupType.DOMAIN_LOCAL, {user}),
+ },
+ 'as:to_krbtgt': True,
+ 'as:expected': {
+ # The Domain-local group is not present in the PAC after an
+ # AS-REQ.
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:to_krbtgt': False,
+ 'tgs:expected': {
+ # Now it's added as a resource SID after the TGS-REQ.
+ ('foo', SidType.RESOURCE_SID, resource_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ },
+ {
+ 'test': 'domain-local; no compression; tgs-req to service',
+ 'groups': {
+ # A Domain-local group containing the user.
+ 'foo': (GroupType.DOMAIN_LOCAL, {user}),
+ },
+ 'as:to_krbtgt': True,
+ # This time, the target account disclaims support for SID
+ # compression.
+ 'as:expected': {
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:to_krbtgt': False,
+ 'tgs:compression': False,
+ 'tgs:expected': {
+ # The SIDs in the PAC are the same, except the group SID is
+ # placed in Extra SIDs, not Resource SIDs.
+ ('foo', SidType.EXTRA_SID, resource_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ },
+ {
+ 'test': 'exclude asserted identity; tgs-req to krbtgt',
+ 'groups': {
+ 'foo': (GroupType.UNIVERSAL, {user}),
+ },
+ 'as:to_krbtgt': True,
+ 'tgs:to_krbtgt': True,
+ 'tgs:sids': {
+ # Remove the Asserted Identity SID from the PAC.
+ ('foo', SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:expected': {
+ # It should not be re-added in the TGS-REQ.
+ ('foo', SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ },
+ {
+ 'test': 'exclude asserted identity; tgs-req to service',
+ 'groups': {
+ 'foo': (GroupType.UNIVERSAL, {user}),
+ },
+ 'as:to_krbtgt': True,
+ # Nor should it be re-added if the TGS-REQ is directed to a
+ # service.
+ 'tgs:to_krbtgt': False,
+ 'tgs:sids': {
+ ('foo', SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:expected': {
+ ('foo', SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ },
+ {
+ 'test': 'exclude claims valid; tgs-req to krbtgt',
+ 'groups': {
+ 'foo': (GroupType.UNIVERSAL, {user}),
+ },
+ 'as:to_krbtgt': True,
+ 'tgs:to_krbtgt': True,
+ 'tgs:sids': {
+ # Remove the Claims Valid SID from the PAC.
+ ('foo', SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ },
+ 'tgs:expected': {
+ # It should not be re-added in the TGS-REQ.
+ ('foo', SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ },
+ },
+ {
+ 'test': 'exclude claims valid; tgs-req to service',
+ 'groups': {
+ 'foo': (GroupType.UNIVERSAL, {user}),
+ },
+ 'as:to_krbtgt': True,
+ # Nor should it be re-added if the TGS-REQ is directed to a
+ # service.
+ 'tgs:to_krbtgt': False,
+ 'tgs:sids': {
+ ('foo', SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ },
+ 'tgs:expected': {
+ ('foo', SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ },
+ },
+ {
+ 'test': 'user group removal; tgs-req to krbtgt',
+ 'groups': {
+ # The user has been removed from the group...
+ 'foo': (GroupType.UNIVERSAL, {}),
+ },
+ 'as:to_krbtgt': True,
+ 'tgs:to_krbtgt': True,
+ 'tgs:sids': {
+ # ...but the user's PAC still contains the group SID.
+ ('foo', SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:expected': {
+ # The group SID should not be removed when a TGS-REQ is
+ # performed.
+ ('foo', SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ },
+ {
+ 'test': 'user group removal; tgs-req to service',
+ 'groups': {
+ 'foo': (GroupType.UNIVERSAL, {}),
+ },
+ 'as:to_krbtgt': True,
+ # Likewise, but to a service.
+ 'tgs:to_krbtgt': False,
+ 'tgs:sids': {
+ ('foo', SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:expected': {
+ ('foo', SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ },
+ {
+ 'test': 'nested group removal; tgs-req to krbtgt',
+ 'groups': {
+ # A Domain-local group contains a Universal group, of which the
+ # user is no longer a member...
+ 'dom-local': (GroupType.DOMAIN_LOCAL, {'universal'}),
+ 'universal': (GroupType.UNIVERSAL, {}),
+ },
+ 'as:to_krbtgt': True,
+ 'tgs:to_krbtgt': True,
+ 'tgs:sids': {
+ # ...but the user's PAC still contains the group SID.
+ ('universal', SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:expected': {
+ # The group SID should not be removed when a TGS-REQ is
+ # performed.
+ ('universal', SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ },
+ {
+ 'test': 'nested group removal; compression; tgs-req to service',
+ 'groups': {
+ # A Domain-local group contains a Universal group, of which the
+ # user is no longer a member...
+ 'dom-local': (GroupType.DOMAIN_LOCAL, {'universal'}),
+ 'universal': (GroupType.UNIVERSAL, {}),
+ },
+ 'as:to_krbtgt': True,
+ 'tgs:to_krbtgt': False,
+ 'tgs:sids': {
+ # ...but the user's PAC still contains the group SID.
+ ('universal', SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:expected': {
+ # Both SIDs should be present in the PAC when a TGS-REQ is
+ # performed.
+ ('universal', SidType.BASE_SID, default_attrs),
+ ('dom-local', SidType.RESOURCE_SID, resource_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ },
+ {
+ 'test': 'nested group removal; no compression; tgs-req to service',
+ 'groups': {
+ 'dom-local': (GroupType.DOMAIN_LOCAL, {'universal'}),
+ 'universal': (GroupType.UNIVERSAL, {}),
+ },
+ 'as:to_krbtgt': True,
+ 'tgs:to_krbtgt': False,
+ # The same again, but with the server not supporting compression.
+ 'tgs:compression': False,
+ 'tgs:sids': {
+ ('universal', SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:expected': {
+ # The domain-local SID will go into Extra SIDs.
+ ('universal', SidType.BASE_SID, default_attrs),
+ ('dom-local', SidType.EXTRA_SID, resource_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ },
+ {
+ 'test': 'user group addition; tgs-req to krbtgt',
+ 'groups': {
+ # The user is a member of the group...
+ 'foo': (GroupType.UNIVERSAL, {user}),
+ },
+ 'as:to_krbtgt': True,
+ 'tgs:to_krbtgt': True,
+ 'tgs:sids': {
+ # ...but the user's PAC still lacks the group SID.
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:expected': {
+ # The group SID should be omitted when a TGS-REQ is
+ # performed.
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ },
+ {
+ 'test': 'user group addition; tgs-req to service',
+ 'groups': {
+ 'foo': (GroupType.UNIVERSAL, {user}),
+ },
+ 'as:to_krbtgt': True,
+ # Likewise, but to a service.
+ 'tgs:to_krbtgt': False,
+ 'tgs:sids': {
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:expected': {
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ },
+ {
+ 'test': 'nested group addition; tgs-req to krbtgt',
+ 'groups': {
+ # A Domain-local group contains a Universal group, of which the
+ # user is now a member...
+ 'dom-local': (GroupType.DOMAIN_LOCAL, {'universal'}),
+ 'universal': (GroupType.UNIVERSAL, {user}),
+ },
+ 'as:to_krbtgt': True,
+ 'tgs:to_krbtgt': True,
+ 'tgs:sids': {
+ # ...but the user's PAC still lacks the group SID.
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:expected': {
+ # The group SID should still be missing when a TGS-REQ is
+ # performed.
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ },
+ {
+ 'test': 'nested group addition; compression; tgs-req to service',
+ 'groups': {
+ # A Domain-local group contains a Universal group, of which the
+ # user is now a member...
+ 'dom-local': (GroupType.DOMAIN_LOCAL, {'universal'}),
+ 'universal': (GroupType.UNIVERSAL, {user}),
+ },
+ 'as:to_krbtgt': True,
+ 'tgs:to_krbtgt': False,
+ 'tgs:sids': {
+ # ...but the user's PAC still lacks the group SID.
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:expected': {
+ # Both SIDs should be omitted from the PAC when a TGS-REQ is
+ # performed.
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ },
+ {
+ 'test': 'nested group addition; no compression; tgs-req to service',
+ 'groups': {
+ 'dom-local': (GroupType.DOMAIN_LOCAL, {'universal'}),
+ 'universal': (GroupType.UNIVERSAL, {user}),
+ },
+ 'as:to_krbtgt': True,
+ 'tgs:to_krbtgt': False,
+ # The same again, but with the server not supporting compression.
+ 'tgs:compression': False,
+ 'tgs:sids': {
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:expected': {
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ },
+ {
+ 'test': 'resource sids given; tgs-req to krbtgt',
+ 'groups': {
+ # A couple of independent domain-local groups.
+ 'dom-local-0': (GroupType.DOMAIN_LOCAL, {}),
+ 'dom-local-1': (GroupType.DOMAIN_LOCAL, {}),
+ },
+ 'as:to_krbtgt': True,
+ 'tgs:to_krbtgt': True,
+ 'tgs:sids': {
+ # The TGT contains two resource SIDs for the domain-local
+ # groups.
+ ('dom-local-0', SidType.RESOURCE_SID, resource_attrs),
+ ('dom-local-1', SidType.RESOURCE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:expected': {
+ # The resource SIDs remain after performing a TGS-REQ to the
+ # krbtgt.
+ ('dom-local-0', SidType.RESOURCE_SID, resource_attrs),
+ ('dom-local-1', SidType.RESOURCE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ },
+ {
+ 'test': 'resource sids wrongly given; tgs-req to krbtgt',
+ 'groups': {
+ 'dom-local-0': (GroupType.DOMAIN_LOCAL, {}),
+ 'dom-local-1': (GroupType.DOMAIN_LOCAL, {}),
+ },
+ 'as:to_krbtgt': True,
+ 'tgs:to_krbtgt': True,
+ # Though we have provided resource SIDs, we have reset the flag
+ # indicating that they are present.
+ 'tgs:reset_user_flags': netlogon.NETLOGON_RESOURCE_GROUPS,
+ 'tgs:sids': {
+ ('dom-local-0', SidType.RESOURCE_SID, resource_attrs),
+ ('dom-local-1', SidType.RESOURCE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:expected': {
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ # The resource SIDs remain in the PAC.
+ ('dom-local-0', SidType.RESOURCE_SID, resource_attrs),
+ ('dom-local-1', SidType.RESOURCE_SID, default_attrs),
+ },
+ },
+ {
+ 'test': 'resource sids claimed given; tgs-req to krbtgt',
+ 'groups': {
+ },
+ 'as:to_krbtgt': True,
+ 'tgs:to_krbtgt': True,
+ # Though we claim to have provided resource SIDs, we have not
+ # actually done so.
+ 'tgs:set_user_flags': netlogon.NETLOGON_RESOURCE_GROUPS,
+ 'tgs:sids': {
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:expected': {
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ },
+ {
+ 'test': 'resource sids given; compression; tgs-req to service',
+ 'groups': {
+ 'dom-local-0': (GroupType.DOMAIN_LOCAL, {}),
+ 'dom-local-1': (GroupType.DOMAIN_LOCAL, {}),
+ },
+ 'as:to_krbtgt': True,
+ 'tgs:to_krbtgt': False,
+ 'tgs:sids': {
+ ('dom-local-0', SidType.RESOURCE_SID, resource_attrs),
+ ('dom-local-1', SidType.RESOURCE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:expected': {
+ # The resource SIDs are removed upon issuing a service ticket.
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ },
+ {
+ 'test': 'resource sids given; no compression; tgs-req to service',
+ 'groups': {
+ 'dom-local-0': (GroupType.DOMAIN_LOCAL, {}),
+ 'dom-local-1': (GroupType.DOMAIN_LOCAL, {}),
+ },
+ 'as:to_krbtgt': True,
+ 'tgs:to_krbtgt': False,
+ # Compression is disabled on the service account.
+ 'tgs:compression': False,
+ 'tgs:sids': {
+ ('dom-local-0', SidType.RESOURCE_SID, resource_attrs),
+ ('dom-local-1', SidType.RESOURCE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:expected': {
+ # The resource SIDs are again removed.
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ },
+ # Testing operability with older Samba versions.
+ {
+ 'test': 'domain-local; Samba 4.17; tgs-req to krbtgt',
+ 'groups': {
+ 'foo': (GroupType.DOMAIN_LOCAL, {user}),
+ },
+ 'as:to_krbtgt': True,
+ 'tgs:to_krbtgt': True,
+ 'tgs:sids': {
+ # In Samba 4.17, domain-local groups are contained within the
+ # TGT, and do not have the SE_GROUP_RESOURCE bit set.
+ ('foo', SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ },
+ 'tgs:expected': {
+ # After the TGS-REQ, the domain-local group remains in the PAC
+ # with its original attributes.
+ ('foo', SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ },
+ },
+ {
+ 'test': 'domain-local; Samba 4.17; compression; tgs-req to service',
+ 'groups': {
+ 'foo': (GroupType.DOMAIN_LOCAL, {user}),
+ },
+ 'as:to_krbtgt': True,
+ # The same scenario, but requesting a service ticket.
+ 'tgs:to_krbtgt': False,
+ 'tgs:compression': True,
+ 'tgs:sids': {
+ ('foo', SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ },
+ 'tgs:expected': {
+ # The domain-local group remains in the PAC...
+ ('foo', SidType.BASE_SID, default_attrs),
+ # and another copy is added in Resource SIDs. This one has the
+ # SE_GROUP_RESOURCE bit set.
+ ('foo', SidType.RESOURCE_SID, resource_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ },
+ },
+ {
+ 'test': 'domain-local; Samba 4.17; no compression; tgs-req to service',
+ 'groups': {
+ 'foo': (GroupType.DOMAIN_LOCAL, {user}),
+ },
+ 'as:to_krbtgt': True,
+ 'tgs:to_krbtgt': False,
+ # In this case compression is disabled on the service.
+ 'tgs:compression': False,
+ 'tgs:sids': {
+ ('foo', SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ },
+ 'tgs:expected': {
+ ('foo', SidType.BASE_SID, default_attrs),
+            # Without compression, the domain-local group appears in Extra
+ ('foo', SidType.EXTRA_SID, resource_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ },
+ },
+ # Simulate a ticket coming in over a trust.
+ {
+ 'test': 'from trust; to krbtgt',
+ 'groups': {
+ # The user belongs to a couple of domain-local groups in our
+ # domain.
+ 'foo': (GroupType.DOMAIN_LOCAL, {trust_user}),
+ 'bar': (GroupType.DOMAIN_LOCAL, {'foo'}),
+ },
+ 'as:to_krbtgt': True,
+ 'tgs:to_krbtgt': True,
+ # The user SID is from a different domain.
+ 'tgs:user_sid': trust_user,
+ 'tgs:sids': {
+ (trust_user, SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ # This dummy resource SID comes from the trusted domain.
+ (f'{trust_domain}-333', SidType.RESOURCE_SID, resource_attrs),
+ },
+ 'tgs:expected': {
+ # After performing a TGS-REQ to the krbtgt, the PAC remains
+ # unchanged.
+ (trust_user, SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ (f'{trust_domain}-333', SidType.RESOURCE_SID, resource_attrs),
+ },
+ },
+ {
+ 'test': 'from trust; compression; to service',
+ 'groups': {
+ 'foo': (GroupType.DOMAIN_LOCAL, {trust_user}),
+ 'bar': (GroupType.DOMAIN_LOCAL, {'foo'}),
+ },
+ 'as:to_krbtgt': True,
+ # The same thing, but to a service.
+ 'tgs:to_krbtgt': False,
+ 'tgs:compression': True,
+ 'tgs:user_sid': trust_user,
+ 'tgs:sids': {
+ (trust_user, SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (f'{trust_domain}-333', SidType.RESOURCE_SID, resource_attrs),
+ },
+ 'tgs:expected': {
+ (trust_user, SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ # The resource SIDs are added to the PAC.
+ ('foo', SidType.RESOURCE_SID, resource_attrs),
+ ('bar', SidType.RESOURCE_SID, resource_attrs),
+ },
+ },
+ # Simulate a ticket coming in over a trust
+ {
+ 'test': 'from trust; no compression; to service',
+ 'groups': {
+ 'foo': (GroupType.DOMAIN_LOCAL, {trust_user}),
+ 'bar': (GroupType.DOMAIN_LOCAL, {'foo'}),
+ },
+ 'as:to_krbtgt': True,
+ 'tgs:to_krbtgt': False,
+ # And again, but this time compression is disabled.
+ 'tgs:compression': False,
+ 'tgs:user_sid': trust_user,
+ 'tgs:sids': {
+ (trust_user, SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (f'{trust_domain}-333', SidType.RESOURCE_SID, resource_attrs),
+ },
+ 'tgs:expected': {
+ (trust_user, SidType.BASE_SID, default_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.PRIMARY_GID, None),
+ # The resource SIDs are added again, but this time to Extra
+ # SIDs.
+ ('foo', SidType.EXTRA_SID, resource_attrs),
+ ('bar', SidType.EXTRA_SID, resource_attrs),
+ },
+ },
+ # Test a group being the primary one for the user.
+ {
+ 'test': 'primary universal; as-req to krbtgt',
+ 'groups': {
+ 'foo': (GroupType.UNIVERSAL, {user}),
+ },
+ # Set this group as our primary group.
+ 'primary_group': 'foo',
+ 'as:to_krbtgt': True,
+ 'as:expected': {
+ # It appears in the PAC as normal.
+ ('foo', SidType.BASE_SID, default_attrs),
+ ('foo', SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ },
+ {
+ 'test': 'primary universal; as-req to service',
+ 'groups': {
+ 'foo': (GroupType.UNIVERSAL, {user}),
+ },
+ # Set this group as our primary group.
+ 'primary_group': 'foo',
+ # The request is made to a service.
+ 'as:to_krbtgt': False,
+ 'as:expected': {
+ # The group appears in the PAC as normal.
+ ('foo', SidType.BASE_SID, default_attrs),
+ ('foo', SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ },
+ # Test domain-local primary groups.
+ {
+ 'test': 'primary domain-local; as-req to krbtgt',
+ 'groups': {
+ 'foo': (GroupType.DOMAIN_LOCAL, {user}),
+ },
+ # Though Windows normally disallows setting a domain-local group as
+ # a primary group, Samba does not.
+ 'primary_group': 'foo',
+ 'as:to_krbtgt': True,
+ 'as:expected': {
+ # The domain-local group appears as our primary GID, but does
+ # not appear in the base SIDs.
+ ('foo', SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ },
+ {
+ 'test': 'primary domain-local; compression; as-req to service',
+ 'groups': {
+ 'foo': (GroupType.DOMAIN_LOCAL, {user}),
+ },
+ 'primary_group': 'foo',
+ # The same test, but the request is made to a service.
+ 'as:to_krbtgt': False,
+ 'as:expected': {
+            # The domain-local group still only appears as our primary GID.
+ ('foo', SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ },
+ {
+ 'test': 'primary domain-local; no compression; as-req to service',
+ 'groups': {
+ 'foo': (GroupType.DOMAIN_LOCAL, {user}),
+ },
+ 'primary_group': 'foo',
+ 'as:to_krbtgt': False,
+ # This time, the target account disclaims support for SID
+ # compression.
+ 'as:compression': False,
+ 'as:expected': {
+ ('foo', SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ },
+ {
+ 'test': 'primary domain-local; tgs-req to krbtgt',
+ 'groups': {
+ 'foo': (GroupType.DOMAIN_LOCAL, {user}),
+ },
+ # Though Windows normally disallows setting a domain-local group as
+ # a primary group, Samba does not.
+ 'primary_group': 'foo',
+ 'as:to_krbtgt': True,
+ 'as:expected': {
+ # The domain-local group appears as our primary GID, but does
+ # not appear in the base SIDs.
+ ('foo', SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:to_krbtgt': True,
+ 'tgs:expected': {
+ # The domain-local group does not appear in the base SIDs.
+ ('foo', SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ },
+ {
+ 'test': 'primary domain-local; compression; tgs-req to service',
+ 'groups': {
+ 'foo': (GroupType.DOMAIN_LOCAL, {user}),
+ },
+ # Though Windows normally disallows setting a domain-local group as
+ # a primary group, Samba does not.
+ 'primary_group': 'foo',
+ 'as:to_krbtgt': True,
+ 'as:expected': {
+ # The domain-local group appears as our primary GID, but does
+ # not appear in the base SIDs.
+ ('foo', SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+        # The request is made to a service.
+ 'tgs:to_krbtgt': False,
+ 'tgs:expected': {
+            # The domain-local group still only appears as our primary GID.
+ ('foo', SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ },
+ {
+ 'test': 'primary domain-local; no compression; tgs-req to service',
+ 'groups': {
+ 'foo': (GroupType.DOMAIN_LOCAL, {user}),
+ },
+ # Though Windows normally disallows setting a domain-local group as
+ # a primary group, Samba does not.
+ 'primary_group': 'foo',
+ 'as:to_krbtgt': True,
+ 'as:expected': {
+ # The domain-local group appears as our primary GID, but does
+ # not appear in the base SIDs.
+ ('foo', SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:to_krbtgt': False,
+ # The service does not support compression.
+ 'tgs:compression': False,
+ 'tgs:expected': {
+ ('foo', SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ },
+ # Test the scenario where we belong to a now-domain-local group, and
+ # possess an old TGT issued when the group was still our primary one.
+ {
+ 'test': 'old primary domain-local; tgs-req to krbtgt',
+ 'groups': {
+ # A domain-local group to which we belong.
+ 'foo': (GroupType.DOMAIN_LOCAL, {user}),
+ },
+ 'as:to_krbtgt': True,
+ 'tgs:to_krbtgt': True,
+ 'tgs:sids': {
+ # In the PAC, the group has the attributes of an ordinary
+ # group...
+ ('foo', SidType.BASE_SID, default_attrs),
+ # ...and remains our primary one.
+ ('foo', SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:expected': {
+ # The groups don't change.
+ ('foo', SidType.BASE_SID, default_attrs),
+ ('foo', SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ },
+ {
+ 'test': 'old primary domain-local; compression; tgs-req to service',
+ 'groups': {
+ 'foo': (GroupType.DOMAIN_LOCAL, {user}),
+ },
+ 'as:to_krbtgt': True,
+ # The TGS request is made to a service.
+ 'tgs:to_krbtgt': False,
+ 'tgs:sids': {
+ ('foo', SidType.BASE_SID, default_attrs),
+ ('foo', SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:expected': {
+ ('foo', SidType.BASE_SID, default_attrs),
+ ('foo', SidType.PRIMARY_GID, None),
+ # The group is added a second time to the PAC, now as a
+ # resource group.
+ ('foo', SidType.RESOURCE_SID, resource_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ },
+ {
+ 'test': 'old primary domain-local; no compression; tgs-req to service',
+ 'groups': {
+ 'foo': (GroupType.DOMAIN_LOCAL, {user}),
+ },
+ 'as:to_krbtgt': True,
+ 'tgs:to_krbtgt': False,
+ # The target service doesn't support SID compression.
+ 'tgs:compression': False,
+ 'tgs:sids': {
+ ('foo', SidType.BASE_SID, default_attrs),
+ ('foo', SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:expected': {
+ ('foo', SidType.BASE_SID, default_attrs),
+ ('foo', SidType.PRIMARY_GID, None),
+ # This time, the group is added to Extra SIDs.
+ ('foo', SidType.EXTRA_SID, resource_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ },
+ # Test the scenario where we possess an old TGT issued when a
+ # now-domain-local group was still our primary one. We no longer belong
+ # to that group, which itself belongs to another domain-local group.
+ {
+ 'test': 'old primary domain-local; transitive; tgs-req to krbtgt',
+ 'groups': {
+ 'bar': (GroupType.DOMAIN_LOCAL, {'foo'}),
+ 'foo': (GroupType.DOMAIN_LOCAL, {}),
+ },
+ 'as:to_krbtgt': True,
+ 'tgs:to_krbtgt': True,
+ 'tgs:sids': {
+ # In the PAC, the group has the attributes of an ordinary
+ # group...
+ ('foo', SidType.BASE_SID, default_attrs),
+ # ...and remains our primary one.
+ ('foo', SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:expected': {
+ # The groups don't change.
+ ('foo', SidType.BASE_SID, default_attrs),
+ ('foo', SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ },
+ {
+ 'test': 'old primary domain-local; transitive; compression; tgs-req to service',
+ 'groups': {
+ 'bar': (GroupType.DOMAIN_LOCAL, {'foo'}),
+ 'foo': (GroupType.DOMAIN_LOCAL, {}),
+ },
+ 'as:to_krbtgt': True,
+ # The TGS request is made to a service.
+ 'tgs:to_krbtgt': False,
+ 'tgs:sids': {
+ ('foo', SidType.BASE_SID, default_attrs),
+ ('foo', SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:expected': {
+ ('foo', SidType.BASE_SID, default_attrs),
+ ('foo', SidType.PRIMARY_GID, None),
+            # The second domain-local group ('bar') is added to the PAC as
+            # a resource group.
+ ('bar', SidType.RESOURCE_SID, resource_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ },
+ {
+ 'test': 'old primary domain-local; transitive; no compression; tgs-req to service',
+ 'groups': {
+ 'bar': (GroupType.DOMAIN_LOCAL, {'foo'}),
+ 'foo': (GroupType.DOMAIN_LOCAL, {}),
+ },
+ 'as:to_krbtgt': True,
+ 'tgs:to_krbtgt': False,
+ # The target service doesn't support SID compression.
+ 'tgs:compression': False,
+ 'tgs:sids': {
+ ('foo', SidType.BASE_SID, default_attrs),
+ ('foo', SidType.PRIMARY_GID, None),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ 'tgs:expected': {
+ ('foo', SidType.BASE_SID, default_attrs),
+ ('foo', SidType.PRIMARY_GID, None),
+ # This time, the group is added to Extra SIDs.
+ ('bar', SidType.EXTRA_SID, resource_attrs),
+ (asserted_identity, SidType.EXTRA_SID, default_attrs),
+ (security.DOMAIN_RID_USERS, SidType.BASE_SID, default_attrs),
+ (security.SID_CLAIMS_VALID, SidType.EXTRA_SID, default_attrs),
+ },
+ },
+ ]
+
+ # This is the main function to handle a single testcase.
+ def _test_group_with_args(self, case):
+ # The group arrangement for the test.
+ group_setup = case.pop('groups')
+
+ # A group that should be the primary group for the user.
+ primary_group = case.pop('primary_group', None)
+
+ # Whether the AS-REQ or TGS-REQ should be directed to the krbtgt.
+ as_to_krbtgt = case.pop('as:to_krbtgt')
+ tgs_to_krbtgt = case.pop('tgs:to_krbtgt', None)
+
+ # Whether the target server of the AS-REQ or TGS-REQ should support
+ # resource SID compression.
+ as_compression = case.pop('as:compression', None)
+ tgs_compression = case.pop('tgs:compression', None)
+
+ # Optional SIDs to replace those in the PAC prior to a TGS-REQ.
+ tgs_sids = case.pop('tgs:sids', None)
+
+ # Optional user SID to replace that in the PAC prior to a TGS-REQ.
+ tgs_user_sid = case.pop('tgs:user_sid', None)
+
+ # User flags that may be set or reset in the PAC prior to a TGS-REQ.
+ tgs_set_user_flags = case.pop('tgs:set_user_flags', None)
+ tgs_reset_user_flags = case.pop('tgs:reset_user_flags', None)
+
+ # The SIDs we expect to see in the PAC after a AS-REQ or a TGS-REQ.
+ as_expected = case.pop('as:expected', None)
+ tgs_expected = case.pop('tgs:expected', None)
+
+ # There should be no parameters remaining in the testcase.
+ self.assertFalse(case, 'unexpected parameters in testcase')
+
+ if as_expected is None:
+ self.assertIsNotNone(tgs_expected,
+ 'no set of expected SIDs is provided')
+
+ if as_to_krbtgt is None:
+ as_to_krbtgt = False
+
+ if not as_to_krbtgt:
+ self.assertIsNone(tgs_expected,
+ "if we're performing a TGS-REQ, then AS-REQ "
+ "should be directed to the krbtgt")
+
+ if tgs_to_krbtgt is None:
+ tgs_to_krbtgt = False
+ else:
+ self.assertIsNotNone(tgs_expected,
+ 'specified TGS request to krbtgt, but no '
+ 'expected SIDs provided')
+
+ if tgs_compression is not None:
+ self.assertIsNotNone(tgs_expected,
+ 'specified compression for TGS request, but '
+ 'no expected SIDs provided')
+
+ if tgs_user_sid is not None:
+ self.assertIsNotNone(tgs_sids,
+ 'specified TGS-REQ user SID, but no '
+ 'accompanying SIDs provided')
+
+ if tgs_set_user_flags is None:
+ tgs_set_user_flags = 0
+ else:
+ self.assertIsNotNone(tgs_sids,
+ 'specified TGS-REQ set user flags, but no '
+ 'accompanying SIDs provided')
+
+ if tgs_reset_user_flags is None:
+ tgs_reset_user_flags = 0
+ else:
+ self.assertIsNotNone(tgs_sids,
+ 'specified TGS-REQ reset user flags, but no '
+ 'accompanying SIDs provided')
+
+ samdb = self.get_samdb()
+
+ domain_sid = samdb.get_domain_sid()
+
+ # Create the user account. It needs to be freshly created rather than
+ # cached because we will probably add it to one or more groups.
+ user_creds = self.get_cached_creds(
+ account_type=self.AccountType.USER,
+ use_cache=False)
+ user_dn = user_creds.get_dn()
+ user_sid = user_creds.get_sid()
+ user_name = user_creds.get_username()
+ salt = user_creds.get_salt()
+
+ trust_user_rid = random.randint(2000, 0xfffffffe)
+ trust_user_sid = f'{self.trust_domain}-{trust_user_rid}'
+
+ cname = self.PrincipalName_create(name_type=NT_PRINCIPAL,
+ names=user_name.split('/'))
+
+ preauth_key = self.PasswordKey_from_creds(user_creds,
+ kcrypto.Enctype.AES256)
+
+ ts_enc_padata = self.get_enc_timestamp_pa_data_from_key(preauth_key)
+ padata = [ts_enc_padata]
+
+ target_creds, sname = self.get_target(as_to_krbtgt,
+ compression=as_compression)
+ decryption_key = self.TicketDecryptionKey_from_creds(target_creds)
+
+ target_supported_etypes = target_creds.tgs_supported_enctypes
+ realm = target_creds.get_realm()
+
+ # Initialise the group mapping with the user and trust principals.
+ user_principal = Principal(user_dn, user_sid)
+ trust_principal = Principal(None, trust_user_sid)
+ preexisting_groups = {
+ self.user: user_principal,
+ self.trust_user: trust_principal,
+ }
+ if primary_group is not None:
+ primary_groups = {
+ user_principal: primary_group,
+ }
+ else:
+ primary_groups = None
+ groups = self.setup_groups(samdb,
+ preexisting_groups,
+ group_setup,
+ primary_groups)
+ del group_setup
+
+ if tgs_user_sid is None:
+ tgs_user_sid = user_sid
+ elif tgs_user_sid in groups:
+ tgs_user_sid = groups[tgs_user_sid].sid
+
+ tgs_domain_sid, tgs_user_rid = tgs_user_sid.rsplit('-', 1)
+
+ expected_groups = self.map_sids(as_expected, groups,
+ domain_sid)
+ tgs_sids_mapped = self.map_sids(tgs_sids, groups,
+ tgs_domain_sid)
+ tgs_expected_mapped = self.map_sids(tgs_expected, groups,
+ tgs_domain_sid)
+
+ till = self.get_KerberosTime(offset=36000)
+ kdc_options = '0'
+
+ etypes = self.get_default_enctypes(user_creds)
+
+ # Perform an AS-REQ with the user account.
+ as_rep, kdc_exchange_dict = self._test_as_exchange(
+ creds=user_creds,
+ cname=cname,
+ realm=realm,
+ sname=sname,
+ till=till,
+ expected_error_mode=0,
+ expected_crealm=realm,
+ expected_cname=cname,
+ expected_srealm=realm,
+ expected_sname=sname,
+ expected_salt=salt,
+ etypes=etypes,
+ padata=padata,
+ kdc_options=kdc_options,
+ expected_account_name=user_name,
+ expected_groups=expected_groups,
+ expected_sid=user_sid,
+ expected_domain_sid=domain_sid,
+ expected_supported_etypes=target_supported_etypes,
+ preauth_key=preauth_key,
+ ticket_decryption_key=decryption_key)
+ self.check_as_reply(as_rep)
+
+ ticket = kdc_exchange_dict['rep_ticket_creds']
+
+ if tgs_expected is None:
+ # We're not performing a TGS-REQ, so we're done.
+ self.assertIsNone(tgs_sids,
+ 'provided SIDs to populate PAC for TGS-REQ, but '
+ 'failed to specify expected SIDs')
+ return
+
+ if tgs_sids is not None:
+ # Replace the SIDs in the PAC with the ones provided by the test.
+ ticket = self.ticket_with_sids(ticket,
+ tgs_sids_mapped,
+ tgs_domain_sid,
+ tgs_user_rid,
+ set_user_flags=tgs_set_user_flags,
+ reset_user_flags=tgs_reset_user_flags)
+
+ target_creds, sname = self.get_target(tgs_to_krbtgt,
+ compression=tgs_compression)
+ decryption_key = self.TicketDecryptionKey_from_creds(target_creds)
+
+ subkey = self.RandomKey(ticket.session_key.etype)
+
+ requester_sid = None
+ if tgs_to_krbtgt:
+ requester_sid = user_sid
+
+ expect_resource_groups_flag = None
+ if tgs_reset_user_flags & netlogon.NETLOGON_RESOURCE_GROUPS:
+ expect_resource_groups_flag = False
+ elif tgs_set_user_flags & netlogon.NETLOGON_RESOURCE_GROUPS:
+ expect_resource_groups_flag = True
+
+ # Perform a TGS-REQ with the user account.
+
+ kdc_exchange_dict = self.tgs_exchange_dict(
+ creds=user_creds,
+ expected_crealm=ticket.crealm,
+ expected_cname=cname,
+ expected_srealm=realm,
+ expected_sname=sname,
+ expected_account_name=user_name,
+ expected_groups=tgs_expected_mapped,
+ expected_sid=tgs_user_sid,
+ expected_requester_sid=requester_sid,
+ expected_domain_sid=tgs_domain_sid,
+ expected_supported_etypes=target_supported_etypes,
+ expect_resource_groups_flag=expect_resource_groups_flag,
+ ticket_decryption_key=decryption_key,
+ check_rep_fn=self.generic_check_kdc_rep,
+ check_kdc_private_fn=self.generic_check_kdc_private,
+ tgt=ticket,
+ authenticator_subkey=subkey,
+ kdc_options=kdc_options)
+
+ rep = self._generic_kdc_exchange(kdc_exchange_dict,
+ cname=None,
+ realm=realm,
+ sname=sname,
+ till_time=till,
+ etypes=etypes)
+ self.check_reply(rep, KRB_TGS_REP)
+
+
+if __name__ == '__main__':
+ global_asn1_print = False
+ global_hexdump = False
+ import unittest
+ unittest.main()
diff --git a/python/samba/tests/krb5/kcrypto.py b/python/samba/tests/krb5/kcrypto.py
new file mode 100755
index 0000000..c0a0990
--- /dev/null
+++ b/python/samba/tests/krb5/kcrypto.py
@@ -0,0 +1,969 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2013 by the Massachusetts Institute of Technology.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+# OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# XXX current status:
+# * Done and tested
+# - AES encryption, checksum, string2key, prf
+# - cf2 (needed for FAST)
+# * Still to do:
+# - DES enctypes and cksumtypes
+# - RC4 exported enctype (if we need it for anything)
+# - Unkeyed checksums
+# - Special RC4, raw DES/DES3 operations for GSSAPI
+# * Difficult or low priority:
+# - Camellia not supported by PyCrypto
+# - Cipher state only needed for kcmd suite
+# - Nonstandard enctypes and cksumtypes like des-hmac-sha1
+
+import sys
+import os
+
+sys.path.insert(0, "bin/python")
+os.environ["PYTHONUNBUFFERED"] = "1"
+
+from math import gcd
+from functools import reduce
+from struct import pack, unpack
+from binascii import crc32, b2a_hex
+from cryptography.hazmat.primitives import hashes
+from cryptography.hazmat.primitives import hmac
+from cryptography.hazmat.primitives.ciphers import algorithms as ciphers
+from cryptography.hazmat.primitives.ciphers import modes
+from cryptography.hazmat.primitives.ciphers.base import Cipher
+from cryptography.hazmat.backends import default_backend
+from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
+from samba.tests import TestCase
+from samba.credentials import Credentials
+from samba import generate_random_bytes as get_random_bytes
+from samba.common import get_string, get_bytes
+
+
class Enctype(object):
    # Kerberos encryption type numbers (RFC 3961 / RFC 4757 registry).
    DES_CRC = 1
    DES_MD4 = 2
    DES_MD5 = 3
    DES3 = 16
    AES128 = 17
    AES256 = 18
    RC4 = 23
+
+
class Cksumtype(object):
    # Kerberos checksum type numbers.  HMAC_MD5 is Microsoft's
    # negative "private" assignment from RFC 4757.
    CRC32 = 1
    MD4 = 2
    MD4_DES = 3
    MD5 = 7
    MD5_DES = 8
    SHA1_DES3 = 12
    SHA1 = 14
    SHA1_AES128 = 15
    SHA1_AES256 = 16
    HMAC_MD5 = -138
+
+
class InvalidChecksum(ValueError):
    """Raised when ciphertext or checksum integrity verification fails."""
    pass
+
+
+def _zeropad(s, padsize):
+ # Return s padded with 0 bytes to a multiple of padsize.
+ padlen = (padsize - (len(s) % padsize)) % padsize
+ return s + bytes(padlen)
+
+
+def _xorbytes(b1, b2):
+ # xor two strings together and return the resulting string.
+ assert len(b1) == len(b2)
+ return bytes([x ^ y for x, y in zip(b1, b2)])
+
+
+def _mac_equal(mac1, mac2):
+ # Constant-time comparison function. (We can't use HMAC.verify
+ # since we use truncated macs.)
+ assert len(mac1) == len(mac2)
+ res = 0
+ for x, y in zip(mac1, mac2):
+ res |= x ^ y
+ return res == 0
+
+
def SIMPLE_HASH(string, algo_cls):
    # Return the unkeyed digest of string using the given cryptography
    # hash algorithm class (e.g. hashes.MD5, hashes.SHA1).
    hash_ctx = hashes.Hash(algo_cls(), default_backend())
    hash_ctx.update(string)
    return hash_ctx.finalize()
+
+
def HMAC_HASH(key, string, algo_cls):
    # Return the full (untruncated) HMAC of string under key, using the
    # given cryptography hash algorithm class.
    hmac_ctx = hmac.HMAC(key, algo_cls(), default_backend())
    hmac_ctx.update(string)
    return hmac_ctx.finalize()
+
+
+def _nfold(str, nbytes):
+ # Convert str to a string of length nbytes using the RFC 3961 nfold
+ # operation.
+
+ # Rotate the bytes in str to the right by nbits bits.
+ def rotate_right(str, nbits):
+ nbytes, remain = (nbits // 8) % len(str), nbits % 8
+ return bytes([
+ (str[i - nbytes] >> remain)
+ | (str[i - nbytes - 1] << (8 - remain) & 0xff)
+ for i in range(len(str))])
+
+ # Add equal-length strings together with end-around carry.
+ def add_ones_complement(str1, str2):
+ n = len(str1)
+ v = [a + b for a, b in zip(str1, str2)]
+ # Propagate carry bits to the left until there aren't any left.
+ while any(x & ~0xff for x in v):
+ v = [(v[i - n + 1] >> 8) + (v[i] & 0xff) for i in range(n)]
+ return bytes([x for x in v])
+
+ # Concatenate copies of str to produce the least common multiple
+ # of len(str) and nbytes, rotating each copy of str to the right
+ # by 13 bits times its list position. Decompose the concatenation
+ # into slices of length nbytes, and add them together as
+ # big-endian ones' complement integers.
+ slen = len(str)
+ lcm = nbytes * slen // gcd(nbytes, slen)
+ bigstr = b''.join((rotate_right(str, 13 * i) for i in range(lcm // slen)))
+ slices = (bigstr[p:p + nbytes] for p in range(0, lcm, nbytes))
+ return reduce(add_ones_complement, slices)
+
+
+def _is_weak_des_key(keybytes):
+ return keybytes in (b'\x01\x01\x01\x01\x01\x01\x01\x01',
+ b'\xFE\xFE\xFE\xFE\xFE\xFE\xFE\xFE',
+ b'\x1F\x1F\x1F\x1F\x0E\x0E\x0E\x0E',
+ b'\xE0\xE0\xE0\xE0\xF1\xF1\xF1\xF1',
+ b'\x01\xFE\x01\xFE\x01\xFE\x01\xFE',
+ b'\xFE\x01\xFE\x01\xFE\x01\xFE\x01',
+ b'\x1F\xE0\x1F\xE0\x0E\xF1\x0E\xF1',
+ b'\xE0\x1F\xE0\x1F\xF1\x0E\xF1\x0E',
+ b'\x01\xE0\x01\xE0\x01\xF1\x01\xF1',
+ b'\xE0\x01\xE0\x01\xF1\x01\xF1\x01',
+ b'\x1F\xFE\x1F\xFE\x0E\xFE\x0E\xFE',
+ b'\xFE\x1F\xFE\x1F\xFE\x0E\xFE\x0E',
+ b'\x01\x1F\x01\x1F\x01\x0E\x01\x0E',
+ b'\x1F\x01\x1F\x01\x0E\x01\x0E\x01',
+ b'\xE0\xFE\xE0\xFE\xF1\xFE\xF1\xFE',
+ b'\xFE\xE0\xFE\xE0\xFE\xF1\xFE\xF1')
+
+
class _EnctypeProfile(object):
    # Base class for enctype profiles.  Usable enctype classes must define:
    # * enctype: enctype number
    # * keysize: protocol size of key in bytes
    # * seedsize: random_to_key input size in bytes
    # * random_to_key (if the keyspace is not dense)
    # * string_to_key
    # * encrypt
    # * decrypt
    # * prf

    @classmethod
    def random_to_key(cls, seed):
        # Default random-to-key for dense keyspaces: the seed is the key.
        # Raises ValueError if seed is not exactly seedsize bytes.
        if len(seed) != cls.seedsize:
            raise ValueError('Wrong seed length')
        return Key(cls.enctype, seed)
+
+
class _SimplifiedEnctype(_EnctypeProfile):
    # Base class for enctypes using the RFC 3961 simplified profile.
    # Defines the encrypt, decrypt, and prf methods.  Subclasses must
    # define:
    # * blocksize: Underlying cipher block size in bytes
    # * padsize: Underlying cipher padding multiple (1 or blocksize)
    # * macsize: Size of integrity MAC in bytes
    # * hashalgo: cryptography hash algorithm class of the hash function
    # * basic_encrypt, basic_decrypt: Underlying CBC/CTS cipher

    @classmethod
    def derive(cls, key, constant):
        """Derive a usage key from *key* and *constant* (RFC 3961 DK)."""
        # RFC 3961 only says to n-fold the constant only if it is
        # shorter than the cipher block size.  But all Unix
        # implementations n-fold constants if their length is larger
        # than the block size as well, and n-folding when the length
        # is equal to the block size is a no-op.
        plaintext = _nfold(constant, cls.blocksize)
        rndseed = b''
        while len(rndseed) < cls.seedsize:
            ciphertext = cls.basic_encrypt(key, plaintext)
            rndseed += ciphertext
            plaintext = ciphertext
        return cls.random_to_key(rndseed[0:cls.seedsize])

    @classmethod
    def encrypt(cls, key, keyusage, plaintext, confounder):
        """Return E(Ke, confounder | padded plaintext) | truncated HMAC."""
        ki = cls.derive(key, pack('>iB', keyusage, 0x55))
        ke = cls.derive(key, pack('>iB', keyusage, 0xAA))
        if confounder is None:
            confounder = get_random_bytes(cls.blocksize)
        basic_plaintext = confounder + _zeropad(plaintext, cls.padsize)
        # Local renamed from 'hmac' so it no longer shadows the
        # module-level cryptography hmac import.
        mac = HMAC_HASH(ki.contents, basic_plaintext, cls.hashalgo)
        return cls.basic_encrypt(ke, basic_plaintext) + mac[:cls.macsize]

    @classmethod
    def decrypt(cls, key, keyusage, ciphertext):
        """Verify the MAC and return the plaintext without confounder.

        Raises ValueError on malformed ciphertext and InvalidChecksum
        on integrity failure.
        """
        ki = cls.derive(key, pack('>iB', keyusage, 0x55))
        ke = cls.derive(key, pack('>iB', keyusage, 0xAA))
        if len(ciphertext) < cls.blocksize + cls.macsize:
            raise ValueError('ciphertext too short')
        basic_ctext, mac = ciphertext[:-cls.macsize], ciphertext[-cls.macsize:]
        if len(basic_ctext) % cls.padsize != 0:
            raise ValueError('ciphertext does not meet padding requirement')
        basic_plaintext = cls.basic_decrypt(ke, basic_ctext)
        expmac = HMAC_HASH(ki.contents, basic_plaintext, cls.hashalgo)[:cls.macsize]
        if not _mac_equal(mac, expmac):
            raise InvalidChecksum('ciphertext integrity failure')
        # Discard the confounder.
        return basic_plaintext[cls.blocksize:]

    @classmethod
    def prf(cls, key, string):
        """RFC 3961 simplified-profile pseudo-random function."""
        # Hash the input.  RFC 3961 says to truncate to the padding
        # size, but implementations truncate to the block size.
        hashval = SIMPLE_HASH(string, cls.hashalgo)
        excess = len(hashval) % cls.blocksize
        # Bug fix: the previous 'hashval[:-(len(hashval) % blocksize)]'
        # evaluated to hashval[:-0] == b'' whenever the digest length
        # was an exact multiple of the block size.  Only slice when
        # there is a partial block to drop.
        truncated = hashval[:-excess] if excess else hashval
        # Encrypt the hash with a derived key.
        kp = cls.derive(key, b'prf')
        return cls.basic_encrypt(kp, truncated)
+
+
class _DES3CBC(_SimplifiedEnctype):
    # des3-cbc-hmac-sha1-kd (RFC 3961 section 6.3).
    enctype = Enctype.DES3
    keysize = 24
    seedsize = 21
    blocksize = 8
    padsize = 8
    macsize = 20
    hashalgo = hashes.SHA1

    @classmethod
    def random_to_key(cls, seed):
        # XXX Maybe reframe as _DESEnctype.random_to_key and use that
        # way from DES3 random-to-key when DES is implemented, since
        # MIT does this instead of the RFC 3961 random-to-key.
        def expand(seed):
            # Expand 7 seed bytes to an 8-byte DES key with odd parity.
            def parity(b):
                # Return b with the low-order bit set to yield odd parity.
                b &= ~1
                # NOTE(review): the '& ~1' below is redundant -- b's low
                # bit was already cleared on the line above.
                return b if bin(b & ~1).count('1') % 2 else b | 1
            assert len(seed) == 7
            firstbytes = bytes(parity(b & ~1) for b in seed)
            # The eighth key byte collects the seed bytes' low bits.
            lastbyte = parity(sum((seed[i] & 1) << i + 1 for i in range(7)))
            keybytes = firstbytes + bytes([lastbyte])
            # Avoid DES weak/semi-weak keys by flipping the last byte.
            if _is_weak_des_key(keybytes):
                keybytes = firstbytes + bytes([lastbyte ^ 0xF0])
            return keybytes

        if len(seed) != 21:
            raise ValueError('Wrong seed length')
        k1, k2, k3 = expand(seed[:7]), expand(seed[7:14]), expand(seed[14:])
        return Key(cls.enctype, k1 + k2 + k3)

    @classmethod
    def string_to_key(cls, string, salt, params):
        # RFC 3961: tkey = random-to-key(nfold(passphrase | salt)),
        # key = DK(tkey, "kerberos").  DES3 accepts no s2k parameters.
        if params is not None and params != b'':
            raise ValueError('Invalid DES3 string-to-key parameters')
        k = cls.random_to_key(_nfold(string + salt, 21))
        return cls.derive(k, b'kerberos')

    @classmethod
    def basic_encrypt(cls, key, plaintext):
        # Plain 3DES-CBC with a zero IV; input must be block-aligned.
        assert len(plaintext) % 8 == 0
        algo = ciphers.TripleDES(key.contents)
        cbc = modes.CBC(bytes(8))
        encryptor = Cipher(algo, cbc, default_backend()).encryptor()
        ciphertext = encryptor.update(plaintext)
        return ciphertext

    @classmethod
    def basic_decrypt(cls, key, ciphertext):
        # Plain 3DES-CBC with a zero IV; input must be block-aligned.
        assert len(ciphertext) % 8 == 0
        algo = ciphers.TripleDES(key.contents)
        cbc = modes.CBC(bytes(8))
        decryptor = Cipher(algo, cbc, default_backend()).decryptor()
        plaintext = decryptor.update(ciphertext)
        return plaintext
+
+
class _AESEnctype(_SimplifiedEnctype):
    # Base class for aes128-cts and aes256-cts.
    blocksize = 16
    padsize = 1
    macsize = 12
    hashalgo = hashes.SHA1

    @classmethod
    def string_to_key(cls, string, salt, params):
        # params, when present, is a 4-byte big-endian PBKDF2 iteration
        # count; the default is 0x1000 (4096) iterations.
        (iterations,) = unpack('>L', params or b'\x00\x00\x10\x00')
        pwbytes = get_bytes(string)
        kdf = PBKDF2HMAC(algorithm=hashes.SHA1(),
                         length=cls.seedsize,
                         salt=salt,
                         iterations=iterations,
                         backend=default_backend())
        seed = kdf.derive(pwbytes)
        tkey = cls.random_to_key(seed)
        return cls.derive(tkey, b'kerberos')

    @classmethod
    def basic_encrypt(cls, key, plaintext):
        # AES-CBC followed by a ciphertext-stealing rearrangement
        # (swap the last two blocks and truncate the trailing block to
        # the plaintext length).  IV is all zeros.
        assert len(plaintext) >= 16

        algo = ciphers.AES(key.contents)
        cbc = modes.CBC(bytes(16))
        aes_ctx = Cipher(algo, cbc, default_backend())

        def aes_encrypt(plaintext):
            encryptor = aes_ctx.encryptor()
            ciphertext = encryptor.update(plaintext)
            return ciphertext

        ctext = aes_encrypt(_zeropad(plaintext, 16))
        if len(plaintext) > 16:
            # Swap the last two ciphertext blocks and truncate the
            # final block to match the plaintext length.
            lastlen = len(plaintext) % 16 or 16
            ctext = ctext[:-32] + ctext[-16:] + ctext[-32:-16][:lastlen]
        return ctext

    @classmethod
    def basic_decrypt(cls, key, ciphertext):
        # Inverse of basic_encrypt: undo CBC with ciphertext stealing.
        assert len(ciphertext) >= 16

        algo = ciphers.AES(key.contents)
        cbc = modes.CBC(bytes(16))
        aes_ctx = Cipher(algo, cbc, default_backend())

        def aes_decrypt(ciphertext):
            decryptor = aes_ctx.decryptor()
            plaintext = decryptor.update(ciphertext)
            return plaintext

        if len(ciphertext) == 16:
            return aes_decrypt(ciphertext)
        # Split the ciphertext into blocks.  The last block may be partial.
        cblocks = [ciphertext[p:p + 16] for p in range(0, len(ciphertext), 16)]
        lastlen = len(cblocks[-1])
        # CBC-decrypt all but the last two blocks.
        prev_cblock = bytes(16)
        plaintext = b''
        for b in cblocks[:-2]:
            plaintext += _xorbytes(aes_decrypt(b), prev_cblock)
            prev_cblock = b
        # Decrypt the second-to-last cipher block.  The left side of
        # the decrypted block will be the final block of plaintext
        # xor'd with the final partial cipher block; the right side
        # will be the omitted bytes of ciphertext from the final
        # block.
        b = aes_decrypt(cblocks[-2])
        lastplaintext = _xorbytes(b[:lastlen], cblocks[-1])
        omitted = b[lastlen:]
        # Decrypt the final cipher block plus the omitted bytes to get
        # the second-to-last plaintext block.
        plaintext += _xorbytes(aes_decrypt(cblocks[-1] + omitted), prev_cblock)
        return plaintext + lastplaintext
+
+
class _AES128CTS(_AESEnctype):
    # aes128-cts-hmac-sha1-96 (enctype 17).
    enctype = Enctype.AES128
    keysize = 16
    seedsize = 16
+
+
class _AES256CTS(_AESEnctype):
    # aes256-cts-hmac-sha1-96 (enctype 18).
    enctype = Enctype.AES256
    keysize = 32
    seedsize = 32
+
+
class _RC4(_EnctypeProfile):
    # rc4-hmac (RFC 4757).
    enctype = Enctype.RC4
    keysize = 16
    seedsize = 16

    @staticmethod
    def usage_str(keyusage):
        # Return a four-byte string for an RFC 3961 keyusage, using
        # the RFC 4757 rules.  Per the errata, do not map 9 to 8.
        table = {3: 8, 23: 13}
        msusage = table[keyusage] if keyusage in table else keyusage
        return pack('<i', msusage)

    @classmethod
    def string_to_key(cls, string, salt, params):
        # The RC4 key is the NT hash of the password; the salt is
        # ignored.  Samba's Credentials object computes the hash.
        utf8string = get_string(string)
        tmp = Credentials()
        tmp.set_anonymous()
        tmp.set_password(utf8string)
        nthash = tmp.get_nt_hash()
        return Key(cls.enctype, nthash)

    @classmethod
    def encrypt(cls, key, keyusage, plaintext, confounder):
        # RFC 4757: output is checksum | RC4(Ke, confounder | plaintext).
        if confounder is None:
            confounder = get_random_bytes(8)
        ki = HMAC_HASH(key.contents, cls.usage_str(keyusage), hashes.MD5)
        cksum = HMAC_HASH(ki, confounder + plaintext, hashes.MD5)
        ke = HMAC_HASH(ki, cksum, hashes.MD5)

        encryptor = Cipher(
            ciphers.ARC4(ke), None, default_backend()).encryptor()
        ctext = encryptor.update(confounder + plaintext)

        return cksum + ctext

    @classmethod
    def decrypt(cls, key, keyusage, ciphertext):
        # Minimum size: 16-byte checksum plus 8-byte confounder.
        if len(ciphertext) < 24:
            raise ValueError('ciphertext too short')
        cksum, basic_ctext = ciphertext[:16], ciphertext[16:]
        ki = HMAC_HASH(key.contents, cls.usage_str(keyusage), hashes.MD5)
        ke = HMAC_HASH(ki, cksum, hashes.MD5)

        decryptor = Cipher(
            ciphers.ARC4(ke), None, default_backend()).decryptor()
        basic_plaintext = decryptor.update(basic_ctext)

        exp_cksum = HMAC_HASH(ki, basic_plaintext, hashes.MD5)
        ok = _mac_equal(cksum, exp_cksum)
        if not ok and keyusage == 9:
            # Try again with usage 8, due to RFC 4757 errata.
            ki = HMAC_HASH(key.contents, pack('<i', 8), hashes.MD5)
            exp_cksum = HMAC_HASH(ki, basic_plaintext, hashes.MD5)
            ok = _mac_equal(cksum, exp_cksum)
        if not ok:
            raise InvalidChecksum('ciphertext integrity failure')
        # Discard the confounder.
        return basic_plaintext[8:]

    @classmethod
    def prf(cls, key, string):
        # RFC 4757 PRF: HMAC-SHA1 of the input under the raw key.
        return HMAC_HASH(key.contents, string, hashes.SHA1)
+
+
class _ChecksumProfile(object):
    # Base class for checksum profiles.  Usable checksum classes must
    # define:
    # * checksum
    # * verify (if verification is not just checksum-and-compare)
    # * checksum_len
    @classmethod
    def verify(cls, key, keyusage, text, cksum):
        # Default verification: recompute and constant-time compare.
        expected = cls.checksum(key, keyusage, text)
        if not _mac_equal(cksum, expected):
            raise InvalidChecksum('checksum verification failure')
+
+
class _SimplifiedChecksum(_ChecksumProfile):
    # Base class for checksums using the RFC 3961 simplified profile.
    # Defines the checksum and verify methods.  Subclasses must
    # define:
    # * macsize: Size of checksum in bytes
    # * enc: Profile of associated enctype

    @classmethod
    def checksum(cls, key, keyusage, text):
        # HMAC under the derived checksum key Kc, truncated to macsize.
        kc = cls.enc.derive(key, pack('>iB', keyusage, 0x99))
        hmac = HMAC_HASH(kc.contents, text, cls.enc.hashalgo)
        return hmac[:cls.macsize]

    @classmethod
    def verify(cls, key, keyusage, text, cksum):
        # The key must belong to this checksum's associated enctype.
        if key.enctype != cls.enc.enctype:
            raise ValueError('Wrong key type for checksum')
        super(_SimplifiedChecksum, cls).verify(key, keyusage, text, cksum)

    @classmethod
    def checksum_len(cls):
        return cls.macsize
+
+
class _SHA1AES128(_SimplifiedChecksum):
    # hmac-sha1-96-aes128 (cksumtype 15).
    macsize = 12
    enc = _AES128CTS
+
+
class _SHA1AES256(_SimplifiedChecksum):
    # hmac-sha1-96-aes256 (cksumtype 16).
    macsize = 12
    enc = _AES256CTS
+
+
class _SHA1DES3(_SimplifiedChecksum):
    # hmac-sha1-des3-kd (cksumtype 12).
    macsize = 20
    enc = _DES3CBC
+
+
class _HMACMD5(_ChecksumProfile):
    # RFC 4757 HMAC-MD5 checksum (Microsoft cksumtype -138).
    @classmethod
    def checksum(cls, key, keyusage, text):
        # HMAC(Ksign, MD5(usage | text)) with Ksign derived from the
        # literal string "signaturekey\0".
        ksign = HMAC_HASH(key.contents, b'signaturekey\0', hashes.MD5)
        md5hash = SIMPLE_HASH(_RC4.usage_str(keyusage) + text, hashes.MD5)
        return HMAC_HASH(ksign, md5hash, hashes.MD5)

    @classmethod
    def verify(cls, key, keyusage, text, cksum):
        # Only RC4 keys are valid for this checksum type.
        if key.enctype != Enctype.RC4:
            raise ValueError('Wrong key type for checksum')
        super(_HMACMD5, cls).verify(key, keyusage, text, cksum)

    @classmethod
    def checksum_len(cls):
        return hashes.MD5.digest_size
+
+
class _MD5(_ChecksumProfile):
    # rsa-md5 (cksumtype 7), an unkeyed checksum.
    @classmethod
    def checksum(cls, key, keyusage, text):
        # This is unkeyed!
        return SIMPLE_HASH(text, hashes.MD5)

    @classmethod
    def checksum_len(cls):
        return hashes.MD5.digest_size
+
+
class _SHA1(_ChecksumProfile):
    # sha1 (cksumtype 14), an unkeyed checksum.
    @classmethod
    def checksum(cls, key, keyusage, text):
        # This is unkeyed!
        return SIMPLE_HASH(text, hashes.SHA1)

    @classmethod
    def checksum_len(cls):
        return hashes.SHA1.digest_size
+
+
class _CRC32(_ChecksumProfile):
    # crc32 (cksumtype 1), an unkeyed checksum emitted little-endian.
    # NOTE(review): the 0xffffffff init plus final inversion adapt
    # zlib's CRC-32 to Kerberos' variant -- verify against RFC 3961 A.5.
    @classmethod
    def checksum(cls, key, keyusage, text):
        # Unkeyed: key and keyusage are ignored.
        value = ~crc32(text, 0xffffffff) & 0xffffffff
        return value.to_bytes(4, 'little')

    @classmethod
    def checksum_len(cls):
        return 4
+
+
# Map enctype numbers to their profile implementations.
_enctype_table = {
    Enctype.DES3: _DES3CBC,
    Enctype.AES128: _AES128CTS,
    Enctype.AES256: _AES256CTS,
    Enctype.RC4: _RC4
}
+
+
# Map cksumtype numbers to their profile implementations.
_checksum_table = {
    Cksumtype.SHA1_DES3: _SHA1DES3,
    Cksumtype.SHA1_AES128: _SHA1AES128,
    Cksumtype.SHA1_AES256: _SHA1AES256,
    Cksumtype.HMAC_MD5: _HMACMD5,
    Cksumtype.MD5: _MD5,
    Cksumtype.SHA1: _SHA1,
    Cksumtype.CRC32: _CRC32,
}
+
+
def _get_enctype_profile(enctype):
    """Return the profile class for *enctype*, or raise ValueError."""
    try:
        return _enctype_table[enctype]
    except KeyError:
        raise ValueError('Invalid enctype %d' % enctype) from None
+
+
def _get_checksum_profile(cksumtype):
    """Return the profile class for *cksumtype*, or raise ValueError."""
    try:
        return _checksum_table[cksumtype]
    except KeyError:
        raise ValueError('Invalid cksumtype %d' % cksumtype) from None
+
+
class Key(object):
    """A Kerberos protocol key: an enctype number plus raw key bytes."""

    def __init__(self, enctype, contents):
        # Validates the key length against the enctype's profile;
        # raises ValueError on unknown enctype or wrong length.
        e = _get_enctype_profile(enctype)
        if len(contents) != e.keysize:
            raise ValueError('Wrong key length')
        self.enctype = enctype
        self.contents = contents

    def __str__(self):
        return "enctype=%d contents=%s" % (self.enctype,
                                           b2a_hex(self.contents).decode('ascii'))
+
def seedsize(enctype):
    """Return the random_to_key input size in bytes for *enctype*."""
    e = _get_enctype_profile(enctype)
    return e.seedsize
+
+
def random_to_key(enctype, seed):
    """Convert *seed* random bytes into a Key of type *enctype*."""
    e = _get_enctype_profile(enctype)
    if len(seed) != e.seedsize:
        raise ValueError('Wrong crypto seed length')
    return e.random_to_key(seed)
+
+
def string_to_key(enctype, string, salt, params=None):
    """Derive a Key of type *enctype* from a password and salt."""
    e = _get_enctype_profile(enctype)
    return e.string_to_key(string, salt, params)
+
+
def encrypt(key, keyusage, plaintext, confounder=None):
    """Encrypt *plaintext* under *key* for the given key usage number."""
    e = _get_enctype_profile(key.enctype)
    return e.encrypt(key, keyusage, plaintext, confounder)
+
+
def decrypt(key, keyusage, ciphertext):
    """Decrypt and integrity-check *ciphertext* under *key*."""
    # Throw InvalidChecksum on checksum failure.  Throw ValueError on
    # invalid key enctype or malformed ciphertext.
    e = _get_enctype_profile(key.enctype)
    return e.decrypt(key, keyusage, ciphertext)
+
+
def prf(key, string):
    """Apply the enctype's pseudo-random function to *string*."""
    e = _get_enctype_profile(key.enctype)
    return e.prf(key, string)
+
+
def make_checksum(cksumtype, key, keyusage, text):
    """Compute the checksum of *text* for the given checksum type."""
    c = _get_checksum_profile(cksumtype)
    return c.checksum(key, keyusage, text)
+
+
def verify_checksum(cksumtype, key, keyusage, text, cksum):
    """Verify *cksum* over *text*; raise on mismatch."""
    # Throw InvalidChecksum exception on checksum failure.  Throw
    # ValueError on invalid cksumtype, invalid key enctype, or
    # malformed checksum.
    c = _get_checksum_profile(cksumtype)
    c.verify(key, keyusage, text, cksum)
+
+
def checksum_len(cksumtype):
    """Return the length in bytes of checksums of type *cksumtype*."""
    c = _get_checksum_profile(cksumtype)
    return c.checksum_len()
+
+
def prfplus(key, pepper, ln):
    """Produce *ln* bytes of output using the RFC 6113 PRF+ function."""
    chunks = []
    produced = 0
    counter = 1
    # Each round feeds a one-byte counter plus the pepper to the
    # enctype's PRF, concatenating until enough output is available.
    while produced < ln:
        chunk = prf(key, bytes([counter]) + pepper)
        chunks.append(chunk)
        produced += len(chunk)
        counter += 1
    return b''.join(chunks)[:ln]
+
+
def cf2(key1, key2, pepper1, pepper2, enctype=None):
    """Combine two keys with the RFC 6113 KRB-FX-CF2 function."""
    # Combine two keys and two pepper strings to produce a result key
    # of type enctype, using the RFC 6113 KRB-FX-CF2 function.
    if enctype is None:
        enctype = key1.enctype
    e = _get_enctype_profile(enctype)
    return e.random_to_key(_xorbytes(prfplus(key1, pepper1, e.seedsize),
                                     prfplus(key2, pepper2, e.seedsize)))
+
+
def h(hexstr):
    """Shorthand used by the tests: convert a hex string to bytes."""
    return bytes.fromhex(hexstr)
+
+
class KcrytoTest(TestCase):
    """kcrypto Test case.

    Exercises the enctype and checksum profiles above with fixed
    known-answer vectors.
    """
    # NOTE(review): the class name ('Kcryto') and the method name
    # 'test_aes128_crypr' look like typos for 'Kcrypto'/'crypt', but
    # renaming them would change the externally visible test IDs.

    def test_aes128_crypr(self):
        # AES128 encrypt and decrypt
        kb = h('9062430C8CDA3388922E6D6A509F5B7A')
        conf = h('94B491F481485B9A0678CD3C4EA386AD')
        keyusage = 2
        plain = b'9 bytesss'
        ctxt = h('68FB9679601F45C78857B2BF820FD6E53ECA8D42FD4B1D7024A09205ABB7'
                 'CD2EC26C355D2F')
        k = Key(Enctype.AES128, kb)
        self.assertEqual(encrypt(k, keyusage, plain, conf), ctxt)
        self.assertEqual(decrypt(k, keyusage, ctxt), plain)

    def test_aes256_crypt(self):
        # AES256 encrypt and decrypt
        kb = h('F1C795E9248A09338D82C3F8D5B567040B0110736845041347235B14042313'
               '98')
        conf = h('E45CA518B42E266AD98E165E706FFB60')
        keyusage = 4
        plain = b'30 bytes bytes bytes bytes byt'
        ctxt = h('D1137A4D634CFECE924DBC3BF6790648BD5CFF7DE0E7B99460211D0DAEF3'
                 'D79A295C688858F3B34B9CBD6EEBAE81DAF6B734D4D498B6714F1C1D')
        k = Key(Enctype.AES256, kb)
        self.assertEqual(encrypt(k, keyusage, plain, conf), ctxt)
        self.assertEqual(decrypt(k, keyusage, ctxt), plain)

    def test_aes128_checksum(self):
        # AES128 checksum
        kb = h('9062430C8CDA3388922E6D6A509F5B7A')
        keyusage = 3
        plain = b'eight nine ten eleven twelve thirteen'
        cksum = h('01A4B088D45628F6946614E3')
        k = Key(Enctype.AES128, kb)
        verify_checksum(Cksumtype.SHA1_AES128, k, keyusage, plain, cksum)

    def test_aes256_checksum(self):
        # AES256 checksum
        kb = h('B1AE4CD8462AFF1677053CC9279AAC30B796FB81CE21474DD3DDBC'
               'FEA4EC76D7')
        keyusage = 4
        plain = b'fourteen'
        cksum = h('E08739E3279E2903EC8E3836')
        k = Key(Enctype.AES256, kb)
        verify_checksum(Cksumtype.SHA1_AES256, k, keyusage, plain, cksum)

    def test_aes128_string_to_key(self):
        # AES128 string-to-key
        string = b'password'
        salt = b'ATHENA.MIT.EDUraeburn'
        params = h('00000002')
        kb = h('C651BF29E2300AC27FA469D693BDDA13')
        k = string_to_key(Enctype.AES128, string, salt, params)
        self.assertEqual(k.contents, kb)

    def test_aes256_string_to_key(self):
        # AES256 string-to-key
        string = b'X' * 64
        salt = b'pass phrase equals block size'
        params = h('000004B0')
        kb = h('89ADEE3608DB8BC71F1BFBFE459486B05618B70CBAE22092534E56'
               'C553BA4B34')
        k = string_to_key(Enctype.AES256, string, salt, params)
        self.assertEqual(k.contents, kb)

    def test_aes128_prf(self):
        # AES128 prf
        kb = h('77B39A37A868920F2A51F9DD150C5717')
        k = string_to_key(Enctype.AES128, b'key1', b'key1')
        self.assertEqual(prf(k, b'\x01\x61'), kb)

    def test_aes256_prf(self):
        # AES256 prf
        kb = h('0D674DD0F9A6806525A4D92E828BD15A')
        k = string_to_key(Enctype.AES256, b'key2', b'key2')
        self.assertEqual(prf(k, b'\x02\x62'), kb)

    def test_aes128_cf2(self):
        # AES128 cf2
        kb = h('97DF97E4B798B29EB31ED7280287A92A')
        k1 = string_to_key(Enctype.AES128, b'key1', b'key1')
        k2 = string_to_key(Enctype.AES128, b'key2', b'key2')
        k = cf2(k1, k2, b'a', b'b')
        self.assertEqual(k.contents, kb)

    def test_aes256_cf2(self):
        # AES256 cf2
        kb = h('4D6CA4E629785C1F01BAF55E2E548566B9617AE3A96868C337CB93B5'
               'E72B1C7B')
        k1 = string_to_key(Enctype.AES256, b'key1', b'key1')
        k2 = string_to_key(Enctype.AES256, b'key2', b'key2')
        k = cf2(k1, k2, b'a', b'b')
        self.assertEqual(k.contents, kb)

    def test_des3_crypt(self):
        # DES3 encrypt and decrypt
        kb = h('0DD52094E0F41CECCB5BE510A764B35176E3981332F1E598')
        conf = h('94690A17B2DA3C9B')
        keyusage = 3
        plain = b'13 bytes byte'
        ctxt = h('839A17081ECBAFBCDC91B88C6955DD3C4514023CF177B77BF0D0177A16F7'
                 '05E849CB7781D76A316B193F8D30')
        k = Key(Enctype.DES3, kb)
        self.assertEqual(encrypt(k, keyusage, plain, conf), ctxt)
        # DES3 decryption cannot strip the zero padding, so compare
        # against the padded plaintext.
        self.assertEqual(decrypt(k, keyusage, ctxt), _zeropad(plain, 8))

    def test_des3_string_to_key(self):
        # DES3 string-to-key
        string = b'password'
        salt = b'ATHENA.MIT.EDUraeburn'
        kb = h('850BB51358548CD05E86768C313E3BFEF7511937DCF72C3E')
        k = string_to_key(Enctype.DES3, string, salt)
        self.assertEqual(k.contents, kb)

    def test_des3_checksum(self):
        # DES3 checksum
        kb = h('7A25DF8992296DCEDA0E135BC4046E2375B3C14C98FBC162')
        keyusage = 2
        plain = b'six seven'
        cksum = h('0EEFC9C3E049AABC1BA5C401677D9AB699082BB4')
        k = Key(Enctype.DES3, kb)
        verify_checksum(Cksumtype.SHA1_DES3, k, keyusage, plain, cksum)

    def test_des3_cf2(self):
        # DES3 cf2
        kb = h('E58F9EB643862C13AD38E529313462A7F73E62834FE54A01')
        k1 = string_to_key(Enctype.DES3, b'key1', b'key1')
        k2 = string_to_key(Enctype.DES3, b'key2', b'key2')
        k = cf2(k1, k2, b'a', b'b')
        self.assertEqual(k.contents, kb)

    def test_rc4_crypt(self):
        # RC4 encrypt and decrypt
        kb = h('68F263DB3FCE15D031C9EAB02D67107A')
        conf = h('37245E73A45FBF72')
        keyusage = 4
        plain = b'30 bytes bytes bytes bytes byt'
        ctxt = h('95F9047C3AD75891C2E9B04B16566DC8B6EB9CE4231AFB2542EF87A7B5A0'
                 'F260A99F0460508DE0CECC632D07C354124E46C5D2234EB8')
        k = Key(Enctype.RC4, kb)
        self.assertEqual(encrypt(k, keyusage, plain, conf), ctxt)
        self.assertEqual(decrypt(k, keyusage, ctxt), plain)

    def test_rc4_string_to_key(self):
        # RC4 string-to-key
        string = b'foo'
        kb = h('AC8E657F83DF82BEEA5D43BDAF7800CC')
        k = string_to_key(Enctype.RC4, string, None)
        self.assertEqual(k.contents, kb)

    def test_rc4_checksum(self):
        # RC4 checksum
        kb = h('F7D3A155AF5E238A0B7A871A96BA2AB2')
        keyusage = 6
        plain = b'seventeen eighteen nineteen twenty'
        cksum = h('EB38CC97E2230F59DA4117DC5859D7EC')
        k = Key(Enctype.RC4, kb)
        verify_checksum(Cksumtype.HMAC_MD5, k, keyusage, plain, cksum)

    def test_rc4_cf2(self):
        # RC4 cf2
        kb = h('24D7F6B6BAE4E5C00D2082C5EBAB3672')
        k1 = string_to_key(Enctype.RC4, b'key1', b'key1')
        k2 = string_to_key(Enctype.RC4, b'key2', b'key2')
        k = cf2(k1, k2, b'a', b'b')
        self.assertEqual(k.contents, kb)

    def _test_md5_unkeyed_checksum(self, etype, usage):
        # MD5 unkeyed checksum: the key and usage must not affect the
        # result, so the same digest verifies for every enctype/usage.
        pw = b'pwd'
        salt = b'bytes'
        key = string_to_key(etype, pw, salt)
        plain = b'seventeen eighteen nineteen twenty'
        cksum = h('9d9588cdef3a8cefc9d2c208d978f60c')
        verify_checksum(Cksumtype.MD5, key, usage, plain, cksum)

    def test_md5_unkeyed_checksum_des3_usage_40(self):
        return self._test_md5_unkeyed_checksum(Enctype.DES3, 40)

    def test_md5_unkeyed_checksum_des3_usage_50(self):
        return self._test_md5_unkeyed_checksum(Enctype.DES3, 50)

    def test_md5_unkeyed_checksum_rc4_usage_40(self):
        return self._test_md5_unkeyed_checksum(Enctype.RC4, 40)

    def test_md5_unkeyed_checksum_rc4_usage_50(self):
        return self._test_md5_unkeyed_checksum(Enctype.RC4, 50)

    def test_md5_unkeyed_checksum_aes128_usage_40(self):
        return self._test_md5_unkeyed_checksum(Enctype.AES128, 40)

    def test_md5_unkeyed_checksum_aes128_usage_50(self):
        return self._test_md5_unkeyed_checksum(Enctype.AES128, 50)

    def test_md5_unkeyed_checksum_aes256_usage_40(self):
        return self._test_md5_unkeyed_checksum(Enctype.AES256, 40)

    def test_md5_unkeyed_checksum_aes256_usage_50(self):
        return self._test_md5_unkeyed_checksum(Enctype.AES256, 50)

    def _test_sha1_unkeyed_checksum(self, etype, usage):
        # SHA1 unkeyed checksum
        pw = b'password'
        salt = b'salt'
        key = string_to_key(etype, pw, salt)
        plain = b'twenty nineteen eighteen seventeen'
        cksum = h('381c870d8875d1913555de19af5c885fd27b7da9')
        verify_checksum(Cksumtype.SHA1, key, usage, plain, cksum)

    def test_sha1_unkeyed_checksum_des3_usage_40(self):
        return self._test_sha1_unkeyed_checksum(Enctype.DES3, 40)

    def test_sha1_unkeyed_checksum_des3_usage_50(self):
        return self._test_sha1_unkeyed_checksum(Enctype.DES3, 50)

    def test_sha1_unkeyed_checksum_rc4_usage_40(self):
        return self._test_sha1_unkeyed_checksum(Enctype.RC4, 40)

    def test_sha1_unkeyed_checksum_rc4_usage_50(self):
        return self._test_sha1_unkeyed_checksum(Enctype.RC4, 50)

    def test_sha1_unkeyed_checksum_aes128_usage_40(self):
        return self._test_sha1_unkeyed_checksum(Enctype.AES128, 40)

    def test_sha1_unkeyed_checksum_aes128_usage_50(self):
        return self._test_sha1_unkeyed_checksum(Enctype.AES128, 50)

    def test_sha1_unkeyed_checksum_aes256_usage_40(self):
        return self._test_sha1_unkeyed_checksum(Enctype.AES256, 40)

    def test_sha1_unkeyed_checksum_aes256_usage_50(self):
        return self._test_sha1_unkeyed_checksum(Enctype.AES256, 50)

    def _test_crc32_unkeyed_checksum(self, etype, usage):
        # CRC32 unkeyed checksum
        pw = b'password'
        salt = b'salt'
        key = string_to_key(etype, pw, salt)
        plain = b'africa america asia australia europe'
        cksum = h('ce595a53')
        verify_checksum(Cksumtype.CRC32, key, usage, plain, cksum)

    def test_crc32_unkeyed_checksum_des3_usage_40(self):
        return self._test_crc32_unkeyed_checksum(Enctype.DES3, 40)

    def test_crc32_unkeyed_checksum_des3_usage_50(self):
        return self._test_crc32_unkeyed_checksum(Enctype.DES3, 50)

    def test_crc32_unkeyed_checksum_rc4_usage_40(self):
        return self._test_crc32_unkeyed_checksum(Enctype.RC4, 40)

    def test_crc32_unkeyed_checksum_rc4_usage_50(self):
        return self._test_crc32_unkeyed_checksum(Enctype.RC4, 50)

    def test_crc32_unkeyed_checksum_aes128_usage_40(self):
        return self._test_crc32_unkeyed_checksum(Enctype.AES128, 40)

    def test_crc32_unkeyed_checksum_aes128_usage_50(self):
        return self._test_crc32_unkeyed_checksum(Enctype.AES128, 50)

    def test_crc32_unkeyed_checksum_aes256_usage_40(self):
        return self._test_crc32_unkeyed_checksum(Enctype.AES256, 40)

    def test_crc32_unkeyed_checksum_aes256_usage_50(self):
        return self._test_crc32_unkeyed_checksum(Enctype.AES256, 50)
+
+
if __name__ == "__main__":
    # Allow running this module's self-tests directly.
    import unittest
    unittest.main()
diff --git a/python/samba/tests/krb5/kdc_base_test.py b/python/samba/tests/krb5/kdc_base_test.py
new file mode 100644
index 0000000..373c73e
--- /dev/null
+++ b/python/samba/tests/krb5/kdc_base_test.py
@@ -0,0 +1,3755 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Stefan Metzmacher 2020
+# Copyright (C) 2020-2021 Catalyst.Net Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import sys
+import os
+
+sys.path.insert(0, "bin/python")
+os.environ["PYTHONUNBUFFERED"] = "1"
+
+import binascii
+import collections
+from datetime import datetime, timezone
+from enum import Enum
+from functools import partial
+import numbers
+import secrets
+import tempfile
+
+from collections import namedtuple
+import ldb
+from ldb import SCOPE_BASE
+from samba import (
+ NTSTATUSError,
+ arcfour_encrypt,
+ common,
+ generate_random_password,
+ ntstatus,
+)
+from samba.auth import system_session
+from samba.credentials import (
+ Credentials,
+ DONT_USE_KERBEROS,
+ MUST_USE_KERBEROS,
+ SPECIFIED,
+)
+from samba.crypto import des_crypt_blob_16, md4_hash_blob
+from samba.dcerpc import (
+ claims,
+ drsblobs,
+ drsuapi,
+ krb5ccache,
+ krb5pac,
+ lsa,
+ misc,
+ netlogon,
+ ntlmssp,
+ samr,
+ security,
+)
+from samba.drs_utils import drs_Replicate, drsuapi_connect
+from samba.dsdb import (
+ DSDB_SYNTAX_BINARY_DN,
+ DS_DOMAIN_FUNCTION_2000,
+ DS_DOMAIN_FUNCTION_2008,
+ DS_GUID_COMPUTERS_CONTAINER,
+ DS_GUID_DOMAIN_CONTROLLERS_CONTAINER,
+ DS_GUID_MANAGED_SERVICE_ACCOUNTS_CONTAINER,
+ DS_GUID_USERS_CONTAINER,
+ GTYPE_SECURITY_DOMAIN_LOCAL_GROUP,
+ GTYPE_SECURITY_GLOBAL_GROUP,
+ GTYPE_SECURITY_UNIVERSAL_GROUP,
+ UF_NORMAL_ACCOUNT,
+ UF_NOT_DELEGATED,
+ UF_NO_AUTH_DATA_REQUIRED,
+ UF_PARTIAL_SECRETS_ACCOUNT,
+ UF_SERVER_TRUST_ACCOUNT,
+ UF_TRUSTED_TO_AUTHENTICATE_FOR_DELEGATION,
+ UF_WORKSTATION_TRUST_ACCOUNT,
+)
+from samba.dcerpc.misc import (
+ SEC_CHAN_BDC,
+ SEC_CHAN_NULL,
+ SEC_CHAN_WKSTA,
+)
+from samba.join import DCJoinContext
+from samba.ndr import ndr_pack, ndr_unpack
+from samba import net
+from samba.netcmd.domain.models import AuthenticationPolicy, AuthenticationSilo
+from samba.samdb import SamDB, dsdb_Dn
+
# msDS-SupportedEncryptionTypes bits used as the fallback default in
# creds_set_enctypes() below.
rc4_bit = security.KERB_ENCTYPE_RC4_HMAC_MD5
aes256_sk_bit = security.KERB_ENCTYPE_AES256_CTS_HMAC_SHA1_96_SK
+
+from samba.tests import TestCaseInTempDir, delete_force
+import samba.tests.krb5.kcrypto as kcrypto
+from samba.tests.krb5.raw_testcase import (
+ KerberosCredentials,
+ KerberosTicketCreds,
+ RawKerberosTest,
+)
+import samba.tests.krb5.rfc4120_pyasn1 as krb5_asn1
+from samba.tests.krb5.rfc4120_constants import (
+ AD_IF_RELEVANT,
+ AD_WIN2K_PAC,
+ AES256_CTS_HMAC_SHA1_96,
+ ARCFOUR_HMAC_MD5,
+ KDC_ERR_PREAUTH_REQUIRED,
+ KDC_ERR_TGT_REVOKED,
+ KRB_AS_REP,
+ KRB_ERROR,
+ KRB_TGS_REP,
+ KU_AS_REP_ENC_PART,
+ KU_ENC_CHALLENGE_CLIENT,
+ KU_PA_ENC_TIMESTAMP,
+ KU_TICKET,
+ NT_PRINCIPAL,
+ NT_SRV_INST,
+ PADATA_ENCRYPTED_CHALLENGE,
+ PADATA_ENC_TIMESTAMP,
+ PADATA_ETYPE_INFO2,
+)
+
# Debugging flags, copied onto each test instance in KDCBaseTest.setUp().
global_asn1_print = False
global_hexdump = False
+
+
class GroupType(Enum):
    """Security group scopes, wrapping the samba.dsdb GTYPE_* constants."""
    GLOBAL = GTYPE_SECURITY_GLOBAL_GROUP
    DOMAIN_LOCAL = GTYPE_SECURITY_DOMAIN_LOCAL_GROUP
    UNIVERSAL = GTYPE_SECURITY_UNIVERSAL_GROUP
+
+
# Lightweight value object pairing a principal's DN with its SID.
class Principal:
    """Holds the DN and SID of a single principal."""

    __slots__ = ['dn', 'sid']

    def __init__(self, dn, sid):
        # The DN must be a real ldb.Dn (or absent entirely).
        if dn is not None and not isinstance(dn, ldb.Dn):
            raise AssertionError(f'expected {dn} to be an ldb.Dn')
        self.dn = dn
        self.sid = sid
+
+
+class KDCBaseTest(TestCaseInTempDir, RawKerberosTest):
+ """ Base class for KDC tests.
+ """
+
    class AccountType(Enum):
        """Kinds of account create_account() can create.

        The values are arbitrary unique sentinels; members are only ever
        compared by identity.
        """
        USER = object()
        COMPUTER = object()
        SERVER = object()
        RODC = object()
        MANAGED_SERVICE = object()
        GROUP_MANAGED_SERVICE = object()
+
+ @classmethod
+ def setUpClass(cls):
+ super().setUpClass()
+ cls._lp = None
+
+ cls._ldb = None
+ cls._rodc_ldb = None
+
+ cls._drsuapi_connection = None
+
+ cls._functional_level = None
+
+ # An identifier to ensure created accounts have unique names. Windows
+ # caches accounts based on usernames, so account names being different
+ # across test runs avoids previous test runs affecting the results.
+ cls.account_base = f'{secrets.token_hex(4)}_'
+ cls.account_id = 0
+
+ # A list containing DNs of accounts created as part of testing.
+ cls.accounts = []
+
+ cls.account_cache = {}
+ cls.policy_cache = {}
+ cls.tkt_cache = {}
+
+ cls._rodc_ctx = None
+
+ cls.ldb_cleanups = []
+
+ cls._claim_types_dn = None
+ cls._authn_policy_config_dn = None
+ cls._authn_policies_dn = None
+ cls._authn_silos_dn = None
+
    def get_claim_types_dn(self):
        """Return a copy of the DN of the Claim Types container.

        On first use, create CN=Claims Configuration,CN=Services in the
        config partition and its CN=Claim Types child if they do not already
        exist; any container we created is queued for deletion in
        tearDownClass().
        """
        samdb = self.get_samdb()

        if self._claim_types_dn is None:
            claim_config_dn = samdb.get_config_basedn()

            claim_config_dn.add_child('CN=Claims Configuration,CN=Services')
            details = {
                'dn': claim_config_dn,
                'objectClass': 'container',
            }
            try:
                samdb.add(details)
            except ldb.LdbError as err:
                num, _ = err.args
                # An existing container (e.g. from a previous run) is fine;
                # anything else is a real error.
                if num != ldb.ERR_ENTRY_ALREADY_EXISTS:
                    raise
            else:
                # We created it, so remember to delete it later.
                self.accounts.append(str(claim_config_dn))

            # NOTE: add_child() mutates the Dn in place, so claim_types_dn
            # extends the very same object as claim_config_dn.
            claim_types_dn = claim_config_dn
            claim_types_dn.add_child('CN=Claim Types')
            details = {
                'dn': claim_types_dn,
                'objectClass': 'msDS-ClaimTypes',
            }
            try:
                samdb.add(details)
            except ldb.LdbError as err:
                num, _ = err.args
                if num != ldb.ERR_ENTRY_ALREADY_EXISTS:
                    raise
            else:
                self.accounts.append(str(claim_types_dn))

            # Cache on the class so all tests share the one lookup.
            type(self)._claim_types_dn = claim_types_dn

        # Return a copy of the DN.
        return ldb.Dn(samdb, str(self._claim_types_dn))
+
    def get_authn_policy_config_dn(self):
        """Return a copy of the DN of the AuthN Policy Configuration
        container, creating it in the config partition on first use.

        A container we created ourselves is queued for deletion in
        tearDownClass().
        """
        samdb = self.get_samdb()

        if self._authn_policy_config_dn is None:
            authn_policy_config_dn = samdb.get_config_basedn()

            authn_policy_config_dn.add_child(
                'CN=AuthN Policy Configuration,CN=Services')
            details = {
                'dn': authn_policy_config_dn,
                'objectClass': 'container',
                'description': ('Contains configuration for authentication '
                                'policy'),
            }
            try:
                samdb.add(details)
            except ldb.LdbError as err:
                num, _ = err.args
                # Already present (e.g. from a previous run) is fine.
                if num != ldb.ERR_ENTRY_ALREADY_EXISTS:
                    raise
            else:
                self.accounts.append(str(authn_policy_config_dn))

            # Cache on the class so all tests share the one lookup.
            type(self)._authn_policy_config_dn = authn_policy_config_dn

        # Return a copy of the DN.
        return ldb.Dn(samdb, str(self._authn_policy_config_dn))
+
    def get_authn_policies_dn(self):
        """Return a copy of the DN of the AuthN Policies container,
        creating it below the AuthN Policy Configuration container on
        first use."""
        samdb = self.get_samdb()

        if self._authn_policies_dn is None:
            authn_policies_dn = self.get_authn_policy_config_dn()
            authn_policies_dn.add_child('CN=AuthN Policies')
            details = {
                'dn': authn_policies_dn,
                'objectClass': 'msDS-AuthNPolicies',
                'description': 'Contains authentication policy objects',
            }
            try:
                samdb.add(details)
            except ldb.LdbError as err:
                num, _ = err.args
                # Already present (e.g. from a previous run) is fine.
                if num != ldb.ERR_ENTRY_ALREADY_EXISTS:
                    raise
            else:
                # We created it, so delete it again in tearDownClass().
                self.accounts.append(str(authn_policies_dn))

            # Cache on the class so all tests share the one lookup.
            type(self)._authn_policies_dn = authn_policies_dn

        # Return a copy of the DN.
        return ldb.Dn(samdb, str(self._authn_policies_dn))
+
    def get_authn_silos_dn(self):
        """Return a copy of the DN of the AuthN Silos container,
        creating it below the AuthN Policy Configuration container on
        first use."""
        samdb = self.get_samdb()

        if self._authn_silos_dn is None:
            authn_silos_dn = self.get_authn_policy_config_dn()
            authn_silos_dn.add_child('CN=AuthN Silos')
            details = {
                'dn': authn_silos_dn,
                'objectClass': 'msDS-AuthNPolicySilos',
                'description': 'Contains authentication policy silo objects',
            }
            try:
                samdb.add(details)
            except ldb.LdbError as err:
                num, _ = err.args
                # Already present (e.g. from a previous run) is fine.
                if num != ldb.ERR_ENTRY_ALREADY_EXISTS:
                    raise
            else:
                # We created it, so delete it again in tearDownClass().
                self.accounts.append(str(authn_silos_dn))

            # Cache on the class so all tests share the one lookup.
            type(self)._authn_silos_dn = authn_silos_dn

        # Return a copy of the DN.
        return ldb.Dn(samdb, str(self._authn_silos_dn))
+
+ @staticmethod
+ def freeze(m):
+ return frozenset((k, v) for k, v in m.items())
+
    def tearDown(self):
        # Run any cleanups that may modify accounts prior to deleting those
        # accounts.
        self.doCleanups()

        # Clean up any accounts created for single tests (see
        # create_account(preserve=False)), in reverse creation order.
        if self._ldb is not None:
            for dn in reversed(self.test_accounts):
                delete_force(self._ldb, dn)

        super().tearDown()
+
    @classmethod
    def tearDownClass(cls):
        # Clean up any accounts created by create_account. This is
        # done in tearDownClass() rather than tearDown(), so that
        # accounts need only be created once for permutation tests.
        if cls._ldb is not None:
            # Best-effort: apply any queued modifications first, ignoring
            # failures, so dependent objects can be deleted cleanly.
            for cleanup in reversed(cls.ldb_cleanups):
                try:
                    cls._ldb.modify(cleanup)
                except ldb.LdbError:
                    pass

            # Delete in reverse creation order.
            for dn in reversed(cls.accounts):
                delete_force(cls._ldb, dn)

        # Remove the mock RODC join, if one was created.
        if cls._rodc_ctx is not None:
            cls._rodc_ctx.cleanup_old_join(force=True)

        super().tearDownClass()
+
+ def setUp(self):
+ super().setUp()
+ self.do_asn1_print = global_asn1_print
+ self.do_hexdump = global_hexdump
+
+ # A list containing DNs of accounts that should be removed when the
+ # current test finishes.
+ self.test_accounts = []
+
+ def get_lp(self):
+ if self._lp is None:
+ type(self)._lp = self.get_loadparm()
+
+ return self._lp
+
    def get_samdb(self):
        """Return a SamDB connection to the DC, created lazily and cached
        on the class so every test shares it."""
        if self._ldb is None:
            creds = self.get_admin_creds()
            lp = self.get_lp()

            session = system_session()
            type(self)._ldb = SamDB(url="ldap://%s" % self.dc_host,
                                    session_info=session,
                                    credentials=creds,
                                    lp=lp)

        return self._ldb
+
    def get_rodc_samdb(self):
        """Return a SamDB connection to the RODC (self.host), created
        lazily with am_rodc=True and cached on the class."""
        if self._rodc_ldb is None:
            creds = self.get_admin_creds()
            lp = self.get_lp()

            session = system_session()
            type(self)._rodc_ldb = SamDB(url="ldap://%s" % self.host,
                                         session_info=session,
                                         credentials=creds,
                                         lp=lp,
                                         am_rodc=True)

        return self._rodc_ldb
+
    def get_drsuapi_connection(self):
        """Return the cached DRSUAPI connection to the DC, creating it on
        first use via drs_utils.drsuapi_connect()."""
        if self._drsuapi_connection is None:
            admin_creds = self.get_admin_creds()
            samdb = self.get_samdb()
            dns_hostname = samdb.host_dns_name()
            type(self)._drsuapi_connection = drsuapi_connect(dns_hostname,
                                                             self.get_lp(),
                                                             admin_creds,
                                                             ip=self.dc_host)

        return self._drsuapi_connection
+
+ def get_server_dn(self, samdb):
+ server = samdb.get_serverName()
+
+ res = samdb.search(base=server,
+ scope=ldb.SCOPE_BASE,
+ attrs=['serverReference'])
+ dn = ldb.Dn(samdb, res[0]['serverReference'][0].decode('utf8'))
+
+ return dn
+
    def get_mock_rodc_ctx(self):
        """Return a DCJoinContext for a mock RODC, joining it on first use
        and caching the context on the class."""
        if self._rodc_ctx is None:
            admin_creds = self.get_admin_creds()
            lp = self.get_lp()

            # A unique name keeps runs from colliding with each other.
            rodc_name = self.get_new_username()
            site_name = 'Default-First-Site-Name'

            rodc_ctx = DCJoinContext(server=self.dc_host,
                                     creds=admin_creds,
                                     lp=lp,
                                     site=site_name,
                                     netbios_name=rodc_name,
                                     targetdir=None,
                                     domain=None)
            # Perform the (partial) RODC join; see create_rodc().
            self.create_rodc(rodc_ctx)

            type(self)._rodc_ctx = rodc_ctx

        return self._rodc_ctx
+
    def get_domain_functional_level(self, ldb=None):
        """Return the domain functional level, cached on the class.

        NOTE: the 'ldb' parameter shadows the module-level ldb import; here
        it names the connection to query (defaulting to the main SamDB).
        """
        if self._functional_level is None:
            if ldb is None:
                ldb = self.get_samdb()

            res = ldb.search(base='',
                             scope=SCOPE_BASE,
                             attrs=['domainFunctionality'])
            try:
                functional_level = int(res[0]['domainFunctionality'][0])
            except KeyError:
                # Attribute absent: fall back to the lowest (2000) level.
                functional_level = DS_DOMAIN_FUNCTION_2000

            type(self)._functional_level = functional_level

        return self._functional_level
+
+ def get_default_enctypes(self, creds):
+ self.assertIsNotNone(creds, 'expected client creds to be passed in')
+
+ functional_level = self.get_domain_functional_level()
+
+ default_enctypes = []
+
+ if functional_level >= DS_DOMAIN_FUNCTION_2008:
+ # AES is only supported at functional level 2008 or higher
+ default_enctypes.append(kcrypto.Enctype.AES256)
+ default_enctypes.append(kcrypto.Enctype.AES128)
+
+ if self.expect_nt_hash or creds.get_workstation():
+ default_enctypes.append(kcrypto.Enctype.RC4)
+
+ return default_enctypes
+
    def create_group(self, samdb, name, ou=None, gtype=None):
        """Create a group named *name* and return its DN string.

        :param ou: container DN (defaults to the Users container)
        :param gtype: groupType value (e.g. a GTYPE_* constant)
        """
        if ou is None:
            ou = samdb.get_wellknown_dn(samdb.get_default_basedn(),
                                        DS_GUID_USERS_CONTAINER)

        dn = f'CN={name},{ou}'

        # Remove the group if it exists; this will happen if a previous test
        # run failed.
        delete_force(samdb, dn)

        # Save the group name so it can be deleted in tearDownClass.
        self.accounts.append(dn)

        details = {
            'dn': dn,
            'objectClass': 'group'
        }
        if gtype is not None:
            # groupType is a signed 32-bit value; normalise accordingly.
            details['groupType'] = common.normalise_int32(gtype)
        samdb.add(details)

        return dn
+
+ def get_dn_from_attribute(self, attribute):
+ return self.get_from_attribute(attribute).dn
+
+ def get_dn_from_class(self, attribute):
+ return self.get_from_class(attribute).dn
+
+ def get_schema_id_guid_from_attribute(self, attribute):
+ guid = self.get_from_attribute(attribute).get('schemaIDGUID', idx=0)
+ return misc.GUID(guid)
+
    def get_from_attribute(self, attribute):
        """Look up *attribute* in the schema as an attributeSchema object."""
        return self.get_from_schema(attribute, 'attributeSchema')
+
    def get_from_class(self, attribute):
        """Look up the named class in the schema as a classSchema object."""
        return self.get_from_schema(attribute, 'classSchema')
+
+ def get_from_schema(self, name, object_class):
+ samdb = self.get_samdb()
+ schema_dn = samdb.get_schema_basedn()
+
+ res = samdb.search(base=schema_dn,
+ scope=ldb.SCOPE_ONELEVEL,
+ attrs=['schemaIDGUID'],
+ expression=(f'(&(objectClass={object_class})'
+ f'(lDAPDisplayName={name}))'))
+ self.assertEqual(1, len(res),
+ f'could not locate {name} in {object_class}')
+
+ return res[0]
+
    def create_authn_silo(self, *,
                          members=None,
                          user_policy=None,
                          computer_policy=None,
                          service_policy=None,
                          enforced=None):
        """Create an msDS-AuthNPolicySilo object and return its model.

        :param members: DNs to set as silo members
        :param user_policy: policy model for user accounts
        :param computer_policy: policy model for computer accounts
        :param service_policy: policy model for service accounts
        :param enforced: True/False, stored as the strings LDAP expects
        """
        samdb = self.get_samdb()

        silo_id = self.get_new_username()

        authn_silo_dn = self.get_authn_silos_dn()
        authn_silo_dn.add_child(f'CN={silo_id}')

        details = {
            'dn': authn_silo_dn,
            'objectClass': 'msDS-AuthNPolicySilo',
        }

        # Booleans become the string forms LDAP expects.
        if enforced is True:
            enforced = 'TRUE'
        elif enforced is False:
            enforced = 'FALSE'

        if members is not None:
            details['msDS-AuthNPolicySiloMembers'] = members
        if user_policy is not None:
            details['msDS-UserAuthNPolicy'] = str(user_policy.dn)
        if computer_policy is not None:
            details['msDS-ComputerAuthNPolicy'] = str(computer_policy.dn)
        if service_policy is not None:
            details['msDS-ServiceAuthNPolicy'] = str(service_policy.dn)
        if enforced is not None:
            details['msDS-AuthNPolicySiloEnforced'] = enforced

        # Save the silo DN so it can be deleted in tearDownClass().
        self.accounts.append(str(authn_silo_dn))

        # Remove the silo if it exists; this will happen if a previous test run
        # failed.
        delete_force(samdb, authn_silo_dn)

        samdb.add(details)

        return AuthenticationSilo.get(samdb, dn=authn_silo_dn)
+
    def create_authn_silo_claim_id(self):
        """Ensure the constructed AuthenticationSilo claim type exists and
        return its claim ID."""
        claim_id = 'ad://ext/AuthenticationSilo'

        # Account object classes the claim applies to.
        for_classes = [
            'msDS-GroupManagedServiceAccount',
            'user',
            'msDS-ManagedServiceAccount',
            'computer',
        ]

        self.create_claim(claim_id,
                          enabled=True,
                          single_valued=True,
                          value_space_restricted=False,
                          source_type='Constructed',
                          for_classes=for_classes,
                          value_type=claims.CLAIM_TYPE_STRING,
                          # It's OK if the claim type already exists.
                          force=False)

        return claim_id
+
+ def create_authn_policy(self, *,
+ use_cache=True,
+ **kwargs):
+
+ if use_cache:
+ cache_key = self.freeze(kwargs)
+
+ authn_policy = self.policy_cache.get(cache_key)
+ if authn_policy is not None:
+ return authn_policy
+
+ authn_policy = self.create_authn_policy_opts(**kwargs)
+ if use_cache:
+ self.policy_cache[cache_key] = authn_policy
+
+ return authn_policy
+
    def create_authn_policy_opts(self, *,
                                 enforced=None,
                                 strong_ntlm_policy=None,
                                 user_allowed_from=None,
                                 user_allowed_ntlm=None,
                                 user_allowed_to=None,
                                 user_tgt_lifetime=None,
                                 computer_allowed_to=None,
                                 computer_tgt_lifetime=None,
                                 service_allowed_from=None,
                                 service_allowed_ntlm=None,
                                 service_allowed_to=None,
                                 service_tgt_lifetime=None):
        """Create a fresh msDS-AuthNPolicy object and return its model.

        Boolean options become the 'TRUE'/'FALSE' strings LDAP expects;
        *_allowed_from/*_allowed_to take SDDL strings which are packed into
        binary security descriptors; numeric *_tgt_lifetime values are
        multiplied by 10,000,000 (i.e. treated as seconds and stored as
        100-nanosecond intervals).
        """
        samdb = self.get_samdb()

        policy_id = self.get_new_username()

        policy_dn = self.get_authn_policies_dn()
        policy_dn.add_child(f'CN={policy_id}')

        details = {
            'dn': policy_dn,
            'objectClass': 'msDS-AuthNPolicy',
        }

        _domain_sid = None

        def sd_from_sddl(sddl):
            # Pack an SDDL string into a binary descriptor, fetching the
            # domain SID lazily and only once.
            nonlocal _domain_sid
            if _domain_sid is None:
                _domain_sid = security.dom_sid(samdb.get_domain_sid())

            return ndr_pack(security.descriptor.from_sddl(sddl, _domain_sid))

        if enforced is True:
            enforced = 'TRUE'
        elif enforced is False:
            enforced = 'FALSE'

        if user_allowed_ntlm is True:
            user_allowed_ntlm = 'TRUE'
        elif user_allowed_ntlm is False:
            user_allowed_ntlm = 'FALSE'

        if service_allowed_ntlm is True:
            service_allowed_ntlm = 'TRUE'
        elif service_allowed_ntlm is False:
            service_allowed_ntlm = 'FALSE'

        if enforced is not None:
            details['msDS-AuthNPolicyEnforced'] = enforced
        if strong_ntlm_policy is not None:
            details['msDS-StrongNTLMPolicy'] = strong_ntlm_policy

        if user_allowed_from is not None:
            details['msDS-UserAllowedToAuthenticateFrom'] = sd_from_sddl(
                user_allowed_from)
        if user_allowed_ntlm is not None:
            details['msDS-UserAllowedNTLMNetworkAuthentication'] = (
                user_allowed_ntlm)
        if user_allowed_to is not None:
            details['msDS-UserAllowedToAuthenticateTo'] = sd_from_sddl(
                user_allowed_to)
        if user_tgt_lifetime is not None:
            if isinstance(user_tgt_lifetime, numbers.Number):
                user_tgt_lifetime = str(int(user_tgt_lifetime * 10_000_000))
            details['msDS-UserTGTLifetime'] = user_tgt_lifetime

        if computer_allowed_to is not None:
            details['msDS-ComputerAllowedToAuthenticateTo'] = sd_from_sddl(
                computer_allowed_to)
        if computer_tgt_lifetime is not None:
            if isinstance(computer_tgt_lifetime, numbers.Number):
                computer_tgt_lifetime = str(
                    int(computer_tgt_lifetime * 10_000_000))
            details['msDS-ComputerTGTLifetime'] = computer_tgt_lifetime

        if service_allowed_from is not None:
            details['msDS-ServiceAllowedToAuthenticateFrom'] = sd_from_sddl(
                service_allowed_from)
        if service_allowed_ntlm is not None:
            details['msDS-ServiceAllowedNTLMNetworkAuthentication'] = (
                service_allowed_ntlm)
        if service_allowed_to is not None:
            details['msDS-ServiceAllowedToAuthenticateTo'] = sd_from_sddl(
                service_allowed_to)
        if service_tgt_lifetime is not None:
            if isinstance(service_tgt_lifetime, numbers.Number):
                service_tgt_lifetime = str(
                    int(service_tgt_lifetime * 10_000_000))
            details['msDS-ServiceTGTLifetime'] = service_tgt_lifetime

        # Save the policy DN so it can be deleted in tearDownClass().
        self.accounts.append(str(policy_dn))

        # Remove the policy if it exists; this will happen if a previous test
        # run failed.
        delete_force(samdb, policy_dn)

        samdb.add(details)

        return AuthenticationPolicy.get(samdb, dn=policy_dn)
+
    def create_claim(self,
                     claim_id,
                     enabled=None,
                     attribute=None,
                     single_valued=None,
                     value_space_restricted=None,
                     source=None,
                     source_type=None,
                     for_classes=None,
                     value_type=None,
                     force=True):
        """Create an msDS-ClaimType object named *claim_id*.

        Boolean options become 'TRUE'/'FALSE' strings; *attribute* and the
        entries in *for_classes* are schema display names resolved to DNs.
        With force=True any existing claim of the same name is removed
        first; with force=False an already-existing claim is tolerated.
        """
        samdb = self.get_samdb()

        claim_dn = self.get_claim_types_dn()
        claim_dn.add_child(f'CN={claim_id}')

        details = {
            'dn': claim_dn,
            'objectClass': 'msDS-ClaimType',
        }

        if enabled is True:
            enabled = 'TRUE'
        elif enabled is False:
            enabled = 'FALSE'

        if attribute is not None:
            # Resolve the attribute's schema DN.
            attribute = str(self.get_dn_from_attribute(attribute))

        if single_valued is True:
            single_valued = 'TRUE'
        elif single_valued is False:
            single_valued = 'FALSE'

        if value_space_restricted is True:
            value_space_restricted = 'TRUE'
        elif value_space_restricted is False:
            value_space_restricted = 'FALSE'

        if for_classes is not None:
            # Resolve each class name to its schema DN.
            for_classes = [str(self.get_dn_from_class(name))
                           for name in for_classes]

        if isinstance(value_type, int):
            value_type = str(value_type)

        if enabled is not None:
            details['Enabled'] = enabled
        if attribute is not None:
            details['msDS-ClaimAttributeSource'] = attribute
        if single_valued is not None:
            details['msDS-ClaimIsSingleValued'] = single_valued
        if value_space_restricted is not None:
            details['msDS-ClaimIsValueSpaceRestricted'] = (
                value_space_restricted)
        if source is not None:
            details['msDS-ClaimSource'] = source
        if source_type is not None:
            details['msDS-ClaimSourceType'] = source_type
        if for_classes is not None:
            details['msDS-ClaimTypeAppliesToClass'] = for_classes
        if value_type is not None:
            details['msDS-ClaimValueType'] = value_type

        if force:
            # Remove the claim if it exists; this will happen if a previous
            # test run failed
            delete_force(samdb, claim_dn)

        try:
            samdb.add(details)
        except ldb.LdbError as err:
            num, estr = err.args
            if num != ldb.ERR_ENTRY_ALREADY_EXISTS:
                raise
            # An existing claim is only acceptable if we did not force
            # its removal above.
            self.assertFalse(force, 'should not fail with force=True')
        else:
            # Save the claim DN so it can be deleted in tearDownClass()
            self.accounts.append(str(claim_dn))
+
    def create_account(self, samdb, name, account_type=AccountType.USER,
                       spn=None, upn=None, additional_details=None,
                       ou=None, account_control=0, add_dollar=None,
                       expired_password=False, force_nt4_hash=False,
                       preserve=True):
        """Create an account for testing.

        The dn of the created account is added to self.accounts,
        which is used by tearDownClass to clean up the created accounts.

        :param samdb: connection on which to create the account
        :param name: CN for the new account
        :param account_type: an AccountType member; selects objectClass,
            userAccountControl bits and the default container
        :param spn: servicePrincipalName (str or sequence); '{account}' is
            substituted with the sAMAccountName
        :param upn: userPrincipalName; '{account}' substituted likewise
        :param additional_details: extra attributes merged into the add
        :param ou: container DN (defaults per account type)
        :param account_control: extra userAccountControl bits to OR in
        :param add_dollar: append '$' to the sAMAccountName (defaults to
            True for all non-USER account types)
        :param expired_password: set pwdLastSet=0 to expire the password
        :param force_nt4_hash: additionally set the password over SAMR so
            an NT hash is stored (bumps the expected kvno)
        :param preserve: delete in tearDownClass() (True) rather than in
            tearDown() (False)
        :return: (KerberosCredentials, dn) for the new account
        """
        if add_dollar is None and account_type is not self.AccountType.USER:
            add_dollar = True

        if ou is None:
            # Pick the default container for this account type.
            if account_type is self.AccountType.COMPUTER:
                guid = DS_GUID_COMPUTERS_CONTAINER
            elif account_type is self.AccountType.MANAGED_SERVICE or (
                    account_type is self.AccountType.GROUP_MANAGED_SERVICE):
                guid = DS_GUID_MANAGED_SERVICE_ACCOUNTS_CONTAINER
            elif account_type is self.AccountType.SERVER:
                guid = DS_GUID_DOMAIN_CONTROLLERS_CONTAINER
            else:
                guid = DS_GUID_USERS_CONTAINER

            ou = samdb.get_wellknown_dn(samdb.get_default_basedn(), guid)

        dn = "CN=%s,%s" % (name, ou)

        # remove the account if it exists, this will happen if a previous test
        # run failed
        delete_force(samdb, dn)
        account_name = name
        if add_dollar:
            account_name += '$'
        secure_schannel_type = SEC_CHAN_NULL
        if account_type is self.AccountType.USER:
            object_class = "user"
            account_control |= UF_NORMAL_ACCOUNT
        elif account_type is self.AccountType.MANAGED_SERVICE:
            object_class = "msDS-ManagedServiceAccount"
            account_control |= UF_WORKSTATION_TRUST_ACCOUNT
            secure_schannel_type = SEC_CHAN_WKSTA
        elif account_type is self.AccountType.GROUP_MANAGED_SERVICE:
            object_class = "msDS-GroupManagedServiceAccount"
            account_control |= UF_WORKSTATION_TRUST_ACCOUNT
            secure_schannel_type = SEC_CHAN_WKSTA
        else:
            object_class = "computer"
            if account_type is self.AccountType.COMPUTER:
                account_control |= UF_WORKSTATION_TRUST_ACCOUNT
                secure_schannel_type = SEC_CHAN_WKSTA
            elif account_type is self.AccountType.SERVER:
                account_control |= UF_SERVER_TRUST_ACCOUNT
                secure_schannel_type = SEC_CHAN_BDC
            else:
                self.fail()

        details = {
            "dn": dn,
            "objectClass": object_class,
            "sAMAccountName": account_name,
            "userAccountControl": str(account_control),
        }

        if account_type is self.AccountType.GROUP_MANAGED_SERVICE:
            # No caller-supplied password for gMSA accounts.
            password = None
        else:
            password = generate_random_password(32, 32)
            utf16pw = ('"%s"' % password).encode('utf-16-le')

            details['unicodePwd'] = utf16pw

        if upn is not None:
            upn = upn.format(account=account_name)
        if spn is not None:
            if isinstance(spn, str):
                spn = spn.format(account=account_name)
            else:
                spn = tuple(s.format(account=account_name) for s in spn)
            details["servicePrincipalName"] = spn
        if upn is not None:
            details["userPrincipalName"] = upn
        if expired_password:
            details["pwdLastSet"] = "0"
        if additional_details is not None:
            details.update(additional_details)
        if preserve:
            # Mark this account for deletion in tearDownClass() after all the
            # tests in this class finish.
            self.accounts.append(dn)
        else:
            # Mark this account for deletion in tearDown() after the current
            # test finishes. Because the time complexity of deleting an account
            # in Samba scales with the number of accounts, it is faster to
            # delete accounts as soon as possible than to keep them around
            # until all the tests are finished.
            self.test_accounts.append(dn)
        samdb.add(details)

        expected_kvno = 1

        if force_nt4_hash:
            admin_creds = self.get_admin_creds()
            lp = self.get_lp()
            net_ctx = net.Net(admin_creds, lp, server=self.dc_host)
            domain = samdb.domain_netbios_name().upper()

            password = generate_random_password(32, 32)
            utf16pw = ('"%s"' % password).encode('utf-16-le')

            try:
                net_ctx.set_password(newpassword=password,
                                     account_name=account_name,
                                     domain_name=domain,
                                     force_samr_18=True)
                # The extra password set bumps the key version number.
                expected_kvno += 1
            except Exception as e:
                self.fail(e)

        creds = KerberosCredentials()
        creds.guess(self.get_lp())
        creds.set_realm(samdb.domain_dns_name().upper())
        creds.set_domain(samdb.domain_netbios_name().upper())
        if password is not None:
            creds.set_password(password)
        creds.set_username(account_name)
        if account_type is self.AccountType.USER:
            creds.set_workstation('')
        else:
            creds.set_workstation(name)
        creds.set_secure_channel_type(secure_schannel_type)
        creds.set_dn(ldb.Dn(samdb, dn))
        creds.set_upn(upn)
        creds.set_spn(spn)
        creds.set_type(account_type)

        self.creds_set_enctypes(creds)

        res = samdb.search(base=dn,
                           scope=ldb.SCOPE_BASE,
                           attrs=['msDS-KeyVersionNumber',
                                  'objectSid'])

        kvno = res[0].get('msDS-KeyVersionNumber', idx=0)
        if kvno is not None:
            self.assertEqual(int(kvno), expected_kvno)
        creds.set_kvno(expected_kvno)

        sid = res[0].get('objectSid', idx=0)
        sid = samdb.schema_format_value('objectSID', sid)
        sid = sid.decode('utf-8')
        creds.set_sid(sid)

        return (creds, dn)
+
    def get_security_descriptor(self, dn):
        """Return a packed security descriptor owned by BUILTIN
        Administrators whose single-ACE DACL grants control-access rights
        to the object at *dn* (as trustee, by its SID)."""
        samdb = self.get_samdb()

        sid = self.get_objectSid(samdb, dn)

        owner_sid = security.dom_sid(security.SID_BUILTIN_ADMINISTRATORS)

        ace = security.ace()
        ace.access_mask = security.SEC_ADS_CONTROL_ACCESS

        ace.trustee = security.dom_sid(sid)

        dacl = security.acl()
        dacl.revision = security.SECURITY_ACL_REVISION_ADS
        dacl.aces = [ace]
        dacl.num_aces = 1

        security_desc = security.descriptor()
        security_desc.type |= security.SEC_DESC_DACL_PRESENT
        security_desc.owner_sid = owner_sid
        security_desc.dacl = dacl

        return ndr_pack(security_desc)
+
    def create_rodc(self, ctx):
        """Configure *ctx* as an RODC join context and add its objects.

        Only the directory objects are created (join_add_objects()); no
        replication or service start is performed.
        """
        ctx.nc_list = [ctx.base_dn, ctx.config_dn, ctx.schema_dn]
        ctx.full_nc_list = [ctx.base_dn, ctx.config_dn, ctx.schema_dn]
        ctx.krbtgt_dn = f'CN=krbtgt_{ctx.myname},CN=Users,{ctx.base_dn}'

        # Privileged accounts whose secrets must never be replicated to
        # the RODC.
        ctx.never_reveal_sid = [f'<SID={ctx.domsid}-{security.DOMAIN_RID_RODC_DENY}>',
                                f'<SID={security.SID_BUILTIN_ADMINISTRATORS}>',
                                f'<SID={security.SID_BUILTIN_SERVER_OPERATORS}>',
                                f'<SID={security.SID_BUILTIN_BACKUP_OPERATORS}>',
                                f'<SID={security.SID_BUILTIN_ACCOUNT_OPERATORS}>']
        ctx.reveal_sid = f'<SID={ctx.domsid}-{security.DOMAIN_RID_RODC_ALLOW}>'

        mysid = ctx.get_mysid()
        admin_dn = f'<SID={mysid}>'
        ctx.managedby = admin_dn

        ctx.userAccountControl = (UF_WORKSTATION_TRUST_ACCOUNT |
                                  UF_TRUSTED_TO_AUTHENTICATE_FOR_DELEGATION |
                                  UF_PARTIAL_SECRETS_ACCOUNT)

        ctx.connection_dn = f'CN=RODC Connection (FRS),{ctx.ntds_dn}'
        ctx.secure_channel_type = misc.SEC_CHAN_RODC
        ctx.RODC = True
        ctx.replica_flags = (drsuapi.DRSUAPI_DRS_INIT_SYNC |
                             drsuapi.DRSUAPI_DRS_PER_SYNC |
                             drsuapi.DRSUAPI_DRS_GET_ANC |
                             drsuapi.DRSUAPI_DRS_NEVER_SYNCED |
                             drsuapi.DRSUAPI_DRS_SPECIAL_SECRET_PROCESSING)
        ctx.domain_replica_flags = ctx.replica_flags | drsuapi.DRSUAPI_DRS_CRITICAL_ONLY

        ctx.build_nc_lists()

        # Remove any leftovers from an earlier failed run before joining.
        ctx.cleanup_old_join()

        try:
            ctx.join_add_objects()
        except Exception:
            # cleanup the failed join (checking we still have a live LDB
            # connection to the remote DC first)
            ctx.refresh_ldb_connection()
            ctx.cleanup_old_join()
            raise
+
    def replicate_account_to_rodc(self, dn):
        """Replicate the secrets of the account at *dn* to the RODC.

        First try the replicateSingleObject rootDSE operation against the
        RODC; if that is not supported (rodc:local environments), fall back
        to replicating directly into the local database over DRSUAPI.
        """
        samdb = self.get_samdb()
        rodc_samdb = self.get_rodc_samdb()

        repl_val = f'{samdb.get_dsServiceName()}:{dn}:SECRETS_ONLY'

        msg = ldb.Message()
        msg.dn = ldb.Dn(rodc_samdb, '')
        msg['replicateSingleObject'] = ldb.MessageElement(
            repl_val,
            ldb.FLAG_MOD_REPLACE,
            'replicateSingleObject')

        try:
            # Try replication using the replicateSingleObject rootDSE
            # operation.
            rodc_samdb.modify(msg)
        except ldb.LdbError as err:
            enum, estr = err.args
            # Only the specific "unsupported operation" failure is
            # expected; anything else is a genuine error.
            self.assertEqual(enum, ldb.ERR_UNWILLING_TO_PERFORM)
            self.assertIn('rootdse_modify: unknown attribute to change!',
                          estr)

            # If that method wasn't supported, we may be in the rodc:local test
            # environment, where we can try replicating to the local database.

            lp = self.get_lp()

            rodc_creds = Credentials()
            rodc_creds.guess(lp)
            rodc_creds.set_machine_account(lp)

            local_samdb = SamDB(url=None, session_info=system_session(),
                                credentials=rodc_creds, lp=lp)

            destination_dsa_guid = misc.GUID(local_samdb.get_ntds_GUID())

            repl = drs_Replicate(f'ncacn_ip_tcp:{self.dc_host}[seal]',
                                 lp, rodc_creds,
                                 local_samdb, destination_dsa_guid)

            source_dsa_invocation_id = misc.GUID(samdb.invocation_id)

            repl.replicate(dn,
                           source_dsa_invocation_id,
                           destination_dsa_guid,
                           exop=drsuapi.DRSUAPI_EXOP_REPL_SECRET,
                           rodc=True)
+
    def reveal_account_to_mock_rodc(self, dn):
        """Reveal the account at *dn* to the mock RODC by requesting its
        secrets with the mock RODC's NTDS GUID as the destination DSA."""
        samdb = self.get_samdb()
        rodc_ctx = self.get_mock_rodc_ctx()

        self.get_secrets(
            dn,
            destination_dsa_guid=rodc_ctx.ntds_guid,
            source_dsa_invocation_id=misc.GUID(samdb.invocation_id))
+
+ def check_revealed(self, dn, rodc_dn, revealed=True):
+ samdb = self.get_samdb()
+
+ res = samdb.search(base=rodc_dn,
+ scope=ldb.SCOPE_BASE,
+ attrs=['msDS-RevealedUsers'])
+
+ revealed_users = res[0].get('msDS-RevealedUsers')
+ if revealed_users is None:
+ self.assertFalse(revealed)
+ return
+
+ revealed_dns = set(str(dsdb_Dn(samdb, str(user),
+ syntax_oid=DSDB_SYNTAX_BINARY_DN).dn)
+ for user in revealed_users)
+
+ if revealed:
+ self.assertIn(str(dn), revealed_dns)
+ else:
+ self.assertNotIn(str(dn), revealed_dns)
+
    def get_secrets(self, dn,
                    destination_dsa_guid,
                    source_dsa_invocation_id):
        """Fetch the secret attributes of *dn* with a DsGetNCChanges
        REPL_SECRET extended operation.

        Returns (bind, identifier, attributes): the DRSUAPI bind handle
        (needed later to decrypt the values), the replicated object's
        identifier and its attribute list.
        """
        bind, handle, _ = self.get_drsuapi_connection()

        req = drsuapi.DsGetNCChangesRequest8()

        req.destination_dsa_guid = destination_dsa_guid
        req.source_dsa_invocation_id = source_dsa_invocation_id

        naming_context = drsuapi.DsReplicaObjectIdentifier()
        naming_context.dn = dn

        req.naming_context = naming_context

        # A zeroed high-water mark: start from scratch.
        hwm = drsuapi.DsReplicaHighWaterMark()
        hwm.tmp_highest_usn = 0
        hwm.reserved_usn = 0
        hwm.highest_usn = 0

        req.highwatermark = hwm
        req.uptodateness_vector = None

        req.replica_flags = 0

        req.max_object_count = 1
        req.max_ndr_size = 402116
        req.extended_op = drsuapi.DRSUAPI_EXOP_REPL_SECRET

        # Only the password-related attributes are requested.
        attids = [drsuapi.DRSUAPI_ATTID_supplementalCredentials,
                  drsuapi.DRSUAPI_ATTID_unicodePwd,
                  drsuapi.DRSUAPI_ATTID_ntPwdHistory]

        partial_attribute_set = drsuapi.DsPartialAttributeSet()
        partial_attribute_set.version = 1
        partial_attribute_set.attids = attids
        partial_attribute_set.num_attids = len(attids)

        req.partial_attribute_set = partial_attribute_set

        req.partial_attribute_set_ex = None
        req.mapping_ctr.num_mappings = 0
        req.mapping_ctr.mappings = None

        _, ctr = bind.DsGetNCChanges(handle, 8, req)

        # Exactly the one requested object must come back, and it must be
        # the object we asked for.
        self.assertEqual(1, ctr.object_count)

        identifier = ctr.first_object.object.identifier
        attributes = ctr.first_object.object.attribute_ctr.attributes

        self.assertEqual(dn, identifier.dn)

        return bind, identifier, attributes
+
    def get_keys(self, creds, expected_etypes=None):
        """Replicate and decrypt the Kerberos keys of the account *creds*.

        Returns a dict mapping enctype to hex key string, asserting that
        the set of key types equals *expected_etypes* (defaulting to
        get_default_enctypes(creds)).
        """
        admin_creds = self.get_admin_creds()
        samdb = self.get_samdb()

        dn = creds.get_dn()

        bind, identifier, attributes = self.get_secrets(
            str(dn),
            destination_dsa_guid=misc.GUID(samdb.get_ntds_GUID()),
            source_dsa_invocation_id=misc.GUID())

        # The RID is needed to decrypt the replicated values.
        rid = identifier.sid.split()[1]

        net_ctx = net.Net(admin_creds)

        keys = {}

        for attr in attributes:
            if attr.attid == drsuapi.DRSUAPI_ATTID_supplementalCredentials:
                net_ctx.replicate_decrypt(bind, attr, rid)
                if attr.value_ctr.num_values == 0:
                    continue
                attr_val = attr.value_ctr.values[0].blob

                # The AES keys live in the Primary:Kerberos-Newer-Keys
                # package of the supplementalCredentials blob.
                spl = ndr_unpack(drsblobs.supplementalCredentialsBlob,
                                 attr_val)
                for pkg in spl.sub.packages:
                    if pkg.name == 'Primary:Kerberos-Newer-Keys':
                        krb5_new_keys_raw = binascii.a2b_hex(pkg.data)
                        krb5_new_keys = ndr_unpack(
                            drsblobs.package_PrimaryKerberosBlob,
                            krb5_new_keys_raw)
                        for key in krb5_new_keys.ctr.keys:
                            keytype = key.keytype
                            if keytype in (kcrypto.Enctype.AES256,
                                           kcrypto.Enctype.AES128):
                                keys[keytype] = key.value.hex()
            elif attr.attid == drsuapi.DRSUAPI_ATTID_unicodePwd:
                net_ctx.replicate_decrypt(bind, attr, rid)
                if attr.value_ctr.num_values > 0:
                    # The NT hash in unicodePwd serves as the RC4 key.
                    pwd = attr.value_ctr.values[0].blob
                    keys[kcrypto.Enctype.RC4] = pwd.hex()

        if expected_etypes is None:
            expected_etypes = self.get_default_enctypes(creds)

        self.assertCountEqual(expected_etypes, keys)

        return keys
+
+ def creds_set_keys(self, creds, keys):
+ if keys is not None:
+ for enctype, key in keys.items():
+ creds.set_forced_key(enctype, key)
+
+ def creds_set_enctypes(self, creds,
+ extra_bits=None,
+ remove_bits=None):
+ samdb = self.get_samdb()
+
+ res = samdb.search(creds.get_dn(),
+ scope=ldb.SCOPE_BASE,
+ attrs=['msDS-SupportedEncryptionTypes'])
+ supported_enctypes = res[0].get('msDS-SupportedEncryptionTypes', idx=0)
+
+ if supported_enctypes is None:
+ supported_enctypes = self.default_etypes
+ if supported_enctypes is None:
+ lp = self.get_lp()
+ supported_enctypes = lp.get('kdc default domain supported enctypes')
+ if supported_enctypes == 0:
+ supported_enctypes = rc4_bit | aes256_sk_bit
+ supported_enctypes = int(supported_enctypes)
+
+ if extra_bits is not None:
+ # We need to add in implicit or implied encryption types.
+ supported_enctypes |= extra_bits
+ if remove_bits is not None:
+ # We also need to remove certain bits, such as the non-encryption
+ # type bit aes256-sk.
+ supported_enctypes &= ~remove_bits
+
+ creds.set_as_supported_enctypes(supported_enctypes)
+ creds.set_tgs_supported_enctypes(supported_enctypes)
+ creds.set_ap_supported_enctypes(supported_enctypes)
+
+ def creds_set_default_enctypes(self, creds,
+ fast_support=False,
+ claims_support=False,
+ compound_id_support=False):
+ default_enctypes = self.get_default_enctypes(creds)
+ supported_enctypes = KerberosCredentials.etypes_to_bits(
+ default_enctypes)
+
+ if fast_support:
+ supported_enctypes |= security.KERB_ENCTYPE_FAST_SUPPORTED
+ if claims_support:
+ supported_enctypes |= security.KERB_ENCTYPE_CLAIMS_SUPPORTED
+ if compound_id_support:
+ supported_enctypes |= (
+ security.KERB_ENCTYPE_COMPOUND_IDENTITY_SUPPORTED)
+
+ creds.set_as_supported_enctypes(supported_enctypes)
+ creds.set_tgs_supported_enctypes(supported_enctypes)
+ creds.set_ap_supported_enctypes(supported_enctypes)
+
    def add_to_group(self, account_dn, group_dn, group_attr, expect_attr=True,
                     new_group_type=None):
        """Add one or more accounts to an attribute of a group object.

        :param account_dn: a DN string, or an iterable of DN strings, to add
            (ldb.Dn and bytes are deliberately rejected below)
        :param group_dn: DN of the group object to modify
        :param group_attr: the attribute (e.g. 'member') to extend
        :param expect_attr: if True, fail unless the attribute already exists
        :param new_group_type: if given, also replace the group's groupType
            in the same modify
        :return: an ldb message undoing the change (also queued on
            self.ldb_cleanups)
        """
        samdb = self.get_samdb()

        try:
            res = samdb.search(base=group_dn,
                               scope=ldb.SCOPE_BASE,
                               attrs=[group_attr])
        except ldb.LdbError as err:
            num, _ = err.args
            if num != ldb.ERR_NO_SUCH_OBJECT:
                raise

            # The group is missing entirely: report it as a test failure.
            self.fail(err)

        orig_msg = res[0]
        members = orig_msg.get(group_attr)
        if expect_attr:
            self.assertIsNotNone(members)
        elif members is None:
            members = ()
        else:
            # Decode the existing (bytes) values so they mix with the str
            # DNs added below.
            members = map(lambda s: s.decode('utf-8'), members)

        # Use a set so we can handle the same group being added twice.
        members = set(members)

        self.assertNotIsInstance(account_dn, ldb.Dn,
                                 'ldb.MessageElement does not support ldb.Dn')
        self.assertNotIsInstance(account_dn, bytes)

        if isinstance(account_dn, str):
            members.add(account_dn)
        else:
            members.update(account_dn)

        msg = ldb.Message()
        msg.dn = group_dn
        # NOTE(review): the '0'/'1' keys appear to be placeholders that keep
        # the groupType element ordered before the membership element — the
        # real attribute names are passed to ldb.MessageElement; confirm.
        if new_group_type is not None:
            msg['0'] = ldb.MessageElement(
                common.normalise_int32(new_group_type),
                ldb.FLAG_MOD_REPLACE,
                'groupType')
        msg['1'] = ldb.MessageElement(list(members),
                                      ldb.FLAG_MOD_REPLACE,
                                      group_attr)
        cleanup = samdb.msg_diff(msg, orig_msg)
        self.ldb_cleanups.append(cleanup)
        samdb.modify(msg)

        return cleanup
+
+ def remove_from_group(self, account_dn, group_dn):
+ samdb = self.get_samdb()
+
+ res = samdb.search(base=group_dn,
+ scope=ldb.SCOPE_BASE,
+ attrs=['member'])
+ orig_msg = res[0]
+ self.assertIn('member', orig_msg)
+ members = list(orig_msg['member'])
+
+ account_dn = str(account_dn).encode('utf-8')
+ self.assertIn(account_dn, members)
+ members.remove(account_dn)
+
+ msg = ldb.Message()
+ msg.dn = group_dn
+ msg['member'] = ldb.MessageElement(members,
+ ldb.FLAG_MOD_REPLACE,
+ 'member')
+
+ cleanup = samdb.msg_diff(msg, orig_msg)
+ self.ldb_cleanups.append(cleanup)
+ samdb.modify(msg)
+
+ return cleanup
+
+ # Create a new group and return a Principal object representing it.
+ def create_group_principal(self, samdb, group_type):
+ name = self.get_new_username()
+ dn = self.create_group(samdb, name, gtype=group_type.value)
+ sid = self.get_objectSid(samdb, dn)
+
+ return Principal(ldb.Dn(samdb, dn), sid)
+
+ def set_group_type(self, samdb, dn, gtype):
+ group_type = common.normalise_int32(gtype.value)
+ msg = ldb.Message(dn)
+ msg['groupType'] = ldb.MessageElement(group_type,
+ ldb.FLAG_MOD_REPLACE,
+ 'groupType')
+ samdb.modify(msg)
+
    def set_primary_group(self, samdb, dn, primary_sid,
                          expected_error=None,
                          expected_werror=None):
        """Set the primaryGroupId of the account at *dn* from a group SID.

        :param primary_sid: SID of the group; its RID becomes the new
            primaryGroupId
        :param expected_error: if given, the modify is expected to fail with
            this ldb error code (expected_werror must then also be given and
            is matched against the error string as an 8-digit hex WERROR)
        """
        # Get the RID to be set as our primary group.
        primary_rid = primary_sid.rsplit('-', 1)[1]

        # Find out our current primary group.
        res = samdb.search(dn,
                           scope=ldb.SCOPE_BASE,
                           attrs=['primaryGroupId'])
        orig_msg = res[0]

        # Prepare to modify the attribute.
        msg = ldb.Message(dn)
        msg['primaryGroupId'] = ldb.MessageElement(str(primary_rid),
                                                   ldb.FLAG_MOD_REPLACE,
                                                   'primaryGroupId')

        # We'll remove the primaryGroupId attribute after the test, to avoid
        # problems in the teardown if the user outlives the group.
        remove_msg = samdb.msg_diff(msg, orig_msg)
        self.addCleanup(samdb.modify, remove_msg)

        # Set primaryGroupId.
        if expected_error is None:
            self.assertIsNone(expected_werror)

            samdb.modify(msg)
        else:
            self.assertIsNotNone(expected_werror)

            with self.assertRaises(
                    ldb.LdbError,
                    msg='expected setting primary group to fail'
            ) as err:
                samdb.modify(msg)

            error, estr = err.exception.args
            self.assertEqual(expected_error, error)
            self.assertIn(f'{expected_werror:08X}', estr)
+
    # Create an arrangement of groups based on a configuration specified in a
    # test case. 'user_principal' is a principal representing the user account;
    # 'trust_principal', a principal representing the account of a user from
    # another domain.
    def setup_groups(self,
                     samdb,
                     preexisting_groups,
                     group_setup,
                     primary_groups):
        """Create groups, populate their memberships and set primary groups.

        :param preexisting_groups: mapping of group ID -> Principal for
            groups that already exist (placeholders)
        :param group_setup: mapping of group ID -> (group_type, members);
            each such group is created here, and 'members' may reference
            other group IDs
        :param primary_groups: mapping of user Principal -> group ID to set
            as that user's primary group, or None
        :return: mapping from group IDs to Principal objects
        """
        groups = dict(preexisting_groups)

        primary_group_types = {}

        # Create each group and add it to the group mapping.
        if group_setup is not None:
            for group_id, (group_type, _) in group_setup.items():
                self.assertNotIn(group_id, preexisting_groups,
                                 "don't specify placeholders")
                self.assertNotIn(group_id, groups,
                                 'group ID specified more than once')

                if primary_groups is not None and (
                        group_id in primary_groups.values()):
                    # Windows disallows setting a domain-local group as a
                    # primary group, unless we create it as Universal first and
                    # change it back to Domain-Local later.
                    primary_group_types[group_id] = group_type
                    group_type = GroupType.UNIVERSAL

                groups[group_id] = self.create_group_principal(samdb,
                                                               group_type)

        if group_setup is not None:
            # Map a group ID to that group's DN, and generate an
            # understandable error message if the mapping fails.
            def group_id_to_dn(group_id):
                try:
                    group = groups[group_id]
                except KeyError:
                    self.fail(f"included group member '{group_id}', but it is "
                              f"not specified in {groups.keys()}")
                else:
                    if group.dn is not None:
                        return str(group.dn)

                    # No DN available: fall back to a SID-based link.
                    return f'<SID={group.sid}>'

            # Populate each group's members.
            for group_id, (_, members) in group_setup.items():
                # Get the group's DN and the mapped DNs of its members.
                dn = groups[group_id].dn
                principal_members = map(group_id_to_dn, members)

                # Add the members to the group.
                self.add_to_group(principal_members, dn, 'member',
                                  expect_attr=False)

        # Set primary groups.
        if primary_groups is not None:
            for user, primary_group in primary_groups.items():
                primary_sid = groups[primary_group].sid
                self.set_primary_group(samdb, user.dn, primary_sid)

        # Change the primary groups to their actual group types.
        for primary_group, primary_group_type in primary_group_types.items():
            self.set_group_type(samdb,
                                groups[primary_group].dn,
                                primary_group_type)

        # Return the mapping from group IDs to principals.
        return groups
+
+ def map_to_sid(self, val, mapping, domain_sid):
+ if isinstance(val, int):
+ # If it's an integer, we assume it's a RID, and prefix the domain
+ # SID.
+ self.assertIsNotNone(domain_sid)
+ return f'{domain_sid}-{val}'
+
+ if mapping is not None and val in mapping:
+ # Or if we have a mapping for it, apply that.
+ return mapping[val].sid
+
+ # Otherwise leave it unmodified.
+ return val
+
+ def map_to_dn(self, val, mapping, domain_sid):
+ sid = self.map_to_sid(val, mapping, domain_sid)
+ return ldb.Dn(self.get_samdb(), f'<SID={sid}>')
+
+ # Return SIDs from principal placeholders based on a supplied mapping.
+ def map_sids(self, sids, mapping, domain_sid):
+ if sids is None:
+ return None
+
+ mapped_sids = set()
+
+ for entry in sids:
+ if isinstance(entry, frozenset):
+ mapped_sids.add(frozenset(self.map_sids(entry,
+ mapping,
+ domain_sid)))
+ else:
+ val, sid_type, attrs = entry
+ sid = self.map_to_sid(val, mapping, domain_sid)
+
+ # There's no point expecting the 'Claims Valid' SID to be
+ # present if we don't support claims. Filter it out to give the
+ # tests a chance of passing.
+ if not self.kdc_claims_support and (
+ sid == security.SID_CLAIMS_VALID):
+ continue
+
+ mapped_sids.add((sid, sid_type, attrs))
+
+ return mapped_sids
+
+ def issued_by_rodc(self, ticket):
+ rodc_krbtgt_creds = self.get_mock_rodc_krbtgt_creds()
+ rodc_krbtgt_key = self.TicketDecryptionKey_from_creds(
+ rodc_krbtgt_creds)
+
+ checksum_keys = {
+ krb5pac.PAC_TYPE_KDC_CHECKSUM: rodc_krbtgt_key,
+ }
+
+ return self.modified_ticket(
+ ticket,
+ new_ticket_key=rodc_krbtgt_key,
+ checksum_keys=checksum_keys)
+
+ def signed_by_rodc(self, ticket):
+ rodc_krbtgt_creds = self.get_mock_rodc_krbtgt_creds()
+ rodc_krbtgt_key = self.TicketDecryptionKey_from_creds(
+ rodc_krbtgt_creds)
+
+ checksum_keys = {
+ krb5pac.PAC_TYPE_KDC_CHECKSUM: rodc_krbtgt_key,
+ }
+
+ return self.modified_ticket(ticket,
+ checksum_keys=checksum_keys)
+
+ # Get a ticket with the SIDs in the PAC replaced with ones we specify. This
+ # is useful for creating arbitrary tickets that can be used to perform a
+ # TGS-REQ.
+ def ticket_with_sids(self,
+ ticket,
+ new_sids,
+ domain_sid,
+ user_rid,
+ set_user_flags=0,
+ reset_user_flags=0,
+ from_rodc=False):
+ if from_rodc:
+ krbtgt_creds = self.get_mock_rodc_krbtgt_creds()
+ else:
+ krbtgt_creds = self.get_krbtgt_creds()
+ krbtgt_key = self.TicketDecryptionKey_from_creds(krbtgt_creds)
+
+ checksum_keys = {
+ krb5pac.PAC_TYPE_KDC_CHECKSUM: krbtgt_key
+ }
+
+ modify_pac_fn = partial(self.set_pac_sids,
+ new_sids=new_sids,
+ domain_sid=domain_sid,
+ user_rid=user_rid,
+ set_user_flags=set_user_flags,
+ reset_user_flags=reset_user_flags)
+
+ return self.modified_ticket(ticket,
+ new_ticket_key=krbtgt_key,
+ modify_pac_fn=modify_pac_fn,
+ checksum_keys=checksum_keys)
+
    # Replace the SIDs in a PAC with 'new_sids'.
    def set_pac_sids(self,
                     pac,
                     *,
                     new_sids,
                     domain_sid=None,
                     user_rid=None,
                     set_user_flags=0,
                     reset_user_flags=0):
        """Rewrite the group/extra/resource SIDs in a PAC's LOGON_INFO.

        :param new_sids: iterable of (sid, SidType, attrs) tuples; the
            SidType selects where in the PAC each SID lands (BASE_SID,
            EXTRA_SID, RESOURCE_SID or PRIMARY_GID)
        :param domain_sid: domain SID for bare RIDs; defaults to the SAM
            database's domain SID
        :param user_rid: if given, also replaces the user RID in LOGON_INFO
            and the SID in the UPN_DNS_INFO buffer
        :param set_user_flags / reset_user_flags: user_flags bits to set or
            clear after the SID rewrite
        :return: the modified PAC (modified in place)
        """
        if domain_sid is None:
            domain_sid = self.get_samdb().get_domain_sid()

        base_sids = []
        extra_sids = []
        resource_sids = []

        resource_domain = None

        primary_gid = None

        # Filter our SIDs into three arrays depending on their ultimate
        # location in the PAC.
        for sid, sid_type, attrs in new_sids:
            if sid_type is self.SidType.BASE_SID:
                if isinstance(sid, int):
                    domain, rid = domain_sid, sid
                else:
                    domain, rid = sid.rsplit('-', 1)
                    self.assertEqual(domain_sid, domain,
                                     f'base SID {sid} must be in our domain')

                base_sid = samr.RidWithAttribute()
                base_sid.rid = int(rid)
                base_sid.attributes = attrs

                base_sids.append(base_sid)
            elif sid_type is self.SidType.EXTRA_SID:
                extra_sid = netlogon.netr_SidAttr()
                extra_sid.sid = security.dom_sid(sid)
                extra_sid.attributes = attrs

                extra_sids.append(extra_sid)
            elif sid_type is self.SidType.RESOURCE_SID:
                if isinstance(sid, int):
                    domain, rid = domain_sid, sid
                else:
                    domain, rid = sid.rsplit('-', 1)
                # All resource SIDs must come from a single domain, as the
                # PAC stores only one resource-group domain SID.
                if resource_domain is None:
                    resource_domain = domain
                else:
                    self.assertEqual(resource_domain, domain,
                                     'resource SIDs must share the same '
                                     'domain')

                resource_sid = samr.RidWithAttribute()
                resource_sid.rid = int(rid)
                resource_sid.attributes = attrs

                resource_sids.append(resource_sid)
            elif sid_type is self.SidType.PRIMARY_GID:
                self.assertIsNone(primary_gid,
                                  f'must not specify a second primary GID '
                                  f'{sid}')
                self.assertIsNone(attrs, 'cannot specify primary GID attrs')

                if isinstance(sid, int):
                    domain, primary_gid = domain_sid, sid
                else:
                    domain, primary_gid = sid.rsplit('-', 1)
                    self.assertEqual(domain_sid, domain,
                                     f'primary GID {sid} must be in our domain')
            else:
                self.fail(f'invalid SID type {sid_type}')

        found_logon_info = False

        pac_buffers = pac.buffers
        for pac_buffer in pac_buffers:
            # Find the LOGON_INFO PAC buffer.
            if pac_buffer.type == krb5pac.PAC_TYPE_LOGON_INFO:
                logon_info = pac_buffer.info.info

                # Add Extra SIDs and set the EXTRA_SIDS flag as needed.
                logon_info.info3.sidcount = len(extra_sids)
                if extra_sids:
                    logon_info.info3.sids = extra_sids
                    logon_info.info3.base.user_flags |= (
                        netlogon.NETLOGON_EXTRA_SIDS)
                else:
                    logon_info.info3.sids = None
                    logon_info.info3.base.user_flags &= ~(
                        netlogon.NETLOGON_EXTRA_SIDS)

                # Add Base SIDs.
                logon_info.info3.base.groups.count = len(base_sids)
                if base_sids:
                    logon_info.info3.base.groups.rids = base_sids
                else:
                    logon_info.info3.base.groups.rids = None

                logon_info.info3.base.domain_sid = security.dom_sid(domain_sid)
                if user_rid is not None:
                    logon_info.info3.base.rid = int(user_rid)

                if primary_gid is not None:
                    logon_info.info3.base.primary_gid = int(primary_gid)

                # Add Resource SIDs and set the RESOURCE_GROUPS flag as needed.
                logon_info.resource_groups.groups.count = len(resource_sids)
                if resource_sids:
                    resource_domain = security.dom_sid(resource_domain)
                    logon_info.resource_groups.domain_sid = resource_domain
                    logon_info.resource_groups.groups.rids = resource_sids
                    logon_info.info3.base.user_flags |= (
                        netlogon.NETLOGON_RESOURCE_GROUPS)
                else:
                    logon_info.resource_groups.domain_sid = None
                    logon_info.resource_groups.groups.rids = None
                    logon_info.info3.base.user_flags &= ~(
                        netlogon.NETLOGON_RESOURCE_GROUPS)

                logon_info.info3.base.user_flags |= set_user_flags
                logon_info.info3.base.user_flags &= ~reset_user_flags

                found_logon_info = True

            # Also replace the user's SID in the UPN DNS buffer.
            elif pac_buffer.type == krb5pac.PAC_TYPE_UPN_DNS_INFO:
                upn_dns_info_ex = pac_buffer.info.ex

                if user_rid is not None:
                    upn_dns_info_ex.objectsid = security.dom_sid(
                        f'{domain_sid}-{user_rid}')

            # But don't replace the user's SID in the Requester SID buffer, or
            # we'll get a SID mismatch.

        self.assertTrue(found_logon_info, 'no LOGON_INFO PAC buffer')

        pac.buffers = pac_buffers

        return pac
+
    # Replace the device SIDs in a PAC with 'new_sids'.
    def set_pac_device_sids(self,
                            pac,
                            *,
                            new_sids,
                            domain_sid=None,
                            user_rid):
        """Rewrite (or create) the DEVICE_INFO PAC buffer with new SIDs.

        :param new_sids: iterable of (sid, SidType, attrs) tuples; resource
            SIDs must be grouped into frozensets, one per resource domain
        :param domain_sid: domain SID for bare RIDs; defaults to the SAM
            database's domain SID
        :param user_rid: RID of the device account (required)
        :return: the modified PAC (modified in place); a DEVICE_INFO buffer
            is appended if none exists
        """
        if domain_sid is None:
            domain_sid = self.get_samdb().get_domain_sid()

        base_sids = []
        extra_sids = []
        resource_sids = []

        primary_gid = None

        # Filter our SIDs into three arrays depending on their ultimate
        # location in the PAC.
        for entry in new_sids:
            if isinstance(entry, frozenset):
                # Each frozenset holds the resource-group membership for one
                # domain.
                resource_domain = None
                domain_sids = []

                for sid, sid_type, attrs in entry:
                    self.assertIs(sid_type, self.SidType.RESOURCE_SID,
                                  'only resource SIDs may be specified in this way')

                    if isinstance(sid, int):
                        domain, rid = domain_sid, sid
                    else:
                        domain, rid = sid.rsplit('-', 1)
                    if resource_domain is None:
                        resource_domain = domain
                    else:
                        self.assertEqual(resource_domain, domain,
                                         'resource SIDs must share the same '
                                         'domain')

                    resource_sid = samr.RidWithAttribute()
                    resource_sid.rid = int(rid)
                    resource_sid.attributes = attrs

                    domain_sids.append(resource_sid)

                membership = krb5pac.PAC_DOMAIN_GROUP_MEMBERSHIP()
                if resource_domain is not None:
                    membership.domain_sid = security.dom_sid(resource_domain)
                membership.groups.rids = domain_sids
                membership.groups.count = len(domain_sids)

                resource_sids.append(membership)
            else:
                sid, sid_type, attrs = entry
                if sid_type is self.SidType.BASE_SID:
                    if isinstance(sid, int):
                        domain, rid = domain_sid, sid
                    else:
                        domain, rid = sid.rsplit('-', 1)
                        self.assertEqual(domain_sid, domain,
                                         f'base SID {sid} must be in our domain')

                    base_sid = samr.RidWithAttribute()
                    base_sid.rid = int(rid)
                    base_sid.attributes = attrs

                    base_sids.append(base_sid)
                elif sid_type is self.SidType.EXTRA_SID:
                    extra_sid = netlogon.netr_SidAttr()
                    extra_sid.sid = security.dom_sid(sid)
                    extra_sid.attributes = attrs

                    extra_sids.append(extra_sid)
                elif sid_type is self.SidType.RESOURCE_SID:
                    self.fail('specify resource groups in frozenset(s)')
                elif sid_type is self.SidType.PRIMARY_GID:
                    self.assertIsNone(primary_gid,
                                      f'must not specify a second primary GID '
                                      f'{sid}')
                    self.assertIsNone(attrs, 'cannot specify primary GID attrs')

                    if isinstance(sid, int):
                        domain, primary_gid = domain_sid, sid
                    else:
                        domain, primary_gid = sid.rsplit('-', 1)
                        self.assertEqual(domain_sid, domain,
                                         f'primary GID {sid} must be in our domain')
                else:
                    self.fail(f'invalid SID type {sid_type}')

        pac_buffers = pac.buffers
        for pac_buffer in pac_buffers:
            # Find the DEVICE_INFO PAC buffer.
            if pac_buffer.type == krb5pac.PAC_TYPE_DEVICE_INFO:
                logon_info = pac_buffer.info.info
                break
        else:
            # No DEVICE_INFO buffer present: create one and append it.
            logon_info = krb5pac.PAC_DEVICE_INFO()

            logon_info_ctr = krb5pac.PAC_DEVICE_INFO_CTR()
            logon_info_ctr.info = logon_info

            pac_buffer = krb5pac.PAC_BUFFER()
            pac_buffer.type = krb5pac.PAC_TYPE_DEVICE_INFO
            pac_buffer.info = logon_info_ctr

            pac_buffers.append(pac_buffer)

        logon_info.domain_sid = security.dom_sid(domain_sid)
        logon_info.rid = int(user_rid)

        self.assertIsNotNone(primary_gid, 'please specify the primary GID')
        logon_info.primary_gid = int(primary_gid)

        # Add Base SIDs.
        if base_sids:
            logon_info.groups.rids = base_sids
        else:
            logon_info.groups.rids = None
        logon_info.groups.count = len(base_sids)

        # Add Extra SIDs.
        if extra_sids:
            logon_info.sids = extra_sids
        else:
            logon_info.sids = None
        logon_info.sid_count = len(extra_sids)

        # Add Resource SIDs.
        if resource_sids:
            logon_info.domain_groups = resource_sids
        else:
            logon_info.domain_groups = None
        logon_info.domain_group_count = len(resource_sids)

        pac.buffers = pac_buffers
        pac.num_buffers = len(pac_buffers)

        return pac
+
    def set_pac_claims(self, pac, *, client_claims=None, device_claims=None, claim_ids=None):
        """Replace (or create) a claims buffer in a PAC.

        Exactly one of *client_claims* or *device_claims* must be given;
        each is an iterable of (source_type, entries) pairs, where entries
        are (claim_id, claim_type, values) tuples.  Claim IDs may contain
        format placeholders resolved via *claim_ids*.

        :return: the modified PAC (modified in place)
        """
        if claim_ids is None:
            claim_ids = {}

        if client_claims is not None:
            self.assertIsNone(device_claims,
                              'don’t specify both client and device claims')
            pac_claims = client_claims
            pac_buffer_type = krb5pac.PAC_TYPE_CLIENT_CLAIMS_INFO
        else:
            self.assertIsNotNone(device_claims,
                                 'please specify client or device claims')
            pac_claims = device_claims
            pac_buffer_type = krb5pac.PAC_TYPE_DEVICE_CLAIMS_INFO

        # NDR value-container type per claim type; unlisted types fall back
        # to CLAIM_STRING.
        claim_value_types = {
            claims.CLAIM_TYPE_INT64: claims.CLAIM_INT64,
            claims.CLAIM_TYPE_UINT64: claims.CLAIM_UINT64,
            claims.CLAIM_TYPE_STRING: claims.CLAIM_STRING,
            claims.CLAIM_TYPE_BOOLEAN: claims.CLAIM_UINT64,
        }

        claims_arrays = []

        for pac_claim_array in pac_claims:
            pac_claim_source_type, pac_claim_entries = (
                pac_claim_array)

            claim_entries = []

            for pac_claim_entry in pac_claim_entries:
                pac_claim_id, pac_claim_type, pac_claim_values = (
                    pac_claim_entry)

                claim_values_type = claim_value_types.get(
                    pac_claim_type, claims.CLAIM_STRING)

                claim_values_enum = claim_values_type()
                claim_values_enum.values = pac_claim_values
                claim_values_enum.value_count = len(
                    pac_claim_values)

                claim_entry = claims.CLAIM_ENTRY()
                try:
                    # Resolve {placeholder} names in the claim ID.
                    claim_entry.id = pac_claim_id.format_map(
                        claim_ids)
                except KeyError as err:
                    raise RuntimeError(
                        f'unknown claim name(s) '
                        f'in ‘{pac_claim_id}’'
                    ) from err
                claim_entry.type = pac_claim_type
                claim_entry.values = claim_values_enum

                claim_entries.append(claim_entry)

            claims_array = claims.CLAIMS_ARRAY()
            claims_array.claims_source_type = pac_claim_source_type
            claims_array.claim_entries = claim_entries
            claims_array.claims_count = len(claim_entries)

            claims_arrays.append(claims_array)

        # Wrap the arrays in the nested NDR container structures.
        claims_set = claims.CLAIMS_SET()
        claims_set.claims_arrays = claims_arrays
        claims_set.claims_array_count = len(claims_arrays)

        claims_ctr = claims.CLAIMS_SET_CTR()
        claims_ctr.claims = claims_set

        claims_ndr = claims.CLAIMS_SET_NDR()
        claims_ndr.claims = claims_ctr

        metadata = claims.CLAIMS_SET_METADATA()
        metadata.claims_set = claims_ndr
        metadata.compression_format = (
            claims.CLAIMS_COMPRESSION_FORMAT_XPRESS_HUFF)

        metadata_ctr = claims.CLAIMS_SET_METADATA_CTR()
        metadata_ctr.metadata = metadata

        metadata_ndr = claims.CLAIMS_SET_METADATA_NDR()
        metadata_ndr.claims = metadata_ctr

        # Reuse an existing buffer of this type, or append a new one.
        pac_buffers = pac.buffers
        for pac_buffer in pac_buffers:
            if pac_buffer.type == pac_buffer_type:
                break
        else:
            pac_buffer = krb5pac.PAC_BUFFER()
            pac_buffer.type = pac_buffer_type
            pac_buffer.info = krb5pac.DATA_BLOB_REM()

            pac_buffers.append(pac_buffer)

        pac_buffer.info.remaining = ndr_pack(metadata_ndr)

        pac.buffers = pac_buffers
        pac.num_buffers = len(pac_buffers)

        return pac
+
+ def add_extra_pac_buffers(self, pac, *, buffers=None):
+ if buffers is None:
+ buffers = []
+
+ pac_buffers = pac.buffers
+ for pac_buffer_type in buffers:
+ info = krb5pac.DATA_BLOB_REM()
+ # Having an empty PAC buffer will trigger an assertion failure in
+ # the MIT KDC’s k5_pac_locate_buffer(), so we need at least one
+ # byte.
+ info.remaining = b'0'
+
+ pac_buffer = krb5pac.PAC_BUFFER()
+ pac_buffer.type = pac_buffer_type
+ pac_buffer.info = info
+
+ pac_buffers.append(pac_buffer)
+
+ pac.buffers = pac_buffers
+ pac.num_buffers = len(pac_buffers)
+
+ return pac
+
+ def get_cached_creds(self, *,
+ account_type,
+ opts=None,
+ use_cache=True):
+ if opts is None:
+ opts = {}
+
+ opts_default = {
+ 'name_prefix': None,
+ 'name_suffix': None,
+ 'add_dollar': None,
+ 'upn': None,
+ 'spn': None,
+ 'additional_details': None,
+ 'allowed_replication': False,
+ 'allowed_replication_mock': False,
+ 'denied_replication': False,
+ 'denied_replication_mock': False,
+ 'revealed_to_rodc': False,
+ 'revealed_to_mock_rodc': False,
+ 'no_auth_data_required': False,
+ 'expired_password': False,
+ 'supported_enctypes': None,
+ 'not_delegated': False,
+ 'delegation_to_spn': None,
+ 'delegation_from_dn': None,
+ 'trusted_to_auth_for_delegation': False,
+ 'fast_support': False,
+ 'claims_support': False,
+ 'compound_id_support': False,
+ 'sid_compression_support': True,
+ 'member_of': None,
+ 'kerberos_enabled': True,
+ 'secure_channel_type': None,
+ 'id': None,
+ 'force_nt4_hash': False,
+ 'assigned_policy': None,
+ 'assigned_silo': None,
+ 'logon_hours': None,
+ }
+
+ account_opts = {
+ 'account_type': account_type,
+ **opts_default,
+ **opts
+ }
+
+ if use_cache:
+ cache_key = tuple(sorted(account_opts.items()))
+ creds = self.account_cache.get(cache_key)
+ if creds is not None:
+ return creds
+
+ creds = self.create_account_opts(use_cache, **account_opts)
+ if use_cache:
+ self.account_cache[cache_key] = creds
+
+ return creds
+
    def create_account_opts(self, use_cache, *,
                            account_type,
                            name_prefix,
                            name_suffix,
                            add_dollar,
                            upn,
                            spn,
                            additional_details,
                            allowed_replication,
                            allowed_replication_mock,
                            denied_replication,
                            denied_replication_mock,
                            revealed_to_rodc,
                            revealed_to_mock_rodc,
                            no_auth_data_required,
                            expired_password,
                            supported_enctypes,
                            not_delegated,
                            delegation_to_spn,
                            delegation_from_dn,
                            trusted_to_auth_for_delegation,
                            fast_support,
                            claims_support,
                            compound_id_support,
                            sid_compression_support,
                            member_of,
                            kerberos_enabled,
                            secure_channel_type,
                            id,
                            force_nt4_hash,
                            assigned_policy,
                            assigned_silo,
                            logon_hours):
        """Create a test account according to the full option set used by
        get_cached_creds(), handling delegation settings, RODC secret
        replication, group membership and Kerberos state.

        :param use_cache: whether the account will be cached (passed on as
            'preserve' to create_account())
        :return: a credentials object for the new account, with its Kerberos
            keys already fetched and set
        """
        # Delegation options only make sense for service-like accounts;
        # UF_NOT_DELEGATED only for user accounts.
        if account_type is self.AccountType.USER:
            self.assertIsNone(delegation_to_spn)
            self.assertIsNone(delegation_from_dn)
            self.assertFalse(trusted_to_auth_for_delegation)
        else:
            self.assertFalse(not_delegated)

        samdb = self.get_samdb()

        user_name = self.get_new_username()
        if name_prefix is not None:
            user_name = name_prefix + user_name
        if name_suffix is not None:
            user_name += name_suffix

        user_account_control = 0
        if trusted_to_auth_for_delegation:
            user_account_control |= UF_TRUSTED_TO_AUTHENTICATE_FOR_DELEGATION
        if not_delegated:
            user_account_control |= UF_NOT_DELEGATED
        if no_auth_data_required:
            user_account_control |= UF_NO_AUTH_DATA_REQUIRED

        if additional_details:
            details = {k: v for k, v in additional_details}
        else:
            details = {}

        # Fold the feature flags into msDS-SupportedEncryptionTypes.
        enctypes = supported_enctypes
        if fast_support:
            enctypes = enctypes or 0
            enctypes |= security.KERB_ENCTYPE_FAST_SUPPORTED
        if claims_support:
            enctypes = enctypes or 0
            enctypes |= security.KERB_ENCTYPE_CLAIMS_SUPPORTED
        if compound_id_support:
            enctypes = enctypes or 0
            enctypes |= security.KERB_ENCTYPE_COMPOUND_IDENTITY_SUPPORTED
        if sid_compression_support is False:
            enctypes = enctypes or 0
            enctypes |= security.KERB_ENCTYPE_RESOURCE_SID_COMPRESSION_DISABLED

        if enctypes is not None:
            details['msDS-SupportedEncryptionTypes'] = str(enctypes)

        if delegation_to_spn:
            details['msDS-AllowedToDelegateTo'] = delegation_to_spn

        if delegation_from_dn:
            if isinstance(delegation_from_dn, str):
                delegation_from_dn = self.get_security_descriptor(
                    delegation_from_dn)
            details['msDS-AllowedToActOnBehalfOfOtherIdentity'] = (
                delegation_from_dn)

        if spn is None and account_type is not self.AccountType.USER:
            spn = 'host/' + user_name

        if assigned_policy is not None:
            details['msDS-AssignedAuthNPolicy'] = assigned_policy

        if assigned_silo is not None:
            details['msDS-AssignedAuthNPolicySilo'] = assigned_silo

        if logon_hours is not None:
            details['logonHours'] = logon_hours

        creds, dn = self.create_account(samdb, user_name,
                                        account_type=account_type,
                                        upn=upn,
                                        spn=spn,
                                        additional_details=details,
                                        account_control=user_account_control,
                                        add_dollar=add_dollar,
                                        force_nt4_hash=force_nt4_hash,
                                        expired_password=expired_password,
                                        preserve=use_cache)

        # With a forced NT4 hash the account only has an RC4 key.
        expected_etypes = None
        if force_nt4_hash:
            expected_etypes = {kcrypto.Enctype.RC4}
        keys = self.get_keys(creds, expected_etypes=expected_etypes)
        self.creds_set_keys(creds, keys)

        # Handle secret replication to the RODC.

        if allowed_replication or revealed_to_rodc:
            rodc_samdb = self.get_rodc_samdb()
            rodc_dn = self.get_server_dn(rodc_samdb)

            # Allow replicating this account's secrets if requested, or allow
            # it only temporarily if we're about to replicate them.
            allowed_cleanup = self.add_to_group(
                dn, rodc_dn,
                'msDS-RevealOnDemandGroup')

            if revealed_to_rodc:
                # Replicate this account's secrets to the RODC.
                self.replicate_account_to_rodc(dn)

            if not allowed_replication:
                # If we don't want replicating secrets to be allowed for this
                # account, disable it again.
                samdb.modify(allowed_cleanup)

            self.check_revealed(dn,
                                rodc_dn,
                                revealed=revealed_to_rodc)

        if denied_replication:
            rodc_samdb = self.get_rodc_samdb()
            rodc_dn = self.get_server_dn(rodc_samdb)

            # Deny replicating this account's secrets to the RODC.
            self.add_to_group(dn, rodc_dn, 'msDS-NeverRevealGroup')

        # Handle secret replication to the mock RODC.

        if allowed_replication_mock or revealed_to_mock_rodc:
            # Allow replicating this account's secrets if requested, or allow
            # it only temporarily if we want to add the account to the mock
            # RODC's msDS-RevealedUsers.
            rodc_ctx = self.get_mock_rodc_ctx()
            mock_rodc_dn = ldb.Dn(samdb, rodc_ctx.acct_dn)

            allowed_mock_cleanup = self.add_to_group(
                dn, mock_rodc_dn,
                'msDS-RevealOnDemandGroup')

            if revealed_to_mock_rodc:
                # Request replicating this account's secrets to the mock RODC,
                # which updates msDS-RevealedUsers.
                self.reveal_account_to_mock_rodc(dn)

            if not allowed_replication_mock:
                # If we don't want replicating secrets to be allowed for this
                # account, disable it again.
                samdb.modify(allowed_mock_cleanup)

            self.check_revealed(dn,
                                mock_rodc_dn,
                                revealed=revealed_to_mock_rodc)

        if denied_replication_mock:
            # Deny replicating this account's secrets to the mock RODC.
            rodc_ctx = self.get_mock_rodc_ctx()
            mock_rodc_dn = ldb.Dn(samdb, rodc_ctx.acct_dn)

            self.add_to_group(dn, mock_rodc_dn, 'msDS-NeverRevealGroup')

        if member_of is not None:
            for group_dn in member_of:
                self.add_to_group(dn, ldb.Dn(samdb, group_dn), 'member',
                                  expect_attr=False)

        if kerberos_enabled:
            creds.set_kerberos_state(MUST_USE_KERBEROS)
        else:
            creds.set_kerberos_state(DONT_USE_KERBEROS)

        if secure_channel_type is not None:
            creds.set_secure_channel_type(secure_channel_type)

        return creds
+
+ def get_new_username(self):
+ user_name = self.account_base + str(self.account_id)
+ type(self).account_id += 1
+
+ return user_name
+
+ def get_client_creds(self,
+ allow_missing_password=False,
+ allow_missing_keys=True):
+ def create_client_account():
+ return self.get_cached_creds(account_type=self.AccountType.USER)
+
+ c = self._get_krb5_creds(prefix='CLIENT',
+ allow_missing_password=allow_missing_password,
+ allow_missing_keys=allow_missing_keys,
+ fallback_creds_fn=create_client_account)
+ return c
+
+ def get_mach_creds(self,
+ allow_missing_password=False,
+ allow_missing_keys=True):
+ def create_mach_account():
+ return self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={
+ 'fast_support': True,
+ 'claims_support': True,
+ 'compound_id_support': True,
+ 'supported_enctypes': (
+ security.KERB_ENCTYPE_RC4_HMAC_MD5 |
+ security.KERB_ENCTYPE_AES256_CTS_HMAC_SHA1_96_SK
+ ),
+ })
+
+ c = self._get_krb5_creds(prefix='MAC',
+ allow_missing_password=allow_missing_password,
+ allow_missing_keys=allow_missing_keys,
+ fallback_creds_fn=create_mach_account)
+ return c
+
+ def get_service_creds(self,
+ allow_missing_password=False,
+ allow_missing_keys=True):
+ def create_service_account():
+ return self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={
+ 'trusted_to_auth_for_delegation': True,
+ 'fast_support': True,
+ 'claims_support': True,
+ 'compound_id_support': True,
+ 'supported_enctypes': (
+ security.KERB_ENCTYPE_RC4_HMAC_MD5 |
+ security.KERB_ENCTYPE_AES256_CTS_HMAC_SHA1_96_SK
+ ),
+ })
+
+ c = self._get_krb5_creds(prefix='SERVICE',
+ allow_missing_password=allow_missing_password,
+ allow_missing_keys=allow_missing_keys,
+ fallback_creds_fn=create_service_account)
+ return c
+
    def get_rodc_krbtgt_creds(self,
                              require_keys=True,
                              require_strongest_key=False):
        """Obtain credentials for the real RODC's secondary krbtgt account.

        :param require_keys: fail unless keys are available
        :param require_strongest_key: additionally require the strongest key
            (implies require_keys)
        """
        if require_strongest_key:
            self.assertTrue(require_keys)

        def download_rodc_krbtgt_creds():
            samdb = self.get_samdb()
            rodc_samdb = self.get_rodc_samdb()

            rodc_dn = self.get_server_dn(rodc_samdb)

            # The RODC's msDS-KrbTgtLink points at its dedicated krbtgt
            # account.
            res = samdb.search(rodc_dn,
                               scope=ldb.SCOPE_BASE,
                               attrs=['msDS-KrbTgtLink'])
            krbtgt_dn = res[0]['msDS-KrbTgtLink'][0]

            res = samdb.search(krbtgt_dn,
                               scope=ldb.SCOPE_BASE,
                               attrs=['sAMAccountName',
                                      'msDS-KeyVersionNumber',
                                      'msDS-SecondaryKrbTgtNumber'])
            krbtgt_dn = res[0].dn
            username = str(res[0]['sAMAccountName'])

            creds = KerberosCredentials()
            creds.set_domain(self.env_get_var('DOMAIN', 'RODC_KRBTGT'))
            creds.set_realm(self.env_get_var('REALM', 'RODC_KRBTGT'))
            creds.set_username(username)

            kvno = int(res[0]['msDS-KeyVersionNumber'][0])
            krbtgt_number = int(res[0]['msDS-SecondaryKrbTgtNumber'][0])

            # An RODC kvno combines the secondary krbtgt number (upper 16
            # bits) with the key version number (lower 16 bits).
            rodc_kvno = krbtgt_number << 16 | kvno
            creds.set_kvno(rodc_kvno)
            creds.set_dn(krbtgt_dn)

            keys = self.get_keys(creds)
            self.creds_set_keys(creds, keys)

            # The RODC krbtgt account should support the default enctypes,
            # although it might not have the msDS-SupportedEncryptionTypes
            # attribute.
            self.creds_set_default_enctypes(
                creds,
                fast_support=self.kdc_fast_support,
                claims_support=self.kdc_claims_support,
                compound_id_support=self.kdc_compound_id_support)

            return creds

        c = self._get_krb5_creds(prefix='RODC_KRBTGT',
                                 allow_missing_password=True,
                                 allow_missing_keys=not require_keys,
                                 require_strongest_key=require_strongest_key,
                                 fallback_creds_fn=download_rodc_krbtgt_creds)
        return c
+
    def get_mock_rodc_krbtgt_creds(self,
                                   require_keys=True,
                                   require_strongest_key=False):
        """Return credentials for the mock RODC's krbtgt account.

        The account is created (if necessary) via the mock RODC
        context and its keys downloaded from the main samdb.
        """
        if require_strongest_key:
            # Requiring the strongest key only makes sense if keys are
            # required at all.
            self.assertTrue(require_keys)

        def create_rodc_krbtgt_account():
            samdb = self.get_samdb()

            rodc_ctx = self.get_mock_rodc_ctx()

            krbtgt_dn = rodc_ctx.new_krbtgt_dn

            res = samdb.search(base=ldb.Dn(samdb, krbtgt_dn),
                               scope=ldb.SCOPE_BASE,
                               attrs=['msDS-KeyVersionNumber',
                                      'msDS-SecondaryKrbTgtNumber'])
            dn = res[0].dn
            username = str(rodc_ctx.krbtgt_name)

            creds = KerberosCredentials()
            creds.set_domain(self.env_get_var('DOMAIN', 'RODC_KRBTGT'))
            creds.set_realm(self.env_get_var('REALM', 'RODC_KRBTGT'))
            creds.set_username(username)

            kvno = int(res[0]['msDS-KeyVersionNumber'][0])
            krbtgt_number = int(res[0]['msDS-SecondaryKrbTgtNumber'][0])

            # An RODC kvno carries the secondary krbtgt number in the
            # upper 16 bits and the key version in the lower 16 bits.
            rodc_kvno = krbtgt_number << 16 | kvno
            creds.set_kvno(rodc_kvno)
            creds.set_dn(dn)

            keys = self.get_keys(creds)
            self.creds_set_keys(creds, keys)

            # AES enctypes are advertised only at 2008+ functional
            # level; the session-key-only AES bit and RC4 are removed.
            if self.get_domain_functional_level() >= DS_DOMAIN_FUNCTION_2008:
                extra_bits = (security.KERB_ENCTYPE_AES128_CTS_HMAC_SHA1_96 |
                              security.KERB_ENCTYPE_AES256_CTS_HMAC_SHA1_96)
            else:
                extra_bits = 0
            remove_bits = (security.KERB_ENCTYPE_AES256_CTS_HMAC_SHA1_96_SK |
                           security.KERB_ENCTYPE_RC4_HMAC_MD5)
            self.creds_set_enctypes(creds,
                                    extra_bits=extra_bits,
                                    remove_bits=remove_bits)

            return creds

        c = self._get_krb5_creds(prefix='MOCK_RODC_KRBTGT',
                                 allow_missing_password=True,
                                 allow_missing_keys=not require_keys,
                                 require_strongest_key=require_strongest_key,
                                 fallback_creds_fn=create_rodc_krbtgt_account)
        return c
+
    def get_krbtgt_creds(self,
                         require_keys=True,
                         require_strongest_key=False):
        """Return credentials for the domain krbtgt account.

        If not already available via _get_krb5_creds(), the account is
        looked up by its well-known RID and its keys are downloaded.
        """
        if require_strongest_key:
            # Requiring the strongest key only makes sense if keys are
            # required at all.
            self.assertTrue(require_keys)

        def download_krbtgt_creds():
            samdb = self.get_samdb()

            # Locate the account through its well-known RID rather
            # than by name.
            krbtgt_rid = security.DOMAIN_RID_KRBTGT
            krbtgt_sid = '%s-%d' % (samdb.get_domain_sid(), krbtgt_rid)

            res = samdb.search(base='<SID=%s>' % krbtgt_sid,
                               scope=ldb.SCOPE_BASE,
                               attrs=['sAMAccountName',
                                      'msDS-KeyVersionNumber'])
            dn = res[0].dn
            username = str(res[0]['sAMAccountName'])

            creds = KerberosCredentials()
            creds.set_domain(self.env_get_var('DOMAIN', 'KRBTGT'))
            creds.set_realm(self.env_get_var('REALM', 'KRBTGT'))
            creds.set_username(username)

            kvno = int(res[0]['msDS-KeyVersionNumber'][0])
            creds.set_kvno(kvno)
            creds.set_dn(dn)

            keys = self.get_keys(creds)
            self.creds_set_keys(creds, keys)

            # The krbtgt account should support the default enctypes, although
            # it might not (on Samba) have the msDS-SupportedEncryptionTypes
            # attribute.
            self.creds_set_default_enctypes(
                creds,
                fast_support=self.kdc_fast_support,
                claims_support=self.kdc_claims_support,
                compound_id_support=self.kdc_compound_id_support)

            return creds

        c = self._get_krb5_creds(prefix='KRBTGT',
                                 default_username='krbtgt',
                                 allow_missing_password=True,
                                 allow_missing_keys=not require_keys,
                                 require_strongest_key=require_strongest_key,
                                 fallback_creds_fn=download_krbtgt_creds)
        return c
+
    def get_dc_creds(self,
                     require_keys=True,
                     require_strongest_key=False):
        """Return credentials for the domain controller's machine
        account.

        If not already available via _get_krb5_creds(), the account is
        looked up by RID 1000 and its keys are downloaded.
        """
        if require_strongest_key:
            # Requiring the strongest key only makes sense if keys are
            # required at all.
            self.assertTrue(require_keys)

        def download_dc_creds():
            samdb = self.get_samdb()

            # The DC machine account is assumed to be the first
            # non-builtin account, RID 1000.
            dc_rid = 1000
            dc_sid = '%s-%d' % (samdb.get_domain_sid(), dc_rid)

            res = samdb.search(base='<SID=%s>' % dc_sid,
                               scope=ldb.SCOPE_BASE,
                               attrs=['sAMAccountName',
                                      'msDS-KeyVersionNumber'])
            dn = res[0].dn
            username = str(res[0]['sAMAccountName'])

            creds = KerberosCredentials()
            creds.set_domain(self.env_get_var('DOMAIN', 'DC'))
            creds.set_realm(self.env_get_var('REALM', 'DC'))
            creds.set_username(username)

            kvno = int(res[0]['msDS-KeyVersionNumber'][0])
            creds.set_kvno(kvno)
            # The workstation name is the account name without the
            # trailing '$'.
            creds.set_workstation(username[:-1])
            creds.set_dn(dn)

            keys = self.get_keys(creds)
            self.creds_set_keys(creds, keys)

            # AES enctypes are advertised only at 2008+ functional
            # level; the session-key-only AES bit is removed.
            if self.get_domain_functional_level() >= DS_DOMAIN_FUNCTION_2008:
                extra_bits = (security.KERB_ENCTYPE_AES128_CTS_HMAC_SHA1_96 |
                              security.KERB_ENCTYPE_AES256_CTS_HMAC_SHA1_96)
            else:
                extra_bits = 0
            remove_bits = security.KERB_ENCTYPE_AES256_CTS_HMAC_SHA1_96_SK
            self.creds_set_enctypes(creds,
                                    extra_bits=extra_bits,
                                    remove_bits=remove_bits)

            return creds

        c = self._get_krb5_creds(prefix='DC',
                                 allow_missing_password=True,
                                 allow_missing_keys=not require_keys,
                                 require_strongest_key=require_strongest_key,
                                 fallback_creds_fn=download_dc_creds)
        return c
+
    def get_server_creds(self,
                         require_keys=True,
                         require_strongest_key=False):
        """Return credentials for the target server's machine account.

        If not already available via _get_krb5_creds(), the account is
        located by matching sAMAccountName or dNSHostName against
        self.host, and its keys are downloaded.
        """
        if require_strongest_key:
            # Requiring the strongest key only makes sense if keys are
            # required at all.
            self.assertTrue(require_keys)

        def download_server_creds():
            samdb = self.get_samdb()

            res = samdb.search(base=samdb.get_default_basedn(),
                               expression=(f'(|(sAMAccountName={self.host}*)'
                                           f'(dNSHostName={self.host}))'),
                               scope=ldb.SCOPE_SUBTREE,
                               attrs=['sAMAccountName',
                                      'msDS-KeyVersionNumber'])
            # The search must match exactly one account.
            self.assertEqual(1, len(res))
            dn = res[0].dn
            username = str(res[0]['sAMAccountName'])

            creds = KerberosCredentials()
            creds.set_domain(self.env_get_var('DOMAIN', 'SERVER'))
            creds.set_realm(self.env_get_var('REALM', 'SERVER'))
            creds.set_username(username)

            kvno = int(res[0]['msDS-KeyVersionNumber'][0])
            creds.set_kvno(kvno)
            creds.set_dn(dn)

            keys = self.get_keys(creds)
            self.creds_set_keys(creds, keys)

            # AES enctypes are advertised only at 2008+ functional
            # level; the session-key-only AES bit is removed.
            if self.get_domain_functional_level() >= DS_DOMAIN_FUNCTION_2008:
                extra_bits = (security.KERB_ENCTYPE_AES128_CTS_HMAC_SHA1_96 |
                              security.KERB_ENCTYPE_AES256_CTS_HMAC_SHA1_96)
            else:
                extra_bits = 0
            remove_bits = security.KERB_ENCTYPE_AES256_CTS_HMAC_SHA1_96_SK
            self.creds_set_enctypes(creds,
                                    extra_bits=extra_bits,
                                    remove_bits=remove_bits)

            return creds

        c = self._get_krb5_creds(prefix='SERVER',
                                 allow_missing_password=True,
                                 allow_missing_keys=not require_keys,
                                 require_strongest_key=require_strongest_key,
                                 fallback_creds_fn=download_server_creds)
        return c
+
    # Get the credentials and server principal name of either the krbtgt, or a
    # specially created account, with resource SID compression either supported
    # or unsupported.
    def get_target(self,
                   to_krbtgt, *,
                   compound_id=None,
                   compression=None,
                   extra_enctypes=0):
        """Return a (credentials, sname) pair for the ticket target.

        If *to_krbtgt* is true, the krbtgt account is used and the
        customisation keywords must be left at their defaults;
        otherwise a cached computer account is used, configured
        according to *compound_id*, *compression* and
        *extra_enctypes*.
        """
        if to_krbtgt:
            # The customisation options cannot be applied to the
            # krbtgt account.
            self.assertIsNone(compound_id,
                              "it's no good specifying compound id support "
                              "for the krbtgt")
            self.assertIsNone(compression,
                              "it's no good specifying compression support "
                              "for the krbtgt")
            self.assertFalse(extra_enctypes,
                             "it's no good specifying extra enctypes "
                             "for the krbtgt")
            creds = self.get_krbtgt_creds()
            sname = self.get_krbtgt_sname()
        else:
            creds = self.get_cached_creds(
                account_type=self.AccountType.COMPUTER,
                opts={
                    'supported_enctypes':
                        security.KERB_ENCTYPE_RC4_HMAC_MD5 |
                        security.KERB_ENCTYPE_AES256_CTS_HMAC_SHA1_96 |
                        extra_enctypes,
                    'compound_id_support': compound_id,
                    'sid_compression_support': compression,
                })
            target_name = creds.get_username()

            # Build a host/<name> service principal, stripping the
            # trailing '$' from the machine account name.
            if target_name[-1] == '$':
                target_name = target_name[:-1]
            sname = self.PrincipalName_create(
                name_type=NT_PRINCIPAL,
                names=['host', target_name])

        return creds, sname
+
+ def as_req(self, cname, sname, realm, etypes, padata=None, kdc_options=0):
+ """Send a Kerberos AS_REQ, returns the undecoded response
+ """
+
+ till = self.get_KerberosTime(offset=36000)
+
+ req = self.AS_REQ_create(padata=padata,
+ kdc_options=str(kdc_options),
+ cname=cname,
+ realm=realm,
+ sname=sname,
+ from_time=None,
+ till_time=till,
+ renew_time=None,
+ nonce=0x7fffffff,
+ etypes=etypes,
+ addresses=None,
+ additional_tickets=None)
+ rep = self.send_recv_transaction(req)
+ return rep
+
    def get_as_rep_key(self, creds, rep):
        """Derive the client's password-based key for an AS exchange.

        Despite the name, *rep* is expected to be an error reply whose
        e-data contains METHOD-DATA: the ETYPE-INFO2 entry found there
        is combined with the password in *creds* to produce the key.

        :raises AssertionError: if no ETYPE-INFO2 padata is present.
        """
        rep_padata = self.der_decode(
            rep['e-data'],
            asn1Spec=krb5_asn1.METHOD_DATA())

        # Find the ETYPE-INFO2 entry among the returned padata.
        for pa in rep_padata:
            if pa['padata-type'] == PADATA_ETYPE_INFO2:
                padata_value = pa['padata-value']
                break
        else:
            self.fail('expected to find ETYPE-INFO2')

        etype_info2 = self.der_decode(
            padata_value, asn1Spec=krb5_asn1.ETYPE_INFO2())

        # Use the first (preferred) ETYPE-INFO2 entry.
        key = self.PasswordKey_from_etype_info2(creds, etype_info2[0],
                                                creds.get_kvno())
        return key
+
+ def get_enc_timestamp_pa_data(self, creds, rep, skew=0):
+ """generate the pa_data data element for an AS-REQ
+ """
+
+ key = self.get_as_rep_key(creds, rep)
+
+ return self.get_enc_timestamp_pa_data_from_key(key, skew=skew)
+
+ def get_enc_timestamp_pa_data_from_key(self, key, skew=0):
+ (patime, pausec) = self.get_KerberosTimeWithUsec(offset=skew)
+ padata = self.PA_ENC_TS_ENC_create(patime, pausec)
+ padata = self.der_encode(padata, asn1Spec=krb5_asn1.PA_ENC_TS_ENC())
+
+ padata = self.EncryptedData_create(key, KU_PA_ENC_TIMESTAMP, padata)
+ padata = self.der_encode(padata, asn1Spec=krb5_asn1.EncryptedData())
+
+ padata = self.PA_DATA_create(PADATA_ENC_TIMESTAMP, padata)
+
+ return padata
+
+ def get_challenge_pa_data(self, client_challenge_key, skew=0):
+ patime, pausec = self.get_KerberosTimeWithUsec(offset=skew)
+ padata = self.PA_ENC_TS_ENC_create(patime, pausec)
+ padata = self.der_encode(padata,
+ asn1Spec=krb5_asn1.PA_ENC_TS_ENC())
+
+ padata = self.EncryptedData_create(client_challenge_key,
+ KU_ENC_CHALLENGE_CLIENT,
+ padata)
+ padata = self.der_encode(padata,
+ asn1Spec=krb5_asn1.EncryptedData())
+
+ padata = self.PA_DATA_create(PADATA_ENCRYPTED_CHALLENGE,
+ padata)
+
+ return padata
+
    def get_as_rep_enc_data(self, key, rep):
        """Decrypt and decode the encrypted part of an AS-REP.

        :param key: the key to decrypt the enc-part with.
        :param rep: the AS-REP message.
        :return: the decoded EncASRepPart (or EncTGSRepPart).
        """
        enc_part = key.decrypt(KU_AS_REP_ENC_PART, rep['enc-part']['cipher'])
        # MIT KDC encodes both EncASRepPart and EncTGSRepPart with
        # application tag 26, so fall back to the TGS spec if decoding
        # as an AS rep part fails.
        try:
            enc_part = self.der_decode(
                enc_part, asn1Spec=krb5_asn1.EncASRepPart())
        except Exception:
            enc_part = self.der_decode(
                enc_part, asn1Spec=krb5_asn1.EncTGSRepPart())

        return enc_part
+
+ def check_pre_authentication(self, rep):
+ """ Check that the kdc response was pre-authentication required
+ """
+ self.check_error_rep(rep, KDC_ERR_PREAUTH_REQUIRED)
+
+ def check_as_reply(self, rep):
+ """ Check that the kdc response is an AS-REP and that the
+ values for:
+ msg-type
+ pvno
+ tkt-pvno
+ kvno
+ match the expected values
+ """
+ self.check_reply(rep, msg_type=KRB_AS_REP)
+
+ def check_tgs_reply(self, rep):
+ """ Check that the kdc response is an TGS-REP and that the
+ values for:
+ msg-type
+ pvno
+ tkt-pvno
+ kvno
+ match the expected values
+ """
+ self.check_reply(rep, msg_type=KRB_TGS_REP)
+
    def check_reply(self, rep, msg_type):
        """Assert that *rep* is a KDC reply of the given message type,
        with the expected protocol and ticket version numbers and,
        when a kvno is present, one that is not an RODC kvno.
        """
        # Should have a reply, and it should be a message of the
        # expected type.
        self.assertIsNotNone(rep)
        self.assertEqual(rep['msg-type'], msg_type, "rep = {%s}" % rep)

        # Protocol version number should be 5
        pvno = int(rep['pvno'])
        self.assertEqual(5, pvno, "rep = {%s}" % rep)

        # The ticket version number should be 5
        tkt_vno = int(rep['ticket']['tkt-vno'])
        self.assertEqual(5, tkt_vno, "rep = {%s}" % rep)

        # Check that the kvno is not an RODC kvno
        # MIT kerberos does not provide the kvno, so we treat it as optional.
        # This is tested in compatability_test.py
        if 'kvno' in rep['enc-part']:
            kvno = int(rep['enc-part']['kvno'])
            # If the high order bits are set this is an RODC kvno.
            self.assertEqual(0, kvno & 0xFFFF0000, "rep = {%s}" % rep)
+
+ def check_error_rep(self, rep, expected):
+ """ Check that the reply is an error message, with the expected
+ error-code specified.
+ """
+ self.assertIsNotNone(rep)
+ self.assertEqual(rep['msg-type'], KRB_ERROR, "rep = {%s}" % rep)
+ if isinstance(expected, collections.abc.Container):
+ self.assertIn(rep['error-code'], expected, "rep = {%s}" % rep)
+ else:
+ self.assertEqual(rep['error-code'], expected, "rep = {%s}" % rep)
+
    def tgs_req(self, cname, sname, realm, ticket, key, etypes,
                expected_error_mode=0, padata=None, kdc_options=0,
                to_rodc=False, creds=None, service_creds=None, expect_pac=True,
                expect_edata=None, expected_flags=None, unexpected_flags=None):
        """Send a TGS-REQ, returns the response and the decrypted and
        decoded enc-part.

        :param ticket: the TGT to present, with *key* as its session
            key and *cname*/*realm* as the client identity.
        :param expected_error_mode: 0 for a successful exchange;
            otherwise the expected KRB-ERROR code(s).
        :param service_creds: if given, used to derive the ticket
            decryption key and the expected supported enctypes.
        :return: a (rep, enc_part) tuple; enc_part is None when an
            error was expected.
        """

        subkey = self.RandomKey(key.etype)

        (ctime, cusec) = self.get_KerberosTimeWithUsec()

        # Wrap the presented ticket and its session key as TGT
        # credentials for the exchange machinery.
        tgt = KerberosTicketCreds(ticket,
                                  key,
                                  crealm=realm,
                                  cname=cname)

        if service_creds is not None:
            decryption_key = self.TicketDecryptionKey_from_creds(
                service_creds)
            expected_supported_etypes = service_creds.tgs_supported_enctypes
        else:
            decryption_key = None
            expected_supported_etypes = None

        # Select the success or error checking path.
        if not expected_error_mode:
            check_error_fn = None
            check_rep_fn = self.generic_check_kdc_rep
        else:
            check_error_fn = self.generic_check_kdc_error
            check_rep_fn = None

        def generate_padata(_kdc_exchange_dict,
                            _callback_dict,
                            req_body):

            return padata, req_body

        kdc_exchange_dict = self.tgs_exchange_dict(
            creds=creds,
            expected_crealm=realm,
            expected_cname=cname,
            expected_srealm=realm,
            expected_sname=sname,
            expected_error_mode=expected_error_mode,
            expected_flags=expected_flags,
            unexpected_flags=unexpected_flags,
            expected_supported_etypes=expected_supported_etypes,
            check_error_fn=check_error_fn,
            check_rep_fn=check_rep_fn,
            check_kdc_private_fn=self.generic_check_kdc_private,
            ticket_decryption_key=decryption_key,
            generate_padata_fn=generate_padata if padata is not None else None,
            tgt=tgt,
            authenticator_subkey=subkey,
            kdc_options=str(kdc_options),
            expect_edata=expect_edata,
            expect_pac=expect_pac,
            to_rodc=to_rodc)

        rep = self._generic_kdc_exchange(kdc_exchange_dict,
                                         cname=None,
                                         realm=realm,
                                         sname=sname,
                                         etypes=etypes)

        if expected_error_mode:
            enc_part = None
        else:
            ticket_creds = kdc_exchange_dict['rep_ticket_creds']
            enc_part = ticket_creds.encpart_private

        return rep, enc_part
+
    def get_service_ticket(self, tgt, target_creds, service='host',
                           sname=None,
                           target_name=None, till=None, rc4_support=True,
                           to_rodc=False, kdc_options=None,
                           expected_flags=None, unexpected_flags=None,
                           expected_groups=None,
                           unexpected_groups=None,
                           expect_client_claims=None,
                           expect_device_claims=None,
                           expected_client_claims=None,
                           unexpected_client_claims=None,
                           expected_device_claims=None,
                           unexpected_device_claims=None,
                           pac_request=True, expect_pac=True,
                           expect_requester_sid=None,
                           expect_pac_attrs=None,
                           expect_pac_attrs_pac_request=None,
                           fresh=False):
        """Obtain (and verify) a service ticket using *tgt*.

        Results are cached keyed on the request parameters; pass
        fresh=True to bypass the cache. The returned value is the
        ticket credentials from the TGS exchange.
        """
        user_name = tgt.cname['name-string'][0]
        ticket_sname = tgt.sname
        if target_name is None:
            # Derive the target name from the machine account name,
            # stripping the trailing '$'.
            target_name = target_creds.get_username()[:-1]
        else:
            self.assertIsNone(sname, 'supplied both target name and sname')
        # The cache key captures every parameter that can affect the
        # resulting ticket.
        cache_key = (user_name, target_name, service, to_rodc, kdc_options,
                     pac_request, str(expected_flags), str(unexpected_flags),
                     till, rc4_support,
                     str(ticket_sname),
                     str(sname),
                     str(expected_groups),
                     str(unexpected_groups),
                     expect_client_claims, expect_device_claims,
                     str(expected_client_claims),
                     str(unexpected_client_claims),
                     str(expected_device_claims),
                     str(unexpected_device_claims),
                     expect_pac,
                     expect_requester_sid,
                     expect_pac_attrs,
                     expect_pac_attrs_pac_request)

        if not fresh:
            ticket = self.tkt_cache.get(cache_key)

            if ticket is not None:
                return ticket

        etype = (AES256_CTS_HMAC_SHA1_96, ARCFOUR_HMAC_MD5)

        if kdc_options is None:
            kdc_options = '0'
        kdc_options = str(krb5_asn1.KDCOptions(kdc_options))

        if sname is None:
            sname = self.PrincipalName_create(name_type=NT_PRINCIPAL,
                                              names=[service, target_name])

        srealm = target_creds.get_realm()

        authenticator_subkey = self.RandomKey(kcrypto.Enctype.AES256)

        decryption_key = self.TicketDecryptionKey_from_creds(target_creds)

        kdc_exchange_dict = self.tgs_exchange_dict(
            expected_crealm=tgt.crealm,
            expected_cname=tgt.cname,
            expected_srealm=srealm,
            expected_sname=sname,
            expected_supported_etypes=target_creds.tgs_supported_enctypes,
            expected_flags=expected_flags,
            unexpected_flags=unexpected_flags,
            expected_groups=expected_groups,
            unexpected_groups=unexpected_groups,
            expect_client_claims=expect_client_claims,
            expect_device_claims=expect_device_claims,
            expected_client_claims=expected_client_claims,
            unexpected_client_claims=unexpected_client_claims,
            expected_device_claims=expected_device_claims,
            unexpected_device_claims=unexpected_device_claims,
            ticket_decryption_key=decryption_key,
            check_rep_fn=self.generic_check_kdc_rep,
            check_kdc_private_fn=self.generic_check_kdc_private,
            tgt=tgt,
            authenticator_subkey=authenticator_subkey,
            kdc_options=kdc_options,
            pac_request=pac_request,
            expect_pac=expect_pac,
            expect_requester_sid=expect_requester_sid,
            expect_pac_attrs=expect_pac_attrs,
            expect_pac_attrs_pac_request=expect_pac_attrs_pac_request,
            rc4_support=rc4_support,
            to_rodc=to_rodc)

        rep = self._generic_kdc_exchange(kdc_exchange_dict,
                                         cname=None,
                                         realm=srealm,
                                         sname=sname,
                                         till_time=till,
                                         etypes=etype)
        self.check_tgs_reply(rep)

        service_ticket_creds = kdc_exchange_dict['rep_ticket_creds']

        # Verify the ticket signatures with the (possibly RODC)
        # krbtgt key.
        if to_rodc:
            krbtgt_creds = self.get_rodc_krbtgt_creds()
        else:
            krbtgt_creds = self.get_krbtgt_creds()
        krbtgt_key = self.TicketDecryptionKey_from_creds(krbtgt_creds)

        # Ticket/full checksums are not expected on tickets for the
        # TGS principal itself.
        is_tgs_princ = self.is_tgs_principal(sname)
        expect_ticket_checksum = (self.tkt_sig_support
                                  and not is_tgs_princ)
        expect_full_checksum = (self.full_sig_support
                                and not is_tgs_princ)
        self.verify_ticket(service_ticket_creds, krbtgt_key,
                           service_ticket=True, expect_pac=expect_pac,
                           expect_ticket_checksum=expect_ticket_checksum,
                           expect_full_checksum=expect_full_checksum)

        self.tkt_cache[cache_key] = service_ticket_creds

        return service_ticket_creds
+
    def get_tgt(self, creds, to_rodc=False, kdc_options=None,
                client_account=None, client_name_type=NT_PRINCIPAL,
                target_creds=None, ticket_etype=None,
                expected_flags=None, unexpected_flags=None,
                expected_account_name=None, expected_upn_name=None,
                expected_cname=None,
                expected_sid=None,
                sname=None, realm=None,
                expected_groups=None,
                unexpected_groups=None,
                pac_request=True, expect_pac=True,
                expect_pac_attrs=None, expect_pac_attrs_pac_request=None,
                pac_options=None,
                expect_requester_sid=None,
                rc4_support=True,
                expect_edata=None,
                expect_client_claims=None, expect_device_claims=None,
                expected_client_claims=None, unexpected_client_claims=None,
                expected_device_claims=None, unexpected_device_claims=None,
                fresh=False):
        """Obtain a TGT for *creds* via a two-step AS exchange.

        The first exchange is expected to fail with
        KDC_ERR_PREAUTH_REQUIRED; the ETYPE-INFO2 from that error is
        then used to build an encrypted-timestamp padata for the
        second, successful exchange. Results are cached keyed on the
        request parameters; pass fresh=True to bypass the cache.
        """
        if client_account is not None:
            user_name = client_account
        else:
            user_name = creds.get_username()

        # The cache key captures every parameter that can affect the
        # resulting ticket.
        cache_key = (user_name, to_rodc, kdc_options, pac_request, pac_options,
                     client_name_type,
                     ticket_etype,
                     str(expected_flags), str(unexpected_flags),
                     expected_account_name, expected_upn_name, expected_sid,
                     str(sname), str(realm),
                     str(expected_groups),
                     str(unexpected_groups),
                     str(expected_cname),
                     rc4_support,
                     expect_pac, expect_pac_attrs,
                     expect_pac_attrs_pac_request, expect_requester_sid,
                     expect_client_claims, expect_device_claims,
                     str(expected_client_claims),
                     str(unexpected_client_claims),
                     str(expected_device_claims),
                     str(unexpected_device_claims))

        if not fresh:
            tgt = self.tkt_cache.get(cache_key)

            if tgt is not None:
                return tgt

        if realm is None:
            realm = creds.get_realm()

        salt = creds.get_salt()

        etype = self.get_default_enctypes(creds)
        cname = self.PrincipalName_create(name_type=client_name_type,
                                          names=user_name.split('/'))
        if sname is None:
            sname = self.PrincipalName_create(name_type=NT_SRV_INST,
                                              names=['krbtgt', realm])
            # The KDC canonicalizes the realm in the returned sname.
            expected_sname = self.PrincipalName_create(
                name_type=NT_SRV_INST, names=['krbtgt', realm.upper()])
        else:
            expected_sname = sname

        if expected_cname is None:
            expected_cname = cname

        till = self.get_KerberosTime(offset=36000)

        if target_creds is not None:
            krbtgt_creds = target_creds
        elif to_rodc:
            krbtgt_creds = self.get_rodc_krbtgt_creds()
        else:
            krbtgt_creds = self.get_krbtgt_creds()
        ticket_decryption_key = (
            self.TicketDecryptionKey_from_creds(krbtgt_creds,
                                                etype=ticket_etype))

        expected_etypes = krbtgt_creds.tgs_supported_enctypes

        if kdc_options is None:
            kdc_options = ('forwardable,'
                           'renewable,'
                           'canonicalize,'
                           'renewable-ok')
        kdc_options = krb5_asn1.KDCOptions(kdc_options)

        if pac_options is None:
            pac_options = '1'  # supports claims

        # First exchange: no preauth; expect a PREAUTH_REQUIRED error
        # carrying ETYPE-INFO2.
        rep, kdc_exchange_dict = self._test_as_exchange(
            creds=creds,
            cname=cname,
            realm=realm,
            sname=sname,
            till=till,
            expected_error_mode=KDC_ERR_PREAUTH_REQUIRED,
            expected_crealm=realm,
            expected_cname=expected_cname,
            expected_srealm=realm,
            expected_sname=sname,
            expected_account_name=expected_account_name,
            expected_upn_name=expected_upn_name,
            expected_sid=expected_sid,
            expected_groups=expected_groups,
            unexpected_groups=unexpected_groups,
            expected_salt=salt,
            expected_flags=expected_flags,
            unexpected_flags=unexpected_flags,
            expected_supported_etypes=expected_etypes,
            etypes=etype,
            padata=None,
            kdc_options=kdc_options,
            preauth_key=None,
            ticket_decryption_key=ticket_decryption_key,
            pac_request=pac_request,
            pac_options=pac_options,
            expect_pac=expect_pac,
            expect_pac_attrs=expect_pac_attrs,
            expect_pac_attrs_pac_request=expect_pac_attrs_pac_request,
            expect_requester_sid=expect_requester_sid,
            rc4_support=rc4_support,
            expect_client_claims=expect_client_claims,
            expect_device_claims=expect_device_claims,
            expected_client_claims=expected_client_claims,
            unexpected_client_claims=unexpected_client_claims,
            expected_device_claims=expected_device_claims,
            unexpected_device_claims=unexpected_device_claims,
            to_rodc=to_rodc)
        self.check_pre_authentication(rep)

        # Derive the preauth key from the returned ETYPE-INFO2 and
        # build the encrypted-timestamp padata for the retry.
        etype_info2 = kdc_exchange_dict['preauth_etype_info2']

        preauth_key = self.PasswordKey_from_etype_info2(creds,
                                                        etype_info2[0],
                                                        creds.get_kvno())

        ts_enc_padata = self.get_enc_timestamp_pa_data_from_key(preauth_key)

        padata = [ts_enc_padata]

        expected_realm = realm.upper()

        # Second exchange: with preauth; expect a successful AS-REP.
        rep, kdc_exchange_dict = self._test_as_exchange(
            creds=creds,
            cname=cname,
            realm=realm,
            sname=sname,
            till=till,
            expected_error_mode=0,
            expected_crealm=expected_realm,
            expected_cname=expected_cname,
            expected_srealm=expected_realm,
            expected_sname=expected_sname,
            expected_account_name=expected_account_name,
            expected_upn_name=expected_upn_name,
            expected_sid=expected_sid,
            expected_groups=expected_groups,
            unexpected_groups=unexpected_groups,
            expected_salt=salt,
            expected_flags=expected_flags,
            unexpected_flags=unexpected_flags,
            expected_supported_etypes=expected_etypes,
            etypes=etype,
            padata=padata,
            kdc_options=kdc_options,
            preauth_key=preauth_key,
            ticket_decryption_key=ticket_decryption_key,
            pac_request=pac_request,
            pac_options=pac_options,
            expect_pac=expect_pac,
            expect_pac_attrs=expect_pac_attrs,
            expect_pac_attrs_pac_request=expect_pac_attrs_pac_request,
            expect_requester_sid=expect_requester_sid,
            rc4_support=rc4_support,
            expect_client_claims=expect_client_claims,
            expect_device_claims=expect_device_claims,
            expected_client_claims=expected_client_claims,
            unexpected_client_claims=unexpected_client_claims,
            expected_device_claims=expected_device_claims,
            unexpected_device_claims=unexpected_device_claims,
            to_rodc=to_rodc)
        self.check_as_reply(rep)

        ticket_creds = kdc_exchange_dict['rep_ticket_creds']

        self.tkt_cache[cache_key] = ticket_creds

        return ticket_creds
+
    def _make_tgs_request(self, client_creds, service_creds, tgt,
                          client_account=None,
                          client_name_type=NT_PRINCIPAL,
                          kdc_options=None,
                          pac_request=None, expect_pac=True,
                          expect_error=False,
                          expected_cname=None,
                          expected_account_name=None,
                          expected_upn_name=None,
                          expected_sid=None):
        """Perform a TGS exchange against the service identified by
        *service_creds*, presenting *tgt*.

        :param expect_error: False for success; True to expect
            KDC_ERR_TGT_REVOKED; or an explicit error code.
        :return: the resulting ticket credentials, or None if an
            error was expected.
        """
        if client_account is None:
            client_account = client_creds.get_username()
        cname = self.PrincipalName_create(name_type=client_name_type,
                                          names=client_account.split('/'))

        service_account = service_creds.get_username()
        sname = self.PrincipalName_create(name_type=NT_PRINCIPAL,
                                          names=[service_account])

        realm = service_creds.get_realm()

        expected_crealm = realm
        if expected_cname is None:
            expected_cname = cname
        expected_srealm = realm
        expected_sname = sname

        expected_supported_etypes = service_creds.tgs_supported_enctypes

        etypes = (AES256_CTS_HMAC_SHA1_96, ARCFOUR_HMAC_MD5)

        if kdc_options is None:
            kdc_options = 'canonicalize'
        kdc_options = str(krb5_asn1.KDCOptions(kdc_options))

        target_decryption_key = self.TicketDecryptionKey_from_creds(
            service_creds)

        authenticator_subkey = self.RandomKey(kcrypto.Enctype.AES256)

        # Select the success or error checking path; a bare True maps
        # to KDC_ERR_TGT_REVOKED.
        if expect_error:
            expected_error_mode = expect_error
            if expected_error_mode is True:
                expected_error_mode = KDC_ERR_TGT_REVOKED
            check_error_fn = self.generic_check_kdc_error
            check_rep_fn = None
        else:
            expected_error_mode = 0
            check_error_fn = None
            check_rep_fn = self.generic_check_kdc_rep

        kdc_exchange_dict = self.tgs_exchange_dict(
            expected_crealm=expected_crealm,
            expected_cname=expected_cname,
            expected_srealm=expected_srealm,
            expected_sname=expected_sname,
            expected_account_name=expected_account_name,
            expected_upn_name=expected_upn_name,
            expected_sid=expected_sid,
            expected_supported_etypes=expected_supported_etypes,
            ticket_decryption_key=target_decryption_key,
            check_error_fn=check_error_fn,
            check_rep_fn=check_rep_fn,
            check_kdc_private_fn=self.generic_check_kdc_private,
            expected_error_mode=expected_error_mode,
            tgt=tgt,
            authenticator_subkey=authenticator_subkey,
            kdc_options=kdc_options,
            pac_request=pac_request,
            expect_pac=expect_pac,
            expect_edata=False)

        rep = self._generic_kdc_exchange(kdc_exchange_dict,
                                         cname=cname,
                                         realm=realm,
                                         sname=sname,
                                         etypes=etypes)
        if expect_error:
            self.check_error_rep(rep, expected_error_mode)

            return None
        else:
            self.check_reply(rep, KRB_TGS_REP)

            return kdc_exchange_dict['rep_ticket_creds']
+
    # Named tuple to contain values of interest when the PAC is decoded;
    # populated by get_pac_data() below.
    PacData = namedtuple(
        "PacData",
        "account_name account_sid logon_name upn domain_name")
+
    def get_pac_data(self, authorization_data):
        """Decode the PAC element contained in the authorization-data
        element and return a PacData tuple.

        Fields not found in the PAC are left as None.
        """
        account_name = None
        user_sid = None
        logon_name = None
        upn = None
        domain_name = None

        # The PAC data will be wrapped in an AD_IF_RELEVANT element
        ad_if_relevant_elements = (
            x for x in authorization_data if x['ad-type'] == AD_IF_RELEVANT)
        for dt in ad_if_relevant_elements:
            buf = self.der_decode(
                dt['ad-data'], asn1Spec=krb5_asn1.AD_IF_RELEVANT())
            # The PAC data is further wrapped in a AD_WIN2K_PAC element
            for ad in (x for x in buf if x['ad-type'] == AD_WIN2K_PAC):
                pb = ndr_unpack(krb5pac.PAC_DATA, ad['ad-data'])
                for pac in pb.buffers:
                    if pac.type == krb5pac.PAC_TYPE_LOGON_INFO:
                        account_name = (
                            pac.info.info.info3.base.account_name)
                        # The account SID is the domain SID plus the
                        # account's RID.
                        user_sid = (
                            str(pac.info.info.info3.base.domain_sid)
                            + "-" + str(pac.info.info.info3.base.rid))
                    elif pac.type == krb5pac.PAC_TYPE_LOGON_NAME:
                        logon_name = pac.info.account_name
                    elif pac.type == krb5pac.PAC_TYPE_UPN_DNS_INFO:
                        upn = pac.info.upn_name
                        domain_name = pac.info.dns_domain_name

        return self.PacData(
            account_name,
            user_sid,
            logon_name,
            upn,
            domain_name)
+
+ def decode_service_ticket(self, creds, ticket):
+ """Decrypt and decode a service ticket
+ """
+
+ enc_part = ticket['enc-part']
+
+ key = self.TicketDecryptionKey_from_creds(creds,
+ enc_part['etype'])
+
+ if key.kvno is not None:
+ self.assertElementKVNO(enc_part, 'kvno', key.kvno)
+
+ enc_part = key.decrypt(KU_TICKET, enc_part['cipher'])
+ enc_ticket_part = self.der_decode(
+ enc_part, asn1Spec=krb5_asn1.EncTicketPart())
+ return enc_ticket_part
+
    def modify_ticket_flag(self, enc_part, flag, value):
        """Set or clear a single named ticket flag in *enc_part*'s
        flags bit-string and return the modified enc-part.

        :param flag: a TicketFlags flag name.
        :param value: True to set the bit, False to clear it.
        """
        self.assertIsInstance(value, bool)

        # The bit position of the named flag is the index of its set
        # bit in the TicketFlags tuple representation.
        flag = krb5_asn1.TicketFlags(flag)
        pos = len(tuple(flag)) - 1

        flags = enc_part['flags']
        # NOTE(review): assertLessEqual permits pos == len(flags), in
        # which case the splice below appends a bit rather than
        # replacing one — confirm whether assertLess was intended.
        self.assertLessEqual(pos, len(flags))

        new_flags = flags[:pos] + str(int(value)) + flags[pos + 1:]
        enc_part['flags'] = new_flags

        return enc_part
+
+ def get_objectSid(self, samdb, dn):
+ """ Get the objectSID for a DN
+ Note: performs an Ldb query.
+ """
+ res = samdb.search(dn, scope=SCOPE_BASE, attrs=["objectSID"])
+ self.assertTrue(len(res) == 1, "did not get objectSid for %s" % dn)
+ sid = samdb.schema_format_value("objectSID", res[0]["objectSID"][0])
+ return sid.decode('utf8')
+
+ def add_attribute(self, samdb, dn_str, name, value):
+ if isinstance(value, list):
+ values = value
+ else:
+ values = [value]
+ flag = ldb.FLAG_MOD_ADD
+
+ dn = ldb.Dn(samdb, dn_str)
+ msg = ldb.Message(dn)
+ msg[name] = ldb.MessageElement(values, flag, name)
+ samdb.modify(msg)
+
+ def modify_attribute(self, samdb, dn_str, name, value):
+ if isinstance(value, list):
+ values = value
+ else:
+ values = [value]
+ flag = ldb.FLAG_MOD_REPLACE
+
+ dn = ldb.Dn(samdb, dn_str)
+ msg = ldb.Message(dn)
+ msg[name] = ldb.MessageElement(values, flag, name)
+ samdb.modify(msg)
+
+ def remove_attribute(self, samdb, dn_str, name):
+ flag = ldb.FLAG_MOD_DELETE
+
+ dn = ldb.Dn(samdb, dn_str)
+ msg = ldb.Message(dn)
+ msg[name] = ldb.MessageElement([], flag, name)
+ samdb.modify(msg)
+
    def create_ccache(self, cname, ticket, enc_part):
        """Lay out a version 4 on-disk credentials cache, to be read
        using the FILE: protocol.

        :param cname: the client PrincipalName.
        :param ticket: the decoded Ticket to store.
        :param enc_part: the (decrypted) enc-part carrying the session
            key, times and flags.
        :return: a NamedTemporaryFile (delete=False) containing the
            serialised cache.
        """

        # Header: a single DELTATIME tag with zero KDC clock offset.
        field = krb5ccache.DELTATIME_TAG()
        field.kdc_sec_offset = 0
        field.kdc_usec_offset = 0

        v4tag = krb5ccache.V4TAG()
        v4tag.tag = 1
        v4tag.field = field

        v4tags = krb5ccache.V4TAGS()
        v4tags.tag = v4tag
        v4tags.further_tags = b''

        optional_header = krb5ccache.V4HEADER()
        optional_header.v4tags = v4tags

        cname_string = cname['name-string']

        # Client principal.
        cprincipal = krb5ccache.PRINCIPAL()
        cprincipal.name_type = cname['name-type']
        cprincipal.component_count = len(cname_string)
        cprincipal.realm = ticket['realm']
        cprincipal.components = cname_string

        sname = ticket['sname']
        sname_string = sname['name-string']

        # Server principal, taken from the ticket itself.
        sprincipal = krb5ccache.PRINCIPAL()
        sprincipal.name_type = sname['name-type']
        sprincipal.component_count = len(sname_string)
        sprincipal.realm = ticket['realm']
        sprincipal.components = sname_string

        key = self.EncryptionKey_import(enc_part['key'])

        key_data = key.export_obj()
        keyblock = krb5ccache.KEYBLOCK()
        keyblock.enctype = key_data['keytype']
        keyblock.data = key_data['keyvalue']

        addresses = krb5ccache.ADDRESSES()
        addresses.count = 0
        addresses.data = []

        authdata = krb5ccache.AUTHDATA()
        authdata.count = 0
        authdata.data = []

        # Re-encode the ticket, since it was decoded by another layer.
        ticket_data = self.der_encode(ticket, asn1Spec=krb5_asn1.Ticket())

        authtime = enc_part['authtime']
        # starttime is optional in an EncTicketPart; default to
        # authtime.
        starttime = enc_part.get('starttime', authtime)
        endtime = enc_part['endtime']

        cred = krb5ccache.CREDENTIAL()
        cred.client = cprincipal
        cred.server = sprincipal
        cred.keyblock = keyblock
        cred.authtime = self.get_EpochFromKerberosTime(authtime)
        cred.starttime = self.get_EpochFromKerberosTime(starttime)
        cred.endtime = self.get_EpochFromKerberosTime(endtime)

        # Account for clock skew of up to five minutes.
        self.assertLess(cred.authtime - 5 * 60,
                        datetime.now(timezone.utc).timestamp(),
                        "Ticket not yet valid - clocks may be out of sync.")
        self.assertLess(cred.starttime - 5 * 60,
                        datetime.now(timezone.utc).timestamp(),
                        "Ticket not yet valid - clocks may be out of sync.")
        self.assertGreater(cred.endtime - 60 * 60,
                           datetime.now(timezone.utc).timestamp(),
                           "Ticket already expired/about to expire - "
                           "clocks may be out of sync.")

        cred.renew_till = cred.endtime
        cred.is_skey = 0
        # The flags are kept as a bit-string; parse base 2.
        cred.ticket_flags = int(enc_part['flags'], 2)
        cred.addresses = addresses
        cred.authdata = authdata
        cred.ticket = ticket_data
        cred.second_ticket = b''

        ccache = krb5ccache.CCACHE()
        ccache.pvno = 5
        ccache.version = 4
        ccache.optional_header = optional_header
        ccache.principal = cprincipal
        ccache.cred = cred

        # Serialise the credentials cache structure.
        result = ndr_pack(ccache)

        # Create a temporary file and write the credentials.
        cachefile = tempfile.NamedTemporaryFile(dir=self.tempdir, delete=False)
        cachefile.write(result)
        cachefile.close()

        return cachefile
+
+ def create_ccache_with_ticket(self, user_credentials, ticket, pac=True):
+ # Place the ticket into a newly created credentials cache file.
+
+ user_name = user_credentials.get_username()
+ realm = user_credentials.get_realm()
+
+ cname = self.PrincipalName_create(name_type=NT_PRINCIPAL,
+ names=[user_name])
+
+ if not pac:
+ ticket = self.modified_ticket(ticket, exclude_pac=True)
+
+ # Write the ticket into a credentials cache file that can be ingested
+ # by the main credentials code.
+ cachefile = self.create_ccache(cname, ticket.ticket,
+ ticket.encpart_private)
+
+ # Create a credentials object to reference the credentials cache.
+ creds = Credentials()
+ creds.set_kerberos_state(MUST_USE_KERBEROS)
+ creds.set_username(user_name, SPECIFIED)
+ creds.set_realm(realm)
+ creds.set_named_ccache(cachefile.name, SPECIFIED, self.get_lp())
+
+ # Return the credentials along with the cache file.
+ return (creds, cachefile)
+
+ def create_ccache_with_user(self, user_credentials, mach_credentials,
+ service="host", target_name=None, pac=True):
+ # Obtain a service ticket authorising the user and place it into a
+ # newly created credentials cache file.
+
+ tgt = self.get_tgt(user_credentials)
+
+ ticket = self.get_service_ticket(tgt, mach_credentials,
+ service=service,
+ target_name=target_name)
+
+ return self.create_ccache_with_ticket(user_credentials, ticket,
+ pac=pac)
+
+ # Test credentials by connecting to the DC through LDAP.
+ def _connect(self, creds, simple_bind, expect_error=None):
+ samdb = self.get_samdb()
+ dn = creds.get_dn()
+
+ if simple_bind:
+ url = f'ldaps://{samdb.host_dns_name()}'
+ creds.set_bind_dn(str(dn))
+ else:
+ url = f'ldap://{samdb.host_dns_name()}'
+ creds.set_bind_dn(None)
+ try:
+ ldap = SamDB(url=url,
+ credentials=creds,
+ lp=self.get_lp())
+ except ldb.LdbError as err:
+ self.assertIsNotNone(expect_error, 'got unexpected error')
+ num, estr = err.args
+ if num != ldb.ERR_INVALID_CREDENTIALS:
+ raise
+
+ self.assertIn(expect_error, estr)
+
+ return
+ else:
+ self.assertIsNone(expect_error, 'expected to get an error')
+
+ res = ldap.search('',
+ scope=ldb.SCOPE_BASE,
+ attrs=['tokenGroups'])
+ self.assertEqual(1, len(res))
+
+ sid = creds.get_sid()
+
+ token_groups = res[0].get('tokenGroups', idx=0)
+ token_sid = ndr_unpack(security.dom_sid, token_groups)
+
+ self.assertEqual(sid, str(token_sid))
+
+ # Test the two SAMR password change methods implemented in Samba. If the
+ # user is protected, we should get an ACCOUNT_RESTRICTION error indicating
+ # that the password change is not allowed.
+ def _test_samr_change_password(self, creds, expect_error,
+ connect_error=None):
+ samdb = self.get_samdb()
+ server_name = samdb.host_dns_name()
+ try:
+ conn = samr.samr(f'ncacn_np:{server_name}[seal,smb2]',
+ self.get_lp(),
+ creds)
+ except NTSTATUSError as err:
+ self.assertIsNotNone(connect_error,
+ 'connection unexpectedly failed')
+ self.assertIsNone(expect_error, 'don’t specify both errors')
+
+ num, _ = err.args
+ self.assertEqual(num, connect_error)
+
+ return
+ else:
+ self.assertIsNone(connect_error, 'expected connection to fail')
+
+ # Get the NT hash.
+ nt_hash = creds.get_nt_hash()
+
+ # Generate a new UTF-16 password.
+ new_password_str = generate_random_password(32, 32)
+ new_password = new_password_str.encode('utf-16le')
+
+ # Generate the MD4 hash of the password.
+ new_password_md4 = md4_hash_blob(new_password)
+
+ # Prefix the password with padding so it is 512 bytes long.
+ new_password_len = len(new_password)
+ remaining_len = 512 - new_password_len
+ new_password = bytes(remaining_len) + new_password
+
+ # Append the 32-bit length of the password.
+ new_password += int.to_bytes(new_password_len,
+ length=4,
+ byteorder='little')
+
+ # Create a key from the MD4 hash of the new password.
+ key = new_password_md4[:14]
+
+ # Encrypt the old NT hash with DES to obtain the verifier.
+ verifier = des_crypt_blob_16(nt_hash, key)
+
+ server = lsa.String()
+ server.string = server_name
+
+ account = lsa.String()
+ account.string = creds.get_username()
+
+ nt_verifier = samr.Password()
+ nt_verifier.hash = list(verifier)
+
+ nt_password = samr.CryptPassword()
+ nt_password.data = list(arcfour_encrypt(nt_hash, new_password))
+
+ if not self.expect_nt_hash:
+ expect_error = ntstatus.NT_STATUS_NTLM_BLOCKED
+
+ try:
+ conn.ChangePasswordUser2(server=server,
+ account=account,
+ nt_password=nt_password,
+ nt_verifier=nt_verifier,
+ lm_change=False,
+ lm_password=None,
+ lm_verifier=None)
+ except NTSTATUSError as err:
+ num, _ = err.args
+ self.assertIsNotNone(expect_error,
+ f'unexpectedly failed with {num:08X}')
+ self.assertEqual(num, expect_error)
+ else:
+ self.assertIsNone(expect_error, 'expected to fail')
+
+ creds.set_password(new_password_str)
+
+ # Get the NT hash.
+ nt_hash = creds.get_nt_hash()
+
+ # Generate a new UTF-16 password.
+ new_password = generate_random_password(32, 32)
+ new_password = new_password.encode('utf-16le')
+
+ # Generate the MD4 hash of the password.
+ new_password_md4 = md4_hash_blob(new_password)
+
+ # Prefix the password with padding so it is 512 bytes long.
+ new_password_len = len(new_password)
+ remaining_len = 512 - new_password_len
+ new_password = bytes(remaining_len) + new_password
+
+ # Append the 32-bit length of the password.
+ new_password += int.to_bytes(new_password_len,
+ length=4,
+ byteorder='little')
+
+ # Create a key from the MD4 hash of the new password.
+ key = new_password_md4[:14]
+
+ # Encrypt the old NT hash with DES to obtain the verifier.
+ verifier = des_crypt_blob_16(nt_hash, key)
+
+ nt_verifier.hash = list(verifier)
+
+ nt_password.data = list(arcfour_encrypt(nt_hash, new_password))
+
+ try:
+ conn.ChangePasswordUser3(server=server,
+ account=account,
+ nt_password=nt_password,
+ nt_verifier=nt_verifier,
+ lm_change=False,
+ lm_password=None,
+ lm_verifier=None,
+ password3=None)
+ except NTSTATUSError as err:
+ self.assertIsNotNone(expect_error, 'unexpectedly failed')
+
+ num, _ = err.args
+ self.assertEqual(num, expect_error)
+ else:
+ self.assertIsNone(expect_error, 'expected to fail')
+
    # Test SamLogon. Authentication should succeed for non-protected accounts,
    # and fail for protected accounts.
    def _test_samlogon(self, creds, logon_type, expect_error=None,
                       validation_level=netlogon.NetlogonValidationSamInfo2,
                       domain_joined_mach_creds=None):
        """Attempt a SamLogon for *creds* over a netlogon schannel connection.

        :param creds: credentials of the account to log on.
        :param logon_type: netlogon.NetlogonInteractiveInformation or
            netlogon.NetlogonNetworkInformation; anything else fails the test.
        :param expect_error: NT status the logon must fail with, or None if
            it should succeed.
        :param domain_joined_mach_creds: workstation credentials used for the
            schannel connection; a cached computer account is used if None.
        :return: the validation information on success, else None.
        """
        samdb = self.get_samdb()

        if domain_joined_mach_creds is None:
            domain_joined_mach_creds = self.get_cached_creds(
                account_type=self.AccountType.COMPUTER,
                opts={'secure_channel_type': misc.SEC_CHAN_WKSTA})

        dc_server = samdb.host_dns_name()
        username, domain = creds.get_ntlm_username_domain()
        workstation = domain_joined_mach_creds.get_username()

        # Calling this initializes netlogon_creds on mach_creds, as is required
        # before calling mach_creds.encrypt_samr_password().
        conn = netlogon.netlogon(f'ncacn_ip_tcp:{dc_server}[schannel,seal]',
                                 self.get_lp(),
                                 domain_joined_mach_creds)

        if logon_type == netlogon.NetlogonInteractiveInformation:
            # Interactive logon: send the (encrypted) NT hash directly.
            logon = netlogon.netr_PasswordInfo()

            # The LM hash is not used; send all zeros.
            lm_pass = samr.Password()
            lm_pass.hash = [0] * 16

            nt_pass = samr.Password()
            nt_pass.hash = list(creds.get_nt_hash())
            domain_joined_mach_creds.encrypt_samr_password(nt_pass)

            logon.lmpassword = lm_pass
            logon.ntpassword = nt_pass

        elif logon_type == netlogon.NetlogonNetworkInformation:
            # Network logon: build NTLMv2 target info and compute a
            # challenge-response.
            computername = ntlmssp.AV_PAIR()
            computername.AvId = ntlmssp.MsvAvNbComputerName
            computername.Value = workstation

            domainname = ntlmssp.AV_PAIR()
            domainname.AvId = ntlmssp.MsvAvNbDomainName
            domainname.Value = domain

            eol = ntlmssp.AV_PAIR()
            eol.AvId = ntlmssp.MsvAvEOL

            target_info = ntlmssp.AV_PAIR_LIST()
            target_info.count = 3
            target_info.pair = [domainname, computername, eol]

            target_info_blob = ndr_pack(target_info)

            # Fixed 8-byte server challenge; the test does not need
            # unpredictability here.
            challenge = b'abcdefgh'
            response = creds.get_ntlm_response(flags=0,
                                               challenge=challenge,
                                               target_info=target_info_blob)

            logon = netlogon.netr_NetworkInfo()

            logon.challenge = list(challenge)
            logon.nt = netlogon.netr_ChallengeResponse()
            logon.nt.length = len(response['nt_response'])
            logon.nt.data = list(response['nt_response'])

        else:
            self.fail(f'unknown logon type {logon_type}')

        identity_info = netlogon.netr_IdentityInfo()
        identity_info.domain_name.string = domain
        identity_info.account_name.string = username
        identity_info.parameter_control = (
            netlogon.MSV1_0_ALLOW_SERVER_TRUST_ACCOUNT) | (
            netlogon.MSV1_0_ALLOW_WORKSTATION_TRUST_ACCOUNT)
        identity_info.workstation.string = workstation

        logon.identity_info = identity_info

        netr_flags = 0

        validation = None

        # If the server refuses NTLM authentication, a successful logon is
        # impossible regardless of the account's protection state.
        if not expect_error and not self.expect_nt_hash:
            expect_error = ntstatus.NT_STATUS_NTLM_BLOCKED

        try:
            (validation, authoritative, flags) = (
                conn.netr_LogonSamLogonEx(dc_server,
                                          domain_joined_mach_creds.get_workstation(),
                                          logon_type,
                                          logon,
                                          validation_level,
                                          netr_flags))
        except NTSTATUSError as err:
            status, _ = err.args
            self.assertIsNotNone(expect_error,
                                 f'unexpectedly failed with {status:08X}')
            self.assertEqual(expect_error, status, 'got wrong status code')
        else:
            self.assertIsNone(expect_error, 'expected error')

            self.assertEqual(1, authoritative)
            self.assertEqual(0, flags)

        # None if the logon failed with the expected error.
        return validation
diff --git a/python/samba/tests/krb5/kdc_tests.py b/python/samba/tests/krb5/kdc_tests.py
new file mode 100755
index 0000000..b4be6f8
--- /dev/null
+++ b/python/samba/tests/krb5/kdc_tests.py
@@ -0,0 +1,228 @@
+#!/usr/bin/env python3
+# Unix SMB/CIFS implementation.
+# Copyright (C) Stefan Metzmacher 2020
+# Copyright (C) 2020 Catalyst.Net Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import sys
+import os
+
+sys.path.insert(0, "bin/python")
+os.environ["PYTHONUNBUFFERED"] = "1"
+
+from samba.tests.krb5.raw_testcase import RawKerberosTest
+import samba.tests.krb5.rfc4120_pyasn1 as krb5_asn1
+from samba.tests.krb5.rfc4120_constants import (
+ AES256_CTS_HMAC_SHA1_96,
+ ARCFOUR_HMAC_MD5,
+ KDC_ERR_PREAUTH_FAILED,
+ KDC_ERR_PREAUTH_REQUIRED,
+ KDC_ERR_SKEW,
+ KRB_AS_REP,
+ KRB_ERROR,
+ KU_PA_ENC_TIMESTAMP,
+ PADATA_ENC_TIMESTAMP,
+ PADATA_ETYPE_INFO2,
+ NT_PRINCIPAL,
+ NT_SRV_INST,
+)
+
+global_asn1_print = False
+global_hexdump = False
+
+
class KdcTests(RawKerberosTest):
    """Port of the tests in source4/torture/krb5/kdc-heimdal.c to Python."""

    def setUp(self):
        super().setUp()
        # Module-level flags controlling optional debug output.
        self.do_asn1_print = global_asn1_print
        self.do_hexdump = global_hexdump

    def as_req(self, creds, etypes, padata=None):
        """Send an AS-REQ for a TGT on behalf of *creds*.

        :param creds: credentials naming the client principal.
        :param etypes: encryption types to offer, in preference order.
        :param padata: optional list of pre-authentication data items.
        :return: the decoded KDC reply (AS-REP or KRB-ERROR).
        """
        user = creds.get_username()
        realm = creds.get_realm()

        cname = self.PrincipalName_create(
            name_type=NT_PRINCIPAL,
            names=[user])
        # The server is the ticket-granting service: krbtgt/<realm>.
        sname = self.PrincipalName_create(
            name_type=NT_SRV_INST,
            names=["krbtgt", realm])
        # Request a ticket valid for ten hours.
        till = self.get_KerberosTime(offset=36000)

        kdc_options = 0

        req = self.AS_REQ_create(padata=padata,
                                 kdc_options=str(kdc_options),
                                 cname=cname,
                                 realm=realm,
                                 sname=sname,
                                 from_time=None,
                                 till_time=till,
                                 renew_time=None,
                                 nonce=0x7fffffff,
                                 etypes=etypes,
                                 addresses=None,
                                 additional_tickets=None)
        return self.send_recv_transaction(req)

    def get_enc_timestamp_pa_data(self, creds, rep, skew=0):
        """Build PA-ENC-TIMESTAMP pre-authentication data from a KRB-ERROR.

        Extracts the ETYPE-INFO2 entry from the error's e-data, derives the
        client's long-term key from it, and encrypts a timestamp (offset by
        *skew* seconds) with that key.
        """
        rep_padata = self.der_decode(
            rep['e-data'],
            asn1Spec=krb5_asn1.METHOD_DATA())

        for pa in rep_padata:
            if pa['padata-type'] == PADATA_ETYPE_INFO2:
                etype_info2 = pa['padata-value']
                break
        else:
            # Previously a missing entry fell through to a NameError; fail
            # with a clear message instead.
            self.fail('no PADATA-ETYPE-INFO2 in KDC reply')

        etype_info2 = self.der_decode(
            etype_info2, asn1Spec=krb5_asn1.ETYPE_INFO2())

        key = self.PasswordKey_from_etype_info2(creds, etype_info2[0])

        (patime, pausec) = self.get_KerberosTimeWithUsec(offset=skew)
        pa_ts = self.PA_ENC_TS_ENC_create(patime, pausec)
        pa_ts = self.der_encode(pa_ts, asn1Spec=krb5_asn1.PA_ENC_TS_ENC())

        pa_ts = self.EncryptedData_create(key, KU_PA_ENC_TIMESTAMP, pa_ts)
        pa_ts = self.der_encode(pa_ts, asn1Spec=krb5_asn1.EncryptedData())

        pa_ts = self.PA_DATA_create(PADATA_ENC_TIMESTAMP, pa_ts)

        return pa_ts

    def check_pre_authentication(self, rep):
        """Check that the KDC response was pre-authentication required."""
        self.check_error_rep(rep, KDC_ERR_PREAUTH_REQUIRED)

    # Backwards-compatible alias for the historical misspelling, in case any
    # external callers still use the old name.
    check_pre_authenication = check_pre_authentication

    def check_as_reply(self, rep):
        """ Check that the kdc response is an AS-REP and that the
            values for:
                msg-type
                pvno
                tkt-pvno
                kvno
            match the expected values
        """

        # Should have a reply, and it should be an AS-REP message.
        self.assertIsNotNone(rep)
        self.assertEqual(rep['msg-type'], KRB_AS_REP)

        # Protocol version number should be 5
        pvno = int(rep['pvno'])
        self.assertEqual(5, pvno)

        # The ticket version number should be 5
        tkt_vno = int(rep['ticket']['tkt-vno'])
        self.assertEqual(5, tkt_vno)

        # Check that the kvno is not an RODC kvno
        # MIT kerberos does not provide the kvno, so we treat it as optional.
        # This is tested in compatability_test.py
        if 'kvno' in rep['enc-part']:
            kvno = int(rep['enc-part']['kvno'])
            # If the high order bits are set this is an RODC kvno.
            self.assertEqual(0, kvno & 0xFFFF0000)

    def check_error_rep(self, rep, expected):
        """ Check that the reply is an error message, with the expected
            error-code specified.
        """
        self.assertIsNotNone(rep)
        self.assertEqual(rep['msg-type'], KRB_ERROR)
        self.assertEqual(rep['error-code'], expected)

    def _preauthenticated_as_req(self, creds, etypes, skew=0):
        """Run the standard two-step AS exchange.

        First send an AS-REQ without pre-authentication and check that the
        KDC demands it, then retry with an encrypted timestamp (offset by
        *skew* seconds) and return the second reply unchecked, so callers
        can assert either success or an error.
        """
        rep = self.as_req(creds, etypes)
        self.check_pre_authentication(rep)

        padata = self.get_enc_timestamp_pa_data(creds, rep, skew=skew)
        return self.as_req(creds, etypes, padata=[padata])

    def test_aes256_cts_hmac_sha1_96(self):
        creds = self.get_user_creds()
        etype = (AES256_CTS_HMAC_SHA1_96,)

        rep = self._preauthenticated_as_req(creds, etype)
        self.check_as_reply(rep)

        # The reply must be encrypted with the only etype we offered.
        self.assertEqual(AES256_CTS_HMAC_SHA1_96, rep['enc-part']['etype'])

    def test_arc4_hmac_md5(self):
        creds = self.get_user_creds()
        etype = (ARCFOUR_HMAC_MD5,)

        rep = self._preauthenticated_as_req(creds, etype)
        self.check_as_reply(rep)

        self.assertEqual(ARCFOUR_HMAC_MD5, rep['enc-part']['etype'])

    def test_aes_rc4(self):
        creds = self.get_user_creds()
        etype = (AES256_CTS_HMAC_SHA1_96, ARCFOUR_HMAC_MD5)

        rep = self._preauthenticated_as_req(creds, etype)
        self.check_as_reply(rep)

        # AES256 was offered first, so it should be selected.
        self.assertEqual(AES256_CTS_HMAC_SHA1_96, rep['enc-part']['etype'])

    def test_clock_skew(self):
        creds = self.get_user_creds()
        etype = (AES256_CTS_HMAC_SHA1_96, ARCFOUR_HMAC_MD5)

        # An encrypted timestamp an hour in the future must be rejected.
        rep = self._preauthenticated_as_req(creds, etype, skew=3600)

        self.check_error_rep(rep, KDC_ERR_SKEW)

    def test_invalid_password(self):
        creds = self.insta_creds(template=self.get_user_creds())
        creds.set_password("Not the correct password")

        etype = (AES256_CTS_HMAC_SHA1_96,)

        # The encrypted timestamp is derived from the wrong password, so
        # pre-authentication must fail.
        rep = self._preauthenticated_as_req(creds, etype)

        self.check_error_rep(rep, KDC_ERR_PREAUTH_FAILED)
+
+
+if __name__ == "__main__":
+ global_asn1_print = False
+ global_hexdump = False
+ import unittest
+ unittest.main()
diff --git a/python/samba/tests/krb5/kdc_tgs_tests.py b/python/samba/tests/krb5/kdc_tgs_tests.py
new file mode 100755
index 0000000..58ed49d
--- /dev/null
+++ b/python/samba/tests/krb5/kdc_tgs_tests.py
@@ -0,0 +1,3506 @@
+#!/usr/bin/env python3
+# Unix SMB/CIFS implementation.
+# Copyright (C) Stefan Metzmacher 2020
+# Copyright (C) 2020 Catalyst.Net Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import sys
+import os
+
+sys.path.insert(0, "bin/python")
+os.environ["PYTHONUNBUFFERED"] = "1"
+
+from functools import partial
+
+import ldb
+
+from samba import dsdb, ntstatus
+
+from samba.dcerpc import krb5pac, security
+
+
+import samba.tests.krb5.kcrypto as kcrypto
+from samba.tests.krb5.kdc_base_test import KDCBaseTest
+from samba.tests.krb5.raw_testcase import Krb5EncryptionKey
+from samba.tests.krb5.rfc4120_constants import (
+ AES256_CTS_HMAC_SHA1_96,
+ ARCFOUR_HMAC_MD5,
+ FX_FAST_ARMOR_AP_REQUEST,
+ KRB_ERROR,
+ KDC_ERR_BADKEYVER,
+ KDC_ERR_BADMATCH,
+ KDC_ERR_ETYPE_NOSUPP,
+ KDC_ERR_GENERIC,
+ KDC_ERR_MODIFIED,
+ KDC_ERR_NOT_US,
+ KDC_ERR_POLICY,
+ KDC_ERR_PREAUTH_REQUIRED,
+ KDC_ERR_C_PRINCIPAL_UNKNOWN,
+ KDC_ERR_S_PRINCIPAL_UNKNOWN,
+ KDC_ERR_SERVER_NOMATCH,
+ KDC_ERR_TKT_EXPIRED,
+ KDC_ERR_TGT_REVOKED,
+ KRB_ERR_TKT_NYV,
+ KDC_ERR_WRONG_REALM,
+ NT_ENTERPRISE_PRINCIPAL,
+ NT_PRINCIPAL,
+ NT_SRV_INST,
+)
+import samba.tests.krb5.rfc4120_pyasn1 as krb5_asn1
+
+global_asn1_print = False
+global_hexdump = False
+
+
class KdcTgsBaseTests(KDCBaseTest):
    """Shared helpers for driving AS and TGS exchanges with rich
    expectation checking, used by the TGS test classes below.
    """

    def _as_req(self,
                creds,
                expected_error,
                target_creds,
                etype,
                expected_ticket_etype=None):
        """Perform a two-step AS exchange against a host/ service.

        :param expected_error: error code the final exchange must produce,
            or a falsy value if a ticket is expected.
        :return: the ticket creds on success, else None.
        """
        user_name = creds.get_username()
        cname = self.PrincipalName_create(name_type=NT_PRINCIPAL,
                                          names=user_name.split('/'))

        # NOTE(review): target_name[:-1] assumes the target is a machine
        # account whose name ends with '$' — confirm for other targets.
        target_name = target_creds.get_username()
        sname = self.PrincipalName_create(name_type=NT_PRINCIPAL,
                                          names=['host', target_name[:-1]])

        if expected_error:
            expected_sname = sname
        else:
            expected_sname = self.PrincipalName_create(name_type=NT_PRINCIPAL,
                                                       names=[target_name])

        realm = creds.get_realm()
        salt = creds.get_salt()

        till = self.get_KerberosTime(offset=36000)

        ticket_decryption_key = (
            self.TicketDecryptionKey_from_creds(target_creds,
                                                etype=expected_ticket_etype))
        expected_etypes = target_creds.tgs_supported_enctypes

        kdc_options = ('forwardable,'
                       'renewable,'
                       'canonicalize,'
                       'renewable-ok')
        kdc_options = krb5_asn1.KDCOptions(kdc_options)

        # The first request carries no pre-authentication, so the expected
        # error may be either PREAUTH_REQUIRED or the caller's error.
        if expected_error:
            initial_error = (KDC_ERR_PREAUTH_REQUIRED, expected_error)
        else:
            initial_error = KDC_ERR_PREAUTH_REQUIRED

        rep, kdc_exchange_dict = self._test_as_exchange(
            creds=creds,
            cname=cname,
            realm=realm,
            sname=sname,
            till=till,
            expected_error_mode=initial_error,
            expected_crealm=realm,
            expected_cname=cname,
            expected_srealm=realm,
            expected_sname=sname,
            expected_salt=salt,
            expected_supported_etypes=expected_etypes,
            etypes=etype,
            padata=None,
            kdc_options=kdc_options,
            preauth_key=None,
            ticket_decryption_key=ticket_decryption_key)
        self.assertIsNotNone(rep)
        self.assertEqual(KRB_ERROR, rep['msg-type'])
        error_code = rep['error-code']
        if expected_error:
            self.assertIn(error_code, initial_error)
            # The caller's error arrived already — no need to retry with
            # pre-authentication.
            if error_code == expected_error:
                return
        else:
            self.assertEqual(initial_error, error_code)

        etype_info2 = kdc_exchange_dict['preauth_etype_info2']

        preauth_key = self.PasswordKey_from_etype_info2(creds,
                                                        etype_info2[0],
                                                        creds.get_kvno())

        ts_enc_padata = self.get_enc_timestamp_pa_data_from_key(preauth_key)

        padata = [ts_enc_padata]

        expected_realm = realm.upper()

        # Second request: retry with an encrypted timestamp.
        rep, kdc_exchange_dict = self._test_as_exchange(
            creds=creds,
            cname=cname,
            realm=realm,
            sname=sname,
            till=till,
            expected_error_mode=expected_error,
            expected_crealm=expected_realm,
            expected_cname=cname,
            expected_srealm=expected_realm,
            expected_sname=expected_sname,
            expected_salt=salt,
            expected_supported_etypes=expected_etypes,
            etypes=etype,
            padata=padata,
            kdc_options=kdc_options,
            preauth_key=preauth_key,
            ticket_decryption_key=ticket_decryption_key,
            expect_edata=False)
        if expected_error:
            self.check_error_rep(rep, expected_error)
            return None

        self.check_as_reply(rep)
        return kdc_exchange_dict['rep_ticket_creds']

    def _armored_as_req(self,
                        client_creds,
                        target_creds,
                        armor_tgt,
                        *,
                        target_sname=None,
                        expected_error=0,
                        expected_sname=None,
                        expect_edata=None,
                        expect_status=None,
                        expected_status=None,
                        expected_groups=None,
                        expect_device_info=None,
                        expected_device_groups=None,
                        expect_device_claims=None,
                        expected_device_claims=None):
        """Perform a FAST-armored AS exchange using *armor_tgt* for armor.

        :param expected_error: error code the exchange must produce, or 0
            if a ticket is expected.
        :return: the ticket creds on success, else None.
        """
        client_username = client_creds.get_username()
        client_realm = client_creds.get_realm()
        client_cname = self.PrincipalName_create(name_type=NT_PRINCIPAL,
                                                 names=[client_username])

        if target_sname is None:
            target_name = target_creds.get_username()
            target_sname = self.PrincipalName_create(
                name_type=NT_PRINCIPAL, names=[target_name])
        target_realm = target_creds.get_realm()
        target_decryption_key = self.TicketDecryptionKey_from_creds(
            target_creds)
        target_etypes = target_creds.tgs_supported_enctypes

        # Derive the FAST armor key from a fresh subkey and the armor TGT's
        # session key.
        authenticator_subkey = self.RandomKey(kcrypto.Enctype.AES256)
        armor_key = self.generate_armor_key(authenticator_subkey,
                                            armor_tgt.session_key)

        preauth_key = self.PasswordKey_from_creds(client_creds,
                                                  kcrypto.Enctype.AES256)

        # Pre-authenticate with PA-FX-FAST encrypted challenge data.
        client_challenge_key = (
            self.generate_client_challenge_key(armor_key, preauth_key))
        fast_padata = [self.get_challenge_pa_data(client_challenge_key)]

        def _generate_fast_padata(kdc_exchange_dict,
                                  _callback_dict,
                                  req_body):
            return list(fast_padata), req_body

        etypes = kcrypto.Enctype.AES256, kcrypto.Enctype.RC4

        if expected_error:
            check_error_fn = self.generic_check_kdc_error
            check_rep_fn = None
        else:
            check_error_fn = None
            check_rep_fn = self.generic_check_kdc_rep

        pac_options = '1' # claims support

        samdb = self.get_samdb()
        domain_sid_str = samdb.get_domain_sid()

        # Expected group SIDs may be templates; resolve them against the
        # domain SID before comparison.
        if expected_groups is not None:
            expected_groups = self.map_sids(expected_groups, None, domain_sid_str)

        if expected_device_groups is not None:
            expected_device_groups = self.map_sids(expected_device_groups, None, domain_sid_str)

        if expected_sname is None:
            expected_sname = target_sname

        kdc_exchange_dict = self.as_exchange_dict(
            creds=client_creds,
            expected_crealm=client_realm,
            expected_cname=client_cname,
            expected_srealm=target_realm,
            expected_sname=expected_sname,
            expected_supported_etypes=target_etypes,
            ticket_decryption_key=target_decryption_key,
            generate_fast_fn=self.generate_simple_fast,
            generate_fast_armor_fn=self.generate_ap_req,
            generate_fast_padata_fn=_generate_fast_padata,
            fast_armor_type=FX_FAST_ARMOR_AP_REQUEST,
            check_error_fn=check_error_fn,
            check_rep_fn=check_rep_fn,
            check_kdc_private_fn=self.generic_check_kdc_private,
            expected_error_mode=expected_error,
            expected_salt=client_creds.get_salt(),
            expect_edata=expect_edata,
            expect_status=expect_status,
            expected_status=expected_status,
            expected_groups=expected_groups,
            expect_device_info=expect_device_info,
            expected_device_domain_sid=domain_sid_str,
            expected_device_groups=expected_device_groups,
            expect_device_claims=expect_device_claims,
            expected_device_claims=expected_device_claims,
            authenticator_subkey=authenticator_subkey,
            preauth_key=preauth_key,
            armor_key=armor_key,
            armor_tgt=armor_tgt,
            armor_subkey=authenticator_subkey,
            kdc_options='0',
            pac_options=pac_options,
            # PA-DATA types are not important for these tests.
            check_patypes=False)

        rep = self._generic_kdc_exchange(
            kdc_exchange_dict,
            cname=client_cname,
            realm=client_realm,
            sname=target_sname,
            etypes=etypes)
        if expected_error:
            self.check_error_rep(rep, expected_error)
            return None
        else:
            self.check_as_reply(rep)
            return kdc_exchange_dict['rep_ticket_creds']

    def _tgs_req(self, tgt, expected_error, creds, target_creds, *,
                 armor_tgt=None,
                 kdc_options='0',
                 pac_options=None,
                 expected_cname=None,
                 expected_sname=None,
                 expected_account_name=None,
                 expected_flags=None,
                 additional_ticket=None,
                 decryption_key=None,
                 generate_padata_fn=None,
                 generate_fast_padata_fn=None,
                 sname=None,
                 srealm=None,
                 till=None,
                 etypes=None,
                 expected_ticket_etype=None,
                 expected_supported_etypes=None,
                 expect_pac=True,
                 expect_pac_attrs=None,
                 expect_pac_attrs_pac_request=None,
                 expect_requester_sid=None,
                 expect_edata=False,
                 expected_sid=None,
                 expected_groups=None,
                 unexpected_groups=None,
                 expect_device_info=None,
                 expected_device_domain_sid=None,
                 expected_device_groups=None,
                 expect_client_claims=None,
                 expected_client_claims=None,
                 unexpected_client_claims=None,
                 expect_device_claims=None,
                 expected_device_claims=None,
                 expect_status=None,
                 expected_status=None,
                 expected_proxy_target=None,
                 expected_transited_services=None,
                 expected_extra_pac_buffers=None,
                 check_patypes=True):
        """Perform a TGS exchange using *tgt* for the given target.

        :param expected_error: error code the exchange must produce, or a
            falsy value if a ticket is expected.
        :param sname: service name to request; False means omit the sname
            entirely, None means derive host/<target> (or krbtgt/<realm>).
        :param srealm: service realm; False means omit it, None means take
            the target credentials' realm.
        :return: the ticket creds on success, else None.
        """
        # False is a sentinel meaning 'omit the field from the request'.
        if srealm is False:
            srealm = None
        elif srealm is None:
            srealm = target_creds.get_realm()

        if sname is False:
            sname = None
            if expected_sname is None:
                expected_sname = self.get_krbtgt_sname()
        else:
            if sname is None:
                target_name = target_creds.get_username()
                if target_name == 'krbtgt':
                    sname = self.PrincipalName_create(
                        name_type=NT_SRV_INST,
                        names=[target_name, srealm])
                else:
                    # Strip the trailing '$' of a machine account name to
                    # form the host/ principal.
                    if target_name[-1] == '$':
                        target_name = target_name[:-1]
                    sname = self.PrincipalName_create(
                        name_type=NT_PRINCIPAL,
                        names=['host', target_name])

            if expected_sname is None:
                expected_sname = sname

        if additional_ticket is not None:
            additional_tickets = [additional_ticket.ticket]
            if decryption_key is None:
                decryption_key = additional_ticket.session_key
        else:
            additional_tickets = None
            if decryption_key is None:
                decryption_key = self.TicketDecryptionKey_from_creds(
                    target_creds, etype=expected_ticket_etype)

        subkey = self.RandomKey(tgt.session_key.etype)

        if armor_tgt is not None:
            # Build an explicit FAST armor key from the armor TGT and bind
            # it to the authenticator subkey.
            armor_subkey = self.RandomKey(subkey.etype)
            explicit_armor_key = self.generate_armor_key(armor_subkey,
                                                         armor_tgt.session_key)
            armor_key = kcrypto.cf2(explicit_armor_key.key,
                                    subkey.key,
                                    b'explicitarmor',
                                    b'tgsarmor')
            armor_key = Krb5EncryptionKey(armor_key, None)

            generate_fast_fn = self.generate_simple_fast
            generate_fast_armor_fn = self.generate_ap_req

            if pac_options is None:
                pac_options = '1' # claims support
        else:
            armor_subkey = None
            armor_key = None
            generate_fast_fn = None
            generate_fast_armor_fn = None

        if etypes is None:
            etypes = (AES256_CTS_HMAC_SHA1_96, ARCFOUR_HMAC_MD5)

        if expected_error:
            check_error_fn = self.generic_check_kdc_error
            check_rep_fn = None
        else:
            check_error_fn = None
            check_rep_fn = self.generic_check_kdc_rep

        if expected_cname is None:
            expected_cname = tgt.cname

        kdc_exchange_dict = self.tgs_exchange_dict(
            creds=creds,
            expected_crealm=tgt.crealm,
            expected_cname=expected_cname,
            expected_srealm=srealm,
            expected_sname=expected_sname,
            expected_account_name=expected_account_name,
            expected_flags=expected_flags,
            ticket_decryption_key=decryption_key,
            generate_padata_fn=generate_padata_fn,
            generate_fast_padata_fn=generate_fast_padata_fn,
            generate_fast_fn=generate_fast_fn,
            generate_fast_armor_fn=generate_fast_armor_fn,
            check_error_fn=check_error_fn,
            check_rep_fn=check_rep_fn,
            check_kdc_private_fn=self.generic_check_kdc_private,
            expected_error_mode=expected_error,
            expect_status=expect_status,
            expected_status=expected_status,
            tgt=tgt,
            armor_key=armor_key,
            armor_tgt=armor_tgt,
            armor_subkey=armor_subkey,
            pac_options=pac_options,
            authenticator_subkey=subkey,
            kdc_options=kdc_options,
            expected_supported_etypes=expected_supported_etypes,
            expect_edata=expect_edata,
            expect_pac=expect_pac,
            expect_pac_attrs=expect_pac_attrs,
            expect_pac_attrs_pac_request=expect_pac_attrs_pac_request,
            expect_requester_sid=expect_requester_sid,
            expected_sid=expected_sid,
            expected_groups=expected_groups,
            unexpected_groups=unexpected_groups,
            expect_device_info=expect_device_info,
            expected_device_domain_sid=expected_device_domain_sid,
            expected_device_groups=expected_device_groups,
            expect_client_claims=expect_client_claims,
            expected_client_claims=expected_client_claims,
            unexpected_client_claims=unexpected_client_claims,
            expect_device_claims=expect_device_claims,
            expected_device_claims=expected_device_claims,
            expected_proxy_target=expected_proxy_target,
            expected_transited_services=expected_transited_services,
            expected_extra_pac_buffers=expected_extra_pac_buffers,
            check_patypes=check_patypes)

        rep = self._generic_kdc_exchange(kdc_exchange_dict,
                                         cname=None,
                                         realm=srealm,
                                         sname=sname,
                                         till_time=till,
                                         etypes=etypes,
                                         additional_tickets=additional_tickets)
        if expected_error:
            self.check_error_rep(rep, expected_error)
            return None
        else:
            self.check_tgs_reply(rep)
            return kdc_exchange_dict['rep_ticket_creds']
+
+
+class KdcTgsTests(KdcTgsBaseTests):
+
    def setUp(self):
        super().setUp()
        # Module-level flags controlling optional debug output.
        self.do_asn1_print = global_asn1_print
        self.do_hexdump = global_hexdump
+
    # NOTE(review): the method name contains a doubled "not"
    # ("does_not_not_match"); renaming would change the test ID referenced
    # by test listings/knownfail files, so it is left unchanged.
    def test_tgs_req_cname_does_not_not_match_authenticator_cname(self):
        """ Try and obtain a ticket from the TGS, but supply a cname
        that differs from that provided to the krbtgt
        """
        # Create the user account
        samdb = self.get_samdb()
        user_name = "tsttktusr"
        (uc, _) = self.create_account(samdb, user_name)
        realm = uc.get_realm().lower()

        # Do the initial AS-REQ, should get a pre-authentication required
        # response
        etype = (AES256_CTS_HMAC_SHA1_96,)
        cname = self.PrincipalName_create(
            name_type=NT_PRINCIPAL, names=[user_name])
        sname = self.PrincipalName_create(
            name_type=NT_SRV_INST, names=["krbtgt", realm])

        rep = self.as_req(cname, sname, realm, etype)
        self.check_pre_authentication(rep)

        # Do the next AS-REQ
        padata = self.get_enc_timestamp_pa_data(uc, rep)
        key = self.get_as_rep_key(uc, rep)
        rep = self.as_req(cname, sname, realm, etype, padata=[padata])
        self.check_as_reply(rep)

        # Request a service ticket, but use a cname that does not match
        # that in the original AS-REQ
        enc_part2 = self.get_as_rep_enc_data(key, rep)
        key = self.EncryptionKey_import(enc_part2['key'])
        ticket = rep['ticket']

        cname = self.PrincipalName_create(
            name_type=NT_PRINCIPAL,
            names=["Administrator"])
        sname = self.PrincipalName_create(
            name_type=NT_PRINCIPAL,
            names=["host", samdb.host_dns_name()])

        # The KDC must reject the mismatch with KDC_ERR_BADMATCH; no
        # encrypted part is expected in the error reply.
        (rep, enc_part) = self.tgs_req(cname, sname, realm, ticket, key, etype,
                                       creds=uc,
                                       expected_error_mode=KDC_ERR_BADMATCH,
                                       expect_edata=False)

        self.assertIsNone(
            enc_part,
            "rep = {%s}, enc_part = {%s}" % (rep, enc_part))
        self.assertEqual(KRB_ERROR, rep['msg-type'], "rep = {%s}" % rep)
        self.assertEqual(
            KDC_ERR_BADMATCH,
            rep['error-code'],
            "rep = {%s}" % rep)
+
    def test_ldap_service_ticket(self):
        """Get a ticket to the ldap service
        """
        # Create the user account
        samdb = self.get_samdb()
        user_name = "tsttktusr"
        (uc, _) = self.create_account(samdb, user_name)
        realm = uc.get_realm().lower()

        # Do the initial AS-REQ, should get a pre-authentication required
        # response
        etype = (AES256_CTS_HMAC_SHA1_96,)
        cname = self.PrincipalName_create(
            name_type=NT_PRINCIPAL, names=[user_name])
        sname = self.PrincipalName_create(
            name_type=NT_SRV_INST, names=["krbtgt", realm])

        rep = self.as_req(cname, sname, realm, etype)
        self.check_pre_authentication(rep)

        # Do the next AS-REQ
        padata = self.get_enc_timestamp_pa_data(uc, rep)
        key = self.get_as_rep_key(uc, rep)
        rep = self.as_req(cname, sname, realm, etype, padata=[padata])
        self.check_as_reply(rep)

        enc_part2 = self.get_as_rep_enc_data(key, rep)
        key = self.EncryptionKey_import(enc_part2['key'])
        ticket = rep['ticket']

        # Request a ticket to the ldap service
        sname = self.PrincipalName_create(
            name_type=NT_SRV_INST,
            names=["ldap", samdb.host_dns_name()])

        # The DC's credentials are passed so the reply ticket can be
        # decrypted and verified.
        (rep, _) = self.tgs_req(
            cname, sname, uc.get_realm(), ticket, key, etype,
            service_creds=self.get_dc_creds())

        self.check_tgs_reply(rep)
+
    def test_get_ticket_for_host_service_of_machine_account(self):
        """Obtain a service ticket to a machine account and verify that
        the PAC in it identifies the requesting user."""

        # Create a user and machine account for the test.
        #
        samdb = self.get_samdb()
        user_name = "tsttktusr"
        (uc, dn) = self.create_account(samdb, user_name)
        (mc, _) = self.create_account(samdb, "tsttktmac",
                                      account_type=self.AccountType.COMPUTER)
        realm = uc.get_realm().lower()

        # Do the initial AS-REQ, should get a pre-authentication required
        # response
        etype = (AES256_CTS_HMAC_SHA1_96, ARCFOUR_HMAC_MD5)
        cname = self.PrincipalName_create(
            name_type=NT_PRINCIPAL, names=[user_name])
        sname = self.PrincipalName_create(
            name_type=NT_SRV_INST, names=["krbtgt", realm])

        rep = self.as_req(cname, sname, realm, etype)
        self.check_pre_authentication(rep)

        # Do the next AS-REQ
        padata = self.get_enc_timestamp_pa_data(uc, rep)
        key = self.get_as_rep_key(uc, rep)
        rep = self.as_req(cname, sname, realm, etype, padata=[padata])
        self.check_as_reply(rep)

        # Request a ticket to the host service on the machine account
        ticket = rep['ticket']
        enc_part2 = self.get_as_rep_enc_data(key, rep)
        key = self.EncryptionKey_import(enc_part2['key'])
        cname = self.PrincipalName_create(
            name_type=NT_PRINCIPAL,
            names=[user_name])
        sname = self.PrincipalName_create(
            name_type=NT_PRINCIPAL,
            names=[mc.get_username()])

        (rep, enc_part) = self.tgs_req(
            cname, sname, uc.get_realm(), ticket, key, etype,
            service_creds=mc)
        self.check_tgs_reply(rep)

        # Check the contents of the service ticket
        ticket = rep['ticket']
        enc_part = self.decode_service_ticket(mc, ticket)

        # The PAC must carry the requesting user's account name, logon
        # name, domain, UPN and SID.
        pac_data = self.get_pac_data(enc_part['authorization-data'])
        sid = uc.get_sid()
        upn = "%s@%s" % (uc.get_username(), realm)
        self.assertEqual(
            uc.get_username(),
            str(pac_data.account_name),
            "rep = {%s},%s" % (rep, pac_data))
        self.assertEqual(
            uc.get_username(),
            pac_data.logon_name,
            "rep = {%s},%s" % (rep, pac_data))
        self.assertEqual(
            uc.get_realm(),
            pac_data.domain_name,
            "rep = {%s},%s" % (rep, pac_data))
        self.assertEqual(
            upn,
            pac_data.upn,
            "rep = {%s},%s" % (rep, pac_data))
        self.assertEqual(
            sid,
            pac_data.account_sid,
            "rep = {%s},%s" % (rep, pac_data))
+
    def test_request(self):
        """A plain TGS request carries the PAC from the TGT through to
        the service ticket."""
        client_creds = self.get_client_creds()
        service_creds = self.get_service_creds()

        tgt = self.get_tgt(client_creds)

        pac = self.get_ticket_pac(tgt)
        self.assertIsNotNone(pac)

        ticket = self._make_tgs_request(client_creds, service_creds, tgt)

        pac = self.get_ticket_pac(ticket)
        self.assertIsNotNone(pac)

    def test_request_no_pac(self):
        """With PA-PAC-REQUEST declining a PAC, the TGT still contains
        one but the resulting service ticket does not."""
        client_creds = self.get_client_creds()
        service_creds = self.get_service_creds()

        tgt = self.get_tgt(client_creds, pac_request=False)

        pac = self.get_ticket_pac(tgt)
        self.assertIsNotNone(pac)

        ticket = self._make_tgs_request(client_creds, service_creds, tgt,
                                        pac_request=False, expect_pac=False)

        pac = self.get_ticket_pac(ticket, expect_pac=False)
        self.assertIsNone(pac)
+
    # Enterprise principals with the canonicalize KDC option: the KDC is
    # expected to canonicalise the NT-ENTERPRISE-PRINCIPAL client name
    # (user@realm) back to the plain account name, for both user and
    # computer accounts, regardless of the realm's case in the name.

    def test_request_enterprise_canon(self):
        upn = self.get_new_username()
        client_creds = self.get_cached_creds(
            account_type=self.AccountType.USER,
            opts={'upn': upn})
        service_creds = self.get_service_creds()

        user_name = client_creds.get_username()
        realm = client_creds.get_realm()
        client_account = f'{user_name}@{realm}'

        expected_cname = self.PrincipalName_create(
            name_type=NT_PRINCIPAL,
            names=[user_name])

        kdc_options = 'canonicalize'

        tgt = self.get_tgt(client_creds,
                           client_account=client_account,
                           client_name_type=NT_ENTERPRISE_PRINCIPAL,
                           expected_cname=expected_cname,
                           expected_account_name=user_name,
                           kdc_options=kdc_options)

        self._make_tgs_request(
            client_creds, service_creds, tgt,
            client_account=client_account,
            client_name_type=NT_ENTERPRISE_PRINCIPAL,
            expected_cname=expected_cname,
            expected_account_name=user_name,
            kdc_options=kdc_options)

    def test_request_enterprise_canon_case(self):
        # Same as above, but the realm in the enterprise name is
        # lower-cased.
        upn = self.get_new_username()
        client_creds = self.get_cached_creds(
            account_type=self.AccountType.USER,
            opts={'upn': upn})
        service_creds = self.get_service_creds()

        user_name = client_creds.get_username()
        realm = client_creds.get_realm().lower()
        client_account = f'{user_name}@{realm}'

        expected_cname = self.PrincipalName_create(
            name_type=NT_PRINCIPAL,
            names=[user_name])

        kdc_options = 'canonicalize'

        tgt = self.get_tgt(client_creds,
                           client_account=client_account,
                           client_name_type=NT_ENTERPRISE_PRINCIPAL,
                           expected_cname=expected_cname,
                           expected_account_name=user_name,
                           kdc_options=kdc_options)

        self._make_tgs_request(
            client_creds, service_creds, tgt,
            client_account=client_account,
            client_name_type=NT_ENTERPRISE_PRINCIPAL,
            expected_cname=expected_cname,
            expected_account_name=user_name,
            kdc_options=kdc_options)

    def test_request_enterprise_canon_mac(self):
        # As test_request_enterprise_canon, but with a computer account.
        upn = self.get_new_username()
        client_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER,
            opts={'upn': upn})
        service_creds = self.get_service_creds()

        user_name = client_creds.get_username()
        realm = client_creds.get_realm()
        client_account = f'{user_name}@{realm}'

        expected_cname = self.PrincipalName_create(
            name_type=NT_PRINCIPAL,
            names=[user_name])

        kdc_options = 'canonicalize'

        tgt = self.get_tgt(client_creds,
                           client_account=client_account,
                           client_name_type=NT_ENTERPRISE_PRINCIPAL,
                           expected_cname=expected_cname,
                           expected_account_name=user_name,
                           kdc_options=kdc_options)

        self._make_tgs_request(
            client_creds, service_creds, tgt,
            client_account=client_account,
            client_name_type=NT_ENTERPRISE_PRINCIPAL,
            expected_cname=expected_cname,
            expected_account_name=user_name,
            kdc_options=kdc_options)

    def test_request_enterprise_canon_case_mac(self):
        # Computer account with a lower-cased realm in the enterprise
        # name.
        upn = self.get_new_username()
        client_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER,
            opts={'upn': upn})
        service_creds = self.get_service_creds()

        user_name = client_creds.get_username()
        realm = client_creds.get_realm().lower()
        client_account = f'{user_name}@{realm}'

        expected_cname = self.PrincipalName_create(
            name_type=NT_PRINCIPAL,
            names=[user_name])

        kdc_options = 'canonicalize'

        tgt = self.get_tgt(client_creds,
                           client_account=client_account,
                           client_name_type=NT_ENTERPRISE_PRINCIPAL,
                           expected_cname=expected_cname,
                           expected_account_name=user_name,
                           kdc_options=kdc_options)

        self._make_tgs_request(
            client_creds, service_creds, tgt,
            client_account=client_account,
            client_name_type=NT_ENTERPRISE_PRINCIPAL,
            expected_cname=expected_cname,
            expected_account_name=user_name,
            kdc_options=kdc_options)
+
    # Enterprise principals WITHOUT canonicalisation (kdc_options='0'):
    # no expected_cname is asserted, only the mapped account name.

    def test_request_enterprise_no_canon(self):
        upn = self.get_new_username()
        client_creds = self.get_cached_creds(
            account_type=self.AccountType.USER,
            opts={'upn': upn})
        service_creds = self.get_service_creds()

        user_name = client_creds.get_username()
        realm = client_creds.get_realm()
        client_account = f'{user_name}@{realm}'

        kdc_options = '0'

        tgt = self.get_tgt(client_creds,
                           client_account=client_account,
                           client_name_type=NT_ENTERPRISE_PRINCIPAL,
                           expected_account_name=user_name,
                           kdc_options=kdc_options)

        self._make_tgs_request(
            client_creds, service_creds, tgt,
            client_account=client_account,
            client_name_type=NT_ENTERPRISE_PRINCIPAL,
            expected_account_name=user_name,
            kdc_options=kdc_options)

    def test_request_enterprise_no_canon_case(self):
        # Lower-cased realm in the enterprise name.
        upn = self.get_new_username()
        client_creds = self.get_cached_creds(
            account_type=self.AccountType.USER,
            opts={'upn': upn})
        service_creds = self.get_service_creds()

        user_name = client_creds.get_username()
        realm = client_creds.get_realm().lower()
        client_account = f'{user_name}@{realm}'

        kdc_options = '0'

        tgt = self.get_tgt(client_creds,
                           client_account=client_account,
                           client_name_type=NT_ENTERPRISE_PRINCIPAL,
                           expected_account_name=user_name,
                           kdc_options=kdc_options)

        self._make_tgs_request(
            client_creds, service_creds, tgt,
            client_account=client_account,
            client_name_type=NT_ENTERPRISE_PRINCIPAL,
            expected_account_name=user_name,
            kdc_options=kdc_options)

    def test_request_enterprise_no_canon_mac(self):
        # Computer-account variant.
        upn = self.get_new_username()
        client_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER,
            opts={'upn': upn})
        service_creds = self.get_service_creds()

        user_name = client_creds.get_username()
        realm = client_creds.get_realm()
        client_account = f'{user_name}@{realm}'

        kdc_options = '0'

        tgt = self.get_tgt(client_creds,
                           client_account=client_account,
                           client_name_type=NT_ENTERPRISE_PRINCIPAL,
                           expected_account_name=user_name,
                           kdc_options=kdc_options)

        self._make_tgs_request(
            client_creds, service_creds, tgt,
            client_account=client_account,
            client_name_type=NT_ENTERPRISE_PRINCIPAL,
            expected_account_name=user_name,
            kdc_options=kdc_options)

    def test_request_enterprise_no_canon_case_mac(self):
        # Computer account with a lower-cased realm.
        upn = self.get_new_username()
        client_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER,
            opts={'upn': upn})
        service_creds = self.get_service_creds()

        user_name = client_creds.get_username()
        realm = client_creds.get_realm().lower()
        client_account = f'{user_name}@{realm}'

        kdc_options = '0'

        tgt = self.get_tgt(client_creds,
                           client_account=client_account,
                           client_name_type=NT_ENTERPRISE_PRINCIPAL,
                           expected_account_name=user_name,
                           kdc_options=kdc_options)

        self._make_tgs_request(
            client_creds, service_creds, tgt,
            client_account=client_account,
            client_name_type=NT_ENTERPRISE_PRINCIPAL,
            expected_account_name=user_name,
            kdc_options=kdc_options)
+
    # Interaction of the account's no-auth-data-required flag and
    # PA-PAC-REQUEST with PAC issuance, plus the behaviour when the PAC
    # is stripped from the TGT entirely (which must be rejected).

    def test_client_no_auth_data_required(self):
        # The flag on the *client* account does not suppress the PAC.
        client_creds = self.get_cached_creds(
            account_type=self.AccountType.USER,
            opts={'no_auth_data_required': True})
        service_creds = self.get_service_creds()

        tgt = self.get_tgt(client_creds)

        pac = self.get_ticket_pac(tgt)
        self.assertIsNotNone(pac)

        ticket = self._make_tgs_request(client_creds, service_creds, tgt)

        pac = self.get_ticket_pac(ticket)
        self.assertIsNotNone(pac)

    def test_no_pac_client_no_auth_data_required(self):
        # Even when the client declines a PAC, the service ticket still
        # carries one here.
        client_creds = self.get_cached_creds(
            account_type=self.AccountType.USER,
            opts={'no_auth_data_required': True})
        service_creds = self.get_service_creds()

        tgt = self.get_tgt(client_creds)

        pac = self.get_ticket_pac(tgt)
        self.assertIsNotNone(pac)

        ticket = self._make_tgs_request(client_creds, service_creds, tgt,
                                        pac_request=False, expect_pac=True)

        pac = self.get_ticket_pac(ticket)
        self.assertIsNotNone(pac)

    def test_service_no_auth_data_required(self):
        # The flag on the *service* account suppresses the PAC in the
        # service ticket.
        client_creds = self.get_client_creds()
        service_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER,
            opts={'no_auth_data_required': True})

        tgt = self.get_tgt(client_creds)

        pac = self.get_ticket_pac(tgt)
        self.assertIsNotNone(pac)

        ticket = self._make_tgs_request(client_creds, service_creds, tgt,
                                        expect_pac=False)

        pac = self.get_ticket_pac(ticket, expect_pac=False)
        self.assertIsNone(pac)

    def test_no_pac_service_no_auth_data_required(self):
        client_creds = self.get_client_creds()
        service_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER,
            opts={'no_auth_data_required': True})

        tgt = self.get_tgt(client_creds, pac_request=False)

        pac = self.get_ticket_pac(tgt)
        self.assertIsNotNone(pac)

        ticket = self._make_tgs_request(client_creds, service_creds, tgt,
                                        pac_request=False, expect_pac=False)

        pac = self.get_ticket_pac(ticket, expect_pac=False)
        self.assertIsNone(pac)

    def test_remove_pac_service_no_auth_data_required(self):
        # A TGT with its PAC removed is rejected regardless of the
        # service account's flag.
        client_creds = self.get_client_creds()
        service_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER,
            opts={'no_auth_data_required': True})

        tgt = self.modified_ticket(self.get_tgt(client_creds),
                                   exclude_pac=True)

        pac = self.get_ticket_pac(tgt, expect_pac=False)
        self.assertIsNone(pac)

        self._make_tgs_request(client_creds, service_creds, tgt,
                               expect_error=True)

    def test_remove_pac_client_no_auth_data_required(self):
        client_creds = self.get_cached_creds(
            account_type=self.AccountType.USER,
            opts={'no_auth_data_required': True})
        service_creds = self.get_service_creds()

        tgt = self.modified_ticket(self.get_tgt(client_creds),
                                   exclude_pac=True)

        pac = self.get_ticket_pac(tgt, expect_pac=False)
        self.assertIsNone(pac)

        self._make_tgs_request(client_creds, service_creds, tgt,
                               expect_error=True)

    def test_remove_pac(self):
        client_creds = self.get_client_creds()
        service_creds = self.get_service_creds()

        tgt = self.modified_ticket(self.get_tgt(client_creds),
                                   exclude_pac=True)

        pac = self.get_ticket_pac(tgt, expect_pac=False)
        self.assertIsNone(pac)

        self._make_tgs_request(client_creds, service_creds, tgt,
                               expect_error=True)
+
    def test_upn_dns_info_ex_user(self):
        client_creds = self.get_client_creds()
        self._run_upn_dns_info_ex_test(client_creds)

    def test_upn_dns_info_ex_mac(self):
        mach_creds = self.get_mach_creds()
        self._run_upn_dns_info_ex_test(mach_creds)

    def test_upn_dns_info_ex_upn_user(self):
        client_creds = self.get_cached_creds(
            account_type=self.AccountType.USER,
            opts={'upn': 'upn_dns_info_test_upn0@bar'})
        self._run_upn_dns_info_ex_test(client_creds)

    def test_upn_dns_info_ex_upn_mac(self):
        mach_creds = self.get_cached_creds(
            account_type=self.AccountType.COMPUTER,
            opts={'upn': 'upn_dns_info_test_upn1@bar'})
        self._run_upn_dns_info_ex_test(mach_creds)

    def _run_upn_dns_info_ex_test(self, client_creds):
        """Check the account name, UPN and SID asserted for both the TGT
        and a derived service ticket.

        When the account has no explicit UPN, the implicit
        account@realm (lower-cased realm) form is expected.
        """
        service_creds = self.get_service_creds()

        account_name = client_creds.get_username()
        upn_name = client_creds.get_upn()
        if upn_name is None:
            realm = client_creds.get_realm().lower()
            upn_name = f'{account_name}@{realm}'
        sid = client_creds.get_sid()

        tgt = self.get_tgt(client_creds,
                           expected_account_name=account_name,
                           expected_upn_name=upn_name,
                           expected_sid=sid)

        self._make_tgs_request(client_creds, service_creds, tgt,
                               expected_account_name=account_name,
                               expected_upn_name=upn_name,
                               expected_sid=sid)
+
+ # Test making a TGS request.
+ def test_tgs_req(self):
+ creds = self._get_creds()
+ tgt = self._get_tgt(creds)
+ self._run_tgs(tgt, creds, expected_error=0)
+
+ def test_renew_req(self):
+ creds = self._get_creds()
+ tgt = self._get_tgt(creds, renewable=True)
+ self._renew_tgt(tgt, creds, expected_error=0,
+ expect_pac_attrs=True,
+ expect_pac_attrs_pac_request=True,
+ expect_requester_sid=True)
+
+ def test_validate_req(self):
+ creds = self._get_creds()
+ tgt = self._get_tgt(creds, invalid=True)
+ self._validate_tgt(tgt, creds, expected_error=0,
+ expect_pac_attrs=True,
+ expect_pac_attrs_pac_request=True,
+ expect_requester_sid=True)
+
+ def test_s4u2self_req(self):
+ creds = self._get_creds()
+ tgt = self._get_tgt(creds)
+ self._s4u2self(tgt, creds, expected_error=0)
+
+ def test_user2user_req(self):
+ creds = self._get_creds()
+ tgt = self._get_tgt(creds)
+ self._user2user(tgt, creds, expected_error=0)
+
+ def test_user2user_user_self_req(self):
+ creds = self._get_user_creds()
+ tgt = self._get_tgt(creds)
+ username = creds.get_username()
+ sname = self.PrincipalName_create(
+ name_type=NT_PRINCIPAL,
+ names=[username])
+ self._user2user(tgt, creds, sname=sname, user_tgt=tgt, user_creds=creds, expected_error=0)
+
+ def test_user2user_computer_self_princ1_req(self):
+ creds = self._get_creds()
+ tgt = self._get_tgt(creds)
+ username = creds.get_username()
+ sname = self.PrincipalName_create(
+ name_type=NT_PRINCIPAL,
+ names=[username])
+ self._user2user(tgt, creds, sname=sname, user_tgt=tgt, user_creds=creds, expected_error=0)
+
+ def test_user2user_computer_self_princ2_req(self):
+ creds = self._get_creds()
+ tgt = self._get_tgt(creds)
+ self._user2user(tgt, creds, user_tgt=tgt, user_creds=creds, expected_error=0)
+
+ def test_fast_req(self):
+ creds = self._get_creds()
+ tgt = self._get_tgt(creds)
+ self._fast(tgt, creds, expected_error=0)
+
+ def test_tgs_req_invalid(self):
+ creds = self._get_creds()
+ tgt = self._get_tgt(creds, invalid=True)
+ self._run_tgs(tgt, creds, expected_error=KRB_ERR_TKT_NYV)
+
+ def test_s4u2self_req_invalid(self):
+ creds = self._get_creds()
+ tgt = self._get_tgt(creds, invalid=True)
+ self._s4u2self(tgt, creds, expected_error=KRB_ERR_TKT_NYV)
+
+ def test_user2user_req_invalid(self):
+ creds = self._get_creds()
+ tgt = self._get_tgt(creds, invalid=True)
+ self._user2user(tgt, creds, expected_error=KRB_ERR_TKT_NYV)
+
+ def test_fast_req_invalid(self):
+ creds = self._get_creds()
+ tgt = self._get_tgt(creds, invalid=True)
+ self._fast(tgt, creds, expected_error=KRB_ERR_TKT_NYV,
+ expected_sname=self.get_krbtgt_sname())
+
    # A missing requester-SID PAC buffer causes KDC_ERR_TGT_REVOKED; a
    # missing PAC-attributes buffer alone is tolerated. Extra (unknown)
    # PAC buffers survive a DC-issued TGT but not an RODC-issued one.

    def test_tgs_req_no_requester_sid(self):
        creds = self._get_creds()
        tgt = self._get_tgt(creds, remove_requester_sid=True)

        self._run_tgs(tgt, creds, expected_error=KDC_ERR_TGT_REVOKED)

    def test_tgs_req_no_pac_attrs(self):
        creds = self._get_creds()
        tgt = self._get_tgt(creds, remove_pac_attrs=True)

        self._run_tgs(tgt, creds, expected_error=0, expect_pac=True,
                      expect_pac_attrs=False)

    def test_tgs_req_from_rodc_no_requester_sid(self):
        creds = self._get_creds(replication_allowed=True,
                                revealed_to_rodc=True)
        tgt = self._get_tgt(creds, from_rodc=True, remove_requester_sid=True)

        self._run_tgs(tgt, creds, expected_error=KDC_ERR_TGT_REVOKED)

    def test_tgs_req_from_rodc_no_pac_attrs(self):
        creds = self._get_creds(replication_allowed=True,
                                revealed_to_rodc=True)
        tgt = self._get_tgt(creds, from_rodc=True, remove_pac_attrs=True)
        self._run_tgs(tgt, creds, expected_error=0, expect_pac=True,
                      expect_pac_attrs=False)

    def test_tgs_req_extra_pac_buffers(self):
        extra_pac_buffers = [123, 456, 789]

        creds = self._get_creds()
        tgt = self._get_tgt(creds, extra_pac_buffers=extra_pac_buffers)

        # Expect that the extra PAC buffers are retained in the TGT.
        self._run_tgs(tgt, creds, expected_error=0,
                      expected_extra_pac_buffers=extra_pac_buffers)

    def test_tgs_req_from_rodc_extra_pac_buffers(self):
        extra_pac_buffers = [123, 456, 789]

        creds = self._get_creds(replication_allowed=True,
                                revealed_to_rodc=True)
        tgt = self._get_tgt(creds, from_rodc=True,
                            extra_pac_buffers=extra_pac_buffers)

        # Expect that the extra PAC buffers are removed from the
        # RODC-issued TGT.
        self._run_tgs(tgt, creds, expected_error=0)
+
+ # Test making a request without a PAC.
+ def test_tgs_no_pac(self):
+ creds = self._get_creds()
+ tgt = self._get_tgt(creds, remove_pac=True)
+ self._run_tgs(tgt, creds, expected_error=KDC_ERR_TGT_REVOKED)
+
+ def test_renew_no_pac(self):
+ creds = self._get_creds()
+ tgt = self._get_tgt(creds, renewable=True, remove_pac=True)
+ self._renew_tgt(tgt, creds, expected_error=KDC_ERR_TGT_REVOKED)
+
+ def test_validate_no_pac(self):
+ creds = self._get_creds()
+ tgt = self._get_tgt(creds, invalid=True, remove_pac=True)
+ self._validate_tgt(tgt, creds, expected_error=KDC_ERR_TGT_REVOKED)
+
+ def test_s4u2self_no_pac(self):
+ creds = self._get_creds()
+ tgt = self._get_tgt(creds, remove_pac=True)
+ self._s4u2self(tgt, creds,
+ expected_error=KDC_ERR_TGT_REVOKED,
+ expect_edata=False)
+
+ def test_user2user_no_pac(self):
+ creds = self._get_creds()
+ tgt = self._get_tgt(creds, remove_pac=True)
+ self._user2user(tgt, creds, expected_error=KDC_ERR_TGT_REVOKED)
+
+ def test_fast_no_pac(self):
+ creds = self._get_creds()
+ tgt = self._get_tgt(creds, remove_pac=True)
+ self._fast(tgt, creds, expected_error=KDC_ERR_TGT_REVOKED,
+ expected_sname=self.get_krbtgt_sname())
+
+ def test_fast_as_req_no_pac(self):
+ creds = self._get_creds()
+ tgt = self._get_tgt(creds, remove_pac=True)
+ self._fast_as_req(tgt, creds, expected_error=KDC_ERR_TGT_REVOKED,
+ expected_sname=self.get_krbtgt_sname())
+
    # Test making a request with authdata and without a PAC.
    # (The empty authdata container does not rescue a PAC-less TGT; the
    # result is the same KDC_ERR_TGT_REVOKED as above.)
    def test_tgs_authdata_no_pac(self):
        creds = self._get_creds()
        tgt = self._get_tgt(creds, remove_pac=True, allow_empty_authdata=True)
        self._run_tgs(tgt, creds, expected_error=KDC_ERR_TGT_REVOKED)

    def test_renew_authdata_no_pac(self):
        creds = self._get_creds()
        tgt = self._get_tgt(creds, renewable=True, remove_pac=True,
                            allow_empty_authdata=True)
        self._renew_tgt(tgt, creds, expected_error=KDC_ERR_TGT_REVOKED)

    def test_validate_authdata_no_pac(self):
        creds = self._get_creds()
        tgt = self._get_tgt(creds, invalid=True, remove_pac=True,
                            allow_empty_authdata=True)
        self._validate_tgt(tgt, creds, expected_error=KDC_ERR_TGT_REVOKED)

    def test_s4u2self_authdata_no_pac(self):
        creds = self._get_creds()
        tgt = self._get_tgt(creds, remove_pac=True, allow_empty_authdata=True)
        self._s4u2self(tgt, creds,
                       expected_error=KDC_ERR_TGT_REVOKED,
                       expect_edata=False)

    def test_user2user_authdata_no_pac(self):
        creds = self._get_creds()
        tgt = self._get_tgt(creds, remove_pac=True, allow_empty_authdata=True)
        self._user2user(tgt, creds, expected_error=KDC_ERR_TGT_REVOKED)

    def test_fast_authdata_no_pac(self):
        creds = self._get_creds()
        tgt = self._get_tgt(creds, remove_pac=True, allow_empty_authdata=True)
        self._fast(tgt, creds, expected_error=KDC_ERR_TGT_REVOKED,
                   expected_sname=self.get_krbtgt_sname())

    def test_fast_as_req_authdata_no_pac(self):
        creds = self._get_creds()
        tgt = self._get_tgt(creds, remove_pac=True, allow_empty_authdata=True)
        self._fast_as_req(tgt, creds, expected_error=KDC_ERR_TGT_REVOKED,
                          expected_sname=self.get_krbtgt_sname())
+
    # Test changing the SID in the PAC to that of another account.
    # Changing the requester SID (alone or together with the logon-info
    # SID) triggers KDC_ERR_TGT_REVOKED; changing only the logon-info
    # SID does not.
    def test_tgs_sid_mismatch_existing(self):
        creds = self._get_creds()
        existing_rid = self._get_existing_rid()
        tgt = self._get_tgt(creds, new_rid=existing_rid)
        self._run_tgs(tgt, creds, expected_error=KDC_ERR_TGT_REVOKED)

    def test_renew_sid_mismatch_existing(self):
        creds = self._get_creds()
        existing_rid = self._get_existing_rid()
        tgt = self._get_tgt(creds, renewable=True, new_rid=existing_rid)
        self._renew_tgt(tgt, creds, expected_error=KDC_ERR_TGT_REVOKED)

    def test_validate_sid_mismatch_existing(self):
        creds = self._get_creds()
        existing_rid = self._get_existing_rid()
        tgt = self._get_tgt(creds, invalid=True, new_rid=existing_rid)
        self._validate_tgt(tgt, creds, expected_error=KDC_ERR_TGT_REVOKED)

    def test_s4u2self_sid_mismatch_existing(self):
        creds = self._get_creds()
        existing_rid = self._get_existing_rid()
        tgt = self._get_tgt(creds, new_rid=existing_rid)
        self._s4u2self(tgt, creds,
                       expected_error=KDC_ERR_TGT_REVOKED)

    def test_user2user_sid_mismatch_existing(self):
        creds = self._get_creds()
        existing_rid = self._get_existing_rid()
        tgt = self._get_tgt(creds, new_rid=existing_rid)
        self._user2user(tgt, creds,
                        expected_error=KDC_ERR_TGT_REVOKED)

    def test_fast_sid_mismatch_existing(self):
        creds = self._get_creds()
        existing_rid = self._get_existing_rid()
        tgt = self._get_tgt(creds, new_rid=existing_rid)
        self._fast(tgt, creds,
                   expected_error=KDC_ERR_TGT_REVOKED,
                   expected_sname=self.get_krbtgt_sname())

    def test_fast_as_req_sid_mismatch_existing(self):
        creds = self._get_creds()
        existing_rid = self._get_existing_rid()
        tgt = self._get_tgt(creds, new_rid=existing_rid)
        self._fast_as_req(tgt, creds,
                          expected_error=KDC_ERR_TGT_REVOKED,
                          expected_sname=self.get_krbtgt_sname())

    def test_requester_sid_mismatch_existing(self):
        # Only the requester-SID buffer is altered.
        creds = self._get_creds()
        existing_rid = self._get_existing_rid()
        tgt = self._get_tgt(creds, new_rid=existing_rid,
                            can_modify_logon_info=False)
        self._run_tgs(tgt, creds, expected_error=KDC_ERR_TGT_REVOKED)

    def test_logon_info_sid_mismatch_existing(self):
        # Only the logon-info SID is altered; this is accepted.
        creds = self._get_creds()
        existing_rid = self._get_existing_rid()
        tgt = self._get_tgt(creds, new_rid=existing_rid,
                            can_modify_requester_sid=False)
        self._run_tgs(tgt, creds, expected_error=0)

    def test_logon_info_only_sid_mismatch_existing(self):
        # Logon-info SID altered and requester-SID buffer removed.
        creds = self._get_creds()
        existing_rid = self._get_existing_rid()
        tgt = self._get_tgt(creds, new_rid=existing_rid,
                            remove_requester_sid=True)
        self._run_tgs(tgt, creds, expected_error=KDC_ERR_TGT_REVOKED)
+
    # Test changing the SID in the PAC to a non-existent one.
    # Same expectations as the existing-RID variants above.
    def test_tgs_sid_mismatch_nonexisting(self):
        creds = self._get_creds()
        nonexistent_rid = self._get_non_existent_rid()
        tgt = self._get_tgt(creds, new_rid=nonexistent_rid)
        self._run_tgs(tgt, creds, expected_error=KDC_ERR_TGT_REVOKED)

    def test_renew_sid_mismatch_nonexisting(self):
        creds = self._get_creds()
        nonexistent_rid = self._get_non_existent_rid()
        tgt = self._get_tgt(creds, renewable=True,
                            new_rid=nonexistent_rid)
        self._renew_tgt(tgt, creds, expected_error=KDC_ERR_TGT_REVOKED)

    def test_validate_sid_mismatch_nonexisting(self):
        creds = self._get_creds()
        nonexistent_rid = self._get_non_existent_rid()
        tgt = self._get_tgt(creds, invalid=True,
                            new_rid=nonexistent_rid)
        self._validate_tgt(tgt, creds, expected_error=KDC_ERR_TGT_REVOKED)

    def test_s4u2self_sid_mismatch_nonexisting(self):
        creds = self._get_creds()
        nonexistent_rid = self._get_non_existent_rid()
        tgt = self._get_tgt(creds, new_rid=nonexistent_rid)
        self._s4u2self(tgt, creds,
                       expected_error=KDC_ERR_TGT_REVOKED)

    def test_user2user_sid_mismatch_nonexisting(self):
        creds = self._get_creds()
        nonexistent_rid = self._get_non_existent_rid()
        tgt = self._get_tgt(creds, new_rid=nonexistent_rid)
        self._user2user(tgt, creds,
                        expected_error=KDC_ERR_TGT_REVOKED)

    def test_fast_sid_mismatch_nonexisting(self):
        creds = self._get_creds()
        nonexistent_rid = self._get_non_existent_rid()
        tgt = self._get_tgt(creds, new_rid=nonexistent_rid)
        self._fast(tgt, creds,
                   expected_error=KDC_ERR_TGT_REVOKED,
                   expected_sname=self.get_krbtgt_sname())

    def test_fast_as_req_sid_mismatch_nonexisting(self):
        creds = self._get_creds()
        nonexistent_rid = self._get_non_existent_rid()
        tgt = self._get_tgt(creds, new_rid=nonexistent_rid)
        self._fast_as_req(tgt, creds,
                          expected_error=KDC_ERR_TGT_REVOKED,
                          expected_sname=self.get_krbtgt_sname())

    def test_requester_sid_mismatch_nonexisting(self):
        # Only the requester-SID buffer is altered.
        creds = self._get_creds()
        nonexistent_rid = self._get_non_existent_rid()
        tgt = self._get_tgt(creds, new_rid=nonexistent_rid,
                            can_modify_logon_info=False)
        self._run_tgs(tgt, creds, expected_error=KDC_ERR_TGT_REVOKED)

    def test_logon_info_sid_mismatch_nonexisting(self):
        # Only the logon-info SID is altered; this is accepted.
        creds = self._get_creds()
        nonexistent_rid = self._get_non_existent_rid()
        tgt = self._get_tgt(creds, new_rid=nonexistent_rid,
                            can_modify_requester_sid=False)
        self._run_tgs(tgt, creds, expected_error=0)

    def test_logon_info_only_sid_mismatch_nonexisting(self):
        # Logon-info SID altered and requester-SID buffer removed.
        creds = self._get_creds()
        nonexistent_rid = self._get_non_existent_rid()
        tgt = self._get_tgt(creds, new_rid=nonexistent_rid,
                            remove_requester_sid=True)
        self._run_tgs(tgt, creds, expected_error=KDC_ERR_TGT_REVOKED)
+
    # Test with an RODC-issued ticket where the client is revealed to the RODC.
    # Such tickets are generally usable, though the renewed/validated
    # tickets lack PAC attributes.
    def test_tgs_rodc_revealed(self):
        creds = self._get_creds(replication_allowed=True,
                                revealed_to_rodc=True)
        tgt = self._get_tgt(creds, from_rodc=True)
        self._run_tgs(tgt, creds, expected_error=0)

    def test_renew_rodc_revealed(self):
        creds = self._get_creds(replication_allowed=True,
                                revealed_to_rodc=True)
        tgt = self._get_tgt(creds, renewable=True, from_rodc=True)
        self._renew_tgt(tgt, creds, expected_error=0,
                        expect_pac_attrs=False,
                        expect_requester_sid=True)

    def test_validate_rodc_revealed(self):
        creds = self._get_creds(replication_allowed=True,
                                revealed_to_rodc=True)
        tgt = self._get_tgt(creds, invalid=True, from_rodc=True)
        self._validate_tgt(tgt, creds, expected_error=0,
                           expect_pac_attrs=False,
                           expect_requester_sid=True)

    # This test fails on Windows, which gives KDC_ERR_C_PRINCIPAL_UNKNOWN when
    # attempting to use S4U2Self with a TGT from an RODC.
    def test_s4u2self_rodc_revealed(self):
        creds = self._get_creds(replication_allowed=True,
                                revealed_to_rodc=True)
        tgt = self._get_tgt(creds, from_rodc=True)
        self._s4u2self(tgt, creds,
                       expected_error=KDC_ERR_C_PRINCIPAL_UNKNOWN)

    def test_user2user_rodc_revealed(self):
        creds = self._get_creds(replication_allowed=True,
                                revealed_to_rodc=True)
        tgt = self._get_tgt(creds, from_rodc=True)
        self._user2user(tgt, creds, expected_error=0)
+
    # Test with an RODC-issued ticket where the SID in the PAC is changed to
    # that of another account.
    # Expectations mirror the non-RODC SID-mismatch tests above.
    def test_tgs_rodc_sid_mismatch_existing(self):
        creds = self._get_creds(replication_allowed=True,
                                revealed_to_rodc=True)
        existing_rid = self._get_existing_rid(replication_allowed=True,
                                              revealed_to_rodc=True)
        tgt = self._get_tgt(creds, from_rodc=True, new_rid=existing_rid)
        self._run_tgs(tgt, creds, expected_error=KDC_ERR_TGT_REVOKED)

    def test_renew_rodc_sid_mismatch_existing(self):
        creds = self._get_creds(replication_allowed=True,
                                revealed_to_rodc=True)
        existing_rid = self._get_existing_rid(replication_allowed=True,
                                              revealed_to_rodc=True)
        tgt = self._get_tgt(creds, renewable=True, from_rodc=True,
                            new_rid=existing_rid)
        self._renew_tgt(tgt, creds, expected_error=KDC_ERR_TGT_REVOKED)

    def test_validate_rodc_sid_mismatch_existing(self):
        creds = self._get_creds(replication_allowed=True,
                                revealed_to_rodc=True)
        existing_rid = self._get_existing_rid(replication_allowed=True,
                                              revealed_to_rodc=True)
        tgt = self._get_tgt(creds, invalid=True, from_rodc=True,
                            new_rid=existing_rid)
        self._validate_tgt(tgt, creds, expected_error=KDC_ERR_TGT_REVOKED)

    def test_s4u2self_rodc_sid_mismatch_existing(self):
        creds = self._get_creds(replication_allowed=True,
                                revealed_to_rodc=True)
        existing_rid = self._get_existing_rid(replication_allowed=True,
                                              revealed_to_rodc=True)
        tgt = self._get_tgt(creds, from_rodc=True, new_rid=existing_rid)
        self._s4u2self(tgt, creds, expected_error=KDC_ERR_TGT_REVOKED)

    def test_user2user_rodc_sid_mismatch_existing(self):
        creds = self._get_creds(replication_allowed=True,
                                revealed_to_rodc=True)
        existing_rid = self._get_existing_rid(replication_allowed=True,
                                              revealed_to_rodc=True)
        tgt = self._get_tgt(creds, from_rodc=True, new_rid=existing_rid)
        self._user2user(tgt, creds,
                        expected_error=KDC_ERR_TGT_REVOKED)

    def test_fast_rodc_sid_mismatch_existing(self):
        creds = self._get_creds(replication_allowed=True,
                                revealed_to_rodc=True)
        existing_rid = self._get_existing_rid(replication_allowed=True,
                                              revealed_to_rodc=True)
        tgt = self._get_tgt(creds, from_rodc=True, new_rid=existing_rid)
        self._fast(tgt, creds,
                   expected_error=KDC_ERR_TGT_REVOKED,
                   expected_sname=self.get_krbtgt_sname())

    def test_tgs_rodc_requester_sid_mismatch_existing(self):
        # Only the requester-SID buffer is altered.
        creds = self._get_creds(replication_allowed=True,
                                revealed_to_rodc=True)
        existing_rid = self._get_existing_rid(replication_allowed=True,
                                              revealed_to_rodc=True)
        tgt = self._get_tgt(creds, from_rodc=True, new_rid=existing_rid,
                            can_modify_logon_info=False)
        self._run_tgs(tgt, creds, expected_error=KDC_ERR_TGT_REVOKED)

    def test_tgs_rodc_logon_info_sid_mismatch_existing(self):
        # Only the logon-info SID is altered; this is accepted.
        creds = self._get_creds(replication_allowed=True,
                                revealed_to_rodc=True)
        existing_rid = self._get_existing_rid(replication_allowed=True,
                                              revealed_to_rodc=True)
        tgt = self._get_tgt(creds, from_rodc=True, new_rid=existing_rid,
                            can_modify_requester_sid=False)
        self._run_tgs(tgt, creds, expected_error=0)

    def test_tgs_rodc_logon_info_only_sid_mismatch_existing(self):
        # Logon-info SID altered and requester-SID buffer removed.
        creds = self._get_creds(replication_allowed=True,
                                revealed_to_rodc=True)
        existing_rid = self._get_existing_rid(replication_allowed=True,
                                              revealed_to_rodc=True)
        tgt = self._get_tgt(creds, from_rodc=True, new_rid=existing_rid,
                            remove_requester_sid=True)
        self._run_tgs(tgt, creds, expected_error=KDC_ERR_TGT_REVOKED)
+
+ # Test with an RODC-issued ticket where the SID in the PAC is changed to a
+ # non-existent one.
+ def test_tgs_rodc_sid_mismatch_nonexisting(self):
+ creds = self._get_creds(replication_allowed=True,
+ revealed_to_rodc=True)
+ nonexistent_rid = self._get_non_existent_rid()
+ tgt = self._get_tgt(creds, from_rodc=True, new_rid=nonexistent_rid)
+ self._run_tgs(tgt, creds, expected_error=KDC_ERR_TGT_REVOKED)
+
+ def test_renew_rodc_sid_mismatch_nonexisting(self):
+ creds = self._get_creds(replication_allowed=True,
+ revealed_to_rodc=True)
+ nonexistent_rid = self._get_non_existent_rid()
+ tgt = self._get_tgt(creds, renewable=True, from_rodc=True,
+ new_rid=nonexistent_rid)
+ self._renew_tgt(tgt, creds, expected_error=KDC_ERR_TGT_REVOKED)
+
+ def test_validate_rodc_sid_mismatch_nonexisting(self):
+ creds = self._get_creds(replication_allowed=True,
+ revealed_to_rodc=True)
+ nonexistent_rid = self._get_non_existent_rid()
+ tgt = self._get_tgt(creds, invalid=True, from_rodc=True,
+ new_rid=nonexistent_rid)
+ self._validate_tgt(tgt, creds, expected_error=KDC_ERR_TGT_REVOKED)
+
+ def test_s4u2self_rodc_sid_mismatch_nonexisting(self):
+ creds = self._get_creds(replication_allowed=True,
+ revealed_to_rodc=True)
+ nonexistent_rid = self._get_non_existent_rid()
+ tgt = self._get_tgt(creds, from_rodc=True, new_rid=nonexistent_rid)
+ self._s4u2self(tgt, creds, expected_error=KDC_ERR_TGT_REVOKED)
+
+ def test_user2user_rodc_sid_mismatch_nonexisting(self):
+ creds = self._get_creds(replication_allowed=True,
+ revealed_to_rodc=True)
+ nonexistent_rid = self._get_non_existent_rid()
+ tgt = self._get_tgt(creds, from_rodc=True, new_rid=nonexistent_rid)
+ self._user2user(tgt, creds,
+ expected_error=KDC_ERR_TGT_REVOKED)
+
+ def test_fast_rodc_sid_mismatch_nonexisting(self):
+ creds = self._get_creds(replication_allowed=True,
+ revealed_to_rodc=True)
+ nonexistent_rid = self._get_non_existent_rid()
+ tgt = self._get_tgt(creds, from_rodc=True, new_rid=nonexistent_rid)
+ self._fast(tgt, creds,
+ expected_error=KDC_ERR_TGT_REVOKED,
+ expected_sname=self.get_krbtgt_sname())
+
+ def test_tgs_rodc_requester_sid_mismatch_nonexisting(self):
+ creds = self._get_creds(replication_allowed=True,
+ revealed_to_rodc=True)
+ nonexistent_rid = self._get_non_existent_rid()
+ tgt = self._get_tgt(creds, from_rodc=True, new_rid=nonexistent_rid,
+ can_modify_logon_info=False)
+ self._run_tgs(tgt, creds, expected_error=KDC_ERR_TGT_REVOKED)
+
+ def test_tgs_rodc_logon_info_sid_mismatch_nonexisting(self):
+ creds = self._get_creds(replication_allowed=True,
+ revealed_to_rodc=True)
+ nonexistent_rid = self._get_non_existent_rid()
+ tgt = self._get_tgt(creds, from_rodc=True, new_rid=nonexistent_rid,
+ can_modify_requester_sid=False)
+ self._run_tgs(tgt, creds, expected_error=0)
+
+ def test_tgs_rodc_logon_info_only_sid_mismatch_nonexisting(self):
+ creds = self._get_creds(replication_allowed=True,
+ revealed_to_rodc=True)
+ nonexistent_rid = self._get_non_existent_rid()
+ tgt = self._get_tgt(creds, from_rodc=True, new_rid=nonexistent_rid,
+ remove_requester_sid=True)
+ self._run_tgs(tgt, creds, expected_error=KDC_ERR_TGT_REVOKED)
+
+ # Test with an RODC-issued ticket where the client is not revealed to the
+ # RODC.
+ def test_tgs_rodc_not_revealed(self):
+ creds = self._get_creds(replication_allowed=True)
+ tgt = self._get_tgt(creds, from_rodc=True)
+ # TODO: error code
+ self._run_tgs(tgt, creds, expected_error=KDC_ERR_TGT_REVOKED)
+
+ def test_renew_rodc_not_revealed(self):
+ creds = self._get_creds(replication_allowed=True)
+ tgt = self._get_tgt(creds, renewable=True, from_rodc=True)
+ self._renew_tgt(tgt, creds, expected_error=KDC_ERR_TGT_REVOKED)
+
+ def test_validate_rodc_not_revealed(self):
+ creds = self._get_creds(replication_allowed=True)
+ tgt = self._get_tgt(creds, invalid=True, from_rodc=True)
+ self._validate_tgt(tgt, creds, expected_error=KDC_ERR_TGT_REVOKED)
+
+ def test_s4u2self_rodc_not_revealed(self):
+ creds = self._get_creds(replication_allowed=True)
+ tgt = self._get_tgt(creds, from_rodc=True)
+ self._s4u2self(tgt, creds, expected_error=KDC_ERR_TGT_REVOKED)
+
+ def test_user2user_rodc_not_revealed(self):
+ creds = self._get_creds(replication_allowed=True)
+ tgt = self._get_tgt(creds, from_rodc=True)
+ self._user2user(tgt, creds, expected_error=KDC_ERR_TGT_REVOKED)
+
+ # Test with an RODC-issued ticket where the RODC account does not have the
+ # PARTIAL_SECRETS bit set.
+ def test_tgs_rodc_no_partial_secrets(self):
+ creds = self._get_creds(replication_allowed=True,
+ revealed_to_rodc=True)
+ tgt = self._get_tgt(creds, from_rodc=True)
+ self._remove_rodc_partial_secrets()
+ self._run_tgs(tgt, creds, expected_error=KDC_ERR_POLICY)
+
+ def test_renew_rodc_no_partial_secrets(self):
+ creds = self._get_creds(replication_allowed=True,
+ revealed_to_rodc=True)
+ tgt = self._get_tgt(creds, renewable=True, from_rodc=True)
+ self._remove_rodc_partial_secrets()
+ self._renew_tgt(tgt, creds, expected_error=KDC_ERR_POLICY)
+
+ def test_validate_rodc_no_partial_secrets(self):
+ creds = self._get_creds(replication_allowed=True,
+ revealed_to_rodc=True)
+ tgt = self._get_tgt(creds, invalid=True, from_rodc=True)
+ self._remove_rodc_partial_secrets()
+ self._validate_tgt(tgt, creds, expected_error=KDC_ERR_POLICY)
+
+ def test_s4u2self_rodc_no_partial_secrets(self):
+ creds = self._get_creds(replication_allowed=True,
+ revealed_to_rodc=True)
+ tgt = self._get_tgt(creds, from_rodc=True)
+ self._remove_rodc_partial_secrets()
+ self._s4u2self(tgt, creds, expected_error=KDC_ERR_POLICY)
+
+ def test_user2user_rodc_no_partial_secrets(self):
+ creds = self._get_creds(replication_allowed=True,
+ revealed_to_rodc=True)
+ tgt = self._get_tgt(creds, from_rodc=True)
+ self._remove_rodc_partial_secrets()
+ self._user2user(tgt, creds, expected_error=KDC_ERR_POLICY)
+
+ def test_fast_rodc_no_partial_secrets(self):
+ creds = self._get_creds(replication_allowed=True,
+ revealed_to_rodc=True)
+ tgt = self._get_tgt(creds, from_rodc=True)
+ self._remove_rodc_partial_secrets()
+ self._fast(tgt, creds, expected_error=KDC_ERR_POLICY,
+ expected_sname=self.get_krbtgt_sname())
+
+ # Test with an RODC-issued ticket where the RODC account does not have an
+ # msDS-KrbTgtLink.
+ def test_tgs_rodc_no_krbtgt_link(self):
+ creds = self._get_creds(replication_allowed=True,
+ revealed_to_rodc=True)
+ tgt = self._get_tgt(creds, from_rodc=True)
+ self._remove_rodc_krbtgt_link()
+ self._run_tgs(tgt, creds, expected_error=KDC_ERR_POLICY)
+
+ def test_renew_rodc_no_krbtgt_link(self):
+ creds = self._get_creds(replication_allowed=True,
+ revealed_to_rodc=True)
+ tgt = self._get_tgt(creds, renewable=True, from_rodc=True)
+ self._remove_rodc_krbtgt_link()
+ self._renew_tgt(tgt, creds, expected_error=KDC_ERR_POLICY)
+
+ def test_validate_rodc_no_krbtgt_link(self):
+ creds = self._get_creds(replication_allowed=True,
+ revealed_to_rodc=True)
+ tgt = self._get_tgt(creds, invalid=True, from_rodc=True)
+ self._remove_rodc_krbtgt_link()
+ self._validate_tgt(tgt, creds, expected_error=KDC_ERR_POLICY)
+
+ def test_s4u2self_rodc_no_krbtgt_link(self):
+ creds = self._get_creds(replication_allowed=True,
+ revealed_to_rodc=True)
+ tgt = self._get_tgt(creds, from_rodc=True)
+ self._remove_rodc_krbtgt_link()
+ self._s4u2self(tgt, creds, expected_error=KDC_ERR_POLICY)
+
+ def test_user2user_rodc_no_krbtgt_link(self):
+ creds = self._get_creds(replication_allowed=True,
+ revealed_to_rodc=True)
+ tgt = self._get_tgt(creds, from_rodc=True)
+ self._remove_rodc_krbtgt_link()
+ self._user2user(tgt, creds, expected_error=KDC_ERR_POLICY)
+
+ def test_fast_rodc_no_krbtgt_link(self):
+ creds = self._get_creds(replication_allowed=True,
+ revealed_to_rodc=True)
+ tgt = self._get_tgt(creds, from_rodc=True)
+ self._remove_rodc_krbtgt_link()
+ self._fast(tgt, creds, expected_error=KDC_ERR_POLICY,
+ expected_sname=self.get_krbtgt_sname())
+
+ # Test with an RODC-issued ticket where the client is not allowed to
+ # replicate to the RODC.
+ def test_tgs_rodc_not_allowed(self):
+ creds = self._get_creds(revealed_to_rodc=True)
+ tgt = self._get_tgt(creds, from_rodc=True)
+ self._run_tgs(tgt, creds, expected_error=KDC_ERR_TGT_REVOKED)
+
+ def test_renew_rodc_not_allowed(self):
+ creds = self._get_creds(revealed_to_rodc=True)
+ tgt = self._get_tgt(creds, renewable=True, from_rodc=True)
+ self._renew_tgt(tgt, creds, expected_error=KDC_ERR_TGT_REVOKED)
+
+ def test_validate_rodc_not_allowed(self):
+ creds = self._get_creds(revealed_to_rodc=True)
+ tgt = self._get_tgt(creds, invalid=True, from_rodc=True)
+ self._validate_tgt(tgt, creds, expected_error=KDC_ERR_TGT_REVOKED)
+
+ def test_s4u2self_rodc_not_allowed(self):
+ creds = self._get_creds(revealed_to_rodc=True)
+ tgt = self._get_tgt(creds, from_rodc=True)
+ self._s4u2self(tgt, creds, expected_error=KDC_ERR_TGT_REVOKED)
+
+ def test_user2user_rodc_not_allowed(self):
+ creds = self._get_creds(revealed_to_rodc=True)
+ tgt = self._get_tgt(creds, from_rodc=True)
+ self._user2user(tgt, creds, expected_error=KDC_ERR_TGT_REVOKED)
+
+ def test_fast_rodc_not_allowed(self):
+ creds = self._get_creds(revealed_to_rodc=True)
+ tgt = self._get_tgt(creds, from_rodc=True)
+ self._fast(tgt, creds, expected_error=KDC_ERR_TGT_REVOKED,
+ expected_sname=self.get_krbtgt_sname())
+
+ # Test with an RODC-issued ticket where the client is denied from
+ # replicating to the RODC.
+ def test_tgs_rodc_denied(self):
+ creds = self._get_creds(replication_denied=True,
+ revealed_to_rodc=True)
+ tgt = self._get_tgt(creds, from_rodc=True)
+ self._run_tgs(tgt, creds, expected_error=KDC_ERR_TGT_REVOKED)
+
+ def test_renew_rodc_denied(self):
+ creds = self._get_creds(replication_denied=True,
+ revealed_to_rodc=True)
+ tgt = self._get_tgt(creds, renewable=True, from_rodc=True)
+ self._renew_tgt(tgt, creds, expected_error=KDC_ERR_TGT_REVOKED)
+
+ def test_validate_rodc_denied(self):
+ creds = self._get_creds(replication_denied=True,
+ revealed_to_rodc=True)
+ tgt = self._get_tgt(creds, invalid=True, from_rodc=True)
+ self._validate_tgt(tgt, creds, expected_error=KDC_ERR_TGT_REVOKED)
+
+ def test_s4u2self_rodc_denied(self):
+ creds = self._get_creds(replication_denied=True,
+ revealed_to_rodc=True)
+ tgt = self._get_tgt(creds, from_rodc=True)
+ self._s4u2self(tgt, creds, expected_error=KDC_ERR_TGT_REVOKED)
+
+ def test_user2user_rodc_denied(self):
+ creds = self._get_creds(replication_denied=True,
+ revealed_to_rodc=True)
+ tgt = self._get_tgt(creds, from_rodc=True)
+ self._user2user(tgt, creds, expected_error=KDC_ERR_TGT_REVOKED)
+
+ def test_fast_rodc_denied(self):
+ creds = self._get_creds(replication_denied=True,
+ revealed_to_rodc=True)
+ tgt = self._get_tgt(creds, from_rodc=True)
+ self._fast(tgt, creds, expected_error=KDC_ERR_TGT_REVOKED,
+ expected_sname=self.get_krbtgt_sname())
+
+ # Test with an RODC-issued ticket where the client is both allowed and
+ # denied replicating to the RODC.
+ def test_tgs_rodc_allowed_denied(self):
+ creds = self._get_creds(replication_allowed=True,
+ replication_denied=True,
+ revealed_to_rodc=True)
+ tgt = self._get_tgt(creds, from_rodc=True)
+ self._run_tgs(tgt, creds, expected_error=KDC_ERR_TGT_REVOKED)
+
+ def test_renew_rodc_allowed_denied(self):
+ creds = self._get_creds(replication_allowed=True,
+ replication_denied=True,
+ revealed_to_rodc=True)
+ tgt = self._get_tgt(creds, renewable=True, from_rodc=True)
+ self._renew_tgt(tgt, creds, expected_error=KDC_ERR_TGT_REVOKED)
+
+ def test_validate_rodc_allowed_denied(self):
+ creds = self._get_creds(replication_allowed=True,
+ replication_denied=True,
+ revealed_to_rodc=True)
+ tgt = self._get_tgt(creds, invalid=True, from_rodc=True)
+ self._validate_tgt(tgt, creds, expected_error=KDC_ERR_TGT_REVOKED)
+
+ def test_s4u2self_rodc_allowed_denied(self):
+ creds = self._get_creds(replication_allowed=True,
+ replication_denied=True,
+ revealed_to_rodc=True)
+ tgt = self._get_tgt(creds, from_rodc=True)
+ self._s4u2self(tgt, creds, expected_error=KDC_ERR_TGT_REVOKED)
+
+ def test_user2user_rodc_allowed_denied(self):
+ creds = self._get_creds(replication_allowed=True,
+ replication_denied=True,
+ revealed_to_rodc=True)
+ tgt = self._get_tgt(creds, from_rodc=True)
+ self._user2user(tgt, creds, expected_error=KDC_ERR_TGT_REVOKED)
+
+ def test_fast_rodc_allowed_denied(self):
+ creds = self._get_creds(replication_allowed=True,
+ replication_denied=True,
+ revealed_to_rodc=True)
+ tgt = self._get_tgt(creds, from_rodc=True)
+ self._fast(tgt, creds, expected_error=KDC_ERR_TGT_REVOKED,
+ expected_sname=self.get_krbtgt_sname())
+
+ # Test making a TGS request with an RC4-encrypted TGT.
+ def test_tgs_rc4(self):
+ creds = self._get_creds()
+ tgt = self._get_tgt(creds, etype=kcrypto.Enctype.RC4)
+ self._run_tgs(tgt, creds, expected_error=(KDC_ERR_GENERIC,
+ KDC_ERR_BADKEYVER),
+ expect_edata=True,
+ # We aren’t particular about whether or not we get an
+ # NTSTATUS.
+ expect_status=None,
+ expected_status=ntstatus.NT_STATUS_INSUFFICIENT_RESOURCES)
+
+ def test_renew_rc4(self):
+ creds = self._get_creds()
+ tgt = self._get_tgt(creds, renewable=True, etype=kcrypto.Enctype.RC4)
+ self._renew_tgt(tgt, creds, expected_error=(KDC_ERR_GENERIC,
+ KDC_ERR_BADKEYVER),
+ expect_pac_attrs=True,
+ expect_pac_attrs_pac_request=True,
+ expect_requester_sid=True)
+
+ def test_validate_rc4(self):
+ creds = self._get_creds()
+ tgt = self._get_tgt(creds, invalid=True, etype=kcrypto.Enctype.RC4)
+ self._validate_tgt(tgt, creds, expected_error=(KDC_ERR_GENERIC,
+ KDC_ERR_BADKEYVER),
+ expect_pac_attrs=True,
+ expect_pac_attrs_pac_request=True,
+ expect_requester_sid=True)
+
+ def test_s4u2self_rc4(self):
+ creds = self._get_creds()
+ tgt = self._get_tgt(creds, etype=kcrypto.Enctype.RC4)
+ self._s4u2self(tgt, creds, expected_error=(KDC_ERR_GENERIC,
+ KDC_ERR_BADKEYVER),
+ expect_edata=True,
+ # We aren’t particular about whether or not we get an
+ # NTSTATUS.
+ expect_status=None,
+ expected_status=ntstatus.NT_STATUS_INSUFFICIENT_RESOURCES)
+
+ def test_user2user_rc4(self):
+ creds = self._get_creds()
+ tgt = self._get_tgt(creds, etype=kcrypto.Enctype.RC4)
+ self._user2user(tgt, creds, expected_error=KDC_ERR_ETYPE_NOSUPP)
+
+ def test_fast_rc4(self):
+ creds = self._get_creds()
+ tgt = self._get_tgt(creds, etype=kcrypto.Enctype.RC4)
+ self._fast(tgt, creds, expected_error=KDC_ERR_GENERIC,
+ expect_edata=self.expect_padata_outer)
+
+ # Test with a TGT that has the lifetime of a kpasswd ticket (two minutes).
+ def test_tgs_kpasswd(self):
+ creds = self._get_creds()
+ tgt = self.modify_lifetime(self._get_tgt(creds), lifetime=2 * 60)
+ self._run_tgs(tgt, creds, expected_error=KDC_ERR_TKT_EXPIRED)
+
+ def test_renew_kpasswd(self):
+ creds = self._get_creds()
+ tgt = self._get_tgt(creds, renewable=True)
+ tgt = self.modify_lifetime(tgt, lifetime=2 * 60)
+ self._renew_tgt(tgt, creds, expected_error=KDC_ERR_TKT_EXPIRED)
+
+ def test_validate_kpasswd(self):
+ creds = self._get_creds()
+ tgt = self._get_tgt(creds, invalid=True)
+ tgt = self.modify_lifetime(tgt, lifetime=2 * 60)
+ self._validate_tgt(tgt, creds, expected_error=KDC_ERR_TKT_EXPIRED)
+
+ def test_s4u2self_kpasswd(self):
+ creds = self._get_creds()
+ tgt = self.modify_lifetime(self._get_tgt(creds), lifetime=2 * 60)
+ self._s4u2self(tgt, creds, expected_error=KDC_ERR_TKT_EXPIRED)
+
+ def test_user2user_kpasswd(self):
+ creds = self._get_creds()
+ tgt = self.modify_lifetime(self._get_tgt(creds), lifetime=2 * 60)
+ self._user2user(tgt, creds, expected_error=KDC_ERR_TKT_EXPIRED)
+
+ def test_fast_kpasswd(self):
+ creds = self._get_creds()
+ tgt = self.modify_lifetime(self._get_tgt(creds), lifetime=2 * 60)
+ self._fast(tgt, creds, expected_error=KDC_ERR_TKT_EXPIRED)
+
+ # Test user-to-user with incorrect service principal names.
+ def test_user2user_matching_sname_host(self):
+ creds = self._get_creds()
+ tgt = self._get_tgt(creds)
+
+ user_name = creds.get_username()
+ sname = self.PrincipalName_create(name_type=NT_PRINCIPAL,
+ names=['host', user_name])
+
+ self._user2user(tgt, creds, sname=sname,
+ expected_error=KDC_ERR_S_PRINCIPAL_UNKNOWN)
+
+ def test_user2user_matching_sname_no_host(self):
+ creds = self._get_creds()
+ tgt = self._get_tgt(creds)
+
+ user_name = creds.get_username()
+ sname = self.PrincipalName_create(name_type=NT_PRINCIPAL,
+ names=[user_name])
+
+ self._user2user(tgt, creds, sname=sname, expected_error=0)
+
+ def test_user2user_wrong_sname(self):
+ creds = self._get_creds()
+ tgt = self._get_tgt(creds)
+
+ other_creds = self._get_mach_creds()
+ user_name = other_creds.get_username()
+ sname = self.PrincipalName_create(name_type=NT_PRINCIPAL,
+ names=[user_name])
+
+ self._user2user(tgt, creds, sname=sname,
+ expected_error=KDC_ERR_BADMATCH)
+
+ def test_user2user_other_sname(self):
+ other_name = self.get_new_username()
+ spn = f'host/{other_name}'
+ creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={'spn': spn})
+ tgt = self._get_tgt(creds)
+
+ sname = self.PrincipalName_create(name_type=NT_PRINCIPAL,
+ names=['host', other_name])
+
+ self._user2user(tgt, creds, sname=sname, expected_error=0)
+
+ def test_user2user_wrong_sname_krbtgt(self):
+ creds = self._get_creds()
+ tgt = self._get_tgt(creds)
+
+ sname = self.get_krbtgt_sname()
+
+ self._user2user(tgt, creds, sname=sname,
+ expected_error=KDC_ERR_BADMATCH)
+
+ def test_user2user_wrong_srealm(self):
+ creds = self._get_creds()
+ tgt = self._get_tgt(creds)
+
+ self._user2user(tgt, creds, srealm='OTHER.REALM',
+ expected_error=(KDC_ERR_WRONG_REALM,
+ KDC_ERR_S_PRINCIPAL_UNKNOWN))
+
+ def test_user2user_tgt_correct_realm(self):
+ creds = self._get_creds()
+ tgt = self._get_tgt(creds)
+
+ realm = creds.get_realm().encode('utf-8')
+ tgt = self._modify_tgt(tgt, crealm=realm)
+
+ self._user2user(tgt, creds,
+ expected_error=0)
+
+ def test_user2user_tgt_wrong_realm(self):
+ creds = self._get_creds()
+ tgt = self._get_tgt(creds)
+
+ tgt = self._modify_tgt(tgt, crealm=b'OTHER.REALM')
+
+ self._user2user(tgt, creds,
+ expected_error=(
+ KDC_ERR_POLICY, # Windows
+ KDC_ERR_C_PRINCIPAL_UNKNOWN, # Heimdal
+ KDC_ERR_SERVER_NOMATCH, # MIT
+ ),
+ expect_edata=True,
+ expected_status=ntstatus.NT_STATUS_NO_MATCH)
+
+ def test_user2user_tgt_correct_cname(self):
+ creds = self._get_creds()
+ tgt = self._get_tgt(creds)
+
+ user_name = creds.get_username()
+ user_name = user_name.encode('utf-8')
+ cname = self.PrincipalName_create(name_type=NT_PRINCIPAL,
+ names=[user_name])
+
+ tgt = self._modify_tgt(tgt, cname=cname)
+
+ self._user2user(tgt, creds, expected_error=0)
+
+ def test_user2user_tgt_other_cname(self):
+ samdb = self.get_samdb()
+
+ other_name = self.get_new_username()
+ upn = f'{other_name}@{samdb.domain_dns_name()}'
+
+ creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={'upn': upn})
+ tgt = self._get_tgt(creds)
+
+ cname = self.PrincipalName_create(name_type=NT_PRINCIPAL,
+ names=[other_name.encode('utf-8')])
+
+ tgt = self._modify_tgt(tgt, cname=cname)
+
+ self._user2user(tgt, creds, expected_error=0)
+
+ def test_user2user_tgt_cname_host(self):
+ creds = self._get_creds()
+ tgt = self._get_tgt(creds)
+
+ user_name = creds.get_username()
+ user_name = user_name.encode('utf-8')
+ cname = self.PrincipalName_create(name_type=NT_PRINCIPAL,
+ names=[b'host', user_name])
+
+ tgt = self._modify_tgt(tgt, cname=cname)
+
+ self._user2user(tgt, creds,
+ expected_error=(KDC_ERR_TGT_REVOKED,
+ KDC_ERR_C_PRINCIPAL_UNKNOWN))
+
+ def test_user2user_non_existent_sname(self):
+ creds = self._get_creds()
+ tgt = self._get_tgt(creds)
+
+ sname = self.PrincipalName_create(name_type=NT_PRINCIPAL,
+ names=['host', 'non_existent_user'])
+
+ self._user2user(tgt, creds, sname=sname,
+ expected_error=KDC_ERR_S_PRINCIPAL_UNKNOWN)
+
+ def test_user2user_no_sname(self):
+ creds = self._get_creds()
+ tgt = self._get_tgt(creds)
+
+ self._user2user(tgt, creds, sname=False,
+ expected_error=(KDC_ERR_GENERIC,
+ KDC_ERR_S_PRINCIPAL_UNKNOWN))
+
+ def test_tgs_service_ticket(self):
+ creds = self._get_creds()
+ tgt = self._get_tgt(creds)
+
+ service_creds = self.get_service_creds()
+ service_ticket = self.get_service_ticket(tgt, service_creds)
+
+ self._run_tgs(service_ticket, creds,
+ expected_error=(KDC_ERR_NOT_US, KDC_ERR_POLICY))
+
+ def test_renew_service_ticket(self):
+ creds = self._get_creds()
+ tgt = self._get_tgt(creds)
+
+ service_creds = self.get_service_creds()
+ service_ticket = self.get_service_ticket(tgt, service_creds)
+
+ service_ticket = self.modified_ticket(
+ service_ticket,
+ modify_fn=self._modify_renewable,
+ checksum_keys=self.get_krbtgt_checksum_key())
+
+ self._renew_tgt(service_ticket, creds,
+ expected_error=KDC_ERR_POLICY)
+
+ def test_validate_service_ticket(self):
+ creds = self._get_creds()
+ tgt = self._get_tgt(creds)
+
+ service_creds = self.get_service_creds()
+ service_ticket = self.get_service_ticket(tgt, service_creds)
+
+ service_ticket = self.modified_ticket(
+ service_ticket,
+ modify_fn=self._modify_invalid,
+ checksum_keys=self.get_krbtgt_checksum_key())
+
+ self._validate_tgt(service_ticket, creds,
+ expected_error=KDC_ERR_POLICY)
+
+ def test_s4u2self_service_ticket(self):
+ creds = self._get_creds()
+ tgt = self._get_tgt(creds)
+
+ service_creds = self.get_service_creds()
+ service_ticket = self.get_service_ticket(tgt, service_creds)
+
+ self._s4u2self(service_ticket, creds,
+ expected_error=(KDC_ERR_NOT_US, KDC_ERR_POLICY))
+
+ def test_user2user_service_ticket(self):
+ creds = self._get_creds()
+ tgt = self._get_tgt(creds)
+
+ service_creds = self.get_service_creds()
+ service_ticket = self.get_service_ticket(tgt, service_creds)
+
+ self._user2user(service_ticket, creds,
+ expected_error=(KDC_ERR_MODIFIED, KDC_ERR_POLICY))
+
+ # Expected to fail against Windows, which does not produce an error.
+ def test_fast_service_ticket(self):
+ creds = self._get_creds()
+ tgt = self._get_tgt(creds)
+
+ service_creds = self.get_service_creds()
+ service_ticket = self.get_service_ticket(tgt, service_creds)
+
+ self._fast(service_ticket, creds,
+ expected_error=(KDC_ERR_POLICY,
+ KDC_ERR_S_PRINCIPAL_UNKNOWN))
+
+ def test_single_component_krbtgt_requester_sid_as_req(self):
+ """Test that TGTs issued to a single‐component krbtgt principal always
+ contain a requester SID PAC buffer.
+ """
+
+ creds = self._get_creds()
+
+ # Create a single‐component principal of the form ‘krbtgt@REALM’.
+ sname = self.PrincipalName_create(name_type=NT_PRINCIPAL,
+ names=['krbtgt'])
+
+ # Don’t request canonicalization.
+ kdc_options = 'forwardable,renewable,renewable-ok'
+
+ # Get a TGT and assert that the requester SID PAC buffer is present.
+ self.get_tgt(creds,
+ sname=sname,
+ kdc_options=kdc_options,
+ expect_requester_sid=True)
+
+ def test_single_component_krbtgt_requester_sid_tgs_req(self):
+ """Test that TGTs issued to a single‐component krbtgt principal always
+ contain a requester SID PAC buffer.
+ """
+
+ creds = self._get_creds()
+ tgt = self.get_tgt(creds)
+
+ # Create a single‐component principal of the form ‘krbtgt@REALM’.
+ sname = self.PrincipalName_create(name_type=NT_PRINCIPAL,
+ names=['krbtgt'])
+
+ # Don’t request canonicalization.
+ kdc_options = '0'
+
+ # Get a TGT and assert that the requester SID PAC buffer is present.
+ self.get_service_ticket(tgt,
+ self.get_krbtgt_creds(),
+ sname=sname,
+ kdc_options=kdc_options,
+ expect_requester_sid=True)
+
+ def test_single_component_krbtgt_no_pac_as_req(self):
+ """Test that TGTs issued to a single‐component krbtgt principal always
+ contain a PAC.
+ """
+
+ creds = self._get_creds()
+
+ # Create a single‐component principal of the form ‘krbtgt@REALM’.
+ sname = self.PrincipalName_create(name_type=NT_PRINCIPAL,
+ names=['krbtgt'])
+
+ # Don’t request canonicalization.
+ kdc_options = 'forwardable,renewable,renewable-ok'
+
+ # Get a TGT and assert that the requester SID PAC buffer is present.
+ self.get_tgt(creds,
+ sname=sname,
+ kdc_options=kdc_options,
+ # Request that no PAC be issued.
+ pac_request=False,
+ # Ensure that a PAC is issued nonetheless.
+ expect_pac=True)
+
+ def test_single_component_krbtgt_no_pac_tgs_req(self):
+ """Test that TGTs issued to a single‐component krbtgt principal always
+ contain a PAC.
+ """
+
+ creds = self._get_creds()
+ tgt = self.get_tgt(creds)
+
+ # Create a single‐component principal of the form ‘krbtgt@REALM’.
+ sname = self.PrincipalName_create(name_type=NT_PRINCIPAL,
+ names=['krbtgt'])
+
+ # Don’t request canonicalization.
+ kdc_options = '0'
+
+ # Get a TGT and assert that the requester SID PAC buffer is present.
+ self.get_service_ticket(tgt,
+ self.get_krbtgt_creds(),
+ sname=sname,
+ kdc_options=kdc_options,
+ # Request that no PAC be issued.
+ pac_request=False,
+ # Ensure that a PAC is issued nonetheless.
+ expect_pac=True,
+ expect_pac_attrs=True,
+ expect_pac_attrs_pac_request=True)
+
+ def test_single_component_krbtgt_service_ticket(self):
+ """Test that TGTs issued to a single‐component krbtgt principal can be
+ used to get service tickets.
+ """
+
+ creds = self._get_creds()
+
+ # Create a single‐component principal of the form ‘krbtgt@REALM’.
+ sname = self.PrincipalName_create(name_type=NT_PRINCIPAL,
+ names=['krbtgt'])
+
+ # Don’t request canonicalization.
+ kdc_options = 'forwardable,renewable,renewable-ok'
+
+ # Get a TGT.
+ tgt = self.get_tgt(creds,
+ sname=sname,
+ kdc_options=kdc_options)
+
+ # Ensure that we can use the TGT to get a service ticket.
+ self._run_tgs(tgt, creds, expected_error=0)
+
+ def test_pac_attrs_none(self):
+ creds = self._get_creds()
+ self.get_tgt(creds, pac_request=None,
+ expect_pac=True,
+ expect_pac_attrs=True,
+ expect_pac_attrs_pac_request=None)
+
+ def test_pac_attrs_false(self):
+ creds = self._get_creds()
+ self.get_tgt(creds, pac_request=False,
+ expect_pac=True,
+ expect_pac_attrs=True,
+ expect_pac_attrs_pac_request=False)
+
+ def test_pac_attrs_true(self):
+ creds = self._get_creds()
+ self.get_tgt(creds, pac_request=True,
+ expect_pac=True,
+ expect_pac_attrs=True,
+ expect_pac_attrs_pac_request=True)
+
+ def test_pac_attrs_renew_none(self):
+ creds = self._get_creds()
+ tgt = self.get_tgt(creds, pac_request=None,
+ expect_pac=True,
+ expect_pac_attrs=True,
+ expect_pac_attrs_pac_request=None)
+ tgt = self._modify_tgt(tgt, renewable=True)
+
+ self._renew_tgt(tgt, creds, expected_error=0,
+ expect_pac=True,
+ expect_pac_attrs=True,
+ expect_pac_attrs_pac_request=None,
+ expect_requester_sid=True)
+
+ def test_pac_attrs_renew_false(self):
+ creds = self._get_creds()
+ tgt = self.get_tgt(creds, pac_request=False,
+ expect_pac=True,
+ expect_pac_attrs=True,
+ expect_pac_attrs_pac_request=False)
+ tgt = self._modify_tgt(tgt, renewable=True)
+
+ self._renew_tgt(tgt, creds, expected_error=0,
+ expect_pac=True,
+ expect_pac_attrs=True,
+ expect_pac_attrs_pac_request=False,
+ expect_requester_sid=True)
+
+ def test_pac_attrs_renew_true(self):
+ creds = self._get_creds()
+ tgt = self.get_tgt(creds, pac_request=True,
+ expect_pac=True,
+ expect_pac_attrs=True,
+ expect_pac_attrs_pac_request=True)
+ tgt = self._modify_tgt(tgt, renewable=True)
+
+ self._renew_tgt(tgt, creds, expected_error=0,
+ expect_pac=True,
+ expect_pac_attrs=True,
+ expect_pac_attrs_pac_request=True,
+ expect_requester_sid=True)
+
+ def test_pac_attrs_rodc_renew_none(self):
+ creds = self._get_creds(replication_allowed=True,
+ revealed_to_rodc=True)
+ tgt = self.get_tgt(creds, pac_request=None,
+ expect_pac=True,
+ expect_pac_attrs=True,
+ expect_pac_attrs_pac_request=None)
+ tgt = self._modify_tgt(tgt, from_rodc=True, renewable=True)
+
+ self._renew_tgt(tgt, creds, expected_error=0,
+ expect_pac=True,
+ expect_pac_attrs=False,
+ expect_requester_sid=True)
+
+ def test_pac_attrs_rodc_renew_false(self):
+ creds = self._get_creds(replication_allowed=True,
+ revealed_to_rodc=True)
+ tgt = self.get_tgt(creds, pac_request=False,
+ expect_pac=True,
+ expect_pac_attrs=True,
+ expect_pac_attrs_pac_request=False)
+ tgt = self._modify_tgt(tgt, from_rodc=True, renewable=True)
+
+ self._renew_tgt(tgt, creds, expected_error=0,
+ expect_pac=True,
+ expect_pac_attrs=False,
+ expect_requester_sid=True)
+
+ def test_pac_attrs_rodc_renew_true(self):
+ creds = self._get_creds(replication_allowed=True,
+ revealed_to_rodc=True)
+ tgt = self.get_tgt(creds, pac_request=True,
+ expect_pac=True,
+ expect_pac_attrs=True,
+ expect_pac_attrs_pac_request=True)
+ tgt = self._modify_tgt(tgt, from_rodc=True, renewable=True)
+
+ self._renew_tgt(tgt, creds, expected_error=0,
+ expect_pac=True,
+ expect_pac_attrs=False,
+ expect_requester_sid=True)
+
+ def test_pac_attrs_missing_renew_none(self):
+ creds = self._get_creds()
+ tgt = self.get_tgt(creds, pac_request=None,
+ expect_pac=True,
+ expect_pac_attrs=True,
+ expect_pac_attrs_pac_request=None)
+ tgt = self._modify_tgt(tgt, renewable=True,
+ remove_pac_attrs=True)
+
+ self._renew_tgt(tgt, creds, expected_error=0,
+ expect_pac=True,
+ expect_pac_attrs=False,
+ expect_requester_sid=True)
+
+ def test_pac_attrs_missing_renew_false(self):
+ creds = self._get_creds()
+ tgt = self.get_tgt(creds, pac_request=False,
+ expect_pac=True,
+ expect_pac_attrs=True,
+ expect_pac_attrs_pac_request=False)
+ tgt = self._modify_tgt(tgt, renewable=True,
+ remove_pac_attrs=True)
+
+ self._renew_tgt(tgt, creds, expected_error=0,
+ expect_pac=True,
+ expect_pac_attrs=False,
+ expect_requester_sid=True)
+
+ def test_pac_attrs_missing_renew_true(self):
+ creds = self._get_creds()
+ tgt = self.get_tgt(creds, pac_request=True,
+ expect_pac=True,
+ expect_pac_attrs=True,
+ expect_pac_attrs_pac_request=True)
+ tgt = self._modify_tgt(tgt, renewable=True,
+ remove_pac_attrs=True)
+
+ self._renew_tgt(tgt, creds, expected_error=0,
+ expect_pac=True,
+ expect_pac_attrs=False,
+ expect_requester_sid=True)
+
+ def test_pac_attrs_missing_rodc_renew_none(self):
+ creds = self._get_creds(replication_allowed=True,
+ revealed_to_rodc=True)
+ tgt = self.get_tgt(creds, pac_request=None,
+ expect_pac=True,
+ expect_pac_attrs=True,
+ expect_pac_attrs_pac_request=None)
+ tgt = self._modify_tgt(tgt, from_rodc=True, renewable=True,
+ remove_pac_attrs=True)
+
+ self._renew_tgt(tgt, creds, expected_error=0,
+ expect_pac=True,
+ expect_pac_attrs=False,
+ expect_requester_sid=True)
+
+ def test_pac_attrs_missing_rodc_renew_false(self):
+ creds = self._get_creds(replication_allowed=True,
+ revealed_to_rodc=True)
+ tgt = self.get_tgt(creds, pac_request=False,
+ expect_pac=True,
+ expect_pac_attrs=True,
+ expect_pac_attrs_pac_request=False)
+ tgt = self._modify_tgt(tgt, from_rodc=True, renewable=True,
+ remove_pac_attrs=True)
+
+ self._renew_tgt(tgt, creds, expected_error=0,
+ expect_pac=True,
+ expect_pac_attrs=False,
+ expect_requester_sid=True)
+
+ def test_pac_attrs_missing_rodc_renew_true(self):
+ creds = self._get_creds(replication_allowed=True,
+ revealed_to_rodc=True)
+ tgt = self.get_tgt(creds, pac_request=True,
+ expect_pac=True,
+ expect_pac_attrs=True,
+ expect_pac_attrs_pac_request=True)
+ tgt = self._modify_tgt(tgt, from_rodc=True, renewable=True,
+ remove_pac_attrs=True)
+
+ self._renew_tgt(tgt, creds, expected_error=0,
+ expect_pac=True,
+ expect_pac_attrs=False,
+ expect_requester_sid=True)
+
+ def test_tgs_pac_attrs_none(self):
+ creds = self._get_creds()
+ tgt = self.get_tgt(creds, pac_request=None,
+ expect_pac=True,
+ expect_pac_attrs=True,
+ expect_pac_attrs_pac_request=None)
+
+ self._run_tgs(tgt, creds, expected_error=0, expect_pac=True,
+ expect_pac_attrs=False)
+
+ def test_tgs_pac_attrs_false(self):
+ creds = self._get_creds()
+ tgt = self.get_tgt(creds, pac_request=False,
+ expect_pac=True,
+ expect_pac_attrs=True,
+ expect_pac_attrs_pac_request=False)
+
+ self._run_tgs(tgt, creds, expected_error=0, expect_pac=False,
+ expect_pac_attrs=False)
+
+ def test_tgs_pac_attrs_true(self):
+ creds = self._get_creds()
+ tgt = self.get_tgt(creds, pac_request=True,
+ expect_pac=True,
+ expect_pac_attrs=True,
+ expect_pac_attrs_pac_request=True)
+
+ self._run_tgs(tgt, creds, expected_error=0, expect_pac=True,
+ expect_pac_attrs=False)
+
+ def test_as_requester_sid(self):
+ creds = self._get_creds()
+
+ sid = creds.get_sid()
+
+ self.get_tgt(creds, pac_request=None,
+ expect_pac=True,
+ expected_sid=sid,
+ expect_requester_sid=True)
+
+ def test_tgs_requester_sid(self):
+ creds = self._get_creds()
+
+ sid = creds.get_sid()
+
+ tgt = self.get_tgt(creds, pac_request=None,
+ expect_pac=True,
+ expected_sid=sid,
+ expect_requester_sid=True)
+
+ self._run_tgs(tgt, creds, expected_error=0, expect_pac=True,
+ expect_requester_sid=False)
+
+ def test_tgs_requester_sid_renew(self):
+ creds = self._get_creds()
+
+ sid = creds.get_sid()
+
+ tgt = self.get_tgt(creds, pac_request=None,
+ expect_pac=True,
+ expected_sid=sid,
+ expect_requester_sid=True)
+ tgt = self._modify_tgt(tgt, renewable=True)
+
+ self._renew_tgt(tgt, creds, expected_error=0, expect_pac=True,
+ expect_pac_attrs=True,
+ expect_pac_attrs_pac_request=None,
+ expected_sid=sid,
+ expect_requester_sid=True)
+
+ def test_tgs_requester_sid_rodc_renew(self):
+ creds = self._get_creds(replication_allowed=True,
+ revealed_to_rodc=True)
+
+ sid = creds.get_sid()
+
+ tgt = self.get_tgt(creds, pac_request=None,
+ expect_pac=True,
+ expected_sid=sid,
+ expect_requester_sid=True)
+ tgt = self._modify_tgt(tgt, from_rodc=True, renewable=True)
+
+ self._renew_tgt(tgt, creds, expected_error=0, expect_pac=True,
+ expect_pac_attrs=False,
+ expected_sid=sid,
+ expect_requester_sid=True)
+
+ def test_tgs_requester_sid_missing_renew(self):
+ creds = self._get_creds()
+
+ sid = creds.get_sid()
+
+ tgt = self.get_tgt(creds, pac_request=None,
+ expect_pac=True,
+ expected_sid=sid,
+ expect_requester_sid=True)
+ tgt = self._modify_tgt(tgt, renewable=True,
+ remove_requester_sid=True)
+
+ self._renew_tgt(tgt, creds, expected_error=KDC_ERR_TGT_REVOKED)
+
+ def test_tgs_requester_sid_missing_rodc_renew(self):
+ creds = self._get_creds(replication_allowed=True,
+ revealed_to_rodc=True)
+
+ sid = creds.get_sid()
+
+ tgt = self.get_tgt(creds, pac_request=None,
+ expect_pac=True,
+ expected_sid=sid,
+ expect_requester_sid=True)
+ tgt = self._modify_tgt(tgt, from_rodc=True, renewable=True,
+ remove_requester_sid=True)
+
+ self._renew_tgt(tgt, creds, expected_error=KDC_ERR_TGT_REVOKED)
+
+ def test_tgs_requester_sid_validate(self):
+ creds = self._get_creds()
+
+ sid = creds.get_sid()
+
+ tgt = self.get_tgt(creds, pac_request=None,
+ expect_pac=True,
+ expected_sid=sid,
+ expect_requester_sid=True)
+ tgt = self._modify_tgt(tgt, invalid=True)
+
+ self._validate_tgt(tgt, creds, expected_error=0, expect_pac=True,
+ expect_pac_attrs=True,
+ expect_pac_attrs_pac_request=None,
+ expected_sid=sid,
+ expect_requester_sid=True)
+
+ def test_tgs_requester_sid_rodc_validate(self):
+ creds = self._get_creds(replication_allowed=True,
+ revealed_to_rodc=True)
+
+ sid = creds.get_sid()
+
+ tgt = self.get_tgt(creds, pac_request=None,
+ expect_pac=True,
+ expected_sid=sid,
+ expect_requester_sid=True)
+ tgt = self._modify_tgt(tgt, from_rodc=True, invalid=True)
+
+ self._validate_tgt(tgt, creds, expected_error=0, expect_pac=True,
+ expect_pac_attrs=False,
+ expected_sid=sid,
+ expect_requester_sid=True)
+
+ def test_tgs_requester_sid_missing_validate(self):
+ creds = self._get_creds()
+
+ sid = creds.get_sid()
+
+ tgt = self.get_tgt(creds, pac_request=None,
+ expect_pac=True,
+ expected_sid=sid,
+ expect_requester_sid=True)
+ tgt = self._modify_tgt(tgt, invalid=True,
+ remove_requester_sid=True)
+
+ self._validate_tgt(tgt, creds, expected_error=KDC_ERR_TGT_REVOKED)
+
+ def test_tgs_requester_sid_missing_rodc_validate(self):
+ creds = self._get_creds(replication_allowed=True,
+ revealed_to_rodc=True)
+
+ sid = creds.get_sid()
+
+ tgt = self.get_tgt(creds, pac_request=None,
+ expect_pac=True,
+ expected_sid=sid,
+ expect_requester_sid=True)
+ tgt = self._modify_tgt(tgt, from_rodc=True, invalid=True,
+ remove_requester_sid=True)
+
+ self._validate_tgt(tgt, creds, expected_error=KDC_ERR_TGT_REVOKED)
+
+ def test_tgs_pac_request_none(self):
+ creds = self._get_creds()
+ tgt = self.get_tgt(creds, pac_request=None)
+
+ ticket = self._run_tgs(tgt, creds, expected_error=0, expect_pac=True)
+
+ pac = self.get_ticket_pac(ticket)
+ self.assertIsNotNone(pac)
+
+ def test_tgs_pac_request_false(self):
+ creds = self._get_creds()
+ tgt = self.get_tgt(creds, pac_request=False, expect_pac=None)
+
+ ticket = self._run_tgs(tgt, creds, expected_error=0, expect_pac=False)
+
+ pac = self.get_ticket_pac(ticket, expect_pac=False)
+ self.assertIsNone(pac)
+
+ def test_tgs_pac_request_true(self):
+ creds = self._get_creds()
+ tgt = self.get_tgt(creds, pac_request=True)
+
+ ticket = self._run_tgs(tgt, creds, expected_error=0, expect_pac=True)
+
+ pac = self.get_ticket_pac(ticket)
+ self.assertIsNotNone(pac)
+
+ def test_renew_pac_request_none(self):
+ creds = self._get_creds()
+ tgt = self.get_tgt(creds, pac_request=None)
+ tgt = self._modify_tgt(tgt, renewable=True)
+
+ tgt = self._renew_tgt(tgt, creds, expected_error=0, expect_pac=None,
+ expect_pac_attrs=True,
+ expect_pac_attrs_pac_request=None,
+ expect_requester_sid=True)
+
+ ticket = self._run_tgs(tgt, creds, expected_error=0, expect_pac=True)
+
+ pac = self.get_ticket_pac(ticket)
+ self.assertIsNotNone(pac)
+
+ def test_renew_pac_request_false(self):
+ creds = self._get_creds()
+ tgt = self.get_tgt(creds, pac_request=False, expect_pac=None)
+ tgt = self._modify_tgt(tgt, renewable=True)
+
+ tgt = self._renew_tgt(tgt, creds, expected_error=0, expect_pac=None,
+ expect_pac_attrs=True,
+ expect_pac_attrs_pac_request=False,
+ expect_requester_sid=True)
+
+ ticket = self._run_tgs(tgt, creds, expected_error=0, expect_pac=False)
+
+ pac = self.get_ticket_pac(ticket, expect_pac=False)
+ self.assertIsNone(pac)
+
+ def test_renew_pac_request_true(self):
+ creds = self._get_creds()
+ tgt = self.get_tgt(creds, pac_request=True)
+ tgt = self._modify_tgt(tgt, renewable=True)
+
+ tgt = self._renew_tgt(tgt, creds, expected_error=0, expect_pac=None,
+ expect_pac_attrs=True,
+ expect_pac_attrs_pac_request=True,
+ expect_requester_sid=True)
+
+ ticket = self._run_tgs(tgt, creds, expected_error=0, expect_pac=True)
+
+ pac = self.get_ticket_pac(ticket)
+ self.assertIsNotNone(pac)
+
+ def test_rodc_renew_pac_request_none(self):
+ creds = self._get_creds(replication_allowed=True,
+ revealed_to_rodc=True)
+ tgt = self.get_tgt(creds, pac_request=None)
+ tgt = self._modify_tgt(tgt, renewable=True, from_rodc=True)
+
+ tgt = self._renew_tgt(tgt, creds, expected_error=0, expect_pac=None,
+ expect_pac_attrs=False,
+ expect_requester_sid=True)
+
+ ticket = self._run_tgs(tgt, creds, expected_error=0, expect_pac=True)
+
+ pac = self.get_ticket_pac(ticket)
+ self.assertIsNotNone(pac)
+
+ def test_rodc_renew_pac_request_false(self):
+ creds = self._get_creds(replication_allowed=True,
+ revealed_to_rodc=True)
+ tgt = self.get_tgt(creds, pac_request=False, expect_pac=None)
+ tgt = self._modify_tgt(tgt, renewable=True, from_rodc=True)
+
+ tgt = self._renew_tgt(tgt, creds, expected_error=0, expect_pac=None,
+ expect_pac_attrs=False,
+ expect_requester_sid=True)
+
+ ticket = self._run_tgs(tgt, creds, expected_error=0, expect_pac=True)
+
+ pac = self.get_ticket_pac(ticket)
+ self.assertIsNotNone(pac)
+
+ def test_rodc_renew_pac_request_true(self):
+ creds = self._get_creds(replication_allowed=True,
+ revealed_to_rodc=True)
+ tgt = self.get_tgt(creds, pac_request=True)
+ tgt = self._modify_tgt(tgt, renewable=True, from_rodc=True)
+
+ tgt = self._renew_tgt(tgt, creds, expected_error=0, expect_pac=None,
+ expect_pac_attrs=False,
+ expect_requester_sid=True)
+
+ ticket = self._run_tgs(tgt, creds, expected_error=0, expect_pac=True)
+
+ pac = self.get_ticket_pac(ticket)
+ self.assertIsNotNone(pac)
+
+ def test_validate_pac_request_none(self):
+ creds = self._get_creds()
+ tgt = self.get_tgt(creds, pac_request=None)
+ tgt = self._modify_tgt(tgt, invalid=True)
+
+ tgt = self._validate_tgt(tgt, creds, expected_error=0, expect_pac=None,
+ expect_pac_attrs=True,
+ expect_pac_attrs_pac_request=None,
+ expect_requester_sid=True)
+
+ ticket = self._run_tgs(tgt, creds, expected_error=0, expect_pac=True)
+
+ pac = self.get_ticket_pac(ticket)
+ self.assertIsNotNone(pac)
+
+ def test_validate_pac_request_false(self):
+ creds = self._get_creds()
+ tgt = self.get_tgt(creds, pac_request=False, expect_pac=None)
+ tgt = self._modify_tgt(tgt, invalid=True)
+
+ tgt = self._validate_tgt(tgt, creds, expected_error=0, expect_pac=None,
+ expect_pac_attrs=True,
+ expect_pac_attrs_pac_request=False,
+ expect_requester_sid=True)
+
+ ticket = self._run_tgs(tgt, creds, expected_error=0, expect_pac=False)
+
+ pac = self.get_ticket_pac(ticket, expect_pac=False)
+ self.assertIsNone(pac)
+
+ def test_validate_pac_request_true(self):
+ creds = self._get_creds()
+ tgt = self.get_tgt(creds, pac_request=True)
+ tgt = self._modify_tgt(tgt, invalid=True)
+
+ tgt = self._validate_tgt(tgt, creds, expected_error=0, expect_pac=None,
+ expect_pac_attrs=True,
+ expect_pac_attrs_pac_request=True,
+ expect_requester_sid=True)
+
+ ticket = self._run_tgs(tgt, creds, expected_error=0, expect_pac=True)
+
+ pac = self.get_ticket_pac(ticket)
+ self.assertIsNotNone(pac)
+
+ def test_rodc_validate_pac_request_none(self):
+ creds = self._get_creds(replication_allowed=True,
+ revealed_to_rodc=True)
+ tgt = self.get_tgt(creds, pac_request=None)
+ tgt = self._modify_tgt(tgt, invalid=True, from_rodc=True)
+
+ tgt = self._validate_tgt(tgt, creds, expected_error=0, expect_pac=None,
+ expect_pac_attrs=False,
+ expect_requester_sid=True)
+
+ ticket = self._run_tgs(tgt, creds, expected_error=0, expect_pac=True)
+
+ pac = self.get_ticket_pac(ticket)
+ self.assertIsNotNone(pac)
+
+ def test_rodc_validate_pac_request_false(self):
+ creds = self._get_creds(replication_allowed=True,
+ revealed_to_rodc=True)
+ tgt = self.get_tgt(creds, pac_request=False, expect_pac=None)
+ tgt = self._modify_tgt(tgt, invalid=True, from_rodc=True)
+
+ tgt = self._validate_tgt(tgt, creds, expected_error=0, expect_pac=None,
+ expect_pac_attrs=False,
+ expect_requester_sid=True)
+
+ ticket = self._run_tgs(tgt, creds, expected_error=0, expect_pac=True)
+
+ pac = self.get_ticket_pac(ticket)
+ self.assertIsNotNone(pac)
+
+ def test_rodc_validate_pac_request_true(self):
+ creds = self._get_creds(replication_allowed=True,
+ revealed_to_rodc=True)
+ tgt = self.get_tgt(creds, pac_request=True)
+ tgt = self._modify_tgt(tgt, invalid=True, from_rodc=True)
+
+ tgt = self._validate_tgt(tgt, creds, expected_error=0, expect_pac=None,
+ expect_pac_attrs=False,
+ expect_requester_sid=True)
+
+ ticket = self._run_tgs(tgt, creds, expected_error=0, expect_pac=True)
+
+ pac = self.get_ticket_pac(ticket)
+ self.assertIsNotNone(pac)
+
+ def test_s4u2self_pac_request_none(self):
+ creds = self._get_creds()
+ tgt = self.get_tgt(creds, pac_request=None)
+
+ ticket = self._s4u2self(tgt, creds, expected_error=0, expect_pac=True)
+
+ pac = self.get_ticket_pac(ticket)
+ self.assertIsNotNone(pac)
+
+ def test_s4u2self_pac_request_false(self):
+ creds = self._get_creds()
+ tgt = self.get_tgt(creds, pac_request=False, expect_pac=None)
+
+ ticket = self._s4u2self(tgt, creds, expected_error=0, expect_pac=True)
+
+ pac = self.get_ticket_pac(ticket)
+ self.assertIsNotNone(pac)
+
+ def test_s4u2self_pac_request_true(self):
+ creds = self._get_creds()
+ tgt = self.get_tgt(creds, pac_request=True)
+
+ ticket = self._s4u2self(tgt, creds, expected_error=0, expect_pac=True)
+
+ pac = self.get_ticket_pac(ticket)
+ self.assertIsNotNone(pac)
+
+ def test_user2user_pac_request_none(self):
+ creds = self._get_creds()
+ tgt = self.get_tgt(creds, pac_request=None)
+
+ ticket = self._user2user(tgt, creds, expected_error=0, expect_pac=True)
+
+ pac = self.get_ticket_pac(ticket)
+ self.assertIsNotNone(pac)
+
+ def test_user2user_pac_request_false(self):
+ creds = self._get_creds()
+ tgt = self.get_tgt(creds, pac_request=False, expect_pac=None)
+
+ ticket = self._user2user(tgt, creds, expected_error=0,
+ expect_pac=True)
+
+ pac = self.get_ticket_pac(ticket, expect_pac=True)
+ self.assertIsNotNone(pac)
+
+ def test_user2user_pac_request_true(self):
+ creds = self._get_creds()
+ tgt = self.get_tgt(creds, pac_request=True)
+
+ ticket = self._user2user(tgt, creds, expected_error=0, expect_pac=True)
+
+ pac = self.get_ticket_pac(ticket)
+ self.assertIsNotNone(pac)
+
+ def test_user2user_user_pac_request_none(self):
+ creds = self._get_creds()
+ tgt = self.get_tgt(creds)
+
+ user_creds = self._get_mach_creds()
+ user_tgt = self.get_tgt(user_creds, pac_request=None)
+
+ ticket = self._user2user(tgt, creds, expected_error=0,
+ user_tgt=user_tgt, user_creds=user_creds,
+ expect_pac=True)
+
+ pac = self.get_ticket_pac(ticket)
+ self.assertIsNotNone(pac)
+
+ def test_user2user_user_pac_request_false(self):
+ creds = self._get_creds()
+ tgt = self.get_tgt(creds)
+
+ user_creds = self._get_mach_creds()
+ user_tgt = self.get_tgt(user_creds, pac_request=False, expect_pac=None)
+
+ ticket = self._user2user(tgt, creds, expected_error=0,
+ user_tgt=user_tgt, user_creds=user_creds,
+ expect_pac=False)
+
+ pac = self.get_ticket_pac(ticket, expect_pac=False)
+ self.assertIsNone(pac)
+
+ def test_user2user_user_pac_request_true(self):
+ creds = self._get_creds()
+ tgt = self.get_tgt(creds)
+
+ user_creds = self._get_mach_creds()
+ user_tgt = self.get_tgt(user_creds, pac_request=True)
+
+ ticket = self._user2user(tgt, creds, expected_error=0,
+ user_tgt=user_tgt, user_creds=user_creds,
+ expect_pac=True)
+
+ pac = self.get_ticket_pac(ticket)
+ self.assertIsNotNone(pac)
+
+ def test_fast_pac_request_none(self):
+ creds = self._get_creds()
+ tgt = self.get_tgt(creds, pac_request=None)
+
+ ticket = self._fast(tgt, creds, expected_error=0, expect_pac=True)
+
+ pac = self.get_ticket_pac(ticket)
+ self.assertIsNotNone(pac)
+
+ def test_fast_pac_request_false(self):
+ creds = self._get_creds()
+ tgt = self.get_tgt(creds, pac_request=False)
+
+ ticket = self._fast(tgt, creds, expected_error=0,
+ expect_pac=True)
+
+ pac = self.get_ticket_pac(ticket, expect_pac=True)
+ self.assertIsNotNone(pac)
+
+ def test_fast_pac_request_true(self):
+ creds = self._get_creds()
+ tgt = self.get_tgt(creds, pac_request=True)
+
+ ticket = self._fast(tgt, creds, expected_error=0, expect_pac=True)
+
+ pac = self.get_ticket_pac(ticket)
+ self.assertIsNotNone(pac)
+
+ def test_tgs_rodc_pac_request_none(self):
+ creds = self._get_creds(replication_allowed=True,
+ revealed_to_rodc=True)
+ tgt = self.get_tgt(creds, pac_request=None)
+ tgt = self._modify_tgt(tgt, from_rodc=True)
+
+ ticket = self._run_tgs(tgt, creds, expected_error=0, expect_pac=True)
+
+ pac = self.get_ticket_pac(ticket)
+ self.assertIsNotNone(pac)
+
+ def test_tgs_rodc_pac_request_false(self):
+ creds = self._get_creds(replication_allowed=True,
+ revealed_to_rodc=True)
+ tgt = self.get_tgt(creds, pac_request=False, expect_pac=None)
+ tgt = self._modify_tgt(tgt, from_rodc=True)
+
+ ticket = self._run_tgs(tgt, creds, expected_error=0, expect_pac=True)
+
+ pac = self.get_ticket_pac(ticket)
+ self.assertIsNotNone(pac)
+
+ def test_tgs_rodc_pac_request_true(self):
+ creds = self._get_creds(replication_allowed=True,
+ revealed_to_rodc=True)
+ tgt = self.get_tgt(creds, pac_request=True)
+ tgt = self._modify_tgt(tgt, from_rodc=True)
+
+ ticket = self._run_tgs(tgt, creds, expected_error=0, expect_pac=True)
+
+ pac = self.get_ticket_pac(ticket)
+ self.assertIsNotNone(pac)
+
+ def test_tgs_rename(self):
+ creds = self.get_cached_creds(account_type=self.AccountType.USER,
+ use_cache=False)
+ tgt = self.get_tgt(creds)
+
+ # Rename the account.
+ new_name = self.get_new_username()
+
+ samdb = self.get_samdb()
+ msg = ldb.Message(creds.get_dn())
+ msg['sAMAccountName'] = ldb.MessageElement(new_name,
+ ldb.FLAG_MOD_REPLACE,
+ 'sAMAccountName')
+ samdb.modify(msg)
+
+ self._run_tgs(tgt, creds, expected_error=(KDC_ERR_TGT_REVOKED,
+ KDC_ERR_C_PRINCIPAL_UNKNOWN))
+
+ # Test making a TGS request for a ticket expiring post-2038.
+ def test_tgs_req_future_till(self):
+ creds = self._get_creds()
+ tgt = self._get_tgt(creds)
+
+ target_creds = self.get_service_creds()
+ self._tgs_req(
+ tgt=tgt,
+ expected_error=0,
+ creds=creds,
+ target_creds=target_creds,
+ till='99990913024805Z')
+
+ def test_tgs_unicode(self):
+ creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={'name_prefix': '🔐'})
+ tgt = self._get_tgt(creds)
+ self._run_tgs(tgt, creds, expected_error=0)
+
+ def test_renew_unicode(self):
+ creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={'name_prefix': '🔐'})
+ tgt = self._get_tgt(creds, renewable=True)
+ self._renew_tgt(tgt, creds, expected_error=0,
+ expect_pac_attrs=True,
+ expect_pac_attrs_pac_request=True,
+ expect_requester_sid=True)
+
+ def test_validate_unicode(self):
+ creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={'name_prefix': '🔐'})
+ tgt = self._get_tgt(creds, invalid=True)
+ self._validate_tgt(tgt, creds, expected_error=0,
+ expect_pac_attrs=True,
+ expect_pac_attrs_pac_request=True,
+ expect_requester_sid=True)
+
+ def test_s4u2self_unicode(self):
+ creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={'name_prefix': '🔐'})
+ tgt = self._get_tgt(creds)
+ self._s4u2self(tgt, creds,
+ expected_error=0,
+ expect_edata=False)
+
+ def test_user2user_unicode(self):
+ creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={'name_prefix': '🔐'})
+ tgt = self._get_tgt(creds)
+ self._user2user(tgt, creds, expected_error=0)
+
+ def test_fast_unicode(self):
+ creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={'name_prefix': '🔐'})
+ tgt = self._get_tgt(creds)
+ self._fast(tgt, creds, expected_error=0)
+
+ def test_fast_as_req_unicode(self):
+ creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={'name_prefix': '🔐'})
+ tgt = self._get_tgt(creds)
+ self._fast_as_req(tgt, creds, expected_error=0)
+
+ def _modify_renewable(self, enc_part):
+ # Set the renewable flag.
+ enc_part = self.modify_ticket_flag(enc_part, 'renewable', value=True)
+
+ # Set the renew-till time to be in the future.
+ renew_till = self.get_KerberosTime(offset=100 * 60 * 60)
+ enc_part['renew-till'] = renew_till
+
+ return enc_part
+
+ def _modify_invalid(self, enc_part):
+ # Set the invalid flag.
+ enc_part = self.modify_ticket_flag(enc_part, 'invalid', value=True)
+
+ # Set the ticket start time to be in the past.
+ past_time = self.get_KerberosTime(offset=-100 * 60 * 60)
+ enc_part['starttime'] = past_time
+
+ return enc_part
+
+ def _get_tgt(self,
+ client_creds,
+ renewable=False,
+ invalid=False,
+ from_rodc=False,
+ new_rid=None,
+ remove_pac=False,
+ allow_empty_authdata=False,
+ can_modify_logon_info=True,
+ can_modify_requester_sid=True,
+ remove_pac_attrs=False,
+ remove_requester_sid=False,
+ etype=None,
+ cksum_etype=None,
+ extra_pac_buffers=None):
+ self.assertFalse(renewable and invalid)
+
+ if remove_pac:
+ self.assertIsNone(new_rid)
+
+ tgt = self.get_tgt(client_creds)
+
+ return self._modify_tgt(
+ tgt=tgt,
+ renewable=renewable,
+ invalid=invalid,
+ from_rodc=from_rodc,
+ new_rid=new_rid,
+ remove_pac=remove_pac,
+ allow_empty_authdata=allow_empty_authdata,
+ can_modify_logon_info=can_modify_logon_info,
+ can_modify_requester_sid=can_modify_requester_sid,
+ remove_pac_attrs=remove_pac_attrs,
+ remove_requester_sid=remove_requester_sid,
+ etype=etype,
+ cksum_etype=cksum_etype,
+ extra_pac_buffers=extra_pac_buffers)
+
+ def _modify_tgt(self,
+ tgt,
+ *,
+ renewable=False,
+ invalid=False,
+ from_rodc=False,
+ new_rid=None,
+ remove_pac=False,
+ allow_empty_authdata=False,
+ cname=None,
+ crealm=None,
+ can_modify_logon_info=True,
+ can_modify_requester_sid=True,
+ remove_pac_attrs=False,
+ remove_requester_sid=False,
+ etype=None,
+ cksum_etype=None,
+ extra_pac_buffers=None):
+ if from_rodc:
+ krbtgt_creds = self.get_mock_rodc_krbtgt_creds()
+ else:
+ krbtgt_creds = self.get_krbtgt_creds()
+
+ modify_pac_fns = []
+
+ if new_rid is not None or remove_requester_sid or remove_pac_attrs:
+ def change_sid_fn(pac):
+ pac_buffers = pac.buffers
+ for pac_buffer in pac_buffers:
+ if pac_buffer.type == krb5pac.PAC_TYPE_LOGON_INFO:
+ if new_rid is not None and can_modify_logon_info:
+ logon_info = pac_buffer.info.info
+
+ logon_info.info3.base.rid = new_rid
+ elif pac_buffer.type == krb5pac.PAC_TYPE_REQUESTER_SID:
+ if remove_requester_sid:
+ pac.num_buffers -= 1
+ pac_buffers.remove(pac_buffer)
+ elif new_rid is not None and can_modify_requester_sid:
+ requester_sid = pac_buffer.info
+
+ samdb = self.get_samdb()
+ domain_sid = samdb.get_domain_sid()
+
+ new_sid = f'{domain_sid}-{new_rid}'
+
+ requester_sid.sid = security.dom_sid(new_sid)
+ elif pac_buffer.type == krb5pac.PAC_TYPE_ATTRIBUTES_INFO:
+ if remove_pac_attrs:
+ pac.num_buffers -= 1
+ pac_buffers.remove(pac_buffer)
+
+ pac.buffers = pac_buffers
+
+ return pac
+
+ modify_pac_fns.append(change_sid_fn)
+
+ krbtgt_key = self.TicketDecryptionKey_from_creds(krbtgt_creds,
+ etype)
+
+ if remove_pac:
+ checksum_keys = None
+ else:
+ if etype == cksum_etype:
+ cksum_key = krbtgt_key
+ else:
+ cksum_key = self.TicketDecryptionKey_from_creds(krbtgt_creds,
+ cksum_etype)
+ checksum_keys = {
+ krb5pac.PAC_TYPE_KDC_CHECKSUM: cksum_key
+ }
+
+ if renewable:
+ flags_modify_fn = self._modify_renewable
+ elif invalid:
+ flags_modify_fn = self._modify_invalid
+ else:
+ flags_modify_fn = None
+
+ if cname is not None or crealm is not None:
+ def modify_fn(enc_part):
+ if flags_modify_fn is not None:
+ enc_part = flags_modify_fn(enc_part)
+
+ if cname is not None:
+ enc_part['cname'] = cname
+
+ if crealm is not None:
+ enc_part['crealm'] = crealm
+
+ return enc_part
+ else:
+ modify_fn = flags_modify_fn
+
+ if cname is not None:
+ def change_cname_fn(pac):
+ for pac_buffer in pac.buffers:
+ if pac_buffer.type == krb5pac.PAC_TYPE_LOGON_NAME:
+ logon_info = pac_buffer.info
+
+ logon_info.account_name = (
+ cname['name-string'][0].decode('utf-8'))
+
+ return pac
+
+ modify_pac_fns.append(change_cname_fn)
+
+ if extra_pac_buffers is not None:
+ modify_pac_fns.append(partial(self.add_extra_pac_buffers,
+ buffers=extra_pac_buffers))
+
+ return self.modified_ticket(
+ tgt,
+ new_ticket_key=krbtgt_key,
+ modify_fn=modify_fn,
+ modify_pac_fn=modify_pac_fns or None,
+ exclude_pac=remove_pac,
+ allow_empty_authdata=allow_empty_authdata,
+ update_pac_checksums=not remove_pac,
+ checksum_keys=checksum_keys)
+
+ def _remove_rodc_partial_secrets(self):
+ samdb = self.get_samdb()
+
+ rodc_ctx = self.get_mock_rodc_ctx()
+ rodc_dn = ldb.Dn(samdb, rodc_ctx.acct_dn)
+
+ def add_rodc_partial_secrets():
+ msg = ldb.Message()
+ msg.dn = rodc_dn
+ msg['userAccountControl'] = ldb.MessageElement(
+ str(rodc_ctx.userAccountControl),
+ ldb.FLAG_MOD_REPLACE,
+ 'userAccountControl')
+ samdb.modify(msg)
+
+ self.addCleanup(add_rodc_partial_secrets)
+
+ uac = rodc_ctx.userAccountControl & ~dsdb.UF_PARTIAL_SECRETS_ACCOUNT
+
+ msg = ldb.Message()
+ msg.dn = rodc_dn
+ msg['userAccountControl'] = ldb.MessageElement(
+ str(uac),
+ ldb.FLAG_MOD_REPLACE,
+ 'userAccountControl')
+ samdb.modify(msg)
+
+ def _remove_rodc_krbtgt_link(self):
+ samdb = self.get_samdb()
+
+ rodc_ctx = self.get_mock_rodc_ctx()
+ rodc_dn = ldb.Dn(samdb, rodc_ctx.acct_dn)
+
+ def add_rodc_krbtgt_link():
+ msg = ldb.Message()
+ msg.dn = rodc_dn
+ msg['msDS-KrbTgtLink'] = ldb.MessageElement(
+ rodc_ctx.new_krbtgt_dn,
+ ldb.FLAG_MOD_ADD,
+ 'msDS-KrbTgtLink')
+ samdb.modify(msg)
+
+ self.addCleanup(add_rodc_krbtgt_link)
+
+ msg = ldb.Message()
+ msg.dn = rodc_dn
+ msg['msDS-KrbTgtLink'] = ldb.MessageElement(
+ [],
+ ldb.FLAG_MOD_DELETE,
+ 'msDS-KrbTgtLink')
+ samdb.modify(msg)
+
+ def _get_creds(self,
+ replication_allowed=False,
+ replication_denied=False,
+ revealed_to_rodc=False):
+ return self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={
+ 'allowed_replication_mock': replication_allowed,
+ 'denied_replication_mock': replication_denied,
+ 'revealed_to_mock_rodc': revealed_to_rodc,
+ 'id': 0
+ })
+
+ def _get_existing_rid(self,
+ replication_allowed=False,
+ replication_denied=False,
+ revealed_to_rodc=False):
+ other_creds = self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={
+ 'allowed_replication_mock': replication_allowed,
+ 'denied_replication_mock': replication_denied,
+ 'revealed_to_mock_rodc': revealed_to_rodc,
+ 'id': 1
+ })
+
+ other_sid = other_creds.get_sid()
+ other_rid = int(other_sid.rsplit('-', 1)[1])
+
+ return other_rid
+
+ def _get_mach_creds(self):
+ return self.get_cached_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={
+ 'allowed_replication_mock': True,
+ 'denied_replication_mock': False,
+ 'revealed_to_mock_rodc': True,
+ 'id': 2
+ })
+
+ def _get_user_creds(self,
+ replication_allowed=False,
+ replication_denied=False,
+ revealed_to_rodc=False):
+ return self.get_cached_creds(
+ account_type=self.AccountType.USER,
+ opts={
+ 'allowed_replication_mock': replication_allowed,
+ 'denied_replication_mock': replication_denied,
+ 'revealed_to_mock_rodc': revealed_to_rodc,
+ 'id': 3
+ })
+
+ def _get_non_existent_rid(self):
+ return (1 << 30) - 1
+
+ def _run_tgs(self, tgt, creds, expected_error, *, expect_pac=True,
+ expect_pac_attrs=None, expect_pac_attrs_pac_request=None,
+ expect_requester_sid=None, expected_sid=None,
+ expect_edata=False, expect_status=None, expected_status=None,
+ expected_extra_pac_buffers=None):
+ target_creds = self.get_service_creds()
+ return self._tgs_req(
+ tgt, expected_error, creds, target_creds,
+ expect_pac=expect_pac,
+ expect_pac_attrs=expect_pac_attrs,
+ expect_pac_attrs_pac_request=expect_pac_attrs_pac_request,
+ expect_requester_sid=expect_requester_sid,
+ expected_sid=expected_sid,
+ expect_edata=expect_edata,
+ expect_status=expect_status,
+ expected_status=expected_status,
+ expected_extra_pac_buffers=expected_extra_pac_buffers)
+
+ # These tests fail against Windows, which does not implement ticket
+ # renewal.
+ def _renew_tgt(self, tgt, creds, expected_error, *, expect_pac=True,
+ expect_pac_attrs=None, expect_pac_attrs_pac_request=None,
+ expect_requester_sid=None, expected_sid=None):
+ krbtgt_creds = self.get_krbtgt_creds()
+ kdc_options = str(krb5_asn1.KDCOptions('renew'))
+ return self._tgs_req(
+ tgt, expected_error, creds, krbtgt_creds,
+ kdc_options=kdc_options,
+ expect_pac=expect_pac,
+ expect_pac_attrs=expect_pac_attrs,
+ expect_pac_attrs_pac_request=expect_pac_attrs_pac_request,
+ expect_requester_sid=expect_requester_sid,
+ expected_sid=expected_sid)
+
+ # These tests fail against Windows, which does not implement ticket
+ # validation.
+ def _validate_tgt(self, tgt, creds, expected_error, *, expect_pac=True,
+ expect_pac_attrs=None,
+ expect_pac_attrs_pac_request=None,
+ expect_requester_sid=None,
+ expected_sid=None):
+ krbtgt_creds = self.get_krbtgt_creds()
+ kdc_options = str(krb5_asn1.KDCOptions('validate'))
+ return self._tgs_req(
+ tgt, expected_error, creds, krbtgt_creds,
+ kdc_options=kdc_options,
+ expect_pac=expect_pac,
+ expect_pac_attrs=expect_pac_attrs,
+ expect_pac_attrs_pac_request=expect_pac_attrs_pac_request,
+ expect_requester_sid=expect_requester_sid,
+ expected_sid=expected_sid)
+
+ def _s4u2self(self, tgt, tgt_creds, expected_error, *, expect_pac=True,
+ expect_edata=False, expect_status=None,
+ expected_status=None):
+ user_creds = self._get_mach_creds()
+
+ user_name = user_creds.get_username()
+ user_cname = self.PrincipalName_create(name_type=NT_PRINCIPAL,
+ names=[user_name])
+ user_realm = user_creds.get_realm()
+
+ def generate_s4u2self_padata(_kdc_exchange_dict,
+ _callback_dict,
+ req_body):
+ padata = self.PA_S4U2Self_create(
+ name=user_cname,
+ realm=user_realm,
+ tgt_session_key=tgt.session_key,
+ ctype=None)
+
+ return [padata], req_body
+
+ return self._tgs_req(tgt, expected_error, tgt_creds, tgt_creds,
+ expected_cname=user_cname,
+ generate_padata_fn=generate_s4u2self_padata,
+ expect_edata=expect_edata,
+ expect_status=expect_status,
+ expected_status=expected_status,
+ expect_pac=expect_pac)
+
+ def _user2user(self, tgt, tgt_creds, expected_error, *,
+ sname=None,
+ srealm=None, user_tgt=None, user_creds=None,
+ expect_edata=False,
+ expect_pac=True, expected_status=None):
+ if user_tgt is None:
+ user_creds = self._get_mach_creds()
+ user_tgt = self.get_tgt(user_creds)
+ else:
+ self.assertIsNotNone(user_creds,
+ 'if supplying user_tgt, user_creds should be '
+ 'supplied also')
+
+ kdc_options = str(krb5_asn1.KDCOptions('enc-tkt-in-skey'))
+ return self._tgs_req(user_tgt, expected_error, user_creds, tgt_creds,
+ kdc_options=kdc_options,
+ additional_ticket=tgt,
+ sname=sname,
+ srealm=srealm,
+ expect_edata=expect_edata,
+ expect_pac=expect_pac,
+ expected_status=expected_status)
+
+ def _fast(self, armor_tgt, armor_tgt_creds, expected_error,
+ expected_sname=None, expect_pac=True, expect_edata=False):
+ user_creds = self._get_mach_creds()
+ user_tgt = self.get_tgt(user_creds)
+
+ target_creds = self.get_service_creds()
+
+ return self._tgs_req(user_tgt, expected_error,
+ user_creds, target_creds,
+ armor_tgt=armor_tgt,
+ expected_sname=expected_sname,
+ expect_pac=expect_pac,
+ expect_edata=expect_edata)
+
+ def _fast_as_req(self, armor_tgt, armor_tgt_creds, expected_error,
+ expected_sname=None):
+ user_creds = self._get_mach_creds()
+ target_creds = self.get_service_creds()
+
+ return self._armored_as_req(user_creds, target_creds, armor_tgt,
+ expected_error=expected_error,
+ expected_sname=expected_sname,
+ expect_edata=False)
+
+
+if __name__ == "__main__":
+ global_asn1_print = False
+ global_hexdump = False
+ import unittest
+ unittest.main()
diff --git a/python/samba/tests/krb5/kdc_tgt_tests.py b/python/samba/tests/krb5/kdc_tgt_tests.py
new file mode 100755
index 0000000..5a52a95
--- /dev/null
+++ b/python/samba/tests/krb5/kdc_tgt_tests.py
@@ -0,0 +1,86 @@
+#!/usr/bin/env python3
+# Unix SMB/CIFS implementation.
+# Copyright (C) Stefan Metzmacher 2020
+# Copyright (C) 2020 Catalyst.Net Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import sys
+import os
+
+sys.path.insert(0, "bin/python")
+os.environ["PYTHONUNBUFFERED"] = "1"
+
+from samba.tests.krb5.kdc_base_test import KDCBaseTest
+
+global_asn1_print = False
+global_hexdump = False
+
+
+class KdcTgtTests(KDCBaseTest):
+ def setUp(self):
+ super().setUp()
+ self.do_asn1_print = global_asn1_print
+ self.do_hexdump = global_hexdump
+
+ def test_ticket_signature(self):
+ # Ensure that a DC correctly issues tickets signed with its krbtgt key.
+ user_creds = self.get_client_creds()
+ target_creds = self.get_service_creds()
+
+ krbtgt_creds = self.get_krbtgt_creds()
+ key = self.TicketDecryptionKey_from_creds(krbtgt_creds)
+
+ # Get a TGT from the DC.
+ tgt = self.get_tgt(user_creds)
+
+ # Ensure the PAC contains the expected checksums.
+ self.verify_ticket(tgt, key, service_ticket=False)
+
+ # Get a service ticket from the DC.
+ service_ticket = self.get_service_ticket(tgt, target_creds)
+
+ # Ensure the PAC contains the expected checksums.
+ self.verify_ticket(service_ticket, key, service_ticket=True,
+ expect_ticket_checksum=True)
+
+ def test_full_signature(self):
+        # Ensure that a DC issues tickets carrying the full PAC checksum signature.
+ user_creds = self.get_client_creds()
+ target_creds = self.get_service_creds()
+
+ krbtgt_creds = self.get_krbtgt_creds()
+ key = self.TicketDecryptionKey_from_creds(krbtgt_creds)
+
+ # Get a TGT from the DC.
+ tgt = self.get_tgt(user_creds)
+
+ # Ensure the PAC contains the expected checksums.
+ self.verify_ticket(tgt, key, service_ticket=False)
+
+ # Get a service ticket from the DC.
+ service_ticket = self.get_service_ticket(tgt, target_creds)
+
+ # Ensure the PAC contains the expected checksums.
+ self.verify_ticket(service_ticket, key, service_ticket=True,
+ expect_ticket_checksum=True,
+ expect_full_checksum=True)
+
+
+if __name__ == "__main__":
+ global_asn1_print = False
+ global_hexdump = False
+ import unittest
+ unittest.main()
diff --git a/python/samba/tests/krb5/kpasswd_tests.py b/python/samba/tests/krb5/kpasswd_tests.py
new file mode 100755
index 0000000..0f1fe65
--- /dev/null
+++ b/python/samba/tests/krb5/kpasswd_tests.py
@@ -0,0 +1,983 @@
+#!/usr/bin/env python3
+# Unix SMB/CIFS implementation.
+# Copyright (C) Stefan Metzmacher 2020
+# Copyright (C) Catalyst.Net Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import sys
+import os
+
+sys.path.insert(0, 'bin/python')
+os.environ['PYTHONUNBUFFERED'] = '1'
+
+from functools import partial
+
+from samba import generate_random_password
+from samba.dcerpc import krb5pac
+from samba.sd_utils import SDUtils
+
+from samba.tests.krb5.kdc_base_test import KDCBaseTest
+from samba.tests.krb5.rfc4120_constants import (
+ KDC_ERR_TGT_REVOKED,
+ KDC_ERR_TKT_EXPIRED,
+ KPASSWD_ACCESSDENIED,
+ KPASSWD_AUTHERROR,
+ KPASSWD_HARDERROR,
+ KPASSWD_INITIAL_FLAG_NEEDED,
+ KPASSWD_MALFORMED,
+ KPASSWD_SOFTERROR,
+ KPASSWD_SUCCESS,
+ NT_PRINCIPAL,
+ NT_SRV_INST,
+)
+
+global_asn1_print = False
+global_hexdump = False
+
+
+# Note: these tests do not pass on Windows, which returns different error codes
+# to the ones we have chosen, and does not always return additional error data.
+class KpasswdTests(KDCBaseTest):
+
+ def setUp(self):
+ super().setUp()
+ self.do_asn1_print = global_asn1_print
+ self.do_hexdump = global_hexdump
+
+ samdb = self.get_samdb()
+
+ # Get the old 'dSHeuristics' if it was set
+ dsheuristics = samdb.get_dsheuristics()
+
+ # Reset the 'dSHeuristics' as they were before
+ self.addCleanup(samdb.set_dsheuristics, dsheuristics)
+
+ # Set the 'dSHeuristics' to activate the correct 'userPassword'
+ # behaviour
+ samdb.set_dsheuristics('000000001')
+
+ # Get the old 'minPwdAge'
+ minPwdAge = samdb.get_minPwdAge()
+
+ # Reset the 'minPwdAge' as it was before
+ self.addCleanup(samdb.set_minPwdAge, minPwdAge)
+
+ # Set it temporarily to '0'
+ samdb.set_minPwdAge('0')
+
+ def _get_creds(self, expired=False):
+ opts = {
+ 'expired_password': expired
+ }
+
+ # Create the account.
+ creds = self.get_cached_creds(account_type=self.AccountType.USER,
+ opts=opts,
+ use_cache=False)
+
+ return creds
+
+ def get_kpasswd_sname(self):
+ return self.PrincipalName_create(name_type=NT_PRINCIPAL,
+ names=['kadmin', 'changepw'])
+
+ def get_ticket_lifetime(self, ticket):
+ enc_part = ticket.ticket_private
+
+ authtime = enc_part['authtime']
+ starttime = enc_part.get('starttime', authtime)
+ endtime = enc_part['endtime']
+
+ starttime = self.get_EpochFromKerberosTime(starttime)
+ endtime = self.get_EpochFromKerberosTime(endtime)
+
+ return endtime - starttime
+
+ # Test setting a password with kpasswd.
+ def test_kpasswd_set(self):
+ # Create an account for testing.
+ creds = self._get_creds()
+
+ # Get an initial ticket to kpasswd.
+ ticket = self.get_tgt(creds, sname=self.get_kpasswd_sname(),
+ kdc_options='0')
+
+ expected_code = KPASSWD_SUCCESS
+ expected_msg = b'Password changed'
+
+ # Set the password.
+ new_password = generate_random_password(32, 32)
+ self.kpasswd_exchange(ticket,
+ new_password,
+ expected_code,
+ expected_msg,
+ mode=self.KpasswdMode.SET)
+
+ # Test the newly set password.
+ creds.update_password(new_password)
+ self.get_tgt(creds, fresh=True)
+
+ # Test changing a password with kpasswd.
+ def test_kpasswd_change(self):
+ # Create an account for testing.
+ creds = self._get_creds()
+
+ # Get an initial ticket to kpasswd.
+ ticket = self.get_tgt(creds, sname=self.get_kpasswd_sname(),
+ kdc_options='0')
+
+ expected_code = KPASSWD_SUCCESS
+ expected_msg = b'Password changed'
+
+ # Change the password.
+ new_password = generate_random_password(32, 32)
+ self.kpasswd_exchange(ticket,
+ new_password,
+ expected_code,
+ expected_msg,
+ mode=self.KpasswdMode.CHANGE)
+
+ # Test the newly set password.
+ creds.update_password(new_password)
+ self.get_tgt(creds, fresh=True)
+
+ # Test kpasswd without setting the canonicalize option.
+ def test_kpasswd_no_canonicalize(self):
+ # Create an account for testing.
+ creds = self._get_creds()
+
+ sname = self.get_kpasswd_sname()
+
+ # Get an initial ticket to kpasswd.
+ ticket = self.get_tgt(creds, sname=sname,
+ kdc_options='0')
+
+ expected_code = KPASSWD_SUCCESS
+ expected_msg = b'Password changed'
+
+ # Set the password.
+ new_password = generate_random_password(32, 32)
+ self.kpasswd_exchange(ticket,
+ new_password,
+ expected_code,
+ expected_msg,
+ mode=self.KpasswdMode.SET)
+
+ creds.update_password(new_password)
+
+ # Get an initial ticket to kpasswd.
+ ticket = self.get_tgt(creds, sname=sname,
+ kdc_options='0')
+
+ # Change the password.
+ new_password = generate_random_password(32, 32)
+ self.kpasswd_exchange(ticket,
+ new_password,
+ expected_code,
+ expected_msg,
+ mode=self.KpasswdMode.CHANGE)
+
+ # Test kpasswd with the canonicalize option reset and a non-canonical
+ # (by conversion to title case) realm.
+ def test_kpasswd_no_canonicalize_realm_case(self):
+ # Create an account for testing.
+ creds = self._get_creds()
+
+ sname = self.get_kpasswd_sname()
+ realm = creds.get_realm().capitalize() # We use a title-cased realm.
+
+ # Get an initial ticket to kpasswd.
+ ticket = self.get_tgt(creds, sname=sname,
+ realm=realm,
+ kdc_options='0')
+
+ expected_code = KPASSWD_SUCCESS
+ expected_msg = b'Password changed'
+
+ # Set the password.
+ new_password = generate_random_password(32, 32)
+ self.kpasswd_exchange(ticket,
+ new_password,
+ expected_code,
+ expected_msg,
+ mode=self.KpasswdMode.SET)
+
+ creds.update_password(new_password)
+
+ # Get an initial ticket to kpasswd.
+ ticket = self.get_tgt(creds, sname=sname,
+ realm=realm,
+ kdc_options='0')
+
+ # Change the password.
+ new_password = generate_random_password(32, 32)
+ self.kpasswd_exchange(ticket,
+ new_password,
+ expected_code,
+ expected_msg,
+ mode=self.KpasswdMode.CHANGE)
+
+ # Test kpasswd with the canonicalize option set.
+ def test_kpasswd_canonicalize(self):
+ # Create an account for testing.
+ creds = self._get_creds()
+
+ # Get an initial ticket to kpasswd. We set the canonicalize flag here.
+ ticket = self.get_tgt(creds, sname=self.get_kpasswd_sname(),
+ kdc_options='canonicalize')
+
+ expected_code = KPASSWD_SUCCESS
+ expected_msg = b'Password changed'
+
+ # Set the password.
+ new_password = generate_random_password(32, 32)
+ self.kpasswd_exchange(ticket,
+ new_password,
+ expected_code,
+ expected_msg,
+ mode=self.KpasswdMode.SET)
+
+ creds.update_password(new_password)
+
+ # Get an initial ticket to kpasswd. We set the canonicalize flag here.
+ ticket = self.get_tgt(creds, sname=self.get_kpasswd_sname(),
+ kdc_options='canonicalize')
+
+ # Change the password.
+ new_password = generate_random_password(32, 32)
+ self.kpasswd_exchange(ticket,
+ new_password,
+ expected_code,
+ expected_msg,
+ mode=self.KpasswdMode.CHANGE)
+
+ # Test kpasswd with the canonicalize option set and a non-canonical (by
+ # conversion to title case) realm.
+ def test_kpasswd_canonicalize_realm_case(self):
+ # Create an account for testing.
+ creds = self._get_creds()
+
+ sname = self.get_kpasswd_sname()
+ realm = creds.get_realm().capitalize() # We use a title-cased realm.
+
+ # Get an initial ticket to kpasswd. We set the canonicalize flag here.
+ ticket = self.get_tgt(creds, sname=sname,
+ realm=realm,
+ kdc_options='canonicalize')
+
+ expected_code = KPASSWD_SUCCESS
+ expected_msg = b'Password changed'
+
+ # Set the password.
+ new_password = generate_random_password(32, 32)
+ self.kpasswd_exchange(ticket,
+ new_password,
+ expected_code,
+ expected_msg,
+ mode=self.KpasswdMode.SET)
+
+ creds.update_password(new_password)
+
+ # Get an initial ticket to kpasswd. We set the canonicalize flag here.
+ ticket = self.get_tgt(creds, sname=sname,
+ realm=realm,
+ kdc_options='canonicalize')
+
+ # Change the password.
+ new_password = generate_random_password(32, 32)
+ self.kpasswd_exchange(ticket,
+ new_password,
+ expected_code,
+ expected_msg,
+ mode=self.KpasswdMode.CHANGE)
+
+ # Test kpasswd rejects a password that does not meet complexity
+ # requirements.
+ def test_kpasswd_too_weak(self):
+ # Create an account for testing.
+ creds = self._get_creds()
+
+ # Get an initial ticket to kpasswd.
+ ticket = self.get_tgt(creds, sname=self.get_kpasswd_sname(),
+ kdc_options='0')
+
+ expected_code = KPASSWD_SOFTERROR
+ expected_msg = b'Password does not meet complexity requirements'
+
+ # Set the password.
+ new_password = 'password'
+ self.kpasswd_exchange(ticket,
+ new_password,
+ expected_code,
+ expected_msg,
+ mode=self.KpasswdMode.SET)
+
+ # Change the password.
+ self.kpasswd_exchange(ticket,
+ new_password,
+ expected_code,
+ expected_msg,
+ mode=self.KpasswdMode.CHANGE)
+
+ # Test kpasswd rejects an empty new password.
+ def test_kpasswd_empty(self):
+ # Create an account for testing.
+ creds = self._get_creds()
+
+ # Get an initial ticket to kpasswd.
+ ticket = self.get_tgt(creds, sname=self.get_kpasswd_sname(),
+ kdc_options='0')
+
+ expected_code = KPASSWD_SOFTERROR, KPASSWD_HARDERROR
+ expected_msg = (b'Password too short, password must be at least 7 '
+ b'characters long.',
+ b'String conversion failed!')
+
+ # Set the password.
+ new_password = ''
+ self.kpasswd_exchange(ticket,
+ new_password,
+ expected_code,
+ expected_msg,
+ mode=self.KpasswdMode.SET)
+
+ expected_code = KPASSWD_HARDERROR
+ expected_msg = b'String conversion failed!'
+
+ # Change the password.
+ self.kpasswd_exchange(ticket,
+ new_password,
+ expected_code,
+ expected_msg,
+ mode=self.KpasswdMode.CHANGE)
+
+ # Test kpasswd rejects a request that does not include a random sequence
+ # number.
+ def test_kpasswd_no_seq_number(self):
+ # Create an account for testing.
+ creds = self._get_creds()
+
+ # Get an initial ticket to kpasswd.
+ ticket = self.get_tgt(creds, sname=self.get_kpasswd_sname(),
+ kdc_options='0')
+
+ expected_code = KPASSWD_HARDERROR
+ expected_msg = b'gensec_unwrap failed - NT_STATUS_ACCESS_DENIED\n'
+
+ # Set the password.
+ new_password = generate_random_password(32, 32)
+ self.kpasswd_exchange(ticket,
+ new_password,
+ expected_code,
+ expected_msg,
+ mode=self.KpasswdMode.SET,
+ send_seq_number=False)
+
+ # Change the password.
+ self.kpasswd_exchange(ticket,
+ new_password,
+ expected_code,
+ expected_msg,
+ mode=self.KpasswdMode.CHANGE,
+ send_seq_number=False)
+
+ # Test kpasswd rejects a ticket issued by an RODC.
+ def test_kpasswd_from_rodc(self):
+ # Create an account for testing.
+ creds = self._get_creds()
+
+ # Get an initial ticket to kpasswd.
+ ticket = self.get_tgt(creds, sname=self.get_kpasswd_sname(),
+ kdc_options='0')
+
+ # Have the ticket be issued by the RODC.
+ ticket = self.issued_by_rodc(ticket)
+
+ expected_code = KPASSWD_HARDERROR
+ expected_msg = b'gensec_update failed - NT_STATUS_LOGON_FAILURE\n'
+
+ # Set the password.
+ new_password = generate_random_password(32, 32)
+ self.kpasswd_exchange(ticket,
+ new_password,
+ expected_code,
+ expected_msg,
+ mode=self.KpasswdMode.SET)
+
+ # Change the password.
+ self.kpasswd_exchange(ticket,
+ new_password,
+ expected_code,
+ expected_msg,
+ mode=self.KpasswdMode.CHANGE)
+
+    # Test that kpasswd rejects a set specifying only the target user's principal.
+ def test_kpasswd_set_target_princ_only(self):
+ # Create an account for testing.
+ creds = self._get_creds()
+ username = creds.get_username()
+
+ cname = self.PrincipalName_create(name_type=NT_PRINCIPAL,
+ names=username.split('/'))
+
+ # Get an initial ticket to kpasswd.
+ ticket = self.get_tgt(creds, sname=self.get_kpasswd_sname(),
+ kdc_options='0')
+
+ expected_code = KPASSWD_MALFORMED
+ expected_msg = (b'Realm and principal must be both present, or '
+ b'neither present',
+ b'Failed to decode packet')
+
+ # Change the password.
+ new_password = generate_random_password(32, 32)
+ self.kpasswd_exchange(ticket,
+ new_password,
+ expected_code,
+ expected_msg,
+ mode=self.KpasswdMode.SET,
+ target_princ=cname)
+
+ # Test that kpasswd rejects a password set specifying only the realm of the
+ # target user.
+ def test_kpasswd_set_target_realm_only(self):
+ # Create an account for testing.
+ creds = self._get_creds()
+
+ # Get an initial ticket to kpasswd.
+ ticket = self.get_tgt(creds, sname=self.get_kpasswd_sname(),
+ kdc_options='0')
+
+ expected_code = KPASSWD_MALFORMED, KPASSWD_ACCESSDENIED
+ expected_msg = (b'Realm and principal must be both present, or '
+ b'neither present',
+ b'Failed to decode packet',
+ b'No such user when changing password')
+
+ # Change the password.
+ new_password = generate_random_password(32, 32)
+ self.kpasswd_exchange(ticket,
+ new_password,
+ expected_code,
+ expected_msg,
+ mode=self.KpasswdMode.SET,
+ target_realm=creds.get_realm())
+
+ # Show that a user cannot set a password, specifying both principal and
+ # realm of the target user, without having control access.
+ def test_kpasswd_set_target_princ_and_realm_no_access(self):
+ # Create an account for testing.
+ creds = self._get_creds()
+ username = creds.get_username()
+
+ cname = self.PrincipalName_create(name_type=NT_PRINCIPAL,
+ names=username.split('/'))
+
+ # Get an initial ticket to kpasswd.
+ ticket = self.get_tgt(creds, sname=self.get_kpasswd_sname(),
+ kdc_options='0')
+
+ expected_code = KPASSWD_ACCESSDENIED
+ expected_msg = b'Not permitted to change password'
+
+ # Change the password.
+ new_password = generate_random_password(32, 32)
+ self.kpasswd_exchange(ticket,
+ new_password,
+ expected_code,
+ expected_msg,
+ mode=self.KpasswdMode.SET,
+ target_princ=cname,
+ target_realm=creds.get_realm())
+
+ # Test setting a password, specifying both principal and realm of the
+ # target user, when the user has control access on their account.
+ def test_kpasswd_set_target_princ_and_realm_access(self):
+ # Create an account for testing.
+ creds = self._get_creds()
+ username = creds.get_username()
+ tgt = self.get_tgt(creds)
+
+ cname = self.PrincipalName_create(name_type=NT_PRINCIPAL,
+ names=username.split('/'))
+
+ samdb = self.get_samdb()
+ sd_utils = SDUtils(samdb)
+
+ user_dn = creds.get_dn()
+ user_sid = creds.get_sid()
+
+ # Give the user control access on their account.
+ ace = f'(A;;CR;;;{user_sid})'
+ sd_utils.dacl_add_ace(user_dn, ace)
+
+ # Get a non-initial ticket to kpasswd. Since we have the right to
+ # change the account's password, we don't need an initial ticket.
+ krbtgt_creds = self.get_krbtgt_creds()
+ ticket = self.get_service_ticket(tgt,
+ krbtgt_creds,
+ service='kadmin',
+ target_name='changepw',
+ kdc_options='0')
+
+ expected_code = KPASSWD_SUCCESS
+ expected_msg = b'Password changed'
+
+ # Change the password.
+ new_password = generate_random_password(32, 32)
+ self.kpasswd_exchange(ticket,
+ new_password,
+ expected_code,
+ expected_msg,
+ mode=self.KpasswdMode.SET,
+ target_princ=cname,
+ target_realm=creds.get_realm())
+
+ # Test setting a password when the existing password has expired.
+ def test_kpasswd_set_expired_password(self):
+ # Create an account for testing, with an expired password.
+ creds = self._get_creds(expired=True)
+
+ # Get an initial ticket to kpasswd.
+ ticket = self.get_tgt(creds, sname=self.get_kpasswd_sname(),
+ kdc_options='0')
+
+ expected_code = KPASSWD_SUCCESS
+ expected_msg = b'Password changed'
+
+ # Set the password.
+ new_password = generate_random_password(32, 32)
+ self.kpasswd_exchange(ticket,
+ new_password,
+ expected_code,
+ expected_msg,
+ mode=self.KpasswdMode.SET)
+
+ # Test changing a password when the existing password has expired.
+ def test_kpasswd_change_expired_password(self):
+ # Create an account for testing, with an expired password.
+ creds = self._get_creds(expired=True)
+
+ # Get an initial ticket to kpasswd.
+ ticket = self.get_tgt(creds, sname=self.get_kpasswd_sname(),
+ kdc_options='0')
+
+ expected_code = KPASSWD_SUCCESS
+ expected_msg = b'Password changed'
+
+ # Change the password.
+ new_password = generate_random_password(32, 32)
+ self.kpasswd_exchange(ticket,
+ new_password,
+ expected_code,
+ expected_msg,
+ mode=self.KpasswdMode.CHANGE)
+
+    # Check the lifetime of a kpasswd ticket is exactly two minutes.
+ def test_kpasswd_ticket_lifetime(self):
+ # Create an account for testing.
+ creds = self._get_creds()
+
+ # Get an initial ticket to kpasswd.
+ ticket = self.get_tgt(creds, sname=self.get_kpasswd_sname(),
+ kdc_options='0')
+
+ # Check the lifetime of the ticket is equal to two minutes.
+ lifetime = self.get_ticket_lifetime(ticket)
+ self.assertEqual(2 * 60, lifetime)
+
+ # Ensure we cannot perform a TGS-REQ with a kpasswd ticket.
+ def test_kpasswd_ticket_tgs(self):
+ creds = self.get_client_creds()
+
+ # Get an initial ticket to kpasswd.
+ ticket = self.get_tgt(creds, sname=self.get_kpasswd_sname(),
+ kdc_options='0')
+
+ # Change the sname of the ticket to match that of a TGT.
+ realm = creds.get_realm()
+ krbtgt_sname = self.PrincipalName_create(name_type=NT_SRV_INST,
+ names=['krbtgt', realm])
+ ticket.set_sname(krbtgt_sname)
+
+ # Try to use that ticket to get a service ticket.
+ service_creds = self.get_service_creds()
+
+ # This fails due to missing REQUESTER_SID buffer.
+ self._make_tgs_request(creds, service_creds, ticket,
+ expect_error=(KDC_ERR_TGT_REVOKED,
+ KDC_ERR_TKT_EXPIRED))
+
+ # Ensure we cannot perform a TGS-REQ with a kpasswd ticket containing a
+ # requester SID and having a remaining lifetime of two minutes.
+ def test_kpasswd_ticket_requester_sid_tgs(self):
+ creds = self.get_client_creds()
+
+ # Get an initial ticket to kpasswd.
+ ticket = self.get_tgt(creds, sname=self.get_kpasswd_sname(),
+ kdc_options='0')
+
+ # Change the sname of the ticket to match that of a TGT.
+ realm = creds.get_realm()
+ krbtgt_sname = self.PrincipalName_create(name_type=NT_SRV_INST,
+ names=['krbtgt', realm])
+ ticket.set_sname(krbtgt_sname)
+
+ # Modify the ticket to add a requester SID and give it two minutes to
+ # live.
+ ticket = self.modify_lifetime(ticket,
+ lifetime=2 * 60,
+ requester_sid=creds.get_sid())
+
+ # Try to use that ticket to get a service ticket.
+ service_creds = self.get_service_creds()
+
+ # This fails due to the lifetime being too short.
+ self._make_tgs_request(creds, service_creds, ticket,
+ expect_error=KDC_ERR_TKT_EXPIRED)
+
+ # Show we can perform a TGS-REQ with a kpasswd ticket containing a
+ # requester SID if the remaining lifetime exceeds two minutes.
+ def test_kpasswd_ticket_requester_sid_lifetime_tgs(self):
+ creds = self.get_client_creds()
+
+ # Get an initial ticket to kpasswd.
+ ticket = self.get_tgt(creds, sname=self.get_kpasswd_sname(),
+ kdc_options='0')
+
+ # Change the sname of the ticket to match that of a TGT.
+ realm = creds.get_realm()
+ krbtgt_sname = self.PrincipalName_create(name_type=NT_SRV_INST,
+ names=['krbtgt', realm])
+ ticket.set_sname(krbtgt_sname)
+
+ # Modify the ticket to add a requester SID and give it two minutes and
+ # ten seconds to live.
+ ticket = self.modify_lifetime(ticket,
+ lifetime=2 * 60 + 10,
+ requester_sid=creds.get_sid())
+
+ # Try to use that ticket to get a service ticket.
+ service_creds = self.get_service_creds()
+
+ # This succeeds.
+ self._make_tgs_request(creds, service_creds, ticket,
+ expect_error=False)
+
+ # Show that we cannot provide a TGT to kpasswd to change the password.
+ def test_kpasswd_tgt(self):
+ # Create an account for testing, and get a TGT.
+ creds = self._get_creds()
+ tgt = self.get_tgt(creds)
+
+ # Change the sname of the ticket to match that of kadmin/changepw.
+ tgt.set_sname(self.get_kpasswd_sname())
+
+ expected_code = KPASSWD_AUTHERROR
+ expected_msg = b'A TGT may not be used as a ticket to kpasswd'
+
+ # Set the password.
+ new_password = generate_random_password(32, 32)
+ self.kpasswd_exchange(tgt,
+ new_password,
+ expected_code,
+ expected_msg,
+ mode=self.KpasswdMode.SET)
+
+ # Change the password.
+ self.kpasswd_exchange(tgt,
+ new_password,
+ expected_code,
+ expected_msg,
+ mode=self.KpasswdMode.CHANGE)
+
+ # Show that we cannot provide a TGT to kpasswd that was obtained with a
+ # single‐component principal.
+ def test_kpasswd_tgt_single_component_krbtgt(self):
+ # Create an account for testing.
+ creds = self._get_creds()
+
+ # Create a single‐component principal of the form ‘krbtgt@REALM’.
+ sname = self.PrincipalName_create(name_type=NT_PRINCIPAL,
+ names=['krbtgt'])
+
+ # Don’t request canonicalization.
+ kdc_options = 'forwardable,renewable,renewable-ok'
+
+ # Get a TGT.
+ tgt = self.get_tgt(creds, sname=sname, kdc_options=kdc_options)
+
+ # Change the sname of the ticket to match that of kadmin/changepw.
+ tgt.set_sname(self.get_kpasswd_sname())
+
+ expected_code = KPASSWD_AUTHERROR
+ expected_msg = b'A TGT may not be used as a ticket to kpasswd'
+
+ # Set the password.
+ new_password = generate_random_password(32, 32)
+ self.kpasswd_exchange(tgt,
+ new_password,
+ expected_code,
+ expected_msg,
+ mode=self.KpasswdMode.SET)
+
+ # Change the password.
+ self.kpasswd_exchange(tgt,
+ new_password,
+ expected_code,
+ expected_msg,
+ mode=self.KpasswdMode.CHANGE)
+
+ # Test that kpasswd rejects requests with a service ticket.
+ def test_kpasswd_non_initial(self):
+ # Create an account for testing, and get a TGT.
+ creds = self._get_creds()
+ tgt = self.get_tgt(creds)
+
+ # Get a non-initial ticket to kpasswd.
+ krbtgt_creds = self.get_krbtgt_creds()
+ ticket = self.get_service_ticket(tgt,
+ krbtgt_creds,
+ service='kadmin',
+ target_name='changepw',
+ kdc_options='0')
+
+ expected_code = KPASSWD_INITIAL_FLAG_NEEDED
+ expected_msg = b'Expected an initial ticket'
+
+ # Set the password.
+ new_password = generate_random_password(32, 32)
+ self.kpasswd_exchange(ticket,
+ new_password,
+ expected_code,
+ expected_msg,
+ mode=self.KpasswdMode.SET)
+
+ # Change the password.
+ self.kpasswd_exchange(ticket,
+ new_password,
+ expected_code,
+ expected_msg,
+ mode=self.KpasswdMode.CHANGE)
+
+ # Show that kpasswd accepts requests with a service ticket modified to set
+ # the 'initial' flag.
+ def test_kpasswd_initial(self):
+ # Create an account for testing, and get a TGT.
+ creds = self._get_creds()
+
+ krbtgt_creds = self.get_krbtgt_creds()
+
+ # Get a service ticket, and modify it to set the 'initial' flag.
+ def get_ticket():
+ tgt = self.get_tgt(creds, fresh=True)
+
+ # Get a non-initial ticket to kpasswd.
+ ticket = self.get_service_ticket(tgt,
+ krbtgt_creds,
+ service='kadmin',
+ target_name='changepw',
+ kdc_options='0',
+ fresh=True)
+
+ set_initial_flag = partial(self.modify_ticket_flag, flag='initial',
+ value=True)
+
+ checksum_keys = self.get_krbtgt_checksum_key()
+ return self.modified_ticket(ticket,
+ modify_fn=set_initial_flag,
+ checksum_keys=checksum_keys)
+
+ expected_code = KPASSWD_SUCCESS
+ expected_msg = b'Password changed'
+
+ ticket = get_ticket()
+
+ # Set the password.
+ new_password = generate_random_password(32, 32)
+ self.kpasswd_exchange(ticket,
+ new_password,
+ expected_code,
+ expected_msg,
+ mode=self.KpasswdMode.SET)
+
+ creds.update_password(new_password)
+ ticket = get_ticket()
+
+ # Change the password.
+ new_password = generate_random_password(32, 32)
+ self.kpasswd_exchange(ticket,
+ new_password,
+ expected_code,
+ expected_msg,
+ mode=self.KpasswdMode.CHANGE)
+
+ # Test that kpasswd rejects requests where the ticket is encrypted with a
+ # key other than the krbtgt's.
+ def test_kpasswd_wrong_key(self):
+ # Create an account for testing.
+ creds = self._get_creds()
+
+ sname = self.get_kpasswd_sname()
+
+ # Get an initial ticket to kpasswd.
+ ticket = self.get_tgt(creds, sname=sname,
+ kdc_options='0')
+
+ # Get a key belonging to the Administrator account.
+ admin_creds = self.get_admin_creds()
+ admin_key = self.TicketDecryptionKey_from_creds(admin_creds)
+ self.assertIsNotNone(admin_key.kvno,
+ 'a kvno is required to tell the DB '
+ 'which key to look up.')
+ checksum_keys = {
+ krb5pac.PAC_TYPE_KDC_CHECKSUM: admin_key,
+ }
+
+ # Re-encrypt the ticket using the Administrator's key.
+ ticket = self.modified_ticket(ticket,
+ new_ticket_key=admin_key,
+ checksum_keys=checksum_keys)
+
+ # Set the sname of the ticket to that of the Administrator account.
+ admin_sname = self.PrincipalName_create(name_type=NT_PRINCIPAL,
+ names=['Administrator'])
+ ticket.set_sname(admin_sname)
+
+ expected_code = KPASSWD_HARDERROR
+ expected_msg = b'gensec_update failed - NT_STATUS_LOGON_FAILURE\n'
+
+ # Set the password.
+ new_password = generate_random_password(32, 32)
+ self.kpasswd_exchange(ticket,
+ new_password,
+ expected_code,
+ expected_msg,
+ mode=self.KpasswdMode.SET)
+
+ # Change the password.
+ self.kpasswd_exchange(ticket,
+ new_password,
+ expected_code,
+ expected_msg,
+ mode=self.KpasswdMode.CHANGE)
+
+ def test_kpasswd_wrong_key_service(self):
+ # Create an account for testing.
+ creds = self.get_cached_creds(account_type=self.AccountType.COMPUTER,
+ use_cache=False)
+
+ sname = self.get_kpasswd_sname()
+
+ # Get an initial ticket to kpasswd.
+ ticket = self.get_tgt(creds, sname=sname,
+ kdc_options='0')
+
+ # Get a key belonging to our account.
+ our_key = self.TicketDecryptionKey_from_creds(creds)
+ self.assertIsNotNone(our_key.kvno,
+ 'a kvno is required to tell the DB '
+ 'which key to look up.')
+ checksum_keys = {
+ krb5pac.PAC_TYPE_KDC_CHECKSUM: our_key,
+ }
+
+ # Re-encrypt the ticket using our key.
+ ticket = self.modified_ticket(ticket,
+ new_ticket_key=our_key,
+ checksum_keys=checksum_keys)
+
+ # Set the sname of the ticket to that of our account.
+ username = creds.get_username()
+ sname = self.PrincipalName_create(name_type=NT_PRINCIPAL,
+ names=username.split('/'))
+ ticket.set_sname(sname)
+
+ expected_code = KPASSWD_HARDERROR
+ expected_msg = b'gensec_update failed - NT_STATUS_LOGON_FAILURE\n'
+
+ # Set the password.
+ new_password = generate_random_password(32, 32)
+ self.kpasswd_exchange(ticket,
+ new_password,
+ expected_code,
+ expected_msg,
+ mode=self.KpasswdMode.SET)
+
+ # Change the password.
+ self.kpasswd_exchange(ticket,
+ new_password,
+ expected_code,
+ expected_msg,
+ mode=self.KpasswdMode.CHANGE)
+
+ # Test that kpasswd rejects requests where the ticket is encrypted with a
+ # key belonging to a server account other than the krbtgt.
+ def test_kpasswd_wrong_key_server(self):
+ # Create an account for testing.
+ creds = self._get_creds()
+
+ sname = self.get_kpasswd_sname()
+
+ # Get an initial ticket to kpasswd.
+ ticket = self.get_tgt(creds, sname=sname,
+ kdc_options='0')
+
+ # Get a key belonging to the DC's account.
+ dc_creds = self.get_dc_creds()
+ dc_key = self.TicketDecryptionKey_from_creds(dc_creds)
+ self.assertIsNotNone(dc_key.kvno,
+ 'a kvno is required to tell the DB '
+ 'which key to look up.')
+ checksum_keys = {
+ krb5pac.PAC_TYPE_KDC_CHECKSUM: dc_key,
+ }
+
+ # Re-encrypt the ticket using the DC's key.
+ ticket = self.modified_ticket(ticket,
+ new_ticket_key=dc_key,
+ checksum_keys=checksum_keys)
+
+ # Set the sname of the ticket to that of the DC's account.
+ dc_username = dc_creds.get_username()
+ dc_sname = self.PrincipalName_create(name_type=NT_PRINCIPAL,
+ names=dc_username.split('/'))
+ ticket.set_sname(dc_sname)
+
+ expected_code = KPASSWD_HARDERROR
+ expected_msg = b'gensec_update failed - NT_STATUS_LOGON_FAILURE\n'
+
+ # Set the password.
+ new_password = generate_random_password(32, 32)
+ self.kpasswd_exchange(ticket,
+ new_password,
+ expected_code,
+ expected_msg,
+ mode=self.KpasswdMode.SET)
+
+ # Change the password.
+ self.kpasswd_exchange(ticket,
+ new_password,
+ expected_code,
+ expected_msg,
+ mode=self.KpasswdMode.CHANGE)
+
+
+if __name__ == '__main__':
+ global_asn1_print = False
+ global_hexdump = False
+ import unittest
+ unittest.main()
diff --git a/python/samba/tests/krb5/lockout_tests.py b/python/samba/tests/krb5/lockout_tests.py
new file mode 100755
index 0000000..d91eb1d
--- /dev/null
+++ b/python/samba/tests/krb5/lockout_tests.py
@@ -0,0 +1,1137 @@
+#!/usr/bin/env python3
+# Unix SMB/CIFS implementation.
+# Copyright (C) Stefan Metzmacher 2020
+# Copyright (C) Catalyst.Net Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import sys
+import os
+
+sys.path.insert(0, 'bin/python')
+os.environ['PYTHONUNBUFFERED'] = '1'
+
+from concurrent import futures
+from enum import Enum
+from functools import partial
+from multiprocessing import Pipe
+import time
+
+from cryptography.hazmat.backends import default_backend
+from cryptography.hazmat.primitives.ciphers.base import Cipher
+from cryptography.hazmat.primitives.ciphers import algorithms
+
+import ldb
+
+from samba import (
+ NTSTATUSError,
+ dsdb,
+ generate_random_bytes,
+ generate_random_password,
+ ntstatus,
+ unix2nttime,
+ werror,
+)
+from samba.credentials import DONT_USE_KERBEROS, MUST_USE_KERBEROS
+from samba.crypto import (
+ aead_aes_256_cbc_hmac_sha512_blob,
+ des_crypt_blob_16,
+ md4_hash_blob,
+ sha512_pbkdf2,
+)
+from samba.dcerpc import lsa, samr
+from samba.samdb import SamDB
+
+from samba.tests import connect_samdb, env_get_var_value, env_loadparm
+
+from samba.tests.krb5.as_req_tests import AsReqBaseTest
+from samba.tests.krb5 import kcrypto
+from samba.tests.krb5.kdc_base_test import KDCBaseTest
+from samba.tests.krb5.raw_testcase import KerberosCredentials
+import samba.tests.krb5.rfc4120_pyasn1 as krb5_asn1
+from samba.tests.krb5.rfc4120_constants import (
+ KDC_ERR_CLIENT_REVOKED,
+ KDC_ERR_PREAUTH_FAILED,
+ KRB_AS_REP,
+ KRB_ERROR,
+ NT_PRINCIPAL,
+ NT_SRV_INST,
+)
+
+global_asn1_print = False
+global_hexdump = False
+
+
+class ConnectionResult(Enum):
+ LOCKED_OUT = 1
+ WRONG_PASSWORD = 2
+ SUCCESS = 3
+
+
+def connect_kdc(pipe,
+ url,
+ hostname,
+ username,
+ password,
+ domain,
+ realm,
+ workstation,
+ dn,
+ expect_error=True,
+ expect_status=None):
+ AsReqBaseTest.setUpClass()
+ as_req_base = AsReqBaseTest()
+ as_req_base.setUp()
+
+ user_creds = KerberosCredentials()
+ user_creds.set_username(username)
+ user_creds.set_password(password)
+ user_creds.set_domain(domain)
+ user_creds.set_realm(realm)
+ user_creds.set_workstation(workstation)
+ user_creds.set_kerberos_state(DONT_USE_KERBEROS)
+
+ user_name = user_creds.get_username()
+ cname = as_req_base.PrincipalName_create(name_type=NT_PRINCIPAL,
+ names=user_name.split('/'))
+
+ krbtgt_creds = as_req_base.get_krbtgt_creds()
+ krbtgt_supported_etypes = krbtgt_creds.tgs_supported_enctypes
+ realm = krbtgt_creds.get_realm()
+
+ krbtgt_account = krbtgt_creds.get_username()
+ sname = as_req_base.PrincipalName_create(name_type=NT_SRV_INST,
+ names=[krbtgt_account, realm])
+
+ expected_salt = user_creds.get_salt()
+
+ till = as_req_base.get_KerberosTime(offset=36000)
+
+ kdc_options = krb5_asn1.KDCOptions('postdated')
+
+ preauth_key = as_req_base.PasswordKey_from_creds(user_creds,
+ kcrypto.Enctype.AES256)
+
+ ts_enc_padata = as_req_base.get_enc_timestamp_pa_data_from_key(preauth_key)
+ padata = [ts_enc_padata]
+
+ krbtgt_decryption_key = (
+ as_req_base.TicketDecryptionKey_from_creds(krbtgt_creds))
+
+ etypes = as_req_base.get_default_enctypes(user_creds)
+
+ # Remove the LDAP connection.
+ del type(as_req_base)._ldb
+
+ if expect_error:
+ expected_error_modes = (KDC_ERR_CLIENT_REVOKED,
+ KDC_ERR_PREAUTH_FAILED)
+
+ # Wrap generic_check_kdc_error() to expect an NTSTATUS code when the
+ # account is locked out.
+ def check_error_fn(kdc_exchange_dict,
+ callback_dict,
+ rep):
+ error_code = rep.get('error-code')
+ if error_code == KDC_ERR_CLIENT_REVOKED:
+ # The account was locked out.
+ kdc_exchange_dict['expected_status'] = (
+ ntstatus.NT_STATUS_ACCOUNT_LOCKED_OUT)
+
+ if expect_status:
+ # Expect to get a LOCKED_OUT NTSTATUS code.
+ kdc_exchange_dict['expect_edata'] = True
+ kdc_exchange_dict['expect_status'] = True
+
+ elif error_code == KDC_ERR_PREAUTH_FAILED:
+ # Just a wrong password: the account wasn’t locked out. Don’t
+ # expect an NTSTATUS code.
+ kdc_exchange_dict['expect_status'] = False
+
+ # Continue with the generic error-checking logic.
+ return as_req_base.generic_check_kdc_error(
+ kdc_exchange_dict,
+ callback_dict,
+ rep)
+
+ check_rep_fn = None
+ else:
+ expected_error_modes = 0
+
+ check_error_fn = None
+ check_rep_fn = as_req_base.generic_check_kdc_rep
+
+ def _generate_padata_copy(_kdc_exchange_dict,
+ _callback_dict,
+ req_body):
+ return padata, req_body
+
+ kdc_exchange_dict = as_req_base.as_exchange_dict(
+ creds=user_creds,
+ expected_crealm=realm,
+ expected_cname=cname,
+ expected_srealm=realm,
+ expected_sname=sname,
+ expected_account_name=user_name,
+ expected_supported_etypes=krbtgt_supported_etypes,
+ ticket_decryption_key=krbtgt_decryption_key,
+ generate_padata_fn=_generate_padata_copy,
+ check_error_fn=check_error_fn,
+ check_rep_fn=check_rep_fn,
+ check_kdc_private_fn=as_req_base.generic_check_kdc_private,
+ expected_error_mode=expected_error_modes,
+ expected_salt=expected_salt,
+ preauth_key=preauth_key,
+ kdc_options=str(kdc_options),
+ pac_request=True)
+
+ # Indicate that we're ready. This ensures we hit the right transaction
+ # lock.
+ pipe.send_bytes(b'0')
+
+ # Wait for the main process to take out a transaction lock.
+ if not pipe.poll(timeout=5):
+ raise AssertionError('main process failed to indicate readiness')
+
+ # Try making a Kerberos AS-REQ to the KDC. This might fail, either due to
+ # the user's account being locked out or due to using the wrong password.
+ as_rep = as_req_base._generic_kdc_exchange(kdc_exchange_dict,
+ cname=cname,
+ realm=realm,
+ sname=sname,
+ till_time=till,
+ etypes=etypes)
+
+ as_req_base.assertIsNotNone(as_rep)
+
+ msg_type = as_rep['msg-type']
+ if expect_error and msg_type != KRB_ERROR or (
+ not expect_error and msg_type != KRB_AS_REP):
+ raise AssertionError(f'wrong message type {msg_type}')
+
+ if not expect_error:
+ return ConnectionResult.SUCCESS
+
+ error_code = as_rep['error-code']
+ if error_code == KDC_ERR_CLIENT_REVOKED:
+ return ConnectionResult.LOCKED_OUT
+ elif error_code == KDC_ERR_PREAUTH_FAILED:
+ return ConnectionResult.WRONG_PASSWORD
+ else:
+ raise AssertionError(f'wrong error code {error_code}')
+
+
+def connect_ntlm(pipe,
+ url,
+ hostname,
+ username,
+ password,
+ domain,
+ realm,
+ workstation,
+ dn):
+ user_creds = KerberosCredentials()
+ user_creds.set_username(username)
+ user_creds.set_password(password)
+ user_creds.set_domain(domain)
+ user_creds.set_workstation(workstation)
+ user_creds.set_kerberos_state(DONT_USE_KERBEROS)
+
+ # Indicate that we're ready. This ensures we hit the right transaction
+ # lock.
+ pipe.send_bytes(b'0')
+
+ # Wait for the main process to take out a transaction lock.
+ if not pipe.poll(timeout=5):
+ raise AssertionError('main process failed to indicate readiness')
+
+ try:
+ # Try connecting to SamDB. This should fail, either due to our
+ # account being locked out or due to using the wrong password.
+ SamDB(url=url,
+ credentials=user_creds,
+ lp=env_loadparm())
+ except ldb.LdbError as err:
+ num, estr = err.args
+
+ if num != ldb.ERR_INVALID_CREDENTIALS:
+ raise AssertionError(f'connection raised wrong error code '
+ f'({err})')
+
+ if f'data {werror.WERR_ACCOUNT_LOCKED_OUT:x},' in estr:
+ return ConnectionResult.LOCKED_OUT
+ elif f'data {werror.WERR_LOGON_FAILURE:x},' in estr:
+ return ConnectionResult.WRONG_PASSWORD
+ else:
+ raise AssertionError(f'connection raised wrong error code '
+ f'({estr})')
+ else:
+ return ConnectionResult.SUCCESS
+
+
+def connect_samr(pipe,
+ url,
+ hostname,
+ username,
+ password,
+ domain,
+ realm,
+ workstation,
+ dn):
+ # Get the user's NT hash.
+ user_creds = KerberosCredentials()
+ user_creds.set_password(password)
+ nt_hash = user_creds.get_nt_hash()
+
+ # Generate a new UTF-16 password.
+ new_password = generate_random_password(32, 32)
+ new_password = new_password.encode('utf-16le')
+
+ # Generate the MD4 hash of the password.
+ new_password_md4 = md4_hash_blob(new_password)
+
+ # Prefix the password with padding so it is 512 bytes long.
+ new_password_len = len(new_password)
+ remaining_len = 512 - new_password_len
+ new_password = bytes(remaining_len) + new_password
+
+ # Append the 32-bit length of the password.
+ new_password += int.to_bytes(new_password_len,
+ length=4,
+ byteorder='little')
+
+ # Encrypt the password with RC4 and the existing NT hash.
+ encryptor = Cipher(algorithms.ARC4(nt_hash),
+ None,
+ default_backend()).encryptor()
+ new_password = encryptor.update(new_password)
+
+ # Create a key from the MD4 hash of the new password.
+ key = new_password_md4[:14]
+
+ # Encrypt the old NT hash with DES to obtain the verifier.
+ verifier = des_crypt_blob_16(nt_hash, key)
+
+ server = lsa.String()
+ server.string = hostname
+
+ account = lsa.String()
+ account.string = username
+
+ nt_password = samr.CryptPassword()
+ nt_password.data = list(new_password)
+
+ nt_verifier = samr.Password()
+ nt_verifier.hash = list(verifier)
+
+ conn = samr.samr(f'ncacn_np:{hostname}[krb5,seal,smb2]')
+
+ # Indicate that we're ready. This ensures we hit the right transaction
+ # lock.
+ pipe.send_bytes(b'0')
+
+ # Wait for the main process to take out a transaction lock.
+ if not pipe.poll(timeout=5):
+ raise AssertionError('main process failed to indicate readiness')
+
+ try:
+ # Try changing the password. This should fail, either due to our
+ # account being locked out or due to using the wrong password.
+ conn.ChangePasswordUser3(server=server,
+ account=account,
+ nt_password=nt_password,
+ nt_verifier=nt_verifier,
+ lm_change=True,
+ lm_password=None,
+ lm_verifier=None,
+ password3=None)
+ except NTSTATUSError as err:
+ num, estr = err.args
+
+ if num == ntstatus.NT_STATUS_ACCOUNT_LOCKED_OUT:
+ return ConnectionResult.LOCKED_OUT
+ elif num == ntstatus.NT_STATUS_WRONG_PASSWORD:
+ return ConnectionResult.WRONG_PASSWORD
+ else:
+ raise AssertionError(f'pwd change raised wrong error code '
+ f'({num:08X})')
+ else:
+ return ConnectionResult.SUCCESS
+
+
+def connect_samr_aes(pipe,
+ url,
+ hostname,
+ username,
+ password,
+ domain,
+ realm,
+ workstation,
+ dn):
+ # Get the user's NT hash.
+ user_creds = KerberosCredentials()
+ user_creds.set_password(password)
+ nt_hash = user_creds.get_nt_hash()
+
+ # Generate a new UTF-16 password.
+ new_password = generate_random_password(32, 32)
+ new_password = new_password.encode('utf-16le')
+
+ # Prepend the 16-bit length of the password.
+ new_password_len = int.to_bytes(len(new_password),
+ length=2,
+ byteorder='little')
+ new_password = new_password_len + new_password
+
+ server = lsa.String()
+ server.string = hostname
+
+ account = lsa.String()
+ account.string = username
+
+ # Derive a key from the user's NT hash.
+ iv = generate_random_bytes(16)
+ iterations = 5555
+ cek = sha512_pbkdf2(nt_hash, iv, iterations)
+
+ enc_key_salt = (b'Microsoft SAM encryption key '
+ b'AEAD-AES-256-CBC-HMAC-SHA512 16\0')
+ mac_key_salt = (b'Microsoft SAM MAC key '
+ b'AEAD-AES-256-CBC-HMAC-SHA512 16\0')
+
+ # Encrypt the new password.
+ ciphertext, auth_data = aead_aes_256_cbc_hmac_sha512_blob(new_password,
+ cek,
+ enc_key_salt,
+ mac_key_salt,
+ iv)
+
+ # Create the new password structure
+ pwd_buf = samr.EncryptedPasswordAES()
+ pwd_buf.auth_data = list(auth_data)
+ pwd_buf.salt = list(iv)
+ pwd_buf.cipher_len = len(ciphertext)
+ pwd_buf.cipher = list(ciphertext)
+ pwd_buf.PBKDF2Iterations = iterations
+
+ conn = samr.samr(f'ncacn_np:{hostname}[krb5,seal,smb2]')
+
+ # Indicate that we're ready. This ensures we hit the right transaction
+ # lock.
+ pipe.send_bytes(b'0')
+
+ # Wait for the main process to take out a transaction lock.
+ if not pipe.poll(timeout=5):
+ raise AssertionError('main process failed to indicate readiness')
+
+ try:
+ # Try changing the password. This should fail, either due to our
+ # account being locked out or due to using the wrong password.
+ conn.ChangePasswordUser4(server=server,
+ account=account,
+ password=pwd_buf)
+ except NTSTATUSError as err:
+ num, estr = err.args
+
+ if num == ntstatus.NT_STATUS_ACCOUNT_LOCKED_OUT:
+ return ConnectionResult.LOCKED_OUT
+ elif num == ntstatus.NT_STATUS_WRONG_PASSWORD:
+ return ConnectionResult.WRONG_PASSWORD
+ else:
+ raise AssertionError(f'pwd change raised wrong error code '
+ f'({num:08X})')
+ else:
+ return ConnectionResult.SUCCESS
+
+
+def ldap_pwd_change(pipe,
+ url,
+ hostname,
+ username,
+ password,
+ domain,
+ realm,
+ workstation,
+ dn):
+ lp = env_loadparm()
+
+ admin_creds = KerberosCredentials()
+ admin_creds.guess(lp)
+ admin_creds.set_username(env_get_var_value('ADMIN_USERNAME'))
+ admin_creds.set_password(env_get_var_value('ADMIN_PASSWORD'))
+ admin_creds.set_kerberos_state(MUST_USE_KERBEROS)
+
+ samdb = SamDB(url=url,
+ credentials=admin_creds,
+ lp=lp)
+
+ old_utf16pw = f'"{password}"'.encode('utf-16le')
+
+ new_password = generate_random_password(32, 32)
+ new_utf16pw = f'"{new_password}"'.encode('utf-16le')
+
+ msg = ldb.Message(ldb.Dn(samdb, dn))
+ msg['0'] = ldb.MessageElement(old_utf16pw,
+ ldb.FLAG_MOD_DELETE,
+ 'unicodePwd')
+ msg['1'] = ldb.MessageElement(new_utf16pw,
+ ldb.FLAG_MOD_ADD,
+ 'unicodePwd')
+
+ # Indicate that we're ready. This ensures we hit the right transaction
+ # lock.
+ pipe.send_bytes(b'0')
+
+ # Wait for the main process to take out a transaction lock.
+ if not pipe.poll(timeout=5):
+ raise AssertionError('main process failed to indicate readiness')
+
+ # Try changing the user's password. This should fail, either due to the
+ # user's account being locked out or due to specifying the wrong password.
+ try:
+ samdb.modify(msg)
+ except ldb.LdbError as err:
+ num, estr = err.args
+ if num != ldb.ERR_CONSTRAINT_VIOLATION:
+ raise AssertionError(f'pwd change raised wrong error code ({err})')
+
+ if f'<{werror.WERR_ACCOUNT_LOCKED_OUT:08X}:' in estr:
+ return ConnectionResult.LOCKED_OUT
+ elif f'<{werror.WERR_INVALID_PASSWORD:08X}:' in estr:
+ return ConnectionResult.WRONG_PASSWORD
+ else:
+ raise AssertionError(f'pwd change raised wrong error code '
+ f'({estr})')
+ else:
+ return ConnectionResult.SUCCESS
+
+
+class LockoutTests(KDCBaseTest):
+
+ def setUp(self):
+ super().setUp()
+ self.do_asn1_print = global_asn1_print
+ self.do_hexdump = global_hexdump
+
+ samdb = self.get_samdb()
+ base_dn = ldb.Dn(samdb, samdb.domain_dn())
+
+ def modify_attr(attr, value):
+ if value is None:
+ value = []
+ flag = ldb.FLAG_MOD_DELETE
+ else:
+ value = str(value)
+ flag = ldb.FLAG_MOD_REPLACE
+
+ msg = ldb.Message(base_dn)
+ msg[attr] = ldb.MessageElement(
+ value, flag, attr)
+ samdb.modify(msg)
+
+ res = samdb.search(base_dn,
+ scope=ldb.SCOPE_BASE,
+ attrs=['lockoutDuration',
+ 'lockoutThreshold',
+ 'msDS-LogonTimeSyncInterval'])
+ self.assertEqual(1, len(res))
+
+ # Reset the lockout duration as it was before.
+ lockout_duration = res[0].get('lockoutDuration', idx=0)
+ self.addCleanup(modify_attr, 'lockoutDuration', lockout_duration)
+
+ # Set the new lockout duration: locked out accounts now stay locked
+ # out.
+ modify_attr('lockoutDuration', 0)
+
+ # Reset the lockout threshold as it was before.
+ lockout_threshold = res[0].get('lockoutThreshold', idx=0)
+ self.addCleanup(modify_attr, 'lockoutThreshold', lockout_threshold)
+
+ # Set the new lockout threshold.
+ self.lockout_threshold = 3
+ modify_attr('lockoutThreshold', self.lockout_threshold)
+
+ # Reset the logon time sync interval as it was before.
+ sync_interval = res[0].get('msDS-LogonTimeSyncInterval', idx=0)
+ self.addCleanup(modify_attr,
+ 'msDS-LogonTimeSyncInterval',
+ sync_interval)
+
+ # Set the new logon time sync interval. Setting it to 0 eliminates the
+ # need for this attribute to be updated on logon, and thus the
+ # requirement to take out a transaction.
+ modify_attr('msDS-LogonTimeSyncInterval', 0)
+
+ # Get the old 'minPwdAge'.
+ minPwdAge = samdb.get_minPwdAge()
+
+ # Reset the 'minPwdAge' as it was before.
+ self.addCleanup(samdb.set_minPwdAge, minPwdAge)
+
+ # Set it temporarily to '0'.
+ samdb.set_minPwdAge('0')
+
+ def assertLocalSamDB(self, samdb):
+ if samdb.url.startswith('tdb://'):
+ return
+ if samdb.url.startswith('mdb://'):
+ return
+
+ self.fail(f'connection to {samdb.url} is not local!')
+
+ def wait_for_ready(self, pipe, future):
+ if pipe.poll(timeout=5):
+ return
+
+ # We failed to read a response from the pipe, so see if the test raised
+ # an exception with more information.
+ if future.done():
+ exception = future.exception(timeout=0)
+ if exception is not None:
+ raise exception
+
+ self.fail('test failed to indicate readiness')
+
+ def test_lockout_transaction_kdc(self):
+ self.do_lockout_transaction(connect_kdc)
+
+ def test_lockout_transaction_kdc_ntstatus(self):
+ self.do_lockout_transaction(partial(connect_kdc, expect_status=True))
+
+ def test_lockout_transaction_ntlm(self):
+ self.do_lockout_transaction(connect_ntlm)
+
+ def test_lockout_transaction_samr(self):
+ self.do_lockout_transaction(connect_samr)
+
+ def test_lockout_transaction_samr_aes(self):
+ self.do_lockout_transaction(connect_samr_aes)
+
+ def test_lockout_transaction_ldap_pw_change(self):
+ self.do_lockout_transaction(ldap_pwd_change)
+
+ # Tests to ensure we can handle the account being renamed. We do not test
+ # renames with SAMR password changes, because in that case the entire
+ # process happens inside a transaction, and the password change method only
+ # receives the account username. By the time it searches for the account,
+ # it will have already been renamed, and so it will always fail to find the
+ # account.
+
+ def test_lockout_transaction_rename_kdc(self):
+ self.do_lockout_transaction(connect_kdc, rename=True)
+
+ def test_lockout_transaction_rename_kdc_ntstatus(self):
+ self.do_lockout_transaction(partial(connect_kdc, expect_status=True),
+ rename=True)
+
+ def test_lockout_transaction_rename_ntlm(self):
+ self.do_lockout_transaction(connect_ntlm, rename=True)
+
+ def test_lockout_transaction_rename_ldap_pw_change(self):
+ self.do_lockout_transaction(ldap_pwd_change, rename=True)
+
+ def test_lockout_transaction_bad_pwd_kdc(self):
+ self.do_lockout_transaction(connect_kdc, correct_pw=False)
+
+ def test_lockout_transaction_bad_pwd_kdc_ntstatus(self):
+ self.do_lockout_transaction(partial(connect_kdc, expect_status=True),
+ correct_pw=False)
+
+ def test_lockout_transaction_bad_pwd_ntlm(self):
+ self.do_lockout_transaction(connect_ntlm, correct_pw=False)
+
+ def test_lockout_transaction_bad_pwd_samr(self):
+ self.do_lockout_transaction(connect_samr, correct_pw=False)
+
+ def test_lockout_transaction_bad_pwd_samr_aes(self):
+ self.do_lockout_transaction(connect_samr_aes, correct_pw=False)
+
+ def test_lockout_transaction_bad_pwd_ldap_pw_change(self):
+ self.do_lockout_transaction(ldap_pwd_change, correct_pw=False)
+
+ def test_bad_pwd_count_transaction_kdc(self):
+ self.do_bad_pwd_count_transaction(connect_kdc)
+
+ def test_bad_pwd_count_transaction_ntlm(self):
+ self.do_bad_pwd_count_transaction(connect_ntlm)
+
+ def test_bad_pwd_count_transaction_samr(self):
+ self.do_bad_pwd_count_transaction(connect_samr)
+
+ def test_bad_pwd_count_transaction_samr_aes(self):
+ self.do_bad_pwd_count_transaction(connect_samr_aes)
+
+ def test_bad_pwd_count_transaction_ldap_pw_change(self):
+ self.do_bad_pwd_count_transaction(ldap_pwd_change)
+
+ def test_bad_pwd_count_transaction_rename_kdc(self):
+ self.do_bad_pwd_count_transaction(connect_kdc, rename=True)
+
+ def test_bad_pwd_count_transaction_rename_ntlm(self):
+ self.do_bad_pwd_count_transaction(connect_ntlm, rename=True)
+
+ def test_bad_pwd_count_transaction_rename_ldap_pw_change(self):
+ self.do_bad_pwd_count_transaction(ldap_pwd_change, rename=True)
+
+ def test_lockout_race_kdc(self):
+ self.do_lockout_race(connect_kdc)
+
+ def test_lockout_race_kdc_ntstatus(self):
+ self.do_lockout_race(partial(connect_kdc, expect_status=True))
+
+ def test_lockout_race_ntlm(self):
+ self.do_lockout_race(connect_ntlm)
+
+ def test_lockout_race_samr(self):
+ self.do_lockout_race(connect_samr)
+
+ def test_lockout_race_samr_aes(self):
+ self.do_lockout_race(connect_samr_aes)
+
+ def test_lockout_race_ldap_pw_change(self):
+ self.do_lockout_race(ldap_pwd_change)
+
+ def test_logon_without_transaction_ntlm(self):
+ self.do_logon_without_transaction(connect_ntlm)
+
+ # Tests to ensure that the connection functions work correctly in the happy
+ # path.
+
+ def test_logon_kdc(self):
+ self.do_logon(partial(connect_kdc, expect_error=False))
+
+ def test_logon_ntlm(self):
+ self.do_logon(connect_ntlm)
+
+ def test_logon_samr(self):
+ self.do_logon(connect_samr)
+
+ def test_logon_samr_aes(self):
+ self.do_logon(connect_samr_aes)
+
+ def test_logon_ldap_pw_change(self):
+ self.do_logon(ldap_pwd_change)
+
+ # Test that connection with a correct password works.
+ def do_logon(self, connect_fn):
+ # Create the user account for testing.
+ user_creds = self.get_cached_creds(account_type=self.AccountType.USER,
+ use_cache=False)
+ user_dn = user_creds.get_dn()
+
+ admin_creds = self.get_admin_creds()
+ lp = self.get_lp()
+
+ # Get a connection to our local SamDB.
+ samdb = connect_samdb(samdb_url=lp.samdb_url(), lp=lp,
+ credentials=admin_creds)
+ self.assertLocalSamDB(samdb)
+
+ password = user_creds.get_password()
+
+ # Prepare to connect to the server with a valid password.
+ our_pipe, their_pipe = Pipe(duplex=True)
+
+ # Inform the test function that it may proceed.
+ our_pipe.send_bytes(b'0')
+
+ result = connect_fn(pipe=their_pipe,
+ url=f'ldap://{samdb.host_dns_name()}',
+ hostname=samdb.host_dns_name(),
+ username=user_creds.get_username(),
+ password=password,
+ domain=user_creds.get_domain(),
+ realm=user_creds.get_realm(),
+ workstation=user_creds.get_workstation(),
+ dn=str(user_dn))
+
+ # The connection should succeed.
+ self.assertEqual(result, ConnectionResult.SUCCESS)
+
+ # Lock out the account while holding a transaction lock, then release the
+ # lock. A logon attempt already in progress should reread the account
+ # details and recognise the account is locked out. The account can
+ # additionally be renamed within the transaction to ensure that, by using
+ # the GUID, rereading the account's details still succeeds.
+ def do_lockout_transaction(self, connect_fn,
+ rename=False,
+ correct_pw=True):
+ # Create the user account for testing.
+ user_creds = self.get_cached_creds(account_type=self.AccountType.USER,
+ use_cache=False)
+ user_dn = user_creds.get_dn()
+
+ admin_creds = self.get_admin_creds()
+ lp = self.get_lp()
+
+ # Get a connection to our local SamDB.
+ samdb = connect_samdb(samdb_url=lp.samdb_url(), lp=lp,
+ credentials=admin_creds)
+ self.assertLocalSamDB(samdb)
+
+ password = user_creds.get_password()
+ if not correct_pw:
+ password = password[:-1]
+
+ # Prepare to connect to the server.
+ with futures.ProcessPoolExecutor(max_workers=1) as executor:
+ our_pipe, their_pipe = Pipe(duplex=True)
+ connect_future = executor.submit(
+ connect_fn,
+ pipe=their_pipe,
+ url=f'ldap://{samdb.host_dns_name()}',
+ hostname=samdb.host_dns_name(),
+ username=user_creds.get_username(),
+ password=password,
+ domain=user_creds.get_domain(),
+ realm=user_creds.get_realm(),
+ workstation=user_creds.get_workstation(),
+ dn=str(user_dn))
+
+ # Wait until the test process indicates it's ready.
+ self.wait_for_ready(our_pipe, connect_future)
+
+ # Take out a transaction.
+ samdb.transaction_start()
+ try:
+ # Lock out the account. We must do it using an actual password
+ # check like so, rather than directly with a database
+ # modification, so that the account is also added to the
+ # auxiliary bad password database.
+
+ old_utf16pw = '"Secret007"'.encode('utf-16le') # invalid pwd
+ new_utf16pw = '"Secret008"'.encode('utf-16le')
+
+ msg = ldb.Message(user_dn)
+ msg['0'] = ldb.MessageElement(old_utf16pw,
+ ldb.FLAG_MOD_DELETE,
+ 'unicodePwd')
+ msg['1'] = ldb.MessageElement(new_utf16pw,
+ ldb.FLAG_MOD_ADD,
+ 'unicodePwd')
+
+ for i in range(self.lockout_threshold):
+ try:
+ samdb.modify(msg)
+ except ldb.LdbError as err:
+ num, estr = err.args
+
+ # We get an error, but the bad password count should
+ # still be updated.
+ self.assertEqual(num, ldb.ERR_OPERATIONS_ERROR)
+ self.assertEqual('Failed to obtain remote address for '
+ 'the LDAP client while changing the '
+ 'password',
+ estr)
+ else:
+ self.fail('pwd change should have failed')
+
+ # Ensure the account is locked out.
+
+ res = samdb.search(
+ user_dn, scope=ldb.SCOPE_BASE,
+ attrs=['msDS-User-Account-Control-Computed'])
+ self.assertEqual(1, len(res))
+
+ uac = int(res[0].get('msDS-User-Account-Control-Computed',
+ idx=0))
+ self.assertTrue(uac & dsdb.UF_LOCKOUT)
+
+ # Now the bad password database has been updated, inform the
+ # test process that it may proceed.
+ our_pipe.send_bytes(b'0')
+
+ # Wait one second to ensure the test process hits the
+ # transaction lock.
+ time.sleep(1)
+
+ if rename:
+ # While we're at it, rename the account to ensure that is
+ # also safe if a race occurs.
+ msg = ldb.Message(user_dn)
+ new_username = self.get_new_username()
+ msg['sAMAccountName'] = ldb.MessageElement(
+ new_username,
+ ldb.FLAG_MOD_REPLACE,
+ 'sAMAccountName')
+ samdb.modify(msg)
+
+ except Exception:
+ samdb.transaction_cancel()
+ raise
+
+ # Commit the local transaction.
+ samdb.transaction_commit()
+
+ result = connect_future.result(timeout=5)
+ self.assertEqual(result, ConnectionResult.LOCKED_OUT)
+
+ # Update the bad password count while holding a transaction lock, then
+ # release the lock. A logon attempt already in progress should reread the
+ # account details and ensure the bad password count is atomically
+ # updated. The account can additionally be renamed within the transaction
+ # to ensure that, by using the GUID, rereading the account's details still
+ # succeeds.
+ def do_bad_pwd_count_transaction(self, connect_fn, rename=False):
+ # Create the user account for testing.
+ user_creds = self.get_cached_creds(account_type=self.AccountType.USER,
+ use_cache=False)
+ user_dn = user_creds.get_dn()
+
+ admin_creds = self.get_admin_creds()
+ lp = self.get_lp()
+
+ # Get a connection to our local SamDB.
+ samdb = connect_samdb(samdb_url=lp.samdb_url(), lp=lp,
+ credentials=admin_creds)
+ self.assertLocalSamDB(samdb)
+
+ # Prepare to connect to the server with an invalid password.
+ with futures.ProcessPoolExecutor(max_workers=1) as executor:
+ our_pipe, their_pipe = Pipe(duplex=True)
+ connect_future = executor.submit(
+ connect_fn,
+ pipe=their_pipe,
+ url=f'ldap://{samdb.host_dns_name()}',
+ hostname=samdb.host_dns_name(),
+ username=user_creds.get_username(),
+ password=user_creds.get_password()[:-1], # invalid password
+ domain=user_creds.get_domain(),
+ realm=user_creds.get_realm(),
+ workstation=user_creds.get_workstation(),
+ dn=str(user_dn))
+
+ # Wait until the test process indicates it's ready.
+ self.wait_for_ready(our_pipe, connect_future)
+
+ # Take out a transaction.
+ samdb.transaction_start()
+ try:
+ # Inform the test process that it may proceed.
+ our_pipe.send_bytes(b'0')
+
+ # Wait one second to ensure the test process hits the
+ # transaction lock.
+ time.sleep(1)
+
+ # Set badPwdCount to 1.
+ msg = ldb.Message(user_dn)
+ now = int(time.time())
+ bad_pwd_time = unix2nttime(now)
+ msg['badPwdCount'] = ldb.MessageElement(
+ '1',
+ ldb.FLAG_MOD_REPLACE,
+ 'badPwdCount')
+ msg['badPasswordTime'] = ldb.MessageElement(
+ str(bad_pwd_time),
+ ldb.FLAG_MOD_REPLACE,
+ 'badPasswordTime')
+ if rename:
+ # While we're at it, rename the account to ensure that is
+ # also safe if a race occurs.
+ new_username = self.get_new_username()
+ msg['sAMAccountName'] = ldb.MessageElement(
+ new_username,
+ ldb.FLAG_MOD_REPLACE,
+ 'sAMAccountName')
+ samdb.modify(msg)
+
+ # Ensure the account is not yet locked out.
+
+ res = samdb.search(
+ user_dn, scope=ldb.SCOPE_BASE,
+ attrs=['msDS-User-Account-Control-Computed'])
+ self.assertEqual(1, len(res))
+
+ uac = int(res[0].get('msDS-User-Account-Control-Computed',
+ idx=0))
+ self.assertFalse(uac & dsdb.UF_LOCKOUT)
+ except Exception:
+ samdb.transaction_cancel()
+ raise
+
+ # Commit the local transaction.
+ samdb.transaction_commit()
+
+ result = connect_future.result(timeout=5)
+ self.assertEqual(result, ConnectionResult.WRONG_PASSWORD, result)
+
+ # Check that badPwdCount has now increased to 2.
+
+ res = samdb.search(user_dn,
+ scope=ldb.SCOPE_BASE,
+ attrs=['badPwdCount'])
+ self.assertEqual(1, len(res))
+
+ bad_pwd_count = int(res[0].get('badPwdCount', idx=0))
+ self.assertEqual(2, bad_pwd_count)
+
+ # Attempt to log in to the account with an incorrect password, using
+ # lockoutThreshold+1 simultaneous attempts. We should get three 'wrong
+ # password' errors and one 'locked out' error, showing that the bad
+ # password count is checked and incremented atomically.
+ def do_lockout_race(self, connect_fn):
+ # Create the user account for testing.
+ user_creds = self.get_cached_creds(account_type=self.AccountType.USER,
+ use_cache=False)
+ user_dn = user_creds.get_dn()
+
+ admin_creds = self.get_admin_creds()
+ lp = self.get_lp()
+
+ # Get a connection to our local SamDB.
+ samdb = connect_samdb(samdb_url=lp.samdb_url(), lp=lp,
+ credentials=admin_creds)
+ self.assertLocalSamDB(samdb)
+
+ # Prepare to connect to the server with an invalid password, using four
+ # simultaneous requests. Only three of those attempts should get
+ # through before the account is locked out.
+ num_attempts = self.lockout_threshold + 1
+ with futures.ProcessPoolExecutor(max_workers=num_attempts) as executor:
+ connect_futures = []
+ our_pipes = []
+ for i in range(num_attempts):
+ our_pipe, their_pipe = Pipe(duplex=True)
+ our_pipes.append(our_pipe)
+
+ connect_future = executor.submit(
+ connect_fn,
+ pipe=their_pipe,
+ url=f'ldap://{samdb.host_dns_name()}',
+ hostname=samdb.host_dns_name(),
+ username=user_creds.get_username(),
+ password=user_creds.get_password()[:-1], # invalid pw
+ domain=user_creds.get_domain(),
+ realm=user_creds.get_realm(),
+ workstation=user_creds.get_workstation(),
+ dn=str(user_dn))
+ connect_futures.append(connect_future)
+
+ # Wait until the test process indicates it's ready.
+ self.wait_for_ready(our_pipe, connect_future)
+
+ # Take out a transaction.
+ samdb.transaction_start()
+ try:
+ # Inform the test processes that they may proceed.
+ for our_pipe in our_pipes:
+ our_pipe.send_bytes(b'0')
+
+ # Wait one second to ensure the test processes hit the
+ # transaction lock.
+ time.sleep(1)
+ except Exception:
+ samdb.transaction_cancel()
+ raise
+
+ # Commit the local transaction.
+ samdb.transaction_commit()
+
+ lockouts = 0
+ wrong_passwords = 0
+ for i, connect_future in enumerate(connect_futures):
+ result = connect_future.result(timeout=5)
+ if result == ConnectionResult.LOCKED_OUT:
+ lockouts += 1
+ elif result == ConnectionResult.WRONG_PASSWORD:
+ wrong_passwords += 1
+ else:
+ self.fail(f'process {i} gave an unexpected result '
+ f'{result}')
+
+ self.assertEqual(wrong_passwords, self.lockout_threshold)
+ self.assertEqual(lockouts, num_attempts - self.lockout_threshold)
+
+ # Ensure the account is now locked out.
+
+ res = samdb.search(
+ user_dn, scope=ldb.SCOPE_BASE,
+ attrs=['badPwdCount',
+ 'msDS-User-Account-Control-Computed'])
+ self.assertEqual(1, len(res))
+
+ bad_pwd_count = int(res[0].get('badPwdCount', idx=0))
+ self.assertEqual(self.lockout_threshold, bad_pwd_count)
+
+ uac = int(res[0].get('msDS-User-Account-Control-Computed',
+ idx=0))
+ self.assertTrue(uac & dsdb.UF_LOCKOUT)
+
+ # Test that logon is possible even while we locally hold a transaction
+ # lock. This test only works with NTLM authentication; Kerberos
+ # authentication must take out a transaction to update the logonCount
+ # attribute, and LDAP and SAMR password changes both take out a transaction
+ # to effect the password change. NTLM is the only logon method that does
+ # not require a transaction, and can thus be performed while we're holding
+ # the lock.
+ def do_logon_without_transaction(self, connect_fn):
+ # Create the user account for testing.
+ user_creds = self.get_cached_creds(account_type=self.AccountType.USER,
+ use_cache=False)
+ user_dn = user_creds.get_dn()
+
+ admin_creds = self.get_admin_creds()
+ lp = self.get_lp()
+
+ # Get a connection to our local SamDB.
+ samdb = connect_samdb(samdb_url=lp.samdb_url(), lp=lp,
+ credentials=admin_creds)
+ self.assertLocalSamDB(samdb)
+
+ password = user_creds.get_password()
+
+ # Prepare to connect to the server with a valid password.
+ with futures.ProcessPoolExecutor(max_workers=1) as executor:
+ our_pipe, their_pipe = Pipe(duplex=True)
+ connect_future = executor.submit(
+ connect_fn,
+ pipe=their_pipe,
+ url=f'ldap://{samdb.host_dns_name()}',
+ hostname=samdb.host_dns_name(),
+ username=user_creds.get_username(),
+ password=password,
+ domain=user_creds.get_domain(),
+ realm=user_creds.get_realm(),
+ workstation=user_creds.get_workstation(),
+ dn=str(user_dn))
+
+ # Wait until the test process indicates it's ready.
+ self.wait_for_ready(our_pipe, connect_future)
+
+ # Take out a transaction.
+ samdb.transaction_start()
+ try:
+ # Inform the test process that it may proceed.
+ our_pipe.send_bytes(b'0')
+
+ # The connection should succeed, despite our holding a
+ # transaction.
+ result = connect_future.result(timeout=5)
+ self.assertEqual(result, ConnectionResult.SUCCESS)
+ except Exception:
+ samdb.transaction_cancel()
+ raise
+
+ # Commit the local transaction.
+ samdb.transaction_commit()
+
+
+if __name__ == '__main__':
+ global_asn1_print = False
+ global_hexdump = False
+ import unittest
+ unittest.main()
diff --git a/python/samba/tests/krb5/ms_kile_client_principal_lookup_tests.py b/python/samba/tests/krb5/ms_kile_client_principal_lookup_tests.py
new file mode 100755
index 0000000..4feb3bb
--- /dev/null
+++ b/python/samba/tests/krb5/ms_kile_client_principal_lookup_tests.py
@@ -0,0 +1,818 @@
+#!/usr/bin/env python3
+# Unix SMB/CIFS implementation.
+# Copyright (C) Stefan Metzmacher 2020
+# Copyright (C) 2020 Catalyst.Net Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import sys
+import os
+
+sys.path.insert(0, "bin/python")
+os.environ["PYTHONUNBUFFERED"] = "1"
+
+from samba.dsdb import UF_DONT_REQUIRE_PREAUTH
+from samba.tests.krb5.kdc_base_test import KDCBaseTest
+from samba.tests.krb5.rfc4120_constants import (
+ AES256_CTS_HMAC_SHA1_96,
+ ARCFOUR_HMAC_MD5,
+ NT_ENTERPRISE_PRINCIPAL,
+ NT_PRINCIPAL,
+ NT_SRV_INST,
+ KDC_ERR_C_PRINCIPAL_UNKNOWN,
+ KDC_ERR_TGT_REVOKED,
+)
+
+global_asn1_print = False
+global_hexdump = False
+
+
+class MS_Kile_Client_Principal_Lookup_Tests(KDCBaseTest):
+ """ Tests for MS-KILE client principal look-up
+ See [MS-KILE]: Kerberos Protocol Extensions
+ section 3.3.5.6.1 Client Principal Lookup
+ """
+
+ def setUp(self):
+ super().setUp()
+ self.do_asn1_print = global_asn1_print
+ self.do_hexdump = global_hexdump
+
+ def check_pac(self, samdb, auth_data, uc, name, upn=None):
+
+ pac_data = self.get_pac_data(auth_data)
+ if upn is None:
+ upn = "%s@%s" % (name, uc.get_realm().lower())
+ if name.endswith('$'):
+ name = name[:-1]
+
+ self.assertEqual(
+ uc.get_username(),
+ str(pac_data.account_name),
+ "pac_data = {%s}" % str(pac_data))
+ self.assertEqual(
+ name,
+ pac_data.logon_name,
+ "pac_data = {%s}" % str(pac_data))
+ self.assertEqual(
+ uc.get_realm(),
+ pac_data.domain_name,
+ "pac_data = {%s}" % str(pac_data))
+ self.assertEqual(
+ upn,
+ pac_data.upn,
+ "pac_data = {%s}" % str(pac_data))
+ self.assertEqual(
+ uc.get_sid(),
+ pac_data.account_sid,
+ "pac_data = {%s}" % str(pac_data))
+
+ def test_nt_principal_step_1(self):
+ """ Step 1
+ For an NT_PRINCIPAL cname with no realm or the realm matches the
+ DC's domain
+ search for an account with the
+ sAMAccountName matching the cname.
+ """
+
+ # Create user and machine accounts for the test.
+ #
+ samdb = self.get_samdb()
+ user_name = "mskileusr"
+ (uc, dn) = self.create_account(samdb, user_name)
+ realm = uc.get_realm().lower()
+
+ mach_name = "mskilemac"
+ (mc, _) = self.create_account(samdb, mach_name,
+ account_type=self.AccountType.COMPUTER)
+
+ # Do the initial AS-REQ, should get a pre-authentication required
+ # response
+ etype = (AES256_CTS_HMAC_SHA1_96, ARCFOUR_HMAC_MD5)
+ cname = self.PrincipalName_create(
+ name_type=NT_PRINCIPAL, names=[user_name])
+ sname = self.PrincipalName_create(
+ name_type=NT_SRV_INST, names=["krbtgt", realm])
+
+ rep = self.as_req(cname, sname, realm, etype)
+ self.check_pre_authentication(rep)
+
+ # Do the next AS-REQ
+ padata = self.get_enc_timestamp_pa_data(uc, rep)
+ key = self.get_as_rep_key(uc, rep)
+ rep = self.as_req(cname, sname, realm, etype, padata=[padata])
+ self.check_as_reply(rep)
+
+ # Request a ticket to the host service on the machine account
+ ticket = rep['ticket']
+ enc_part2 = self.get_as_rep_enc_data(key, rep)
+ key = self.EncryptionKey_import(enc_part2['key'])
+ cname = self.PrincipalName_create(
+ name_type=NT_PRINCIPAL,
+ names=[user_name])
+ sname = self.PrincipalName_create(
+ name_type=NT_PRINCIPAL,
+ names=[mc.get_username()])
+
+ (rep, enc_part) = self.tgs_req(
+ cname, sname, uc.get_realm(), ticket, key, etype,
+ creds=uc, service_creds=mc)
+ self.check_tgs_reply(rep)
+
+ # Check the contents of the pac, and the ticket
+ ticket = rep['ticket']
+ enc_part = self.decode_service_ticket(mc, ticket)
+ self.check_pac(samdb, enc_part['authorization-data'], uc, user_name)
+ # check the crealm and cname
+ cname = enc_part['cname']
+ self.assertEqual(NT_PRINCIPAL, cname['name-type'])
+ self.assertEqual(user_name.encode('UTF8'), cname['name-string'][0])
+ self.assertEqual(realm.upper().encode('UTF8'), enc_part['crealm'])
+
+ def test_nt_principal_step_2(self):
+ """ Step 2
+ If not found
+ search for sAMAccountName equal to the cname + "$"
+
+ """
+
+ # Create a machine account for the test.
+ #
+ samdb = self.get_samdb()
+ mach_name = "mskilemac"
+ (mc, dn) = self.create_account(samdb, mach_name,
+ account_type=self.AccountType.COMPUTER)
+ realm = mc.get_realm().lower()
+
+ # Do the initial AS-REQ, should get a pre-authentication required
+ # response
+ etype = (AES256_CTS_HMAC_SHA1_96, ARCFOUR_HMAC_MD5)
+ cname = self.PrincipalName_create(
+ name_type=NT_PRINCIPAL, names=[mach_name])
+ sname = self.PrincipalName_create(
+ name_type=NT_SRV_INST, names=["krbtgt", realm])
+
+ rep = self.as_req(cname, sname, realm, etype)
+ self.check_pre_authentication(rep)
+
+ # Do the next AS-REQ
+ padata = self.get_enc_timestamp_pa_data(mc, rep)
+ key = self.get_as_rep_key(mc, rep)
+ rep = self.as_req(cname, sname, realm, etype, padata=[padata])
+ self.check_as_reply(rep)
+
+ # Request a ticket to the host service on the machine account
+ ticket = rep['ticket']
+ enc_part2 = self.get_as_rep_enc_data(key, rep)
+ key = self.EncryptionKey_import(enc_part2['key'])
+ cname = self.PrincipalName_create(
+ name_type=NT_PRINCIPAL,
+ names=[mach_name])
+ sname = self.PrincipalName_create(
+ name_type=NT_PRINCIPAL,
+ names=[mc.get_username()])
+
+ (rep, enc_part) = self.tgs_req(
+ cname, sname, mc.get_realm(), ticket, key, etype,
+ creds=mc, service_creds=mc)
+ self.check_tgs_reply(rep)
+
+ # Check the contents of the pac, and the ticket
+ ticket = rep['ticket']
+ enc_part = self.decode_service_ticket(mc, ticket)
+ self.check_pac(samdb, enc_part['authorization-data'], mc, mach_name + '$')
+ # check the crealm and cname
+ cname = enc_part['cname']
+ self.assertEqual(NT_PRINCIPAL, cname['name-type'])
+ self.assertEqual(mach_name.encode('UTF8'), cname['name-string'][0])
+ self.assertEqual(realm.upper().encode('UTF8'), enc_part['crealm'])
+
+ def test_nt_principal_step_3(self):
+ """ Step 3
+
+ If not found
+ search for a matching UPN name where the UPN is set to
+ cname@realm or cname@DC's domain name
+
+ """
+ # Create a user account for the test.
+ #
+ samdb = self.get_samdb()
+ user_name = "mskileusr"
+ upn_name = "mskileupn"
+ upn = upn_name + "@" + self.get_user_creds().get_realm().lower()
+ (uc, dn) = self.create_account(samdb, user_name, upn=upn)
+ realm = uc.get_realm().lower()
+
+ mach_name = "mskilemac"
+ (mc, _) = self.create_account(samdb, mach_name,
+ account_type=self.AccountType.COMPUTER)
+
+ # Do the initial AS-REQ, should get a pre-authentication required
+ # response
+ etype = (AES256_CTS_HMAC_SHA1_96, ARCFOUR_HMAC_MD5)
+ cname = self.PrincipalName_create(
+ name_type=NT_PRINCIPAL, names=[upn_name])
+ sname = self.PrincipalName_create(
+ name_type=NT_SRV_INST, names=["krbtgt", realm])
+
+ rep = self.as_req(cname, sname, realm, etype)
+ self.check_pre_authentication(rep)
+
+ # Do the next AS-REQ
+ padata = self.get_enc_timestamp_pa_data(uc, rep)
+ key = self.get_as_rep_key(uc, rep)
+ rep = self.as_req(cname, sname, realm, etype, padata=[padata])
+ self.check_as_reply(rep)
+
+ # Request a ticket to the host service on the machine account
+ ticket = rep['ticket']
+ enc_part2 = self.get_as_rep_enc_data(key, rep)
+ key = self.EncryptionKey_import(enc_part2['key'])
+ cname = self.PrincipalName_create(
+ name_type=NT_PRINCIPAL,
+ names=[upn_name])
+ sname = self.PrincipalName_create(
+ name_type=NT_PRINCIPAL,
+ names=[mc.get_username()])
+
+ (rep, enc_part) = self.tgs_req(
+ cname, sname, uc.get_realm(), ticket, key, etype,
+ creds=uc, service_creds=mc)
+ self.check_tgs_reply(rep)
+
+ # Check the contents of the service ticket
+ ticket = rep['ticket']
+ enc_part = self.decode_service_ticket(mc, ticket)
+ self.check_pac(samdb, enc_part['authorization-data'], uc, upn_name)
+ # check the crealm and cname
+ cname = enc_part['cname']
+ self.assertEqual(NT_PRINCIPAL, cname['name-type'])
+ self.assertEqual(upn_name.encode('UTF8'), cname['name-string'][0])
+ self.assertEqual(realm.upper().encode('UTF8'), enc_part['crealm'])
+
+ def test_nt_principal_step_4_a(self):
+ """ Step 4, no pre-authentication
+ If not found and no pre-authentication
+ search for a matching altSecurityIdentity
+ """
+ # Create a user account for the test.
+ # with an altSecurityIdentity, and with UF_DONT_REQUIRE_PREAUTH
+ # set.
+ #
+ # note that in this case IDL_DRSCrackNames is called with
+ # pmsgIn.formatOffered set to
+ # DS_USER_PRINCIPAL_NAME_AND_ALTSECID
+ #
+ # setting UF_DONT_REQUIRE_PREAUTH seems to be the only way
+ # to trigger the no pre-auth step
+
+ samdb = self.get_samdb()
+ user_name = "mskileusr"
+ alt_name = "mskilealtsec"
+ (uc, dn) = self.create_account(samdb, user_name,
+ account_control=UF_DONT_REQUIRE_PREAUTH)
+ realm = uc.get_realm().lower()
+ alt_sec = "Kerberos:%s@%s" % (alt_name, realm)
+ self.add_attribute(samdb, dn, "altSecurityIdentities", alt_sec)
+
+ mach_name = "mskilemac"
+ (mc, _) = self.create_account(samdb, mach_name,
+ account_type=self.AccountType.COMPUTER)
+
+ # Do the initial AS-REQ, as we've set UF_DONT_REQUIRE_PREAUTH
+ # we should get a valid AS-RESP
+ # response
+ etype = (AES256_CTS_HMAC_SHA1_96, ARCFOUR_HMAC_MD5)
+ cname = self.PrincipalName_create(
+ name_type=NT_PRINCIPAL, names=[alt_name])
+ sname = self.PrincipalName_create(
+ name_type=NT_SRV_INST, names=["krbtgt", realm])
+
+ rep = self.as_req(cname, sname, realm, etype)
+ self.check_as_reply(rep)
+ salt = "%s%s" % (realm.upper(), user_name)
+ key = self.PasswordKey_create(
+ rep['enc-part']['etype'],
+ uc.get_password(),
+ salt.encode('UTF8'),
+ rep['enc-part']['kvno'])
+
+ # Request a ticket to the host service on the machine account
+ ticket = rep['ticket']
+ enc_part2 = self.get_as_rep_enc_data(key, rep)
+ key = self.EncryptionKey_import(enc_part2['key'])
+ cname = self.PrincipalName_create(
+ name_type=NT_PRINCIPAL, names=[alt_name])
+ sname = self.PrincipalName_create(
+ name_type=NT_PRINCIPAL,
+ names=[mc.get_username()])
+
+ (rep, enc_part) = self.tgs_req(
+ cname, sname, uc.get_realm(), ticket, key, etype,
+ creds=uc, service_creds=mc, expect_pac=False,
+ expect_edata=False,
+ expected_error_mode=KDC_ERR_TGT_REVOKED)
+ self.check_error_rep(rep, KDC_ERR_TGT_REVOKED)
+
+ def test_nt_principal_step_4_b(self):
+ """ Step 4, pre-authentication
+ If not found and pre-authentication
+ search for a matching user principal name
+ """
+
+ # Create user and machine accounts for the test.
+ #
+ samdb = self.get_samdb()
+ user_name = "mskileusr"
+ alt_name = "mskilealtsec"
+ (uc, dn) = self.create_account(samdb, user_name)
+ realm = uc.get_realm().lower()
+ alt_sec = "Kerberos:%s@%s" % (alt_name, realm)
+ self.add_attribute(samdb, dn, "altSecurityIdentities", alt_sec)
+
+ mach_name = "mskilemac"
+ (mc, _) = self.create_account(samdb, mach_name,
+ account_type=self.AccountType.COMPUTER)
+
+ # Do the initial AS-REQ, should get a pre-authentication required
+ # response
+ etype = (AES256_CTS_HMAC_SHA1_96, ARCFOUR_HMAC_MD5)
+ cname = self.PrincipalName_create(
+ name_type=NT_PRINCIPAL, names=[alt_name])
+ sname = self.PrincipalName_create(
+ name_type=NT_SRV_INST, names=["krbtgt", realm])
+
+ rep = self.as_req(cname, sname, realm, etype)
+ self.check_pre_authentication(rep)
+
+ # Do the next AS-REQ
+ padata = self.get_enc_timestamp_pa_data(uc, rep)
+ key = self.get_as_rep_key(uc, rep)
+ # Note: although we used the alt security id for the pre-auth
+ # we need to use the username for the auth
+ cname = self.PrincipalName_create(
+ name_type=NT_PRINCIPAL, names=[user_name])
+ rep = self.as_req(cname, sname, realm, etype, padata=[padata])
+ self.check_as_reply(rep)
+
+ # Request a ticket to the host service on the machine account
+ ticket = rep['ticket']
+ enc_part2 = self.get_as_rep_enc_data(key, rep)
+ key = self.EncryptionKey_import(enc_part2['key'])
+ cname = self.PrincipalName_create(
+ name_type=NT_PRINCIPAL,
+ names=[user_name])
+ sname = self.PrincipalName_create(
+ name_type=NT_PRINCIPAL,
+ names=[mc.get_username()])
+
+ (rep, enc_part) = self.tgs_req(
+ cname, sname, uc.get_realm(), ticket, key, etype,
+ creds=uc, service_creds=mc)
+ self.check_tgs_reply(rep)
+
+ # Check the contents of the pac, and the ticket
+ ticket = rep['ticket']
+ enc_part = self.decode_service_ticket(mc, ticket)
+ self.check_pac(samdb,
+ enc_part['authorization-data'], uc, user_name)
+ # check the crealm and cname
+ cname = enc_part['cname']
+ self.assertEqual(NT_PRINCIPAL, cname['name-type'])
+ self.assertEqual(user_name.encode('UTF8'), cname['name-string'][0])
+ self.assertEqual(realm.upper().encode('UTF8'), enc_part['crealm'])
+
+ def test_nt_principal_step_4_c(self):
+ """ Step 4, pre-authentication
+ If not found and pre-authentication
+ search for a matching user principal name
+
+ This test uses the altsecid, so the AS-REQ should fail.
+ """
+
+ # Create user and machine accounts for the test.
+ #
+ samdb = self.get_samdb()
+ user_name = "mskileusr"
+ alt_name = "mskilealtsec"
+ (uc, dn) = self.create_account(samdb, user_name)
+ realm = uc.get_realm().lower()
+ alt_sec = "Kerberos:%s@%s" % (alt_name, realm)
+ self.add_attribute(samdb, dn, "altSecurityIdentities", alt_sec)
+
+ mach_name = "mskilemac"
+ (mc, _) = self.create_account(samdb, mach_name,
+ account_type=self.AccountType.COMPUTER)
+
+ # Do the initial AS-REQ, should get a pre-authentication required
+ # response
+ etype = (AES256_CTS_HMAC_SHA1_96, ARCFOUR_HMAC_MD5)
+ cname = self.PrincipalName_create(
+ name_type=NT_PRINCIPAL, names=[alt_name])
+ sname = self.PrincipalName_create(
+ name_type=NT_SRV_INST, names=["krbtgt", realm])
+
+ rep = self.as_req(cname, sname, realm, etype)
+ self.check_pre_authentication(rep)
+
+ # Do the next AS-REQ
+ padata = self.get_enc_timestamp_pa_data(uc, rep)
+ # Use the alternate security identifier
+ # this should fail
+ cname = self.PrincipalName_create(
+ name_type=NT_PRINCIPAL, names=[alt_sec])
+ rep = self.as_req(cname, sname, realm, etype, padata=[padata])
+ self.check_error_rep(rep, KDC_ERR_C_PRINCIPAL_UNKNOWN)
+
+ def test_enterprise_principal_step_1_3(self):
+ """ Steps 1-3
+ For an NT_ENTERPRISE_PRINCIPAL cname
+ search for a user principal name matching the cname
+
+ """
+
+ # Create a user account for the test.
+ #
+ samdb = self.get_samdb()
+ user_name = "mskileusr"
+ upn_name = "mskileupn"
+ upn = upn_name + "@" + self.get_user_creds().get_realm().lower()
+ (uc, dn) = self.create_account(samdb, user_name, upn=upn)
+ realm = uc.get_realm().lower()
+
+ mach_name = "mskilemac"
+ (mc, _) = self.create_account(samdb, mach_name,
+ account_type=self.AccountType.COMPUTER)
+
+ # Do the initial AS-REQ, should get a pre-authentication required
+ # response
+ etype = (AES256_CTS_HMAC_SHA1_96, ARCFOUR_HMAC_MD5)
+ cname = self.PrincipalName_create(
+ name_type=NT_ENTERPRISE_PRINCIPAL, names=[upn])
+ sname = self.PrincipalName_create(
+ name_type=NT_SRV_INST, names=["krbtgt", realm])
+
+ rep = self.as_req(cname, sname, realm, etype)
+ self.check_pre_authentication(rep)
+
+ # Do the next AS-REQ
+ padata = self.get_enc_timestamp_pa_data(uc, rep)
+ key = self.get_as_rep_key(uc, rep)
+ rep = self.as_req(cname, sname, realm, etype, padata=[padata])
+ self.check_as_reply(rep)
+
+ # Request a ticket to the host service on the machine account
+ ticket = rep['ticket']
+ enc_part2 = self.get_as_rep_enc_data(key, rep)
+ key = self.EncryptionKey_import(enc_part2['key'])
+ cname = self.PrincipalName_create(
+ name_type=NT_ENTERPRISE_PRINCIPAL, names=[upn])
+ sname = self.PrincipalName_create(
+ name_type=NT_PRINCIPAL,
+ names=[mc.get_username()])
+
+ (rep, enc_part) = self.tgs_req(
+ cname, sname, uc.get_realm(), ticket, key, etype,
+ creds=uc, service_creds=mc)
+ self.check_tgs_reply(rep)
+
+ # Check the contents of the pac, and the ticket
+ ticket = rep['ticket']
+ enc_part = self.decode_service_ticket(mc, ticket)
+ self.check_pac(
+ samdb, enc_part['authorization-data'], uc, upn, upn=upn)
+ # check the crealm and cname
+ cname = enc_part['cname']
+ crealm = enc_part['crealm']
+ self.assertEqual(NT_ENTERPRISE_PRINCIPAL, cname['name-type'])
+ self.assertEqual(upn.encode('UTF8'), cname['name-string'][0])
+ self.assertEqual(realm.upper().encode('UTF8'), crealm)
+
+ def test_enterprise_principal_step_4(self):
+ """ Step 4
+
+ If that fails
+ search for an account where the sAMAccountName matches
+ the name before the @
+
+ """
+
+ # Create a user account for the test.
+ #
+ samdb = self.get_samdb()
+ user_name = "mskileusr"
+ (uc, dn) = self.create_account(samdb, user_name)
+ realm = uc.get_realm().lower()
+ ename = user_name + "@" + realm
+
+ mach_name = "mskilemac"
+ (mc, _) = self.create_account(samdb, mach_name,
+ account_type=self.AccountType.COMPUTER)
+
+ # Do the initial AS-REQ, should get a pre-authentication required
+ # response
+ etype = (AES256_CTS_HMAC_SHA1_96, ARCFOUR_HMAC_MD5)
+ cname = self.PrincipalName_create(
+ name_type=NT_ENTERPRISE_PRINCIPAL, names=[ename])
+ sname = self.PrincipalName_create(
+ name_type=NT_SRV_INST, names=["krbtgt", realm])
+
+ rep = self.as_req(cname, sname, realm, etype)
+ self.check_pre_authentication(rep)
+
+ # Do the next AS-REQ
+ padata = self.get_enc_timestamp_pa_data(uc, rep)
+ key = self.get_as_rep_key(uc, rep)
+ rep = self.as_req(cname, sname, realm, etype, padata=[padata])
+ self.check_as_reply(rep)
+
+ # Request a ticket to the host service on the machine account
+ ticket = rep['ticket']
+ enc_part2 = self.get_as_rep_enc_data(key, rep)
+ key = self.EncryptionKey_import(enc_part2['key'])
+ cname = self.PrincipalName_create(
+ name_type=NT_ENTERPRISE_PRINCIPAL, names=[ename])
+ sname = self.PrincipalName_create(
+ name_type=NT_PRINCIPAL,
+ names=[mc.get_username()])
+
+ (rep, enc_part) = self.tgs_req(
+ cname, sname, uc.get_realm(), ticket, key, etype,
+ creds=uc, service_creds=mc)
+ self.check_tgs_reply(rep)
+
+ # Check the contents of the pac, and the ticket
+ ticket = rep['ticket']
+ enc_part = self.decode_service_ticket(mc, ticket)
+ self.check_pac(
+ samdb, enc_part['authorization-data'], uc, ename, upn=ename)
+ # check the crealm and cname
+ cname = enc_part['cname']
+ crealm = enc_part['crealm']
+ self.assertEqual(NT_ENTERPRISE_PRINCIPAL, cname['name-type'])
+ self.assertEqual(ename.encode('UTF8'), cname['name-string'][0])
+ self.assertEqual(realm.upper().encode('UTF8'), crealm)
+
+ def test_enterprise_principal_step_5(self):
+ """ Step 5
+
+ If that fails
+ search for an account where the sAMAccountName matches
+ the name before the @ with a $ appended.
+
+ """
+
+ # Create a user account for the test.
+ #
+ samdb = self.get_samdb()
+ user_name = "mskileusr"
+ (uc, _) = self.create_account(samdb, user_name)
+ realm = uc.get_realm().lower()
+
+ mach_name = "mskilemac"
+ (mc, dn) = self.create_account(samdb, mach_name,
+ account_type=self.AccountType.COMPUTER)
+ ename = mach_name + "@" + realm
+ uname = mach_name + "$@" + realm
+
+ # Do the initial AS-REQ, should get a pre-authentication required
+ # response
+ etype = (AES256_CTS_HMAC_SHA1_96, ARCFOUR_HMAC_MD5)
+ cname = self.PrincipalName_create(
+ name_type=NT_ENTERPRISE_PRINCIPAL, names=[ename])
+ sname = self.PrincipalName_create(
+ name_type=NT_SRV_INST, names=["krbtgt", realm])
+
+ rep = self.as_req(cname, sname, realm, etype)
+ self.check_pre_authentication(rep)
+
+ # Do the next AS-REQ
+ padata = self.get_enc_timestamp_pa_data(mc, rep)
+ key = self.get_as_rep_key(mc, rep)
+ rep = self.as_req(cname, sname, realm, etype, padata=[padata])
+ self.check_as_reply(rep)
+
+ # Request a ticket to the host service on the machine account
+ ticket = rep['ticket']
+ enc_part2 = self.get_as_rep_enc_data(key, rep)
+ key = self.EncryptionKey_import(enc_part2['key'])
+ cname = self.PrincipalName_create(
+ name_type=NT_ENTERPRISE_PRINCIPAL, names=[ename])
+ sname = self.PrincipalName_create(
+ name_type=NT_PRINCIPAL,
+ names=[mc.get_username()])
+
+ (rep, enc_part) = self.tgs_req(
+ cname, sname, uc.get_realm(), ticket, key, etype,
+ creds=uc, service_creds=mc)
+ self.check_tgs_reply(rep)
+
+ # Check the contents of the pac, and the ticket
+ ticket = rep['ticket']
+ enc_part = self.decode_service_ticket(mc, ticket)
+ self.check_pac(
+ samdb, enc_part['authorization-data'], mc, ename, upn=uname)
+ # check the crealm and cname
+ cname = enc_part['cname']
+ crealm = enc_part['crealm']
+ self.assertEqual(NT_ENTERPRISE_PRINCIPAL, cname['name-type'])
+ self.assertEqual(ename.encode('UTF8'), cname['name-string'][0])
+ self.assertEqual(realm.upper().encode('UTF8'), crealm)
+
+ def test_enterprise_principal_step_6_a(self):
+ """ Step 6, no pre-authentication
+ If not found and no pre-authentication
+ search for a matching altSecurityIdentity
+ """
+ # Create a user account for the test.
+ # with an altSecurityIdentity, and with UF_DONT_REQUIRE_PREAUTH
+ # set.
+ #
+ # note that in this case IDL_DRSCrackNames is called with
+ # pmsgIn.formatOffered set to
+ # DS_USER_PRINCIPAL_NAME_AND_ALTSECID
+ #
+ # setting UF_DONT_REQUIRE_PREAUTH seems to be the only way
+ # to trigger the no pre-auth step
+
+ samdb = self.get_samdb()
+ user_name = "mskileusr"
+ alt_name = "mskilealtsec"
+ (uc, dn) = self.create_account(samdb, user_name,
+ account_control=UF_DONT_REQUIRE_PREAUTH)
+ realm = uc.get_realm().lower()
+ alt_sec = "Kerberos:%s@%s" % (alt_name, realm)
+ self.add_attribute(samdb, dn, "altSecurityIdentities", alt_sec)
+ ename = alt_name + "@" + realm
+
+ mach_name = "mskilemac"
+ (mc, _) = self.create_account(samdb, mach_name,
+ account_type=self.AccountType.COMPUTER)
+
+ # Do the initial AS-REQ, as we've set UF_DONT_REQUIRE_PREAUTH
+ # we should get a valid AS-RESP
+ # response
+ etype = (AES256_CTS_HMAC_SHA1_96, ARCFOUR_HMAC_MD5)
+ cname = self.PrincipalName_create(
+ name_type=NT_ENTERPRISE_PRINCIPAL, names=[ename])
+ sname = self.PrincipalName_create(
+ name_type=NT_SRV_INST, names=["krbtgt", realm])
+
+ rep = self.as_req(cname, sname, realm, etype)
+ self.check_as_reply(rep)
+ salt = "%s%s" % (realm.upper(), user_name)
+ key = self.PasswordKey_create(
+ rep['enc-part']['etype'],
+ uc.get_password(),
+ salt.encode('UTF8'),
+ rep['enc-part']['kvno'])
+
+ # Request a ticket to the host service on the machine account
+ ticket = rep['ticket']
+ enc_part2 = self.get_as_rep_enc_data(key, rep)
+ key = self.EncryptionKey_import(enc_part2['key'])
+ cname = self.PrincipalName_create(
+ name_type=NT_ENTERPRISE_PRINCIPAL, names=[ename])
+ sname = self.PrincipalName_create(
+ name_type=NT_PRINCIPAL,
+ names=[mc.get_username()])
+
+ (rep, enc_part) = self.tgs_req(
+ cname, sname, uc.get_realm(), ticket, key, etype,
+ creds=uc, service_creds=mc, expect_pac=False,
+ expect_edata=False,
+ expected_error_mode=KDC_ERR_TGT_REVOKED)
+ self.check_error_rep(rep, KDC_ERR_TGT_REVOKED)
+
+ def test_nt_enterprise_principal_step_6_b(self):
+ """ Step 4, pre-authentication
+ If not found and pre-authentication
+ search for a matching user principal name
+ """
+
+ # Create user and machine accounts for the test.
+ #
+ samdb = self.get_samdb()
+ user_name = "mskileusr"
+ alt_name = "mskilealtsec"
+ (uc, dn) = self.create_account(samdb, user_name)
+ realm = uc.get_realm().lower()
+ alt_sec = "Kerberos:%s@%s" % (alt_name, realm)
+ self.add_attribute(samdb, dn, "altSecurityIdentities", alt_sec)
+ ename = alt_name + "@" + realm
+ uname = user_name + "@" + realm
+
+ mach_name = "mskilemac"
+ (mc, _) = self.create_account(samdb, mach_name,
+ account_type=self.AccountType.COMPUTER)
+
+ # Do the initial AS-REQ, should get a pre-authentication required
+ # response
+ etype = (AES256_CTS_HMAC_SHA1_96, ARCFOUR_HMAC_MD5)
+ cname = self.PrincipalName_create(
+ name_type=NT_ENTERPRISE_PRINCIPAL, names=[ename])
+ sname = self.PrincipalName_create(
+ name_type=NT_SRV_INST, names=["krbtgt", realm])
+
+ rep = self.as_req(cname, sname, realm, etype)
+ self.check_pre_authentication(rep)
+
+ # Do the next AS-REQ
+ padata = self.get_enc_timestamp_pa_data(uc, rep)
+ key = self.get_as_rep_key(uc, rep)
+ # Note: although we used the alt security id for the pre-auth
+ # we need to use the username for the auth
+ cname = self.PrincipalName_create(
+ name_type=NT_ENTERPRISE_PRINCIPAL, names=[uname])
+ rep = self.as_req(cname, sname, realm, etype, padata=[padata])
+ self.check_as_reply(rep)
+
+ # Request a ticket to the host service on the machine account
+ ticket = rep['ticket']
+ enc_part2 = self.get_as_rep_enc_data(key, rep)
+ key = self.EncryptionKey_import(enc_part2['key'])
+ cname = self.PrincipalName_create(
+ name_type=NT_ENTERPRISE_PRINCIPAL,
+ names=[uname])
+ sname = self.PrincipalName_create(
+ name_type=NT_PRINCIPAL,
+ names=[mc.get_username()])
+
+ (rep, enc_part) = self.tgs_req(
+ cname, sname, uc.get_realm(), ticket, key, etype,
+ creds=uc, service_creds=mc)
+ self.check_tgs_reply(rep)
+
+ # Check the contents of the pac, and the ticket
+ ticket = rep['ticket']
+ enc_part = self.decode_service_ticket(mc, ticket)
+ self.check_pac(
+ samdb, enc_part['authorization-data'], uc, uname, upn=uname)
+ # check the crealm and cname
+ cname = enc_part['cname']
+ self.assertEqual(NT_ENTERPRISE_PRINCIPAL, cname['name-type'])
+ self.assertEqual(uname.encode('UTF8'), cname['name-string'][0])
+ self.assertEqual(realm.upper().encode('UTF8'), enc_part['crealm'])
+
+ def test_nt_principal_step_6_c(self):
+ """ Step 4, pre-authentication
+ If not found and pre-authentication
+ search for a matching user principal name
+
+ This test uses the altsecid, so the AS-REQ should fail.
+ """
+
+ # Create user and machine accounts for the test.
+ #
+ samdb = self.get_samdb()
+ user_name = "mskileusr"
+ alt_name = "mskilealtsec"
+ (uc, dn) = self.create_account(samdb, user_name)
+ realm = uc.get_realm().lower()
+ alt_sec = "Kerberos:%s@%s" % (alt_name, realm)
+ self.add_attribute(samdb, dn, "altSecurityIdentities", alt_sec)
+ ename = alt_name + "@" + realm
+
+ mach_name = "mskilemac"
+ (mc, _) = self.create_account(samdb, mach_name,
+ account_type=self.AccountType.COMPUTER)
+
+ # Do the initial AS-REQ, should get a pre-authentication required
+ # response
+ etype = (AES256_CTS_HMAC_SHA1_96, ARCFOUR_HMAC_MD5)
+ cname = self.PrincipalName_create(
+ name_type=NT_ENTERPRISE_PRINCIPAL, names=[ename])
+ sname = self.PrincipalName_create(
+ name_type=NT_SRV_INST, names=["krbtgt", realm])
+
+ rep = self.as_req(cname, sname, realm, etype)
+ self.check_pre_authentication(rep)
+
+ # Do the next AS-REQ
+ padata = self.get_enc_timestamp_pa_data(uc, rep)
+ # Use the alternate security identifier
+ # this should fail
+ cname = self.PrincipalName_create(
+ name_type=NT_ENTERPRISE_PRINCIPAL, names=[ename])
+ rep = self.as_req(cname, sname, realm, etype, padata=[padata])
+ self.check_error_rep(rep, KDC_ERR_C_PRINCIPAL_UNKNOWN)
+
+
+if __name__ == "__main__":
+ global_asn1_print = False
+ global_hexdump = False
+ import unittest
+ unittest.main()
diff --git a/python/samba/tests/krb5/nt_hash_tests.py b/python/samba/tests/krb5/nt_hash_tests.py
new file mode 100755
index 0000000..82d9c09
--- /dev/null
+++ b/python/samba/tests/krb5/nt_hash_tests.py
@@ -0,0 +1,142 @@
+#!/usr/bin/env python3
+# Unix SMB/CIFS implementation.
+# Copyright (C) Stefan Metzmacher 2020
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import sys
+import os
+
+sys.path.insert(0, 'bin/python')
+os.environ['PYTHONUNBUFFERED'] = '1'
+
+import ldb
+
+from samba import generate_random_password, net
+from samba.dcerpc import drsuapi, misc
+
+from samba.tests.krb5.kdc_base_test import KDCBaseTest
+
+global_asn1_print = False
+global_hexdump = False
+
+
+class NtHashTests(KDCBaseTest):
+
+ def setUp(self):
+ super().setUp()
+ self.do_asn1_print = global_asn1_print
+ self.do_hexdump = global_hexdump
+
+ def _check_nt_hash(self, dn, history_len):
+ expect_nt_hash = bool(int(os.environ.get('EXPECT_NT_HASH', '1')))
+
+ samdb = self.get_samdb()
+ admin_creds = self.get_admin_creds()
+
+ bind, identifier, attributes = self.get_secrets(
+ dn,
+ destination_dsa_guid=misc.GUID(samdb.get_ntds_GUID()),
+ source_dsa_invocation_id=misc.GUID())
+
+ rid = identifier.sid.split()[1]
+
+ net_ctx = net.Net(admin_creds)
+
+ def num_hashes(attr):
+ if attr.value_ctr.values is None:
+ return 0
+
+ net_ctx.replicate_decrypt(bind, attr, rid)
+
+ length = sum(len(value.blob) for value in attr.value_ctr.values)
+ self.assertEqual(0, length & 0xf)
+ return length // 16
+
+ def is_unicodePwd(attr):
+ return attr.attid == drsuapi.DRSUAPI_ATTID_unicodePwd
+
+ def is_ntPwdHistory(attr):
+ return attr.attid == drsuapi.DRSUAPI_ATTID_ntPwdHistory
+
+ unicode_pwd_count = sum(attr.value_ctr.num_values
+ for attr in filter(is_unicodePwd, attributes))
+
+ nt_history_count = sum(num_hashes(attr)
+ for attr in filter(is_ntPwdHistory, attributes))
+
+ if expect_nt_hash:
+ self.assertEqual(1, unicode_pwd_count,
+ 'expected to find NT hash')
+ else:
+ self.assertEqual(0, unicode_pwd_count,
+ 'got unexpected NT hash')
+
+ if expect_nt_hash:
+ self.assertEqual(history_len, nt_history_count,
+ 'expected to find NT password history')
+ else:
+ self.assertEqual(0, nt_history_count,
+ 'got unexpected NT password history')
+
+ # Test that the NT hash and its history is not generated or stored for an
+ # account when we disable NTLM authentication.
+ def test_nt_hash(self):
+ samdb = self.get_samdb()
+ user_name = self.get_new_username()
+
+ client_creds, client_dn = self.create_account(
+ samdb, user_name,
+ account_type=KDCBaseTest.AccountType.USER)
+
+ self._check_nt_hash(client_dn, history_len=1)
+
+ # Change the password and check that the NT hash is still not present.
+
+ # Get the old "minPwdAge"
+ minPwdAge = samdb.get_minPwdAge()
+
+ # Reset the "minPwdAge" as it was before
+ self.addCleanup(samdb.set_minPwdAge, minPwdAge)
+
+ # Set it temporarily to '0'
+ samdb.set_minPwdAge('0')
+
+ old_utf16pw = f'"{client_creds.get_password()}"'.encode('utf-16-le')
+
+ history_len = 3
+ for _ in range(history_len - 1):
+ password = generate_random_password(32, 32)
+ utf16pw = f'"{password}"'.encode('utf-16-le')
+
+ msg = ldb.Message(ldb.Dn(samdb, client_dn))
+ msg['0'] = ldb.MessageElement(old_utf16pw,
+ ldb.FLAG_MOD_DELETE,
+ 'unicodePwd')
+ msg['1'] = ldb.MessageElement(utf16pw,
+ ldb.FLAG_MOD_ADD,
+ 'unicodePwd')
+ samdb.modify(msg)
+
+ old_utf16pw = utf16pw
+
+ self._check_nt_hash(client_dn, history_len)
+
+
+if __name__ == '__main__':
+ global_asn1_print = False
+ global_hexdump = False
+ import unittest
+ unittest.main()
diff --git a/python/samba/tests/krb5/pac_align_tests.py b/python/samba/tests/krb5/pac_align_tests.py
new file mode 100755
index 0000000..ae63596
--- /dev/null
+++ b/python/samba/tests/krb5/pac_align_tests.py
@@ -0,0 +1,93 @@
+#!/usr/bin/env python3
+# Unix SMB/CIFS implementation.
+# Copyright (C) Stefan Metzmacher 2020
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import sys
+import os
+
+sys.path.insert(0, 'bin/python')
+os.environ['PYTHONUNBUFFERED'] = '1'
+
+from samba.dcerpc import krb5pac
+from samba.ndr import ndr_unpack
+from samba.tests import DynamicTestCase
+from samba.tests.krb5.kdc_base_test import KDCBaseTest
+
+global_asn1_print = False
+global_hexdump = False
+
+
+@DynamicTestCase
+class PacAlignTests(KDCBaseTest):
+    """Check that every buffer in a Kerberos PAC is properly aligned,
+    trying account names of varying lengths (one dynamic test per
+    length) so that the variable-sized logon-name buffer exercises all
+    alignment offsets."""
+
+    # Prefix for the generated account names; padded with 'a's up to the
+    # per-test target length.
+    base_name = 'krbpac'
+
+    @classmethod
+    def setUpDynamicTestCases(cls):
+        # Generate one test per account-name length, from the length of
+        # the base name up to 20 characters.
+        for length in range(len(cls.base_name), 21):
+            cls.generate_dynamic_test('test_pac_align',
+                                      f'{length}_chars',
+                                      length)
+
+    def setUp(self):
+        super().setUp()
+        # Honour the module-level debugging switches.
+        self.do_asn1_print = global_asn1_print
+        self.do_hexdump = global_hexdump
+
+    def _test_pac_align_with_args(self, length):
+        """Request a TGT for an account whose name has the given length
+        and verify the alignment and size of each PAC buffer."""
+        samdb = self.get_samdb()
+
+        account_name = self.base_name + 'a' * (length - len(self.base_name))
+        creds, _ = self.create_account(samdb, account_name)
+
+        tgt = self.get_tgt(creds, expect_pac=True)
+
+        pac_data = self.get_ticket_pac(tgt)
+        self.assertIsNotNone(pac_data)
+
+        # The PAC as a whole must be 8-byte aligned.
+        self.assertEqual(0, len(pac_data) & 7)
+
+        pac = ndr_unpack(krb5pac.PAC_DATA_RAW, pac_data)
+        for pac_buffer in pac.buffers:
+            buffer_type = pac_buffer.type
+            buffer_size = pac_buffer.ndr_size
+
+            with self.subTest(buffer_type=buffer_type):
+                if buffer_type == krb5pac.PAC_TYPE_LOGON_NAME:
+                    # 2 bytes per UTF-16 character of the name, plus ten
+                    # bytes of fixed fields — presumably the 8-byte logon
+                    # time and 2-byte name length of PAC_LOGON_NAME (see
+                    # [MS-PAC]; confirm against the IDL).
+                    self.assertEqual(length * 2 + 10, buffer_size)
+                elif buffer_type == krb5pac.PAC_TYPE_REQUESTER_SID:
+                    self.assertEqual(28, buffer_size)
+                elif buffer_type in {krb5pac.PAC_TYPE_SRV_CHECKSUM,
+                                     krb5pac.PAC_TYPE_KDC_CHECKSUM,
+                                     krb5pac.PAC_TYPE_TICKET_CHECKSUM}:
+                    # Checksum buffers need only 4-byte alignment.
+                    self.assertEqual(0, buffer_size & 3,
+                                     f'buffer type was: {buffer_type}, '
+                                     f'buffer size was: {buffer_size}')
+                else:
+                    # All other buffers must be 8-byte aligned.
+                    self.assertEqual(0, buffer_size & 7,
+                                     f'buffer type was: {buffer_type}, '
+                                     f'buffer size was: {buffer_size}')
+
+                # The stored data is padded out to the next 8-byte
+                # boundary.
+                rounded_len = (buffer_size + 7) & ~7
+                self.assertEqual(rounded_len, len(pac_buffer.info.remaining))
+
+
+# Allow this test file to be run directly as a script.
+if __name__ == '__main__':
+    global_asn1_print = False
+    global_hexdump = False
+    import unittest
+    unittest.main()
diff --git a/python/samba/tests/krb5/pkinit_tests.py b/python/samba/tests/krb5/pkinit_tests.py
new file mode 100755
index 0000000..3d47c79
--- /dev/null
+++ b/python/samba/tests/krb5/pkinit_tests.py
@@ -0,0 +1,1211 @@
+#!/usr/bin/env python3
+# Unix SMB/CIFS implementation.
+# Copyright (C) Stefan Metzmacher 2020
+# Copyright (C) Catalyst.Net Ltd 2023
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import sys
+import os
+
+sys.path.insert(0, 'bin/python')
+os.environ['PYTHONUNBUFFERED'] = '1'
+
+from datetime import datetime, timedelta
+
+from pyasn1.type import univ
+
+from cryptography import x509
+from cryptography.hazmat.primitives.serialization import pkcs12
+from cryptography.hazmat.backends import default_backend
+from cryptography.hazmat.primitives import hashes, serialization
+from cryptography.hazmat.primitives.asymmetric import dh, padding
+from cryptography.x509.oid import NameOID
+
+import samba.tests
+from samba.tests.krb5 import kcrypto
+from samba.tests.krb5.kdc_base_test import KDCBaseTest
+from samba.tests.krb5.raw_testcase import PkInit
+from samba.tests.krb5.rfc4120_constants import (
+ DES_EDE3_CBC,
+ KDC_ERR_CLIENT_NOT_TRUSTED,
+ KDC_ERR_ETYPE_NOSUPP,
+ KDC_ERR_MODIFIED,
+ KDC_ERR_PREAUTH_EXPIRED,
+ KDC_ERR_PREAUTH_FAILED,
+ KDC_ERR_PREAUTH_REQUIRED,
+ KU_PA_ENC_TIMESTAMP,
+ NT_PRINCIPAL,
+ PADATA_AS_FRESHNESS,
+ PADATA_ENC_TIMESTAMP,
+ PADATA_PK_AS_REP_19,
+ PADATA_PK_AS_REQ,
+)
+import samba.tests.krb5.rfc4120_pyasn1 as krb5_asn1
+
+global_asn1_print = False
+global_hexdump = False
+
+
+class PkInitTests(KDCBaseTest):
+    @classmethod
+    def setUpClass(cls):
+        # No extra class-level fixtures needed beyond the base class.
+        super().setUpClass()
+
+    def setUp(self):
+        super().setUp()
+        # Honour the module-level debugging switches for ASN.1 printing
+        # and hexdumps of the raw Kerberos exchanges.
+        self.do_asn1_print = global_asn1_print
+        self.do_hexdump = global_hexdump
+
+    def _get_creds(self, account_type=KDCBaseTest.AccountType.USER):
+        """Return credentials with an account having a UPN for performing
+        PK-INIT.
+
+        :param account_type: the kind of account to create (user,
+            computer, managed service, …).
+        """
+        samdb = self.get_samdb()
+        realm = samdb.domain_dns_name().upper()
+
+        # The literal ‘{account}’ placeholder survives this f-string
+        # (doubled braces); presumably it is substituted with the real
+        # account name by get_cached_creds() — confirm in kdc_base_test.
+        return self.get_cached_creds(
+            account_type=account_type,
+            opts={'upn': f'{{account}}.{realm}@{realm}'})
+
+    # --- Encryption-type negotiation tests -------------------------------
+
+    def test_pkinit_no_des3(self):
+        """Test public-key PK-INIT without specifying the DES3 encryption
+        type. It should fail."""
+        client_creds = self._get_creds()
+        target_creds = self.get_service_creds()
+
+        self._pkinit_req(client_creds, target_creds,
+                         etypes=(kcrypto.Enctype.AES256, kcrypto.Enctype.RC4),
+                         expect_error=KDC_ERR_ETYPE_NOSUPP)
+
+    def test_pkinit_no_des3_dh(self):
+        """Test Diffie-Hellman PK-INIT without specifying the DES3 encryption
+        type. This time, it should succeed."""
+        client_creds = self._get_creds()
+        target_creds = self.get_service_creds()
+
+        self._pkinit_req(client_creds, target_creds,
+                         using_pkinit=PkInit.DIFFIE_HELLMAN,
+                         etypes=(kcrypto.Enctype.AES256, kcrypto.Enctype.RC4))
+
+    def test_pkinit_aes128(self):
+        """Test public-key PK-INIT, specifying the AES128 encryption type
+        first."""
+        client_creds = self._get_creds()
+        target_creds = self.get_service_creds()
+
+        self._pkinit_req(client_creds, target_creds,
+                         etypes=(
+                             kcrypto.Enctype.AES128,
+                             kcrypto.Enctype.AES256,
+                             DES_EDE3_CBC,
+                         ))
+
+    def test_pkinit_rc4(self):
+        """Test public-key PK-INIT, specifying the RC4 encryption type first.
+        """
+        client_creds = self._get_creds()
+        target_creds = self.get_service_creds()
+
+        self._pkinit_req(client_creds, target_creds,
+                         etypes=(
+                             kcrypto.Enctype.RC4,
+                             kcrypto.Enctype.AES256,
+                             DES_EDE3_CBC,
+                         ))
+
+    # --- Nonce handling and different account types ----------------------
+
+    def test_pkinit_zero_nonce(self):
+        """Test public-key PK-INIT with a nonce of zero. The nonce in the
+        request body should take precedence."""
+        client_creds = self._get_creds()
+        target_creds = self.get_service_creds()
+
+        self._pkinit_req(client_creds, target_creds, pk_nonce=0)
+
+    def test_pkinit_zero_nonce_dh(self):
+        """Test Diffie-Hellman PK-INIT with a nonce of zero. The nonce in the
+        request body should take precedence.
+        """
+        client_creds = self._get_creds()
+        target_creds = self.get_service_creds()
+
+        self._pkinit_req(client_creds, target_creds,
+                         using_pkinit=PkInit.DIFFIE_HELLMAN,
+                         pk_nonce=0)
+
+    def test_pkinit_computer(self):
+        """Test public-key PK-INIT with a computer account."""
+        client_creds = self._get_creds(self.AccountType.COMPUTER)
+        target_creds = self.get_service_creds()
+
+        self._pkinit_req(client_creds, target_creds)
+
+    def test_pkinit_computer_dh(self):
+        """Test Diffie-Hellman PK-INIT with a computer account."""
+        client_creds = self._get_creds(self.AccountType.COMPUTER)
+        target_creds = self.get_service_creds()
+
+        self._pkinit_req(client_creds, target_creds,
+                         using_pkinit=PkInit.DIFFIE_HELLMAN)
+
+    def test_pkinit_computer_win2k(self):
+        """Test public-key Windows 2000 PK-INIT with a computer account."""
+        client_creds = self._get_creds(self.AccountType.COMPUTER)
+        target_creds = self.get_service_creds()
+
+        self._pkinit_req(client_creds, target_creds, win2k_variant=True)
+
+    def test_pkinit_service(self):
+        """Test public-key PK-INIT with a service account."""
+        client_creds = self._get_creds(self.AccountType.MANAGED_SERVICE)
+        target_creds = self.get_service_creds()
+
+        self._pkinit_req(client_creds, target_creds)
+
+    def test_pkinit_service_dh(self):
+        """Test Diffie-Hellman PK-INIT with a service account."""
+        client_creds = self._get_creds(self.AccountType.MANAGED_SERVICE)
+        target_creds = self.get_service_creds()
+
+        self._pkinit_req(client_creds, target_creds,
+                         using_pkinit=PkInit.DIFFIE_HELLMAN)
+
+    def test_pkinit_service_win2k(self):
+        """Test public-key Windows 2000 PK-INIT with a service account."""
+        client_creds = self._get_creds(self.AccountType.MANAGED_SERVICE)
+        target_creds = self.get_service_creds()
+
+        self._pkinit_req(client_creds, target_creds, win2k_variant=True)
+
+    # --- supportedCmsTypes handling and signature algorithms -------------
+
+    def test_pkinit_no_supported_cms_types(self):
+        """Test public-key PK-INIT, excluding the supportedCmsTypes field. This
+        causes Windows to reply with differently-encoded ASN.1."""
+        client_creds = self._get_creds()
+        target_creds = self.get_service_creds()
+
+        self._pkinit_req(client_creds, target_creds,
+                         supported_cms_types=False)
+
+    def test_pkinit_no_supported_cms_types_dh(self):
+        """Test Diffie-Hellman PK-INIT, excluding the supportedCmsTypes field.
+        """
+        client_creds = self._get_creds()
+        target_creds = self.get_service_creds()
+
+        self._pkinit_req(client_creds, target_creds,
+                         using_pkinit=PkInit.DIFFIE_HELLMAN,
+                         supported_cms_types=False)
+
+    def test_pkinit_empty_supported_cms_types(self):
+        """Test public-key PK-INIT with an empty supportedCmsTypes field."""
+        client_creds = self._get_creds()
+        target_creds = self.get_service_creds()
+
+        self._pkinit_req(client_creds, target_creds,
+                         supported_cms_types=[])
+
+    def test_pkinit_empty_supported_cms_types_dh(self):
+        """Test Diffie-Hellman PK-INIT with an empty supportedCmsTypes field.
+        """
+        client_creds = self._get_creds()
+        target_creds = self.get_service_creds()
+
+        self._pkinit_req(client_creds, target_creds,
+                         using_pkinit=PkInit.DIFFIE_HELLMAN,
+                         supported_cms_types=[])
+
+    def test_pkinit_sha256_signature(self):
+        """Test public-key PK-INIT with a SHA256 signature."""
+        client_creds = self._get_creds()
+        target_creds = self.get_service_creds()
+
+        self._pkinit_req(
+            client_creds, target_creds,
+            signature_algorithm=krb5_asn1.id_pkcs1_sha256WithRSAEncryption)
+
+    def test_pkinit_sha256_signature_dh(self):
+        """Test Diffie-Hellman PK-INIT with a SHA256 signature."""
+        client_creds = self._get_creds()
+        target_creds = self.get_service_creds()
+
+        self._pkinit_req(
+            client_creds, target_creds,
+            using_pkinit=PkInit.DIFFIE_HELLMAN,
+            signature_algorithm=krb5_asn1.id_pkcs1_sha256WithRSAEncryption)
+
+    def test_pkinit_sha256_signature_win2k(self):
+        """Test public-key Windows 2000 PK-INIT with a SHA256 signature."""
+        client_creds = self._get_creds()
+        target_creds = self.get_service_creds()
+
+        self._pkinit_req(
+            client_creds, target_creds,
+            signature_algorithm=krb5_asn1.id_pkcs1_sha256WithRSAEncryption,
+            win2k_variant=True)
+
+    def test_pkinit_sha256_certificate_signature(self):
+        """Test public-key PK-INIT with a SHA256 certificate signature."""
+        client_creds = self._get_creds()
+        target_creds = self.get_service_creds()
+
+        self._pkinit_req(
+            client_creds, target_creds,
+            certificate_signature=hashes.SHA256)
+
+    def test_pkinit_sha256_certificate_signature_dh(self):
+        """Test Diffie-Hellman PK-INIT with a SHA256 certificate signature."""
+        client_creds = self._get_creds()
+        target_creds = self.get_service_creds()
+
+        self._pkinit_req(
+            client_creds, target_creds,
+            using_pkinit=PkInit.DIFFIE_HELLMAN,
+            certificate_signature=hashes.SHA256)
+
+    def test_pkinit_sha256_certificate_signature_win2k(self):
+        """Test public-key Windows 2000 PK-INIT with a SHA256 certificate
+        signature."""
+        client_creds = self._get_creds()
+        target_creds = self.get_service_creds()
+
+        self._pkinit_req(
+            client_creds, target_creds,
+            certificate_signature=hashes.SHA256,
+            win2k_variant=True)
+
+    # --- PKINIT Freshness Extension (RFC 8070) tests ---------------------
+
+    def test_pkinit_freshness(self):
+        """Test public-key PK-INIT with the PKINIT Freshness Extension."""
+        client_creds = self._get_creds()
+        target_creds = self.get_service_creds()
+
+        # Perform the AS-REQ to get the freshness token.
+        kdc_exchange_dict = self._as_req(client_creds, target_creds,
+                                         freshness=b'',
+                                         expect_error=KDC_ERR_PREAUTH_REQUIRED,
+                                         expect_edata=True)
+        freshness_token = kdc_exchange_dict.get('freshness_token')
+        self.assertIsNotNone(freshness_token)
+
+        # Include the freshness token in the PK-INIT request.
+        self._pkinit_req(client_creds, target_creds,
+                         freshness_token=freshness_token)
+
+    def test_pkinit_freshness_dh(self):
+        """Test Diffie-Hellman PK-INIT with the PKINIT Freshness Extension."""
+        client_creds = self._get_creds()
+        target_creds = self.get_service_creds()
+
+        # First obtain a freshness token from the KDC…
+        kdc_exchange_dict = self._as_req(client_creds, target_creds,
+                                         freshness=b'',
+                                         expect_error=KDC_ERR_PREAUTH_REQUIRED,
+                                         expect_edata=True)
+        freshness_token = kdc_exchange_dict.get('freshness_token')
+        self.assertIsNotNone(freshness_token)
+
+        # …then present it in the PK-INIT request.
+        self._pkinit_req(client_creds, target_creds,
+                         using_pkinit=PkInit.DIFFIE_HELLMAN,
+                         freshness_token=freshness_token)
+
+    def test_pkinit_freshness_non_empty(self):
+        """Test sending a non-empty freshness token."""
+        client_creds = self._get_creds()
+        target_creds = self.get_service_creds()
+
+        # The KDC still replies with a (fresh) token of its own.
+        kdc_exchange_dict = self._as_req(
+            client_creds, target_creds,
+            freshness=b'A genuine freshness token',
+            expect_error=KDC_ERR_PREAUTH_REQUIRED,
+            expect_edata=True)
+        freshness_token = kdc_exchange_dict.get('freshness_token')
+        self.assertIsNotNone(freshness_token)
+
+    def test_pkinit_freshness_with_enc_ts(self):
+        """Test sending a freshness token and ENC-TS in the same request."""
+        client_creds = self._get_creds()
+        target_creds = self.get_service_creds()
+
+        kdc_exchange_dict = self._as_req(client_creds, target_creds,
+                                         freshness=b'',
+                                         send_enc_ts=True)
+
+        # There should be no freshness token in the reply.
+        freshness_token = kdc_exchange_dict.get('freshness_token')
+        self.assertIsNone(freshness_token)
+
+    def test_pkinit_freshness_current(self):
+        """Test public-key PK-INIT with an up-to-date freshness token."""
+        client_creds = self._get_creds()
+        target_creds = self.get_service_creds()
+
+        freshness_token = self.create_freshness_token()
+
+        self._pkinit_req(client_creds, target_creds,
+                         freshness_token=freshness_token)
+
+    def test_pkinit_freshness_current_dh(self):
+        """Test Diffie-Hellman PK-INIT with an up-to-date freshness token."""
+        client_creds = self._get_creds()
+        target_creds = self.get_service_creds()
+
+        freshness_token = self.create_freshness_token()
+
+        self._pkinit_req(client_creds, target_creds,
+                         using_pkinit=PkInit.DIFFIE_HELLMAN,
+                         freshness_token=freshness_token)
+
+    def test_pkinit_freshness_old(self):
+        """Test public-key PK-INIT with an old freshness token."""
+        client_creds = self._get_creds()
+        target_creds = self.get_service_creds()
+
+        # Present a freshness token from fifteen minutes in the past.
+        fifteen_minutes = timedelta(minutes=15).total_seconds()
+        freshness_token = self.create_freshness_token(offset=-fifteen_minutes)
+
+        # The request should be rejected.
+        self._pkinit_req(client_creds, target_creds,
+                         freshness_token=freshness_token,
+                         expect_error=KDC_ERR_PREAUTH_EXPIRED)
+
+    def test_pkinit_freshness_old_dh(self):
+        """Test Diffie-Hellman PK-INIT with an old freshness token."""
+        client_creds = self._get_creds()
+        target_creds = self.get_service_creds()
+
+        # Present a freshness token from fifteen minutes in the past.
+        fifteen_minutes = timedelta(minutes=15).total_seconds()
+        freshness_token = self.create_freshness_token(offset=-fifteen_minutes)
+
+        # The request should be rejected.
+        self._pkinit_req(client_creds, target_creds,
+                         using_pkinit=PkInit.DIFFIE_HELLMAN,
+                         freshness_token=freshness_token,
+                         expect_error=KDC_ERR_PREAUTH_EXPIRED)
+
+    def test_pkinit_freshness_future(self):
+        """Test public-key PK-INIT with a freshness token from the future."""
+        client_creds = self._get_creds()
+        target_creds = self.get_service_creds()
+
+        # Present a freshness token from fifteen minutes in the future.
+        fifteen_minutes = timedelta(minutes=15).total_seconds()
+        freshness_token = self.create_freshness_token(offset=fifteen_minutes)
+
+        # The request should be rejected.
+        self._pkinit_req(client_creds, target_creds,
+                         freshness_token=freshness_token,
+                         expect_error=KDC_ERR_PREAUTH_EXPIRED)
+
+    def test_pkinit_freshness_future_dh(self):
+        """Test Diffie-Hellman PK-INIT with a freshness token from the future.
+        """
+        client_creds = self._get_creds()
+        target_creds = self.get_service_creds()
+
+        # Present a freshness token from fifteen minutes in the future.
+        fifteen_minutes = timedelta(minutes=15).total_seconds()
+        freshness_token = self.create_freshness_token(offset=fifteen_minutes)
+
+        # The request should be rejected.
+        self._pkinit_req(client_creds, target_creds,
+                         using_pkinit=PkInit.DIFFIE_HELLMAN,
+                         freshness_token=freshness_token,
+                         expect_error=KDC_ERR_PREAUTH_EXPIRED)
+
+    # --- Negative freshness-token tests ----------------------------------
+
+    def test_pkinit_freshness_invalid(self):
+        """Test public-key PK-INIT with an invalid freshness token."""
+        client_creds = self._get_creds()
+        target_creds = self.get_service_creds()
+
+        freshness_token = b'A genuine freshness token'
+
+        # The request should be rejected.
+        self._pkinit_req(client_creds, target_creds,
+                         freshness_token=freshness_token,
+                         expect_error=KDC_ERR_MODIFIED)
+
+    def test_pkinit_freshness_invalid_dh(self):
+        """Test Diffie-Hellman PK-INIT with an invalid freshness token."""
+        client_creds = self._get_creds()
+        target_creds = self.get_service_creds()
+
+        freshness_token = b'A genuine freshness token'
+
+        # The request should be rejected.
+        self._pkinit_req(client_creds, target_creds,
+                         using_pkinit=PkInit.DIFFIE_HELLMAN,
+                         freshness_token=freshness_token,
+                         expect_error=KDC_ERR_MODIFIED)
+
+    def test_pkinit_freshness_rodc_ts(self):
+        """Test public-key PK-INIT with an RODC-issued freshness token."""
+        client_creds = self._get_creds()
+        target_creds = self.get_service_creds()
+
+        # A token signed with the RODC krbtgt key is not acceptable to
+        # the main KDC.
+        rodc_krbtgt_creds = self.get_mock_rodc_krbtgt_creds()
+        freshness_token = self.create_freshness_token(
+            krbtgt_creds=rodc_krbtgt_creds)
+
+        # The token should be rejected.
+        self._pkinit_req(client_creds, target_creds,
+                         freshness_token=freshness_token,
+                         expect_error=KDC_ERR_PREAUTH_FAILED)
+
+    def test_pkinit_freshness_rodc_dh(self):
+        """Test Diffie-Hellman PK-INIT with an RODC-issued freshness token."""
+        client_creds = self._get_creds()
+        target_creds = self.get_service_creds()
+
+        rodc_krbtgt_creds = self.get_mock_rodc_krbtgt_creds()
+        freshness_token = self.create_freshness_token(
+            krbtgt_creds=rodc_krbtgt_creds)
+
+        # The token should be rejected.
+        self._pkinit_req(client_creds, target_creds,
+                         using_pkinit=PkInit.DIFFIE_HELLMAN,
+                         freshness_token=freshness_token,
+                         expect_error=KDC_ERR_PREAUTH_FAILED)
+
+    def test_pkinit_freshness_wrong_header(self):
+        """Test public-key PK-INIT with a modified freshness token."""
+        client_creds = self._get_creds()
+        target_creds = self.get_service_creds()
+
+        freshness_token = self.create_freshness_token()
+
+        # Modify the leading two bytes of the freshness token.
+        freshness_token = b'@@' + freshness_token[2:]
+
+        # Expect to get an error.
+        self._pkinit_req(client_creds, target_creds,
+                         freshness_token=freshness_token,
+                         expect_error=KDC_ERR_MODIFIED)
+
+    def test_pkinit_freshness_wrong_header_dh(self):
+        """Test Diffie-Hellman PK-INIT with a modified freshness token."""
+        client_creds = self._get_creds()
+        target_creds = self.get_service_creds()
+
+        freshness_token = self.create_freshness_token()
+
+        # Modify the leading two bytes of the freshness token.
+        freshness_token = b'@@' + freshness_token[2:]
+
+        # Expect to get an error.
+        self._pkinit_req(client_creds, target_creds,
+                         using_pkinit=PkInit.DIFFIE_HELLMAN,
+                         freshness_token=freshness_token,
+                         expect_error=KDC_ERR_MODIFIED)
+
+    def test_pkinit_freshness_empty(self):
+        """Test public-key PK-INIT with an empty freshness token."""
+        client_creds = self._get_creds()
+        target_creds = self.get_service_creds()
+
+        # Expect to get an error.
+        self._pkinit_req(client_creds, target_creds,
+                         freshness_token=b'',
+                         expect_error=KDC_ERR_MODIFIED)
+
+    def test_pkinit_freshness_empty_dh(self):
+        """Test Diffie-Hellman PK-INIT with an empty freshness token."""
+        client_creds = self._get_creds()
+        target_creds = self.get_service_creds()
+
+        # Expect to get an error.
+        self._pkinit_req(client_creds, target_creds,
+                         using_pkinit=PkInit.DIFFIE_HELLMAN,
+                         freshness_token=b'',
+                         expect_error=KDC_ERR_MODIFIED)
+
+    def test_pkinit_revoked(self):
+        """Test PK-INIT with a revoked certificate.
+
+        PK-INIT succeeds with a freshly issued certificate (both
+        public-key and Diffie-Hellman variants), then fails with
+        KDC_ERR_CLIENT_NOT_TRUSTED once the certificate is added to the
+        CRL.
+        """
+        client_creds = self._get_creds()
+        target_creds = self.get_service_creds()
+
+        ca_cert, ca_private_key = self.get_ca_cert_and_private_key()
+
+        # Issue a certificate for the client and use the same one for
+        # all four requests below.
+        certificate = self.create_certificate(client_creds,
+                                              ca_cert,
+                                              ca_private_key)
+
+        # The initial public-key PK-INIT request should succeed.
+        self._pkinit_req(client_creds, target_creds,
+                         certificate=certificate)
+
+        # The initial Diffie-Hellman PK-INIT request should succeed.
+        self._pkinit_req(client_creds, target_creds,
+                         certificate=certificate,
+                         using_pkinit=PkInit.DIFFIE_HELLMAN)
+
+        # Revoke the client’s certificate.
+        self.revoke_certificate(certificate, ca_cert, ca_private_key)
+
+        # The subsequent public-key PK-INIT request should fail.
+        self._pkinit_req(client_creds, target_creds,
+                         certificate=certificate,
+                         expect_error=KDC_ERR_CLIENT_NOT_TRUSTED)
+
+        # The subsequent Diffie-Hellman PK-INIT request should also fail.
+        self._pkinit_req(client_creds, target_creds,
+                         certificate=certificate,
+                         using_pkinit=PkInit.DIFFIE_HELLMAN,
+                         expect_error=KDC_ERR_CLIENT_NOT_TRUSTED)
+
+    def _as_req(self,
+                creds,
+                target_creds,
+                *,
+                expect_error=0,
+                expect_edata=False,
+                etypes=None,
+                freshness=None,
+                send_enc_ts=False,
+                ):
+        """Perform a password-based AS-REQ exchange and return the
+        kdc_exchange_dict (from which e.g. a freshness token can be
+        extracted).
+
+        :param expect_error: expected KDC error code, or 0 for success.
+        :param expect_edata: whether e-data is expected in an error reply.
+        :param etypes: encryption types to offer (default AES256 + RC4).
+        :param freshness: value for a PA-AS-FRESHNESS padata element, or
+            None to send none.
+        :param send_enc_ts: whether to include encrypted-timestamp
+            pre-authentication.
+        """
+        if send_enc_ts:
+            preauth_key = self.PasswordKey_from_creds(creds, kcrypto.Enctype.AES256)
+        else:
+            preauth_key = None
+
+        if freshness is not None or send_enc_ts:
+            # Build the padata list on demand for the exchange.
+            def generate_padata_fn(_kdc_exchange_dict,
+                                   _callback_dict,
+                                   req_body):
+                padata = []
+
+                if freshness is not None:
+                    freshness_padata = self.PA_DATA_create(PADATA_AS_FRESHNESS,
+                                                           freshness)
+                    padata.append(freshness_padata)
+
+                if send_enc_ts:
+                    # DER-encode the timestamp, encrypt it with the
+                    # client’s long-term key, and wrap it in padata.
+                    patime, pausec = self.get_KerberosTimeWithUsec()
+                    enc_ts = self.PA_ENC_TS_ENC_create(patime, pausec)
+                    enc_ts = self.der_encode(
+                        enc_ts, asn1Spec=krb5_asn1.PA_ENC_TS_ENC())
+
+                    enc_ts = self.EncryptedData_create(preauth_key,
+                                                       KU_PA_ENC_TIMESTAMP,
+                                                       enc_ts)
+                    enc_ts = self.der_encode(
+                        enc_ts, asn1Spec=krb5_asn1.EncryptedData())
+
+                    enc_ts = self.PA_DATA_create(PADATA_ENC_TIMESTAMP, enc_ts)
+
+                    padata.append(enc_ts)
+
+                return padata, req_body
+        else:
+            generate_padata_fn = None
+
+        user_name = creds.get_username()
+        cname = self.PrincipalName_create(name_type=NT_PRINCIPAL,
+                                          names=user_name.split('/'))
+
+        target_name = target_creds.get_username()
+        target_realm = target_creds.get_realm()
+
+        # Request a host service ticket; target_name[:-1] strips the
+        # trailing '$' of the target account name.
+        sname = self.PrincipalName_create(name_type=NT_PRINCIPAL,
+                                          names=['host', target_name[:-1]])
+
+        if expect_error:
+            check_error_fn = self.generic_check_kdc_error
+            check_rep_fn = None
+
+            expected_sname = sname
+        else:
+            check_error_fn = None
+            check_rep_fn = self.generic_check_kdc_rep
+
+            # On success the KDC canonicalizes the sname to the account
+            # name.
+            expected_sname = self.PrincipalName_create(name_type=NT_PRINCIPAL,
+                                                       names=[target_name])
+
+        kdc_options = ('forwardable,'
+                       'renewable,'
+                       'canonicalize,'
+                       'renewable-ok')
+        kdc_options = krb5_asn1.KDCOptions(kdc_options)
+
+        ticket_decryption_key = self.TicketDecryptionKey_from_creds(
+            target_creds)
+
+        kdc_exchange_dict = self.as_exchange_dict(
+            creds=creds,
+            expected_crealm=creds.get_realm(),
+            expected_cname=cname,
+            expected_srealm=target_realm,
+            expected_sname=expected_sname,
+            expected_supported_etypes=target_creds.tgs_supported_enctypes,
+            ticket_decryption_key=ticket_decryption_key,
+            generate_padata_fn=generate_padata_fn,
+            check_error_fn=check_error_fn,
+            check_rep_fn=check_rep_fn,
+            check_kdc_private_fn=self.generic_check_kdc_private,
+            expected_error_mode=expect_error,
+            expected_salt=creds.get_salt(),
+            preauth_key=preauth_key,
+            kdc_options=str(kdc_options),
+            expect_edata=expect_edata)
+
+        # Ask for a ticket valid for ten hours.
+        till = self.get_KerberosTime(offset=36000)
+
+        if etypes is None:
+            etypes = kcrypto.Enctype.AES256, kcrypto.Enctype.RC4,
+
+        rep = self._generic_kdc_exchange(kdc_exchange_dict,
+                                         cname=cname,
+                                         realm=target_realm,
+                                         sname=sname,
+                                         till_time=till,
+                                         etypes=etypes)
+        if expect_error:
+            self.check_error_rep(rep, expect_error)
+        else:
+            self.check_as_reply(rep)
+
+        return kdc_exchange_dict
+
+    def get_ca_cert_and_private_key(self):
+        """Load the test CA’s certificate and private key from the paths
+        given by the CA_CERT (and, for PEM, CA_PRIVATE_KEY) environment
+        variables, and return them as a (cert, private_key) tuple.
+
+        Supports both PKCS#12 bundles (certificate and key together) and
+        PEM-encoded certificate + separate PEM private key.
+        """
+        # The password with which to try to encrypt the certificate or private
+        # key specified on the command line.
+        ca_pass = samba.tests.env_get_var_value('CA_PASS', allow_missing=True)
+        if ca_pass is not None:
+            ca_pass = ca_pass.encode('utf-8')
+
+        # The root certificate of the CA, with which we can issue new
+        # certificates.
+        ca_cert_path = samba.tests.env_get_var_value('CA_CERT')
+        with open(ca_cert_path, mode='rb') as f:
+            ca_cert_data = f.read()
+
+        try:
+            # If the certificate file is in the PKCS#12 format (such as is
+            # found in a .pfx file) try to get the private key and the
+            # certificate all in one go.
+            ca_private_key, ca_cert, _additional_ca_certs = (
+                pkcs12.load_key_and_certificates(
+                    ca_cert_data, ca_pass, default_backend()))
+        except ValueError:
+            # Fall back to loading a PEM-encoded certificate.
+            ca_private_key = None
+            ca_cert = x509.load_pem_x509_certificate(
+                ca_cert_data, default_backend())
+
+        # If we didn’t get the private key, do that now.
+        if ca_private_key is None:
+            ca_private_key_path = samba.tests.env_get_var_value(
+                'CA_PRIVATE_KEY')
+            with open(ca_private_key_path, mode='rb') as f:
+                ca_private_key = serialization.load_pem_private_key(
+                    f.read(), password=ca_pass, backend=default_backend())
+
+        return ca_cert, ca_private_key
+
+    def create_certificate(self,
+                           creds,
+                           ca_cert,
+                           ca_private_key,
+                           certificate_signature=None):
+        """Issue a client certificate, signed by the given CA, suitable
+        for PK-INIT as the account in *creds*.
+
+        The certificate carries a Microsoft UPN SubjectAlternativeName
+        and a szOID_NTDS_CA_SECURITY_EXT extension containing the
+        account’s SID (a ‘strong mapping’).
+
+        :param certificate_signature: hash class used to sign the
+            certificate (defaults to SHA256).
+        :return: the signed x509 certificate.
+        """
+        if certificate_signature is None:
+            certificate_signature = hashes.SHA256
+
+        user_name = creds.get_username()
+
+        builder = x509.CertificateBuilder()
+
+        # Add the subject name.
+        cert_name = f'{user_name}@{creds.get_realm().lower()}'
+        builder = builder.subject_name(x509.Name([
+            # This name can be anything; it isn’t needed to authorize the
+            # user. The SubjectAlternativeName is used for that instead.
+            x509.NameAttribute(NameOID.COUNTRY_NAME, 'US'),
+            x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, 'SambaState'),
+            x509.NameAttribute(NameOID.ORGANIZATION_NAME, 'SambaSelfTesting'),
+            x509.NameAttribute(NameOID.ORGANIZATIONAL_UNIT_NAME, 'Users'),
+            x509.NameAttribute(NameOID.COMMON_NAME,
+                               f'{cert_name}/emailAddress={cert_name}'),
+        ]))
+
+        # The new certificate must be issued by the root CA.
+        builder = builder.issuer_name(ca_cert.issuer)
+
+        one_day = timedelta(1, 0, 0)
+
+        # Put the certificate start time in the past to avoid issues where the
+        # KDC considers the certificate to be invalid due to clock skew. Note
+        # that if the certificate predates the existence of the account in AD,
+        # Windows will refuse authentication unless a strong mapping is
+        # present (in the certificate, or in AD).
+        # See https://support.microsoft.com/en-us/topic/kb5014754-certificate-based-authentication-changes-on-windows-domain-controllers-ad2c23b0-15d8-4340-a468-4d4f3b188f16#ID0EFR
+        builder = builder.not_valid_before(datetime.today() - one_day)
+
+        # Thirty-day validity is plenty for a test run.
+        builder = builder.not_valid_after(datetime.today() + (one_day * 30))
+
+        builder = builder.serial_number(x509.random_serial_number())
+
+        public_key = creds.get_public_key()
+        builder = builder.public_key(public_key)
+
+        # Add the SubjectAlternativeName. Windows uses this to map the account
+        # to the certificate.
+        id_pkinit_ms_san = x509.ObjectIdentifier(
+            str(krb5_asn1.id_pkinit_ms_san))
+        encoded_upn = self.der_encode(creds.get_upn(),
+                                      asn1Spec=krb5_asn1.MS_UPN_SAN())
+        ms_upn_san = x509.OtherName(id_pkinit_ms_san, encoded_upn)
+        builder = builder.add_extension(
+            x509.SubjectAlternativeName([ms_upn_san]),
+            critical=False,
+        )
+
+        # This is an end-entity certificate, not a CA.
+        builder = builder.add_extension(
+            x509.BasicConstraints(ca=False, path_length=None), critical=True,
+        )
+
+        # The key identifier is used to identify the certificate.
+        subject_key_id = x509.SubjectKeyIdentifier.from_public_key(public_key)
+        builder = builder.add_extension(
+            subject_key_id, critical=True,
+        )
+
+        # Add the key usages for which this certificate is valid. Windows
+        # doesn’t actually require this extension to be present.
+        builder = builder.add_extension(
+            # Heimdal requires that the certificate be valid for digital
+            # signatures.
+            x509.KeyUsage(digital_signature=True,
+                          content_commitment=False,
+                          key_encipherment=False,
+                          data_encipherment=False,
+                          key_agreement=False,
+                          key_cert_sign=False,
+                          crl_sign=False,
+                          encipher_only=False,
+                          decipher_only=False),
+            critical=True,
+        )
+
+        # Windows doesn’t require this extension to be present either; but if
+        # it is, Windows will not accept the certificate unless either client
+        # authentication or smartcard logon is specified, returning
+        # KDC_ERR_INCONSISTENT_KEY_PURPOSE otherwise.
+        builder = builder.add_extension(
+            x509.ExtendedKeyUsage([
+                x509.oid.ExtendedKeyUsageOID.CLIENT_AUTH,
+            ]),
+            critical=False,
+        )
+
+        # If the certificate predates (as ours does) the existence of the
+        # account that presents it Windows will refuse to accept it unless
+        # there exists a strong mapping from one to the other. This strong
+        # mapping will in this case take the form of a certificate extension
+        # described in [MS-WCCE] 2.2.2.7.7.4 (szOID_NTDS_CA_SECURITY_EXT) and
+        # containing the account’s SID.
+
+        # Encode this structure manually until we are able to produce the same
+        # ASN.1 encoding that Windows does.
+
+        encoded_sid = creds.get_sid().encode('utf-8')
+
+        # The OCTET STRING tag, followed by length and encoded SID…
+        security_ext = bytes([0x04]) + self.asn1_length(encoded_sid) + (
+            encoded_sid)
+
+        # …enclosed in a construct tagged with the application-specific value
+        # 0…
+        security_ext = bytes([0xa0]) + self.asn1_length(security_ext) + (
+            security_ext)
+
+        # …preceded by the extension OID…
+        encoded_oid = self.der_encode(krb5_asn1.szOID_NTDS_OBJECTSID,
+                                      univ.ObjectIdentifier())
+        security_ext = encoded_oid + security_ext
+
+        # …and another application-specific tag 0…
+        # (This is the part about which I’m unsure. This length is not just of
+        # the OID, but of the entire structure so far, as if there’s some
+        # nesting going on. So far I haven’t been able to replicate this with
+        # pyasn1.)
+        security_ext = bytes([0xa0]) + self.asn1_length(security_ext) + (
+            security_ext)
+
+        # …all enclosed in a structure with a SEQUENCE tag.
+        security_ext = bytes([0x30]) + self.asn1_length(security_ext) + (
+            security_ext)
+
+        # Add the security extension to the certificate.
+        builder = builder.add_extension(
+            x509.UnrecognizedExtension(
+                x509.ObjectIdentifier(
+                    str(krb5_asn1.szOID_NTDS_CA_SECURITY_EXT)),
+                security_ext,
+            ),
+            critical=False,
+        )
+
+        # Sign the certificate with the CA’s private key. Windows accepts both
+        # SHA1 and SHA256 hashes.
+        certificate = builder.sign(
+            private_key=ca_private_key, algorithm=certificate_signature(),
+            backend=default_backend()
+        )
+
+        return certificate
+
+    def revoke_certificate(self, certificate,
+                           ca_cert,
+                           ca_private_key,
+                           crl_signature=None):
+        """Add *certificate* to the CRL file named by the KRB5_CRL_FILE
+        environment variable, re-signing the CRL with the CA key.
+
+        Existing revocations and CRL extensions are preserved; if the
+        existing file cannot be parsed, a fresh CRL is created.
+
+        :param crl_signature: hash class used to sign the CRL (defaults
+            to SHA256).
+        """
+        if crl_signature is None:
+            crl_signature = hashes.SHA256
+
+        # Read the existing certificate revocation list.
+        crl_path = samba.tests.env_get_var_value('KRB5_CRL_FILE')
+        with open(crl_path, 'rb') as crl_file:
+            crl_data = crl_file.read()
+
+        try:
+            # Get the list of existing revoked certificates.
+            revoked_certs = x509.load_pem_x509_crl(crl_data, default_backend())
+            extensions = revoked_certs.extensions
+        except ValueError:
+            # We couldn’t parse the file. Let’s just create a new CRL from
+            # scratch.
+            revoked_certs = []
+            extensions = []
+
+        # Create a new CRL.
+        builder = x509.CertificateRevocationListBuilder()
+        builder = builder.issuer_name(ca_cert.issuer)
+        builder = builder.last_update(datetime.today())
+        one_day = timedelta(1, 0, 0)
+        builder = builder.next_update(datetime.today() + one_day)
+
+        # Add the existing revoked certificates.
+        for revoked_cert in revoked_certs:
+            builder = builder.add_revoked_certificate(revoked_cert)
+
+        # Add the serial number of the certificate that we’re revoking.
+        revoked_cert = x509.RevokedCertificateBuilder().serial_number(
+            certificate.serial_number
+        ).revocation_date(
+            datetime.today()
+        ).build(default_backend())
+        builder = builder.add_revoked_certificate(revoked_cert)
+
+        # Copy over any extensions from the existing certificate.
+        for extension in extensions:
+            builder = builder.add_extension(extension.value,
+                                            extension.critical)
+
+        # Sign the CRL with the CA’s private key.
+        crl = builder.sign(
+            private_key=ca_private_key, algorithm=crl_signature(),
+            backend=default_backend(),
+        )
+
+        # Write the CRL back out to the file.
+        crl_data = crl.public_bytes(serialization.Encoding.PEM)
+        with open(crl_path, 'wb') as crl_file:
+            crl_file.write(crl_data)
+
    def _pkinit_req(self,
                    creds,
                    target_creds,
                    *,
                    certificate=None,
                    expect_error=0,
                    using_pkinit=PkInit.PUBLIC_KEY,
                    etypes=None,
                    pk_nonce=None,
                    supported_cms_types=None,
                    signature_algorithm=None,
                    certificate_signature=None,
                    freshness_token=None,
                    win2k_variant=False,
                    ):
        """Perform an AS-REQ exchange using PK-INIT pre-authentication.

        If *certificate* is not given, a fresh client certificate signed
        by the test CA is created. The pre-authentication data is built in
        the nested ``generate_pk_padata`` callback and signed with the
        client’s private key. Depending on *using_pkinit*, either
        Diffie-Hellman key agreement or the client’s public key is used to
        protect the reply key.

        :param expect_error: expected KDC error code; 0 means success.
        :param win2k_variant: use the Windows 2000 AuthPack/PK-AS-REQ
            encodings and PA-DATA type instead of the RFC 4556 ones.
        :returns: the ticket credentials from the AS reply on success, or
            None if an error was expected and received.
        """
        self.assertIsNot(using_pkinit, PkInit.NOT_USED)

        if signature_algorithm is None:
            # This algorithm must be one of ‘sig_algs’ for it to be supported
            # by Heimdal.
            signature_algorithm = krb5_asn1.sha1WithRSAEncryption

        signature_algorithm_id = self.AlgorithmIdentifier_create(
            signature_algorithm)

        if certificate is None:
            ca_cert, ca_private_key = self.get_ca_cert_and_private_key()

            # Create a certificate for the client signed by the CA.
            certificate = self.create_certificate(creds,
                                                  ca_cert,
                                                  ca_private_key,
                                                  certificate_signature)

        private_key = creds.get_private_key()

        if using_pkinit is PkInit.DIFFIE_HELLMAN:
            # This is the 2048-bit MODP Group from RFC 3526. Heimdal refers to
            # it as “rfc3526-MODP-group14”.
            p, g = 32317006071311007300338913926423828248817941241140239112842009751400741706634354222619689417363569347117901737909704191754605873209195028853758986185622153212175412514901774520270235796078236248884246189477587641105928646099411723245426622522193230540919037680524235519125679715870117001058055877651038861847280257976054903569732561526167081339361799541336476559160368317896729073178384589680639671900977202194168647225871031411336429319536193471636533209717077448227988588565369208645296636077250268955505928362751121174096972998068410554359584866583291642136218231078990999448652468262416972035911852507045361090559, 2

            numbers = dh.DHParameterNumbers(p, g)
            dh_params = numbers.parameters(default_backend())

            dh_private_key = dh_params.generate_private_key()

            # With Diffie-Hellman, the preauth key is the DH private key.
            preauth_key = dh_private_key
        else:
            preauth_key = private_key

        if pk_nonce is None:
            pk_nonce = self.get_Nonce()

        def generate_pk_padata(_kdc_exchange_dict,
                               _callback_dict,
                               req_body):
            # Build the PK-INIT PA-DATA for this request body.
            if win2k_variant:
                digest = None
            else:
                checksum_blob = self.der_encode(
                    req_body,
                    asn1Spec=krb5_asn1.KDC_REQ_BODY())

                # Calculate the SHA1 checksum over the KDC-REQ-BODY. This checksum
                # is required to be present in the authenticator, and must be SHA1.
                digest = hashes.Hash(hashes.SHA1(), default_backend())
                digest.update(checksum_blob)
                digest = digest.finalize()

            ctime, cusec = self.get_KerberosTimeWithUsec()

            if win2k_variant:
                krbtgt_sname = self.get_krbtgt_sname()
                krbtgt_realm = self.get_krbtgt_creds().get_realm()
            else:
                krbtgt_sname = None
                krbtgt_realm = None

            # Create the authenticator, which shows that we had possession of
            # the private key at some point.
            authenticator_obj = self.PKAuthenticator_create(
                cusec,
                ctime,
                pk_nonce,
                pa_checksum=digest,
                freshness_token=freshness_token,
                kdc_name=krbtgt_sname,
                kdc_realm=krbtgt_realm,
                win2k_variant=win2k_variant)

            if using_pkinit is PkInit.DIFFIE_HELLMAN:
                dh_public_key = dh_private_key.public_key()

                encoded_dh_public_key = dh_public_key.public_bytes(
                    serialization.Encoding.DER,
                    serialization.PublicFormat.SubjectPublicKeyInfo)
                decoded_dh_public_key = self.der_decode(
                    encoded_dh_public_key,
                    asn1Spec=krb5_asn1.SubjectPublicKeyInfo())
                dh_public_key_bitstring = decoded_dh_public_key[
                    'subjectPublicKey']

                # Encode the Diffie-Hellman parameters.
                params = dh_params.parameter_bytes(
                    serialization.Encoding.DER,
                    serialization.ParameterFormat.PKCS3)

                pk_algorithm = self.AlgorithmIdentifier_create(
                    krb5_asn1.dhpublicnumber,
                    parameters=params)

                # Create the structure containing information about the public
                # key of the certificate that we shall present.
                client_public_value = self.SubjectPublicKeyInfo_create(
                    pk_algorithm,
                    dh_public_key_bitstring)
            else:
                client_public_value = None

            # An optional set of algorithms supported by the client in
            # decreasing order of preference. For whatever reason, if this
            # field is missing or empty, Windows will respond with a slightly
            # differently encoded ReplyKeyPack, wrapping it first in a
            # ContentInfo structure.
            nonlocal supported_cms_types
            if win2k_variant:
                self.assertIsNone(supported_cms_types)
            elif supported_cms_types is False:
                # Exclude this field.
                supported_cms_types = None
            elif supported_cms_types is None:
                supported_cms_types = [
                    self.AlgorithmIdentifier_create(
                        krb5_asn1.id_pkcs1_sha256WithRSAEncryption),
                ]

            # The client may include this field if it wishes to reuse DH keys
            # or allow the KDC to do so.
            client_dh_nonce = None

            auth_pack_obj = self.AuthPack_create(
                authenticator_obj,
                client_public_value=client_public_value,
                supported_cms_types=supported_cms_types,
                client_dh_nonce=client_dh_nonce,
                win2k_variant=win2k_variant)

            asn1_spec = (krb5_asn1.AuthPack_Win2k
                         if win2k_variant
                         else krb5_asn1.AuthPack)
            auth_pack = self.der_encode(auth_pack_obj, asn1Spec=asn1_spec())

            signature_hash = self.hash_from_algorithm(signature_algorithm)

            # Sign the AuthPack with the client’s private key (PKCS#1 v1.5).
            pad = padding.PKCS1v15()
            signed = private_key.sign(auth_pack,
                                      padding=pad,
                                      algorithm=signature_hash())

            encap_content_info_obj = self.EncapsulatedContentInfo_create(
                krb5_asn1.id_pkinit_authData, auth_pack)

            subject_key_id = certificate.extensions.get_extension_for_oid(
                x509.ExtensionOID.SUBJECT_KEY_IDENTIFIER)
            signer_identifier = self.SignerIdentifier_create(
                subject_key_id=subject_key_id.value.digest)

            signer_info = self.SignerInfo_create(
                signer_identifier,
                signature_algorithm_id,
                signature_algorithm_id,
                signed,
                signed_attrs=[
                    # Note: these attributes are optional.
                    krb5_asn1.id_pkinit_authData,
                    krb5_asn1.id_messageDigest,
                ])

            encoded_cert = certificate.public_bytes(serialization.Encoding.DER)
            decoded_cert = self.der_decode(
                encoded_cert, asn1Spec=krb5_asn1.CertificateChoices())

            # Wrap everything in a CMS SignedData structure.
            signed_auth_pack = self.SignedData_create(
                [signature_algorithm_id],
                encap_content_info_obj,
                signer_infos=[signer_info],
                certificates=[decoded_cert],
                crls=None)

            signed_auth_pack = self.der_encode(signed_auth_pack,
                                               asn1Spec=krb5_asn1.SignedData())

            pk_as_req = self.PK_AS_REQ_create(signed_auth_pack,
                                              # This contains a list of CAs,
                                              # trusted by the client, that can
                                              # be used to certify the KDC.
                                              trusted_certifiers=None,
                                              kdc_pk_id=None,
                                              win2k_variant=win2k_variant)

            pa_type = (PADATA_PK_AS_REP_19
                       if win2k_variant
                       else PADATA_PK_AS_REQ)
            padata = [self.PA_DATA_create(pa_type, pk_as_req)]

            return padata, req_body

        user_name = creds.get_username()
        cname = self.PrincipalName_create(name_type=NT_PRINCIPAL,
                                          names=user_name.split('/'))

        target_name = target_creds.get_username()
        target_realm = target_creds.get_realm()

        # target_name[:-1] drops the trailing character — presumably the ‘$’
        # of a machine account name; TODO confirm against callers.
        sname = self.PrincipalName_create(name_type=NT_PRINCIPAL,
                                          names=['host', target_name[:-1]])

        if expect_error:
            check_error_fn = self.generic_check_kdc_error
            check_rep_fn = None

            expected_sname = sname
        else:
            check_error_fn = None
            check_rep_fn = self.generic_check_kdc_rep

            expected_sname = self.PrincipalName_create(name_type=NT_PRINCIPAL,
                                                       names=[target_name])

        kdc_options = ('forwardable,'
                       'renewable,'
                       'canonicalize,'
                       'renewable-ok')
        kdc_options = krb5_asn1.KDCOptions(kdc_options)

        ticket_decryption_key = self.TicketDecryptionKey_from_creds(
            target_creds)

        kdc_exchange_dict = self.as_exchange_dict(
            creds=creds,
            client_cert=certificate,
            expected_crealm=creds.get_realm(),
            expected_cname=cname,
            expected_srealm=target_realm,
            expected_sname=expected_sname,
            expected_supported_etypes=target_creds.tgs_supported_enctypes,
            ticket_decryption_key=ticket_decryption_key,
            generate_padata_fn=generate_pk_padata,
            check_error_fn=check_error_fn,
            check_rep_fn=check_rep_fn,
            check_kdc_private_fn=self.generic_check_kdc_private,
            expected_error_mode=expect_error,
            expected_salt=creds.get_salt(),
            preauth_key=preauth_key,
            kdc_options=str(kdc_options),
            using_pkinit=using_pkinit,
            pk_nonce=pk_nonce,
            expect_edata=False)

        # Ticket end time: ten hours from now.
        till = self.get_KerberosTime(offset=36000)

        if etypes is None:
            etypes = kcrypto.Enctype.AES256, kcrypto.Enctype.RC4,

            if using_pkinit is PkInit.PUBLIC_KEY:
                # DES-EDE3-CBC is required for public-key PK-INIT to work on
                # Windows.
                etypes += DES_EDE3_CBC,

        rep = self._generic_kdc_exchange(kdc_exchange_dict,
                                         cname=cname,
                                         realm=target_realm,
                                         sname=sname,
                                         till_time=till,
                                         etypes=etypes)
        if expect_error:
            self.check_error_rep(rep, expect_error)
            return None

        self.check_as_reply(rep)
        return kdc_exchange_dict['rep_ticket_creds']
+
+
if __name__ == '__main__':
    import unittest

    # Keep debugging output (ASN.1 pretty-printing, hex dumps) off by
    # default when running this file directly.
    global_asn1_print = False
    global_hexdump = False
    unittest.main()
diff --git a/python/samba/tests/krb5/protected_users_tests.py b/python/samba/tests/krb5/protected_users_tests.py
new file mode 100755
index 0000000..fee78ab
--- /dev/null
+++ b/python/samba/tests/krb5/protected_users_tests.py
@@ -0,0 +1,1053 @@
+#!/usr/bin/env python3
+# Unix SMB/CIFS implementation.
+# Copyright (C) Stefan Metzmacher 2020
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import sys
+import os
+
+sys.path.insert(0, 'bin/python')
+os.environ['PYTHONUNBUFFERED'] = '1'
+
+from functools import partial
+
+import ldb
+
+from samba import generate_random_password, ntstatus
+from samba.dcerpc import netlogon, security
+from samba.hresult import HRES_SEC_E_LOGON_DENIED
+
+import samba.tests.krb5.kcrypto as kcrypto
+from samba.tests.krb5.kdc_base_test import KDCBaseTest
+from samba.tests.krb5.rfc4120_constants import (
+ AES128_CTS_HMAC_SHA1_96,
+ AES256_CTS_HMAC_SHA1_96,
+ ARCFOUR_HMAC_MD5,
+ DES3_CBC_MD5,
+ DES3_CBC_SHA1,
+ DES_CBC_CRC,
+ DES_CBC_MD5,
+ KDC_ERR_ETYPE_NOSUPP,
+ KDC_ERR_POLICY,
+ KDC_ERR_PREAUTH_REQUIRED,
+ KRB_ERROR,
+ NT_PRINCIPAL,
+ NT_SRV_INST,
+)
+import samba.tests.krb5.rfc4120_pyasn1 as krb5_asn1
+
+global_asn1_print = False
+global_hexdump = False
+
+
+class ProtectedUsersTests(KDCBaseTest):
+ def setUp(self):
+ super().setUp()
+ self.do_asn1_print = global_asn1_print
+ self.do_hexdump = global_hexdump
+
+ samdb = self.get_samdb()
+
+ # Get the old ‘minPwdAge’.
+ minPwdAge = samdb.get_minPwdAge()
+
+ # Reset the ‘minPwdAge’ as it was before.
+ self.addCleanup(samdb.set_minPwdAge, minPwdAge)
+
+ # Set it temporarily to ‘0’.
+ samdb.set_minPwdAge('0')
+
+ # Get account credentials for testing.
+ def _get_creds(self,
+ protected,
+ account_type=KDCBaseTest.AccountType.USER,
+ ntlm=False,
+ member_of=None,
+ supported_enctypes=None,
+ cached=True):
+ opts = {
+ 'kerberos_enabled': not ntlm,
+ }
+ members = ()
+ if protected:
+ samdb = self.get_samdb()
+ protected_users_group = (f'<SID={samdb.get_domain_sid()}-'
+ f'{security.DOMAIN_RID_PROTECTED_USERS}>')
+ members += (protected_users_group,)
+ if member_of is not None:
+ members += (member_of,)
+
+ if members:
+ opts['member_of'] = members
+ if supported_enctypes is not None:
+ opts['supported_enctypes'] = supported_enctypes
+
+ return self.get_cached_creds(account_type=account_type,
+ opts=opts,
+ use_cache=cached)
+
    # Test NTLM authentication with a normal account. Authentication should
    # succeed.
    def test_ntlm_not_protected(self):
        client_creds = self._get_creds(protected=False,
                                       ntlm=True,
                                       cached=False)

        self._connect(client_creds, simple_bind=False)

    # Test NTLM authentication with a protected account. Authentication should
    # fail, as Protected User accounts cannot use NTLM authentication.
    def test_ntlm_protected(self):
        client_creds = self._get_creds(protected=True,
                                       ntlm=True,
                                       cached=False)

        # Expect the bind to be rejected with SEC_E_LOGON_DENIED.
        self._connect(client_creds, simple_bind=False,
                      expect_error=f'{HRES_SEC_E_LOGON_DENIED:08X}')

    # Test that the Protected Users restrictions still apply when the user is a
    # member of a group that is itself a member of Protected Users.
    def test_ntlm_protected_nested(self):
        samdb = self.get_samdb()
        group_name = self.get_new_username()
        group_dn = self.create_group(samdb, group_name)

        # Make the new group a member of Protected Users.
        protected_users_group = (f'<SID={samdb.get_domain_sid()}-'
                                 f'{security.DOMAIN_RID_PROTECTED_USERS}>')
        self.add_to_group(group_dn, ldb.Dn(samdb, protected_users_group),
                          'member', expect_attr=False)

        client_creds = self._get_creds(protected=False,
                                       ntlm=True,
                                       member_of=group_dn)

        self._connect(client_creds, simple_bind=False,
                      expect_error=f'{HRES_SEC_E_LOGON_DENIED:08X}')
+
    # Test SAMR password changes for unprotected and protected accounts.
    def test_samr_change_password_not_protected(self):
        # Use a non-cached account so that it is not locked out for other
        # tests.
        client_creds = self._get_creds(protected=False,
                                       cached=False)

        self._test_samr_change_password(
            client_creds,
            expect_error=None)

    def test_samr_change_password_protected(self):
        # Use a non-cached account so that it is not locked out for other
        # tests.
        client_creds = self._get_creds(protected=True,
                                       cached=False)

        # The protected account’s password change is expected to be
        # rejected with NT_STATUS_ACCOUNT_RESTRICTION.
        self._test_samr_change_password(
            client_creds,
            expect_error=ntstatus.NT_STATUS_ACCOUNT_RESTRICTION)
+
    # Test interactive SamLogon with an unprotected account.
    def test_samlogon_interactive_not_protected(self):
        client_creds = self._get_creds(protected=False,
                                       ntlm=True)
        self._test_samlogon(creds=client_creds,
                            logon_type=netlogon.NetlogonInteractiveInformation)

    # Test interactive SamLogon with a protected account.
    def test_samlogon_interactive_protected(self):
        client_creds = self._get_creds(protected=True,
                                       ntlm=True)
        # Expect NT_STATUS_ACCOUNT_RESTRICTION for the protected account.
        self._test_samlogon(
            creds=client_creds,
            logon_type=netlogon.NetlogonInteractiveInformation,
            expect_error=ntstatus.NT_STATUS_ACCOUNT_RESTRICTION)

    # Test network SamLogon with an unprotected account.
    def test_samlogon_network_not_protected(self):
        client_creds = self._get_creds(protected=False,
                                       ntlm=True)
        self._test_samlogon(creds=client_creds,
                            logon_type=netlogon.NetlogonNetworkInformation)

    # Test network SamLogon with a protected account.
    def test_samlogon_network_protected(self):
        client_creds = self._get_creds(protected=True,
                                       ntlm=True)
        # As above, network logon is also expected to be restricted.
        self._test_samlogon(
            creds=client_creds,
            logon_type=netlogon.NetlogonNetworkInformation,
            expect_error=ntstatus.NT_STATUS_ACCOUNT_RESTRICTION)
+
+ # Test that changing the password of an account in the Protected Users
+ # group still generates an NT hash.
+ def test_protected_nt_hash(self):
+ # Use a non-cached account, as we are changing the password.
+ client_creds = self._get_creds(protected=True,
+ cached=False)
+ client_dn = client_creds.get_dn()
+
+ new_password = generate_random_password(32, 32)
+ utf16pw = f'"{new_password}"'.encode('utf-16-le')
+
+ samdb = self.get_samdb()
+ msg = ldb.Message(client_dn)
+ msg['unicodePwd'] = ldb.MessageElement(utf16pw,
+ ldb.FLAG_MOD_REPLACE,
+ 'unicodePwd')
+ samdb.modify(msg)
+
+ client_creds.set_password(new_password)
+
+ expected_etypes = {
+ kcrypto.Enctype.AES256,
+ kcrypto.Enctype.AES128,
+ }
+ if self.expect_nt_hash:
+ expected_etypes.add(kcrypto.Enctype.RC4)
+
+ self.get_keys(client_creds,
+ expected_etypes=expected_etypes)
+
    # Test that DES-CBC-CRC cannot be used whether or not the user is
    # protected. (The protected variants additionally pass
    # rc4_support=False, reflecting the account’s lack of RC4 support.)
    def test_des_cbc_crc_not_protected(self):
        client_creds = self._get_creds(protected=False)

        self._test_etype(client_creds, etype=DES_CBC_CRC,
                         expect_error=True)

    def test_des_cbc_crc_protected(self):
        client_creds = self._get_creds(protected=True)

        self._test_etype(client_creds, etype=DES_CBC_CRC,
                         expect_error=True, rc4_support=False)

    # Test that DES-CBC-MD5 cannot be used whether or not the user is
    # protected.
    def test_des_cbc_md5_not_protected(self):
        client_creds = self._get_creds(protected=False)

        self._test_etype(client_creds, etype=DES_CBC_MD5,
                         expect_error=True)

    def test_des_cbc_md5_protected(self):
        client_creds = self._get_creds(protected=True)

        self._test_etype(client_creds, etype=DES_CBC_MD5,
                         expect_error=True, rc4_support=False)

    # Test that DES3-CBC-MD5 cannot be used whether or not the user is
    # protected.
    def test_des3_cbc_md5_not_protected(self):
        client_creds = self._get_creds(protected=False)

        self._test_etype(client_creds, etype=DES3_CBC_MD5,
                         expect_error=True)

    def test_des3_cbc_md5_protected(self):
        client_creds = self._get_creds(protected=True)

        self._test_etype(client_creds, etype=DES3_CBC_MD5,
                         expect_error=True, rc4_support=False)

    # Test that DES3-CBC-SHA1 cannot be used whether or not the user is
    # protected.
    def test_des3_cbc_sha1_not_protected(self):
        client_creds = self._get_creds(protected=False)

        self._test_etype(client_creds, etype=DES3_CBC_SHA1,
                         expect_error=True)

    def test_des3_cbc_sha1_protected(self):
        client_creds = self._get_creds(protected=True)

        self._test_etype(client_creds, etype=DES3_CBC_SHA1,
                         expect_error=True, rc4_support=False)
+
    # Test that RC4 may only be used if the user is not protected.
    def test_rc4_not_protected(self):
        client_creds = self._get_creds(protected=False)

        self._test_etype(client_creds, etype=ARCFOUR_HMAC_MD5)

    def test_rc4_protected_aes256_preauth(self):
        client_creds = self._get_creds(protected=True)

        # With AES256 pre-authentication, requesting RC4 does not fail
        # outright, but RC4 is not supported for the protected account.
        self._test_etype(client_creds, etype=ARCFOUR_HMAC_MD5,
                         preauth_etype=AES256_CTS_HMAC_SHA1_96,
                         rc4_support=False)

    def test_rc4_protected_rc4_preauth(self):
        client_creds = self._get_creds(protected=True)

        # RC4 pre-authentication for a protected account is an error.
        self._test_etype(client_creds, etype=ARCFOUR_HMAC_MD5,
                         preauth_etype=ARCFOUR_HMAC_MD5,
                         expect_error=True, rc4_support=False,
                         expect_edata=False)

    # Test that AES256 can always be used.
    def test_aes256_not_protected(self):
        client_creds = self._get_creds(protected=False)

        self._test_etype(client_creds, etype=AES256_CTS_HMAC_SHA1_96)

    def test_aes256_protected(self):
        client_creds = self._get_creds(protected=True)

        self._test_etype(client_creds, etype=AES256_CTS_HMAC_SHA1_96,
                         rc4_support=False)

    def test_aes256_rc4_not_protected(self):
        client_creds = self._get_creds(protected=False)

        self._test_etype(client_creds, etype=(AES256_CTS_HMAC_SHA1_96,
                                              ARCFOUR_HMAC_MD5))

    def test_aes256_rc4_protected(self):
        client_creds = self._get_creds(protected=True)

        self._test_etype(client_creds, etype=(AES256_CTS_HMAC_SHA1_96,
                                              ARCFOUR_HMAC_MD5),
                         rc4_support=False)

    def test_rc4_aes256_not_protected(self):
        client_creds = self._get_creds(protected=False)

        self._test_etype(client_creds, etype=(ARCFOUR_HMAC_MD5,
                                              AES256_CTS_HMAC_SHA1_96))

    def test_rc4_aes256_protected(self):
        client_creds = self._get_creds(protected=True)

        self._test_etype(client_creds, etype=(ARCFOUR_HMAC_MD5,
                                              AES256_CTS_HMAC_SHA1_96),
                         rc4_support=False)
+
    # Test that AES128 can always be used, alone or in combination with
    # RC4, in either order.
    def test_aes128_not_protected(self):
        client_creds = self._get_creds(protected=False)

        self._test_etype(client_creds, etype=AES128_CTS_HMAC_SHA1_96)

    def test_aes128_protected(self):
        client_creds = self._get_creds(protected=True)

        self._test_etype(client_creds, etype=AES128_CTS_HMAC_SHA1_96,
                         rc4_support=False)

    def test_aes128_rc4_not_protected(self):
        client_creds = self._get_creds(protected=False)

        self._test_etype(client_creds, etype=(AES128_CTS_HMAC_SHA1_96,
                                              ARCFOUR_HMAC_MD5))

    def test_aes128_rc4_protected(self):
        client_creds = self._get_creds(protected=True)

        self._test_etype(client_creds, etype=(AES128_CTS_HMAC_SHA1_96,
                                              ARCFOUR_HMAC_MD5),
                         rc4_support=False)

    def test_rc4_aes128_not_protected(self):
        client_creds = self._get_creds(protected=False)

        self._test_etype(client_creds, etype=(ARCFOUR_HMAC_MD5,
                                              AES128_CTS_HMAC_SHA1_96))

    def test_rc4_aes128_protected(self):
        client_creds = self._get_creds(protected=True)

        self._test_etype(client_creds, etype=(ARCFOUR_HMAC_MD5,
                                              AES128_CTS_HMAC_SHA1_96),
                         rc4_support=False)
+
    # Test also with computer accounts. These mirror the user-account
    # etype tests above, using AccountType.COMPUTER.
    def test_rc4_mac_not_protected(self):
        client_creds = self._get_creds(
            protected=False,
            account_type=self.AccountType.COMPUTER)

        self._test_etype(client_creds, etype=ARCFOUR_HMAC_MD5)

    def test_rc4_mac_protected_aes256_preauth(self):
        client_creds = self._get_creds(
            protected=True,
            account_type=self.AccountType.COMPUTER)

        self._test_etype(client_creds, etype=ARCFOUR_HMAC_MD5,
                         preauth_etype=AES256_CTS_HMAC_SHA1_96,
                         rc4_support=False)

    def test_rc4_mac_protected_rc4_preauth(self):
        client_creds = self._get_creds(
            protected=True,
            account_type=self.AccountType.COMPUTER)

        self._test_etype(client_creds, etype=ARCFOUR_HMAC_MD5,
                         preauth_etype=ARCFOUR_HMAC_MD5,
                         expect_error=True, rc4_support=False,
                         expect_edata=False)

    def test_aes256_rc4_mac_not_protected(self):
        client_creds = self._get_creds(
            protected=False,
            account_type=self.AccountType.COMPUTER)

        self._test_etype(client_creds, etype=(AES256_CTS_HMAC_SHA1_96,
                                              ARCFOUR_HMAC_MD5))

    def test_aes256_rc4_mac_protected(self):
        client_creds = self._get_creds(
            protected=True,
            account_type=self.AccountType.COMPUTER)

        self._test_etype(client_creds, etype=(AES256_CTS_HMAC_SHA1_96,
                                              ARCFOUR_HMAC_MD5),
                         rc4_support=False)

    def test_rc4_aes256_mac_not_protected(self):
        client_creds = self._get_creds(
            protected=False,
            account_type=self.AccountType.COMPUTER)

        self._test_etype(client_creds, etype=(ARCFOUR_HMAC_MD5,
                                              AES256_CTS_HMAC_SHA1_96))

    def test_rc4_aes256_mac_protected(self):
        client_creds = self._get_creds(
            protected=True,
            account_type=self.AccountType.COMPUTER)

        self._test_etype(client_creds, etype=(ARCFOUR_HMAC_MD5,
                                              AES256_CTS_HMAC_SHA1_96),
                         rc4_support=False)

    def test_aes128_rc4_mac_not_protected(self):
        client_creds = self._get_creds(
            protected=False,
            account_type=self.AccountType.COMPUTER)

        self._test_etype(client_creds, etype=(AES128_CTS_HMAC_SHA1_96,
                                              ARCFOUR_HMAC_MD5))

    def test_aes128_rc4_mac_protected(self):
        client_creds = self._get_creds(
            protected=True,
            account_type=self.AccountType.COMPUTER)

        self._test_etype(client_creds, etype=(AES128_CTS_HMAC_SHA1_96,
                                              ARCFOUR_HMAC_MD5),
                         rc4_support=False)

    def test_rc4_aes128_mac_not_protected(self):
        client_creds = self._get_creds(
            protected=False,
            account_type=self.AccountType.COMPUTER)

        self._test_etype(client_creds, etype=(ARCFOUR_HMAC_MD5,
                                              AES128_CTS_HMAC_SHA1_96))

    def test_rc4_aes128_mac_protected(self):
        client_creds = self._get_creds(
            protected=True,
            account_type=self.AccountType.COMPUTER)

        self._test_etype(client_creds, etype=(ARCFOUR_HMAC_MD5,
                                              AES128_CTS_HMAC_SHA1_96),
                         rc4_support=False)
+
    # Test that RC4 can only be used as a preauth etype if the user is not
    # protected.
    def test_ts_rc4_not_protected(self):
        client_creds = self._get_creds(protected=False)

        self._test_etype(client_creds, preauth_etype=ARCFOUR_HMAC_MD5)

    def test_ts_rc4_protected(self):
        client_creds = self._get_creds(protected=True)

        # RC4 pre-authentication for a protected account must fail.
        self._test_etype(client_creds, preauth_etype=ARCFOUR_HMAC_MD5,
                         expect_error=True, rc4_support=False,
                         expect_edata=False)

    # Test that the etype restrictions still apply if the user is a member of a
    # group that is itself in the Protected Users group.
    def test_ts_rc4_protected_nested(self):
        samdb = self.get_samdb()
        group_name = self.get_new_username()
        group_dn = self.create_group(samdb, group_name)

        # Make the new group a member of Protected Users.
        protected_users_group = (f'<SID={samdb.get_domain_sid()}-'
                                 f'{security.DOMAIN_RID_PROTECTED_USERS}>')
        self.add_to_group(group_dn, ldb.Dn(samdb, protected_users_group),
                          'member', expect_attr=False)

        client_creds = self._get_creds(protected=False,
                                       member_of=group_dn)

        self._test_etype(client_creds, preauth_etype=ARCFOUR_HMAC_MD5,
                         expect_error=True, rc4_support=False,
                         expect_edata=False)

    # Test that AES256 can always be used as a preauth etype.
    def test_ts_aes256_not_protected(self):
        client_creds = self._get_creds(protected=False)

        self._test_etype(client_creds, preauth_etype=AES256_CTS_HMAC_SHA1_96)

    def test_ts_aes256_protected(self):
        client_creds = self._get_creds(protected=True)

        self._test_etype(client_creds, preauth_etype=AES256_CTS_HMAC_SHA1_96,
                         rc4_support=False)

    # Test that AES128 can always be used as a preauth etype.
    def test_ts_aes128_not_protected(self):
        client_creds = self._get_creds(protected=False)

        self._test_etype(client_creds, preauth_etype=AES128_CTS_HMAC_SHA1_96)

    def test_ts_aes128_protected(self):
        client_creds = self._get_creds(protected=True)

        self._test_etype(client_creds, preauth_etype=AES128_CTS_HMAC_SHA1_96,
                         rc4_support=False)
+
    # Test also with machine accounts. These mirror the preauth-etype
    # tests above, using AccountType.COMPUTER.
    def test_ts_rc4_mac_not_protected(self):
        client_creds = self._get_creds(
            protected=False,
            account_type=self.AccountType.COMPUTER)

        self._test_etype(client_creds, preauth_etype=ARCFOUR_HMAC_MD5)

    def test_ts_rc4_mac_protected(self):
        client_creds = self._get_creds(
            protected=True,
            account_type=self.AccountType.COMPUTER)

        self._test_etype(client_creds, preauth_etype=ARCFOUR_HMAC_MD5,
                         expect_error=True, rc4_support=False,
                         expect_edata=False)

    def test_ts_aes256_mac_not_protected(self):
        client_creds = self._get_creds(
            protected=False,
            account_type=self.AccountType.COMPUTER)

        self._test_etype(client_creds, preauth_etype=AES256_CTS_HMAC_SHA1_96)

    def test_ts_aes256_mac_protected(self):
        client_creds = self._get_creds(
            protected=True,
            account_type=self.AccountType.COMPUTER)

        self._test_etype(client_creds, preauth_etype=AES256_CTS_HMAC_SHA1_96,
                         rc4_support=False)

    def test_ts_aes128_mac_not_protected(self):
        client_creds = self._get_creds(
            protected=False,
            account_type=self.AccountType.COMPUTER)

        self._test_etype(client_creds, preauth_etype=AES128_CTS_HMAC_SHA1_96)

    def test_ts_aes128_mac_protected(self):
        client_creds = self._get_creds(
            protected=True,
            account_type=self.AccountType.COMPUTER)

        self._test_etype(client_creds, preauth_etype=AES128_CTS_HMAC_SHA1_96,
                         rc4_support=False)
+
    # Test that the restrictions do not apply to accounts acting as services,
    # and that RC4 service tickets can still be obtained.
    def test_service_rc4_only_not_protected(self):
        client_creds = self.get_client_creds()
        # The service account supports only RC4.
        service_creds = self._get_creds(protected=False,
                                        account_type=self.AccountType.COMPUTER,
                                        supported_enctypes=kcrypto.Enctype.RC4)
        tgt = self.get_tgt(client_creds)
        self.get_service_ticket(tgt, service_creds)

    def test_service_rc4_only_protected(self):
        client_creds = self.get_client_creds()
        # Even though the service account is protected, obtaining a
        # service ticket to it is still expected to succeed.
        service_creds = self._get_creds(protected=True,
                                        account_type=self.AccountType.COMPUTER,
                                        supported_enctypes=kcrypto.Enctype.RC4)
        tgt = self.get_tgt(client_creds)
        self.get_service_ticket(tgt, service_creds)
+
    # Test that requesting a ticket with a short lifetime results in a ticket
    # with that lifetime.
    def test_tgt_lifetime_shorter_not_protected(self):
        client_creds = self._get_creds(protected=False)

        till = self.get_KerberosTime(offset=2 * 60 * 60)  # 2 hours
        tgt = self._test_etype(client_creds,
                               preauth_etype=AES256_CTS_HMAC_SHA1_96,
                               till=till)
        self.check_ticket_times(tgt, expected_end=till)

    def test_tgt_lifetime_shorter_protected(self):
        client_creds = self._get_creds(protected=True)

        till = self.get_KerberosTime(offset=2 * 60 * 60)  # 2 hours
        tgt = self._test_etype(client_creds,
                               preauth_etype=AES256_CTS_HMAC_SHA1_96,
                               till=till, rc4_support=False)

        # For the protected account, the renew-till time is also checked.
        self.check_ticket_times(tgt, expected_end=till,
                                expected_renew_time=till)

    # Test that requesting a ticket with a long lifetime produces a ticket with
    # that lifetime, unless the user is protected, whereupon the lifetime will
    # be capped at four hours.
    def test_tgt_lifetime_longer_not_protected(self):
        client_creds = self._get_creds(protected=False)

        till = self.get_KerberosTime(offset=6 * 60 * 60)  # 6 hours
        tgt = self._test_etype(client_creds,
                               preauth_etype=AES256_CTS_HMAC_SHA1_96,
                               till=till)
        self.check_ticket_times(tgt, expected_end=till)

    def test_tgt_lifetime_longer_protected(self):
        client_creds = self._get_creds(protected=True)

        till = self.get_KerberosTime(offset=6 * 60 * 60)  # 6 hours
        tgt = self._test_etype(client_creds,
                               preauth_etype=AES256_CTS_HMAC_SHA1_96,
                               till=till, rc4_support=False)

        expected_life = 4 * 60 * 60  # 4 hours
        self.check_ticket_times(tgt, expected_life=expected_life,
                                expected_renew_life=expected_life)
+
    # Test that the lifetime of a service ticket is capped to the lifetime of
    # the TGT.
    def test_ticket_lifetime_not_protected(self):
        client_creds = self._get_creds(protected=False)

        till = self.get_KerberosTime(offset=2 * 60 * 60)  # 2 hours
        tgt = self._test_etype(
            client_creds, preauth_etype=AES256_CTS_HMAC_SHA1_96, till=till)
        self.check_ticket_times(tgt, expected_end=till)

        # Ask for a service ticket lasting longer than the TGT; the result
        # is expected to end no later than the TGT does.
        service_creds = self.get_service_creds()
        till2 = self.get_KerberosTime(offset=10 * 60 * 60)  # 10 hours
        ticket = self.get_service_ticket(tgt, service_creds, till=till2)

        self.check_ticket_times(ticket, expected_end=till)

    def test_ticket_lifetime_protected(self):
        client_creds = self._get_creds(protected=True)

        till = self.get_KerberosTime(offset=2 * 60 * 60)  # 2 hours
        tgt = self._test_etype(
            client_creds, preauth_etype=AES256_CTS_HMAC_SHA1_96, till=till,
            rc4_support=False)

        self.check_ticket_times(tgt, expected_end=till,
                                expected_renew_time=till)

        service_creds = self.get_service_creds()
        till2 = self.get_KerberosTime(offset=10 * 60 * 60)  # 10 hours
        ticket = self.get_service_ticket(tgt, service_creds, till=till2)

        self.check_ticket_times(ticket, expected_end=till)
+
    # Test that a request for a forwardable ticket will only be fulfilled if
    # the user is not protected.
    def test_forwardable_as_not_protected(self):
        client_creds = self._get_creds(protected=False)

        self._get_tgt_check_flags(client_creds, kdc_options='forwardable',
                                  expected_flags='forwardable')

    def test_forwardable_as_protected(self):
        client_creds = self._get_creds(protected=True)

        # The returned TGT must NOT carry the forwardable flag.
        self._get_tgt_check_flags(client_creds, kdc_options='forwardable',
                                  unexpected_flags='forwardable',
                                  rc4_support=False)

    # Test that a request for a proxiable ticket will only be fulfilled if the
    # user is not protected.
    def test_proxiable_as_not_protected(self):
        client_creds = self._get_creds(protected=False)

        self._get_tgt_check_flags(client_creds, kdc_options='proxiable',
                                  expected_flags='proxiable')

    def test_proxiable_as_protected(self):
        client_creds = self._get_creds(protected=True)

        self._get_tgt_check_flags(client_creds, kdc_options='proxiable',
                                  unexpected_flags='proxiable',
                                  rc4_support=False)

    # An alternate test for Protected Users that passes if we get a policy
    # error rather than a ticket that is not proxiable.
    def test_proxiable_as_protected_policy_error(self):
        client_creds = self._get_creds(protected=True)

        self._get_tgt_check_flags(client_creds, kdc_options='proxiable',
                                  unexpected_flags='proxiable',
                                  rc4_support=False, expect_error=True)
+
+ # Test that if we have a forwardable TGT, then we can use it to obtain a
+ # forwardable service ticket, whether or not the account is protected.
+ def test_forwardable_tgs_not_protected(self):
+ client_creds = self._get_creds(protected=False)
+
+ tgt = self.get_tgt(client_creds)
+ tgt = self.modified_ticket(
+ tgt,
+ modify_fn=partial(self.modify_ticket_flag, flag='forwardable',
+ value=True),
+ checksum_keys=self.get_krbtgt_checksum_key())
+
+ service_creds = self.get_service_creds()
+ self.get_service_ticket(
+ tgt, service_creds, kdc_options='forwardable',
+ expected_flags=krb5_asn1.TicketFlags('forwardable'))
+
+ def test_forwardable_tgs_protected(self):
+ client_creds = self._get_creds(protected=True)
+
+ tgt = self.get_tgt(client_creds, rc4_support=False)
+ tgt = self.modified_ticket(
+ tgt,
+ modify_fn=partial(self.modify_ticket_flag, flag='forwardable',
+ value=True),
+ checksum_keys=self.get_krbtgt_checksum_key())
+
+ service_creds = self.get_service_creds()
+ self.get_service_ticket(
+ tgt, service_creds, kdc_options='forwardable',
+ expected_flags=krb5_asn1.TicketFlags('forwardable'),
+ rc4_support=False)
+
+ # Test that if we have a proxiable TGT, then we can use it to obtain a
+ # forwardable service ticket, whether or not the account is protected.
+ def test_proxiable_tgs_not_protected(self):
+ client_creds = self._get_creds(protected=False)
+
+ tgt = self.get_tgt(client_creds)
+ tgt = self.modified_ticket(
+ tgt,
+ modify_fn=partial(self.modify_ticket_flag, flag='proxiable',
+ value=True),
+ checksum_keys=self.get_krbtgt_checksum_key())
+
+ service_creds = self.get_service_creds()
+ self.get_service_ticket(
+ tgt, service_creds, kdc_options='proxiable',
+ expected_flags=krb5_asn1.TicketFlags('proxiable'))
+
+ def test_proxiable_tgs_protected(self):
+ client_creds = self._get_creds(protected=True)
+
+ tgt = self.get_tgt(client_creds, rc4_support=False)
+ tgt = self.modified_ticket(
+ tgt,
+ modify_fn=partial(self.modify_ticket_flag, flag='proxiable',
+ value=True),
+ checksum_keys=self.get_krbtgt_checksum_key())
+
+ service_creds = self.get_service_creds()
+ self.get_service_ticket(
+ tgt, service_creds, kdc_options='proxiable',
+ expected_flags=krb5_asn1.TicketFlags('proxiable'),
+ rc4_support=False)
+
+ def check_ticket_times(self,
+ ticket_creds,
+ expected_end=None,
+ expected_life=None,
+ expected_renew_time=None,
+ expected_renew_life=None):
+ ticket = ticket_creds.ticket_private
+
+ authtime = ticket['authtime']
+ starttime = ticket.get('starttime', authtime)
+ endtime = ticket['endtime']
+ renew_till = ticket.get('renew-till', None)
+
+ starttime = self.get_EpochFromKerberosTime(starttime)
+
+ if expected_end is None:
+ self.assertIsNotNone(expected_life,
+ 'did not supply expected endtime or lifetime')
+
+ expected_end = self.get_KerberosTime(epoch=starttime,
+ offset=expected_life)
+ else:
+ self.assertIsNone(expected_life,
+ 'supplied both expected endtime and lifetime')
+
+ self.assertEqual(expected_end, endtime.decode('ascii'))
+
+ if renew_till is None:
+ self.assertIsNone(expected_renew_time)
+ self.assertIsNone(expected_renew_life)
+ else:
+ if expected_renew_life is not None:
+ self.assertIsNone(
+ expected_renew_time,
+ 'supplied both expected renew time and lifetime')
+
+ expected_renew_time = self.get_KerberosTime(
+ epoch=starttime, offset=expected_renew_life)
+
+ if expected_renew_time is not None:
+ self.assertEqual(expected_renew_time,
+ renew_till.decode('ascii'))
+
    def _test_etype(self,
                    creds,
                    expect_error=False,
                    etype=None,
                    preauth_etype=None,
                    till=None,
                    rc4_support=True,
                    expect_edata=None):
        """Perform an AS exchange for a renewable TGT with the given etypes
        and check the reply.

        :param creds: client credentials to authenticate with.
        :param expect_error: if true, expect KDC_ERR_ETYPE_NOSUPP.
        :param etype: etype(s) to offer; a bare int is wrapped in a tuple.
            Defaults to (AES256, RC4).
        :param preauth_etype: if None, run the two-round exchange (expect
            PREAUTH_REQUIRED first, then retry with the key from the
            returned ETYPE-INFO2); otherwise pre-authenticate immediately
            with a password key of this etype.
        :param till: requested end time; defaults to now + 36000 seconds.
        :param rc4_support: passed through to the exchange checks.
        :param expect_edata: whether to expect e-data in an error reply.
        :return: the TGT's ticket credentials on success, or None when an
            expected error was correctly returned.
        """
        if etype is None:
            etype = (AES256_CTS_HMAC_SHA1_96, ARCFOUR_HMAC_MD5)
        elif isinstance(etype, int):
            etype = (etype,)

        user_name = creds.get_username()
        realm = creds.get_realm()
        salt = creds.get_salt()

        cname = self.PrincipalName_create(name_type=NT_PRINCIPAL,
                                          names=user_name.split('/'))
        sname = self.PrincipalName_create(name_type=NT_SRV_INST,
                                          names=['krbtgt', realm])
        # The reply is expected to carry the uppercased realm in the sname.
        expected_sname = self.PrincipalName_create(
            name_type=NT_SRV_INST, names=['krbtgt', realm.upper()])

        expected_cname = cname

        if till is None:
            till = self.get_KerberosTime(offset=36000)

        renew_time = till

        krbtgt_creds = self.get_krbtgt_creds()
        ticket_decryption_key = (
            self.TicketDecryptionKey_from_creds(krbtgt_creds))

        expected_etypes = krbtgt_creds.tgs_supported_enctypes

        kdc_options = krb5_asn1.KDCOptions('renewable')
        expected_flags = krb5_asn1.TicketFlags('renewable')

        expected_error = KDC_ERR_ETYPE_NOSUPP if expect_error else 0

        if preauth_etype is None:
            # First round: no pre-authentication, so the KDC should demand
            # it (or may already report the etype error).
            if expected_error:
                expected_error_mode = KDC_ERR_PREAUTH_REQUIRED, expected_error
            else:
                expected_error_mode = KDC_ERR_PREAUTH_REQUIRED

            rep, kdc_exchange_dict = self._test_as_exchange(
                creds=creds,
                cname=cname,
                realm=realm,
                sname=sname,
                till=till,
                renew_time=renew_time,
                expected_error_mode=expected_error_mode,
                expected_crealm=realm,
                expected_cname=expected_cname,
                expected_srealm=realm,
                expected_sname=sname,
                expected_salt=salt,
                expected_flags=expected_flags,
                expected_supported_etypes=expected_etypes,
                etypes=etype,
                padata=None,
                kdc_options=kdc_options,
                ticket_decryption_key=ticket_decryption_key,
                rc4_support=rc4_support,
                expect_edata=expect_edata)
            self.assertIsNotNone(rep)
            self.assertEqual(KRB_ERROR, rep['msg-type'])
            error_code = rep['error-code']
            if expected_error:
                self.assertIn(error_code, expected_error_mode)
                if error_code == expected_error:
                    # Got the expected etype error already; nothing more to
                    # do for this test.
                    return
            else:
                self.assertEqual(expected_error_mode, error_code)

            # Derive the pre-authentication key from the first entry of the
            # ETYPE-INFO2 returned with the PREAUTH_REQUIRED error.
            etype_info2 = kdc_exchange_dict['preauth_etype_info2']

            preauth_key = self.PasswordKey_from_etype_info2(creds,
                                                            etype_info2[0],
                                                            creds.get_kvno())
        else:
            preauth_key = self.PasswordKey_from_creds(creds, preauth_etype)

        # Second (or only) round: send an encrypted-timestamp PA-DATA.
        ts_enc_padata = self.get_enc_timestamp_pa_data_from_key(preauth_key)
        padata = [ts_enc_padata]

        expected_realm = realm.upper()

        rep, kdc_exchange_dict = self._test_as_exchange(
            creds=creds,
            cname=cname,
            realm=realm,
            sname=sname,
            till=till,
            renew_time=renew_time,
            expected_error_mode=expected_error,
            expected_crealm=expected_realm,
            expected_cname=expected_cname,
            expected_srealm=expected_realm,
            expected_sname=expected_sname,
            expected_salt=salt,
            expected_flags=expected_flags,
            expected_supported_etypes=expected_etypes,
            etypes=etype,
            padata=padata,
            kdc_options=kdc_options,
            preauth_key=preauth_key,
            ticket_decryption_key=ticket_decryption_key,
            rc4_support=rc4_support,
            expect_edata=expect_edata)
        if expect_error:
            self.check_error_rep(rep, expected_error)

            return None

        self.check_as_reply(rep)

        ticket_creds = kdc_exchange_dict['rep_ticket_creds']
        return ticket_creds
+
    def _get_tgt_check_flags(self,
                             creds,
                             kdc_options,
                             rc4_support=True,
                             expect_error=False,
                             expected_flags=None,
                             unexpected_flags=None):
        """Request a TGT with the given KDC options and check which ticket
        flags the KDC did (or did not) grant.

        :param creds: client credentials to authenticate with.
        :param kdc_options: KDC options string to request.
        :param rc4_support: passed through to the exchange checks.
        :param expect_error: if true, expect KDC_ERR_POLICY on the second,
            pre-authenticated round.
        :param expected_flags: flags that must be set on the ticket.
        :param unexpected_flags: flags that must NOT be set on the ticket.
        :return: the TGT's ticket credentials on success, or None when an
            expected policy error was correctly returned.
        """
        user_name = creds.get_username()

        realm = creds.get_realm()

        salt = creds.get_salt()

        etype = (AES256_CTS_HMAC_SHA1_96, ARCFOUR_HMAC_MD5)
        cname = self.PrincipalName_create(name_type=NT_PRINCIPAL,
                                          names=user_name.split('/'))
        sname = self.PrincipalName_create(name_type=NT_SRV_INST,
                                          names=['krbtgt', realm])
        # The reply is expected to carry the uppercased realm in the sname.
        expected_sname = self.PrincipalName_create(
            name_type=NT_SRV_INST, names=['krbtgt', realm.upper()])

        expected_cname = cname

        till = self.get_KerberosTime(offset=36000)

        krbtgt_creds = self.get_krbtgt_creds()
        ticket_decryption_key = (
            self.TicketDecryptionKey_from_creds(krbtgt_creds))

        expected_etypes = krbtgt_creds.tgs_supported_enctypes

        kdc_options = krb5_asn1.KDCOptions(kdc_options)

        if expected_flags is not None:
            expected_flags = krb5_asn1.TicketFlags(expected_flags)
        if unexpected_flags is not None:
            unexpected_flags = krb5_asn1.TicketFlags(unexpected_flags)

        # First round: no pre-authentication; expect PREAUTH_REQUIRED.
        rep, kdc_exchange_dict = self._test_as_exchange(
            creds=creds,
            cname=cname,
            realm=realm,
            sname=sname,
            till=till,
            expected_error_mode=KDC_ERR_PREAUTH_REQUIRED,
            expected_crealm=realm,
            expected_cname=expected_cname,
            expected_srealm=realm,
            expected_sname=sname,
            expected_salt=salt,
            expected_flags=expected_flags,
            unexpected_flags=unexpected_flags,
            expected_supported_etypes=expected_etypes,
            etypes=etype,
            padata=None,
            kdc_options=kdc_options,
            ticket_decryption_key=ticket_decryption_key,
            rc4_support=rc4_support)
        self.check_pre_authentication(rep)

        # Derive the pre-authentication key from the first entry of the
        # returned ETYPE-INFO2.
        etype_info2 = kdc_exchange_dict['preauth_etype_info2']

        preauth_key = self.PasswordKey_from_etype_info2(creds,
                                                        etype_info2[0],
                                                        creds.get_kvno())

        ts_enc_padata = self.get_enc_timestamp_pa_data_from_key(preauth_key)
        padata = [ts_enc_padata]

        expected_realm = realm.upper()

        expected_error = KDC_ERR_POLICY if expect_error else 0

        # Second round: pre-authenticated request; expect success or the
        # policy error.
        rep, kdc_exchange_dict = self._test_as_exchange(
            creds=creds,
            cname=cname,
            realm=realm,
            sname=sname,
            till=till,
            expected_error_mode=expected_error,
            expected_crealm=expected_realm,
            expected_cname=expected_cname,
            expected_srealm=expected_realm,
            expected_sname=expected_sname,
            expected_salt=salt,
            expected_flags=expected_flags,
            unexpected_flags=unexpected_flags,
            expected_supported_etypes=expected_etypes,
            etypes=etype,
            padata=padata,
            kdc_options=kdc_options,
            preauth_key=preauth_key,
            ticket_decryption_key=ticket_decryption_key,
            rc4_support=rc4_support)
        if expect_error:
            self.check_error_rep(rep, expected_error)

            return None

        self.check_as_reply(rep)

        ticket_creds = kdc_exchange_dict['rep_ticket_creds']
        return ticket_creds
+
+
if __name__ == '__main__':
    # Keep verbose ASN.1 printing and hexdumps off when run standalone.
    global_asn1_print = False
    global_hexdump = False
    import unittest
    unittest.main()
diff --git a/python/samba/tests/krb5/pyasn1_regen.sh b/python/samba/tests/krb5/pyasn1_regen.sh
new file mode 100755
index 0000000..75b3988
--- /dev/null
+++ b/python/samba/tests/krb5/pyasn1_regen.sh
@@ -0,0 +1,42 @@
#!/bin/bash
#

#
# I used https://github.com/kimgr/asn1ate.git
# to generate pyasn1 bindings for rfc4120.asn1
#

# Positional arguments: the asn1ate checkout and the ASN.1 input file.
PATH_TO_ASN1ATE_CHECKOUT=$1
PATH_TO_ASN1_INPUT_FILE=$2

# Treat unset variables as errors and abort on any command failure.
set -u
set -e

usage()
{
	echo "usage: $0 PATH_TO_ASN1ATE_CHECKOUT PATH_TO_ASN1_INPUT_FILE > PATH_TO_PYASN1_OUTPUT_FILE"
}

# Both arguments must be present and refer to an existing directory and
# file respectively; otherwise print usage and fail.
test -n "${PATH_TO_ASN1ATE_CHECKOUT}" || {
	usage
	exit 1
}
test -n "${PATH_TO_ASN1_INPUT_FILE}" || {
	usage
	exit 1
}
test -d "${PATH_TO_ASN1ATE_CHECKOUT}" || {
	usage
	exit 1
}
test -f "${PATH_TO_ASN1_INPUT_FILE}" || {
	usage
	exit 1
}

PATH_TO_PYASN1GEN_PY="${PATH_TO_ASN1ATE_CHECKOUT}/asn1ate/pyasn1gen.py"

# Make the asn1ate package importable, preserving any existing PYTHONPATH.
PYTHONPATH="${PATH_TO_ASN1ATE_CHECKOUT}:${PYTHONPATH-}"
export PYTHONPATH

# The generated pyasn1 bindings are written to stdout.
python3 "${PATH_TO_PYASN1GEN_PY}" "${PATH_TO_ASN1_INPUT_FILE}"
diff --git a/python/samba/tests/krb5/raw_testcase.py b/python/samba/tests/krb5/raw_testcase.py
new file mode 100644
index 0000000..90d286a
--- /dev/null
+++ b/python/samba/tests/krb5/raw_testcase.py
@@ -0,0 +1,6221 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Isaac Boukris 2020
+# Copyright (C) Stefan Metzmacher 2020
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import sys
+import socket
+import struct
+import time
+import datetime
+import random
+import binascii
+import itertools
+import collections
+import math
+
+from enum import Enum
+from pprint import pprint
+
+from cryptography import x509
+from cryptography.hazmat.primitives import asymmetric, hashes
+from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
+from cryptography.hazmat.backends import default_backend
+
+from pyasn1.codec.der.decoder import decode as pyasn1_der_decode
+from pyasn1.codec.der.encoder import encode as pyasn1_der_encode
+from pyasn1.codec.native.decoder import decode as pyasn1_native_decode
+from pyasn1.codec.native.encoder import encode as pyasn1_native_encode
+
+from pyasn1.codec.ber.encoder import BitStringEncoder
+import pyasn1.type.univ
+
+from pyasn1.error import PyAsn1Error
+
+from samba import unix2nttime
+from samba.credentials import Credentials
+from samba.dcerpc import claims, krb5pac, netlogon, samr, security
+from samba.gensec import FEATURE_SEAL
+from samba.ndr import ndr_pack, ndr_unpack
+from samba.dcerpc.misc import (
+ SEC_CHAN_WKSTA,
+ SEC_CHAN_BDC,
+)
+
+import samba.tests
+from samba.tests import TestCase
+
+import samba.tests.krb5.rfc4120_pyasn1 as krb5_asn1
+from samba.tests.krb5.rfc4120_constants import (
+ AD_IF_RELEVANT,
+ AD_WIN2K_PAC,
+ FX_FAST_ARMOR_AP_REQUEST,
+ KDC_ERR_CLIENT_REVOKED,
+ KDC_ERR_GENERIC,
+ KDC_ERR_POLICY,
+ KDC_ERR_PREAUTH_FAILED,
+ KDC_ERR_SKEW,
+ KDC_ERR_UNKNOWN_CRITICAL_FAST_OPTIONS,
+ KERB_ERR_TYPE_EXTENDED,
+ KRB_AP_REP,
+ KRB_AP_REQ,
+ KRB_AS_REP,
+ KRB_AS_REQ,
+ KRB_ERROR,
+ KRB_PRIV,
+ KRB_TGS_REP,
+ KRB_TGS_REQ,
+ KU_AP_REQ_AUTH,
+ KU_AP_REQ_ENC_PART,
+ KU_AS_FRESHNESS,
+ KU_AS_REP_ENC_PART,
+ KU_AS_REQ,
+ KU_ENC_CHALLENGE_KDC,
+ KU_FAST_ENC,
+ KU_FAST_FINISHED,
+ KU_FAST_REP,
+ KU_FAST_REQ_CHKSUM,
+ KU_KRB_PRIV,
+ KU_NON_KERB_CKSUM_SALT,
+ KU_NON_KERB_SALT,
+ KU_PKINIT_AS_REQ,
+ KU_TGS_REP_ENC_PART_SESSION,
+ KU_TGS_REP_ENC_PART_SUB_KEY,
+ KU_TGS_REQ_AUTH,
+ KU_TGS_REQ_AUTH_CKSUM,
+ KU_TGS_REQ_AUTH_DAT_SESSION,
+ KU_TGS_REQ_AUTH_DAT_SUBKEY,
+ KU_TICKET,
+ NT_PRINCIPAL,
+ NT_SRV_INST,
+ NT_WELLKNOWN,
+ PADATA_AS_FRESHNESS,
+ PADATA_ENCRYPTED_CHALLENGE,
+ PADATA_ENC_TIMESTAMP,
+ PADATA_ETYPE_INFO,
+ PADATA_ETYPE_INFO2,
+ PADATA_FOR_USER,
+ PADATA_FX_COOKIE,
+ PADATA_FX_ERROR,
+ PADATA_FX_FAST,
+ PADATA_GSS,
+ PADATA_KDC_REQ,
+ PADATA_PAC_OPTIONS,
+ PADATA_PAC_REQUEST,
+ PADATA_PKINIT_KX,
+ PADATA_PK_AS_REP,
+ PADATA_PK_AS_REP_19,
+ PADATA_PK_AS_REQ,
+ PADATA_PW_SALT,
+ PADATA_REQ_ENC_PA_REP,
+ PADATA_SUPPORTED_ETYPES,
+)
+import samba.tests.krb5.kcrypto as kcrypto
+
+
def BitStringEncoder_encodeValue32(
        self, value, asn1Spec, encodeFun, **options):
    """Replacement for pyasn1's BIT STRING value encoder that pads the
    encoded content out to at least 32 bits, since BitStrings like
    KDCOptions or TicketFlags should be at least 32-bit on the wire."""
    if asn1Spec is not None:
        # TODO: try to avoid ASN.1 schema instantiation
        value = asn1Spec.clone(value)

    bit_count = len(value)
    trailing = bit_count % 8
    # Left-align the bits within whole octets.
    aligned = value << (8 - trailing) if trailing else value

    octets = aligned.asOctets()
    # Pad the content to a minimum of four bytes (32 bits).
    shortfall = 4 - len(octets)
    if shortfall < 0:
        shortfall = 0
    encoded = b'\x00' + octets + (b'\x00' * shortfall)
    return encoded, False, True
+
+
# Install the 32-bit-padding encoder in place of pyasn1's default BIT
# STRING value encoder.
BitStringEncoder.encodeValue = BitStringEncoder_encodeValue32
+
+
def BitString_NamedValues_prettyPrint(self, scope=0):
    """Pretty-print a BIT STRING as its binary form followed by a listing
    of the first 32 bit positions: named bits always, unnamed bits only
    when set."""
    ret = "%s" % self.asBinary()
    bits = []
    highest_bit = 32
    # Expand each octet into individual bit values, MSB first.
    for byte in self.asNumbers():
        for bit in [7, 6, 5, 4, 3, 2, 1, 0]:
            mask = 1 << bit
            if byte & mask:
                val = 1
            else:
                val = 0
            bits.append(val)
    # Pad with zero bits up to 32 positions.
    if len(bits) < highest_bit:
        for bitPosition in range(len(bits), highest_bit):
            bits.append(0)
    indent = "  " * scope
    delim = ": (\n%s  " % indent
    for bitPosition in range(highest_bit):
        if bitPosition in self.prettyPrintNamedValues:
            name = self.prettyPrintNamedValues[bitPosition]
        elif bits[bitPosition] != 0:
            # A set bit without a name is still worth showing.
            name = "unknown-bit-%u" % bitPosition
        else:
            continue
        ret += "%s%s:%u" % (delim, name, bits[bitPosition])
        delim = ",\n%s  " % indent
    ret += "\n%s)" % indent
    return ret
+
+
# Attach the named-value-aware pretty-printer to the pyasn1 BIT STRING
# types so that flag values print their symbolic names.
krb5_asn1.TicketFlags.prettyPrintNamedValues =\
    krb5_asn1.TicketFlagsValues.namedValues
krb5_asn1.TicketFlags.namedValues =\
    krb5_asn1.TicketFlagsValues.namedValues
krb5_asn1.TicketFlags.prettyPrint =\
    BitString_NamedValues_prettyPrint
krb5_asn1.KDCOptions.prettyPrintNamedValues =\
    krb5_asn1.KDCOptionsValues.namedValues
krb5_asn1.KDCOptions.namedValues =\
    krb5_asn1.KDCOptionsValues.namedValues
krb5_asn1.KDCOptions.prettyPrint =\
    BitString_NamedValues_prettyPrint
krb5_asn1.APOptions.prettyPrintNamedValues =\
    krb5_asn1.APOptionsValues.namedValues
krb5_asn1.APOptions.namedValues =\
    krb5_asn1.APOptionsValues.namedValues
krb5_asn1.APOptions.prettyPrint =\
    BitString_NamedValues_prettyPrint
krb5_asn1.PACOptionFlags.prettyPrintNamedValues =\
    krb5_asn1.PACOptionFlagsValues.namedValues
krb5_asn1.PACOptionFlags.namedValues =\
    krb5_asn1.PACOptionFlagsValues.namedValues
krb5_asn1.PACOptionFlags.prettyPrint =\
    BitString_NamedValues_prettyPrint
+
+
def Integer_NamedValues_prettyPrint(self, scope=0):
    """Pretty-print an INTEGER value together with its symbolic name looked
    up in the type's prettyPrintNamedValues mapping, or '<__unknown__>'."""
    numeric = int(self)
    known = numeric in self.prettyPrintNamedValues
    label = self.prettyPrintNamedValues[numeric] if known else "<__unknown__>"
    return "%d (0x%x) %s" % (numeric, numeric, label)
+
+
# Attach the named-value-aware pretty-printer to the pyasn1 INTEGER types
# so that numeric codes print their symbolic names.
krb5_asn1.NameType.prettyPrintNamedValues =\
    krb5_asn1.NameTypeValues.namedValues
krb5_asn1.NameType.prettyPrint =\
    Integer_NamedValues_prettyPrint
krb5_asn1.AuthDataType.prettyPrintNamedValues =\
    krb5_asn1.AuthDataTypeValues.namedValues
krb5_asn1.AuthDataType.prettyPrint =\
    Integer_NamedValues_prettyPrint
krb5_asn1.PADataType.prettyPrintNamedValues =\
    krb5_asn1.PADataTypeValues.namedValues
krb5_asn1.PADataType.prettyPrint =\
    Integer_NamedValues_prettyPrint
krb5_asn1.EncryptionType.prettyPrintNamedValues =\
    krb5_asn1.EncryptionTypeValues.namedValues
krb5_asn1.EncryptionType.prettyPrint =\
    Integer_NamedValues_prettyPrint
krb5_asn1.ChecksumType.prettyPrintNamedValues =\
    krb5_asn1.ChecksumTypeValues.namedValues
krb5_asn1.ChecksumType.prettyPrint =\
    Integer_NamedValues_prettyPrint
krb5_asn1.KerbErrorDataType.prettyPrintNamedValues =\
    krb5_asn1.KerbErrorDataTypeValues.namedValues
krb5_asn1.KerbErrorDataType.prettyPrint =\
    Integer_NamedValues_prettyPrint
+
+
class Krb5EncryptionKey:
    """A Kerberos encryption key bundled with its encryption type, the
    matching mandatory checksum type, and a key version number (kvno)."""

    __slots__ = [
        'ctype',
        'etype',
        'key',
        'kvno',
    ]

    def __init__(self, key, kvno):
        # Each supported enctype maps to exactly one checksum type.
        checksum_for_enctype = {
            kcrypto.Enctype.AES256: kcrypto.Cksumtype.SHA1_AES256,
            kcrypto.Enctype.AES128: kcrypto.Cksumtype.SHA1_AES128,
            kcrypto.Enctype.RC4: kcrypto.Cksumtype.HMAC_MD5,
        }
        self.key = key
        self.etype = key.enctype
        self.ctype = checksum_for_enctype[self.etype]
        self.kvno = kvno

    def __str__(self):
        return "etype=%d ctype=%d kvno=%d key=%s" % (
            self.etype, self.ctype, self.kvno, self.key)

    def encrypt(self, usage, plaintext):
        """Encrypt plaintext under this key for the given key usage."""
        return kcrypto.encrypt(self.key, usage, plaintext)

    def decrypt(self, usage, ciphertext):
        """Decrypt ciphertext under this key for the given key usage."""
        return kcrypto.decrypt(self.key, usage, ciphertext)

    def make_zeroed_checksum(self, ctype=None):
        """Return an all-zero placeholder of the checksum's length."""
        if ctype is None:
            ctype = self.ctype

        return bytes(kcrypto.checksum_len(ctype))

    def make_checksum(self, usage, plaintext, ctype=None):
        """Compute a keyed checksum over plaintext for the given usage."""
        if ctype is None:
            ctype = self.ctype
        return kcrypto.make_checksum(ctype, self.key, usage, plaintext)

    def verify_checksum(self, usage, plaintext, ctype, cksum):
        """Verify a keyed checksum; the checksum type must match ours."""
        if self.ctype != ctype:
            raise AssertionError(f'key checksum type ({self.ctype}) != '
                                 f'checksum type ({ctype})')

        kcrypto.verify_checksum(ctype,
                                self.key,
                                usage,
                                plaintext,
                                cksum)

    def export_obj(self):
        """Return the key as a native EncryptionKey object."""
        return {
            'keytype': self.etype,
            'keyvalue': self.key.contents,
        }
+
+
class RodcPacEncryptionKey(Krb5EncryptionKey):
    """A key that may carry an RODC identifier, appended as two
    little-endian bytes to the PAC checksums made with the key."""

    __slots__ = ['rodc_id']

    def __init__(self, key, kvno, rodc_id=None):
        super().__init__(key, kvno)

        if rodc_id is None:
            # Derive the RODC id from bits 16-31 of the kvno, if present.
            derived = self.kvno
            if derived is not None:
                derived >>= 16
                derived &= (1 << 16) - 1

            rodc_id = derived or None

        if rodc_id is not None:
            self.rodc_id = rodc_id.to_bytes(2, byteorder='little')
        else:
            self.rodc_id = b''

    def make_rodc_zeroed_checksum(self, ctype=None):
        """Zeroed checksum, extended by the RODC id's length."""
        return super().make_zeroed_checksum(ctype) + bytes(len(self.rodc_id))

    def make_rodc_checksum(self, usage, plaintext, ctype=None):
        """Keyed checksum with the RODC id appended."""
        return super().make_checksum(usage, plaintext, ctype) + self.rodc_id

    def verify_rodc_checksum(self, usage, plaintext, ctype, cksum):
        """Verify a checksum, first splitting off and comparing the
        trailing two-byte RODC id when we have one."""
        if self.rodc_id:
            cksum, cksum_rodc_id = cksum[:-2], cksum[-2:]

            if self.rodc_id != cksum_rodc_id:
                raise AssertionError(f'{self.rodc_id.hex()} != '
                                     f'{cksum_rodc_id.hex()}')

        super().verify_checksum(usage,
                                plaintext,
                                ctype,
                                cksum)
+
+
class ZeroedChecksumKey(RodcPacEncryptionKey):
    """A key whose checksums are always all-zero placeholders instead of
    real keyed checksums."""

    def make_checksum(self, usage, plaintext, ctype=None):
        return self.make_zeroed_checksum(ctype)

    def make_rodc_checksum(self, usage, plaintext, ctype=None):
        return self.make_rodc_zeroed_checksum(ctype)
+
+
class WrongLengthChecksumKey(RodcPacEncryptionKey):
    """A key whose checksums are zero-padded or truncated to a fixed,
    deliberately wrong length."""

    __slots__ = ['_length']

    def __init__(self, key, kvno, length):
        super().__init__(key, kvno)

        self._length = length

    @classmethod
    def _adjust_to_length(cls, checksum, length):
        # Zero-pad short checksums; truncate long ones.
        shortfall = length - len(checksum)
        if shortfall > 0:
            return checksum + bytes(shortfall)
        if shortfall < 0:
            return checksum[:length]
        return checksum

    def make_zeroed_checksum(self, ctype=None):
        return bytes(self._length)

    def make_checksum(self, usage, plaintext, ctype=None):
        real = super().make_checksum(usage, plaintext, ctype)
        return self._adjust_to_length(real, self._length)

    def make_rodc_zeroed_checksum(self, ctype=None):
        return bytes(self._length)

    def make_rodc_checksum(self, usage, plaintext, ctype=None):
        real = super().make_rodc_checksum(usage, plaintext, ctype)
        return self._adjust_to_length(real, self._length)
+
+
class KerberosCredentials(Credentials):
    """Credentials extended with Kerberos-specific state: supported
    enctype bitmasks, a kvno, forced keys and salt, and account identity
    details (DN, UPN, SPN, SID, account type)."""

    __slots__ = [
        '_private_key',
        'account_type',
        'ap_supported_enctypes',
        'as_supported_enctypes',
        'dn',
        'forced_keys',
        'forced_salt',
        'kvno',
        'sid',
        'spn',
        'tgs_supported_enctypes',
        'upn',
    ]

    # Bits of msDS-SupportedEncryptionTypes that are feature flags rather
    # than actual encryption types.
    non_etype_bits = (
        security.KERB_ENCTYPE_FAST_SUPPORTED) | (
        security.KERB_ENCTYPE_COMPOUND_IDENTITY_SUPPORTED) | (
        security.KERB_ENCTYPE_CLAIMS_SUPPORTED) | (
        security.KERB_ENCTYPE_RESOURCE_SID_COMPRESSION_DISABLED) | (
        security.KERB_ENCTYPE_AES256_CTS_HMAC_SHA1_96_SK)

    def __init__(self):
        super().__init__()
        # Default to supporting RC4, AES128 and AES256 for AS, TGS and AP.
        all_enc_types = 0
        all_enc_types |= security.KERB_ENCTYPE_RC4_HMAC_MD5
        all_enc_types |= security.KERB_ENCTYPE_AES128_CTS_HMAC_SHA1_96
        all_enc_types |= security.KERB_ENCTYPE_AES256_CTS_HMAC_SHA1_96

        self.as_supported_enctypes = all_enc_types
        self.tgs_supported_enctypes = all_enc_types
        self.ap_supported_enctypes = all_enc_types

        self.kvno = None
        self.forced_keys = {}

        self.forced_salt = None

        self.dn = None
        self.upn = None
        self.spn = None
        self.sid = None
        self.account_type = None

        self._private_key = None

    def set_as_supported_enctypes(self, value):
        self.as_supported_enctypes = int(value)

    def set_tgs_supported_enctypes(self, value):
        self.tgs_supported_enctypes = int(value)

    def set_ap_supported_enctypes(self, value):
        self.ap_supported_enctypes = int(value)

    # Mapping between kcrypto enctypes and their supported-enctypes bits,
    # in order of decreasing preference.
    etype_map = collections.OrderedDict([
        (kcrypto.Enctype.AES256,
         security.KERB_ENCTYPE_AES256_CTS_HMAC_SHA1_96),
        (kcrypto.Enctype.AES128,
         security.KERB_ENCTYPE_AES128_CTS_HMAC_SHA1_96),
        (kcrypto.Enctype.RC4,
         security.KERB_ENCTYPE_RC4_HMAC_MD5),
        (kcrypto.Enctype.DES_MD5,
         security.KERB_ENCTYPE_DES_CBC_MD5),
        (kcrypto.Enctype.DES_CRC,
         security.KERB_ENCTYPE_DES_CBC_CRC)
    ])

    @classmethod
    def etypes_to_bits(cls, etypes):
        """Convert a sequence of enctypes to a supported-enctypes bitmask.

        :raises ValueError: if an enctype appears more than once.
        """
        bits = 0
        for etype in etypes:
            bit = cls.etype_map[etype]
            if bits & bit:
                raise ValueError(f'Got duplicate etype: {etype}')
            bits |= bit

        return bits

    @classmethod
    def bits_to_etypes(cls, bits):
        """Convert a supported-enctypes bitmask to a tuple of enctypes, in
        etype_map (preference) order.

        :raises ValueError: if unknown (non-feature) bits remain set.
        """
        etypes = ()
        for etype, bit in cls.etype_map.items():
            if bit & bits:
                bits &= ~bit
                etypes += (etype,)

        # Feature-flag bits are silently ignored.
        bits &= ~cls.non_etype_bits
        if bits != 0:
            raise ValueError(f'Unsupported etype bits: {bits}')

        return etypes

    def get_as_krb5_etypes(self):
        return self.bits_to_etypes(self.as_supported_enctypes)

    def get_tgs_krb5_etypes(self):
        return self.bits_to_etypes(self.tgs_supported_enctypes)

    def get_ap_krb5_etypes(self):
        return self.bits_to_etypes(self.ap_supported_enctypes)

    def set_kvno(self, kvno):
        # Sign-extend from 32 bits.
        if kvno & 1 << 31:
            kvno |= -1 << 31
        self.kvno = kvno

    def get_kvno(self):
        return self.kvno

    def set_forced_key(self, etype, hexkey):
        """Force a specific key (given as a hex string) for an etype."""
        etype = int(etype)
        contents = binascii.a2b_hex(hexkey)
        key = kcrypto.Key(etype, contents)
        self.forced_keys[etype] = RodcPacEncryptionKey(key, self.kvno)

        # Also set the NT hash of computer accounts for which we don’t know the
        # password.
        if etype == kcrypto.Enctype.RC4 and self.get_password() is None:
            nt_hash = samr.Password()
            nt_hash.hash = list(contents)

            self.set_nt_hash(nt_hash)

    def get_forced_key(self, etype):
        etype = int(etype)
        return self.forced_keys.get(etype)

    def set_forced_salt(self, salt):
        self.forced_salt = bytes(salt)

    def get_forced_salt(self):
        return self.forced_salt

    def get_salt(self):
        """Return the password salt: any forced salt, or one derived from
        the realm and the UPN/username (with the host form used for
        workstation and BDC secure channels)."""
        if self.forced_salt is not None:
            return self.forced_salt

        upn = self.get_upn()
        if upn is not None:
            # Use the UPN's name part, with any '/'s removed.
            salt_name = upn.rsplit('@', 1)[0].replace('/', '')
        else:
            salt_name = self.get_username()

        secure_schannel_type = self.get_secure_channel_type()
        if secure_schannel_type in [SEC_CHAN_WKSTA,SEC_CHAN_BDC]:
            # Machine accounts use REALMhost<name>.realm, with the
            # trailing '$' stripped from the account name.
            salt_name = self.get_username().lower()
            if salt_name[-1] == '$':
                salt_name = salt_name[:-1]
            salt_string = '%shost%s.%s' % (
                self.get_realm().upper(),
                salt_name,
                self.get_realm().lower())
        else:
            salt_string = self.get_realm().upper() + salt_name

        return salt_string.encode('utf-8')

    def set_dn(self, dn):
        self.dn = dn

    def get_dn(self):
        return self.dn

    def set_spn(self, spn):
        self.spn = spn

    def get_spn(self):
        return self.spn

    def set_upn(self, upn):
        self.upn = upn

    def get_upn(self):
        return self.upn

    def set_sid(self, sid):
        self.sid = sid

    def get_sid(self):
        return self.sid

    def get_rid(self):
        # The RID is the final dash-separated component of the SID string.
        sid = self.get_sid()
        if sid is None:
            return None

        _, rid = sid.rsplit('-', 1)
        return int(rid)

    def set_type(self, account_type):
        self.account_type = account_type

    def get_type(self):
        return self.account_type

    def update_password(self, password):
        """Set a new password and bump the kvno accordingly."""
        self.set_password(password)
        self.set_kvno(self.get_kvno() + 1)

    def get_private_key(self):
        """Return the RSA private key, generating one on first use."""
        if self._private_key is None:
            # Generate a new keypair.
            self._private_key = asymmetric.rsa.generate_private_key(
                public_exponent=65537,
                key_size=2048,
                backend=default_backend()
            )

        return self._private_key

    def get_public_key(self):
        return self.get_private_key().public_key()
+
+
class KerberosTicketCreds:
    """Bundle of a Kerberos ticket plus what is needed to use and inspect
    it: the session key, client/server names and realms, the key able to
    decrypt it, and the decrypted ticket/enc-part contents."""

    __slots__ = [
        'cname',
        'crealm',
        'decryption_key',
        'encpart_private',
        'session_key',
        'sname',
        'srealm',
        'ticket_private',
        'ticket',
    ]

    def __init__(self, ticket, session_key,
                 crealm=None, cname=None,
                 srealm=None, sname=None,
                 decryption_key=None,
                 ticket_private=None,
                 encpart_private=None):
        # Copy each argument straight onto the matching slot.
        args = locals()
        for slot in ('ticket', 'session_key', 'crealm', 'cname', 'srealm',
                     'sname', 'decryption_key', 'ticket_private',
                     'encpart_private'):
            setattr(self, slot, args[slot])

    def set_sname(self, sname):
        # Keep the outer ticket and our cached copy in sync.
        self.ticket['sname'] = sname
        self.sname = sname
+
+
# How (or whether) PK-INIT is used in an exchange.
class PkInit(Enum):
    NOT_USED = object()
    PUBLIC_KEY = object()
    DIFFIE_HELLMAN = object()
+
+
+class RawKerberosTest(TestCase):
+ """A raw Kerberos Test case."""
+
    # How a kpasswd request modifies the password: an administrative set
    # or a user-initiated change.
    class KpasswdMode(Enum):
        SET = object()
        CHANGE = object()
+
    # The location of a SID within the PAC
    class SidType(Enum):
        BASE_SID = object()  # in info3.base.groups
        EXTRA_SID = object()  # in info3.sids
        RESOURCE_SID = object()  # in resource_groups
        PRIMARY_GID = object()  # the (sole) primary group

        def __repr__(self):
            # Use the readable str form (e.g. 'SidType.BASE_SID') for reprs.
            return self.__str__()
+
    # The PAC buffer types that carry checksums over the PAC contents.
    pac_checksum_types = {krb5pac.PAC_TYPE_SRV_CHECKSUM,
                          krb5pac.PAC_TYPE_KDC_CHECKSUM,
                          krb5pac.PAC_TYPE_TICKET_CHECKSUM,
                          krb5pac.PAC_TYPE_FULL_CHECKSUM}
+
+ etypes_to_test = (
+ {"value": -1111, "name": "dummy", },
+ {"value": kcrypto.Enctype.AES256, "name": "aes128", },
+ {"value": kcrypto.Enctype.AES128, "name": "aes256", },
+ {"value": kcrypto.Enctype.RC4, "name": "rc4", },
+ )
+
    # Sentinel meaning: expect this padata in the outer (FAST) request.
    expect_padata_outer = object()

    # Guard so the permutation table is only built once per class.
    setup_etype_test_permutations_done = False
+
+ @classmethod
+ def setup_etype_test_permutations(cls):
+ if cls.setup_etype_test_permutations_done:
+ return
+
+ res = []
+
+ num_idxs = len(cls.etypes_to_test)
+ permutations = []
+ for num in range(1, num_idxs + 1):
+ chunk = list(itertools.permutations(range(num_idxs), num))
+ for e in chunk:
+ el = list(e)
+ permutations.append(el)
+
+ for p in permutations:
+ name = None
+ etypes = ()
+ for idx in p:
+ n = cls.etypes_to_test[idx]["name"]
+ if name is None:
+ name = n
+ else:
+ name += "_%s" % n
+ etypes += (cls.etypes_to_test[idx]["value"],)
+
+ r = {"name": name, "etypes": etypes, }
+ res.append(r)
+
+ cls.etype_test_permutations = res
+ cls.setup_etype_test_permutations_done = True
+
+ @classmethod
+ def etype_test_permutation_name_idx(cls):
+ cls.setup_etype_test_permutations()
+ res = []
+ idx = 0
+ for e in cls.etype_test_permutations:
+ r = (e['name'], idx)
+ idx += 1
+ res.append(r)
+ return res
+
+ def etype_test_permutation_by_idx(self, idx):
+ e = self.etype_test_permutations[idx]
+ return (e['name'], e['etypes'])
+
+ @classmethod
+ def setUpClass(cls):
+ super().setUpClass()
+
+ cls.host = samba.tests.env_get_var_value('SERVER')
+ cls.dc_host = samba.tests.env_get_var_value('DC_SERVER')
+
+ # A dictionary containing credentials that have already been
+ # obtained.
+ cls.creds_dict = {}
+
+ kdc_fast_support = samba.tests.env_get_var_value('FAST_SUPPORT',
+ allow_missing=True)
+ if kdc_fast_support is None:
+ kdc_fast_support = '0'
+ cls.kdc_fast_support = bool(int(kdc_fast_support))
+
+ kdc_claims_support = samba.tests.env_get_var_value('CLAIMS_SUPPORT',
+ allow_missing=True)
+ if kdc_claims_support is None:
+ kdc_claims_support = '0'
+ cls.kdc_claims_support = bool(int(kdc_claims_support))
+
+ kdc_compound_id_support = samba.tests.env_get_var_value(
+ 'COMPOUND_ID_SUPPORT',
+ allow_missing=True)
+ if kdc_compound_id_support is None:
+ kdc_compound_id_support = '0'
+ cls.kdc_compound_id_support = bool(int(kdc_compound_id_support))
+
+ tkt_sig_support = samba.tests.env_get_var_value('TKT_SIG_SUPPORT',
+ allow_missing=True)
+ if tkt_sig_support is None:
+ tkt_sig_support = '1'
+ cls.tkt_sig_support = bool(int(tkt_sig_support))
+
+ full_sig_support = samba.tests.env_get_var_value('FULL_SIG_SUPPORT',
+ allow_missing=True)
+ if full_sig_support is None:
+ full_sig_support = '1'
+ cls.full_sig_support = bool(int(full_sig_support))
+
+ expect_pac = samba.tests.env_get_var_value('EXPECT_PAC',
+ allow_missing=True)
+ if expect_pac is None:
+ expect_pac = '1'
+ cls.expect_pac = bool(int(expect_pac))
+
+ expect_extra_pac_buffers = samba.tests.env_get_var_value(
+ 'EXPECT_EXTRA_PAC_BUFFERS',
+ allow_missing=True)
+ if expect_extra_pac_buffers is None:
+ expect_extra_pac_buffers = '1'
+ cls.expect_extra_pac_buffers = bool(int(expect_extra_pac_buffers))
+
+ cname_checking = samba.tests.env_get_var_value('CHECK_CNAME',
+ allow_missing=True)
+ if cname_checking is None:
+ cname_checking = '1'
+ cls.cname_checking = bool(int(cname_checking))
+
+ padata_checking = samba.tests.env_get_var_value('CHECK_PADATA',
+ allow_missing=True)
+ if padata_checking is None:
+ padata_checking = '1'
+ cls.padata_checking = bool(int(padata_checking))
+
+ kadmin_is_tgs = samba.tests.env_get_var_value('KADMIN_IS_TGS',
+ allow_missing=True)
+ if kadmin_is_tgs is None:
+ kadmin_is_tgs = '0'
+ cls.kadmin_is_tgs = bool(int(kadmin_is_tgs))
+
+ default_etypes = samba.tests.env_get_var_value('DEFAULT_ETYPES',
+ allow_missing=True)
+ if default_etypes is not None:
+ default_etypes = int(default_etypes)
+ cls.default_etypes = default_etypes
+
+ forced_rc4 = samba.tests.env_get_var_value('FORCED_RC4',
+ allow_missing=True)
+ if forced_rc4 is None:
+ forced_rc4 = '0'
+ cls.forced_rc4 = bool(int(forced_rc4))
+
+ expect_nt_hash = samba.tests.env_get_var_value('EXPECT_NT_HASH',
+ allow_missing=True)
+ if expect_nt_hash is None:
+ expect_nt_hash = '1'
+ cls.expect_nt_hash = bool(int(expect_nt_hash))
+
+ expect_nt_status = samba.tests.env_get_var_value('EXPECT_NT_STATUS',
+ allow_missing=True)
+ if expect_nt_status is None:
+ expect_nt_status = '1'
+ cls.expect_nt_status = bool(int(expect_nt_status))
+
+ crash_windows = samba.tests.env_get_var_value('CRASH_WINDOWS',
+ allow_missing=True)
+ if crash_windows is None:
+ crash_windows = '1'
+ cls.crash_windows = bool(int(crash_windows))
+
+ def setUp(self):
+ super().setUp()
+ self.do_asn1_print = False
+ self.do_hexdump = False
+
+ strict_checking = samba.tests.env_get_var_value('STRICT_CHECKING',
+ allow_missing=True)
+ if strict_checking is None:
+ strict_checking = '1'
+ self.strict_checking = bool(int(strict_checking))
+
+ self.s = None
+
+ self.unspecified_kvno = object()
+
    def tearDown(self):
        """Drop any open KDC connection before the normal teardown."""
        self._disconnect("tearDown")
        super().tearDown()
+
+ def _disconnect(self, reason):
+ if self.s is None:
+ return
+ self.s.close()
+ self.s = None
+ if self.do_hexdump:
+ sys.stderr.write("disconnect[%s]\n" % reason)
+
+ def _connect_tcp(self, host, port=None):
+ if port is None:
+ port = 88
+ try:
+ self.a = socket.getaddrinfo(host, port, socket.AF_UNSPEC,
+ socket.SOCK_STREAM, socket.SOL_TCP,
+ 0)
+ self.s = socket.socket(self.a[0][0], self.a[0][1], self.a[0][2])
+ self.s.settimeout(10)
+ self.s.connect(self.a[0][4])
+ except socket.error:
+ self.s.close()
+ raise
+
    def connect(self, host, port=None):
        """Establish a new TCP connection to host (default port 88).

        Fails if a connection is already open.
        """
        self.assertNotConnected()
        self._connect_tcp(host, port)
        if self.do_hexdump:
            sys.stderr.write("connected[%s]\n" % host)
+
+ def env_get_var(self, varname, prefix,
+ fallback_default=True,
+ allow_missing=False):
+ val = None
+ if prefix is not None:
+ allow_missing_prefix = allow_missing or fallback_default
+ val = samba.tests.env_get_var_value(
+ '%s_%s' % (prefix, varname),
+ allow_missing=allow_missing_prefix)
+ else:
+ fallback_default = True
+ if val is None and fallback_default:
+ val = samba.tests.env_get_var_value(varname,
+ allow_missing=allow_missing)
+ return val
+
    def _get_krb5_creds_from_env(self, prefix,
                                 default_username=None,
                                 allow_missing_password=False,
                                 allow_missing_keys=True,
                                 require_strongest_key=False):
        """Build KerberosCredentials for an account from environment
        variables, optionally namespaced by prefix (see env_get_var).

        :param default_username: used when <prefix>_USERNAME is unset.
        :param require_strongest_key: insist that KVNO and (absent a
            password) the AES256 key are present in the environment.
        :raises AssertionError: if allow_missing_keys is False and no
            forced key was supplied.
        """
        c = KerberosCredentials()
        c.guess()

        domain = self.env_get_var('DOMAIN', prefix)
        realm = self.env_get_var('REALM', prefix)
        # A username is only optional when a default was supplied.
        allow_missing_username = default_username is not None
        username = self.env_get_var('USERNAME', prefix,
                                    fallback_default=False,
                                    allow_missing=allow_missing_username)
        if username is None:
            username = default_username
        password = self.env_get_var('PASSWORD', prefix,
                                    fallback_default=False,
                                    allow_missing=allow_missing_password)
        c.set_domain(domain)
        c.set_realm(realm)
        c.set_username(username)
        if password is not None:
            c.set_password(password)
        # Optional per-account supported-enctype hints.
        as_supported_enctypes = self.env_get_var('AS_SUPPORTED_ENCTYPES',
                                                 prefix, allow_missing=True)
        if as_supported_enctypes is not None:
            c.set_as_supported_enctypes(as_supported_enctypes)
        tgs_supported_enctypes = self.env_get_var('TGS_SUPPORTED_ENCTYPES',
                                                  prefix, allow_missing=True)
        if tgs_supported_enctypes is not None:
            c.set_tgs_supported_enctypes(tgs_supported_enctypes)
        ap_supported_enctypes = self.env_get_var('AP_SUPPORTED_ENCTYPES',
                                                 prefix, allow_missing=True)
        if ap_supported_enctypes is not None:
            c.set_ap_supported_enctypes(ap_supported_enctypes)

        if require_strongest_key:
            kvno_allow_missing = False
            # With a password available the AES256 key can be derived,
            # so it need not be forced explicitly.
            if password is None:
                aes256_allow_missing = False
            else:
                aes256_allow_missing = True
        else:
            kvno_allow_missing = allow_missing_keys
            aes256_allow_missing = allow_missing_keys
        kvno = self.env_get_var('KVNO', prefix,
                                fallback_default=False,
                                allow_missing=kvno_allow_missing)
        if kvno is not None:
            c.set_kvno(int(kvno))
        aes256_key = self.env_get_var('AES256_KEY_HEX', prefix,
                                      fallback_default=False,
                                      allow_missing=aes256_allow_missing)
        if aes256_key is not None:
            c.set_forced_key(kcrypto.Enctype.AES256, aes256_key)
        aes128_key = self.env_get_var('AES128_KEY_HEX', prefix,
                                      fallback_default=False,
                                      allow_missing=True)
        if aes128_key is not None:
            c.set_forced_key(kcrypto.Enctype.AES128, aes128_key)
        rc4_key = self.env_get_var('RC4_KEY_HEX', prefix,
                                   fallback_default=False, allow_missing=True)
        if rc4_key is not None:
            c.set_forced_key(kcrypto.Enctype.RC4, rc4_key)

        if not allow_missing_keys:
            self.assertTrue(c.forced_keys,
                            'Please supply %s encryption keys '
                            'in environment' % prefix)

        return c
+
    def _get_krb5_creds(self,
                        prefix,
                        default_username=None,
                        allow_missing_password=False,
                        allow_missing_keys=True,
                        require_strongest_key=False,
                        fallback_creds_fn=None):
        """Return (and cache per prefix) credentials for an account.

        Credentials are first sought in the environment; if that fails
        and fallback_creds_fn is given, it is tried next.  If both fail,
        the original environment error is re-raised.
        """
        if prefix in self.creds_dict:
            return self.creds_dict[prefix]

        # We don't have the credentials already
        creds = None
        env_err = None
        try:
            # Try to obtain them from the environment
            creds = self._get_krb5_creds_from_env(
                prefix,
                default_username=default_username,
                allow_missing_password=allow_missing_password,
                allow_missing_keys=allow_missing_keys,
                require_strongest_key=require_strongest_key)
        except Exception as err:
            # An error occurred, so save it for later
            env_err = err
        else:
            self.assertIsNotNone(creds)
            # Save the obtained credentials
            self.creds_dict[prefix] = creds
            return creds

        if fallback_creds_fn is not None:
            try:
                # Try to use the fallback method
                creds = fallback_creds_fn()
            except Exception as err:
                # Report both failures; the environment error is raised
                # below.
                print("ERROR FROM ENV: %r" % (env_err))
                print("FALLBACK-FN: %s" % (fallback_creds_fn))
                print("FALLBACK-ERROR: %r" % (err))
            else:
                self.assertIsNotNone(creds)
                # Save the obtained credentials
                self.creds_dict[prefix] = creds
                return creds

        # Both methods failed, so raise the exception from the
        # environment method
        raise env_err
+
+ def get_user_creds(self,
+ allow_missing_password=False,
+ allow_missing_keys=True):
+ c = self._get_krb5_creds(prefix=None,
+ allow_missing_password=allow_missing_password,
+ allow_missing_keys=allow_missing_keys)
+ return c
+
+ def get_service_creds(self,
+ allow_missing_password=False,
+ allow_missing_keys=True):
+ c = self._get_krb5_creds(prefix='SERVICE',
+ allow_missing_password=allow_missing_password,
+ allow_missing_keys=allow_missing_keys)
+ return c
+
+ def get_client_creds(self,
+ allow_missing_password=False,
+ allow_missing_keys=True):
+ c = self._get_krb5_creds(prefix='CLIENT',
+ allow_missing_password=allow_missing_password,
+ allow_missing_keys=allow_missing_keys)
+ return c
+
+ def get_server_creds(self,
+ allow_missing_password=False,
+ allow_missing_keys=True):
+ c = self._get_krb5_creds(prefix='SERVER',
+ allow_missing_password=allow_missing_password,
+ allow_missing_keys=allow_missing_keys)
+ return c
+
+ def get_admin_creds(self,
+ allow_missing_password=False,
+ allow_missing_keys=True):
+ c = self._get_krb5_creds(prefix='ADMIN',
+ allow_missing_password=allow_missing_password,
+ allow_missing_keys=allow_missing_keys)
+ c.set_gensec_features(c.get_gensec_features() | FEATURE_SEAL)
+ c.set_workstation('')
+ return c
+
+ def get_rodc_krbtgt_creds(self,
+ require_keys=True,
+ require_strongest_key=False):
+ if require_strongest_key:
+ self.assertTrue(require_keys)
+ c = self._get_krb5_creds(prefix='RODC_KRBTGT',
+ allow_missing_password=True,
+ allow_missing_keys=not require_keys,
+ require_strongest_key=require_strongest_key)
+ return c
+
+ def get_krbtgt_creds(self,
+ require_keys=True,
+ require_strongest_key=False):
+ if require_strongest_key:
+ self.assertTrue(require_keys)
+ c = self._get_krb5_creds(prefix='KRBTGT',
+ default_username='krbtgt',
+ allow_missing_password=True,
+ allow_missing_keys=not require_keys,
+ require_strongest_key=require_strongest_key)
+ return c
+
+ def get_anon_creds(self):
+ c = Credentials()
+ c.set_anonymous()
+ return c
+
+ # Overridden by KDCBaseTest. At this level we don't know what actual
+ # enctypes are supported, so the best we can do is go by whether NT hashes
+ # are expected and whether the account is a workstation or not. This
+ # matches the behaviour that tests expect by default.
+ def get_default_enctypes(self, creds):
+ self.assertIsNotNone(creds)
+
+ default_enctypes = [
+ kcrypto.Enctype.AES256,
+ kcrypto.Enctype.AES128,
+ ]
+
+ if self.expect_nt_hash or creds.get_workstation():
+ default_enctypes.append(kcrypto.Enctype.RC4)
+
+ return default_enctypes
+
+ def asn1_dump(self, name, obj, asn1_print=None):
+ if asn1_print is None:
+ asn1_print = self.do_asn1_print
+ if asn1_print:
+ if name is not None:
+ sys.stderr.write("%s:\n%s" % (name, obj))
+ else:
+ sys.stderr.write("%s" % (obj))
+
+ def hex_dump(self, name, blob, hexdump=None):
+ if hexdump is None:
+ hexdump = self.do_hexdump
+ if hexdump:
+ sys.stderr.write(
+ "%s: %d\n%s" % (name, len(blob), self.hexdump(blob)))
+
    def der_decode(
            self,
            blob,
            asn1Spec=None,
            native_encode=True,
            asn1_print=None,
            hexdump=None):
        """DER-decode blob with pyasn1, optionally converting the result
        to native Python types (dicts/lists/ints/bytes).

        The input blob and decoded object are dumped to stderr when the
        respective debug flags are on.
        """
        if asn1Spec is not None:
            class_name = type(asn1Spec).__name__.split(':')[0]
        else:
            class_name = "<None-asn1Spec>"
        self.hex_dump(class_name, blob, hexdump=hexdump)
        obj, _ = pyasn1_der_decode(blob, asn1Spec=asn1Spec)
        self.asn1_dump(None, obj, asn1_print=asn1_print)
        if native_encode:
            obj = pyasn1_native_encode(obj)
        return obj
+
    def der_encode(
            self,
            obj,
            asn1Spec=None,
            native_decode=True,
            asn1_print=None,
            hexdump=None):
        """DER-encode obj with pyasn1, optionally converting it from
        native Python types first.

        The object and resulting blob are dumped to stderr when the
        respective debug flags are on.
        """
        if native_decode:
            obj = pyasn1_native_decode(obj, asn1Spec=asn1Spec)
        class_name = type(obj).__name__.split(':')[0]
        # NOTE(review): class_name is always a str here, so the two
        # guards below are always taken.
        if class_name is not None:
            self.asn1_dump(None, obj, asn1_print=asn1_print)
        blob = pyasn1_der_encode(obj)
        if class_name is not None:
            self.hex_dump(class_name, blob, hexdump=hexdump)
        return blob
+
+ def send_pdu(self, req, asn1_print=None, hexdump=None):
+ k5_pdu = self.der_encode(
+ req, native_decode=False, asn1_print=asn1_print, hexdump=False)
+ self.send_msg(k5_pdu, hexdump=hexdump)
+
+ def send_msg(self, msg, hexdump=None):
+ header = struct.pack('>I', len(msg))
+ req_pdu = header
+ req_pdu += msg
+ self.hex_dump("send_msg", header, hexdump=hexdump)
+ self.hex_dump("send_msg", msg, hexdump=hexdump)
+
+ try:
+ while True:
+ sent = self.s.send(req_pdu, 0)
+ if sent == len(req_pdu):
+ return
+ req_pdu = req_pdu[sent:]
+ except socket.error as e:
+ self._disconnect("send_msg: %s" % e)
+ raise
+
    def recv_raw(self, num_recv=0xffff, hexdump=None, timeout=None):
        """Receive up to num_recv bytes from the KDC socket.

        Returns the received bytes; None on EOF (after disconnecting) or
        on a receive timeout (which is logged, not raised).  Socket
        errors disconnect and re-raise.
        """
        rep_pdu = None
        try:
            if timeout is not None:
                self.s.settimeout(timeout)
            rep_pdu = self.s.recv(num_recv, 0)
            # Restore the default 10s timeout set at connect time.
            self.s.settimeout(10)
            if len(rep_pdu) == 0:
                self._disconnect("recv_raw: EOF")
                return None
            self.hex_dump("recv_raw", rep_pdu, hexdump=hexdump)
        except socket.timeout:
            self.s.settimeout(10)
            sys.stderr.write("recv_raw: TIMEOUT\n")
        except socket.error as e:
            self._disconnect("recv_raw: %s" % e)
            raise
        return rep_pdu
+
+ def recv_pdu_raw(self, asn1_print=None, hexdump=None, timeout=None):
+ raw_pdu = self.recv_raw(
+ num_recv=4, hexdump=hexdump, timeout=timeout)
+ if raw_pdu is None:
+ return None
+ header = struct.unpack(">I", raw_pdu[0:4])
+ k5_len = header[0]
+ if k5_len == 0:
+ return ""
+ missing = k5_len
+ rep_pdu = b''
+ while missing > 0:
+ raw_pdu = self.recv_raw(
+ num_recv=missing, hexdump=hexdump, timeout=timeout)
+ self.assertGreaterEqual(len(raw_pdu), 1)
+ rep_pdu += raw_pdu
+ missing = k5_len - len(rep_pdu)
+ return rep_pdu
+
    def recv_reply(self, asn1_print=None, hexdump=None, timeout=None):
        """Receive one KDC reply and decode it.

        Returns (decoded_reply, raw_pdu); (None, raw) when nothing was
        received.  The reply must be an AS-REP, TGS-REP or KRB-ERROR.
        """
        rep_pdu = self.recv_pdu_raw(asn1_print=asn1_print,
                                    hexdump=hexdump,
                                    timeout=timeout)
        if not rep_pdu:
            return None, rep_pdu
        # First decode without a spec just to read pvno and msg-type.
        k5_raw = self.der_decode(
            rep_pdu,
            asn1Spec=None,
            native_encode=False,
            asn1_print=False,
            hexdump=False)
        pvno = k5_raw['field-0']
        self.assertEqual(pvno, 5)
        msg_type = k5_raw['field-1']
        self.assertIn(msg_type, [KRB_AS_REP, KRB_TGS_REP, KRB_ERROR])
        # Re-decode with the spec matching the message type.
        if msg_type == KRB_AS_REP:
            asn1Spec = krb5_asn1.AS_REP()
        elif msg_type == KRB_TGS_REP:
            asn1Spec = krb5_asn1.TGS_REP()
        elif msg_type == KRB_ERROR:
            asn1Spec = krb5_asn1.KRB_ERROR()
        rep = self.der_decode(rep_pdu, asn1Spec=asn1Spec,
                              asn1_print=asn1_print, hexdump=False)
        return (rep, rep_pdu)
+
+ def recv_pdu(self, asn1_print=None, hexdump=None, timeout=None):
+ (rep, rep_pdu) = self.recv_reply(asn1_print=asn1_print,
+ hexdump=hexdump,
+ timeout=timeout)
+ return rep
+
    def assertIsConnected(self):
        """Fail unless a KDC socket is currently open."""
        self.assertIsNotNone(self.s, msg="Not connected")
+
    def assertNotConnected(self):
        """Fail if a KDC socket is currently open."""
        self.assertIsNone(self.s, msg="Is connected")
+
    def send_recv_transaction(
            self,
            req,
            asn1_print=None,
            hexdump=None,
            timeout=None,
            to_rodc=False):
        """Send one request to the KDC and return the decoded reply.

        A fresh connection is opened for the transaction and closed
        afterwards, even on failure.
        """
        # NOTE(review): to_rodc selects self.host (the SERVER env var) —
        # presumably the RODC in these test environments; confirm against
        # the test setup.
        host = self.host if to_rodc else self.dc_host
        self.connect(host)
        try:
            self.send_pdu(req, asn1_print=asn1_print, hexdump=hexdump)
            rep = self.recv_pdu(
                asn1_print=asn1_print, hexdump=hexdump, timeout=timeout)
        except Exception:
            self._disconnect("transaction failed")
            raise
        self._disconnect("transaction done")
        return rep
+
    def getElementValue(self, obj, elem):
        """Return obj[elem], or None when the element is absent."""
        return obj.get(elem)
+
+ def assertElementMissing(self, obj, elem):
+ v = self.getElementValue(obj, elem)
+ self.assertIsNone(v)
+
+ def assertElementPresent(self, obj, elem, expect_empty=False):
+ v = self.getElementValue(obj, elem)
+ self.assertIsNotNone(v)
+ if self.strict_checking:
+ if isinstance(v, collections.abc.Container):
+ if expect_empty:
+ self.assertEqual(0, len(v))
+ else:
+ self.assertNotEqual(0, len(v))
+
    def assertElementEqual(self, obj, elem, value):
        """Assert that elem is present and equal to value."""
        v = self.getElementValue(obj, elem)
        self.assertIsNotNone(v)
        self.assertEqual(v, value)
+
    def assertElementEqualUTF8(self, obj, elem, value):
        """Assert that elem equals value after UTF-8 encoding it."""
        v = self.getElementValue(obj, elem)
        self.assertIsNotNone(v)
        self.assertEqual(v, bytes(value, 'utf8'))
+
+ def assertPrincipalEqual(self, princ1, princ2):
+ self.assertEqual(princ1['name-type'], princ2['name-type'])
+ self.assertEqual(
+ len(princ1['name-string']),
+ len(princ2['name-string']),
+ msg="princ1=%s != princ2=%s" % (princ1, princ2))
+ for idx in range(len(princ1['name-string'])):
+ self.assertEqual(
+ princ1['name-string'][idx],
+ princ2['name-string'][idx],
+ msg="princ1=%s != princ2=%s" % (princ1, princ2))
+
    def assertElementEqualPrincipal(self, obj, elem, value):
        """Assert that elem, decoded as a PrincipalName, equals value."""
        v = self.getElementValue(obj, elem)
        self.assertIsNotNone(v)
        v = pyasn1_native_decode(v, asn1Spec=krb5_asn1.PrincipalName())
        self.assertPrincipalEqual(v, value)
+
    def assertElementKVNO(self, obj, elem, value):
        """Assert that a kvno element matches value.

        value may be "autodetect" (accept whatever is on the wire, but
        require it to be non-zero), None (element must be absent), or
        self.unspecified_kvno (element must be present, value unknown).
        """
        v = self.getElementValue(obj, elem)
        if value == "autodetect":
            value = v
        if value is not None:
            self.assertIsNotNone(v)
            # The value on the wire should never be 0
            self.assertNotEqual(v, 0)
            # unspecified_kvno means we don't know the kvno,
            # but want to enforce its presence
            if value is not self.unspecified_kvno:
                value = int(value)
                self.assertNotEqual(value, 0)
                self.assertEqual(v, value)
        else:
            self.assertIsNone(v)
+
    def assertElementFlags(self, obj, elem, expected, unexpected):
        """Assert that a flags bit-string element has every bit of
        expected set and every bit of unexpected clear.

        expected/unexpected are krb5_asn1.TicketFlags values (or None to
        skip that direction of the check).
        """
        v = self.getElementValue(obj, elem)
        self.assertIsNotNone(v)
        if expected is not None:
            self.assertIsInstance(expected, krb5_asn1.TicketFlags)
            for i, flag in enumerate(expected):
                if flag == 1:
                    self.assertEqual('1', v[i],
                                     f"'{expected.namedValues[i]}' "
                                     f"expected in {v}")
        if unexpected is not None:
            self.assertIsInstance(unexpected, krb5_asn1.TicketFlags)
            for i, flag in enumerate(unexpected):
                if flag == 1:
                    self.assertEqual('0', v[i],
                                     f"'{unexpected.namedValues[i]}' "
                                     f"unexpected in {v}")
+
    def assertSequenceElementsEqual(self, expected, got, *,
                                    require_strict=None,
                                    unchecked=None,
                                    require_ordered=True):
        """Assert two sequences are equal.

        Under strict checking with require_ordered and nothing
        unchecked, order matters; otherwise elements are compared as
        multisets, ignoring any in unchecked and — when not strict —
        any in require_strict.
        """
        if self.strict_checking and require_ordered and not unchecked:
            self.assertEqual(expected, got)
        else:
            fail_msg = f'expected: {expected} got: {got}'

            ignored = set()
            if unchecked:
                ignored.update(unchecked)
            if require_strict and not self.strict_checking:
                ignored.update(require_strict)

            if ignored:
                fail_msg += f' (ignoring: {ignored})'
                expected = (x for x in expected if x not in ignored)
                got = (x for x in got if x not in ignored)

            self.assertCountEqual(expected, got, fail_msg)
+
+ def get_KerberosTimeWithUsec(self, epoch=None, offset=None):
+ if epoch is None:
+ epoch = time.time()
+ if offset is not None:
+ epoch = epoch + int(offset)
+ dt = datetime.datetime.fromtimestamp(epoch, tz=datetime.timezone.utc)
+ return (dt.strftime("%Y%m%d%H%M%SZ"), dt.microsecond)
+
    def get_KerberosTime(self, epoch=None, offset=None):
        """Return the KerberosTime string for epoch+offset (the usec
        component is discarded)."""
        (s, _) = self.get_KerberosTimeWithUsec(epoch=epoch, offset=offset)
        return s
+
+ def get_EpochFromKerberosTime(self, kerberos_time):
+ if isinstance(kerberos_time, bytes):
+ kerberos_time = kerberos_time.decode()
+
+ epoch = datetime.datetime.strptime(kerberos_time,
+ '%Y%m%d%H%M%SZ')
+ epoch = epoch.replace(tzinfo=datetime.timezone.utc)
+ epoch = int(epoch.timestamp())
+
+ return epoch
+
+ def get_Nonce(self):
+ nonce_min = 0x7f000000
+ nonce_max = 0x7fffffff
+ v = random.randint(nonce_min, nonce_max)
+ return v
+
+ def get_pa_dict(self, pa_data):
+ pa_dict = {}
+
+ if pa_data is not None:
+ for pa in pa_data:
+ pa_type = pa['padata-type']
+ if pa_type in pa_dict:
+ raise RuntimeError(f'Duplicate type {pa_type}')
+ pa_dict[pa_type] = pa['padata-value']
+
+ return pa_dict
+
    def SessionKey_create(self, etype, contents, kvno=None):
        """Wrap raw key contents as an RODC-aware PAC encryption key."""
        key = kcrypto.Key(etype, contents)
        return RodcPacEncryptionKey(key, kvno)
+
    def PasswordKey_create(self, etype=None, pwd=None, salt=None, kvno=None,
                           params=None):
        """Derive a key from a password and salt via string-to-key.

        Both pwd and salt are required despite the keyword defaults.
        """
        self.assertIsNotNone(pwd)
        self.assertIsNotNone(salt)
        key = kcrypto.string_to_key(etype, pwd, salt, params=params)
        return RodcPacEncryptionKey(key, kvno)
+
    def PasswordKey_from_etype_info2(self, creds, etype_info2, kvno=None):
        """Derive a key from creds using the etype and salt of an
        ETYPE-INFO2 entry."""
        e = etype_info2['etype']
        salt = etype_info2.get('salt')
        # NOTE(review): s2kparams is read but deliberately (underscore
        # name) not forwarded to PasswordKey_from_etype — confirm that
        # no tested etype needs it.
        _params = etype_info2.get('s2kparams')
        return self.PasswordKey_from_etype(creds, e,
                                           kvno=kvno,
                                           salt=salt)
+
+ def PasswordKey_from_creds(self, creds, etype):
+ kvno = creds.get_kvno()
+ salt = creds.get_salt()
+ return self.PasswordKey_from_etype(creds, etype,
+ kvno=kvno,
+ salt=salt)
+
    def PasswordKey_from_etype(self, creds, etype, kvno=None, salt=None):
        """Derive a key of the given etype for creds.

        RC4 keys come straight from the NT hash; any other etype is
        derived from the password and salt.
        """
        if etype == kcrypto.Enctype.RC4:
            nthash = creds.get_nt_hash()
            return self.SessionKey_create(etype=etype, contents=nthash, kvno=kvno)

        password = creds.get_password().encode('utf-8')
        return self.PasswordKey_create(
            etype=etype, pwd=password, salt=salt, kvno=kvno)
+
    def TicketDecryptionKey_from_creds(self, creds, etype=None):
        """Return the key used to decrypt tickets issued to creds.

        Prefers a forced key from the environment; otherwise derives one
        from the NT hash (RC4) or the password (other etypes).  When no
        etype is given, the first TGS etype is used unless it is a DES
        variant, in which case RC4 is chosen.
        """
        if etype is None:
            etypes = creds.get_tgs_krb5_etypes()
            # Skip single-DES etypes; fall back to RC4.
            if etypes and etypes[0] not in (kcrypto.Enctype.DES_CRC,
                                            kcrypto.Enctype.DES_MD5):
                etype = etypes[0]
            else:
                etype = kcrypto.Enctype.RC4

        forced_key = creds.get_forced_key(etype)
        if forced_key is not None:
            return forced_key

        kvno = creds.get_kvno()

        fail_msg = ("%s has no fixed key for etype[%s] kvno[%s] "
                    "nor a password specified, " % (
                        creds.get_username(), etype, kvno))

        if etype == kcrypto.Enctype.RC4:
            nthash = creds.get_nt_hash()
            self.assertIsNotNone(nthash, msg=fail_msg)
            return self.SessionKey_create(etype=etype,
                                          contents=nthash,
                                          kvno=kvno)

        password = creds.get_password()
        self.assertIsNotNone(password, msg=fail_msg)
        salt = creds.get_salt()
        return self.PasswordKey_create(etype=etype,
                                       pwd=password,
                                       salt=salt,
                                       kvno=kvno)
+
    def RandomKey(self, etype):
        """Generate a random session key of the given etype."""
        # NOTE(review): relies on kcrypto's private _get_enctype_profile
        # to obtain the key size for the etype.
        e = kcrypto._get_enctype_profile(etype)
        contents = samba.generate_random_bytes(e.keysize)
        return self.SessionKey_create(etype=etype, contents=contents)
+
+ def EncryptionKey_import(self, EncryptionKey_obj):
+ return self.SessionKey_create(EncryptionKey_obj['keytype'],
+ EncryptionKey_obj['keyvalue'])
+
+ def EncryptedData_create(self, key, usage, plaintext):
+ # EncryptedData ::= SEQUENCE {
+ # etype [0] Int32 -- EncryptionType --,
+ # kvno [1] Int32 OPTIONAL,
+ # cipher [2] OCTET STRING -- ciphertext
+ # }
+ ciphertext = key.encrypt(usage, plaintext)
+ EncryptedData_obj = {
+ 'etype': key.etype,
+ 'cipher': ciphertext
+ }
+ if key.kvno is not None:
+ EncryptedData_obj['kvno'] = key.kvno
+ return EncryptedData_obj
+
+ def Checksum_create(self, key, usage, plaintext, ctype=None):
+ # Checksum ::= SEQUENCE {
+ # cksumtype [0] Int32,
+ # checksum [1] OCTET STRING
+ # }
+ if ctype is None:
+ ctype = key.ctype
+ checksum = key.make_checksum(usage, plaintext, ctype=ctype)
+ Checksum_obj = {
+ 'cksumtype': ctype,
+ 'checksum': checksum,
+ }
+ return Checksum_obj
+
+ @classmethod
+ def PrincipalName_create(cls, name_type, names):
+ # PrincipalName ::= SEQUENCE {
+ # name-type [0] Int32,
+ # name-string [1] SEQUENCE OF KerberosString
+ # }
+ PrincipalName_obj = {
+ 'name-type': name_type,
+ 'name-string': names,
+ }
+ return PrincipalName_obj
+
+ def AuthorizationData_create(self, ad_type, ad_data):
+ # AuthorizationData ::= SEQUENCE {
+ # ad-type [0] Int32,
+ # ad-data [1] OCTET STRING
+ # }
+ AUTH_DATA_obj = {
+ 'ad-type': ad_type,
+ 'ad-data': ad_data
+ }
+ return AUTH_DATA_obj
+
+ def PA_DATA_create(self, padata_type, padata_value):
+ # PA-DATA ::= SEQUENCE {
+ # -- NOTE: first tag is [1], not [0]
+ # padata-type [1] Int32,
+ # padata-value [2] OCTET STRING -- might be encoded AP-REQ
+ # }
+ PA_DATA_obj = {
+ 'padata-type': padata_type,
+ 'padata-value': padata_value,
+ }
+ return PA_DATA_obj
+
+ def PA_ENC_TS_ENC_create(self, ts, usec):
+ # PA-ENC-TS-ENC ::= SEQUENCE {
+ # patimestamp[0] KerberosTime, -- client's time
+ # pausec[1] krb5int32 OPTIONAL
+ # }
+ PA_ENC_TS_ENC_obj = {
+ 'patimestamp': ts,
+ 'pausec': usec,
+ }
+ return PA_ENC_TS_ENC_obj
+
+ def PA_PAC_OPTIONS_create(self, options):
+ # PA-PAC-OPTIONS ::= SEQUENCE {
+ # options [0] PACOptionFlags
+ # }
+ PA_PAC_OPTIONS_obj = {
+ 'options': options
+ }
+ return PA_PAC_OPTIONS_obj
+
+ def KRB_FAST_ARMOR_create(self, armor_type, armor_value):
+ # KrbFastArmor ::= SEQUENCE {
+ # armor-type [0] Int32,
+ # armor-value [1] OCTET STRING,
+ # ...
+ # }
+ KRB_FAST_ARMOR_obj = {
+ 'armor-type': armor_type,
+ 'armor-value': armor_value
+ }
+ return KRB_FAST_ARMOR_obj
+
+ def KRB_FAST_REQ_create(self, fast_options, padata, req_body):
+ # KrbFastReq ::= SEQUENCE {
+ # fast-options [0] FastOptions,
+ # padata [1] SEQUENCE OF PA-DATA,
+ # req-body [2] KDC-REQ-BODY,
+ # ...
+ # }
+ KRB_FAST_REQ_obj = {
+ 'fast-options': fast_options,
+ 'padata': padata,
+ 'req-body': req_body
+ }
+ return KRB_FAST_REQ_obj
+
+ def KRB_FAST_ARMORED_REQ_create(self, armor, req_checksum, enc_fast_req):
+ # KrbFastArmoredReq ::= SEQUENCE {
+ # armor [0] KrbFastArmor OPTIONAL,
+ # req-checksum [1] Checksum,
+ # enc-fast-req [2] EncryptedData -- KrbFastReq --
+ # }
+ KRB_FAST_ARMORED_REQ_obj = {
+ 'req-checksum': req_checksum,
+ 'enc-fast-req': enc_fast_req
+ }
+ if armor is not None:
+ KRB_FAST_ARMORED_REQ_obj['armor'] = armor
+ return KRB_FAST_ARMORED_REQ_obj
+
+ def PA_FX_FAST_REQUEST_create(self, armored_data):
+ # PA-FX-FAST-REQUEST ::= CHOICE {
+ # armored-data [0] KrbFastArmoredReq,
+ # ...
+ # }
+ PA_FX_FAST_REQUEST_obj = {
+ 'armored-data': armored_data
+ }
+ return PA_FX_FAST_REQUEST_obj
+
+ def KERB_PA_PAC_REQUEST_create(self, include_pac, pa_data_create=True):
+ # KERB-PA-PAC-REQUEST ::= SEQUENCE {
+ # include-pac[0] BOOLEAN --If TRUE, and no pac present,
+ # -- include PAC.
+ # --If FALSE, and PAC present,
+ # -- remove PAC.
+ # }
+ KERB_PA_PAC_REQUEST_obj = {
+ 'include-pac': include_pac,
+ }
+ if not pa_data_create:
+ return KERB_PA_PAC_REQUEST_obj
+ pa_pac = self.der_encode(KERB_PA_PAC_REQUEST_obj,
+ asn1Spec=krb5_asn1.KERB_PA_PAC_REQUEST())
+ pa_data = self.PA_DATA_create(PADATA_PAC_REQUEST, pa_pac)
+ return pa_data
+
+ def get_pa_pac_options(self, options):
+ pac_options = self.PA_PAC_OPTIONS_create(options)
+ pac_options = self.der_encode(pac_options,
+ asn1Spec=krb5_asn1.PA_PAC_OPTIONS())
+ pac_options = self.PA_DATA_create(PADATA_PAC_OPTIONS, pac_options)
+
+ return pac_options
+
    def KDC_REQ_BODY_create(self,
                            kdc_options,
                            cname,
                            realm,
                            sname,
                            from_time,
                            till_time,
                            renew_time,
                            nonce,
                            etypes,
                            addresses,
                            additional_tickets,
                            EncAuthorizationData,
                            EncAuthorizationData_key,
                            EncAuthorizationData_usage,
                            asn1_print=None,
                            hexdump=None):
        """Build a KDC-REQ-BODY dict; optional fields are included only
        when not None.  EncAuthorizationData, when given, is DER-encoded
        and encrypted with EncAuthorizationData_key under
        EncAuthorizationData_usage.

        KDC-REQ-BODY ::= SEQUENCE {
               kdc-options             [0] KDCOptions,
               cname                   [1] PrincipalName OPTIONAL
                                           -- Used only in AS-REQ --,
               realm                   [2] Realm
                                           -- Server's realm
                                           -- Also client's in AS-REQ --,
               sname                   [3] PrincipalName OPTIONAL,
               from                    [4] KerberosTime OPTIONAL,
               till                    [5] KerberosTime,
               rtime                   [6] KerberosTime OPTIONAL,
               nonce                   [7] UInt32,
               etype                   [8] SEQUENCE OF Int32
                                           -- EncryptionType
                                           -- in preference order --,
               addresses               [9] HostAddresses OPTIONAL,
               enc-authorization-data  [10] EncryptedData OPTIONAL
                                           -- AuthorizationData --,
               additional-tickets      [11] SEQUENCE OF Ticket OPTIONAL
                                              -- NOTE: not empty
        }
        """
        if EncAuthorizationData is not None:
            enc_ad_plain = self.der_encode(
                EncAuthorizationData,
                asn1Spec=krb5_asn1.AuthorizationData(),
                asn1_print=asn1_print,
                hexdump=hexdump)
            enc_ad = self.EncryptedData_create(EncAuthorizationData_key,
                                               EncAuthorizationData_usage,
                                               enc_ad_plain)
        else:
            enc_ad = None
        KDC_REQ_BODY_obj = {
            'kdc-options': kdc_options,
            'realm': realm,
            'till': till_time,
            'nonce': nonce,
            'etype': etypes,
        }
        if cname is not None:
            KDC_REQ_BODY_obj['cname'] = cname
        if sname is not None:
            KDC_REQ_BODY_obj['sname'] = sname
        if from_time is not None:
            KDC_REQ_BODY_obj['from'] = from_time
        if renew_time is not None:
            KDC_REQ_BODY_obj['rtime'] = renew_time
        if addresses is not None:
            KDC_REQ_BODY_obj['addresses'] = addresses
        if enc_ad is not None:
            KDC_REQ_BODY_obj['enc-authorization-data'] = enc_ad
        if additional_tickets is not None:
            KDC_REQ_BODY_obj['additional-tickets'] = additional_tickets
        return KDC_REQ_BODY_obj
+
+ def KDC_REQ_create(self,
+ msg_type,
+ padata,
+ req_body,
+ asn1Spec=None,
+ asn1_print=None,
+ hexdump=None):
+ # KDC-REQ ::= SEQUENCE {
+ # -- NOTE: first tag is [1], not [0]
+ # pvno [1] INTEGER (5) ,
+ # msg-type [2] INTEGER (10 -- AS -- | 12 -- TGS --),
+ # padata [3] SEQUENCE OF PA-DATA OPTIONAL
+ # -- NOTE: not empty --,
+ # req-body [4] KDC-REQ-BODY
+ # }
+ #
+ KDC_REQ_obj = {
+ 'pvno': 5,
+ 'msg-type': msg_type,
+ 'req-body': req_body,
+ }
+ if padata is not None:
+ KDC_REQ_obj['padata'] = padata
+ if asn1Spec is not None:
+ KDC_REQ_decoded = pyasn1_native_decode(
+ KDC_REQ_obj, asn1Spec=asn1Spec)
+ else:
+ KDC_REQ_decoded = None
+ return KDC_REQ_obj, KDC_REQ_decoded
+
    def AS_REQ_create(self,
                      padata,  # optional
                      kdc_options,  # required
                      cname,  # optional
                      realm,  # required
                      sname,  # optional
                      from_time,  # optional
                      till_time,  # required
                      renew_time,  # optional
                      nonce,  # required
                      etypes,  # required
                      addresses,  # optional
                      additional_tickets,
                      native_decoded_only=True,
                      asn1_print=None,
                      hexdump=None):
        """Build an AS-REQ.

        Returns the pyasn1-decoded request, or (decoded, dict) when
        native_decoded_only is False.  No enc-authorization-data is
        included in an AS-REQ body.

        KDC-REQ ::= SEQUENCE {
                -- NOTE: first tag is [1], not [0]
                pvno            [1] INTEGER (5) ,
                msg-type        [2] INTEGER (10 -- AS -- | 12 -- TGS --),
                padata          [3] SEQUENCE OF PA-DATA OPTIONAL
                                    -- NOTE: not empty --,
                req-body        [4] KDC-REQ-BODY
        }

        KDC-REQ-BODY ::= SEQUENCE {
               kdc-options             [0] KDCOptions,
               cname                   [1] PrincipalName OPTIONAL
                                           -- Used only in AS-REQ --,
               realm                   [2] Realm
                                           -- Server's realm
                                           -- Also client's in AS-REQ --,
               sname                   [3] PrincipalName OPTIONAL,
               from                    [4] KerberosTime OPTIONAL,
               till                    [5] KerberosTime,
               rtime                   [6] KerberosTime OPTIONAL,
               nonce                   [7] UInt32,
               etype                   [8] SEQUENCE OF Int32
                                           -- EncryptionType
                                           -- in preference order --,
               addresses               [9] HostAddresses OPTIONAL,
               enc-authorization-data  [10] EncryptedData OPTIONAL
                                           -- AuthorizationData --,
               additional-tickets      [11] SEQUENCE OF Ticket OPTIONAL
                                              -- NOTE: not empty
        }
        """
        KDC_REQ_BODY_obj = self.KDC_REQ_BODY_create(
            kdc_options,
            cname,
            realm,
            sname,
            from_time,
            till_time,
            renew_time,
            nonce,
            etypes,
            addresses,
            additional_tickets,
            EncAuthorizationData=None,
            EncAuthorizationData_key=None,
            EncAuthorizationData_usage=None,
            asn1_print=asn1_print,
            hexdump=hexdump)
        obj, decoded = self.KDC_REQ_create(
            msg_type=KRB_AS_REQ,
            padata=padata,
            req_body=KDC_REQ_BODY_obj,
            asn1Spec=krb5_asn1.AS_REQ(),
            asn1_print=asn1_print,
            hexdump=hexdump)
        if native_decoded_only:
            return decoded
        return decoded, obj
+
+ def AP_REQ_create(self, ap_options, ticket, authenticator):
+ # AP-REQ ::= [APPLICATION 14] SEQUENCE {
+ # pvno [0] INTEGER (5),
+ # msg-type [1] INTEGER (14),
+ # ap-options [2] APOptions,
+ # ticket [3] Ticket,
+ # authenticator [4] EncryptedData -- Authenticator
+ # }
+ AP_REQ_obj = {
+ 'pvno': 5,
+ 'msg-type': KRB_AP_REQ,
+ 'ap-options': ap_options,
+ 'ticket': ticket,
+ 'authenticator': authenticator,
+ }
+ return AP_REQ_obj
+
+ def Authenticator_create(
+ self, crealm, cname, cksum, cusec, ctime, subkey, seq_number,
+ authorization_data):
+ # -- Unencrypted authenticator
+ # Authenticator ::= [APPLICATION 2] SEQUENCE {
+ # authenticator-vno [0] INTEGER (5),
+ # crealm [1] Realm,
+ # cname [2] PrincipalName,
+ # cksum [3] Checksum OPTIONAL,
+ # cusec [4] Microseconds,
+ # ctime [5] KerberosTime,
+ # subkey [6] EncryptionKey OPTIONAL,
+ # seq-number [7] UInt32 OPTIONAL,
+ # authorization-data [8] AuthorizationData OPTIONAL
+ # }
+ Authenticator_obj = {
+ 'authenticator-vno': 5,
+ 'crealm': crealm,
+ 'cname': cname,
+ 'cusec': cusec,
+ 'ctime': ctime,
+ }
+ if cksum is not None:
+ Authenticator_obj['cksum'] = cksum
+ if subkey is not None:
+ Authenticator_obj['subkey'] = subkey
+ if seq_number is not None:
+ Authenticator_obj['seq-number'] = seq_number
+ if authorization_data is not None:
+ Authenticator_obj['authorization-data'] = authorization_data
+ return Authenticator_obj
+
    def PKAuthenticator_create(self,
                               cusec,
                               ctime,
                               nonce,
                               *,
                               pa_checksum=None,
                               freshness_token=None,
                               kdc_name=None,
                               kdc_realm=None,
                               win2k_variant=False):
        """Build a PKAuthenticator dict.

        The win2k variant requires kdc_name/kdc_realm and forbids
        pa_checksum/freshness_token; the standard variant forbids
        kdc_name/kdc_realm.  Optional fields are included only when not
        None.
        """
        if win2k_variant:
            self.assertIsNone(pa_checksum)
            self.assertIsNone(freshness_token)
            self.assertIsNotNone(kdc_name)
            self.assertIsNotNone(kdc_realm)
        else:
            self.assertIsNone(kdc_name)
            self.assertIsNone(kdc_realm)

        pk_authenticator_obj = {
            'cusec': cusec,
            'ctime': ctime,
            'nonce': nonce,
        }
        if pa_checksum is not None:
            pk_authenticator_obj['paChecksum'] = pa_checksum
        if freshness_token is not None:
            pk_authenticator_obj['freshnessToken'] = freshness_token
        if kdc_name is not None:
            pk_authenticator_obj['kdcName'] = kdc_name
        if kdc_realm is not None:
            pk_authenticator_obj['kdcRealm'] = kdc_realm

        return pk_authenticator_obj
+
    def TGS_REQ_create(self,
                       padata,  # optional
                       cusec,
                       ctime,
                       ticket,
                       kdc_options,  # required
                       cname,  # optional
                       realm,  # required
                       sname,  # optional
                       from_time,  # optional
                       till_time,  # required
                       renew_time,  # optional
                       nonce,  # required
                       etypes,  # required
                       addresses,  # optional
                       EncAuthorizationData,
                       EncAuthorizationData_key,
                       additional_tickets,
                       ticket_session_key,
                       authenticator_subkey=None,
                       body_checksum_type=None,
                       native_decoded_only=True,
                       asn1_print=None,
                       hexdump=None):
        """Build a TGS-REQ message.

        The request body is built first so its DER encoding can be
        checksummed into the authenticator; the authenticator is
        encrypted with *ticket_session_key*, wrapped in an AP-REQ and
        attached to the request as PA-TGS-REQ padata.

        :param ticket: the ticket (TGT) presented to the KDC.
        :param ticket_session_key: key used to checksum the request body
            and encrypt the authenticator.
        :param authenticator_subkey: optional subkey placed in the
            authenticator; its presence also selects the key usage for
            any enc-authorization-data.
        :param native_decoded_only: if true, return only the decoded
            message; otherwise return (decoded, obj).
        """
        # KDC-REQ ::= SEQUENCE {
        #         -- NOTE: first tag is [1], not [0]
        #         pvno            [1] INTEGER (5) ,
        #         msg-type        [2] INTEGER (10 -- AS -- | 12 -- TGS --),
        #         padata          [3] SEQUENCE OF PA-DATA OPTIONAL
        #             -- NOTE: not empty --,
        #         req-body        [4] KDC-REQ-BODY
        # }
        #
        # KDC-REQ-BODY ::= SEQUENCE {
        #         kdc-options             [0] KDCOptions,
        #         cname                   [1] PrincipalName OPTIONAL
        #             -- Used only in AS-REQ --,
        #         realm                   [2] Realm
        #             -- Server's realm
        #             -- Also client's in AS-REQ --,
        #         sname                   [3] PrincipalName OPTIONAL,
        #         from                    [4] KerberosTime OPTIONAL,
        #         till                    [5] KerberosTime,
        #         rtime                   [6] KerberosTime OPTIONAL,
        #         nonce                   [7] UInt32,
        #         etype                   [8] SEQUENCE OF Int32
        #             -- EncryptionType
        #             -- in preference order --,
        #         addresses               [9] HostAddresses OPTIONAL,
        #         enc-authorization-data  [10] EncryptedData OPTIONAL
        #             -- AuthorizationData --,
        #         additional-tickets      [11] SEQUENCE OF Ticket OPTIONAL
        #             -- NOTE: not empty
        # }

        # The key usage for enc-authorization-data depends on whether it
        # is encrypted with the authenticator subkey or with the ticket
        # session key.
        if authenticator_subkey is not None:
            EncAuthorizationData_usage = KU_TGS_REQ_AUTH_DAT_SUBKEY
        else:
            EncAuthorizationData_usage = KU_TGS_REQ_AUTH_DAT_SESSION

        req_body = self.KDC_REQ_BODY_create(
            kdc_options=kdc_options,
            cname=None,
            realm=realm,
            sname=sname,
            from_time=from_time,
            till_time=till_time,
            renew_time=renew_time,
            nonce=nonce,
            etypes=etypes,
            addresses=addresses,
            additional_tickets=additional_tickets,
            EncAuthorizationData=EncAuthorizationData,
            EncAuthorizationData_key=EncAuthorizationData_key,
            EncAuthorizationData_usage=EncAuthorizationData_usage)
        req_body_blob = self.der_encode(req_body,
                                        asn1Spec=krb5_asn1.KDC_REQ_BODY(),
                                        asn1_print=asn1_print, hexdump=hexdump)

        # The authenticator checksum covers the DER-encoded request body.
        req_body_checksum = self.Checksum_create(ticket_session_key,
                                                 KU_TGS_REQ_AUTH_CKSUM,
                                                 req_body_blob,
                                                 ctype=body_checksum_type)

        subkey_obj = None
        if authenticator_subkey is not None:
            subkey_obj = authenticator_subkey.export_obj()
        seq_number = random.randint(0, 0xfffffffe)
        authenticator = self.Authenticator_create(
            crealm=realm,
            cname=cname,
            cksum=req_body_checksum,
            cusec=cusec,
            ctime=ctime,
            subkey=subkey_obj,
            seq_number=seq_number,
            authorization_data=None)
        authenticator = self.der_encode(
            authenticator,
            asn1Spec=krb5_asn1.Authenticator(),
            asn1_print=asn1_print,
            hexdump=hexdump)

        # Encrypt the authenticator with the ticket session key.
        authenticator = self.EncryptedData_create(
            ticket_session_key, KU_TGS_REQ_AUTH, authenticator)

        # Wrap ticket and authenticator in an AP-REQ, carried as
        # PA-TGS-REQ padata.
        ap_options = krb5_asn1.APOptions('0')
        ap_req = self.AP_REQ_create(ap_options=str(ap_options),
                                    ticket=ticket,
                                    authenticator=authenticator)
        ap_req = self.der_encode(ap_req, asn1Spec=krb5_asn1.AP_REQ(),
                                 asn1_print=asn1_print, hexdump=hexdump)
        pa_tgs_req = self.PA_DATA_create(PADATA_KDC_REQ, ap_req)
        if padata is not None:
            padata.append(pa_tgs_req)
        else:
            padata = [pa_tgs_req]

        obj, decoded = self.KDC_REQ_create(
            msg_type=KRB_TGS_REQ,
            padata=padata,
            req_body=req_body,
            asn1Spec=krb5_asn1.TGS_REQ(),
            asn1_print=asn1_print,
            hexdump=hexdump)
        if native_decoded_only:
            return decoded
        return decoded, obj
+
+ def PA_S4U2Self_create(self, name, realm, tgt_session_key, ctype=None):
+ # PA-S4U2Self ::= SEQUENCE {
+ # name [0] PrincipalName,
+ # realm [1] Realm,
+ # cksum [2] Checksum,
+ # auth [3] GeneralString
+ # }
+ cksum_data = name['name-type'].to_bytes(4, byteorder='little')
+ for n in name['name-string']:
+ cksum_data += n.encode()
+ cksum_data += realm.encode()
+ cksum_data += "Kerberos".encode()
+ cksum = self.Checksum_create(tgt_session_key,
+ KU_NON_KERB_CKSUM_SALT,
+ cksum_data,
+ ctype)
+
+ PA_S4U2Self_obj = {
+ 'name': name,
+ 'realm': realm,
+ 'cksum': cksum,
+ 'auth': "Kerberos",
+ }
+ pa_s4u2self = self.der_encode(
+ PA_S4U2Self_obj, asn1Spec=krb5_asn1.PA_S4U2Self())
+ return self.PA_DATA_create(PADATA_FOR_USER, pa_s4u2self)
+
+ def ChangePasswdDataMS_create(self,
+ new_password,
+ target_princ=None,
+ target_realm=None):
+ ChangePasswdDataMS_obj = {
+ 'newpasswd': new_password,
+ }
+ if target_princ is not None:
+ ChangePasswdDataMS_obj['targname'] = target_princ
+ if target_realm is not None:
+ ChangePasswdDataMS_obj['targrealm'] = target_realm
+
+ change_password_data = self.der_encode(
+ ChangePasswdDataMS_obj, asn1Spec=krb5_asn1.ChangePasswdDataMS())
+
+ return change_password_data
+
+ def KRB_PRIV_create(self,
+ subkey,
+ user_data,
+ s_address,
+ timestamp=None,
+ usec=None,
+ seq_number=None,
+ r_address=None):
+ EncKrbPrivPart_obj = {
+ 'user-data': user_data,
+ 's-address': s_address,
+ }
+ if timestamp is not None:
+ EncKrbPrivPart_obj['timestamp'] = timestamp
+ if usec is not None:
+ EncKrbPrivPart_obj['usec'] = usec
+ if seq_number is not None:
+ EncKrbPrivPart_obj['seq-number'] = seq_number
+ if r_address is not None:
+ EncKrbPrivPart_obj['r-address'] = r_address
+
+ enc_krb_priv_part = self.der_encode(
+ EncKrbPrivPart_obj, asn1Spec=krb5_asn1.EncKrbPrivPart())
+
+ enc_data = self.EncryptedData_create(subkey,
+ KU_KRB_PRIV,
+ enc_krb_priv_part)
+
+ KRB_PRIV_obj = {
+ 'pvno': 5,
+ 'msg-type': KRB_PRIV,
+ 'enc-part': enc_data,
+ }
+
+ krb_priv = self.der_encode(
+ KRB_PRIV_obj, asn1Spec=krb5_asn1.KRB_PRIV())
+
+ return krb_priv
+
+ def ContentInfo_create(self, content_type, content):
+ content_info_obj = {
+ 'contentType': content_type,
+ 'content': content,
+ }
+
+ return content_info_obj
+
+ def EncapsulatedContentInfo_create(self, content_type, content):
+ encapsulated_content_info_obj = {
+ 'eContentType': content_type,
+ 'eContent': content,
+ }
+
+ return encapsulated_content_info_obj
+
+ def SignedData_create(self,
+ digest_algorithms,
+ encap_content_info,
+ signer_infos,
+ *,
+ version=None,
+ certificates=None,
+ crls=None):
+ def is_cert_version_present(version):
+ return certificates is not None and any(
+ version in cert for cert in certificates)
+
+ def is_crl_version_present(version):
+ return crls is not None and any(
+ version in crl for crl in crls)
+
+ def is_signer_info_version_present(version):
+ return signer_infos is not None and any(
+ signer_info['version'] == version
+ for signer_info in signer_infos)
+
+ def data_version():
+ # per RFC5652 5.1:
+ if is_cert_version_present('other') or (
+ is_crl_version_present('other')):
+ return 5
+
+ if is_cert_version_present('v2AttrCert'):
+ return 4
+
+ if is_cert_version_present('v1AttrCert') or (
+ is_signer_info_version_present(3)) or (
+ encap_content_info['eContentType'] != krb5_asn1.id_data
+ ):
+ return 3
+
+ return 1
+
+ if version is None:
+ version = data_version()
+
+ signed_data_obj = {
+ 'version': version,
+ 'digestAlgorithms': digest_algorithms,
+ 'encapContentInfo': encap_content_info,
+ 'signerInfos': signer_infos,
+ }
+
+ if certificates is not None:
+ signed_data_obj['certificates'] = certificates
+ if crls is not None:
+ signed_data_obj['crls'] = crls
+
+ return signed_data_obj
+
+ def AuthPack_create(self,
+ pk_authenticator,
+ *,
+ client_public_value=None,
+ supported_cms_types=None,
+ client_dh_nonce=None,
+ win2k_variant=False):
+ if win2k_variant:
+ self.assertIsNone(supported_cms_types)
+ self.assertIsNone(client_dh_nonce)
+
+ auth_pack_obj = {
+ 'pkAuthenticator': pk_authenticator,
+ }
+
+ if client_public_value is not None:
+ auth_pack_obj['clientPublicValue'] = client_public_value
+ if supported_cms_types is not None:
+ auth_pack_obj['supportedCMSTypes'] = supported_cms_types
+ if client_dh_nonce is not None:
+ auth_pack_obj['clientDHNonce'] = client_dh_nonce
+
+ return auth_pack_obj
+
+ def PK_AS_REQ_create(self,
+ signed_auth_pack,
+ *,
+ trusted_certifiers=None,
+ kdc_pk_id=None,
+ kdc_cert=None,
+ encryption_cert=None,
+ win2k_variant=False):
+ if win2k_variant:
+ self.assertIsNone(kdc_pk_id)
+ asn1_spec = krb5_asn1.PA_PK_AS_REQ_Win2k
+ else:
+ self.assertIsNone(kdc_cert)
+ self.assertIsNone(encryption_cert)
+ asn1_spec = krb5_asn1.PA_PK_AS_REQ
+
+ content_info_obj = self.ContentInfo_create(
+ krb5_asn1.id_signedData, signed_auth_pack)
+ content_info = self.der_encode(content_info_obj,
+ asn1Spec=krb5_asn1.ContentInfo())
+
+ pk_as_req_obj = {
+ 'signedAuthPack': content_info,
+ }
+
+ if trusted_certifiers is not None:
+ pk_as_req_obj['trustedCertifiers'] = trusted_certifiers
+ if kdc_pk_id is not None:
+ pk_as_req_obj['kdcPkId'] = kdc_pk_id
+ if kdc_cert is not None:
+ pk_as_req_obj['kdcCert'] = kdc_cert
+ if encryption_cert is not None:
+ pk_as_req_obj['encryptionCert'] = encryption_cert
+
+ return self.der_encode(pk_as_req_obj, asn1Spec=asn1_spec())
+
+ def SignerInfo_create(self,
+ signer_id,
+ digest_algorithm,
+ signature_algorithm,
+ signature,
+ *,
+ version=None,
+ signed_attrs=None,
+ unsigned_attrs=None):
+ if version is None:
+ # per RFC5652 5.3:
+ if 'issuerAndSerialNumber' in signer_id:
+ version = 1
+ elif 'subjectKeyIdentifier' in signer_id:
+ version = 3
+ else:
+ self.fail(f'unknown signer ID version ({signer_id})')
+
+ signer_info_obj = {
+ 'version': version,
+ 'sid': signer_id,
+ 'digestAlgorithm': digest_algorithm,
+ 'signatureAlgorithm': signature_algorithm,
+ 'signature': signature,
+ }
+
+ if signed_attrs is not None:
+ signer_info_obj['signedAttrs'] = signed_attrs
+ if unsigned_attrs is not None:
+ signer_info_obj['unsignedAttrs'] = unsigned_attrs
+
+ return signer_info_obj
+
+ def SignerIdentifier_create(self, *,
+ issuer_and_serial_number=None,
+ subject_key_id=None):
+ if issuer_and_serial_number is not None:
+ return {'issuerAndSerialNumber': issuer_and_serial_number}
+
+ if subject_key_id is not None:
+ return {'subjectKeyIdentifier': subject_key_id}
+
+ self.fail('identifier not specified')
+
+ def AlgorithmIdentifier_create(self,
+ algorithm,
+ *,
+ parameters=None):
+ algorithm_id_obj = {
+ 'algorithm': algorithm,
+ }
+
+ if parameters is not None:
+ algorithm_id_obj['parameters'] = parameters
+
+ return algorithm_id_obj
+
+ def SubjectPublicKeyInfo_create(self,
+ algorithm,
+ public_key):
+ return {
+ 'algorithm': algorithm,
+ 'subjectPublicKey': public_key,
+ }
+
+ def ValidationParms_create(self,
+ seed,
+ pgen_counter):
+ return {
+ 'seed': seed,
+ 'pgenCounter': pgen_counter,
+ }
+
+ def DomainParameters_create(self,
+ p,
+ g,
+ *,
+ q=None,
+ j=None,
+ validation_parms=None):
+ domain_params_obj = {
+ 'p': p,
+ 'g': g,
+ }
+
+ if q is not None:
+ domain_params_obj['q'] = q
+ if j is not None:
+ domain_params_obj['j'] = j
+ if validation_parms is not None:
+ domain_params_obj['validationParms'] = validation_parms
+
+ return domain_params_obj
+
+ def length_in_bytes(self, value):
+ """Return the length in bytes of an integer once it is encoded as
+ bytes."""
+
+ self.assertGreaterEqual(value, 0, 'value must be positive')
+ self.assertIsInstance(value, int)
+
+ length_in_bits = max(1, math.log2(value + 1))
+ length_in_bytes = math.ceil(length_in_bits / 8)
+ return length_in_bytes
+
+ def bytes_from_int(self, value, *, length=None):
+ """Return an integer encoded big-endian into bytes of an optionally
+ specified length.
+ """
+ if length is None:
+ length = self.length_in_bytes(value)
+ return value.to_bytes(length, 'big')
+
+ def int_from_bytes(self, data):
+ """Return an integer decoded from bytes in big-endian format."""
+ return int.from_bytes(data, 'big')
+
+ def int_from_bit_string(self, string):
+ """Return an integer decoded from a bitstring."""
+ return int(string, base=2)
+
+ def bit_string_from_int(self, value):
+ """Return a bitstring encoding of an integer."""
+
+ string = f'{value:b}'
+
+ # The bitstring must be padded to a multiple of 8 bits in length, or
+ # pyasn1 will interpret it incorrectly (as if the padding bits were
+ # present, but on the wrong end).
+ length = len(string)
+ padding_len = math.ceil(length / 8) * 8 - length
+ return '0' * padding_len + string
+
+ def bit_string_from_bytes(self, data):
+ """Return a bitstring encoding of bytes in big-endian format."""
+ value = self.int_from_bytes(data)
+ return self.bit_string_from_int(value)
+
+ def bytes_from_bit_string(self, string):
+ """Return big-endian format bytes encoded from a bitstring."""
+ value = self.int_from_bit_string(string)
+ length = math.ceil(len(string) / 8)
+ return value.to_bytes(length, 'big')
+
+ def asn1_length(self, data):
+ """Return the ASN.1 encoding of the length of some data."""
+
+ length = len(data)
+
+ self.assertGreater(length, 0)
+ if length < 0x80:
+ return bytes([length])
+
+ encoding_len = self.length_in_bytes(length)
+ self.assertLess(encoding_len, 0x80,
+ 'item is too long to be ASN.1 encoded')
+
+ data = self.bytes_from_int(length, length=encoding_len)
+ return bytes([0x80 | encoding_len]) + data
+
+ @staticmethod
+ def octetstring2key(x, enctype):
+ """This implements the function defined in RFC4556 3.2.3.1 “Using
+ Diffie-Hellman Key Exchange”."""
+
+ seedsize = kcrypto.seedsize(enctype)
+ seed = b''
+
+ # A counter that cycles through the bytes 0x00–0xff.
+ counter = itertools.cycle(map(lambda x: bytes([x]),
+ range(256)))
+
+ while len(seed) < seedsize:
+ digest = hashes.Hash(hashes.SHA1(), default_backend())
+ digest.update(next(counter) + x)
+ seed += digest.finalize()
+
+ key = kcrypto.random_to_key(enctype, seed[:seedsize])
+ return RodcPacEncryptionKey(key, kvno=None)
+
+ def unpad(self, data):
+ """Return unpadded data."""
+ padding_len = data[-1]
+ expected_padding = bytes([padding_len]) * padding_len
+ self.assertEqual(expected_padding, data[-padding_len:],
+ 'invalid padding bytes')
+
+ return data[:-padding_len]
+
+ def try_decode(self, data, module=None):
+ """Try to decode some data of unknown type with various known ASN.1
+ schemata (optionally restricted to those from a particular module) and
+ print any results that seem promising. For use when debugging.
+ """
+
+ if module is None:
+ # Try a couple of known ASN.1 modules.
+ self.try_decode(data, krb5_asn1)
+ self.try_decode(data, pyasn1.type.univ)
+
+ # It’s helpful to stop and give the user a chance to examine the
+ # results.
+ self.fail('decoding done')
+
+ names = dir(module)
+ for name in names:
+ item = getattr(module, name)
+ if not callable(item):
+ continue
+
+ try:
+ decoded = self.der_decode(data, asn1Spec=item())
+ except Exception:
+ # Initiating the schema or decoding the ASN.1 failed for
+ # whatever reason.
+ pass
+ else:
+ # Decoding succeeded: print the structure to be examined.
+ print(f'\t{name}')
+ pprint(decoded)
+
+ def cipher_from_algorithm(self, algorithm):
+ if algorithm == str(krb5_asn1.aes256_CBC_PAD):
+ return algorithms.AES
+
+ if algorithm == str(krb5_asn1.des_EDE3_CBC):
+ return algorithms.TripleDES
+
+ self.fail(f'unknown cipher algorithm {algorithm}')
+
+ def hash_from_algorithm(self, algorithm):
+ # Let someone pass in an ObjectIdentifier.
+ algorithm = str(algorithm)
+
+ if algorithm == str(krb5_asn1.id_sha1):
+ return hashes.SHA1
+
+ if algorithm == str(krb5_asn1.sha1WithRSAEncryption):
+ return hashes.SHA1
+
+ if algorithm == str(krb5_asn1.rsaEncryption):
+ return hashes.SHA1
+
+ if algorithm == str(krb5_asn1.id_pkcs1_sha256WithRSAEncryption):
+ return hashes.SHA256
+
+ if algorithm == str(krb5_asn1.id_sha512):
+ return hashes.SHA512
+
+ self.fail(f'unknown hash algorithm {algorithm}')
+
+ def hash_from_algorithm_id(self, algorithm_id):
+ self.assertIsInstance(algorithm_id, dict)
+
+ hash = self.hash_from_algorithm(algorithm_id['algorithm'])
+
+ parameters = algorithm_id.get('parameters')
+ if self.strict_checking:
+ self.assertIsNotNone(parameters)
+ if parameters is not None:
+ self.assertEqual(b'\x05\x00', parameters)
+
+ return hash
+
+ def create_freshness_token(self,
+ epoch=None,
+ *,
+ offset=None,
+ krbtgt_creds=None):
+ timestamp, usec = self.get_KerberosTimeWithUsec(epoch, offset)
+
+ # Encode the freshness token as PA-ENC-TS-ENC.
+ ts_enc = self.PA_ENC_TS_ENC_create(timestamp, usec)
+ ts_enc = self.der_encode(ts_enc, asn1Spec=krb5_asn1.PA_ENC_TS_ENC())
+
+ if krbtgt_creds is None:
+ krbtgt_creds = self.get_krbtgt_creds()
+ krbtgt_key = self.TicketDecryptionKey_from_creds(krbtgt_creds)
+
+ # Encrypt the freshness token.
+ freshness = self.EncryptedData_create(krbtgt_key, KU_AS_FRESHNESS, ts_enc)
+
+ freshness_token = self.der_encode(freshness,
+ asn1Spec=krb5_asn1.EncryptedData())
+
+ # Prepend a couple of zero bytes.
+ freshness_token = bytes(2) + freshness_token
+
+ return freshness_token
+
+ def kpasswd_create(self,
+ subkey,
+ user_data,
+ version,
+ seq_number,
+ ap_req,
+ local_address,
+ remote_address):
+ self.assertIsNotNone(self.s, 'call self.connect() first')
+
+ timestamp, usec = self.get_KerberosTimeWithUsec()
+
+ krb_priv = self.KRB_PRIV_create(subkey,
+ user_data,
+ s_address=local_address,
+ timestamp=timestamp,
+ usec=usec,
+ seq_number=seq_number,
+ r_address=remote_address)
+
+ size = 6 + len(ap_req) + len(krb_priv)
+ self.assertLess(size, 0x10000)
+
+ msg = bytearray()
+ msg.append(size >> 8)
+ msg.append(size & 0xff)
+ msg.append(version >> 8)
+ msg.append(version & 0xff)
+ msg.append(len(ap_req) >> 8)
+ msg.append(len(ap_req) & 0xff)
+ # Note: for sets, there could be a little-endian four-byte length here.
+
+ msg.extend(ap_req)
+ msg.extend(krb_priv)
+
+ return msg
+
+ def get_enc_part(self, obj, key, usage):
+ self.assertElementEqual(obj, 'pvno', 5)
+
+ enc_part = obj['enc-part']
+ self.assertElementEqual(enc_part, 'etype', key.etype)
+ self.assertElementKVNO(enc_part, 'kvno', key.kvno)
+
+ enc_part = key.decrypt(usage, enc_part['cipher'])
+
+ return enc_part
+
    def kpasswd_exchange(self,
                         ticket,
                         new_password,
                         expected_code,
                         expected_msg,
                         mode,
                         target_princ=None,
                         target_realm=None,
                         ap_options=None,
                         send_seq_number=True):
        """Perform a kpasswd set/change-password exchange on port 464
        and verify the reply.

        :param ticket: ticket for the kadmin/changepw service, used to
            build the AP-REQ and to decrypt the AP-REP.
        :param mode: KpasswdMode.SET or KpasswdMode.CHANGE; selects the
            protocol version field and the request-data encoding.
        :param expected_code: expected kpasswd status code (or KRB-ERROR
            error code); either a single int or a container of
            acceptable codes.
        :param expected_msg: expected result message; either exact bytes
            or a container of acceptable messages.
        :param target_princ/target_realm: only valid for the SET mode.
        :param send_seq_number: whether the AP-REQ authenticator carries
            a sequence number.
        """
        if mode is self.KpasswdMode.SET:
            # Password set uses the Microsoft protocol version and a
            # DER-encoded ChangePasswdDataMS payload.
            version = 0xff80
            user_data = self.ChangePasswdDataMS_create(new_password,
                                                       target_princ,
                                                       target_realm)
        elif mode is self.KpasswdMode.CHANGE:
            self.assertIsNone(target_princ,
                              'target_princ only valid for pw set')
            self.assertIsNone(target_realm,
                              'target_realm only valid for pw set')

            # Password change sends the new password as raw UTF-8.
            version = 1
            user_data = new_password.encode('utf-8')
        else:
            self.fail(f'invalid mode {mode}')

        subkey = self.RandomKey(kcrypto.Enctype.AES256)

        if ap_options is None:
            ap_options = '0'
        ap_options = str(krb5_asn1.APOptions(ap_options))

        kdc_exchange_dict = {
            'tgt': ticket,
            'authenticator_subkey': subkey,
            'auth_data': None,
            'ap_options': ap_options,
        }

        if send_seq_number:
            seq_number = random.randint(0, 0xfffffffe)
        else:
            seq_number = None

        ap_req = self.generate_ap_req(kdc_exchange_dict,
                                      None,
                                      req_body=None,
                                      armor=False,
                                      usage=KU_AP_REQ_AUTH,
                                      seq_number=seq_number)

        # kpasswd listens on port 464.
        self.connect(self.host, port=464)
        self.assertIsNotNone(self.s)

        family = self.s.family

        if family == socket.AF_INET:
            addr_type = 2  # IPv4
        elif family == socket.AF_INET6:
            addr_type = 24  # IPv6
        else:
            self.fail(f'unknown family {family}')

        def create_address(ip):
            return {
                'addr-type': addr_type,
                'address': socket.inet_pton(family, ip),
            }

        local_ip = self.s.getsockname()[0]
        local_address = create_address(local_ip)

        # remote_ip = self.s.getpeername()[0]
        # remote_address = create_address(remote_ip)

        # TODO: due to a bug (?), MIT Kerberos will not accept the request
        # unless r-address is set to our _local_ address. Heimdal, on the
        # other hand, requires the r-address is set to the remote address
        # (as expected). To avoid problems, avoid sending r-address for
        # now.
        remote_address = None

        msg = self.kpasswd_create(subkey,
                                  user_data,
                                  version,
                                  seq_number,
                                  ap_req,
                                  local_address,
                                  remote_address)

        self.send_msg(msg)
        rep_pdu = self.recv_pdu_raw()

        self._disconnect('transaction done')

        self.assertIsNotNone(rep_pdu)

        # The 6-byte header holds three 16-bit big-endian values:
        # total length, protocol version, AP-REP length.
        header = rep_pdu[:6]
        reply = rep_pdu[6:]

        reply_len = (header[0] << 8) | header[1]
        reply_version = (header[2] << 8) | header[3]
        ap_rep_len = (header[4] << 8) | header[5]

        self.assertEqual(reply_len, len(rep_pdu))
        self.assertEqual(1, reply_version)  # KRB5_KPASSWD_VERS_CHANGEPW
        self.assertLess(ap_rep_len, reply_len)

        # NOTE(review): these guard the second PDU byte against the
        # values 0x7e/0x5e — presumably rejecting a reply that is a bare
        # ASN.1-tagged KRB-ERROR rather than a kpasswd PDU; confirm the
        # intended offset.
        self.assertNotEqual(0x7e, rep_pdu[1])
        self.assertNotEqual(0x5e, rep_pdu[1])

        if ap_rep_len:
            # We received an AP-REQ and KRB-PRIV as a response. This may
            # or may not indicate an error, depending on the status code.
            ap_rep = reply[:ap_rep_len]
            krb_priv = reply[ap_rep_len:]

            key = ticket.session_key

            ap_rep = self.der_decode(ap_rep, asn1Spec=krb5_asn1.AP_REP())
            self.assertElementEqual(ap_rep, 'msg-type', KRB_AP_REP)
            enc_part = self.get_enc_part(ap_rep, key, KU_AP_REQ_ENC_PART)
            enc_part = self.der_decode(
                enc_part, asn1Spec=krb5_asn1.EncAPRepPart())

            self.assertElementPresent(enc_part, 'ctime')
            self.assertElementPresent(enc_part, 'cusec')
            # self.assertElementMissing(enc_part, 'subkey') # TODO
            # self.assertElementPresent(enc_part, 'seq-number') # TODO

            try:
                krb_priv = self.der_decode(krb_priv, asn1Spec=krb5_asn1.KRB_PRIV())
            except PyAsn1Error:
                self.fail()

            self.assertElementEqual(krb_priv, 'msg-type', KRB_PRIV)
            priv_enc_part = self.get_enc_part(krb_priv, subkey, KU_KRB_PRIV)
            priv_enc_part = self.der_decode(
                priv_enc_part, asn1Spec=krb5_asn1.EncKrbPrivPart())

            self.assertElementMissing(priv_enc_part, 'timestamp')
            self.assertElementMissing(priv_enc_part, 'usec')
            # self.assertElementPresent(priv_enc_part, 'seq-number') # TODO
            # self.assertElementEqual(priv_enc_part, 's-address', remote_address) # TODO
            # self.assertElementMissing(priv_enc_part, 'r-address') # TODO

            result_data = priv_enc_part['user-data']
        else:
            # We received a KRB-ERROR as a response, indicating an error.
            krb_error = self.der_decode(reply, asn1Spec=krb5_asn1.KRB_ERROR())

            sname = self.PrincipalName_create(
                name_type=NT_PRINCIPAL,
                names=['kadmin', 'changepw'])
            realm = self.get_krbtgt_creds().get_realm().upper()

            self.assertElementEqual(krb_error, 'pvno', 5)
            self.assertElementEqual(krb_error, 'msg-type', KRB_ERROR)
            self.assertElementMissing(krb_error, 'ctime')
            self.assertElementMissing(krb_error, 'usec')
            self.assertElementPresent(krb_error, 'stime')
            self.assertElementPresent(krb_error, 'susec')

            error_code = krb_error['error-code']
            if isinstance(expected_code, int):
                self.assertEqual(error_code, expected_code)
            else:
                self.assertIn(error_code, expected_code)

            self.assertElementMissing(krb_error, 'crealm')
            self.assertElementMissing(krb_error, 'cname')
            self.assertElementEqual(krb_error, 'realm', realm.encode('utf-8'))
            self.assertElementEqualPrincipal(krb_error, 'sname', sname)
            self.assertElementMissing(krb_error, 'e-text')

            result_data = krb_error['e-data']

        # The result data is a 16-bit big-endian status code followed by
        # an optional message.
        status = result_data[:2]
        message = result_data[2:]

        status_code = (status[0] << 8) | status[1]
        if isinstance(expected_code, int):
            self.assertEqual(status_code, expected_code)
        else:
            self.assertIn(status_code, expected_code)

        if not message:
            self.assertEqual(0, status_code,
                             'got an error result, but no message')
            return

        # Check the first character of the message.
        if message[0]:
            if isinstance(expected_msg, bytes):
                self.assertEqual(message, expected_msg)
            else:
                self.assertIn(message, expected_msg)
        else:
            # We got AD password policy information.
            self.assertEqual(30, len(message))

            (empty_bytes,
             min_length,
             history_length,
             properties,
             expire_time,
             min_age) = struct.unpack('>HIIIQQ', message)
+
    def _generic_kdc_exchange(self,
                              kdc_exchange_dict,  # required
                              cname=None,  # optional
                              realm=None,  # required
                              sname=None,  # optional
                              from_time=None,  # optional
                              till_time=None,  # required
                              renew_time=None,  # optional
                              etypes=None,  # required
                              addresses=None,  # optional
                              additional_tickets=None,  # optional
                              EncAuthorizationData=None,  # optional
                              EncAuthorizationData_key=None,  # optional
                              EncAuthorizationData_usage=None):  # optional
        """Perform one AS or TGS exchange with the KDC and check the
        reply.

        Behaviour is driven almost entirely by *kdc_exchange_dict*:
        message types, expected errors, FAST parameters and the various
        generate_*/check_* callbacks all come from there.  Returns the
        result of check_error_fn (when a KRB-ERROR is expected) or of
        check_rep_fn otherwise.
        """

        check_error_fn = kdc_exchange_dict['check_error_fn']
        check_rep_fn = kdc_exchange_dict['check_rep_fn']
        generate_fast_fn = kdc_exchange_dict['generate_fast_fn']
        generate_fast_armor_fn = kdc_exchange_dict['generate_fast_armor_fn']
        generate_fast_padata_fn = kdc_exchange_dict['generate_fast_padata_fn']
        generate_padata_fn = kdc_exchange_dict['generate_padata_fn']
        callback_dict = kdc_exchange_dict['callback_dict']
        req_msg_type = kdc_exchange_dict['req_msg_type']
        req_asn1Spec = kdc_exchange_dict['req_asn1Spec']
        rep_msg_type = kdc_exchange_dict['rep_msg_type']

        expected_error_mode = kdc_exchange_dict['expected_error_mode']
        kdc_options = kdc_exchange_dict['kdc_options']

        pac_request = kdc_exchange_dict['pac_request']
        pac_options = kdc_exchange_dict['pac_options']

        # Parameters specific to the inner request body
        inner_req = kdc_exchange_dict['inner_req']

        # Parameters specific to the outer request body
        outer_req = kdc_exchange_dict['outer_req']

        if till_time is None:
            till_time = self.get_KerberosTime(offset=36000)

        # Reuse a caller-supplied nonce, otherwise generate one and
        # remember it for the response checks.
        if 'nonce' in kdc_exchange_dict:
            nonce = kdc_exchange_dict['nonce']
        else:
            nonce = self.get_Nonce()
            kdc_exchange_dict['nonce'] = nonce

        req_body = self.KDC_REQ_BODY_create(
            kdc_options=kdc_options,
            cname=cname,
            realm=realm,
            sname=sname,
            from_time=from_time,
            till_time=till_time,
            renew_time=renew_time,
            nonce=nonce,
            etypes=etypes,
            addresses=addresses,
            additional_tickets=additional_tickets,
            EncAuthorizationData=EncAuthorizationData,
            EncAuthorizationData_key=EncAuthorizationData_key,
            EncAuthorizationData_usage=EncAuthorizationData_usage)

        # With FAST, the inner (protected) request body may differ from
        # the outer one; apply the caller's overrides to each copy — a
        # value of None removes the field altogether.
        inner_req_body = dict(req_body)
        if inner_req is not None:
            for key, value in inner_req.items():
                if value is not None:
                    inner_req_body[key] = value
                else:
                    del inner_req_body[key]
        if outer_req is not None:
            for key, value in outer_req.items():
                if value is not None:
                    req_body[key] = value
                else:
                    del req_body[key]

        additional_padata = []
        if pac_request is not None:
            pa_pac_request = self.KERB_PA_PAC_REQUEST_create(pac_request)
            additional_padata.append(pa_pac_request)
        if pac_options is not None:
            pa_pac_options = self.get_pa_pac_options(pac_options)
            additional_padata.append(pa_pac_options)

        if req_msg_type == KRB_AS_REQ:
            tgs_req = None
            tgs_req_padata = None
        else:
            self.assertEqual(KRB_TGS_REQ, req_msg_type)

            # A TGS-REQ carries an AP-REQ, built over the outer request
            # body, as PA-TGS-REQ padata.
            tgs_req = self.generate_ap_req(kdc_exchange_dict,
                                           callback_dict,
                                           req_body,
                                           armor=False)
            tgs_req_padata = self.PA_DATA_create(PADATA_KDC_REQ, tgs_req)

        if generate_fast_padata_fn is not None:
            self.assertIsNotNone(generate_fast_fn)
            # This can alter req_body...
            fast_padata, req_body = generate_fast_padata_fn(kdc_exchange_dict,
                                                            callback_dict,
                                                            req_body)
        else:
            fast_padata = []

        if generate_fast_armor_fn is not None:
            self.assertIsNotNone(generate_fast_fn)
            fast_ap_req = generate_fast_armor_fn(kdc_exchange_dict,
                                                 callback_dict,
                                                 None,
                                                 armor=True)

            fast_armor_type = kdc_exchange_dict['fast_armor_type']
            fast_armor = self.KRB_FAST_ARMOR_create(fast_armor_type,
                                                    fast_ap_req)
        else:
            fast_armor = None

        if generate_padata_fn is not None:
            # This can alter req_body...
            outer_padata, req_body = generate_padata_fn(kdc_exchange_dict,
                                                        callback_dict,
                                                        req_body)
            self.assertIsNotNone(outer_padata)
            self.assertNotIn(PADATA_KDC_REQ,
                             [pa['padata-type'] for pa in outer_padata],
                             'Don\'t create TGS-REQ manually')
        else:
            outer_padata = None

        if generate_fast_fn is not None:
            armor_key = kdc_exchange_dict['armor_key']
            self.assertIsNotNone(armor_key)

            # The FAST checksum covers the DER-encoded outer request
            # body for an AS-REQ, or the PA-TGS-REQ AP-REQ for a
            # TGS-REQ.
            if req_msg_type == KRB_AS_REQ:
                checksum_blob = self.der_encode(
                    req_body,
                    asn1Spec=krb5_asn1.KDC_REQ_BODY())
            else:
                self.assertEqual(KRB_TGS_REQ, req_msg_type)
                checksum_blob = tgs_req

            checksum = self.Checksum_create(armor_key,
                                            KU_FAST_REQ_CHKSUM,
                                            checksum_blob)

            fast_padata += additional_padata
            fast = generate_fast_fn(kdc_exchange_dict,
                                    callback_dict,
                                    inner_req_body,
                                    fast_padata,
                                    fast_armor,
                                    checksum)
        else:
            fast = None

        # Assemble the outer padata sequence; the additional padata goes
        # inside the FAST request when FAST is in use, outside otherwise.
        padata = []

        if tgs_req_padata is not None:
            padata.append(tgs_req_padata)

        if fast is not None:
            padata.append(fast)

        if outer_padata is not None:
            padata += outer_padata

        if fast is None:
            padata += additional_padata

        if not padata:
            padata = None

        # Record what was sent so the response-checking callbacks can
        # refer back to it.
        kdc_exchange_dict['req_padata'] = padata
        kdc_exchange_dict['fast_padata'] = fast_padata
        kdc_exchange_dict['req_body'] = inner_req_body

        req_obj, req_decoded = self.KDC_REQ_create(msg_type=req_msg_type,
                                                   padata=padata,
                                                   req_body=req_body,
                                                   asn1Spec=req_asn1Spec())

        kdc_exchange_dict['req_obj'] = req_obj

        to_rodc = kdc_exchange_dict['to_rodc']

        rep = self.send_recv_transaction(req_decoded, to_rodc=to_rodc)
        self.assertIsNotNone(rep)

        msg_type = self.getElementValue(rep, 'msg-type')
        self.assertIsNotNone(msg_type)

        # Exactly one of check_error_fn/check_rep_fn must be set; it
        # determines which message type we expect back.
        expected_msg_type = None
        if check_error_fn is not None:
            expected_msg_type = KRB_ERROR
            self.assertIsNone(check_rep_fn)
            self.assertNotEqual(0, len(expected_error_mode))
            self.assertNotIn(0, expected_error_mode)
        if check_rep_fn is not None:
            expected_msg_type = rep_msg_type
            self.assertIsNone(check_error_fn)
            self.assertEqual(0, len(expected_error_mode))
        self.assertIsNotNone(expected_msg_type)
        if msg_type == KRB_ERROR:
            error_code = self.getElementValue(rep, 'error-code')
            fail_msg = f'Got unexpected error: {error_code}'
        else:
            fail_msg = f'Expected to fail with error: {expected_error_mode}'
        self.assertEqual(msg_type, expected_msg_type, fail_msg)

        if msg_type == KRB_ERROR:
            return check_error_fn(kdc_exchange_dict,
                                  callback_dict,
                                  rep)

        return check_rep_fn(kdc_exchange_dict, callback_dict, rep)
+
+ def as_exchange_dict(self,
+ creds=None,
+ client_cert=None,
+ expected_crealm=None,
+ expected_cname=None,
+ expected_anon=False,
+ expected_srealm=None,
+ expected_sname=None,
+ expected_account_name=None,
+ expected_groups=None,
+ unexpected_groups=None,
+ expected_upn_name=None,
+ expected_sid=None,
+ expected_requester_sid=None,
+ expected_domain_sid=None,
+ expected_device_domain_sid=None,
+ expected_supported_etypes=None,
+ expected_flags=None,
+ unexpected_flags=None,
+ ticket_decryption_key=None,
+ expect_ticket_checksum=None,
+ expect_full_checksum=None,
+ generate_fast_fn=None,
+ generate_fast_armor_fn=None,
+ generate_fast_padata_fn=None,
+ fast_armor_type=FX_FAST_ARMOR_AP_REQUEST,
+ generate_padata_fn=None,
+ check_error_fn=None,
+ check_rep_fn=None,
+ check_kdc_private_fn=None,
+ check_patypes=True,
+ callback_dict=None,
+ expected_error_mode=0,
+ expect_status=None,
+ expected_status=None,
+ expected_salt=None,
+ authenticator_subkey=None,
+ preauth_key=None,
+ armor_key=None,
+ armor_tgt=None,
+ armor_subkey=None,
+ auth_data=None,
+ kdc_options='',
+ inner_req=None,
+ outer_req=None,
+ pac_request=None,
+ pac_options=None,
+ ap_options=None,
+ fast_ap_options=None,
+ strict_edata_checking=True,
+ using_pkinit=PkInit.NOT_USED,
+ pk_nonce=None,
+ expect_edata=None,
+ expect_pac=True,
+ expect_client_claims=None,
+ expect_device_info=None,
+ expect_device_claims=None,
+ expect_upn_dns_info_ex=None,
+ expect_pac_attrs=None,
+ expect_pac_attrs_pac_request=None,
+ expect_requester_sid=None,
+ rc4_support=True,
+ expected_client_claims=None,
+ unexpected_client_claims=None,
+ expected_device_claims=None,
+ unexpected_device_claims=None,
+ expect_resource_groups_flag=None,
+ expected_device_groups=None,
+ expected_extra_pac_buffers=None,
+ to_rodc=False):
+ if expected_error_mode == 0:
+ expected_error_mode = ()
+ elif not isinstance(expected_error_mode, collections.abc.Container):
+ expected_error_mode = (expected_error_mode,)
+
+ kdc_exchange_dict = {
+ 'req_msg_type': KRB_AS_REQ,
+ 'req_asn1Spec': krb5_asn1.AS_REQ,
+ 'rep_msg_type': KRB_AS_REP,
+ 'rep_asn1Spec': krb5_asn1.AS_REP,
+ 'rep_encpart_asn1Spec': krb5_asn1.EncASRepPart,
+ 'creds': creds,
+ 'client_cert': client_cert,
+ 'expected_crealm': expected_crealm,
+ 'expected_cname': expected_cname,
+ 'expected_anon': expected_anon,
+ 'expected_srealm': expected_srealm,
+ 'expected_sname': expected_sname,
+ 'expected_account_name': expected_account_name,
+ 'expected_groups': expected_groups,
+ 'unexpected_groups': unexpected_groups,
+ 'expected_upn_name': expected_upn_name,
+ 'expected_sid': expected_sid,
+ 'expected_requester_sid': expected_requester_sid,
+ 'expected_domain_sid': expected_domain_sid,
+ 'expected_device_domain_sid': expected_device_domain_sid,
+ 'expected_supported_etypes': expected_supported_etypes,
+ 'expected_flags': expected_flags,
+ 'unexpected_flags': unexpected_flags,
+ 'ticket_decryption_key': ticket_decryption_key,
+ 'expect_ticket_checksum': expect_ticket_checksum,
+ 'expect_full_checksum': expect_full_checksum,
+ 'generate_fast_fn': generate_fast_fn,
+ 'generate_fast_armor_fn': generate_fast_armor_fn,
+ 'generate_fast_padata_fn': generate_fast_padata_fn,
+ 'fast_armor_type': fast_armor_type,
+ 'generate_padata_fn': generate_padata_fn,
+ 'check_error_fn': check_error_fn,
+ 'check_rep_fn': check_rep_fn,
+ 'check_kdc_private_fn': check_kdc_private_fn,
+ 'check_patypes': check_patypes,
+ 'callback_dict': callback_dict,
+ 'expected_error_mode': expected_error_mode,
+ 'expect_status': expect_status,
+ 'expected_status': expected_status,
+ 'expected_salt': expected_salt,
+ 'authenticator_subkey': authenticator_subkey,
+ 'preauth_key': preauth_key,
+ 'armor_key': armor_key,
+ 'armor_tgt': armor_tgt,
+ 'armor_subkey': armor_subkey,
+ 'auth_data': auth_data,
+ 'kdc_options': kdc_options,
+ 'inner_req': inner_req,
+ 'outer_req': outer_req,
+ 'pac_request': pac_request,
+ 'pac_options': pac_options,
+ 'ap_options': ap_options,
+ 'fast_ap_options': fast_ap_options,
+ 'strict_edata_checking': strict_edata_checking,
+ 'using_pkinit': using_pkinit,
+ 'pk_nonce': pk_nonce,
+ 'expect_edata': expect_edata,
+ 'expect_pac': expect_pac,
+ 'expect_client_claims': expect_client_claims,
+ 'expect_device_info': expect_device_info,
+ 'expect_device_claims': expect_device_claims,
+ 'expect_upn_dns_info_ex': expect_upn_dns_info_ex,
+ 'expect_pac_attrs': expect_pac_attrs,
+ 'expect_pac_attrs_pac_request': expect_pac_attrs_pac_request,
+ 'expect_requester_sid': expect_requester_sid,
+ 'rc4_support': rc4_support,
+ 'expected_client_claims': expected_client_claims,
+ 'unexpected_client_claims': unexpected_client_claims,
+ 'expected_device_claims': expected_device_claims,
+ 'unexpected_device_claims': unexpected_device_claims,
+ 'expect_resource_groups_flag': expect_resource_groups_flag,
+ 'expected_device_groups': expected_device_groups,
+ 'expected_extra_pac_buffers': expected_extra_pac_buffers,
+ 'to_rodc': to_rodc
+ }
+ if callback_dict is None:
+ callback_dict = {}
+
+ return kdc_exchange_dict
+
+ def tgs_exchange_dict(self,
+ creds=None,
+ expected_crealm=None,
+ expected_cname=None,
+ expected_anon=False,
+ expected_srealm=None,
+ expected_sname=None,
+ expected_account_name=None,
+ expected_groups=None,
+ unexpected_groups=None,
+ expected_upn_name=None,
+ expected_sid=None,
+ expected_requester_sid=None,
+ expected_domain_sid=None,
+ expected_device_domain_sid=None,
+ expected_supported_etypes=None,
+ expected_flags=None,
+ unexpected_flags=None,
+ ticket_decryption_key=None,
+ expect_ticket_checksum=None,
+ expect_full_checksum=None,
+ generate_fast_fn=None,
+ generate_fast_armor_fn=None,
+ generate_fast_padata_fn=None,
+ fast_armor_type=FX_FAST_ARMOR_AP_REQUEST,
+ generate_padata_fn=None,
+ check_error_fn=None,
+ check_rep_fn=None,
+ check_kdc_private_fn=None,
+ check_patypes=True,
+ expected_error_mode=0,
+ expect_status=None,
+ expected_status=None,
+ callback_dict=None,
+ tgt=None,
+ armor_key=None,
+ armor_tgt=None,
+ armor_subkey=None,
+ authenticator_subkey=None,
+ auth_data=None,
+ body_checksum_type=None,
+ kdc_options='',
+ inner_req=None,
+ outer_req=None,
+ pac_request=None,
+ pac_options=None,
+ ap_options=None,
+ fast_ap_options=None,
+ strict_edata_checking=True,
+ expect_edata=None,
+ expect_pac=True,
+ expect_client_claims=None,
+ expect_device_info=None,
+ expect_device_claims=None,
+ expect_upn_dns_info_ex=None,
+ expect_pac_attrs=None,
+ expect_pac_attrs_pac_request=None,
+ expect_requester_sid=None,
+ expected_proxy_target=None,
+ expected_transited_services=None,
+ rc4_support=True,
+ expected_client_claims=None,
+ unexpected_client_claims=None,
+ expected_device_claims=None,
+ unexpected_device_claims=None,
+ expect_resource_groups_flag=None,
+ expected_device_groups=None,
+ expected_extra_pac_buffers=None,
+ to_rodc=False):
+ if expected_error_mode == 0:
+ expected_error_mode = ()
+ elif not isinstance(expected_error_mode, collections.abc.Container):
+ expected_error_mode = (expected_error_mode,)
+
+ kdc_exchange_dict = {
+ 'req_msg_type': KRB_TGS_REQ,
+ 'req_asn1Spec': krb5_asn1.TGS_REQ,
+ 'rep_msg_type': KRB_TGS_REP,
+ 'rep_asn1Spec': krb5_asn1.TGS_REP,
+ 'rep_encpart_asn1Spec': krb5_asn1.EncTGSRepPart,
+ 'creds': creds,
+ 'expected_crealm': expected_crealm,
+ 'expected_cname': expected_cname,
+ 'expected_anon': expected_anon,
+ 'expected_srealm': expected_srealm,
+ 'expected_sname': expected_sname,
+ 'expected_account_name': expected_account_name,
+ 'expected_groups': expected_groups,
+ 'unexpected_groups': unexpected_groups,
+ 'expected_upn_name': expected_upn_name,
+ 'expected_sid': expected_sid,
+ 'expected_requester_sid': expected_requester_sid,
+ 'expected_domain_sid': expected_domain_sid,
+ 'expected_device_domain_sid': expected_device_domain_sid,
+ 'expected_supported_etypes': expected_supported_etypes,
+ 'expected_flags': expected_flags,
+ 'unexpected_flags': unexpected_flags,
+ 'ticket_decryption_key': ticket_decryption_key,
+ 'expect_ticket_checksum': expect_ticket_checksum,
+ 'expect_full_checksum': expect_full_checksum,
+ 'generate_fast_fn': generate_fast_fn,
+ 'generate_fast_armor_fn': generate_fast_armor_fn,
+ 'generate_fast_padata_fn': generate_fast_padata_fn,
+ 'fast_armor_type': fast_armor_type,
+ 'generate_padata_fn': generate_padata_fn,
+ 'check_error_fn': check_error_fn,
+ 'check_rep_fn': check_rep_fn,
+ 'check_kdc_private_fn': check_kdc_private_fn,
+ 'check_patypes': check_patypes,
+ 'callback_dict': callback_dict,
+ 'expected_error_mode': expected_error_mode,
+ 'expect_status': expect_status,
+ 'expected_status': expected_status,
+ 'tgt': tgt,
+ 'body_checksum_type': body_checksum_type,
+ 'armor_key': armor_key,
+ 'armor_tgt': armor_tgt,
+ 'armor_subkey': armor_subkey,
+ 'auth_data': auth_data,
+ 'authenticator_subkey': authenticator_subkey,
+ 'kdc_options': kdc_options,
+ 'inner_req': inner_req,
+ 'outer_req': outer_req,
+ 'pac_request': pac_request,
+ 'pac_options': pac_options,
+ 'ap_options': ap_options,
+ 'fast_ap_options': fast_ap_options,
+ 'strict_edata_checking': strict_edata_checking,
+ 'expect_edata': expect_edata,
+ 'expect_pac': expect_pac,
+ 'expect_client_claims': expect_client_claims,
+ 'expect_device_info': expect_device_info,
+ 'expect_device_claims': expect_device_claims,
+ 'expect_upn_dns_info_ex': expect_upn_dns_info_ex,
+ 'expect_pac_attrs': expect_pac_attrs,
+ 'expect_pac_attrs_pac_request': expect_pac_attrs_pac_request,
+ 'expect_requester_sid': expect_requester_sid,
+ 'expected_proxy_target': expected_proxy_target,
+ 'expected_transited_services': expected_transited_services,
+ 'rc4_support': rc4_support,
+ 'expected_client_claims': expected_client_claims,
+ 'unexpected_client_claims': unexpected_client_claims,
+ 'expected_device_claims': expected_device_claims,
+ 'unexpected_device_claims': unexpected_device_claims,
+ 'expect_resource_groups_flag': expect_resource_groups_flag,
+ 'expected_device_groups': expected_device_groups,
+ 'expected_extra_pac_buffers': expected_extra_pac_buffers,
+ 'to_rodc': to_rodc
+ }
+ if callback_dict is None:
+ callback_dict = {}
+
+ return kdc_exchange_dict
+
    def generic_check_kdc_rep(self,
                              kdc_exchange_dict,
                              callback_dict,
                              rep):
        """Validate a KDC reply (AS-REP or TGS-REP).

        Checks the outer reply structure (msg-type, crealm, cname), the
        contained ticket, and the encrypted part.  When PKINIT padata is
        present, decrypts and verifies the PA-PK-AS-REP (public-key or
        Diffie-Hellman variant) and derives the reply key from it.  When a
        FAST armor key is available, processes the PA-FX-FAST reply,
        applying any strengthen-key.  Finally decrypts the ticket and the
        encrypted part and hands them to check_kdc_private_fn.

        Returns the (unmodified) reply; stores the final reply key in
        kdc_exchange_dict['reply_key'] as a side effect.
        """

        expected_crealm = kdc_exchange_dict['expected_crealm']
        expected_anon = kdc_exchange_dict['expected_anon']
        expected_srealm = kdc_exchange_dict['expected_srealm']
        expected_sname = kdc_exchange_dict['expected_sname']
        ticket_decryption_key = kdc_exchange_dict['ticket_decryption_key']
        check_kdc_private_fn = kdc_exchange_dict['check_kdc_private_fn']
        rep_encpart_asn1Spec = kdc_exchange_dict['rep_encpart_asn1Spec']
        msg_type = kdc_exchange_dict['rep_msg_type']
        armor_key = kdc_exchange_dict['armor_key']

        self.assertElementEqual(rep, 'msg-type', msg_type)  # AS-REP | TGS-REP
        padata = self.getElementValue(rep, 'padata')
        if self.strict_checking:
            self.assertElementEqualUTF8(rep, 'crealm', expected_crealm)
        if self.cname_checking:
            # An anonymous reply must carry the well-known anonymous
            # principal rather than the requested client name.
            if expected_anon:
                expected_cname = self.PrincipalName_create(
                    name_type=NT_WELLKNOWN,
                    names=['WELLKNOWN', 'ANONYMOUS'])
            else:
                expected_cname = kdc_exchange_dict['expected_cname']
            self.assertElementEqualPrincipal(rep, 'cname', expected_cname)
        self.assertElementPresent(rep, 'ticket')
        ticket = self.getElementValue(rep, 'ticket')
        ticket_encpart = None
        ticket_cipher = None
        self.assertIsNotNone(ticket)
        if ticket is not None:  # Never None, but gives indentation
            self.assertElementEqual(ticket, 'tkt-vno', 5)
            self.assertElementEqualUTF8(ticket, 'realm', expected_srealm)
            self.assertElementEqualPrincipal(ticket, 'sname', expected_sname)
            self.assertElementPresent(ticket, 'enc-part')
            ticket_encpart = self.getElementValue(ticket, 'enc-part')
            self.assertIsNotNone(ticket_encpart)
            if ticket_encpart is not None:  # Never None, but gives indentation
                self.assertElementPresent(ticket_encpart, 'etype')

                # A kvno is expected unless the enc-tkt-in-skey
                # (user-to-user) KDC option was sent.
                kdc_options = kdc_exchange_dict['kdc_options']
                pos = len(tuple(krb5_asn1.KDCOptions('enc-tkt-in-skey'))) - 1
                expect_kvno = (pos >= len(kdc_options)
                               or kdc_options[pos] != '1')
                if expect_kvno:
                    # 'unspecified' means present, with any value != 0
                    self.assertElementKVNO(ticket_encpart, 'kvno',
                                           self.unspecified_kvno)
                else:
                    # For user-to-user, don't expect a kvno.
                    self.assertElementMissing(ticket_encpart, 'kvno')

                self.assertElementPresent(ticket_encpart, 'cipher')
                ticket_cipher = self.getElementValue(ticket_encpart, 'cipher')
        self.assertElementPresent(rep, 'enc-part')
        encpart = self.getElementValue(rep, 'enc-part')
        encpart_cipher = None
        self.assertIsNotNone(encpart)
        if encpart is not None:  # Never None, but gives indentation
            self.assertElementPresent(encpart, 'etype')
            # NOTE(review): this checks ticket_encpart rather than encpart —
            # presumably 'encpart' was intended here; confirm before changing.
            self.assertElementKVNO(ticket_encpart, 'kvno', 'autodetect')
            self.assertElementPresent(encpart, 'cipher')
            encpart_cipher = self.getElementValue(encpart, 'cipher')

        if self.padata_checking:
            self.check_reply_padata(kdc_exchange_dict,
                                    callback_dict,
                                    encpart,
                                    padata)

        ticket_checksum = None

        # Get the decryption key for the encrypted part
        encpart_decryption_key, encpart_decryption_usage = (
            self.get_preauth_key(kdc_exchange_dict))

        pa_dict = self.get_pa_dict(padata)

        # Select the PKINIT reply variant: the standard PA-PK-AS-REP, or the
        # older Win2k variant (PA-PK-AS-REP_19).
        pk_as_rep = pa_dict.get(PADATA_PK_AS_REP)
        if pk_as_rep is not None:
            pk_as_rep_asn1_spec = krb5_asn1.PA_PK_AS_REP
            reply_key_pack_asn1_spec = krb5_asn1.ReplyKeyPack
            pk_win2k = False
        else:
            pk_as_rep = pa_dict.get(PADATA_PK_AS_REP_19)
            pk_as_rep_asn1_spec = krb5_asn1.PA_PK_AS_REP_Win2k
            reply_key_pack_asn1_spec = krb5_asn1.ReplyKeyPack_Win2k
            pk_win2k = True
        if pk_as_rep is not None:
            pk_as_rep = self.der_decode(pk_as_rep,
                                        asn1Spec=pk_as_rep_asn1_spec())

            using_pkinit = kdc_exchange_dict['using_pkinit']
            if using_pkinit is PkInit.PUBLIC_KEY:
                # Public-key PKINIT: the reply key is RSA-encrypted to the
                # client certificate inside a CMS EnvelopedData structure.
                content_info = self.der_decode(
                    pk_as_rep['encKeyPack'],
                    asn1Spec=krb5_asn1.ContentInfo())
                self.assertEqual(str(krb5_asn1.id_envelopedData),
                                 content_info['contentType'])

                content = self.der_decode(content_info['content'],
                                          asn1Spec=krb5_asn1.EnvelopedData())

                self.assertEqual(0, content['version'])
                originator_info = content['originatorInfo']
                self.assertFalse(originator_info.get('certs'))
                self.assertFalse(originator_info.get('crls'))
                self.assertFalse(content.get('unprotectedAttrs'))

                encrypted_content_info = content['encryptedContentInfo']
                recipient_infos = content['recipientInfos']

                self.assertEqual(1, len(recipient_infos))
                ktri = recipient_infos[0]['ktri']

                if self.strict_checking:
                    self.assertEqual(0, ktri['version'])

                private_key = encpart_decryption_key
                self.assertIsInstance(private_key,
                                      asymmetric.rsa.RSAPrivateKey)

                client_subject_key_id = (
                    x509.SubjectKeyIdentifier.from_public_key(
                        private_key.public_key()))

                # Check that the client certificate is named as the recipient.
                ktri_rid = ktri['rid']
                try:
                    issuer_and_serial_number = ktri_rid[
                        'issuerAndSerialNumber']
                except KeyError:
                    subject_key_id = ktri_rid['subjectKeyIdentifier']
                    self.assertEqual(subject_key_id,
                                     client_subject_key_id.digest)
                else:
                    client_certificate = kdc_exchange_dict['client_cert']

                    self.assertIsNotNone(issuer_and_serial_number['issuer'])
                    self.assertEqual(issuer_and_serial_number['serialNumber'],
                                     client_certificate.serial_number)

                key_encryption_algorithm = ktri['keyEncryptionAlgorithm']
                self.assertEqual(str(krb5_asn1.rsaEncryption),
                                 key_encryption_algorithm['algorithm'])
                if self.strict_checking:
                    self.assertEqual(
                        b'\x05\x00',
                        key_encryption_algorithm.get('parameters'))

                encrypted_key = ktri['encryptedKey']

                # Decrypt the key.
                # Left-pad the ciphertext to the RSA modulus size (2048-bit)
                # in case leading zero octets were stripped.
                pad_len = 256 - len(encrypted_key)
                if pad_len:
                    encrypted_key = bytes(pad_len) + encrypted_key
                decrypted_key = private_key.decrypt(
                    encrypted_key,
                    padding=asymmetric.padding.PKCS1v15())

                self.assertEqual(str(krb5_asn1.id_signedData),
                                 encrypted_content_info['contentType'])

                encrypted_content = encrypted_content_info['encryptedContent']
                encryption_algorithm = encrypted_content_info[
                    'contentEncryptionAlgorithm']

                cipher_algorithm = self.cipher_from_algorithm(encryption_algorithm['algorithm'])

                # This will serve as the IV.
                parameters = self.der_decode(
                    encryption_algorithm['parameters'],
                    asn1Spec=krb5_asn1.CMSCBCParameter())

                # Decrypt the content.
                cipher = Cipher(cipher_algorithm(decrypted_key),
                                modes.CBC(parameters),
                                default_backend())
                decryptor = cipher.decryptor()
                decrypted_content = decryptor.update(encrypted_content)
                decrypted_content += decryptor.finalize()

                # The padding doesn’t fully comply to PKCS7 with a specified
                # blocksize, so we must unpad the data ourselves.
                decrypted_content = self.unpad(decrypted_content)

                signed_data = None
                signed_data_rfc2315 = None

                first_tag = decrypted_content[0]
                if first_tag == 0x30:  # ASN.1 SEQUENCE tag
                    signed_data = decrypted_content
                else:
                    # Windows encodes the ASN.1 incorrectly, neglecting to add
                    # the SEQUENCE tag. We’ll have to prepend it ourselves in
                    # order for the decoding to work.
                    encoded_len = self.asn1_length(decrypted_content)
                    decrypted_content = bytes([0x30]) + encoded_len + (
                        decrypted_content)

                    if first_tag == 0x02:  # ASN.1 INTEGER tag

                        # The INTEGER tag indicates that the data is encoded
                        # with the earlier variant of the SignedData ASN.1
                        # schema specified in RFC2315, as per [MS-PKCA] 2.2.4
                        # (PA-PK-AS-REP).
                        signed_data_rfc2315 = decrypted_content

                    elif first_tag == 0x06:  # ASN.1 OBJECT IDENTIFIER tag

                        # The OBJECT IDENTIFIER tag indicates that the data is
                        # encoded as SignedData and wrapped in a ContentInfo
                        # structure, which we shall have to decode first. This
                        # seems to be the case when the supportedCMSTypes field
                        # in the client’s AuthPack is missing or empty.

                        content_info = self.der_decode(
                            decrypted_content,
                            asn1Spec=krb5_asn1.ContentInfo())
                        self.assertEqual(str(krb5_asn1.id_signedData),
                                         content_info['contentType'])
                        signed_data = content_info['content']
                    else:
                        self.fail(f'got reply with unknown initial tag '
                                  f'({first_tag})')

                if signed_data is not None:
                    signed_data = self.der_decode(
                        signed_data, asn1Spec=krb5_asn1.SignedData())

                    encap_content_info = signed_data['encapContentInfo']

                    content_type = encap_content_info['eContentType']
                    content = encap_content_info['eContent']
                elif signed_data_rfc2315 is not None:
                    signed_data = self.der_decode(
                        signed_data_rfc2315,
                        asn1Spec=krb5_asn1.SignedData_RFC2315())

                    encap_content_info = signed_data['contentInfo']

                    content_type = encap_content_info['contentType']
                    content = self.der_decode(
                        encap_content_info['content'],
                        asn1Spec=pyasn1.type.univ.OctetString())
                else:
                    self.fail('we must have got SignedData')

                self.assertEqual(str(krb5_asn1.id_pkinit_rkeyData),
                                 content_type)
                reply_key_pack = self.der_decode(
                    content, asn1Spec=reply_key_pack_asn1_spec())

                req_obj = kdc_exchange_dict['req_obj']
                req_asn1Spec = kdc_exchange_dict['req_asn1Spec']
                req_obj = self.der_encode(req_obj,
                                          asn1Spec=req_asn1Spec())

                reply_key = reply_key_pack['replyKey']

                # Replace the encpart decryption key with the decrypted key
                # from the reply.
                encpart_decryption_key = self.SessionKey_create(
                    etype=reply_key['keytype'],
                    contents=reply_key['keyvalue'],
                    kvno=None)

                if not pk_win2k:
                    as_checksum = reply_key_pack['asChecksum']

                    # Verify the checksum over the AS request body.
                    kcrypto.verify_checksum(as_checksum['cksumtype'],
                                            encpart_decryption_key.key,
                                            KU_PKINIT_AS_REQ,
                                            req_obj,
                                            as_checksum['checksum'])
            elif using_pkinit is PkInit.DIFFIE_HELLMAN:
                # Diffie-Hellman PKINIT: the reply key is derived from a DH
                # key exchange using the KDC’s public value in the reply.
                content_info = self.der_decode(
                    pk_as_rep['dhInfo']['dhSignedData'],
                    asn1Spec=krb5_asn1.ContentInfo())
                self.assertEqual(str(krb5_asn1.id_signedData),
                                 content_info['contentType'])

                signed_data = self.der_decode(content_info['content'],
                                              asn1Spec=krb5_asn1.SignedData())

                encap_content_info = signed_data['encapContentInfo']
                content = encap_content_info['eContent']

                self.assertEqual(str(krb5_asn1.id_pkinit_DHKeyData),
                                 encap_content_info['eContentType'])

                dh_key_info = self.der_decode(
                    content, asn1Spec=krb5_asn1.KDCDHKeyInfo())

                self.assertNotIn('dhKeyExpiration', dh_key_info)

                dh_private_key = encpart_decryption_key
                self.assertIsInstance(dh_private_key,
                                      asymmetric.dh.DHPrivateKey)

                self.assertElementEqual(dh_key_info, 'nonce',
                                        kdc_exchange_dict['pk_nonce'])

                dh_public_key_data = self.bytes_from_bit_string(
                    dh_key_info['subjectPublicKey'])
                dh_public_key_decoded = self.der_decode(
                    dh_public_key_data, asn1Spec=krb5_asn1.DHPublicKey())

                dh_numbers = dh_private_key.parameters().parameter_numbers()

                public_numbers = asymmetric.dh.DHPublicNumbers(
                    dh_public_key_decoded, dh_numbers)
                dh_public_key = public_numbers.public_key(default_backend())

                # Perform the Diffie-Hellman key exchange.
                shared_secret = dh_private_key.exchange(dh_public_key)

                # Pad the shared secret out to the length of ‘p’.
                p_len = self.length_in_bytes(dh_numbers.p)
                padding_len = p_len - len(shared_secret)
                self.assertGreaterEqual(padding_len, 0)
                padded_shared_secret = bytes(padding_len) + shared_secret

                reply_key_enc_type = self.expected_etype(kdc_exchange_dict)

                # At the moment, we don’t specify a nonce in the request, so we
                # can assume these are empty.
                client_nonce = b''
                server_nonce = b''

                ciphertext = padded_shared_secret + client_nonce + server_nonce

                # Replace the encpart decryption key with the key derived from
                # the Diffie-Hellman key exchange.
                encpart_decryption_key = self.octetstring2key(
                    ciphertext, reply_key_enc_type)
            else:
                self.fail(f'invalid value for using_pkinit: {using_pkinit}')

            # Common checks on the CMS SignedData for both PKINIT variants.
            self.assertEqual(3, signed_data['version'])

            digest_algorithms = signed_data['digestAlgorithms']
            self.assertEqual(1, len(digest_algorithms))
            digest_algorithm = digest_algorithms[0]
            # Ensure the hash algorithm is valid.
            _ = self.hash_from_algorithm_id(digest_algorithm)

            self.assertFalse(signed_data.get('crls'))

            signer_infos = signed_data['signerInfos']
            self.assertEqual(1, len(signer_infos))
            signer_info = signer_infos[0]

            self.assertEqual(1, signer_info['version'])

            # Get the certificate presented by the KDC.
            kdc_certificates = signed_data['certificates']
            self.assertEqual(1, len(kdc_certificates))
            kdc_certificate = self.der_encode(
                kdc_certificates[0], asn1Spec=krb5_asn1.CertificateChoices())
            kdc_certificate = x509.load_der_x509_certificate(kdc_certificate,
                                                             default_backend())

            # Verify that the KDC’s certificate is named as the signer.
            sid = signer_info['sid']
            try:
                issuer_and_serial_number = sid['issuerAndSerialNumber']
            except KeyError:
                extension = kdc_certificate.extensions.get_extension_for_oid(
                    x509.oid.ExtensionOID.SUBJECT_KEY_IDENTIFIER)
                cert_subject_key_id = extension.value.digest
                self.assertEqual(sid['subjectKeyIdentifier'], cert_subject_key_id)
            else:
                self.assertIsNotNone(issuer_and_serial_number['issuer'])
                self.assertEqual(issuer_and_serial_number['serialNumber'],
                                 kdc_certificate.serial_number)

            digest_algorithm = signer_info['digestAlgorithm']
            digest_hash_fn = self.hash_from_algorithm_id(digest_algorithm)

            signed_attrs = signer_info['signedAttrs']
            self.assertEqual(2, len(signed_attrs))

            # First signed attribute: the content type.
            signed_attr0 = signed_attrs[0]
            self.assertEqual(str(krb5_asn1.id_contentType),
                             signed_attr0['type'])
            signed_attr0_values = signed_attr0['values']
            self.assertEqual(1, len(signed_attr0_values))
            signed_attr0_value = self.der_decode(
                signed_attr0_values[0],
                asn1Spec=krb5_asn1.ContentType())
            if using_pkinit is PkInit.DIFFIE_HELLMAN:
                self.assertEqual(str(krb5_asn1.id_pkinit_DHKeyData),
                                 signed_attr0_value)
            else:
                self.assertEqual(str(krb5_asn1.id_pkinit_rkeyData),
                                 signed_attr0_value)

            # Second signed attribute: the message digest.
            signed_attr1 = signed_attrs[1]
            self.assertEqual(str(krb5_asn1.id_messageDigest),
                             signed_attr1['type'])
            signed_attr1_values = signed_attr1['values']
            self.assertEqual(1, len(signed_attr1_values))
            message_digest = self.der_decode(signed_attr1_values[0],
                                             krb5_asn1.MessageDigest())

            signature_algorithm = signer_info['signatureAlgorithm']
            hash_fn = self.hash_from_algorithm_id(signature_algorithm)

            # Compute the hash of the content to be signed. With the
            # Diffie-Hellman key exchange, this signature is over the type
            # KDCDHKeyInfo; otherwise, it is over the type ReplyKeyPack.
            digest = hashes.Hash(digest_hash_fn(), default_backend())
            digest.update(content)
            digest = digest.finalize()

            # Verify the hash. Note: this is a non–constant time comparison.
            self.assertEqual(digest, message_digest)

            # Re-encode the attributes ready for verifying the signature.
            cms_attrs = self.der_encode(signed_attrs,
                                        asn1Spec=krb5_asn1.CMSAttributes())

            # Verify the signature.
            kdc_public_key = kdc_certificate.public_key()
            kdc_public_key.verify(
                signer_info['signature'],
                cms_attrs,
                asymmetric.padding.PKCS1v15(),
                hash_fn())

            self.assertFalse(signer_info.get('unsignedAttrs'))

        # FAST: decrypt the armored reply, apply the strengthen-key to the
        # reply key, and pick up the ticket checksum from the finished field.
        if armor_key is not None:
            if PADATA_FX_FAST in pa_dict:
                fx_fast_data = pa_dict[PADATA_FX_FAST]
                fast_response = self.check_fx_fast_data(kdc_exchange_dict,
                                                        fx_fast_data,
                                                        armor_key,
                                                        finished=True)

                if 'strengthen-key' in fast_response:
                    strengthen_key = self.EncryptionKey_import(
                        fast_response['strengthen-key'])
                    encpart_decryption_key = (
                        self.generate_strengthen_reply_key(
                            strengthen_key,
                            encpart_decryption_key))

                fast_finished = fast_response.get('finished')
                if fast_finished is not None:
                    ticket_checksum = fast_finished['ticket-checksum']

                self.check_rep_padata(kdc_exchange_dict,
                                      callback_dict,
                                      fast_response['padata'],
                                      error_code=0)

        # Decrypt the ticket, if we know its key.
        ticket_private = None
        if ticket_decryption_key is not None:
            self.assertElementEqual(ticket_encpart, 'etype',
                                    ticket_decryption_key.etype)
            self.assertElementKVNO(ticket_encpart, 'kvno',
                                   ticket_decryption_key.kvno)
            ticket_decpart = ticket_decryption_key.decrypt(KU_TICKET,
                                                           ticket_cipher)
            ticket_private = self.der_decode(
                ticket_decpart,
                asn1Spec=krb5_asn1.EncTicketPart())

        # Decrypt the reply’s encrypted part with the (possibly
        # PKINIT-derived and/or FAST-strengthened) reply key.
        encpart_private = None
        self.assertIsNotNone(encpart_decryption_key)
        if encpart_decryption_key is not None:
            self.assertElementEqual(encpart, 'etype',
                                    encpart_decryption_key.etype)
            if self.strict_checking:
                self.assertElementKVNO(encpart, 'kvno',
                                       encpart_decryption_key.kvno)
            rep_decpart = encpart_decryption_key.decrypt(
                encpart_decryption_usage,
                encpart_cipher)
            # MIT KDC encodes both EncASRepPart and EncTGSRepPart with
            # application tag 26
            try:
                encpart_private = self.der_decode(
                    rep_decpart,
                    asn1Spec=rep_encpart_asn1Spec())
            except Exception:
                encpart_private = self.der_decode(
                    rep_decpart,
                    asn1Spec=krb5_asn1.EncTGSRepPart())

        kdc_exchange_dict['reply_key'] = encpart_decryption_key

        self.assertIsNotNone(check_kdc_private_fn)
        if check_kdc_private_fn is not None:
            check_kdc_private_fn(kdc_exchange_dict, callback_dict,
                                 rep, ticket_private, encpart_private,
                                 ticket_checksum)

        return rep
+
+ def check_fx_fast_data(self,
+ kdc_exchange_dict,
+ fx_fast_data,
+ armor_key,
+ finished=False,
+ expect_strengthen_key=True):
+ fx_fast_data = self.der_decode(fx_fast_data,
+ asn1Spec=krb5_asn1.PA_FX_FAST_REPLY())
+
+ enc_fast_rep = fx_fast_data['armored-data']['enc-fast-rep']
+ self.assertEqual(enc_fast_rep['etype'], armor_key.etype)
+
+ fast_rep = armor_key.decrypt(KU_FAST_REP, enc_fast_rep['cipher'])
+
+ fast_response = self.der_decode(fast_rep,
+ asn1Spec=krb5_asn1.KrbFastResponse())
+
+ if expect_strengthen_key and self.strict_checking:
+ self.assertIn('strengthen-key', fast_response)
+
+ if finished:
+ self.assertIn('finished', fast_response)
+
+ # Ensure that the nonce matches the nonce in the body of the request
+ # (RFC6113 5.4.3).
+ nonce = kdc_exchange_dict['nonce']
+ self.assertEqual(nonce, fast_response['nonce'])
+
+ return fast_response
+
+ def generic_check_kdc_private(self,
+ kdc_exchange_dict,
+ callback_dict,
+ rep,
+ ticket_private,
+ encpart_private,
+ ticket_checksum):
+ kdc_options = kdc_exchange_dict['kdc_options']
+ canon_pos = len(tuple(krb5_asn1.KDCOptions('canonicalize'))) - 1
+ canonicalize = (canon_pos < len(kdc_options)
+ and kdc_options[canon_pos] == '1')
+ renewable_pos = len(tuple(krb5_asn1.KDCOptions('renewable'))) - 1
+ renewable = (renewable_pos < len(kdc_options)
+ and kdc_options[renewable_pos] == '1')
+ renew_pos = len(tuple(krb5_asn1.KDCOptions('renew'))) - 1
+ renew = (renew_pos < len(kdc_options)
+ and kdc_options[renew_pos] == '1')
+ expect_renew_till = renewable or renew
+
+ expected_crealm = kdc_exchange_dict['expected_crealm']
+ expected_cname = kdc_exchange_dict['expected_cname']
+ expected_srealm = kdc_exchange_dict['expected_srealm']
+ expected_sname = kdc_exchange_dict['expected_sname']
+ ticket_decryption_key = kdc_exchange_dict['ticket_decryption_key']
+
+ rep_msg_type = kdc_exchange_dict['rep_msg_type']
+
+ expected_flags = kdc_exchange_dict.get('expected_flags')
+ unexpected_flags = kdc_exchange_dict.get('unexpected_flags')
+
+ ticket = self.getElementValue(rep, 'ticket')
+
+ if ticket_checksum is not None:
+ armor_key = kdc_exchange_dict['armor_key']
+ self.verify_ticket_checksum(ticket, ticket_checksum, armor_key)
+
+ to_rodc = kdc_exchange_dict['to_rodc']
+ if to_rodc:
+ krbtgt_creds = self.get_rodc_krbtgt_creds()
+ else:
+ krbtgt_creds = self.get_krbtgt_creds()
+ krbtgt_key = self.TicketDecryptionKey_from_creds(krbtgt_creds)
+
+ krbtgt_keys = [krbtgt_key]
+ if not self.strict_checking:
+ krbtgt_key_rc4 = self.TicketDecryptionKey_from_creds(
+ krbtgt_creds,
+ etype=kcrypto.Enctype.RC4)
+ krbtgt_keys.append(krbtgt_key_rc4)
+
+ if self.expect_pac and self.is_tgs(expected_sname):
+ expect_pac = True
+ else:
+ expect_pac = kdc_exchange_dict['expect_pac']
+
+ ticket_session_key = None
+ if ticket_private is not None:
+ self.assertElementFlags(ticket_private, 'flags',
+ expected_flags,
+ unexpected_flags)
+ self.assertElementPresent(ticket_private, 'key')
+ ticket_key = self.getElementValue(ticket_private, 'key')
+ self.assertIsNotNone(ticket_key)
+ if ticket_key is not None: # Never None, but gives indentation
+ self.assertElementPresent(ticket_key, 'keytype')
+ self.assertElementPresent(ticket_key, 'keyvalue')
+ ticket_session_key = self.EncryptionKey_import(ticket_key)
+ self.assertElementEqualUTF8(ticket_private, 'crealm',
+ expected_crealm)
+ if self.cname_checking:
+ self.assertElementEqualPrincipal(ticket_private, 'cname',
+ expected_cname)
+ self.assertElementPresent(ticket_private, 'transited')
+ self.assertElementPresent(ticket_private, 'authtime')
+ if self.strict_checking:
+ self.assertElementPresent(ticket_private, 'starttime')
+ self.assertElementPresent(ticket_private, 'endtime')
+ if self.strict_checking:
+ if expect_renew_till:
+ self.assertElementPresent(ticket_private, 'renew-till')
+ else:
+ self.assertElementMissing(ticket_private, 'renew-till')
+ if self.strict_checking:
+ self.assertElementMissing(ticket_private, 'caddr')
+ if expect_pac is not None:
+ if expect_pac:
+ self.assertElementPresent(ticket_private,
+ 'authorization-data',
+ expect_empty=not expect_pac)
+ else:
+ # It is more correct to not have an authorization-data
+ # present than an empty one.
+ #
+ # https://github.com/krb5/krb5/pull/1225#issuecomment-995104193
+ v = self.getElementValue(ticket_private,
+ 'authorization-data')
+ if v is not None:
+ self.assertElementPresent(ticket_private,
+ 'authorization-data',
+ expect_empty=True)
+
+ encpart_session_key = None
+ if encpart_private is not None:
+ self.assertElementPresent(encpart_private, 'key')
+ encpart_key = self.getElementValue(encpart_private, 'key')
+ self.assertIsNotNone(encpart_key)
+ if encpart_key is not None: # Never None, but gives indentation
+ self.assertElementPresent(encpart_key, 'keytype')
+ self.assertElementPresent(encpart_key, 'keyvalue')
+ encpart_session_key = self.EncryptionKey_import(encpart_key)
+ self.assertElementPresent(encpart_private, 'last-req')
+ expected_nonce = kdc_exchange_dict.get('pk_nonce')
+ if not expected_nonce:
+ expected_nonce = kdc_exchange_dict['nonce']
+ self.assertElementEqual(encpart_private, 'nonce',
+ expected_nonce)
+ if rep_msg_type == KRB_AS_REP:
+ if self.strict_checking:
+ self.assertElementPresent(encpart_private,
+ 'key-expiration')
+ else:
+ self.assertElementMissing(encpart_private,
+ 'key-expiration')
+ self.assertElementFlags(encpart_private, 'flags',
+ expected_flags,
+ unexpected_flags)
+ self.assertElementPresent(encpart_private, 'authtime')
+ if self.strict_checking:
+ self.assertElementPresent(encpart_private, 'starttime')
+ self.assertElementPresent(encpart_private, 'endtime')
+ if self.strict_checking:
+ if expect_renew_till:
+ self.assertElementPresent(encpart_private, 'renew-till')
+ else:
+ self.assertElementMissing(encpart_private, 'renew-till')
+ self.assertElementEqualUTF8(encpart_private, 'srealm',
+ expected_srealm)
+ self.assertElementEqualPrincipal(encpart_private, 'sname',
+ expected_sname)
+ if self.strict_checking:
+ self.assertElementMissing(encpart_private, 'caddr')
+
+ sent_pac_options = self.get_sent_pac_options(kdc_exchange_dict)
+
+ sent_enc_pa_rep = self.sent_enc_pa_rep(kdc_exchange_dict)
+
+ enc_padata = self.getElementValue(encpart_private,
+ 'encrypted-pa-data')
+ if (canonicalize or '1' in sent_pac_options or (
+ rep_msg_type == KRB_AS_REP and sent_enc_pa_rep)):
+ if self.strict_checking:
+ self.assertIsNotNone(enc_padata)
+
+ if enc_padata is not None:
+ enc_pa_dict = self.get_pa_dict(enc_padata)
+ if self.strict_checking:
+ if canonicalize:
+ self.assertIn(PADATA_SUPPORTED_ETYPES, enc_pa_dict)
+ else:
+ self.assertNotIn(PADATA_SUPPORTED_ETYPES,
+ enc_pa_dict)
+
+ if '1' in sent_pac_options:
+ self.assertIn(PADATA_PAC_OPTIONS, enc_pa_dict)
+ else:
+ self.assertNotIn(PADATA_PAC_OPTIONS, enc_pa_dict)
+
+ if rep_msg_type == KRB_AS_REP and sent_enc_pa_rep:
+ self.assertIn(PADATA_REQ_ENC_PA_REP, enc_pa_dict)
+ else:
+ self.assertNotIn(PADATA_REQ_ENC_PA_REP, enc_pa_dict)
+
+ if PADATA_SUPPORTED_ETYPES in enc_pa_dict:
+ expected_supported_etypes = kdc_exchange_dict[
+ 'expected_supported_etypes']
+
+ (supported_etypes,) = struct.unpack(
+ '<L',
+ enc_pa_dict[PADATA_SUPPORTED_ETYPES])
+
+ ignore_bits = (security.KERB_ENCTYPE_DES_CBC_CRC |
+ security.KERB_ENCTYPE_DES_CBC_MD5)
+
+ self.assertEqual(
+ supported_etypes & ~ignore_bits,
+ expected_supported_etypes & ~ignore_bits,
+ f'PADATA_SUPPORTED_ETYPES: got: {supported_etypes} (0x{supported_etypes:X}), '
+ f'expected: {expected_supported_etypes} (0x{expected_supported_etypes:X})')
+
+ if PADATA_PAC_OPTIONS in enc_pa_dict:
+ pac_options = self.der_decode(
+ enc_pa_dict[PADATA_PAC_OPTIONS],
+ asn1Spec=krb5_asn1.PA_PAC_OPTIONS())
+
+ self.assertElementEqual(pac_options, 'options',
+ sent_pac_options)
+
+ if PADATA_REQ_ENC_PA_REP in enc_pa_dict:
+ enc_pa_rep = enc_pa_dict[PADATA_REQ_ENC_PA_REP]
+
+ enc_pa_rep = self.der_decode(
+ enc_pa_rep,
+ asn1Spec=krb5_asn1.Checksum())
+
+ reply_key = kdc_exchange_dict['reply_key']
+ req_obj = kdc_exchange_dict['req_obj']
+ req_asn1Spec = kdc_exchange_dict['req_asn1Spec']
+
+ req_obj = self.der_encode(req_obj,
+ asn1Spec=req_asn1Spec())
+
+ checksum = enc_pa_rep['checksum']
+ ctype = enc_pa_rep['cksumtype']
+
+ reply_key.verify_checksum(KU_AS_REQ,
+ req_obj,
+ ctype,
+ checksum)
+ else:
+ if enc_padata is not None:
+ self.assertEqual(enc_padata, [])
+
+ if ticket_session_key is not None and encpart_session_key is not None:
+ self.assertEqual(ticket_session_key.etype,
+ encpart_session_key.etype)
+ self.assertEqual(ticket_session_key.key.contents,
+ encpart_session_key.key.contents)
+ if encpart_session_key is not None:
+ session_key = encpart_session_key
+ else:
+ session_key = ticket_session_key
+ ticket_creds = KerberosTicketCreds(
+ ticket,
+ session_key,
+ crealm=expected_crealm,
+ cname=expected_cname,
+ srealm=expected_srealm,
+ sname=expected_sname,
+ decryption_key=ticket_decryption_key,
+ ticket_private=ticket_private,
+ encpart_private=encpart_private)
+
+ if ticket_private is not None:
+ pac_data = self.get_ticket_pac(ticket_creds, expect_pac=expect_pac)
+ if expect_pac is True:
+ self.assertIsNotNone(pac_data)
+ elif expect_pac is False:
+ self.assertIsNone(pac_data)
+
+ if pac_data is not None:
+ self.check_pac_buffers(pac_data, kdc_exchange_dict)
+
+ expect_ticket_checksum = kdc_exchange_dict['expect_ticket_checksum']
+ expect_full_checksum = kdc_exchange_dict['expect_full_checksum']
+ if expect_ticket_checksum or expect_full_checksum:
+ self.assertIsNotNone(ticket_decryption_key)
+
+ if ticket_decryption_key is not None:
+ service_ticket = (rep_msg_type == KRB_TGS_REP
+ and not self.is_tgs_principal(expected_sname))
+ self.verify_ticket(ticket_creds, krbtgt_keys,
+ service_ticket=service_ticket,
+ expect_pac=expect_pac,
+ expect_ticket_checksum=expect_ticket_checksum
+ or self.tkt_sig_support,
+ expect_full_checksum=expect_full_checksum
+ or self.full_sig_support)
+
+ kdc_exchange_dict['rep_ticket_creds'] = ticket_creds
+
# Check the SIDs in a LOGON_INFO PAC buffer.
def check_logon_info_sids(self, logon_info_buffer, kdc_exchange_dict):
    """Verify the SIDs found in a LOGON_INFO PAC buffer.

    The expectations are taken from kdc_exchange_dict:
    'expected_domain_sid' and 'expected_sid' are compared against the
    domain SID and user SID in the buffer; 'expected_groups' /
    'unexpected_groups' drive the group-membership checks.  Group
    expectations are represented as a set of
    (sid string, SidType, attributes) tuples, which this method
    aggregates from the primary GID, the Extra SIDs, the Base RIDs and
    the Resource SIDs before comparing.
    """
    info3 = logon_info_buffer.info.info.info3
    logon_info = info3.base
    resource_groups = logon_info_buffer.info.info.resource_groups

    expected_groups = kdc_exchange_dict['expected_groups']
    unexpected_groups = kdc_exchange_dict['unexpected_groups']
    expected_domain_sid = kdc_exchange_dict['expected_domain_sid']
    expected_sid = kdc_exchange_dict['expected_sid']

    domain_sid = logon_info.domain_sid
    if expected_domain_sid is not None:
        self.assertEqual(expected_domain_sid, str(domain_sid))

    if expected_sid is not None:
        # The user SID is the domain SID plus the user RID.
        got_sid = f'{domain_sid}-{logon_info.rid}'
        self.assertEqual(expected_sid, got_sid)

    if expected_groups is None and unexpected_groups is None:
        # Nothing more to do.
        return

    # Check the SIDs in the PAC.

    # Form a representation of the PAC, containing at first the primary
    # GID.
    primary_sid = f'{domain_sid}-{logon_info.primary_gid}'
    pac_sids = {
        (primary_sid, self.SidType.PRIMARY_GID, None),
    }

    # Collect the Extra SIDs.  Their presence must agree with the
    # EXTRA_SIDS user flag.
    if info3.sids is not None:
        self.assertTrue(logon_info.user_flags & (
            netlogon.NETLOGON_EXTRA_SIDS),
                        'extra SIDs present, but EXTRA_SIDS flag not set')
        self.assertTrue(info3.sids, 'got empty SIDs')

        for sid_attr in info3.sids:
            got_sid = str(sid_attr.sid)
            if unexpected_groups is not None:
                self.assertNotIn(got_sid, unexpected_groups)

            pac_sid = (got_sid,
                       self.SidType.EXTRA_SID,
                       sid_attr.attributes)
            self.assertNotIn(pac_sid, pac_sids, 'got duplicated SID')
            pac_sids.add(pac_sid)
    else:
        self.assertFalse(logon_info.user_flags & (
            netlogon.NETLOGON_EXTRA_SIDS),
                         'no extra SIDs present, but EXTRA_SIDS flag set')

    # Collect the Base RIDs.  These are relative to the account domain SID.
    if logon_info.groups.rids is not None:
        self.assertTrue(logon_info.groups.rids, 'got empty RIDs')

        for group in logon_info.groups.rids:
            got_sid = f'{domain_sid}-{group.rid}'
            if unexpected_groups is not None:
                self.assertNotIn(got_sid, unexpected_groups)

            pac_sid = (got_sid, self.SidType.BASE_SID, group.attributes)
            self.assertNotIn(pac_sid, pac_sids, 'got duplicated SID')
            pac_sids.add(pac_sid)

    # Collect the Resource SIDs.  If the caller did not state an
    # expectation for the RESOURCE_GROUPS flag, derive it from whether
    # resource groups are actually present.
    expect_resource_groups_flag = kdc_exchange_dict[
        'expect_resource_groups_flag']
    expect_set_reason = ''
    expect_reset_reason = ''
    if expect_resource_groups_flag is None:
        expect_resource_groups_flag = (
            resource_groups.groups.rids is not None)
        expect_set_reason = 'resource groups present, but '
        expect_reset_reason = 'no resource groups present, but '

    if expect_resource_groups_flag:
        self.assertTrue(
            logon_info.user_flags & netlogon.NETLOGON_RESOURCE_GROUPS,
            f'{expect_set_reason}RESOURCE_GROUPS flag unexpectedly reset')
    else:
        self.assertFalse(
            logon_info.user_flags & netlogon.NETLOGON_RESOURCE_GROUPS,
            f'{expect_reset_reason}RESOURCE_GROUPS flag unexpectedly set')

    if resource_groups.groups.rids is not None:
        self.assertTrue(resource_groups.groups.rids, 'got empty RIDs')

        # Resource group RIDs are relative to the resource domain SID.
        resource_group_sid = resource_groups.domain_sid
        for resource_group in resource_groups.groups.rids:
            got_sid = f'{resource_group_sid}-{resource_group.rid}'
            if unexpected_groups is not None:
                self.assertNotIn(got_sid, unexpected_groups)

            pac_sid = (got_sid,
                       self.SidType.RESOURCE_SID,
                       resource_group.attributes)
            self.assertNotIn(pac_sid, pac_sids, 'got duplicated SID')
            pac_sids.add(pac_sid)

    # Compare the aggregated SIDs against the set of expected SIDs.
    if expected_groups is not None:
        if ... in expected_groups:
            # The caller is only interested in asserting the
            # presence of particular groups, and doesn't mind if
            # other groups are present as well.
            pac_sids.add(...)
            self.assertLessEqual(expected_groups, pac_sids,
                                 'expected groups')
        else:
            # The caller wants to make sure the groups match
            # exactly.
            self.assertEqual(expected_groups, pac_sids,
                             'expected != got')
+
def check_device_info(self, device_info, kdc_exchange_dict):
    """Verify a DEVICE_INFO PAC structure against expectations.

    The device RID must match the RID in the armor TGT's LOGON_INFO
    PAC buffer.  The device's group memberships (primary GID, groups,
    SIDs and domain groups) are aggregated into a set of
    (sid string, SidType, attributes) tuples — with each domain-group
    collection folded in as a frozenset — and compared against
    kdc_exchange_dict['expected_device_groups'].
    """
    armor_tgt = kdc_exchange_dict['armor_tgt']
    armor_auth_data = armor_tgt.ticket_private.get(
        'authorization-data')
    self.assertIsNotNone(armor_auth_data,
                         'missing authdata for armor TGT')
    armor_pac_data = self.get_pac(armor_auth_data)
    armor_pac = ndr_unpack(krb5pac.PAC_DATA, armor_pac_data)
    # Find the LOGON_INFO buffer of the armor TGT's PAC; the device
    # info must describe the same account.
    for armor_pac_buffer in armor_pac.buffers:
        if armor_pac_buffer.type == krb5pac.PAC_TYPE_LOGON_INFO:
            armor_info = armor_pac_buffer.info.info.info3
            break
    else:
        self.fail('missing logon info for armor PAC')
    self.assertEqual(armor_info.base.rid, device_info.rid)

    device_domain_sid = kdc_exchange_dict['expected_device_domain_sid']
    expected_device_groups = kdc_exchange_dict['expected_device_groups']
    if kdc_exchange_dict['expect_device_info']:
        self.assertIsNotNone(device_domain_sid)
        self.assertIsNotNone(expected_device_groups)

    if device_domain_sid is not None:
        self.assertEqual(device_domain_sid, str(device_info.domain_sid))
    else:
        device_domain_sid = str(device_info.domain_sid)

    # Check the device info SIDs.

    # A representation of the device info groups.
    primary_sid = f'{device_domain_sid}-{device_info.primary_gid}'
    got_sids = {
        (primary_sid, self.SidType.PRIMARY_GID, None),
    }

    # Collect the groups.  These RIDs are relative to the device's
    # domain SID.
    if device_info.groups.rids is not None:
        self.assertTrue(device_info.groups.rids, 'got empty RIDs')

        for group in device_info.groups.rids:
            got_sid = f'{device_domain_sid}-{group.rid}'

            device_sid = (got_sid, self.SidType.BASE_SID, group.attributes)
            self.assertNotIn(device_sid, got_sids, 'got duplicated SID')
            got_sids.add(device_sid)

    # Collect the SIDs.
    if device_info.sids is not None:
        self.assertTrue(device_info.sids, 'got empty SIDs')

        for sid_attr in device_info.sids:
            got_sid = str(sid_attr.sid)

            # Domain SIDs (S-1-5-21-… with five subauthorities) must
            # not appear here; they belong in domain_groups instead.
            in_a_domain = sid_attr.sid.num_auths == 5 and (
                str(sid_attr.sid).startswith('S-1-5-21-'))
            self.assertFalse(in_a_domain,
                             f'got unexpected SID for domain: {got_sid} '
                             f'(should be in device_info.domain_groups)')

            device_sid = (got_sid,
                          self.SidType.EXTRA_SID,
                          sid_attr.attributes)
            self.assertNotIn(device_sid, got_sids, 'got duplicated SID')
            got_sids.add(device_sid)

    # Collect the domain groups.
    if device_info.domain_groups is not None:
        self.assertTrue(device_info.domain_groups, 'got empty domain groups')

        for domain_group in device_info.domain_groups:
            self.assertTrue(domain_group, 'got empty domain group')

            got_domain_sids = set()

            resource_group_sid = domain_group.domain_sid

            # Conversely, only domain SIDs (S-1-5-21-… with four
            # subauthorities before the RID) may appear here.
            in_a_domain = resource_group_sid.num_auths == 4 and (
                str(resource_group_sid).startswith('S-1-5-21-'))
            self.assertTrue(
                in_a_domain,
                f'got unexpected domain SID for non-domain: {resource_group_sid} '
                f'(should be in device_info.sids)')

            for resource_group in domain_group.groups.rids:
                got_sid = f'{resource_group_sid}-{resource_group.rid}'

                device_sid = (got_sid,
                              self.SidType.RESOURCE_SID,
                              resource_group.attributes)
                self.assertNotIn(device_sid, got_domain_sids, 'got duplicated SID')
                got_domain_sids.add(device_sid)

            # Each domain's resource groups are folded in as a single
            # (hashable) frozenset member of the outer set.
            got_domain_sids = frozenset(got_domain_sids)
            self.assertNotIn(got_domain_sids, got_sids)
            got_sids.add(got_domain_sids)

    # Compare the aggregated device SIDs against the set of expected device
    # SIDs.
    if expected_device_groups is not None:
        self.assertEqual(expected_device_groups, got_sids,
                         'expected != got')
+
def check_pac_buffers(self, pac_data, kdc_exchange_dict):
    """Verify the PAC of a ticket returned by the KDC.

    First computes the set of PAC buffer types that should be present,
    based on the exchange expectations in kdc_exchange_dict (claims,
    device info, checksums, PAC attributes, requester SID, credential
    info, …), and compares it with the buffer types actually found.
    Then each individual buffer's contents are checked against the
    corresponding expectations.
    """
    pac = ndr_unpack(krb5pac.PAC_DATA, pac_data)

    rep_msg_type = kdc_exchange_dict['rep_msg_type']
    armor_tgt = kdc_exchange_dict['armor_tgt']

    # Compound identity only applies to an armored (FAST) TGS request.
    compound_id = rep_msg_type == KRB_TGS_REP and armor_tgt is not None

    expected_sname = kdc_exchange_dict['expected_sname']
    expect_client_claims = kdc_exchange_dict['expect_client_claims']
    expect_device_info = kdc_exchange_dict['expect_device_info']
    expect_device_claims = kdc_exchange_dict['expect_device_claims']

    # Buffers always expected in a PAC.
    expected_types = [krb5pac.PAC_TYPE_LOGON_INFO,
                      krb5pac.PAC_TYPE_SRV_CHECKSUM,
                      krb5pac.PAC_TYPE_KDC_CHECKSUM,
                      krb5pac.PAC_TYPE_LOGON_NAME,
                      krb5pac.PAC_TYPE_UPN_DNS_INFO]

    # Determine whether the cname-in-addl-tkt KDC option was sent by
    # locating its bit position within the option string.
    kdc_options = kdc_exchange_dict['kdc_options']
    pos = len(tuple(krb5_asn1.KDCOptions('cname-in-addl-tkt'))) - 1
    constrained_delegation = (pos < len(kdc_options)
                              and kdc_options[pos] == '1')
    if constrained_delegation:
        expected_types.append(krb5pac.PAC_TYPE_CONSTRAINED_DELEGATION)

    # Buffers only required under strict checking, and buffers whose
    # presence is not checked at all.
    require_strict = set()
    unchecked = set()
    if not self.tkt_sig_support:
        require_strict.add(krb5pac.PAC_TYPE_TICKET_CHECKSUM)
    if not self.full_sig_support:
        require_strict.add(krb5pac.PAC_TYPE_FULL_CHECKSUM)

    expected_client_claims = kdc_exchange_dict['expected_client_claims']
    unexpected_client_claims = kdc_exchange_dict[
        'unexpected_client_claims']

    if self.kdc_claims_support and expect_client_claims:
        expected_types.append(krb5pac.PAC_TYPE_CLIENT_CLAIMS_INFO)
    else:
        # Claim expectations make no sense if no claims buffer is
        # expected.
        self.assertFalse(
            expected_client_claims,
            'expected client claims, but client claims not expected in '
            'PAC')
        self.assertFalse(
            unexpected_client_claims,
            'unexpected client claims, but client claims not expected in '
            'PAC')

    if expect_client_claims is None:
        unchecked.add(krb5pac.PAC_TYPE_CLIENT_CLAIMS_INFO)

    expected_device_claims = kdc_exchange_dict['expected_device_claims']
    unexpected_device_claims = kdc_exchange_dict['unexpected_device_claims']

    expected_device_groups = kdc_exchange_dict['expected_device_groups']

    if (self.kdc_claims_support and self.kdc_compound_id_support
            and expect_device_claims and compound_id):
        expected_types.append(krb5pac.PAC_TYPE_DEVICE_CLAIMS_INFO)
    else:
        self.assertFalse(
            expect_device_claims,
            'expected device claims buffer, but device claims not '
            'expected in PAC')
        self.assertFalse(
            expected_device_claims,
            'expected device claims, but device claims not expected in '
            'PAC')
        self.assertFalse(
            unexpected_device_claims,
            'unexpected device claims, but device claims not expected in '
            'PAC')

    if expect_device_claims is None and compound_id:
        unchecked.add(krb5pac.PAC_TYPE_DEVICE_CLAIMS_INFO)

    if self.kdc_compound_id_support and compound_id and expect_device_info:
        expected_types.append(krb5pac.PAC_TYPE_DEVICE_INFO)
    else:
        self.assertFalse(expect_device_info,
                         'expected device info with no armor TGT or '
                         'for non-TGS request')
        self.assertFalse(expected_device_groups,
                         'expected device groups, but device info not '
                         'expected in PAC')

    if expect_device_info is None and compound_id:
        unchecked.add(krb5pac.PAC_TYPE_DEVICE_INFO)

    # Service tickets (but not TGTs) carry ticket and full checksums.
    if rep_msg_type == KRB_TGS_REP:
        if not self.is_tgs_principal(expected_sname):
            expected_types.append(krb5pac.PAC_TYPE_TICKET_CHECKSUM)
            expected_types.append(krb5pac.PAC_TYPE_FULL_CHECKSUM)

    expect_extra_pac_buffers = self.is_tgs(expected_sname)

    expect_pac_attrs = kdc_exchange_dict['expect_pac_attrs']

    if expect_pac_attrs:
        expect_pac_attrs_pac_request = kdc_exchange_dict[
            'expect_pac_attrs_pac_request']
    else:
        expect_pac_attrs_pac_request = kdc_exchange_dict[
            'pac_request']

        if expect_pac_attrs is None:
            if self.expect_extra_pac_buffers:
                expect_pac_attrs = expect_extra_pac_buffers
            else:
                require_strict.add(krb5pac.PAC_TYPE_ATTRIBUTES_INFO)
    if expect_pac_attrs:
        expected_types.append(krb5pac.PAC_TYPE_ATTRIBUTES_INFO)

    expect_requester_sid = kdc_exchange_dict['expect_requester_sid']
    expected_requester_sid = kdc_exchange_dict['expected_requester_sid']

    if expect_requester_sid is None:
        if self.expect_extra_pac_buffers:
            expect_requester_sid = expect_extra_pac_buffers
        else:
            require_strict.add(krb5pac.PAC_TYPE_REQUESTER_SID)
    if expected_requester_sid is not None:
        expect_requester_sid = True
    if expect_requester_sid:
        expected_types.append(krb5pac.PAC_TYPE_REQUESTER_SID)

    # PKINIT requests get an NTLM credential-info buffer.
    sent_pk_as_req = self.sent_pk_as_req(kdc_exchange_dict) or (
        self.sent_pk_as_req_win2k(kdc_exchange_dict))
    if sent_pk_as_req:
        expected_types.append(krb5pac.PAC_TYPE_CREDENTIAL_INFO)

    expected_extra_pac_buffers = kdc_exchange_dict['expected_extra_pac_buffers']
    if expected_extra_pac_buffers is not None:
        expected_types.extend(expected_extra_pac_buffers)

    buffer_types = [pac_buffer.type
                    for pac_buffer in pac.buffers]
    self.assertSequenceElementsEqual(
        expected_types, buffer_types,
        require_ordered=False,
        require_strict=require_strict,
        unchecked=unchecked)

    expected_account_name = kdc_exchange_dict['expected_account_name']
    expected_sid = kdc_exchange_dict['expected_sid']

    # If the caller stated no expectation, require the UPN_DNS_INFO
    # extension whenever an account name or SID is expected (they are
    # carried in the extension).
    expect_upn_dns_info_ex = kdc_exchange_dict['expect_upn_dns_info_ex']
    if expect_upn_dns_info_ex is None and (
            expected_account_name is not None
            or expected_sid is not None):
        expect_upn_dns_info_ex = True

    # Now check the contents of each buffer.
    for pac_buffer in pac.buffers:
        if pac_buffer.type == krb5pac.PAC_TYPE_CONSTRAINED_DELEGATION:
            expected_proxy_target = kdc_exchange_dict[
                'expected_proxy_target']
            expected_transited_services = kdc_exchange_dict[
                'expected_transited_services']

            delegation_info = pac_buffer.info.info

            self.assertEqual(expected_proxy_target,
                             str(delegation_info.proxy_target))

            transited_services = list(map(
                str, delegation_info.transited_services))
            self.assertEqual(expected_transited_services,
                             transited_services)

        elif pac_buffer.type == krb5pac.PAC_TYPE_LOGON_NAME:
            expected_cname = kdc_exchange_dict['expected_cname']
            account_name = '/'.join(expected_cname['name-string'])

            self.assertEqual(account_name, pac_buffer.info.account_name)

        elif pac_buffer.type == krb5pac.PAC_TYPE_LOGON_INFO:
            info3 = pac_buffer.info.info.info3
            logon_info = info3.base

            if expected_account_name is not None:
                self.assertEqual(expected_account_name,
                                 str(logon_info.account_name))

            self.check_logon_info_sids(pac_buffer, kdc_exchange_dict)

        elif pac_buffer.type == krb5pac.PAC_TYPE_UPN_DNS_INFO:
            upn_dns_info = pac_buffer.info
            upn_dns_info_ex = upn_dns_info.ex

            expected_realm = kdc_exchange_dict['expected_crealm']
            self.assertEqual(expected_realm,
                             upn_dns_info.dns_domain_name)

            expected_upn_name = kdc_exchange_dict['expected_upn_name']
            if expected_upn_name is not None:
                self.assertEqual(expected_upn_name,
                                 upn_dns_info.upn_name)

            if expect_upn_dns_info_ex:
                self.assertIsNotNone(upn_dns_info_ex)

            if upn_dns_info_ex is not None:
                if expected_account_name is not None:
                    self.assertEqual(expected_account_name,
                                     upn_dns_info_ex.samaccountname)

                if expected_sid is not None:
                    self.assertEqual(expected_sid,
                                     str(upn_dns_info_ex.objectsid))

        elif (pac_buffer.type == krb5pac.PAC_TYPE_ATTRIBUTES_INFO
              and expect_pac_attrs):
            attr_info = pac_buffer.info

            self.assertEqual(2, attr_info.flags_length)

            flags = attr_info.flags

            # Bit 0: the client explicitly requested a PAC;
            # bit 1: the PAC was given without an explicit request.
            requested_pac = bool(flags & 1)
            given_pac = bool(flags & 2)

            self.assertEqual(expect_pac_attrs_pac_request is True,
                             requested_pac)
            self.assertEqual(expect_pac_attrs_pac_request is None,
                             given_pac)

        elif (pac_buffer.type == krb5pac.PAC_TYPE_REQUESTER_SID
              and expect_requester_sid):
            requester_sid = pac_buffer.info.sid

            if expected_requester_sid is None:
                expected_requester_sid = expected_sid
            if expected_sid is not None:
                self.assertEqual(expected_requester_sid,
                                 str(requester_sid))

        elif pac_buffer.type in {krb5pac.PAC_TYPE_CLIENT_CLAIMS_INFO,
                                 krb5pac.PAC_TYPE_DEVICE_CLAIMS_INFO}:
            remaining = pac_buffer.info.remaining

            # Both claim buffer types share the same checking logic;
            # select the matching expectations.
            if pac_buffer.type == krb5pac.PAC_TYPE_CLIENT_CLAIMS_INFO:
                claims_type = 'client claims'
                expected_claims = expected_client_claims
                unexpected_claims = unexpected_client_claims
            else:
                claims_type = 'device claims'
                expected_claims = expected_device_claims
                unexpected_claims = unexpected_device_claims

            if not remaining:
                # Windows may produce an empty claims buffer.
                self.assertFalse(expected_claims,
                                 f'expected {claims_type}, but the PAC '
                                 f'buffer was empty')
                continue

            if expected_claims:
                empty_msg = f', and {claims_type} were expected'
            else:
                empty_msg = f' for {claims_type} (should be missing)'

            claims_metadata_ndr = ndr_unpack(claims.CLAIMS_SET_METADATA_NDR,
                                             remaining)
            claims_metadata = claims_metadata_ndr.claims.metadata
            self.assertIsNotNone(claims_metadata,
                                 f'got empty CLAIMS_SET_METADATA_NDR '
                                 f'inner structure {empty_msg}')

            self.assertIsNotNone(claims_metadata.claims_set,
                                 f'got empty CLAIMS_SET_METADATA '
                                 f'structure {empty_msg}')

            # Check that the compression format agrees with the
            # uncompressed size thresholds.
            uncompressed_size = claims_metadata.uncompressed_claims_set_size
            compression_format = claims_metadata.compression_format

            if uncompressed_size < (
                    claims.CLAIM_LOWER_COMPRESSION_THRESHOLD):
                self.assertEqual(claims.CLAIMS_COMPRESSION_FORMAT_NONE,
                                 compression_format,
                                 f'{claims_type} unexpectedly '
                                 f'compressed ({uncompressed_size} '
                                 f'bytes uncompressed)')
            elif uncompressed_size >= (
                    claims.CLAIM_UPPER_COMPRESSION_THRESHOLD):
                self.assertEqual(
                    claims.CLAIMS_COMPRESSION_FORMAT_XPRESS_HUFF,
                    compression_format,
                    f'{claims_type} unexpectedly not compressed '
                    f'({uncompressed_size} bytes uncompressed)')

            claims_set = claims_metadata.claims_set.claims.claims
            self.assertIsNotNone(claims_set,
                                 f'got empty CLAIMS_SET_NDR inner '
                                 f'structure {empty_msg}')

            claims_arrays = claims_set.claims_arrays
            self.assertIsNotNone(claims_arrays,
                                 f'got empty CLAIMS_SET structure '
                                 f'{empty_msg}')
            self.assertGreater(len(claims_arrays), 0,
                               f'got empty claims array {empty_msg}')
            self.assertEqual(len(claims_arrays),
                             claims_set.claims_array_count,
                             f'{claims_type} arrays size mismatch')

            got_claims = {}

            for claims_array in claims_arrays:
                claim_entries = claims_array.claim_entries
                self.assertIsNotNone(claim_entries,
                                     f'got empty CLAIMS_ARRAY structure '
                                     f'{empty_msg}')
                self.assertGreater(len(claim_entries), 0,
                                   f'got empty claim entries array '
                                   f'{empty_msg}')
                self.assertEqual(len(claim_entries),
                                 claims_array.claims_count,
                                 f'{claims_type} entries array size '
                                 f'mismatch')

                for entry in claim_entries:
                    if unexpected_claims is not None:
                        self.assertNotIn(entry.id, unexpected_claims,
                                         f'got unexpected {claims_type} '
                                         f'in PAC')
                    if expected_claims is None:
                        continue

                    # Only claims the caller asked about are collected.
                    expected_claim = expected_claims.get(entry.id)
                    if expected_claim is None:
                        continue

                    self.assertNotIn(entry.id, got_claims,
                                     f'got duplicate {claims_type}')

                    self.assertIsNotNone(entry.values.values,
                                         f'got {claims_type} with no '
                                         f'values')
                    self.assertGreater(len(entry.values.values), 0,
                                       f'got empty {claims_type} values '
                                       f'array')
                    self.assertEqual(len(entry.values.values),
                                     entry.values.value_count,
                                     f'{claims_type} values array size '
                                     f'mismatch')

                    expected_claim_values = expected_claim.get('values')
                    self.assertIsNotNone(expected_claim_values,
                                         f'got expected {claims_type} '
                                         f'with no values')

                    # Convert to the same container type as the
                    # expected values so the comparison below works.
                    values = type(expected_claim_values)(
                        entry.values.values)

                    got_claims[entry.id] = {
                        'source_type': claims_array.claims_source_type,
                        'type': entry.type,
                        'values': values,
                    }

            self.assertEqual(expected_claims, got_claims or None,
                             f'{claims_type} did not match expectations')

        elif pac_buffer.type == krb5pac.PAC_TYPE_DEVICE_INFO:
            device_info = pac_buffer.info.info

            self.check_device_info(device_info, kdc_exchange_dict)

        elif pac_buffer.type == krb5pac.PAC_TYPE_CREDENTIAL_INFO:
            credential_info = pac_buffer.info

            expected_etype = self.expected_etype(kdc_exchange_dict)

            self.assertEqual(0, credential_info.version)
            self.assertEqual(expected_etype,
                             credential_info.encryption_type)

            # The credential data is encrypted to the PKINIT reply key.
            encrypted_data = credential_info.encrypted_data
            reply_key = kdc_exchange_dict['reply_key']

            data = reply_key.decrypt(KU_NON_KERB_SALT, encrypted_data)

            credential_data_ndr = ndr_unpack(
                krb5pac.PAC_CREDENTIAL_DATA_NDR, data)

            credential_data = credential_data_ndr.ctr.data

            self.assertEqual(1, credential_data.credential_count)
            self.assertEqual(credential_data.credential_count,
                             len(credential_data.credentials))

            package = credential_data.credentials[0]
            self.assertEqual('NTLM', str(package.package_name))

            ntlm_blob = bytes(package.credential)

            ntlm_package = ndr_unpack(krb5pac.PAC_CREDENTIAL_NTLM_SECPKG,
                                      ntlm_blob)

            self.assertEqual(0, ntlm_package.version)
            self.assertEqual(krb5pac.PAC_CREDENTIAL_NTLM_HAS_NT_HASH,
                             ntlm_package.flags)

            # The NT hash must match the account's password; the LM
            # hash is expected to be all zeros.
            creds = kdc_exchange_dict['creds']
            nt_password = bytes(ntlm_package.nt_password.hash)
            self.assertEqual(creds.get_nt_hash(), nt_password)

            lm_password = bytes(ntlm_package.lm_password.hash)
            self.assertEqual(bytes(16), lm_password)
+
def generic_check_kdc_error(self,
                            kdc_exchange_dict,
                            callback_dict,
                            rep,
                            inner=False):
    """Check a KRB-ERROR reply from the KDC.

    Verifies the message header fields against the expectations in
    kdc_exchange_dict, then — if e-data is expected — decodes it either
    as a KERB-ERROR-DATA extended status or as METHOD-DATA (pre-auth
    padata), checking whichever form was received.

    :param kdc_exchange_dict: expectations for this exchange
    :param callback_dict: passed through to check_rep_padata
    :param rep: the decoded KRB-ERROR message
    :param inner: True when checking an error embedded in a FAST
        response rather than the outer reply
    :return: the KRB-ERROR message that was checked
    """

    rep_msg_type = kdc_exchange_dict['rep_msg_type']

    expected_anon = kdc_exchange_dict['expected_anon']
    expected_srealm = kdc_exchange_dict['expected_srealm']
    expected_sname = kdc_exchange_dict['expected_sname']
    expected_error_mode = kdc_exchange_dict['expected_error_mode']

    sent_fast = self.sent_fast(kdc_exchange_dict)

    fast_armor_type = kdc_exchange_dict['fast_armor_type']

    self.assertElementEqual(rep, 'pvno', 5)
    self.assertElementEqual(rep, 'msg-type', KRB_ERROR)
    error_code = self.getElementValue(rep, 'error-code')
    # The error code must be one of the accepted codes for this test.
    self.assertIn(error_code, expected_error_mode)
    if self.strict_checking:
        self.assertElementMissing(rep, 'ctime')
        self.assertElementMissing(rep, 'cusec')
    self.assertElementPresent(rep, 'stime')
    self.assertElementPresent(rep, 'susec')
    # error-code checked above
    if expected_anon and not inner:
        expected_cname = self.PrincipalName_create(
            name_type=NT_WELLKNOWN,
            names=['WELLKNOWN', 'ANONYMOUS'])
        self.assertElementEqualPrincipal(rep, 'cname', expected_cname)
    elif self.strict_checking:
        self.assertElementMissing(rep, 'cname')
    if self.strict_checking:
        self.assertElementMissing(rep, 'crealm')
        self.assertElementEqualUTF8(rep, 'realm', expected_srealm)
        self.assertElementEqualPrincipal(rep, 'sname', expected_sname)
        self.assertElementMissing(rep, 'e-text')
    expect_status = kdc_exchange_dict['expect_status']
    expected_status = kdc_exchange_dict['expected_status']
    expect_edata = kdc_exchange_dict['expect_edata']
    if expect_edata is None:
        # Derive the default: e-data is not present for unknown
        # critical FAST options, for non-AP-REQ FAST armor, or in an
        # inner (FAST-wrapped) error.
        expect_edata = (error_code != KDC_ERR_UNKNOWN_CRITICAL_FAST_OPTIONS
                        and (not sent_fast or fast_armor_type is None
                             or fast_armor_type == FX_FAST_ARMOR_AP_REQUEST)
                        and not inner)
    if inner and expect_edata is self.expect_padata_outer:
        expect_edata = False
    if not expect_edata:
        self.assertFalse(expect_status)
        if self.strict_checking or expect_status is False:
            self.assertElementMissing(rep, 'e-data')
        return rep
    edata = self.getElementValue(rep, 'e-data')
    if self.strict_checking or expect_status:
        self.assertIsNotNone(edata)
    if edata is not None:
        try:
            error_data = self.der_decode(
                edata,
                asn1Spec=krb5_asn1.KERB_ERROR_DATA())
        except PyAsn1Error:
            # Not KERB-ERROR-DATA; the e-data should then be
            # METHOD-DATA carrying pre-authentication padata.
            if expect_status:
                # The test requires that the KDC be declared to support
                # NTSTATUS values in e-data to proceed.
                self.assertTrue(
                    self.expect_nt_status,
                    'expected status code (which, according to '
                    'EXPECT_NT_STATUS=0, the KDC does not support)')

                self.fail('expected to get status code')

            rep_padata = self.der_decode(
                edata, asn1Spec=krb5_asn1.METHOD_DATA())
            self.assertGreater(len(rep_padata), 0)

            if sent_fast:
                # A FAST exchange wraps the real padata in a single
                # PA-FX-FAST element; unwrap it first.
                self.assertEqual(1, len(rep_padata))
                rep_pa_dict = self.get_pa_dict(rep_padata)
                self.assertIn(PADATA_FX_FAST, rep_pa_dict)

                armor_key = kdc_exchange_dict['armor_key']
                self.assertIsNotNone(armor_key)
                fast_response = self.check_fx_fast_data(
                    kdc_exchange_dict,
                    rep_pa_dict[PADATA_FX_FAST],
                    armor_key,
                    expect_strengthen_key=False)

                rep_padata = fast_response['padata']

            etype_info2 = self.check_rep_padata(kdc_exchange_dict,
                                                callback_dict,
                                                rep_padata,
                                                error_code)

            kdc_exchange_dict['preauth_etype_info2'] = etype_info2
        else:
            # Successfully decoded as KERB-ERROR-DATA: an extended
            # NTSTATUS error.
            self.assertTrue(self.expect_nt_status,
                            'got status code, but EXPECT_NT_STATUS=0')

            if expect_status is not None:
                self.assertTrue(expect_status,
                                'got unexpected status code')

            self.assertEqual(KERB_ERR_TYPE_EXTENDED,
                             error_data['data-type'])

            extended_error = error_data['data-value']

            # 12 bytes: 4-byte status, 4 reserved, 4-byte flags.
            self.assertEqual(12, len(extended_error))

            status = int.from_bytes(extended_error[:4], 'little')
            flags = int.from_bytes(extended_error[8:], 'little')

            self.assertEqual(expected_status, status)

            if rep_msg_type == KRB_TGS_REP:
                self.assertEqual(3, flags)
            else:
                self.assertEqual(1, flags)

    return rep
+
def check_reply_padata(self,
                       kdc_exchange_dict,
                       callback_dict,
                       encpart,
                       rep_padata):
    """Check the padata of a successful AS-REP or TGS-REP.

    Computes the padata types expected for this exchange (FAST,
    PKINIT, or ETYPE-INFO2 for AES enctypes) and compares them against
    what was received, then validates the contents of any ETYPE-INFO2
    entry (etype, salt, s2kparams).
    """
    expected_patypes = ()

    sent_fast = self.sent_fast(kdc_exchange_dict)
    rep_msg_type = kdc_exchange_dict['rep_msg_type']

    if sent_fast:
        expected_patypes += (PADATA_FX_FAST,)
    elif rep_msg_type == KRB_AS_REP:
        if self.sent_pk_as_req(kdc_exchange_dict):
            expected_patypes += PADATA_PK_AS_REP,
        elif self.sent_pk_as_req_win2k(kdc_exchange_dict):
            expected_patypes += PADATA_PK_AS_REP_19,
        else:
            chosen_etype = self.getElementValue(encpart, 'etype')
            self.assertIsNotNone(chosen_etype)

            if chosen_etype in {kcrypto.Enctype.AES256,
                                kcrypto.Enctype.AES128}:
                expected_patypes += (PADATA_ETYPE_INFO2,)

            preauth_key = kdc_exchange_dict['preauth_key']
            self.assertIsInstance(preauth_key, Krb5EncryptionKey)
            # RC4 replies may legitimately omit padata entirely.
            if preauth_key.etype == kcrypto.Enctype.RC4 and rep_padata is None:
                rep_padata = ()
    elif rep_msg_type == KRB_TGS_REP:
        if expected_patypes == () and rep_padata is None:
            rep_padata = ()

    if not self.strict_checking and rep_padata is None:
        rep_padata = ()

    self.assertIsNotNone(rep_padata)
    got_patypes = tuple(pa['padata-type'] for pa in rep_padata)
    self.assertSequenceElementsEqual(expected_patypes, got_patypes,
                                     # Windows does not add this.
                                     unchecked={PADATA_PKINIT_KX})

    if len(expected_patypes) == 0:
        return None

    pa_dict = self.get_pa_dict(rep_padata)

    # NOTE(review): `chosen_etype` is bound only on the non-PKINIT
    # AS-REP path above; ETYPE-INFO2 appears to be expected only on
    # that same path — confirm before relying on it elsewhere.
    etype_info2 = pa_dict.get(PADATA_ETYPE_INFO2)
    if etype_info2 is not None:
        etype_info2 = self.der_decode(etype_info2,
                                      asn1Spec=krb5_asn1.ETYPE_INFO2())
        self.assertEqual(len(etype_info2), 1)
        elem = etype_info2[0]

        e = self.getElementValue(elem, 'etype')
        self.assertEqual(e, chosen_etype)
        salt = self.getElementValue(elem, 'salt')
        self.assertIsNotNone(salt)
        expected_salt = kdc_exchange_dict['expected_salt']
        if expected_salt is not None:
            self.assertEqual(salt, expected_salt)
        s2kparams = self.getElementValue(elem, 's2kparams')
        if self.strict_checking:
            self.assertIsNone(s2kparams)
+
+ @staticmethod
+ def greatest_common_etype(etypes, proposed_etypes):
+ return max(filter(lambda e: e in etypes, proposed_etypes),
+ default=None)
+
+ @staticmethod
+ def first_common_etype(etypes, proposed_etypes):
+ return next(filter(lambda e: e in etypes, proposed_etypes), None)
+
def supported_aes_rc4_etypes(self, kdc_exchange_dict):
    """Partition the account's supported enctypes into AES and RC4 sets.

    RC4 is only included when the exchange's 'rc4_support' flag is set.
    Returns a pair (aes_etypes, rc4_etypes) of (possibly empty) sets.
    """
    creds = kdc_exchange_dict['creds']
    supported_etypes = self.get_default_enctypes(creds)

    rc4_support = kdc_exchange_dict['rc4_support']

    aes_etypes = {etype
                  for etype in (kcrypto.Enctype.AES256,
                                kcrypto.Enctype.AES128)
                  if etype in supported_etypes}

    rc4_etypes = set()
    if rc4_support and kcrypto.Enctype.RC4 in supported_etypes:
        rc4_etypes = {kcrypto.Enctype.RC4}

    return aes_etypes, rc4_etypes
+
def greatest_aes_rc4_etypes(self, kdc_exchange_dict):
    """Return (aes, rc4): the greatest proposed AES and RC4 etypes.

    Each element is the greatest etype from the request body's proposed
    etypes that the account supports in that family, or None if there
    is no overlap.
    """
    proposed_etypes = kdc_exchange_dict['req_body']['etype']

    aes_etypes, rc4_etypes = self.supported_aes_rc4_etypes(
        kdc_exchange_dict)

    return (self.greatest_common_etype(aes_etypes, proposed_etypes),
            self.greatest_common_etype(rc4_etypes, proposed_etypes))
+
def expected_etype(self, kdc_exchange_dict):
    """Return the etype the KDC is expected to select.

    That is the first etype proposed in the request body that the
    account supports (AES or RC4), or None if there is no overlap.
    """
    proposed_etypes = kdc_exchange_dict['req_body']['etype']

    aes_etypes, rc4_etypes = self.supported_aes_rc4_etypes(
        kdc_exchange_dict)

    supported = aes_etypes | rc4_etypes
    return self.first_common_etype(supported, proposed_etypes)
+
    def check_rep_padata(self,
                         kdc_exchange_dict,
                         callback_dict,
                         rep_padata,
                         error_code):
        """Validate the padata elements of a KDC reply (or KRB-ERROR).

        Builds the set of padata types we expect the KDC to have sent for
        this exchange, compares it against what was actually received, and
        then checks the contents of each recognised element (ETYPE-INFO(2),
        encrypted challenge, FAST cookie/error, PAC options, freshness
        token, ...).

        Returns the decoded ETYPE-INFO2 sequence, or None.
        """
        rep_msg_type = kdc_exchange_dict['rep_msg_type']

        sent_fast = self.sent_fast(kdc_exchange_dict)
        sent_enc_challenge = self.sent_enc_challenge(kdc_exchange_dict)

        # A TGS exchange in this framework is always FAST-armored.
        if rep_msg_type == KRB_TGS_REP:
            self.assertTrue(sent_fast)

        rc4_support = kdc_exchange_dict['rc4_support']

        expected_aes, expected_rc4 = self.greatest_aes_rc4_etypes(
            kdc_exchange_dict)

        # Work out which etypes should appear in ETYPE-INFO2, and whether a
        # legacy ETYPE-INFO element is expected (only when RC4 is the sole
        # candidate and the reply is an error).
        expect_etype_info2 = ()
        expect_etype_info = False
        if expected_aes is not None:
            expect_etype_info2 += (expected_aes,)
        if expected_rc4 is not None:
            if error_code != 0:
                expect_etype_info2 += (expected_rc4,)
            if expected_aes is None:
                expect_etype_info = True

        if expect_etype_info:
            self.assertGreater(len(expect_etype_info2), 0)

        sent_pac_options = self.get_sent_pac_options(kdc_exchange_dict)

        check_patypes = kdc_exchange_dict['check_patypes']
        if check_patypes:
            # Assemble the full tuple of padata types the KDC should return.
            expected_patypes = ()
            if sent_fast and error_code != 0:
                expected_patypes += (PADATA_FX_ERROR,)
                expected_patypes += (PADATA_FX_COOKIE,)

            if rep_msg_type == KRB_TGS_REP:
                if ('1' in sent_pac_options
                        and error_code not in (0, KDC_ERR_GENERIC)):
                    expected_patypes += (PADATA_PAC_OPTIONS,)
            elif error_code != KDC_ERR_GENERIC:
                if expect_etype_info:
                    expected_patypes += (PADATA_ETYPE_INFO,)
                if len(expect_etype_info2) != 0:
                    expected_patypes += (PADATA_ETYPE_INFO2,)

                sent_freshness = self.sent_freshness(kdc_exchange_dict)

                if error_code not in (KDC_ERR_PREAUTH_FAILED, KDC_ERR_SKEW,
                                      KDC_ERR_POLICY, KDC_ERR_CLIENT_REVOKED):
                    if sent_fast:
                        expected_patypes += (PADATA_ENCRYPTED_CHALLENGE,)
                    else:
                        expected_patypes += (PADATA_ENC_TIMESTAMP,)

                    if not sent_enc_challenge:
                        expected_patypes += (PADATA_PK_AS_REQ,)
                        if not sent_freshness:
                            expected_patypes += (PADATA_PK_AS_REP_19,)

                if sent_freshness:
                    expected_patypes += PADATA_AS_FRESHNESS,

                if (self.kdc_fast_support
                        and not sent_fast
                        and not sent_enc_challenge):
                    expected_patypes += (PADATA_FX_FAST,)
                    expected_patypes += (PADATA_FX_COOKIE,)

            # Types in require_strict must match exactly only under strict
            # checking; they are otherwise allowed to be absent/present.
            require_strict = {PADATA_FX_COOKIE,
                              PADATA_FX_FAST,
                              PADATA_PAC_OPTIONS,
                              PADATA_PK_AS_REP_19,
                              PADATA_PK_AS_REQ,
                              PADATA_PKINIT_KX,
                              PADATA_GSS}
            strict_edata_checking = kdc_exchange_dict['strict_edata_checking']
            if not strict_edata_checking:
                require_strict.add(PADATA_ETYPE_INFO2)
                require_strict.add(PADATA_ENCRYPTED_CHALLENGE)

            got_patypes = tuple(pa['padata-type'] for pa in rep_padata)
            self.assertSequenceElementsEqual(expected_patypes, got_patypes,
                                             require_strict=require_strict,
                                             unchecked={PADATA_PW_SALT})

            if not expected_patypes:
                return None

        pa_dict = self.get_pa_dict(rep_padata)

        # These elements, when present in a reply, must be empty.
        enc_timestamp = pa_dict.get(PADATA_ENC_TIMESTAMP)
        if enc_timestamp is not None:
            self.assertEqual(len(enc_timestamp), 0)

        pk_as_req = pa_dict.get(PADATA_PK_AS_REQ)
        if pk_as_req is not None:
            self.assertEqual(len(pk_as_req), 0)

        pk_as_rep19 = pa_dict.get(PADATA_PK_AS_REP_19)
        if pk_as_rep19 is not None:
            self.assertEqual(len(pk_as_rep19), 0)

        freshness_token = pa_dict.get(PADATA_AS_FRESHNESS)
        if freshness_token is not None:
            # The token starts with two zero bytes, followed by DER-encoded
            # EncryptedData.
            self.assertEqual(bytes(2), freshness_token[:2])

            freshness = self.der_decode(freshness_token[2:],
                                        asn1Spec=krb5_asn1.EncryptedData())

            krbtgt_creds = self.get_krbtgt_creds()
            krbtgt_key = self.TicketDecryptionKey_from_creds(krbtgt_creds)

            self.assertElementEqual(freshness, 'etype', krbtgt_key.etype)
            self.assertElementKVNO(freshness, 'kvno', krbtgt_key.kvno)

            # Decrypt the freshness token.
            ts_enc = krbtgt_key.decrypt(KU_AS_FRESHNESS,
                                        freshness['cipher'])

            # Ensure that we can decode it as PA-ENC-TS-ENC.
            ts_enc = self.der_decode(ts_enc,
                                     asn1Spec=krb5_asn1.PA_ENC_TS_ENC())
            freshness_time = self.get_EpochFromKerberosTime(
                ts_enc['patimestamp'])
            freshness_time += ts_enc['pausec'] / 1e6

            # Ensure that it is reasonably close to the current time (within
            # five minutes, to allow for clock skew).
            current_time = datetime.datetime.now(
                datetime.timezone.utc).timestamp()
            self.assertLess(current_time - 5 * 60, freshness_time)
            self.assertLess(freshness_time, current_time + 5 * 60)

            kdc_exchange_dict['freshness_token'] = freshness_token

        fx_fast = pa_dict.get(PADATA_FX_FAST)
        if fx_fast is not None:
            self.assertEqual(len(fx_fast), 0)

        # Stash the FAST cookie for a follow-up request.
        fast_cookie = pa_dict.get(PADATA_FX_COOKIE)
        if fast_cookie is not None:
            kdc_exchange_dict['fast_cookie'] = fast_cookie

        # A FAST error wraps an inner KRB-ERROR; check it recursively.
        fast_error = pa_dict.get(PADATA_FX_ERROR)
        if fast_error is not None:
            fast_error = self.der_decode(fast_error,
                                         asn1Spec=krb5_asn1.KRB_ERROR())
            self.generic_check_kdc_error(kdc_exchange_dict,
                                         callback_dict,
                                         fast_error,
                                         inner=True)

        pac_options = pa_dict.get(PADATA_PAC_OPTIONS)
        if pac_options is not None:
            pac_options = self.der_decode(
                pac_options,
                asn1Spec=krb5_asn1.PA_PAC_OPTIONS())
            self.assertElementEqual(pac_options, 'options', sent_pac_options)

        enc_challenge = pa_dict.get(PADATA_ENCRYPTED_CHALLENGE)
        if enc_challenge is not None:
            if not sent_enc_challenge:
                self.assertEqual(len(enc_challenge), 0)
            else:
                armor_key = kdc_exchange_dict['armor_key']
                self.assertIsNotNone(armor_key)

                preauth_key, _ = self.get_preauth_key(kdc_exchange_dict)

                kdc_challenge_key = self.generate_kdc_challenge_key(
                    armor_key, preauth_key)

                # Ensure that the encrypted challenge FAST factor is supported
                # (RFC6113 5.4.6).
                if self.strict_checking:
                    self.assertNotEqual(len(enc_challenge), 0)
                if len(enc_challenge) != 0:
                    encrypted_challenge = self.der_decode(
                        enc_challenge,
                        asn1Spec=krb5_asn1.EncryptedData())
                    self.assertEqual(encrypted_challenge['etype'],
                                     kdc_challenge_key.etype)

                    challenge = kdc_challenge_key.decrypt(
                        KU_ENC_CHALLENGE_KDC,
                        encrypted_challenge['cipher'])
                    challenge = self.der_decode(
                        challenge,
                        asn1Spec=krb5_asn1.PA_ENC_TS_ENC())

                    # Retrieve the returned timestamp.
                    rep_patime = challenge['patimestamp']
                    self.assertIn('pausec', challenge)

                    # Ensure the returned time is within five minutes of the
                    # current time.
                    rep_time = self.get_EpochFromKerberosTime(rep_patime)
                    current_time = time.time()

                    self.assertLess(current_time - 300, rep_time)
                    self.assertLess(rep_time, current_time + 300)

        etype_info2 = pa_dict.get(PADATA_ETYPE_INFO2)
        if etype_info2 is not None:
            etype_info2 = self.der_decode(etype_info2,
                                          asn1Spec=krb5_asn1.ETYPE_INFO2())
            self.assertGreaterEqual(len(etype_info2), 1)
            if self.strict_checking:
                self.assertEqual(len(etype_info2), len(expect_etype_info2))
            for i in range(0, len(etype_info2)):
                e = self.getElementValue(etype_info2[i], 'etype')
                if self.strict_checking:
                    self.assertEqual(e, expect_etype_info2[i])
                salt = self.getElementValue(etype_info2[i], 'salt')
                # RC4 entries carry no salt; all other etypes must have one.
                if e == kcrypto.Enctype.RC4:
                    if self.strict_checking:
                        self.assertIsNone(salt)
                else:
                    self.assertIsNotNone(salt)
                    expected_salt = kdc_exchange_dict['expected_salt']
                    if expected_salt is not None:
                        self.assertEqual(salt, expected_salt)
                s2kparams = self.getElementValue(etype_info2[i], 's2kparams')
                if self.strict_checking:
                    self.assertIsNone(s2kparams)

        etype_info = pa_dict.get(PADATA_ETYPE_INFO)
        if etype_info is not None:
            # Legacy ETYPE-INFO is only ever sent for RC4, with an empty
            # salt.
            etype_info = self.der_decode(etype_info,
                                         asn1Spec=krb5_asn1.ETYPE_INFO())
            self.assertEqual(len(etype_info), 1)
            e = self.getElementValue(etype_info[0], 'etype')
            self.assertEqual(e, kcrypto.Enctype.RC4)
            if rc4_support:
                self.assertEqual(e, expect_etype_info2[0])
            salt = self.getElementValue(etype_info[0], 'salt')
            if self.strict_checking:
                self.assertIsNotNone(salt)
                self.assertEqual(len(salt), 0)

        return etype_info2
+
+ def generate_simple_fast(self,
+ kdc_exchange_dict,
+ _callback_dict,
+ req_body,
+ fast_padata,
+ fast_armor,
+ checksum,
+ fast_options=''):
+ armor_key = kdc_exchange_dict['armor_key']
+
+ fast_req = self.KRB_FAST_REQ_create(fast_options,
+ fast_padata,
+ req_body)
+ fast_req = self.der_encode(fast_req,
+ asn1Spec=krb5_asn1.KrbFastReq())
+ fast_req = self.EncryptedData_create(armor_key,
+ KU_FAST_ENC,
+ fast_req)
+
+ fast_armored_req = self.KRB_FAST_ARMORED_REQ_create(fast_armor,
+ checksum,
+ fast_req)
+
+ fx_fast_request = self.PA_FX_FAST_REQUEST_create(fast_armored_req)
+ fx_fast_request = self.der_encode(
+ fx_fast_request,
+ asn1Spec=krb5_asn1.PA_FX_FAST_REQUEST())
+
+ fast_padata = self.PA_DATA_create(PADATA_FX_FAST,
+ fx_fast_request)
+
+ return fast_padata
+
    def generate_ap_req(self,
                        kdc_exchange_dict,
                        _callback_dict,
                        req_body,
                        armor,
                        usage=None,
                        seq_number=None):
        """Create a DER-encoded AP-REQ.

        With armor=True this builds the FAST armor AP-REQ (using the armor
        TGT/subkey, and no request body); otherwise it builds the PA-TGS-REQ
        AP-REQ, including a checksum over the encoded request body.
        """
        req_body_checksum = None

        if armor:
            # The FAST armor AP-REQ carries no request-body checksum.
            self.assertIsNone(req_body)

            tgt = kdc_exchange_dict['armor_tgt']
            authenticator_subkey = kdc_exchange_dict['armor_subkey']
        else:
            tgt = kdc_exchange_dict['tgt']
            authenticator_subkey = kdc_exchange_dict['authenticator_subkey']

        if req_body is not None:
            # Checksum the DER-encoded KDC-REQ-BODY with the TGT session key
            # for inclusion in the authenticator.
            body_checksum_type = kdc_exchange_dict['body_checksum_type']

            req_body_blob = self.der_encode(
                req_body, asn1Spec=krb5_asn1.KDC_REQ_BODY())

            req_body_checksum = self.Checksum_create(
                tgt.session_key,
                KU_TGS_REQ_AUTH_CKSUM,
                req_body_blob,
                ctype=body_checksum_type)

        auth_data = kdc_exchange_dict['auth_data']

        subkey_obj = None
        if authenticator_subkey is not None:
            subkey_obj = authenticator_subkey.export_obj()
        if seq_number is None:
            seq_number = random.randint(0, 0xfffffffe)
        (ctime, cusec) = self.get_KerberosTimeWithUsec()
        authenticator_obj = self.Authenticator_create(
            crealm=tgt.crealm,
            cname=tgt.cname,
            cksum=req_body_checksum,
            cusec=cusec,
            ctime=ctime,
            subkey=subkey_obj,
            seq_number=seq_number,
            authorization_data=auth_data)
        authenticator_blob = self.der_encode(
            authenticator_obj,
            asn1Spec=krb5_asn1.Authenticator())

        # Encrypt the authenticator with the TGT session key, under the key
        # usage appropriate to armor vs. TGS use.
        if usage is None:
            usage = KU_AP_REQ_AUTH if armor else KU_TGS_REQ_AUTH
        authenticator = self.EncryptedData_create(tgt.session_key,
                                                  usage,
                                                  authenticator_blob)

        if armor:
            ap_options = kdc_exchange_dict['fast_ap_options']
        else:
            ap_options = kdc_exchange_dict['ap_options']
        if ap_options is None:
            ap_options = str(krb5_asn1.APOptions('0'))
        ap_req_obj = self.AP_REQ_create(ap_options=ap_options,
                                        ticket=tgt.ticket,
                                        authenticator=authenticator)
        ap_req = self.der_encode(ap_req_obj, asn1Spec=krb5_asn1.AP_REQ())

        return ap_req
+
+ def generate_simple_tgs_padata(self,
+ kdc_exchange_dict,
+ callback_dict,
+ req_body):
+ ap_req = self.generate_ap_req(kdc_exchange_dict,
+ callback_dict,
+ req_body,
+ armor=False)
+ pa_tgs_req = self.PA_DATA_create(PADATA_KDC_REQ, ap_req)
+ padata = [pa_tgs_req]
+
+ return padata, req_body
+
+ def get_preauth_key(self, kdc_exchange_dict):
+ msg_type = kdc_exchange_dict['rep_msg_type']
+
+ if msg_type == KRB_AS_REP:
+ key = kdc_exchange_dict['preauth_key']
+ usage = KU_AS_REP_ENC_PART
+ else: # KRB_TGS_REP
+ authenticator_subkey = kdc_exchange_dict['authenticator_subkey']
+ if authenticator_subkey is not None:
+ key = authenticator_subkey
+ usage = KU_TGS_REP_ENC_PART_SUB_KEY
+ else:
+ tgt = kdc_exchange_dict['tgt']
+ key = tgt.session_key
+ usage = KU_TGS_REP_ENC_PART_SESSION
+
+ self.assertIsNotNone(key)
+
+ return key, usage
+
+ def generate_armor_key(self, subkey, session_key):
+ armor_key = kcrypto.cf2(subkey.key,
+ session_key.key,
+ b'subkeyarmor',
+ b'ticketarmor')
+ armor_key = Krb5EncryptionKey(armor_key, None)
+
+ return armor_key
+
+ def generate_strengthen_reply_key(self, strengthen_key, reply_key):
+ strengthen_reply_key = kcrypto.cf2(strengthen_key.key,
+ reply_key.key,
+ b'strengthenkey',
+ b'replykey')
+ strengthen_reply_key = Krb5EncryptionKey(strengthen_reply_key,
+ reply_key.kvno)
+
+ return strengthen_reply_key
+
+ def generate_client_challenge_key(self, armor_key, longterm_key):
+ client_challenge_key = kcrypto.cf2(armor_key.key,
+ longterm_key.key,
+ b'clientchallengearmor',
+ b'challengelongterm')
+ client_challenge_key = Krb5EncryptionKey(client_challenge_key, None)
+
+ return client_challenge_key
+
+ def generate_kdc_challenge_key(self, armor_key, longterm_key):
+ kdc_challenge_key = kcrypto.cf2(armor_key.key,
+ longterm_key.key,
+ b'kdcchallengearmor',
+ b'challengelongterm')
+ kdc_challenge_key = Krb5EncryptionKey(kdc_challenge_key, None)
+
+ return kdc_challenge_key
+
+ def verify_ticket_checksum(self, ticket, expected_checksum, armor_key):
+ expected_type = expected_checksum['cksumtype']
+ self.assertEqual(armor_key.ctype, expected_type)
+
+ ticket_blob = self.der_encode(ticket,
+ asn1Spec=krb5_asn1.Ticket())
+ checksum = self.Checksum_create(armor_key,
+ KU_FAST_FINISHED,
+ ticket_blob)
+ self.assertEqual(expected_checksum, checksum)
+
    def verify_ticket(self, ticket, krbtgt_keys, service_ticket,
                      expect_pac=True,
                      expect_ticket_checksum=True,
                      expect_full_checksum=None):
        """Decrypt a ticket and verify the PAC signatures it contains.

        krbtgt_keys may be a single key or a container of candidate keys
        for the KDC signature. service_ticket selects whether the ticket
        (and full) checksums are expected at all; the expect_* flags
        fine-tune which signature buffers must be present.
        """
        # Decrypt the ticket.

        key = ticket.decryption_key
        enc_part = ticket.ticket['enc-part']

        self.assertElementEqual(enc_part, 'etype', key.etype)
        self.assertElementKVNO(enc_part, 'kvno', key.kvno)

        enc_part = key.decrypt(KU_TICKET, enc_part['cipher'])
        enc_part = self.der_decode(
            enc_part, asn1Spec=krb5_asn1.EncTicketPart())

        # Fetch the authorization data from the ticket.
        auth_data = enc_part.get('authorization-data')
        if expect_pac:
            self.assertIsNotNone(auth_data)
        elif auth_data is None:
            return

        # Get a copy of the authdata with an empty PAC, and the existing PAC
        # (if present).
        empty_pac = self.get_empty_pac()
        auth_data, pac_data = self.replace_pac(auth_data,
                                               empty_pac,
                                               expect_pac=expect_pac)
        if not expect_pac:
            return

        # Unpack the PAC as both PAC_DATA and PAC_DATA_RAW types. We use the
        # raw type to create a new PAC with zeroed signatures for
        # verification. This is because on Windows, the resource_groups field
        # is added to PAC_LOGON_INFO after the info3 field has been created,
        # which results in a different ordering of pointer values than Samba
        # (see commit 0e201ecdc53). Using the raw type avoids changing
        # PAC_LOGON_INFO, so verification against Windows can work. We still
        # need the PAC_DATA type to retrieve the actual checksums, because the
        # signatures in the raw type may contain padding bytes.
        pac = ndr_unpack(krb5pac.PAC_DATA,
                         pac_data)
        raw_pac = ndr_unpack(krb5pac.PAC_DATA_RAW,
                             pac_data)

        # Maps checksum buffer type -> (signature bytes, checksum type).
        checksums = {}

        full_checksum_buffer = None

        for pac_buffer, raw_pac_buffer in zip(pac.buffers, raw_pac.buffers):
            buffer_type = pac_buffer.type
            if buffer_type in self.pac_checksum_types:
                self.assertNotIn(buffer_type, checksums,
                                 f'Duplicate checksum type {buffer_type}')

                # Fetch the checksum and the checksum type from the PAC buffer.
                checksum = pac_buffer.info.signature
                ctype = pac_buffer.info.type
                # Sign-extend negative 32-bit checksum types.
                if ctype & 1 << 31:
                    ctype |= -1 << 31

                checksums[buffer_type] = checksum, ctype

                if buffer_type == krb5pac.PAC_TYPE_FULL_CHECKSUM:
                    full_checksum_buffer = raw_pac_buffer
                elif buffer_type != krb5pac.PAC_TYPE_TICKET_CHECKSUM:
                    # Zero the checksum field so that we can later verify the
                    # checksums. The ticket checksum field is not zeroed.

                    signature = ndr_unpack(
                        krb5pac.PAC_SIGNATURE_DATA,
                        raw_pac_buffer.info.remaining)
                    signature.signature = bytes(len(checksum))
                    raw_pac_buffer.info.remaining = ndr_pack(
                        signature)

        # Re-encode the PAC.
        pac_data = ndr_pack(raw_pac)

        if full_checksum_buffer is not None:
            # NOTE(review): 'checksum' here is the loop variable from the
            # last iteration above — this assumes all checksum signatures
            # have the same length.
            signature = ndr_unpack(
                krb5pac.PAC_SIGNATURE_DATA,
                full_checksum_buffer.info.remaining)
            signature.signature = bytes(len(checksum))
            full_checksum_buffer.info.remaining = ndr_pack(
                signature)

            # Re-encode the PAC.
            full_pac_data = ndr_pack(raw_pac)

        # Verify the signatures.

        server_checksum, server_ctype = checksums[
            krb5pac.PAC_TYPE_SRV_CHECKSUM]
        key.verify_checksum(KU_NON_KERB_CKSUM_SALT,
                            pac_data,
                            server_ctype,
                            server_checksum)

        kdc_checksum, kdc_ctype = checksums[
            krb5pac.PAC_TYPE_KDC_CHECKSUM]

        # Pick the krbtgt key matching the KDC checksum type (or, under
        # strict checking, require the first candidate to match).
        if isinstance(krbtgt_keys, collections.abc.Container):
            if self.strict_checking:
                krbtgt_key = krbtgt_keys[0]
            else:
                krbtgt_key = next(key for key in krbtgt_keys
                                  if key.ctype == kdc_ctype)
        else:
            krbtgt_key = krbtgt_keys

        krbtgt_key.verify_rodc_checksum(KU_NON_KERB_CKSUM_SALT,
                                        server_checksum,
                                        kdc_ctype,
                                        kdc_checksum)

        if not service_ticket:
            # Only service tickets carry ticket/full checksums.
            self.assertNotIn(krb5pac.PAC_TYPE_TICKET_CHECKSUM, checksums)
            self.assertNotIn(krb5pac.PAC_TYPE_FULL_CHECKSUM, checksums)
        else:
            ticket_checksum, ticket_ctype = checksums.get(
                krb5pac.PAC_TYPE_TICKET_CHECKSUM,
                (None, None))
            if expect_ticket_checksum:
                self.assertIsNotNone(ticket_checksum)
            elif expect_ticket_checksum is False:
                self.assertIsNone(ticket_checksum)
            if ticket_checksum is not None:
                # The ticket checksum covers the enc-part with the PAC
                # replaced by the empty placeholder.
                enc_part['authorization-data'] = auth_data
                enc_part = self.der_encode(enc_part,
                                           asn1Spec=krb5_asn1.EncTicketPart())

                krbtgt_key.verify_rodc_checksum(KU_NON_KERB_CKSUM_SALT,
                                                enc_part,
                                                ticket_ctype,
                                                ticket_checksum)

            full_checksum, full_ctype = checksums.get(
                krb5pac.PAC_TYPE_FULL_CHECKSUM,
                (None, None))
            if expect_full_checksum:
                self.assertIsNotNone(full_checksum)
            elif expect_full_checksum is False:
                self.assertIsNone(full_checksum)
            if full_checksum is not None:
                krbtgt_key.verify_rodc_checksum(KU_NON_KERB_CKSUM_SALT,
                                                full_pac_data,
                                                full_ctype,
                                                full_checksum)
+
    def modified_ticket(self,
                        ticket, *,
                        new_ticket_key=None,
                        modify_fn=None,
                        modify_pac_fn=None,
                        exclude_pac=False,
                        allow_empty_authdata=False,
                        update_pac_checksums=None,
                        checksum_keys=None,
                        include_checksums=None):
        """Return a copy of a ticket with its enc-part and/or PAC modified.

        modify_fn/modify_pac_fn may each be a callable or a sequence of
        callables applied in order to the decoded enc-part / unpacked PAC.
        Checksums are recomputed with checksum_keys unless
        update_pac_checksums is disabled, and the result is re-encrypted
        with new_ticket_key (defaulting to the original key).
        """
        if checksum_keys is None:
            # A dict containing a key for each checksum type to be created in
            # the PAC.
            checksum_keys = {}
        else:
            checksum_keys = dict(checksum_keys)

        if include_checksums is None:
            # A dict containing a value for each checksum type; True if the
            # checksum type is to be included in the PAC, False if it is to be
            # excluded, or None/not present if the checksum is to be included
            # based on its presence in the original PAC.
            include_checksums = {}
        else:
            include_checksums = dict(include_checksums)

        # Check that the values passed in by the caller make sense.

        self.assertLessEqual(checksum_keys.keys(), self.pac_checksum_types)
        self.assertLessEqual(include_checksums.keys(), self.pac_checksum_types)

        if update_pac_checksums is None:
            update_pac_checksums = not exclude_pac

        if exclude_pac:
            self.assertIsNone(modify_pac_fn)
            self.assertFalse(update_pac_checksums)

        if not update_pac_checksums:
            self.assertFalse(checksum_keys)
            self.assertFalse(include_checksums)

        expect_pac = bool(modify_pac_fn)

        key = ticket.decryption_key

        if new_ticket_key is None:
            # Use the same key to re-encrypt the ticket.
            new_ticket_key = key

        if krb5pac.PAC_TYPE_SRV_CHECKSUM not in checksum_keys:
            # If the server signature key is not present, fall back to the key
            # used to encrypt the ticket.
            checksum_keys[krb5pac.PAC_TYPE_SRV_CHECKSUM] = new_ticket_key

        if krb5pac.PAC_TYPE_TICKET_CHECKSUM not in checksum_keys:
            # If the ticket signature key is not present, fall back to the key
            # used for the KDC signature.
            kdc_checksum_key = checksum_keys.get(krb5pac.PAC_TYPE_KDC_CHECKSUM)
            if kdc_checksum_key is not None:
                checksum_keys[krb5pac.PAC_TYPE_TICKET_CHECKSUM] = (
                    kdc_checksum_key)

        if krb5pac.PAC_TYPE_FULL_CHECKSUM not in checksum_keys:
            # If the full signature key is not present, fall back to the key
            # used for the KDC signature.
            kdc_checksum_key = checksum_keys.get(krb5pac.PAC_TYPE_KDC_CHECKSUM)
            if kdc_checksum_key is not None:
                checksum_keys[krb5pac.PAC_TYPE_FULL_CHECKSUM] = (
                    kdc_checksum_key)

        # Decrypt the ticket.

        enc_part = ticket.ticket['enc-part']

        self.assertElementEqual(enc_part, 'etype', key.etype)
        self.assertElementKVNO(enc_part, 'kvno', key.kvno)

        enc_part = key.decrypt(KU_TICKET, enc_part['cipher'])
        enc_part = self.der_decode(
            enc_part, asn1Spec=krb5_asn1.EncTicketPart())

        # Modify the ticket here.
        if callable(modify_fn):
            enc_part = modify_fn(enc_part)
        elif modify_fn:
            for fn in modify_fn:
                enc_part = fn(enc_part)

        auth_data = enc_part.get('authorization-data')
        if expect_pac:
            self.assertIsNotNone(auth_data)
        if auth_data is not None:
            new_pac = None
            if exclude_pac:
                need_to_call_replace_pac = True
            elif not modify_pac_fn and not update_pac_checksums:
                need_to_call_replace_pac = False
            else:
                need_to_call_replace_pac = True
                # Get a copy of the authdata with an empty PAC, and the
                # existing PAC (if present).
                empty_pac = self.get_empty_pac()
                empty_pac_auth_data, pac_data = self.replace_pac(
                    auth_data,
                    empty_pac,
                    expect_pac=expect_pac)

                if pac_data is not None:
                    pac = ndr_unpack(krb5pac.PAC_DATA, pac_data)

                    # Modify the PAC here.
                    if callable(modify_pac_fn):
                        pac = modify_pac_fn(pac)
                    elif modify_pac_fn:
                        for fn in modify_pac_fn:
                            pac = fn(pac)

                    if update_pac_checksums:
                        # Get the enc-part with an empty PAC, which is needed
                        # to create a ticket signature.
                        enc_part_to_sign = enc_part.copy()
                        enc_part_to_sign['authorization-data'] = (
                            empty_pac_auth_data)
                        enc_part_to_sign = self.der_encode(
                            enc_part_to_sign,
                            asn1Spec=krb5_asn1.EncTicketPart())

                        self.update_pac_checksums(pac,
                                                  checksum_keys,
                                                  include_checksums,
                                                  enc_part_to_sign)

                    # Re-encode the PAC.
                    pac_data = ndr_pack(pac)
                    new_pac = self.AuthorizationData_create(AD_WIN2K_PAC,
                                                            pac_data)

            # Replace the PAC in the authorization data and re-add it to the
            # ticket enc-part.
            if need_to_call_replace_pac:
                auth_data, _ = self.replace_pac(
                    auth_data, new_pac,
                    expect_pac=expect_pac,
                    allow_empty_authdata=allow_empty_authdata)
                enc_part['authorization-data'] = auth_data

        # Re-encrypt the ticket enc-part with the new key.
        enc_part_new = self.der_encode(enc_part,
                                       asn1Spec=krb5_asn1.EncTicketPart())
        enc_part_new = self.EncryptedData_create(new_ticket_key,
                                                 KU_TICKET,
                                                 enc_part_new)

        # Create a copy of the ticket with the new enc-part.
        new_ticket = ticket.ticket.copy()
        new_ticket['enc-part'] = enc_part_new

        new_ticket_creds = KerberosTicketCreds(
            new_ticket,
            session_key=ticket.session_key,
            crealm=ticket.crealm,
            cname=ticket.cname,
            srealm=ticket.srealm,
            sname=ticket.sname,
            decryption_key=new_ticket_key,
            ticket_private=enc_part,
            encpart_private=ticket.encpart_private)

        return new_ticket_creds
+
    def update_pac_checksums(self,
                             pac,
                             checksum_keys,
                             include_checksums,
                             enc_part=None):
        """Recompute the checksum buffers of a PAC in place.

        checksum_keys maps buffer type -> signing key; include_checksums
        maps buffer type -> True (force-add) / False (remove) / absent
        (keep as-is). enc_part (DER-encoded enc-part with an empty PAC) is
        required when a ticket checksum is to be produced.

        Order matters: full checksum is computed over the PAC with zeroed
        server/KDC signatures, then the server checksum, then the KDC
        checksum over the server signature.
        """
        pac_buffers = pac.buffers
        checksum_buffers = {}

        # Find the relevant PAC checksum buffers.
        for pac_buffer in pac_buffers:
            buffer_type = pac_buffer.type
            if buffer_type in self.pac_checksum_types:
                self.assertNotIn(buffer_type, checksum_buffers,
                                 f'Duplicate checksum type {buffer_type}')

                checksum_buffers[buffer_type] = pac_buffer

        # Create any additional buffers that were requested but not
        # present. Conversely, remove any buffers that were requested to be
        # removed.
        for buffer_type in self.pac_checksum_types:
            if buffer_type in checksum_buffers:
                if include_checksums.get(buffer_type) is False:
                    checksum_buffer = checksum_buffers.pop(buffer_type)

                    pac.num_buffers -= 1
                    pac_buffers.remove(checksum_buffer)

            elif include_checksums.get(buffer_type) is True:
                info = krb5pac.PAC_SIGNATURE_DATA()

                checksum_buffer = krb5pac.PAC_BUFFER()
                checksum_buffer.type = buffer_type
                checksum_buffer.info = info

                pac_buffers.append(checksum_buffer)
                pac.num_buffers += 1

                checksum_buffers[buffer_type] = checksum_buffer

        # Fill the relevant checksum buffers.
        for buffer_type, checksum_buffer in checksum_buffers.items():
            checksum_key = checksum_keys[buffer_type]
            # Store the checksum type as an unsigned 32-bit value.
            ctype = checksum_key.ctype & ((1 << 32) - 1)

            if buffer_type == krb5pac.PAC_TYPE_TICKET_CHECKSUM:
                self.assertIsNotNone(enc_part)

                signature = checksum_key.make_rodc_checksum(
                    KU_NON_KERB_CKSUM_SALT,
                    enc_part)

            elif buffer_type == krb5pac.PAC_TYPE_SRV_CHECKSUM:
                signature = checksum_key.make_zeroed_checksum()

            else:
                signature = checksum_key.make_rodc_zeroed_checksum()

            checksum_buffer.info.signature = signature
            checksum_buffer.info.type = ctype

        # Add the new checksum buffers to the PAC.
        pac.buffers = pac_buffers

        # Calculate the full checksum and insert it into the PAC.
        full_checksum_buffer = checksum_buffers.get(
            krb5pac.PAC_TYPE_FULL_CHECKSUM)
        if full_checksum_buffer is not None:
            full_checksum_key = checksum_keys[krb5pac.PAC_TYPE_FULL_CHECKSUM]

            pac_data = ndr_pack(pac)
            full_checksum = full_checksum_key.make_rodc_checksum(
                KU_NON_KERB_CKSUM_SALT,
                pac_data)

            full_checksum_buffer.info.signature = full_checksum

        # Calculate the server and KDC checksums and insert them into the PAC.

        server_checksum_buffer = checksum_buffers.get(
            krb5pac.PAC_TYPE_SRV_CHECKSUM)
        if server_checksum_buffer is not None:
            server_checksum_key = checksum_keys[krb5pac.PAC_TYPE_SRV_CHECKSUM]

            pac_data = ndr_pack(pac)
            server_checksum = server_checksum_key.make_checksum(
                KU_NON_KERB_CKSUM_SALT,
                pac_data)

            server_checksum_buffer.info.signature = server_checksum

        kdc_checksum_buffer = checksum_buffers.get(
            krb5pac.PAC_TYPE_KDC_CHECKSUM)
        if kdc_checksum_buffer is not None:
            if server_checksum_buffer is None:
                # There's no server signature to make the checksum over, so
                # just make the checksum over an empty bytes object.
                server_checksum = bytes()

            kdc_checksum_key = checksum_keys[krb5pac.PAC_TYPE_KDC_CHECKSUM]

            kdc_checksum = kdc_checksum_key.make_rodc_checksum(
                KU_NON_KERB_CKSUM_SALT,
                server_checksum)

            kdc_checksum_buffer.info.signature = kdc_checksum
+
    def replace_pac(self, auth_data, new_pac, expect_pac=True,
                    allow_empty_authdata=False):
        """Replace the PAC inside AD-IF-RELEVANT authorization data.

        Returns (new_auth_data, old_pac) where old_pac is the previous PAC
        blob (or None). Passing new_pac=None removes the PAC. With
        allow_empty_authdata, empty containers are kept rather than
        dropped.
        """
        if new_pac is not None:
            self.assertElementEqual(new_pac, 'ad-type', AD_WIN2K_PAC)
            self.assertElementPresent(new_pac, 'ad-data')

        new_auth_data = []

        ad_relevant = None
        old_pac = None

        for authdata_elem in auth_data:
            if authdata_elem['ad-type'] == AD_IF_RELEVANT:
                ad_relevant = self.der_decode(
                    authdata_elem['ad-data'],
                    asn1Spec=krb5_asn1.AD_IF_RELEVANT())

                # Rebuild the AD-IF-RELEVANT contents, swapping the PAC
                # element for new_pac (if any) and keeping everything else.
                relevant_elems = []
                for relevant_elem in ad_relevant:
                    if relevant_elem['ad-type'] == AD_WIN2K_PAC:
                        self.assertIsNone(old_pac, 'Multiple PACs detected')
                        old_pac = relevant_elem['ad-data']

                        if new_pac is not None:
                            relevant_elems.append(new_pac)
                    else:
                        relevant_elems.append(relevant_elem)
                if expect_pac:
                    self.assertIsNotNone(old_pac, 'Expected PAC')

                if relevant_elems or allow_empty_authdata:
                    ad_relevant = self.der_encode(
                        relevant_elems,
                        asn1Spec=krb5_asn1.AD_IF_RELEVANT())

                    # Rebind the loop variable to the rebuilt element; it is
                    # appended below.
                    authdata_elem = self.AuthorizationData_create(
                        AD_IF_RELEVANT,
                        ad_relevant)
                else:
                    authdata_elem = None

            if authdata_elem is not None or allow_empty_authdata:
                new_auth_data.append(authdata_elem)

        if expect_pac:
            self.assertIsNotNone(ad_relevant, 'Expected AD-RELEVANT')

        return new_auth_data, old_pac
+
+ def get_pac(self, auth_data, expect_pac=True):
+ _, pac = self.replace_pac(auth_data, None, expect_pac)
+ return pac
+
+ def get_ticket_pac(self, ticket, expect_pac=True):
+ auth_data = ticket.ticket_private.get('authorization-data')
+ if expect_pac:
+ self.assertIsNotNone(auth_data)
+ elif auth_data is None:
+ return None
+
+ return self.get_pac(auth_data, expect_pac=expect_pac)
+
+ def get_krbtgt_checksum_key(self):
+ krbtgt_creds = self.get_krbtgt_creds()
+ krbtgt_key = self.TicketDecryptionKey_from_creds(krbtgt_creds)
+
+ return {
+ krb5pac.PAC_TYPE_KDC_CHECKSUM: krbtgt_key
+ }
+
+ def is_tgs_principal(self, principal):
+ if self.is_tgs(principal):
+ return True
+
+ if self.kadmin_is_tgs and self.is_kadmin(principal):
+ return True
+
+ return False
+
+ def is_kadmin(self, principal):
+ name = principal['name-string'][0]
+ return name in ('kadmin', b'kadmin')
+
+ def is_tgs(self, principal):
+ name_string = principal['name-string']
+ if 1 <= len(name_string) <= 2:
+ return name_string[0] in ('krbtgt', b'krbtgt')
+
+ return False
+
+ def is_tgt(self, ticket):
+ sname = ticket.ticket['sname']
+ return self.is_tgs(sname)
+
+ def get_empty_pac(self):
+ return self.AuthorizationData_create(AD_WIN2K_PAC, bytes(1))
+
+ def get_outer_pa_dict(self, kdc_exchange_dict):
+ return self.get_pa_dict(kdc_exchange_dict['req_padata'])
+
+ def get_fast_pa_dict(self, kdc_exchange_dict):
+ req_pa_dict = self.get_pa_dict(kdc_exchange_dict['fast_padata'])
+
+ if req_pa_dict:
+ return req_pa_dict
+
+ return self.get_outer_pa_dict(kdc_exchange_dict)
+
+ def sent_fast(self, kdc_exchange_dict):
+ outer_pa_dict = self.get_outer_pa_dict(kdc_exchange_dict)
+
+ return PADATA_FX_FAST in outer_pa_dict
+
+ def sent_enc_challenge(self, kdc_exchange_dict):
+ fast_pa_dict = self.get_fast_pa_dict(kdc_exchange_dict)
+
+ return PADATA_ENCRYPTED_CHALLENGE in fast_pa_dict
+
+ def sent_enc_pa_rep(self, kdc_exchange_dict):
+ fast_pa_dict = self.get_fast_pa_dict(kdc_exchange_dict)
+
+ return PADATA_REQ_ENC_PA_REP in fast_pa_dict
+
+ def sent_pk_as_req(self, kdc_exchange_dict):
+ fast_pa_dict = self.get_fast_pa_dict(kdc_exchange_dict)
+
+ return PADATA_PK_AS_REQ in fast_pa_dict
+
+ def sent_pk_as_req_win2k(self, kdc_exchange_dict):
+ fast_pa_dict = self.get_fast_pa_dict(kdc_exchange_dict)
+
+ return PADATA_PK_AS_REP_19 in fast_pa_dict
+
+ def sent_freshness(self, kdc_exchange_dict):
+ fast_pa_dict = self.get_fast_pa_dict(kdc_exchange_dict)
+
+ return PADATA_AS_FRESHNESS in fast_pa_dict
+
+ def get_sent_pac_options(self, kdc_exchange_dict):
+ fast_pa_dict = self.get_fast_pa_dict(kdc_exchange_dict)
+
+ if PADATA_PAC_OPTIONS not in fast_pa_dict:
+ return ''
+
+ pac_options = self.der_decode(fast_pa_dict[PADATA_PAC_OPTIONS],
+ asn1Spec=krb5_asn1.PA_PAC_OPTIONS())
+ pac_options = pac_options['options']
+
+ # Mask out unsupported bits.
+ pac_options, remaining = pac_options[:4], pac_options[4:]
+ pac_options += '0' * len(remaining)
+
+ return pac_options
+
+ def get_krbtgt_sname(self):
+ krbtgt_creds = self.get_krbtgt_creds()
+ krbtgt_username = krbtgt_creds.get_username()
+ krbtgt_realm = krbtgt_creds.get_realm()
+ krbtgt_sname = self.PrincipalName_create(
+ name_type=NT_SRV_INST, names=[krbtgt_username, krbtgt_realm])
+
+ return krbtgt_sname
+
+ def add_requester_sid(self, pac, sid):
+ pac_buffers = pac.buffers
+
+ buffer_types = [pac_buffer.type for pac_buffer in pac_buffers]
+ self.assertNotIn(krb5pac.PAC_TYPE_REQUESTER_SID, buffer_types)
+
+ requester_sid = krb5pac.PAC_REQUESTER_SID()
+ requester_sid.sid = security.dom_sid(sid)
+
+ requester_sid_buffer = krb5pac.PAC_BUFFER()
+ requester_sid_buffer.type = krb5pac.PAC_TYPE_REQUESTER_SID
+ requester_sid_buffer.info = requester_sid
+
+ pac_buffers.append(requester_sid_buffer)
+
+ pac.buffers = pac_buffers
+ pac.num_buffers += 1
+
+ return pac
+
    def modify_lifetime(self, ticket, lifetime, requester_sid=None):
        """Return a copy of a ticket whose remaining lifetime is *lifetime*
        seconds, re-signed and re-encrypted with the krbtgt key.

        Optionally adds a requester-SID PAC buffer so the ticket is treated
        like a TGT.
        """
        # Get the krbtgt key.
        krbtgt_creds = self.get_krbtgt_creds()

        krbtgt_key = self.TicketDecryptionKey_from_creds(krbtgt_creds)
        checksum_keys = {
            krb5pac.PAC_TYPE_KDC_CHECKSUM: krbtgt_key,
        }

        current_time = time.time()

        # Set authtime and starttime to an hour in the past, to show that they
        # do not affect ticket rejection.
        start_time = self.get_KerberosTime(epoch=current_time, offset=-60 * 60)

        # Set the endtime of the ticket relative to our current time, so that
        # the ticket has 'lifetime' seconds remaining to live.
        end_time = self.get_KerberosTime(epoch=current_time, offset=lifetime)

        # Modify the times in the ticket.
        def modify_ticket_times(enc_part):
            enc_part['authtime'] = start_time
            if 'starttime' in enc_part:
                enc_part['starttime'] = start_time

            enc_part['endtime'] = end_time

            return enc_part

        # We have to set the times in both the ticket and the PAC, otherwise
        # Heimdal will complain.
        def modify_pac_time(pac):
            pac_buffers = pac.buffers

            for pac_buffer in pac_buffers:
                if pac_buffer.type == krb5pac.PAC_TYPE_LOGON_NAME:
                    logon_time = self.get_EpochFromKerberosTime(start_time)
                    pac_buffer.info.logon_time = unix2nttime(logon_time)
                    break
            else:
                self.fail('failed to find LOGON_NAME PAC buffer')

            pac.buffers = pac_buffers

            return pac

        def modify_pac_fn(pac):
            if requester_sid is not None:
                # Add a requester SID to show that the KDC will then accept
                # this kpasswd ticket as if it were a TGT.
                pac = self.add_requester_sid(pac, sid=requester_sid)
            pac = modify_pac_time(pac)
            return pac

        # Do the actual modification.
        return self.modified_ticket(ticket,
                                    new_ticket_key=krbtgt_key,
                                    modify_fn=modify_ticket_times,
                                    modify_pac_fn=modify_pac_fn,
                                    checksum_keys=checksum_keys)
+
    def _test_as_exchange(self,
                          cname,
                          realm,
                          sname,
                          till,
                          expected_error_mode,
                          expected_crealm,
                          expected_cname,
                          expected_srealm,
                          expected_sname,
                          expected_salt,
                          etypes,
                          padata,
                          kdc_options,
                          creds=None,
                          renew_time=None,
                          expected_account_name=None,
                          expected_groups=None,
                          unexpected_groups=None,
                          expected_upn_name=None,
                          expected_sid=None,
                          expected_domain_sid=None,
                          expected_flags=None,
                          unexpected_flags=None,
                          expected_supported_etypes=None,
                          preauth_key=None,
                          ticket_decryption_key=None,
                          pac_request=None,
                          pac_options=None,
                          expect_pac=True,
                          expect_pac_attrs=None,
                          expect_pac_attrs_pac_request=None,
                          expect_requester_sid=None,
                          expect_client_claims=None,
                          expect_device_claims=None,
                          expected_client_claims=None,
                          unexpected_client_claims=None,
                          expected_device_claims=None,
                          unexpected_device_claims=None,
                          expect_edata=None,
                          expect_status=None,
                          expected_status=None,
                          rc4_support=True,
                          to_rodc=False):
        """Perform an AS exchange and check the reply.

        Builds an exchange dict from the many expectation parameters,
        issues the AS-REQ, and returns (reply, kdc_exchange_dict). When
        expected_error_mode is falsy a successful AS-REP is checked;
        otherwise a KRB-ERROR is expected.
        """

        # Return the caller-supplied padata unchanged.
        def _generate_padata_copy(_kdc_exchange_dict,
                                  _callback_dict,
                                  req_body):
            return padata, req_body

        if not expected_error_mode:
            check_error_fn = None
            check_rep_fn = self.generic_check_kdc_rep
        else:
            check_error_fn = self.generic_check_kdc_error
            check_rep_fn = None

        if padata is not None:
            generate_padata_fn = _generate_padata_copy
        else:
            generate_padata_fn = None

        kdc_exchange_dict = self.as_exchange_dict(
            creds=creds,
            expected_crealm=expected_crealm,
            expected_cname=expected_cname,
            expected_srealm=expected_srealm,
            expected_sname=expected_sname,
            expected_account_name=expected_account_name,
            expected_groups=expected_groups,
            unexpected_groups=unexpected_groups,
            expected_upn_name=expected_upn_name,
            expected_sid=expected_sid,
            expected_domain_sid=expected_domain_sid,
            expected_supported_etypes=expected_supported_etypes,
            ticket_decryption_key=ticket_decryption_key,
            generate_padata_fn=generate_padata_fn,
            check_error_fn=check_error_fn,
            check_rep_fn=check_rep_fn,
            check_kdc_private_fn=self.generic_check_kdc_private,
            expected_error_mode=expected_error_mode,
            expected_salt=expected_salt,
            expected_flags=expected_flags,
            unexpected_flags=unexpected_flags,
            preauth_key=preauth_key,
            kdc_options=str(kdc_options),
            pac_request=pac_request,
            pac_options=pac_options,
            expect_pac=expect_pac,
            expect_pac_attrs=expect_pac_attrs,
            expect_pac_attrs_pac_request=expect_pac_attrs_pac_request,
            expect_requester_sid=expect_requester_sid,
            expect_client_claims=expect_client_claims,
            expect_device_claims=expect_device_claims,
            expected_client_claims=expected_client_claims,
            unexpected_client_claims=unexpected_client_claims,
            expected_device_claims=expected_device_claims,
            unexpected_device_claims=unexpected_device_claims,
            expect_edata=expect_edata,
            expect_status=expect_status,
            expected_status=expected_status,
            rc4_support=rc4_support,
            to_rodc=to_rodc)

        rep = self._generic_kdc_exchange(kdc_exchange_dict,
                                         cname=cname,
                                         realm=realm,
                                         sname=sname,
                                         till_time=till,
                                         renew_time=renew_time,
                                         etypes=etypes)

        return rep, kdc_exchange_dict
diff --git a/python/samba/tests/krb5/rfc4120.asn1 b/python/samba/tests/krb5/rfc4120.asn1
new file mode 100644
index 0000000..79449d8
--- /dev/null
+++ b/python/samba/tests/krb5/rfc4120.asn1
@@ -0,0 +1,1908 @@
+-- Portions of these ASN.1 modules are structures from RFC6113
+-- authored by S. Hartman (Painless Security) and L. Zhu (Microsoft)
+--
+-- Portions of these ASN.1 modules are structures from RFC4556
+-- authored by L. Zhu (Microsoft Corporation) and B. Tung (Aerospace
+-- Corporation)
+--
+-- Portions of these ASN.1 modules are structures from RFC5280
+-- authored by D. Cooper (NIST), S. Santesson (Microsoft),
+-- S. Farrell (Trinity College Dublin), S. Boeyen (Entrust),
+-- R. Housley (Vigil Security), W. Polk (NIST)
+--
+-- Portions of these ASN.1 modules are structures from RFC8017
+-- authored by K. Moriarty, Ed. (EMC Corporation)
+-- B. Kaliski (Verisign), J. Jonsson (Subset AB), A. Rusch (RSA)
+
+-- Portions of these ASN.1 modules are structures from RFC8018
+-- authored by K. Moriarty, Ed. (Dell EMC), B. Kaliski (Verisign)
+-- A. Rusch (RSA)
+--
+-- Copyright (c) 2011 IETF Trust and the persons identified as authors of the
+-- code. All rights reserved.
+--
+-- Redistribution and use in source and binary forms, with or without
+-- modification, is permitted pursuant to, and subject to the license terms
+-- contained in, the Simplified BSD License set forth in Section 4.c of the IETF
+-- Trust’s Legal Provisions Relating to IETF Documents
+-- (http://trustee.ietf.org/license-info).
+--
+-- BSD License:
+--
+-- Copyright (c) 2011 IETF Trust and the persons identified as authors of the code. All rights reserved.
+-- Redistribution and use in source and binary forms, with or without modification, are permitted provided
+-- that the following conditions are met:
+-- • Redistributions of source code must retain the above copyright notice, this list of conditions and
+-- the following disclaimer.
+--
+-- • Redistributions in binary form must reproduce the above copyright notice, this list of conditions
+-- and the following disclaimer in the documentation and/or other materials provided with the
+-- distribution.
+--
+-- • Neither the name of Internet Society, IETF or IETF Trust, nor the names of specific contributors,
+-- may be used to endorse or promote products derived from this software without specific prior written
+-- permission.
+-- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS “AS IS”
+-- AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+-- IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+-- ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+-- LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+-- CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+-- SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+-- INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+-- CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+-- ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+-- POSSIBILITY OF SUCH DAMAGE.
+--
+--
+-- Portions of these ASN.1 modules are from Microsoft's MS-WCCE and MS-KILE
+-- from the Microsoft Open Specifications Documentation.
+--
+-- Intellectual Property Rights Notice for Open Specifications Documentation
+--
+-- * Technical Documentation. Microsoft publishes Open Specifications
+-- documentation (“this documentation”) for protocols, file formats,
+-- data portability, computer languages, and standards
+-- support. Additionally, overview documents cover inter-protocol
+-- relationships and interactions.
+--
+-- * Copyrights. This documentation is covered by Microsoft
+-- copyrights. Regardless of any other terms that are contained in
+-- the terms of use for the Microsoft website that hosts this
+-- documentation, you can make copies of it in order to develop
+-- implementations of the technologies that are described in this
+-- documentation and can distribute portions of it in your
+-- implementations that use these technologies or in your
+-- documentation as necessary to properly document the
+-- implementation. You can also distribute in your implementation,
+-- with or without modification, any schemas, IDLs, or code samples
+-- that are included in the documentation. This permission also
+-- applies to any documents that are referenced in the Open
+-- Specifications documentation.
+--
+-- * No Trade Secrets. Microsoft does not claim any trade secret rights
+-- in this documentation.
+--
+-- * Patents. Microsoft has patents that might cover your
+-- implementations of the technologies described in the Open
+-- Specifications documentation. Neither this notice nor Microsoft's
+-- delivery of this documentation grants any licenses under those
+-- patents or any other Microsoft patents. However, a given Open
+-- Specifications document might be covered by the Microsoft Open
+-- Specifications Promise or the Microsoft Community Promise. If you
+-- would prefer a written license, or if the technologies described
+-- in this documentation are not covered by the Open Specifications
+-- Promise or Community Promise, as applicable, patent licenses are
+-- available by contacting iplg@microsoft.com.
+--
+-- * License Programs. To see all of the protocols in scope under a
+-- specific license program and the associated patents, visit the
+-- Patent Map.
+--
+-- * Trademarks. The names of companies and products contained in this
+-- documentation might be covered by trademarks or similar
+-- intellectual property rights. This notice does not grant any
+-- licenses under those rights. For a list of Microsoft trademarks,
+-- visit www.microsoft.com/trademarks.
+--
+-- * Fictitious Names. The example companies, organizations, products,
+-- domain names, email addresses, logos, people, places, and events
+-- that are depicted in this documentation are fictitious. No
+-- association with any real company, organization, product, domain
+-- name, email address, logo, person, place, or event is intended or
+-- should be inferred.
+--
+-- Reservation of Rights. All other rights are reserved, and this notice
+-- does not grant any rights other than as specifically described above,
+-- whether by implication, estoppel, or otherwise.
+--
+-- Tools. The Open Specifications documentation does not require the use
+-- of Microsoft programming tools or programming environments in order
+-- for you to develop an implementation. If you have access to Microsoft
+-- programming tools and environments, you are free to take advantage of
+-- them. Certain Open Specifications documents are intended for use in
+-- conjunction with publicly available standards specifications and
+-- network programming art and, as such, assume that the reader either
+-- is familiar with the aforementioned material or has immediate access
+-- to it.
+--
+-- Support. For questions and support, please contact dochelp@microsoft.com
+
+
+-- The above is the IPR notice from MS-KILE
+
+KerberosV5Spec2 {
+ iso(1) identified-organization(3) dod(6) internet(1)
+ security(5) kerberosV5(2) modules(4) krb5spec2(2)
+} DEFINITIONS EXPLICIT TAGS ::= BEGIN
+
+-- OID arc for KerberosV5
+--
+-- This OID may be used to identify Kerberos protocol messages
+-- encapsulated in other protocols.
+--
+-- This OID also designates the OID arc for KerberosV5-related OIDs.
+--
+-- NOTE: RFC 1510 had an incorrect value (5) for "dod" in its OID.
+id-krb5 OBJECT IDENTIFIER ::= {
+ iso(1) identified-organization(3) dod(6) internet(1)
+ security(5) kerberosV5(2)
+}
+
+Int32 ::= INTEGER (-2147483648..2147483647)
+ -- signed values representable in 32 bits
+
+UInt32 ::= INTEGER (0..4294967295)
+ -- unsigned 32 bit values
+
+Microseconds ::= INTEGER (0..999999)
+ -- microseconds
+
+--
+-- asn1ate doesn't support 'GeneralString (IA5String)'
+-- only 'GeneralString' or 'IA5String', on the wire
+-- GeneralString is used.
+--
+-- KerberosString ::= GeneralString (IA5String)
+KerberosString ::= GeneralString
+
+Realm ::= KerberosString
+
+PrincipalName ::= SEQUENCE {
+ name-type [0] NameType, -- Int32,
+ name-string [1] SEQUENCE OF KerberosString
+}
+
+NameType ::= Int32
+
+KerberosTime ::= GeneralizedTime -- with no fractional seconds
+
+HostAddress ::= SEQUENCE {
+ addr-type [0] Int32,
+ address [1] OCTET STRING
+}
+
+-- NOTE: HostAddresses is always used as an OPTIONAL field and
+-- should not be empty.
+HostAddresses -- NOTE: subtly different from rfc1510,
+ -- but has a value mapping and encodes the same
+ ::= SEQUENCE OF HostAddress
+
+-- NOTE: AuthorizationData is always used as an OPTIONAL field and
+-- should not be empty.
+AuthorizationData ::= SEQUENCE OF SEQUENCE {
+ ad-type [0] AuthDataType, -- Int32,
+ ad-data [1] OCTET STRING
+}
+
+AuthDataType ::= Int32
+
+PA-DATA ::= SEQUENCE {
+ -- NOTE: first tag is [1], not [0]
+ padata-type [1] PADataType, -- Int32
+ padata-value [2] OCTET STRING -- might be encoded AP-REQ
+}
+
+PADataType ::= Int32
+
+--
+-- asn1ate doesn't support 'MAX' nor a lower range != 1.
+-- We'll use a custom encodeValue() hook for BitString
+-- in order to encode them with at least 32-Bit.
+--
+-- KerberosFlags ::= BIT STRING (SIZE (32..MAX))
+KerberosFlags ::= BIT STRING (SIZE (1..32))
+ -- minimum number of bits shall be sent,
+ -- but no fewer than 32
+
+EncryptedData ::= SEQUENCE {
+ etype [0] EncryptionType, --Int32 EncryptionType --
+ kvno [1] Int32 OPTIONAL,
+ cipher [2] OCTET STRING -- ciphertext
+}
+
+EncryptionKey ::= SEQUENCE {
+ keytype [0] EncryptionType, -- Int32 actually encryption type --
+ keyvalue [1] OCTET STRING
+}
+
+Checksum ::= SEQUENCE {
+ cksumtype [0] ChecksumType, -- Int32,
+ checksum [1] OCTET STRING
+}
+
+ChecksumType ::= Int32
+
+Ticket ::= [APPLICATION 1] SEQUENCE {
+ tkt-vno [0] INTEGER (5),
+ realm [1] Realm,
+ sname [2] PrincipalName,
+ enc-part [3] EncryptedData -- EncTicketPart
+}
+
+-- Encrypted part of ticket
+EncTicketPart ::= [APPLICATION 3] SEQUENCE {
+ flags [0] TicketFlags,
+ key [1] EncryptionKey,
+ crealm [2] Realm,
+ cname [3] PrincipalName,
+ transited [4] TransitedEncoding,
+ authtime [5] KerberosTime,
+ starttime [6] KerberosTime OPTIONAL,
+ endtime [7] KerberosTime,
+ renew-till [8] KerberosTime OPTIONAL,
+ caddr [9] HostAddresses OPTIONAL,
+ authorization-data [10] AuthorizationData OPTIONAL
+}
+
+-- encoded Transited field
+TransitedEncoding ::= SEQUENCE {
+ tr-type [0] Int32 -- must be registered --,
+ contents [1] OCTET STRING
+}
+
+TicketFlags ::= KerberosFlags
+ -- reserved(0),
+ -- forwardable(1),
+ -- forwarded(2),
+ -- proxiable(3),
+ -- proxy(4),
+ -- may-postdate(5),
+ -- postdated(6),
+ -- invalid(7),
+ -- renewable(8),
+ -- initial(9),
+ -- pre-authent(10),
+ -- hw-authent(11),
+-- the following are new since 1510
+ -- transited-policy-checked(12),
+ -- ok-as-delegate(13)
+ -- enc-pa-rep(15)
+
+AS-REQ ::= [APPLICATION 10] KDC-REQ
+
+TGS-REQ ::= [APPLICATION 12] KDC-REQ
+
+KDC-REQ ::= SEQUENCE {
+ -- NOTE: first tag is [1], not [0]
+ pvno [1] INTEGER (5) ,
+ msg-type [2] INTEGER (10 -- AS -- | 12 -- TGS --),
+ padata [3] SEQUENCE OF PA-DATA OPTIONAL
+ -- NOTE: not empty --,
+ req-body [4] KDC-REQ-BODY
+}
+
+KDC-REQ-BODY ::= SEQUENCE {
+ kdc-options [0] KDCOptions,
+ cname [1] PrincipalName OPTIONAL
+ -- Used only in AS-REQ --,
+ realm [2] Realm
+ -- Server's realm
+ -- Also client's in AS-REQ --,
+ sname [3] PrincipalName OPTIONAL,
+ from [4] KerberosTime OPTIONAL,
+ till [5] KerberosTime,
+ rtime [6] KerberosTime OPTIONAL,
+ nonce [7] UInt32,
+ etype [8] SEQUENCE OF EncryptionType -- Int32 - EncryptionType
+ -- in preference order --,
+ addresses [9] HostAddresses OPTIONAL,
+ enc-authorization-data [10] EncryptedData OPTIONAL
+ -- AuthorizationData --,
+ additional-tickets [11] SEQUENCE OF Ticket OPTIONAL
+ -- NOTE: not empty
+}
+
+EncryptionType ::= Int32
+
+KDCOptions ::= KerberosFlags
+ -- reserved(0),
+ -- forwardable(1),
+ -- forwarded(2),
+ -- proxiable(3),
+ -- proxy(4),
+ -- allow-postdate(5),
+ -- postdated(6),
+ -- unused7(7),
+ -- renewable(8),
+ -- unused9(9),
+ -- unused10(10),
+ -- opt-hardware-auth(11),
+ -- unused12(12),
+ -- unused13(13),
+-- Canonicalize is used in RFC 6806
+ -- canonicalize(15),
+-- 26 was unused in 1510
+ -- disable-transited-check(26),
+--
+ -- renewable-ok(27),
+ -- enc-tkt-in-skey(28),
+ -- renew(30),
+ -- validate(31)
+
+AS-REP ::= [APPLICATION 11] KDC-REP
+
+TGS-REP ::= [APPLICATION 13] KDC-REP
+
+KDC-REP ::= SEQUENCE {
+ pvno [0] INTEGER (5),
+ msg-type [1] INTEGER (11 -- AS -- | 13 -- TGS --),
+ padata [2] SEQUENCE OF PA-DATA OPTIONAL
+ -- NOTE: not empty --,
+ crealm [3] Realm,
+ cname [4] PrincipalName,
+ ticket [5] Ticket,
+ enc-part [6] EncryptedData
+ -- EncASRepPart or EncTGSRepPart,
+ -- as appropriate
+}
+
+EncASRepPart ::= [APPLICATION 25] EncKDCRepPart
+
+EncTGSRepPart ::= [APPLICATION 26] EncKDCRepPart
+
+EncKDCRepPart ::= SEQUENCE {
+ key [0] EncryptionKey,
+ last-req [1] LastReq,
+ nonce [2] UInt32,
+ key-expiration [3] KerberosTime OPTIONAL,
+ flags [4] TicketFlags,
+ authtime [5] KerberosTime,
+ starttime [6] KerberosTime OPTIONAL,
+ endtime [7] KerberosTime,
+ renew-till [8] KerberosTime OPTIONAL,
+ srealm [9] Realm,
+ sname [10] PrincipalName,
+ caddr [11] HostAddresses OPTIONAL,
+ encrypted-pa-data[12] METHOD-DATA OPTIONAL
+}
+
+LastReq ::= SEQUENCE OF SEQUENCE {
+ lr-type [0] Int32,
+ lr-value [1] KerberosTime
+}
+
+AP-REQ ::= [APPLICATION 14] SEQUENCE {
+ pvno [0] INTEGER (5),
+ msg-type [1] INTEGER (14),
+ ap-options [2] APOptions,
+ ticket [3] Ticket,
+ authenticator [4] EncryptedData -- Authenticator
+}
+
+APOptions ::= KerberosFlags
+ -- reserved(0),
+ -- use-session-key(1),
+ -- mutual-required(2)
+
+-- Unencrypted authenticator
+Authenticator ::= [APPLICATION 2] SEQUENCE {
+ authenticator-vno [0] INTEGER (5),
+ crealm [1] Realm,
+ cname [2] PrincipalName,
+ cksum [3] Checksum OPTIONAL,
+ cusec [4] Microseconds,
+ ctime [5] KerberosTime,
+ subkey [6] EncryptionKey OPTIONAL,
+ seq-number [7] UInt32 OPTIONAL,
+ authorization-data [8] AuthorizationData OPTIONAL
+}
+
+AP-REP ::= [APPLICATION 15] SEQUENCE {
+ pvno [0] INTEGER (5),
+ msg-type [1] INTEGER (15),
+ enc-part [2] EncryptedData -- EncAPRepPart
+}
+
+EncAPRepPart ::= [APPLICATION 27] SEQUENCE {
+ ctime [0] KerberosTime,
+ cusec [1] Microseconds,
+ subkey [2] EncryptionKey OPTIONAL,
+ seq-number [3] UInt32 OPTIONAL
+}
+
+KRB-SAFE ::= [APPLICATION 20] SEQUENCE {
+ pvno [0] INTEGER (5),
+ msg-type [1] INTEGER (20),
+ safe-body [2] KRB-SAFE-BODY,
+ cksum [3] Checksum
+}
+
+KRB-SAFE-BODY ::= SEQUENCE {
+ user-data [0] OCTET STRING,
+ timestamp [1] KerberosTime OPTIONAL,
+ usec [2] Microseconds OPTIONAL,
+ seq-number [3] UInt32 OPTIONAL,
+ s-address [4] HostAddress,
+ r-address [5] HostAddress OPTIONAL
+}
+
+KRB-PRIV ::= [APPLICATION 21] SEQUENCE {
+ pvno [0] INTEGER (5),
+ msg-type [1] INTEGER (21),
+ -- NOTE: there is no [2] tag
+ enc-part [3] EncryptedData -- EncKrbPrivPart
+}
+
+EncKrbPrivPart ::= [APPLICATION 28] SEQUENCE {
+ user-data [0] OCTET STRING,
+ timestamp [1] KerberosTime OPTIONAL,
+ usec [2] Microseconds OPTIONAL,
+ seq-number [3] UInt32 OPTIONAL,
+ s-address [4] HostAddress -- sender's addr --,
+ r-address [5] HostAddress OPTIONAL -- recip's addr
+}
+
+KRB-CRED ::= [APPLICATION 22] SEQUENCE {
+ pvno [0] INTEGER (5),
+ msg-type [1] INTEGER (22),
+ tickets [2] SEQUENCE OF Ticket,
+ enc-part [3] EncryptedData -- EncKrbCredPart
+}
+
+EncKrbCredPart ::= [APPLICATION 29] SEQUENCE {
+ ticket-info [0] SEQUENCE OF KrbCredInfo,
+ nonce [1] UInt32 OPTIONAL,
+ timestamp [2] KerberosTime OPTIONAL,
+ usec [3] Microseconds OPTIONAL,
+ s-address [4] HostAddress OPTIONAL,
+ r-address [5] HostAddress OPTIONAL
+}
+
+KrbCredInfo ::= SEQUENCE {
+ key [0] EncryptionKey,
+ prealm [1] Realm OPTIONAL,
+ pname [2] PrincipalName OPTIONAL,
+ flags [3] TicketFlags OPTIONAL,
+ authtime [4] KerberosTime OPTIONAL,
+ starttime [5] KerberosTime OPTIONAL,
+ endtime [6] KerberosTime OPTIONAL,
+ renew-till [7] KerberosTime OPTIONAL,
+ srealm [8] Realm OPTIONAL,
+ sname [9] PrincipalName OPTIONAL,
+ caddr [10] HostAddresses OPTIONAL
+}
+
+KRB-ERROR ::= [APPLICATION 30] SEQUENCE {
+ pvno [0] INTEGER (5),
+ msg-type [1] INTEGER (30),
+ ctime [2] KerberosTime OPTIONAL,
+ cusec [3] Microseconds OPTIONAL,
+ stime [4] KerberosTime,
+ susec [5] Microseconds,
+ error-code [6] Int32,
+ crealm [7] Realm OPTIONAL,
+ cname [8] PrincipalName OPTIONAL,
+ realm [9] Realm -- service realm --,
+ sname [10] PrincipalName -- service name --,
+ e-text [11] KerberosString OPTIONAL,
+ e-data [12] OCTET STRING OPTIONAL
+}
+
+METHOD-DATA ::= SEQUENCE OF PA-DATA
+
+--
+-- asn1ate doesn't support 'MAX'
+--
+-- TYPED-DATA ::= SEQUENCE SIZE (1..MAX) OF SEQUENCE {
+TYPED-DATA ::= SEQUENCE SIZE (1..256) OF SEQUENCE {
+ data-type [0] Int32,
+ data-value [1] OCTET STRING OPTIONAL
+}
+
+-- preauth stuff follows
+
+PA-ENC-TIMESTAMP ::= EncryptedData -- PA-ENC-TS-ENC
+
+PA-ENC-TS-ENC ::= SEQUENCE {
+ patimestamp [0] KerberosTime -- client's time --,
+ pausec [1] Microseconds OPTIONAL
+}
+
+ETYPE-INFO-ENTRY ::= SEQUENCE {
+ etype [0] EncryptionType, --Int32 EncryptionType --
+ salt [1] OCTET STRING OPTIONAL
+}
+
+ETYPE-INFO ::= SEQUENCE OF ETYPE-INFO-ENTRY
+
+ETYPE-INFO2-ENTRY ::= SEQUENCE {
+ etype [0] EncryptionType, --Int32 EncryptionType --
+ salt [1] KerberosString OPTIONAL,
+ s2kparams [2] OCTET STRING OPTIONAL
+}
+
+ETYPE-INFO2 ::= SEQUENCE SIZE (1..256) OF ETYPE-INFO2-ENTRY
+
+AD-IF-RELEVANT ::= AuthorizationData
+
+AD-KDCIssued ::= SEQUENCE {
+ ad-checksum [0] Checksum,
+ i-realm [1] Realm OPTIONAL,
+ i-sname [2] PrincipalName OPTIONAL,
+ elements [3] AuthorizationData
+}
+
+AD-AND-OR ::= SEQUENCE {
+ condition-count [0] Int32,
+ elements [1] AuthorizationData
+}
+
+AD-MANDATORY-FOR-KDC ::= AuthorizationData
+
+-- S4U
+
+PA-S4U2Self ::= SEQUENCE {
+ name [0] PrincipalName,
+ realm [1] Realm,
+ cksum [2] Checksum,
+ auth [3] KerberosString
+}
+
+-- PK-INIT
+
+-- (from RFC 1422)
+
+-- asn1ate doesn’t support ‘SIGNED’.
+-- CertificateRevocationList ::= SIGNED SEQUENCE {
+CertificateRevocationList ::= SEQUENCE {
+ signature AlgorithmIdentifier,
+ issuer Name,
+ lastUpdate UTCTime,
+ nextUpdate UTCTime,
+ revokedCertificates
+ SEQUENCE OF CRLEntry OPTIONAL
+}
+
+CRLEntry ::= SEQUENCE{
+ userCertificate SerialNumber,
+ revocationDate UTCTime
+}
+
+-- Not actually defined in an RFC.
+SerialNumber ::= INTEGER
+
+-- (from RFC 2315)
+
+SignedData-RFC2315 ::= SEQUENCE {
+ version Version-RFC2315,
+ digestAlgorithms DigestAlgorithmIdentifiers,
+ contentInfo ContentInfo,
+ certificates [0] IMPLICIT CertificateSet OPTIONAL,
+ crls [1] IMPLICIT RevocationInfoChoices OPTIONAL,
+ signerInfos SignerInfos
+}
+
+Version-RFC2315 ::= INTEGER
+
+ContentInfo ::= SEQUENCE {
+ contentType ContentType,
+ content
+ [0] EXPLICIT ANY DEFINED BY contentType OPTIONAL
+}
+
+ExtendedCertificatesAndCertificates ::=
+ SET OF ExtendedCertificateOrCertificate
+
+ExtendedCertificateOrCertificate ::= CHOICE {
+ certificate Certificate, -- X.509
+ extendedCertificate [0] IMPLICIT ExtendedCertificate
+}
+
+CertificateRevocationLists ::=
+ SET OF CertificateRevocationList
+
+-- (from RFC 3279)
+
+DomainParameters ::= SEQUENCE {
+ p INTEGER, -- odd prime, p=jq +1
+ g INTEGER, -- generator, g
+ -- Note: RFC 3279 does not mention that ‘q’ is optional.
+ q INTEGER OPTIONAL, -- factor of p-1
+ j INTEGER OPTIONAL, -- subgroup factor
+ validationParms ValidationParms OPTIONAL
+}
+
+ValidationParms ::= SEQUENCE {
+ seed BIT STRING,
+ pgenCounter INTEGER
+}
+
+DHPublicKey ::= INTEGER -- public key, y = g^x mod p
+
+dhpublicnumber OBJECT IDENTIFIER ::= {
+ iso(1) member-body(2)
+ us(840) ansi-x942(10046) number-type(2) 1
+}
+
+md2 OBJECT IDENTIFIER ::= {
+ iso(1) member-body(2) us(840) rsadsi(113549)
+ digestAlgorithm(2) 2
+}
+
+md5 OBJECT IDENTIFIER ::= {
+ iso(1) member-body(2) us(840) rsadsi(113549)
+ digestAlgorithm(2) 5
+}
+
+id-sha1 OBJECT IDENTIFIER ::= {
+ iso(1) identified-organization(3) oiw(14) secsig(3)
+ algorithms(2) 26
+}
+
+-- (from RFC 3281)
+
+AttributeCertificate ::= SEQUENCE {
+ acinfo AttributeCertificateInfo,
+ signatureAlgorithm AlgorithmIdentifier,
+ signatureValue BIT STRING
+}
+
+AttributeCertificateInfo ::= SEQUENCE {
+ version AttCertVersion, -- version is v2
+ holder Holder,
+ issuer AttCertIssuer,
+ signature AlgorithmIdentifier,
+ serialNumber CertificateSerialNumber,
+ attrCertValidityPeriod AttCertValidityPeriod,
+ attributes SEQUENCE OF Attribute,
+ issuerUniqueID UniqueIdentifier OPTIONAL,
+ extensions Extensions OPTIONAL
+}
+
+AttCertVersion ::= INTEGER { v2(1) }
+
+Holder ::= SEQUENCE {
+ baseCertificateID [0] IssuerSerial OPTIONAL,
+ entityName [1] GeneralNames OPTIONAL,
+ objectDigestInfo [2] ObjectDigestInfo OPTIONAL
+}
+
+ObjectDigestInfo ::= SEQUENCE {
+ digestedObjectType ENUMERATED {
+ publicKey (0),
+ publicKeyCert (1),
+ otherObjectTypes (2)
+ },
+ -- otherObjectTypes MUST NOT
+ -- be used in this profile
+ otherObjectTypeID OBJECT IDENTIFIER OPTIONAL,
+ digestAlgorithm AlgorithmIdentifier,
+ objectDigest BIT STRING
+}
+
+-- (from RFC 3370)
+
+sha1WithRSAEncryption OBJECT IDENTIFIER ::= {
+ iso(1)
+ member-body(2) us(840) rsadsi(113549) pkcs(1) pkcs-1(1) 5
+}
+
+-- (from RFC 4556)
+
+id-pkinit OBJECT IDENTIFIER ::= {
+ iso(1) identified-organization(3) dod(6) internet(1)
+ security(5) kerberosv5(2) pkinit (3)
+}
+
+id-pkinit-authData OBJECT IDENTIFIER ::= { id-pkinit 1 }
+
+id-pkinit-DHKeyData OBJECT IDENTIFIER ::= { id-pkinit 2 }
+
+id-pkinit-rkeyData OBJECT IDENTIFIER ::= { id-pkinit 3 }
+
+PA-PK-AS-REQ ::= SEQUENCE {
+ signedAuthPack [0] IMPLICIT OCTET STRING,
+ trustedCertifiers [1] SEQUENCE OF
+ ExternalPrincipalIdentifier OPTIONAL,
+ kdcPkId [2] IMPLICIT OCTET STRING
+ OPTIONAL,
+ ...
+}
+
+DHNonce ::= OCTET STRING
+
+ExternalPrincipalIdentifier ::= SEQUENCE {
+ subjectName [0] IMPLICIT OCTET STRING OPTIONAL,
+ issuerAndSerialNumber [1] IMPLICIT OCTET STRING OPTIONAL,
+ subjectKeyIdentifier [2] IMPLICIT OCTET STRING OPTIONAL,
+ ...
+}
+
+AuthPack ::= SEQUENCE {
+ pkAuthenticator [0] PKAuthenticator,
+ clientPublicValue [1] SubjectPublicKeyInfo OPTIONAL,
+ supportedCMSTypes [2] SEQUENCE OF AlgorithmIdentifier
+ OPTIONAL,
+ clientDHNonce [3] DHNonce OPTIONAL,
+ ...
+}
+
+PKAuthenticator ::= SEQUENCE {
+ cusec [0] INTEGER (0..999999),
+ ctime [1] KerberosTime,
+ nonce [2] INTEGER (0..4294967295),
+ paChecksum [3] OCTET STRING OPTIONAL,
+ freshnessToken [4] OCTET STRING OPTIONAL,
+ ...
+}
+
+TD-TRUSTED-CERTIFIERS ::= SEQUENCE OF ExternalPrincipalIdentifier
+TD-INVALID-CERTIFICATES ::= SEQUENCE OF ExternalPrincipalIdentifier
+
+KRB5PrincipalName ::= SEQUENCE {
+ realm [0] Realm,
+ principalName [1] PrincipalName
+}
+
+AD-INITIAL-VERIFIED-CAS ::= SEQUENCE OF ExternalPrincipalIdentifier
+
+PA-PK-AS-REP ::= CHOICE {
+ dhInfo [0] DHRepInfo,
+ encKeyPack [1] IMPLICIT OCTET STRING,
+ ...
+}
+
+DHRepInfo ::= SEQUENCE {
+ dhSignedData [0] IMPLICIT OCTET STRING,
+ serverDHNonce [1] DHNonce OPTIONAL,
+ ...
+}
+
+KDCDHKeyInfo ::= SEQUENCE {
+ subjectPublicKey [0] BIT STRING,
+ nonce [1] INTEGER (0..4294967295),
+ dhKeyExpiration [2] KerberosTime OPTIONAL,
+ ...
+}
+
+ReplyKeyPack ::= SEQUENCE {
+ replyKey [0] EncryptionKey,
+ asChecksum [1] Checksum,
+ ...
+}
+
+TD-DH-PARAMETERS ::= SEQUENCE OF AlgorithmIdentifier
+
+-- (from RFC 5755)
+
+Attribute ::= SEQUENCE {
+ type AttributeType,
+ values SET OF AttributeValue
+ -- at least one value is required
+}
+
+AttCertIssuer ::= CHOICE {
+ v1Form GeneralNames, -- MUST NOT be used in this
+ -- profile
+ v2Form [0] V2Form -- v2 only
+}
+
+V2Form ::= SEQUENCE {
+ issuerName GeneralNames OPTIONAL,
+ baseCertificateID [0] IssuerSerial OPTIONAL,
+ objectDigestInfo [1] ObjectDigestInfo OPTIONAL
+ -- issuerName MUST be present in this profile
+ -- baseCertificateID and objectDigestInfo MUST NOT
+ -- be present in this profile
+}
+
+IssuerSerial ::= SEQUENCE {
+ issuer GeneralNames,
+ serial CertificateSerialNumber,
+ issuerUID UniqueIdentifier OPTIONAL
+}
+
+AttCertValidityPeriod ::= SEQUENCE {
+ notBeforeTime GeneralizedTime,
+ notAfterTime GeneralizedTime
+}
+
+-- (from RFC 5280)
+
+id-ce OBJECT IDENTIFIER ::= { joint-iso-ccitt(2) ds(5) 29 }
+
+id-ce-subjectAltName OBJECT IDENTIFIER ::= { id-ce 17 }
+
+SubjectAltName ::= GeneralNames
+
+--
+-- asn1ate doesn’t support ‘MAX’.
+--
+-- GeneralNames ::= SEQUENCE SIZE (1..MAX) OF GeneralName
+GeneralNames ::= SEQUENCE SIZE (1..256) OF GeneralName
+
+GeneralName ::= CHOICE {
+ otherName [0] OtherName,
+ rfc822Name [1] IA5String,
+ dNSName [2] IA5String,
+ x400Address [3] ORAddress,
+ directoryName [4] Name,
+ ediPartyName [5] EDIPartyName,
+ uniformResourceIdentifier [6] IA5String,
+ iPAddress [7] OCTET STRING,
+ registeredID [8] OBJECT IDENTIFIER
+}
+
+OtherName ::= SEQUENCE {
+ type-id OBJECT IDENTIFIER,
+ value [0] EXPLICIT ANY DEFINED BY type-id
+}
+
+EDIPartyName ::= SEQUENCE {
+ nameAssigner [0] DirectoryString OPTIONAL,
+ partyName [1] DirectoryString
+}
+
+Name ::= CHOICE { -- only one possibility for now --
+ rdnSequence RDNSequence
+}
+
+DirectoryString ::= CHOICE {
+--
+-- asn1ate doesn’t support ‘MAX’.
+--
+-- teletexString TeletexString (SIZE (1..MAX)),
+-- printableString PrintableString (SIZE (1..MAX)),
+-- universalString UniversalString (SIZE (1..MAX)),
+-- utf8String UTF8String (SIZE (1..MAX)),
+-- bmpString BMPString (SIZE (1..MAX))
+ teletexString TeletexString (SIZE (1..256)),
+ printableString PrintableString (SIZE (1..256)),
+ universalString UniversalString (SIZE (1..256)),
+ utf8String UTF8String (SIZE (1..256)),
+ bmpString BMPString (SIZE (1..256))
+}
+
+Certificate ::= SEQUENCE {
+ tbsCertificate TBSCertificate,
+ signatureAlgorithm AlgorithmIdentifier,
+ signatureValue BIT STRING
+}
+
+TBSCertificate ::= SEQUENCE {
+--
+-- asn1ate doesn’t support ‘v1’.
+--
+-- version [0] EXPLICIT Version DEFAULT v1,
+ version [0] EXPLICIT Version DEFAULT 1,
+ serialNumber CertificateSerialNumber,
+ signature AlgorithmIdentifier,
+ issuer Name,
+ validity Validity,
+ subject Name,
+ subjectPublicKeyInfo SubjectPublicKeyInfo,
+ issuerUniqueID [1] IMPLICIT UniqueIdentifier OPTIONAL,
+ -- If present, version MUST be v2 or v3
+ subjectUniqueID [2] IMPLICIT UniqueIdentifier OPTIONAL,
+ -- If present, version MUST be v2 or v3
+ extensions [3] EXPLICIT Extensions OPTIONAL
+ -- If present, version MUST be v3
+}
+
+Version ::= INTEGER { v1(0), v2(1), v3(2) }
+
+CertificateSerialNumber ::= INTEGER
+
+Validity ::= SEQUENCE {
+ notBefore Time,
+ notAfter Time
+}
+
+Time ::= CHOICE {
+ utcTime UTCTime,
+ generalTime GeneralizedTime
+}
+
+UniqueIdentifier ::= BIT STRING
+
+AlgorithmIdentifier ::= SEQUENCE {
+ algorithm OBJECT IDENTIFIER,
+ parameters ANY DEFINED BY algorithm OPTIONAL
+}
+
+SubjectPublicKeyInfo ::= SEQUENCE {
+ algorithm AlgorithmIdentifier,
+ subjectPublicKey BIT STRING
+}
+
+RDNSequence ::= SEQUENCE OF RelativeDistinguishedName
+
+ORAddress ::= SEQUENCE {
+ built-in-standard-attributes BuiltInStandardAttributes,
+ built-in-domain-defined-attributes
+ BuiltInDomainDefinedAttributes OPTIONAL,
+ -- see also teletex-domain-defined-attributes
+ extension-attributes ExtensionAttributes OPTIONAL
+}
+
+BuiltInStandardAttributes ::= SEQUENCE {
+ country-name CountryName OPTIONAL,
+ administration-domain-name AdministrationDomainName OPTIONAL,
+ network-address [0] IMPLICIT NetworkAddress OPTIONAL,
+ -- see also extended-network-address
+ terminal-identifier [1] IMPLICIT TerminalIdentifier OPTIONAL,
+ private-domain-name [2] PrivateDomainName OPTIONAL,
+ organization-name [3] IMPLICIT OrganizationName OPTIONAL,
+ -- see also teletex-organization-name
+ numeric-user-identifier [4] IMPLICIT NumericUserIdentifier
+ OPTIONAL,
+ personal-name [5] IMPLICIT PersonalName OPTIONAL,
+ -- see also teletex-personal-name
+ organizational-unit-names [6] IMPLICIT OrganizationalUnitNames
+ OPTIONAL
+ -- see also teletex-organizational-unit-names
+}
+
+CountryName ::= [APPLICATION 1] CHOICE {
+ x121-dcc-code NumericString
+ (SIZE (ub-country-name-numeric-length)),
+ iso-3166-alpha2-code PrintableString
+ (SIZE (ub-country-name-alpha-length))
+}
+
+AdministrationDomainName ::= [APPLICATION 2] CHOICE {
+ numeric NumericString (SIZE (0..ub-domain-name-length)),
+ printable PrintableString (SIZE (0..ub-domain-name-length))
+}
+
+NetworkAddress ::= X121Address -- see also extended-network-address
+
+X121Address ::= NumericString (SIZE (1..ub-x121-address-length))
+
+TerminalIdentifier ::= PrintableString (SIZE (1..ub-terminal-id-length))
+
+PrivateDomainName ::= CHOICE {
+ numeric NumericString (SIZE (1..ub-domain-name-length)),
+ printable PrintableString (SIZE (1..ub-domain-name-length))
+}
+
+OrganizationName ::= PrintableString
+ (SIZE (1..ub-organization-name-length))
+ -- see also teletex-organization-name
+
+NumericUserIdentifier ::= NumericString
+ (SIZE (1..ub-numeric-user-id-length))
+
+PersonalName ::= SET {
+ surname [0] IMPLICIT PrintableString
+ (SIZE (1..ub-surname-length)),
+ given-name [1] IMPLICIT PrintableString
+ (SIZE (1..ub-given-name-length)) OPTIONAL,
+ initials [2] IMPLICIT PrintableString
+ (SIZE (1..ub-initials-length)) OPTIONAL,
+ generation-qualifier [3] IMPLICIT PrintableString
+ (SIZE (1..ub-generation-qualifier-length))
+ OPTIONAL
+}
+ -- see also teletex-personal-name
+
+OrganizationalUnitNames ::= SEQUENCE SIZE (1..ub-organizational-units)
+ OF OrganizationalUnitName
+ -- see also teletex-organizational-unit-names
+
+OrganizationalUnitName ::= PrintableString (SIZE
+ (1..ub-organizational-unit-name-length))
+
+BuiltInDomainDefinedAttributes ::= SEQUENCE SIZE
+ (1..ub-domain-defined-attributes) OF
+ BuiltInDomainDefinedAttribute
+
+BuiltInDomainDefinedAttribute ::= SEQUENCE {
+ type PrintableString (SIZE
+ (1..ub-domain-defined-attribute-type-length)),
+ value PrintableString (SIZE
+ (1..ub-domain-defined-attribute-value-length))
+}
+
+ExtensionAttributes ::= SET SIZE (1..ub-extension-attributes) OF
+ ExtensionAttribute
+
+ExtensionAttribute ::= SEQUENCE {
+ extension-attribute-type [0] IMPLICIT INTEGER
+ (0..ub-extension-attributes),
+ extension-attribute-value [1]
+ ANY DEFINED BY extension-attribute-type
+}
+
+--
+-- asn1ate doesn’t support ‘MAX’.
+--
+-- Extensions ::= SEQUENCE SIZE (1..MAX) OF Extension
+Extensions ::= SEQUENCE SIZE (1..256) OF Extension
+
+Extension ::= SEQUENCE {
+ extnID OBJECT IDENTIFIER,
+ critical BOOLEAN DEFAULT FALSE,
+ extnValue OCTET STRING
+ -- contains the DER encoding of an ASN.1 value
+ -- corresponding to the extension type identified
+ -- by extnID
+}
+
+CertificateList ::= SEQUENCE {
+ tbsCertList TBSCertList,
+ signatureAlgorithm AlgorithmIdentifier,
+ signatureValue BIT STRING
+}
+
+TBSCertList ::= SEQUENCE {
+ version Version OPTIONAL,
+ -- if present, MUST be v2
+ signature AlgorithmIdentifier,
+ issuer Name,
+ thisUpdate Time,
+ nextUpdate Time OPTIONAL,
+ revokedCertificates SEQUENCE OF SEQUENCE {
+ userCertificate CertificateSerialNumber,
+ revocationDate Time,
+ crlEntryExtensions Extensions OPTIONAL
+ -- if present, version MUST be v2
+ } OPTIONAL,
+ crlExtensions [0] EXPLICIT Extensions OPTIONAL
+ -- if present, version MUST be v2
+}
+
+ub-name INTEGER ::= 32768
+ub-common-name INTEGER ::= 64
+ub-locality-name INTEGER ::= 128
+ub-state-name INTEGER ::= 128
+ub-organization-name INTEGER ::= 64
+ub-organizational-unit-name INTEGER ::= 64
+ub-title INTEGER ::= 64
+ub-serial-number INTEGER ::= 64
+ub-match INTEGER ::= 128
+ub-emailaddress-length INTEGER ::= 255
+ub-common-name-length INTEGER ::= 64
+ub-country-name-alpha-length INTEGER ::= 2
+ub-country-name-numeric-length INTEGER ::= 3
+ub-domain-defined-attributes INTEGER ::= 4
+ub-domain-defined-attribute-type-length INTEGER ::= 8
+ub-domain-defined-attribute-value-length INTEGER ::= 128
+ub-domain-name-length INTEGER ::= 16
+ub-extension-attributes INTEGER ::= 256
+ub-e163-4-number-length INTEGER ::= 15
+ub-e163-4-sub-address-length INTEGER ::= 40
+ub-generation-qualifier-length INTEGER ::= 3
+ub-given-name-length INTEGER ::= 16
+ub-initials-length INTEGER ::= 5
+ub-integer-options INTEGER ::= 256
+ub-numeric-user-id-length INTEGER ::= 32
+ub-organization-name-length INTEGER ::= 64
+ub-organizational-unit-name-length INTEGER ::= 32
+ub-organizational-units INTEGER ::= 4
+ub-pds-name-length INTEGER ::= 16
+ub-pds-parameter-length INTEGER ::= 30
+ub-pds-physical-address-lines INTEGER ::= 6
+ub-postal-code-length INTEGER ::= 16
+ub-pseudonym INTEGER ::= 128
+ub-surname-length INTEGER ::= 40
+ub-terminal-id-length INTEGER ::= 24
+ub-unformatted-address-length INTEGER ::= 180
+ub-x121-address-length INTEGER ::= 16
+
+--
+-- asn1ate doesn’t support ‘MAX’.
+--
+-- RelativeDistinguishedName ::= SET SIZE (1..MAX) OF AttributeTypeAndValue
+RelativeDistinguishedName ::= SET SIZE (1..256) OF AttributeTypeAndValue
+
+AttributeTypeAndValue ::= SEQUENCE {
+ type AttributeType,
+ value AttributeValue
+}
+
+AttributeType ::= OBJECT IDENTIFIER
+
+AttributeValue ::= ANY -- DEFINED BY AttributeType
+
+-- (from RFC 5652)
+
+ContentType ::= OBJECT IDENTIFIER
+
+RevocationInfoChoices ::= SET OF RevocationInfoChoice
+
+RevocationInfoChoice ::= CHOICE {
+ crl CertificateList,
+ other [1] IMPLICIT OtherRevocationInfoFormat
+}
+
+OtherRevocationInfoFormat ::= SEQUENCE {
+ otherRevInfoFormat OBJECT IDENTIFIER,
+ otherRevInfo ANY DEFINED BY otherRevInfoFormat
+}
+
+AttributeCertificateV1 ::= SEQUENCE {
+ acInfo AttributeCertificateInfoV1,
+ signatureAlgorithm AlgorithmIdentifier,
+ signature BIT STRING
+}
+
+AttributeCertificateInfoV1 ::= SEQUENCE {
+--
+-- asn1ate doesn’t support ‘v1’.
+--
+-- version AttCertVersionV1 DEFAULT v1,
+ version AttCertVersionV1 DEFAULT 1,
+ subject CHOICE {
+ baseCertificateID [0] IssuerSerial,
+ -- associated with a Public Key Certificate
+ subjectName [1] GeneralNames },
+ -- associated with a name
+ issuer GeneralNames,
+ signature AlgorithmIdentifier,
+ serialNumber CertificateSerialNumber,
+ attCertValidityPeriod AttCertValidityPeriod,
+ attributes SEQUENCE OF Attribute,
+ issuerUniqueID UniqueIdentifier OPTIONAL,
+ extensions Extensions OPTIONAL
+}
+
+AttCertVersionV1 ::= INTEGER { v1(0) }
+
+ExtendedCertificate ::= SEQUENCE {
+ extendedCertificateInfo ExtendedCertificateInfo,
+ signatureAlgorithm SignatureAlgorithmIdentifier,
+ signature Signature
+}
+
+ExtendedCertificateInfo ::= SEQUENCE {
+ version CMSVersion,
+ certificate Certificate,
+ attributes UnauthAttributes
+}
+
+CertificateChoices ::= CHOICE {
+ certificate Certificate,
+ extendedCertificate [0] IMPLICIT ExtendedCertificate, -- Obsolete
+ v1AttrCert [1] IMPLICIT AttributeCertificateV1, -- Obsolete
+ v2AttrCert [2] IMPLICIT AttributeCertificateV2,
+ other [3] IMPLICIT OtherCertificateFormat
+}
+
+AttributeCertificateV2 ::= AttributeCertificate
+
+OtherCertificateFormat ::= SEQUENCE {
+ otherCertFormat OBJECT IDENTIFIER,
+ otherCert ANY DEFINED BY otherCertFormat
+}
+
+CertificateSet ::= SET OF CertificateChoices
+
+IssuerAndSerialNumber ::= SEQUENCE {
+ issuer Name,
+ serialNumber CertificateSerialNumber
+}
+
+CMSVersion ::= INTEGER { v0(0), v1(1), v2(2), v3(3), v4(4), v5(5) }
+
+SignerInfo ::= SEQUENCE {
+ version CMSVersion,
+ sid SignerIdentifier,
+ digestAlgorithm DigestAlgorithmIdentifier,
+ signedAttrs [0] IMPLICIT SignedAttributes OPTIONAL,
+ signatureAlgorithm SignatureAlgorithmIdentifier,
+ signature SignatureValue,
+ unsignedAttrs [1] IMPLICIT UnsignedAttributes OPTIONAL
+}
+
+SignerIdentifier ::= CHOICE {
+ issuerAndSerialNumber IssuerAndSerialNumber,
+ subjectKeyIdentifier [0] SubjectKeyIdentifier
+}
+
+SubjectKeyIdentifier ::= OCTET STRING
+
+--
+-- asn1ate doesn’t support ‘MAX’.
+--
+-- SignedAttributes ::= SET SIZE (1..MAX) OF Attribute
+SignedAttributes ::= SET SIZE (1..256) OF Attribute
+
+--
+-- asn1ate doesn’t support ‘MAX’.
+--
+-- UnsignedAttributes ::= SET SIZE (1..MAX) OF Attribute
+UnsignedAttributes ::= SET SIZE (1..256) OF Attribute
+
+SignatureValue ::= OCTET STRING
+
+SignedData ::= SEQUENCE {
+ version CMSVersion,
+ digestAlgorithms DigestAlgorithmIdentifiers,
+ encapContentInfo EncapsulatedContentInfo,
+ certificates [0] IMPLICIT CertificateSet OPTIONAL,
+ crls [1] IMPLICIT RevocationInfoChoices OPTIONAL,
+ signerInfos SignerInfos
+}
+
+DigestAlgorithmIdentifiers ::= SET OF DigestAlgorithmIdentifier
+
+SignerInfos ::= SET OF SignerInfo
+
+EncapsulatedContentInfo ::= SEQUENCE {
+ eContentType ContentType,
+ eContent [0] EXPLICIT OCTET STRING OPTIONAL
+}
+
+EnvelopedData ::= SEQUENCE {
+ version CMSVersion,
+ originatorInfo [0] IMPLICIT OriginatorInfo OPTIONAL,
+ recipientInfos RecipientInfos,
+ encryptedContentInfo EncryptedContentInfo,
+ unprotectedAttrs [1] IMPLICIT UnprotectedAttributes OPTIONAL
+}
+
+OriginatorInfo ::= SEQUENCE {
+ certs [0] IMPLICIT CertificateSet OPTIONAL,
+ crls [1] IMPLICIT RevocationInfoChoices OPTIONAL
+}
+
+--
+-- asn1ate doesn’t support ‘MAX’.
+--
+-- RecipientInfos ::= SET SIZE (1..MAX) OF RecipientInfo
+RecipientInfos ::= SET SIZE (1..256) OF RecipientInfo
+
+EncryptedContentInfo ::= SEQUENCE {
+ contentType ContentType,
+ contentEncryptionAlgorithm ContentEncryptionAlgorithmIdentifier,
+ encryptedContent [0] IMPLICIT EncryptedContent OPTIONAL
+}
+
+EncryptedContent ::= OCTET STRING
+
+--
+-- asn1ate doesn’t support ‘MAX’.
+--
+-- UnprotectedAttributes ::= SET SIZE (1..MAX) OF Attribute
+UnprotectedAttributes ::= SET SIZE (1..256) OF Attribute
+
+RecipientInfo ::= CHOICE {
+ ktri KeyTransRecipientInfo,
+ kari [1] KeyAgreeRecipientInfo,
+ kekri [2] KEKRecipientInfo,
+ pwri [3] PasswordRecipientInfo,
+ ori [4] OtherRecipientInfo
+}
+
+EncryptedKey ::= OCTET STRING
+
+KeyTransRecipientInfo ::= SEQUENCE {
+ version CMSVersion, -- always set to 0 or 2
+ rid RecipientIdentifier,
+ keyEncryptionAlgorithm KeyEncryptionAlgorithmIdentifier,
+ encryptedKey EncryptedKey
+}
+
+RecipientIdentifier ::= CHOICE {
+ issuerAndSerialNumber IssuerAndSerialNumber,
+ subjectKeyIdentifier [0] SubjectKeyIdentifier
+}
+
+KeyAgreeRecipientInfo ::= SEQUENCE {
+ version CMSVersion, -- always set to 3
+ originator [0] EXPLICIT OriginatorIdentifierOrKey,
+ ukm [1] EXPLICIT UserKeyingMaterial OPTIONAL,
+ keyEncryptionAlgorithm KeyEncryptionAlgorithmIdentifier,
+ recipientEncryptedKeys RecipientEncryptedKeys
+}
+
+OriginatorIdentifierOrKey ::= CHOICE {
+ issuerAndSerialNumber IssuerAndSerialNumber,
+ subjectKeyIdentifier [0] SubjectKeyIdentifier,
+ originatorKey [1] OriginatorPublicKey
+}
+
+OriginatorPublicKey ::= SEQUENCE {
+ algorithm AlgorithmIdentifier,
+ publicKey BIT STRING
+}
+
+RecipientEncryptedKeys ::= SEQUENCE OF RecipientEncryptedKey
+
+RecipientEncryptedKey ::= SEQUENCE {
+ rid KeyAgreeRecipientIdentifier,
+ encryptedKey EncryptedKey
+}
+
+KeyAgreeRecipientIdentifier ::= CHOICE {
+ issuerAndSerialNumber IssuerAndSerialNumber,
+ rKeyId [0] IMPLICIT RecipientKeyIdentifier
+}
+
+RecipientKeyIdentifier ::= SEQUENCE {
+ subjectKeyIdentifier SubjectKeyIdentifier,
+ date GeneralizedTime OPTIONAL,
+ other OtherKeyAttribute OPTIONAL
+}
+
+KEKRecipientInfo ::= SEQUENCE {
+ version CMSVersion, -- always set to 4
+ kekid KEKIdentifier,
+ keyEncryptionAlgorithm KeyEncryptionAlgorithmIdentifier,
+ encryptedKey EncryptedKey
+}
+
+KEKIdentifier ::= SEQUENCE {
+ keyIdentifier OCTET STRING,
+ date GeneralizedTime OPTIONAL,
+ other OtherKeyAttribute OPTIONAL
+}
+
+PasswordRecipientInfo ::= SEQUENCE {
+ version CMSVersion, -- always set to 0
+ keyDerivationAlgorithm [0] KeyDerivationAlgorithmIdentifier
+ OPTIONAL,
+ keyEncryptionAlgorithm KeyEncryptionAlgorithmIdentifier,
+ encryptedKey EncryptedKey
+}
+
+OtherRecipientInfo ::= SEQUENCE {
+ oriType OBJECT IDENTIFIER,
+ oriValue ANY DEFINED BY oriType
+}
+
+UserKeyingMaterial ::= OCTET STRING
+
+OtherKeyAttribute ::= SEQUENCE {
+ keyAttrId OBJECT IDENTIFIER,
+ keyAttr ANY DEFINED BY keyAttrId OPTIONAL
+}
+
+MessageDigest ::= OCTET STRING
+
+id-data OBJECT IDENTIFIER ::= {
+ iso(1) member-body(2)
+ us(840) rsadsi(113549) pkcs(1) pkcs7(7) 1
+}
+
+id-signedData OBJECT IDENTIFIER ::= {
+ iso(1) member-body(2)
+ us(840) rsadsi(113549) pkcs(1) pkcs7(7) 2
+}
+
+id-envelopedData OBJECT IDENTIFIER ::= {
+ iso(1) member-body(2)
+ us(840) rsadsi(113549) pkcs(1) pkcs7(7) 3
+}
+
+id-contentType OBJECT IDENTIFIER ::= {
+ iso(1) member-body(2)
+ us(840) rsadsi(113549) pkcs(1) pkcs9(9) 3
+}
+
+id-messageDigest OBJECT IDENTIFIER ::= {
+ iso(1) member-body(2)
+ us(840) rsadsi(113549) pkcs(1) pkcs9(9) 4
+}
+
+--
+-- asn1ate doesn’t support ‘MAX’.
+--
+-- UnauthAttributes ::= SET SIZE (1..MAX) OF Attribute
+UnauthAttributes ::= SET SIZE (1..256) OF Attribute
+
+DigestAlgorithmIdentifier ::= AlgorithmIdentifier
+
+SignatureAlgorithmIdentifier ::= AlgorithmIdentifier
+
+KeyEncryptionAlgorithmIdentifier ::= AlgorithmIdentifier
+
+KeyDerivationAlgorithmIdentifier ::= AlgorithmIdentifier
+
+ContentEncryptionAlgorithmIdentifier ::= AlgorithmIdentifier
+
+Signature ::= BIT STRING
+
+-- Other PK-INIT definitions
+
+id-pkcs1-sha256WithRSAEncryption OBJECT IDENTIFIER ::= {
+ iso(1) member-body(2)
+ us(840) rsadsi(113549) pkcs(1)
+ label-less(1) label-less(11)
+}
+
+MS-UPN-SAN ::= UTF8String
+
+CMSCBCParameter ::= OCTET STRING
+
+-- (from MS-WCCE)
+
+szOID-NTDS-CA-SECURITY-EXT OBJECT IDENTIFIER ::= {
+ iso(1) org(3) dod(6) internet(1) private(4) enterprise(1)
+ microsoft(311) directory-service(25) 2
+}
+
+szOID-NTDS-OBJECTSID OBJECT IDENTIFIER ::= {
+ iso(1) org(3) dod(6) internet(1) private(4) enterprise(1)
+ microsoft(311) directory-service(25) 2 1
+}
+
+-- (from RFC 8017)
+
+rsaEncryption OBJECT IDENTIFIER ::= {
+ iso(1)
+ member-body(2) us(840) rsadsi(113549) pkcs(1) pkcs-1(1) 1
+}
+
+id-sha512 OBJECT IDENTIFIER ::= {
+ joint-iso-itu-t (2) country (16) us (840) organization (1)
+ gov (101) csor (3) nistalgorithm (4) hashalgs (2) 3
+}
+
+-- (from RFC 8018)
+
+nistAlgorithms OBJECT IDENTIFIER ::= {joint-iso-itu-t(2) country(16)
+ us(840) organization(1)
+ gov(101) csor(3) 4}
+
+aes OBJECT IDENTIFIER ::= { nistAlgorithms 1 }
+
+aes256-CBC-PAD OBJECT IDENTIFIER ::= { aes 42 }
+
+rsadsi OBJECT IDENTIFIER ::= {iso(1) member-body(2) us(840) 113549}
+
+encryptionAlgorithm OBJECT IDENTIFIER ::= {rsadsi 3}
+
+des-EDE3-CBC OBJECT IDENTIFIER ::= {encryptionAlgorithm 7}
+
+-- Windows 2000 PK-INIT definitions
+
+PKAuthenticator-Win2k ::= SEQUENCE {
+ kdcName [0] PrincipalName,
+ kdcRealm [1] Realm,
+ cusec [2] INTEGER (0..4294967295),
+ ctime [3] KerberosTime,
+ nonce [4] INTEGER (-2147483648..2147483647)
+}
+
+AuthPack-Win2k ::= SEQUENCE {
+ pkAuthenticator [0] PKAuthenticator-Win2k
+}
+
+TrustedCA-Win2k ::= CHOICE {
+ caName [1] ANY,
+ issuerAndSerial [2] IssuerAndSerialNumber
+}
+
+PA-PK-AS-REQ-Win2k ::= SEQUENCE {
+ signedAuthPack [0] IMPLICIT OCTET STRING,
+ trustedCertifiers [2] SEQUENCE OF TrustedCA-Win2k OPTIONAL,
+ kdcCert [3] IMPLICIT OCTET STRING OPTIONAL,
+ encryptionCert [4] IMPLICIT OCTET STRING OPTIONAL,
+ ...
+}
+
+PA-PK-AS-REP-Win2k ::= CHOICE {
+ dhSignedData [0] IMPLICIT OCTET STRING,
+ encKeyPack [1] IMPLICIT OCTET STRING
+}
+
+ReplyKeyPack-Win2k ::= SEQUENCE {
+ replyKey [0] EncryptionKey,
+ nonce [1] INTEGER (-2147483648..2147483647),
+ ...
+}
+
+--
+
+id-pkinit-ms-san OBJECT IDENTIFIER ::= {
+ iso(1) org(3) dod(6) internet(1) private(4) enterprise(1)
+ microsoft(311) 20 2 3
+}
+
+kdc-authentication OBJECT IDENTIFIER ::= { id-pkinit keyPurposeKdc(5) }
+
+smartcard-logon OBJECT IDENTIFIER ::= {
+ iso(1) org(3) dod(6) internet(1) private(4) enterprise(1)
+ microsoft(311) 20 2 2
+}
+
+CMSAttributes ::= SET OF Attribute
+
+--
+--
+-- MS-KILE Start
+
+KERB-ERROR-DATA ::= SEQUENCE {
+ data-type [1] KerbErrorDataType,
+ data-value [2] OCTET STRING OPTIONAL
+}
+
+KerbErrorDataType ::= INTEGER
+
+KERB-PA-PAC-REQUEST ::= SEQUENCE {
+ include-pac[0] BOOLEAN --If TRUE, and no pac present, include PAC.
+ --If FALSE, and PAC present, remove PAC
+}
+
+KERB-LOCAL ::= OCTET STRING -- Implementation-specific data which MUST be
+ -- ignored if Kerberos client is not local.
+
+KERB-AD-RESTRICTION-ENTRY ::= SEQUENCE {
+ restriction-type [0] Int32,
+ restriction [1] OCTET STRING -- LSAP_TOKEN_INFO_INTEGRITY structure
+}
+
+PA-SUPPORTED-ENCTYPES ::= Int32 -- Supported Encryption Types Bit Field --
+
+PACOptionFlags ::= KerberosFlags -- Claims (0)
+ -- Branch Aware (1)
+ -- Forward to Full DC (2)
+ -- Resource Based Constrained Delegation (3)
+PA-PAC-OPTIONS ::= SEQUENCE {
+ options [0] PACOptionFlags
+}
+-- Note: KerberosFlags ::= BIT STRING (SIZE (32..MAX))
+-- minimum number of bits shall be sent, but no fewer than 32
+
+KERB-KEY-LIST-REQ ::= SEQUENCE OF EncryptionType -- Int32 encryption type --
+KERB-KEY-LIST-REP ::= SEQUENCE OF EncryptionKey
+
+FastOptions ::= BIT STRING {
+ reserved(0),
+ hide-client-names(1),
+ kdc-follow-referrals(16)
+}
+
+KrbFastReq ::= SEQUENCE {
+ fast-options [0] FastOptions,
+ padata [1] SEQUENCE OF PA-DATA,
+ req-body [2] KDC-REQ-BODY,
+ ...
+}
+
+KrbFastArmor ::= SEQUENCE {
+ armor-type [0] Int32,
+ armor-value [1] OCTET STRING,
+ ...
+}
+
+KrbFastArmoredReq ::= SEQUENCE {
+ armor [0] KrbFastArmor OPTIONAL,
+ req-checksum [1] Checksum,
+ enc-fast-req [2] EncryptedData -- KrbFastReq --
+}
+
+PA-FX-FAST-REQUEST ::= CHOICE {
+ armored-data [0] KrbFastArmoredReq,
+ ...
+}
+
+KrbFastFinished ::= SEQUENCE {
+ timestamp [0] KerberosTime,
+ usec [1] Int32,
+ crealm [2] Realm,
+ cname [3] PrincipalName,
+ ticket-checksum [4] Checksum,
+ ...
+}
+
+KrbFastResponse ::= SEQUENCE {
+ padata [0] SEQUENCE OF PA-DATA,
+ -- padata typed holes.
+ strengthen-key [1] EncryptionKey OPTIONAL,
+ -- This, if present, strengthens the reply key for AS and
+ -- TGS. MUST be present for TGS.
+ -- MUST be absent in KRB-ERROR.
+ finished [2] KrbFastFinished OPTIONAL,
+ -- Present in AS or TGS reply; absent otherwise.
+ nonce [3] UInt32,
+ -- Nonce from the client request.
+ ...
+}
+
+KrbFastArmoredRep ::= SEQUENCE {
+ enc-fast-rep [0] EncryptedData, -- KrbFastResponse --
+ ...
+}
+
+PA-FX-FAST-REPLY ::= CHOICE {
+ armored-data [0] KrbFastArmoredRep,
+ ...
+}
+
+ChangePasswdDataMS ::= SEQUENCE {
+ newpasswd [0] OCTET STRING,
+ targname [1] PrincipalName OPTIONAL,
+ targrealm [2] Realm OPTIONAL
+}
+
+-- MS-KILE End
+--
+--
+
+--
+--
+-- prettyPrint values
+--
+--
+
+NameTypeValues ::= INTEGER { -- Int32
+ kRB5-NT-UNKNOWN(0), -- Name type not known
+ kRB5-NT-PRINCIPAL(1), -- Just the name of the principal as in
+ kRB5-NT-SRV-INST(2), -- Service and other unique instance (krbtgt)
+ kRB5-NT-SRV-HST(3), -- Service with host name as instance
+ kRB5-NT-SRV-XHST(4), -- Service with host as remaining components
+ kRB5-NT-UID(5), -- Unique ID
+ kRB5-NT-X500-PRINCIPAL(6), -- PKINIT
+ kRB5-NT-SMTP-NAME(7), -- Name in form of SMTP email name
+ kRB5-NT-ENTERPRISE-PRINCIPAL(10), -- Windows 2000 UPN
+ kRB5-NT-WELLKNOWN(11), -- Wellknown
+ kRB5-NT-ENT-PRINCIPAL-AND-ID(-130), -- Windows 2000 UPN and SID
+ kRB5-NT-MS-PRINCIPAL(-128), -- NT 4 style name
+ kRB5-NT-MS-PRINCIPAL-AND-ID(-129) -- NT style name and SID
+}
+NameTypeSequence ::= SEQUENCE {
+ dummy [0] NameTypeValues
+}
+
+TicketFlagsValues ::= BIT STRING { -- KerberosFlags
+ reserved(0),
+ forwardable(1),
+ forwarded(2),
+ proxiable(3),
+ proxy(4),
+ may-postdate(5),
+ postdated(6),
+ invalid(7),
+ renewable(8),
+ initial(9),
+ pre-authent(10),
+ hw-authent(11),
+-- the following are new since 1510
+ transited-policy-checked(12),
+ ok-as-delegate(13),
+ enc-pa-rep(15)
+}
+TicketFlagsSequence ::= SEQUENCE {
+ dummy [0] TicketFlagsValues
+}
+
+KDCOptionsValues ::= BIT STRING { -- KerberosFlags
+ reserved(0),
+ forwardable(1),
+ forwarded(2),
+ proxiable(3),
+ proxy(4),
+ allow-postdate(5),
+ postdated(6),
+ unused7(7),
+ renewable(8),
+ unused9(9),
+ unused10(10),
+ opt-hardware-auth(11),
+ unused12(12),
+ unused13(13),
+ cname-in-addl-tkt(14),
+-- Canonicalize is used by RFC 6806
+ canonicalize(15),
+-- 26 was unused in 1510
+ disable-transited-check(26),
+--
+ renewable-ok(27),
+ enc-tkt-in-skey(28),
+ renew(30),
+ validate(31)
+}
+KDCOptionsSequence ::= SEQUENCE {
+ dummy [0] KDCOptionsValues
+}
+
+APOptionsValues ::= BIT STRING { -- KerberosFlags
+ reserved(0),
+ use-session-key(1),
+ mutual-required(2)
+}
+APOptionsSequence ::= SEQUENCE {
+ dummy [0] APOptionsValues
+}
+
+MessageTypeValues ::= INTEGER {
+ krb-as-req(10), -- Request for initial authentication
+ krb-as-rep(11), -- Response to KRB_AS_REQ request
+ krb-tgs-req(12), -- Request for authentication based on TGT
+ krb-tgs-rep(13), -- Response to KRB_TGS_REQ request
+ krb-ap-req(14), -- application request to server
+ krb-ap-rep(15), -- Response to KRB_AP_REQ_MUTUAL
+ krb-safe(20), -- Safe (checksummed) application message
+ krb-priv(21), -- Private (encrypted) application message
+ krb-cred(22), -- Private (encrypted) message to forward credentials
+ krb-error(30) -- Error response
+}
+MessageTypeSequence ::= SEQUENCE {
+ dummy [0] MessageTypeValues
+}
+
+PADataTypeValues ::= INTEGER {
+ kRB5-PADATA-NONE(0),
+ -- kRB5-PADATA-TGS-REQ(1),
+ -- kRB5-PADATA-AP-REQ(1),
+ kRB5-PADATA-KDC-REQ(1),
+ kRB5-PADATA-ENC-TIMESTAMP(2),
+ kRB5-PADATA-PW-SALT(3),
+ kRB5-PADATA-ENC-UNIX-TIME(5),
+ kRB5-PADATA-SANDIA-SECUREID(6),
+ kRB5-PADATA-SESAME(7),
+ kRB5-PADATA-OSF-DCE(8),
+ kRB5-PADATA-CYBERSAFE-SECUREID(9),
+ kRB5-PADATA-AFS3-SALT(10),
+ kRB5-PADATA-ETYPE-INFO(11),
+ kRB5-PADATA-SAM-CHALLENGE(12), -- (sam/otp)
+ kRB5-PADATA-SAM-RESPONSE(13), -- (sam/otp)
+ kRB5-PADATA-PK-AS-REQ-19(14), -- (PKINIT-19)
+ kRB5-PADATA-PK-AS-REP-19(15), -- (PKINIT-19)
+ -- kRB5-PADATA-PK-AS-REQ-WIN(15), - (PKINIT - old number)
+ kRB5-PADATA-PK-AS-REQ(16), -- (PKINIT-25)
+ kRB5-PADATA-PK-AS-REP(17), -- (PKINIT-25)
+ kRB5-PADATA-PA-PK-OCSP-RESPONSE(18),
+ kRB5-PADATA-ETYPE-INFO2(19),
+ -- kRB5-PADATA-USE-SPECIFIED-KVNO(20),
+ kRB5-PADATA-SVR-REFERRAL-INFO(20), --- old ms referral number
+ kRB5-PADATA-SAM-REDIRECT(21), -- (sam/otp)
+ kRB5-PADATA-GET-FROM-TYPED-DATA(22),
+ kRB5-PADATA-SAM-ETYPE-INFO(23),
+ kRB5-PADATA-SERVER-REFERRAL(25),
+ kRB5-PADATA-ALT-PRINC(24), -- (crawdad@fnal.gov)
+ kRB5-PADATA-SAM-CHALLENGE2(30), -- (kenh@pobox.com)
+ kRB5-PADATA-SAM-RESPONSE2(31), -- (kenh@pobox.com)
+ kRB5-PA-EXTRA-TGT(41), -- Reserved extra TGT
+ kRB5-PADATA-TD-KRB-PRINCIPAL(102), -- PrincipalName
+ kRB5-PADATA-PK-TD-TRUSTED-CERTIFIERS(104), -- PKINIT
+ kRB5-PADATA-PK-TD-CERTIFICATE-INDEX(105), -- PKINIT
+ kRB5-PADATA-TD-APP-DEFINED-ERROR(106), -- application specific
+ kRB5-PADATA-TD-REQ-NONCE(107), -- INTEGER
+ kRB5-PADATA-TD-REQ-SEQ(108), -- INTEGER
+ kRB5-PADATA-PA-PAC-REQUEST(128), -- jbrezak@exchange.microsoft.com
+ kRB5-PADATA-FOR-USER(129), -- MS-KILE
+ kRB5-PADATA-FOR-X509-USER(130), -- MS-KILE
+ kRB5-PADATA-FOR-CHECK-DUPS(131), -- MS-KILE
+ kRB5-PADATA-AS-CHECKSUM(132), -- MS-KILE
+ -- kRB5-PADATA-PK-AS-09-BINDING(132), - client send this to
+ -- tell KDC that is supports
+ -- the asCheckSum in the
+ -- PK-AS-REP
+ kRB5-PADATA-FX-COOKIE(133), -- krb-wg-preauth-framework
+ kRB5-PADATA-AUTHENTICATION-SET(134), -- krb-wg-preauth-framework
+ kRB5-PADATA-AUTH-SET-SELECTED(135), -- krb-wg-preauth-framework
+ kRB5-PADATA-FX-FAST(136), -- krb-wg-preauth-framework
+ kRB5-PADATA-FX-ERROR(137), -- krb-wg-preauth-framework
+ kRB5-PADATA-ENCRYPTED-CHALLENGE(138), -- krb-wg-preauth-framework
+ kRB5-PADATA-OTP-CHALLENGE(141), -- (gareth.richards@rsa.com)
+ kRB5-PADATA-OTP-REQUEST(142), -- (gareth.richards@rsa.com)
+        kBB5-PADATA-OTP-CONFIRM(143), -- (gareth.richards@rsa.com); NOTE: ‘kBB5’ looks like a typo for ‘kRB5’ — renaming would change the generated Python symbol, so confirm no callers reference it first
+ kRB5-PADATA-OTP-PIN-CHANGE(144), -- (gareth.richards@rsa.com)
+ kRB5-PADATA-EPAK-AS-REQ(145),
+ kRB5-PADATA-EPAK-AS-REP(146),
+ kRB5-PADATA-PKINIT-KX(147), -- krb-wg-anon
+ kRB5-PADATA-PKU2U-NAME(148), -- zhu-pku2u
+ kRB5-PADATA-REQ-ENC-PA-REP(149), --
+ kRB5-PADATA-AS-FRESHNESS(150), -- RFC 8070
+ kRB5-PADATA-SUPPORTED-ETYPES(165), -- MS-KILE
+ kRB5-PADATA-PAC-OPTIONS(167), -- MS-KILE
+ kRB5-PADATA-GSS(655) -- gss-preauth
+}
+PADataTypeSequence ::= SEQUENCE {
+ dummy [0] PADataTypeValues
+}
+
+AuthDataTypeValues ::= INTEGER {
+ kRB5-AUTHDATA-IF-RELEVANT(1),
+ kRB5-AUTHDATA-INTENDED-FOR-SERVER(2),
+ kRB5-AUTHDATA-INTENDED-FOR-APPLICATION-CLASS(3),
+ kRB5-AUTHDATA-KDC-ISSUED(4),
+ kRB5-AUTHDATA-AND-OR(5),
+ kRB5-AUTHDATA-MANDATORY-TICKET-EXTENSIONS(6),
+ kRB5-AUTHDATA-IN-TICKET-EXTENSIONS(7),
+ kRB5-AUTHDATA-MANDATORY-FOR-KDC(8),
+ kRB5-AUTHDATA-INITIAL-VERIFIED-CAS(9),
+ kRB5-AUTHDATA-OSF-DCE(64),
+ kRB5-AUTHDATA-SESAME(65),
+ kRB5-AUTHDATA-OSF-DCE-PKI-CERTID(66),
+ kRB5-AUTHDATA-WIN2K-PAC(128),
+ kRB5-AUTHDATA-GSS-API-ETYPE-NEGOTIATION(129), -- Authenticator only
+ kRB5-AUTHDATA-SIGNTICKET-OLDER(-17),
+ kRB5-AUTHDATA-SIGNTICKET-OLD(142),
+ kRB5-AUTHDATA-SIGNTICKET(512)
+}
+AuthDataTypeSequence ::= SEQUENCE {
+ dummy [0] AuthDataTypeValues
+}
+
+ChecksumTypeValues ::= INTEGER {
+ kRB5-CKSUMTYPE-NONE(0),
+ kRB5-CKSUMTYPE-CRC32(1),
+ kRB5-CKSUMTYPE-RSA-MD4(2),
+ kRB5-CKSUMTYPE-RSA-MD4-DES(3),
+ kRB5-CKSUMTYPE-DES-MAC(4),
+ kRB5-CKSUMTYPE-DES-MAC-K(5),
+ kRB5-CKSUMTYPE-RSA-MD4-DES-K(6),
+ kRB5-CKSUMTYPE-RSA-MD5(7),
+ kRB5-CKSUMTYPE-RSA-MD5-DES(8),
+ kRB5-CKSUMTYPE-RSA-MD5-DES3(9),
+ kRB5-CKSUMTYPE-SHA1-OTHER(10),
+ kRB5-CKSUMTYPE-HMAC-SHA1-DES3(12),
+ kRB5-CKSUMTYPE-SHA1(14),
+ kRB5-CKSUMTYPE-HMAC-SHA1-96-AES-128(15),
+ kRB5-CKSUMTYPE-HMAC-SHA1-96-AES-256(16),
+ kRB5-CKSUMTYPE-GSSAPI(32771), -- 0x8003
+ kRB5-CKSUMTYPE-HMAC-MD5(-138), -- unofficial microsoft number
+ kRB5-CKSUMTYPE-HMAC-MD5-ENC(-1138) -- even more unofficial
+}
+ChecksumTypeSequence ::= SEQUENCE {
+ dummy [0] ChecksumTypeValues
+}
+
+EncryptionTypeValues ::= INTEGER {
+ kRB5-ENCTYPE-NULL(0),
+ kRB5-ENCTYPE-DES-CBC-CRC(1),
+ kRB5-ENCTYPE-DES-CBC-MD4(2),
+ kRB5-ENCTYPE-DES-CBC-MD5(3),
+ kRB5-ENCTYPE-DES3-CBC-MD5(5),
+ kRB5-ENCTYPE-OLD-DES3-CBC-SHA1(7),
+ kRB5-ENCTYPE-SIGN-DSA-GENERATE(8),
+ kRB5-ENCTYPE-ENCRYPT-RSA-PRIV(9),
+ kRB5-ENCTYPE-ENCRYPT-RSA-PUB(10),
+ kRB5-ENCTYPE-DES3-CBC-SHA1(16), -- with key derivation
+ kRB5-ENCTYPE-AES128-CTS-HMAC-SHA1-96(17),
+ kRB5-ENCTYPE-AES256-CTS-HMAC-SHA1-96(18),
+ kRB5-ENCTYPE-ARCFOUR-HMAC-MD5(23),
+ kRB5-ENCTYPE-ARCFOUR-HMAC-MD5-56(24),
+ kRB5-ENCTYPE-ENCTYPE-PK-CROSS(48),
+-- some "old" windows types
+ kRB5-ENCTYPE-ARCFOUR-MD4(-128),
+ kRB5-ENCTYPE-ARCFOUR-HMAC-OLD(-133),
+ kRB5-ENCTYPE-ARCFOUR-HMAC-OLD-EXP(-135),
+-- these are for Heimdal internal use
+-- kRB5-ENCTYPE-DES-CBC-NONE(-0x1000),
+-- kRB5-ENCTYPE-DES3-CBC-NONE(-0x1001),
+-- kRB5-ENCTYPE-DES-CFB64-NONE(-0x1002),
+-- kRB5-ENCTYPE-DES-PCBC-NONE(-0x1003),
+-- kRB5-ENCTYPE-DIGEST-MD5-NONE(-0x1004), - private use, lukeh@padl.com
+-- kRB5-ENCTYPE-CRAM-MD5-NONE(-0x1005) - private use, lukeh@padl.com
+ kRB5-ENCTYPE-DUMMY(-1111)
+}
+EncryptionTypeSequence ::= SEQUENCE {
+ dummy [0] EncryptionTypeValues
+}
+
+KerbErrorDataTypeValues ::= INTEGER {
+ kERB-AP-ERR-TYPE-SKEW-RECOVERY(2),
+ kERB-ERR-TYPE-EXTENDED(3)
+}
+KerbErrorDataTypeSequence ::= SEQUENCE {
+ dummy [0] KerbErrorDataTypeValues
+}
+
+PACOptionFlagsValues ::= BIT STRING { -- KerberosFlags
+ claims(0),
+ branch-aware(1),
+ forward-to-full-dc(2),
+ resource-based-constrained-delegation(3)
+}
+PACOptionFlagsSequence ::= SEQUENCE {
+ dummy [0] PACOptionFlagsValues
+}
+
+END
diff --git a/python/samba/tests/krb5/rfc4120_constants.py b/python/samba/tests/krb5/rfc4120_constants.py
new file mode 100644
index 0000000..dff6017
--- /dev/null
+++ b/python/samba/tests/krb5/rfc4120_constants.py
@@ -0,0 +1,247 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) 2020 Catalyst.Net Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import samba.tests.krb5.rfc4120_pyasn1 as krb5_asn1
+
+# Encryption types
+AES256_CTS_HMAC_SHA1_96 = int(
+ krb5_asn1.EncryptionTypeValues('kRB5-ENCTYPE-AES256-CTS-HMAC-SHA1-96'))
+AES128_CTS_HMAC_SHA1_96 = int(
+ krb5_asn1.EncryptionTypeValues('kRB5-ENCTYPE-AES128-CTS-HMAC-SHA1-96'))
+ARCFOUR_HMAC_MD5 = int(
+ krb5_asn1.EncryptionTypeValues('kRB5-ENCTYPE-ARCFOUR-HMAC-MD5'))
+DES_CBC_CRC = int(
+ krb5_asn1.EncryptionTypeValues('kRB5-ENCTYPE-DES-CBC-CRC'))
+DES_CBC_MD5 = int(
+ krb5_asn1.EncryptionTypeValues('kRB5-ENCTYPE-DES-CBC-MD5'))
+DES3_CBC_MD5 = int(
+ krb5_asn1.EncryptionTypeValues('kRB5-ENCTYPE-DES3-CBC-MD5'))
+DES3_CBC_SHA1 = int(
+ krb5_asn1.EncryptionTypeValues('kRB5-ENCTYPE-DES3-CBC-SHA1'))
+
+DES_EDE3_CBC = 15 # des-ede3-cbc-EnvOID — required for Windows PK-INIT.
+
+# Message types
+KRB_ERROR = int(krb5_asn1.MessageTypeValues('krb-error'))
+KRB_AP_REP = int(krb5_asn1.MessageTypeValues('krb-ap-rep'))
+KRB_AP_REQ = int(krb5_asn1.MessageTypeValues('krb-ap-req'))
+KRB_AS_REP = int(krb5_asn1.MessageTypeValues('krb-as-rep'))
+KRB_AS_REQ = int(krb5_asn1.MessageTypeValues('krb-as-req'))
+KRB_TGS_REP = int(krb5_asn1.MessageTypeValues('krb-tgs-rep'))
+KRB_TGS_REQ = int(krb5_asn1.MessageTypeValues('krb-tgs-req'))
+KRB_PRIV = int(krb5_asn1.MessageTypeValues('krb-priv'))
+
+# PAData types
+PADATA_ENC_TIMESTAMP = int(
+ krb5_asn1.PADataTypeValues('kRB5-PADATA-ENC-TIMESTAMP'))
+PADATA_ENCRYPTED_CHALLENGE = int(
+ krb5_asn1.PADataTypeValues('kRB5-PADATA-ENCRYPTED-CHALLENGE'))
+PADATA_ETYPE_INFO = int(
+ krb5_asn1.PADataTypeValues('kRB5-PADATA-ETYPE-INFO'))
+PADATA_ETYPE_INFO2 = int(
+ krb5_asn1.PADataTypeValues('kRB5-PADATA-ETYPE-INFO2'))
+PADATA_FOR_USER = int(
+ krb5_asn1.PADataTypeValues('kRB5-PADATA-FOR-USER'))
+PADATA_FX_COOKIE = int(
+ krb5_asn1.PADataTypeValues('kRB5-PADATA-FX-COOKIE'))
+PADATA_FX_ERROR = int(
+ krb5_asn1.PADataTypeValues('kRB5-PADATA-FX-ERROR'))
+PADATA_FX_FAST = int(
+ krb5_asn1.PADataTypeValues('kRB5-PADATA-FX-FAST'))
+PADATA_KDC_REQ = int(
+ krb5_asn1.PADataTypeValues('kRB5-PADATA-KDC-REQ'))
+PADATA_PAC_OPTIONS = int(
+ krb5_asn1.PADataTypeValues('kRB5-PADATA-PAC-OPTIONS'))
+PADATA_PAC_REQUEST = int(
+ krb5_asn1.PADataTypeValues('kRB5-PADATA-PA-PAC-REQUEST'))
+PADATA_PK_AS_REQ = int(
+ krb5_asn1.PADataTypeValues('kRB5-PADATA-PK-AS-REQ'))
+PADATA_PK_AS_REP = int(
+ krb5_asn1.PADataTypeValues('kRB5-PADATA-PK-AS-REP'))
+PADATA_PK_AS_REQ_19 = int(
+ krb5_asn1.PADataTypeValues('kRB5-PADATA-PK-AS-REQ-19'))
+PADATA_PK_AS_REP_19 = int(
+ krb5_asn1.PADataTypeValues('kRB5-PADATA-PK-AS-REP-19'))
+PADATA_PW_SALT = int(
+ krb5_asn1.PADataTypeValues('kRB5-PADATA-PW-SALT'))
+PADATA_SUPPORTED_ETYPES = int(
+ krb5_asn1.PADataTypeValues('kRB5-PADATA-SUPPORTED-ETYPES'))
+PADATA_PKINIT_KX = int(
+ krb5_asn1.PADataTypeValues('kRB5-PADATA-PKINIT-KX'))
+PADATA_GSS = int(
+ krb5_asn1.PADataTypeValues('kRB5-PADATA-GSS'))
+PADATA_REQ_ENC_PA_REP = int(
+ krb5_asn1.PADataTypeValues('kRB5-PADATA-REQ-ENC-PA-REP'))
+PADATA_AS_FRESHNESS = int(
+ krb5_asn1.PADataTypeValues('kRB5-PADATA-AS-FRESHNESS'))
+
+# Error codes
+KDC_ERR_C_PRINCIPAL_UNKNOWN = 6
+KDC_ERR_S_PRINCIPAL_UNKNOWN = 7
+KDC_ERR_NEVER_VALID = 11
+KDC_ERR_POLICY = 12
+KDC_ERR_BADOPTION = 13
+KDC_ERR_ETYPE_NOSUPP = 14
+KDC_ERR_SUMTYPE_NOSUPP = 15
+KDC_ERR_CLIENT_REVOKED = 18
+KDC_ERR_TGT_REVOKED = 20
+KDC_ERR_PREAUTH_FAILED = 24
+KDC_ERR_PREAUTH_REQUIRED = 25
+KDC_ERR_SERVER_NOMATCH = 26
+KDC_ERR_BAD_INTEGRITY = 31
+KDC_ERR_TKT_EXPIRED = 32
+KRB_ERR_TKT_NYV = 33
+KDC_ERR_NOT_US = 35
+KDC_ERR_BADMATCH = 36
+KDC_ERR_SKEW = 37
+KDC_ERR_MODIFIED = 41
+KDC_ERR_BADKEYVER = 44
+KDC_ERR_INAPP_CKSUM = 50
+KDC_ERR_GENERIC = 60
+KDC_ERR_CLIENT_NOT_TRUSTED = 62
+KDC_ERR_INVALID_SIG = 64
+KDC_ERR_DH_KEY_PARAMETERS_NOT_ACCEPTED = 65
+KDC_ERR_WRONG_REALM = 68
+KDC_ERR_CANT_VERIFY_CERTIFICATE = 70
+KDC_ERR_INVALID_CERTIFICATE = 71
+KDC_ERR_REVOKED_CERTIFICATE = 72
+KDC_ERR_REVOCATION_STATUS_UNKNOWN = 73
+KDC_ERR_CLIENT_NAME_MISMATCH = 75
+KDC_ERR_INCONSISTENT_KEY_PURPOSE = 77
+KDC_ERR_DIGEST_IN_CERT_NOT_ACCEPTED = 78
+KDC_ERR_PA_CHECKSUM_MUST_BE_INCLUDED = 79
+KDC_ERR_DIGEST_IN_SIGNED_DATA_NOT_ACCEPTED = 80
+KDC_ERR_PUBLIC_KEY_ENCRYPTION_NOT_SUPPORTED = 81
+KDC_ERR_PREAUTH_EXPIRED = 90
+KDC_ERR_UNKNOWN_CRITICAL_FAST_OPTIONS = 93
+
+# Kpasswd error codes
+KPASSWD_SUCCESS = 0
+KPASSWD_MALFORMED = 1
+KPASSWD_HARDERROR = 2
+KPASSWD_AUTHERROR = 3
+KPASSWD_SOFTERROR = 4
+KPASSWD_ACCESSDENIED = 5
+KPASSWD_BAD_VERSION = 6
+KPASSWD_INITIAL_FLAG_NEEDED = 7
+
+# Extended error types
+KERB_AP_ERR_TYPE_SKEW_RECOVERY = int(
+ krb5_asn1.KerbErrorDataTypeValues('kERB-AP-ERR-TYPE-SKEW-RECOVERY'))
+KERB_ERR_TYPE_EXTENDED = int(
+ krb5_asn1.KerbErrorDataTypeValues('kERB-ERR-TYPE-EXTENDED'))
+
+# Name types
+NT_UNKNOWN = int(krb5_asn1.NameTypeValues('kRB5-NT-UNKNOWN'))
+NT_PRINCIPAL = int(krb5_asn1.NameTypeValues('kRB5-NT-PRINCIPAL'))
+NT_SRV_HST = int(krb5_asn1.NameTypeValues('kRB5-NT-SRV-HST'))
+NT_SRV_INST = int(krb5_asn1.NameTypeValues('kRB5-NT-SRV-INST'))
+NT_ENTERPRISE_PRINCIPAL = int(krb5_asn1.NameTypeValues(
+ 'kRB5-NT-ENTERPRISE-PRINCIPAL'))
+NT_WELLKNOWN = int(krb5_asn1.NameTypeValues('kRB5-NT-WELLKNOWN'))
+
+# Authorization data ad-type values
+
+AD_IF_RELEVANT = 1
+AD_INTENDED_FOR_SERVER = 2
+AD_INTENDED_FOR_APPLICATION_CLASS = 3
+AD_KDC_ISSUED = 4
+AD_AND_OR = 5
+AD_MANDATORY_TICKET_EXTENSIONS = 6
+AD_IN_TICKET_EXTENSIONS = 7
+AD_MANDATORY_FOR_KDC = 8
+AD_INITIAL_VERIFIED_CAS = 9
+AD_FX_FAST_ARMOR = 71
+AD_FX_FAST_USED = 72
+AD_WIN2K_PAC = 128
+AD_SIGNTICKET = 512
+
+# Key usage numbers
+# RFC 4120 Section 7.5.1. Key Usage Numbers
+KU_PA_ENC_TIMESTAMP = 1
+''' AS-REQ PA-ENC-TIMESTAMP padata timestamp, encrypted with the
+ client key (section 5.2.7.2) '''
+KU_TICKET = 2
+''' AS-REP Ticket and TGS-REP Ticket (includes tgs session key or
+ application session key), encrypted with the service key
+ (section 5.3) '''
+KU_AS_REP_ENC_PART = 3
+''' AS-REP encrypted part (includes tgs session key or application
+ session key), encrypted with the client key (section 5.4.2) '''
+KU_TGS_REQ_AUTH_DAT_SESSION = 4
+''' TGS-REQ KDC-REQ-BODY AuthorizationData, encrypted with the tgs
+ session key (section 5.4.1) '''
+KU_TGS_REQ_AUTH_DAT_SUBKEY = 5
+''' TGS-REQ KDC-REQ-BODY AuthorizationData, encrypted with the tgs
+ authenticator subkey (section 5.4.1) '''
+KU_TGS_REQ_AUTH_CKSUM = 6
+''' TGS-REQ PA-TGS-REQ padata AP-REQ Authenticator cksum, keyed
+ with the tgs session key (section 5.5.1) '''
+KU_PKINIT_AS_REQ = 6
+KU_TGS_REQ_AUTH = 7
+''' TGS-REQ PA-TGS-REQ padata AP-REQ Authenticator (includes tgs
+ authenticator subkey), encrypted with the tgs session key
+ (section 5.5.1) '''
+KU_TGS_REP_ENC_PART_SESSION = 8
+''' TGS-REP encrypted part (includes application session key),
+ encrypted with the tgs session key (section 5.4.2) '''
+KU_TGS_REP_ENC_PART_SUB_KEY = 9
+''' TGS-REP encrypted part (includes application session key),
+ encrypted with the tgs authenticator subkey (section 5.4.2) '''
+KU_AP_REQ_AUTH_CKSUM = 10
+''' AP-REQ Authenticator cksum, keyed with the application session
+ key (section 5.5.1) '''
+KU_AP_REQ_AUTH = 11
+''' AP-REQ Authenticator (includes application authenticator
+ subkey), encrypted with the application session key (section 5.5.1) '''
+KU_AP_REQ_ENC_PART = 12
+''' AP-REP encrypted part (includes application session subkey),
+ encrypted with the application session key (section 5.5.2) '''
+KU_KRB_PRIV = 13
+''' KRB-PRIV encrypted part, encrypted with a key chosen by the
+ application (section 5.7.1) '''
+KU_KRB_CRED = 14
+''' KRB-CRED encrypted part, encrypted with a key chosen by the
+ application (section 5.8.1) '''
+KU_KRB_SAFE_CKSUM = 15
+''' KRB-SAFE cksum, keyed with a key chosen by the application
+ (section 5.6.1) '''
+KU_NON_KERB_SALT = 16
+KU_NON_KERB_CKSUM_SALT = 17
+
+KU_ACCEPTOR_SEAL = 22
+KU_ACCEPTOR_SIGN = 23
+KU_INITIATOR_SEAL = 24
+KU_INITIATOR_SIGN = 25
+
+KU_FAST_REQ_CHKSUM = 50
+KU_FAST_ENC = 51
+KU_FAST_REP = 52
+KU_FAST_FINISHED = 53
+KU_ENC_CHALLENGE_CLIENT = 54
+KU_ENC_CHALLENGE_KDC = 55
+KU_AS_REQ = 56
+
+KU_AS_FRESHNESS = 60
+
+# Armor types
+FX_FAST_ARMOR_AP_REQUEST = 1
+
+# PKINIT typed data errors
+TD_TRUSTED_CERTIFIERS = 104
+TD_INVALID_CERTIFICATES = 105
+TD_DH_PARAMETERS = 109
diff --git a/python/samba/tests/krb5/rfc4120_pyasn1.py b/python/samba/tests/krb5/rfc4120_pyasn1.py
new file mode 100644
index 0000000..ad8a6e7
--- /dev/null
+++ b/python/samba/tests/krb5/rfc4120_pyasn1.py
@@ -0,0 +1,92 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Catalyst.Net Ltd 2023
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from samba.tests.krb5.rfc4120_pyasn1_generated import *
+
+# Kerberos strings should generally be treated as UTF‐8 encoded, but asn1ate
+# (the tool which generates Python definitions from our ASN.1 modules) provides
+# no way to specify the encoding to use. By the time we’ve imported
+# ‘rfc4120_pyasn1_generated’, KerberosString in the process having been
+# instantiated as part of several schema objects, it’s too late to change the
+# existing objects. But by overriding the __getattribute__() method on
+# KerberosString, we can have objects of that type, or a subtype thereof,
+# encoded as UTF‐8 strings instead of as ISO-8859-1 strings (the default).
+
class ReadOnlyUtf8EncodingDict(dict):
    """A read-only dict whose 'encoding' entry always reads as UTF-8.

    Instances wrap a copy of KerberosString's read-only attribute dict so
    that its 'encoding' value is reported as 'utf-8' rather than pyasn1's
    ISO-8859-1 default, while preventing modification of the copy.
    """

    # Don't allow any attributes to be set.
    __slots__ = []

    def __getitem__(self, key):
        # Get the original item.  This will raise KeyError if it's not
        # present.
        val = super().__getitem__(key)

        # If anyone wants to know our encoding, say it's UTF-8.
        if key == 'encoding':
            return 'utf-8'

        return val

    # Python's default implementations of the following methods don't call
    # __getitem__(), so we'll need to override them with our own
    # replacements.  In behaviour, they are close enough to the originals
    # for our purposes.

    def get(self, key, default=None):
        try:
            return self[key]
        except KeyError:
            return default

    def items(self):
        for key in self:
            yield key, self[key]

    def values(self):
        for key in self:
            yield self[key]

    # Don't let anyone modify the dict's contents.  Besides item assignment
    # and deletion, also block the remaining mutating methods inherited from
    # dict (pop/popitem/clear/update/setdefault) — previously those could
    # still modify a dict that is supposed to be read-only.

    def __setitem__(self, key, val):
        raise TypeError('item assignment not supported')

    def __delitem__(self, key):
        raise TypeError('item deletion not supported')

    def pop(self, *args):
        raise TypeError('item removal not supported')

    def popitem(self):
        raise TypeError('item removal not supported')

    def clear(self):
        raise TypeError('item removal not supported')

    def update(self, *args, **kwargs):
        raise TypeError('item assignment not supported')

    def setdefault(self, *args):
        raise TypeError('item assignment not supported')
+
+
# Keep a reference to the original implementation so that the override
# below can delegate to it.
KerberosString_get_attribute = KerberosString.__getattribute__


def get_attribute_override(self, attr):
    """Attribute lookup for KerberosString that reports UTF-8 encoding.

    Delegates to the original __getattribute__() — so a missing attribute
    still raises AttributeError — and then rewrites the two attributes
    through which the encoding is visible: 'encoding' itself, and the
    '_readOnly' attribute dict.
    """
    val = KerberosString_get_attribute(self, attr)

    if attr == 'encoding':
        # If anyone wants to know our encoding, say it's UTF-8.
        return 'utf-8'

    if attr != '_readOnly':
        return val

    # Hand back a copy of the read-only attributes with the encoding
    # overridden to be UTF-8.  The copy refuses modification of its
    # contents, so changes can neither be made to it nor propagate back to
    # the original dict — this is supposed to be read-only, after all.
    return ReadOnlyUtf8EncodingDict(val)


# Install the override on KerberosString (and thereby on its subtypes).
KerberosString.__getattribute__ = get_attribute_override
diff --git a/python/samba/tests/krb5/rfc4120_pyasn1_generated.py b/python/samba/tests/krb5/rfc4120_pyasn1_generated.py
new file mode 100644
index 0000000..6949737
--- /dev/null
+++ b/python/samba/tests/krb5/rfc4120_pyasn1_generated.py
@@ -0,0 +1,2690 @@
+# Auto-generated by asn1ate v.0.6.1.dev0 from rfc4120.asn1
+# (last modified on 2023-12-15 11:13:21.627710)
+
+# KerberosV5Spec2
+from pyasn1.type import univ, char, namedtype, namedval, tag, constraint, useful
+
+
+def _OID(*components):
+ output = []
+ for x in tuple(components):
+ if isinstance(x, univ.ObjectIdentifier):
+ output.extend(list(x))
+ else:
+ output.append(int(x))
+
+ return univ.ObjectIdentifier(output)
+
+
+class Int32(univ.Integer):
+ pass
+
+
+Int32.subtypeSpec = constraint.ValueRangeConstraint(-2147483648, 2147483647)
+
+
+class AuthDataType(Int32):
+ pass
+
+
+class AuthorizationData(univ.SequenceOf):
+ pass
+
+
+AuthorizationData.componentType = univ.Sequence(componentType=namedtype.NamedTypes(
+ namedtype.NamedType('ad-type', AuthDataType().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('ad-data', univ.OctetString().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+))
+
+
+class AD_AND_OR(univ.Sequence):
+ pass
+
+
+AD_AND_OR.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('condition-count', Int32().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('elements', AuthorizationData().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class AD_IF_RELEVANT(AuthorizationData):
+ pass
+
+
+class ExternalPrincipalIdentifier(univ.Sequence):
+ pass
+
+
+ExternalPrincipalIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('subjectName', univ.OctetString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('issuerAndSerialNumber', univ.OctetString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('subjectKeyIdentifier', univ.OctetString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+)
+
+
+class AD_INITIAL_VERIFIED_CAS(univ.SequenceOf):
+ pass
+
+
+AD_INITIAL_VERIFIED_CAS.componentType = ExternalPrincipalIdentifier()
+
+
+class ChecksumType(Int32):
+ pass
+
+
+class Checksum(univ.Sequence):
+ pass
+
+
+Checksum.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('cksumtype', ChecksumType().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('checksum', univ.OctetString().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class KerberosString(char.GeneralString):
+ pass
+
+
+class NameType(Int32):
+ pass
+
+
+class PrincipalName(univ.Sequence):
+ pass
+
+
+PrincipalName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('name-type', NameType().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('name-string', univ.SequenceOf(componentType=KerberosString()).subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class Realm(KerberosString):
+ pass
+
+
+class AD_KDCIssued(univ.Sequence):
+ pass
+
+
+AD_KDCIssued.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('ad-checksum', Checksum().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('i-realm', Realm().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('i-sname', PrincipalName().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))),
+ namedtype.NamedType('elements', AuthorizationData().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
+)
+
+
+class AD_MANDATORY_FOR_KDC(AuthorizationData):
+ pass
+
+
+class EncryptionType(Int32):
+ pass
+
+
+class EncryptedData(univ.Sequence):
+ pass
+
+
+EncryptedData.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('etype', EncryptionType().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('kvno', Int32().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('cipher', univ.OctetString().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+)
+
+
+class AP_REP(univ.Sequence):
+ pass
+
+
+AP_REP.tagSet = univ.Sequence.tagSet.tagExplicitly(tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 15))
+AP_REP.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('pvno', univ.Integer().subtype(subtypeSpec=constraint.SingleValueConstraint(5)).subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('msg-type', univ.Integer().subtype(subtypeSpec=constraint.SingleValueConstraint(15)).subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('enc-part', EncryptedData().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)))
+)
+
+
+class KerberosFlags(univ.BitString):
+ pass
+
+
+KerberosFlags.subtypeSpec=constraint.ValueSizeConstraint(1, 32)
+
+
+class APOptions(KerberosFlags):
+ pass
+
+
+class Ticket(univ.Sequence):
+ pass
+
+
+Ticket.tagSet = univ.Sequence.tagSet.tagExplicitly(tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 1))
+Ticket.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('tkt-vno', univ.Integer().subtype(subtypeSpec=constraint.SingleValueConstraint(5)).subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('realm', Realm().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('sname', PrincipalName().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))),
+ namedtype.NamedType('enc-part', EncryptedData().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3)))
+)
+
+
+class AP_REQ(univ.Sequence):
+ pass
+
+
+AP_REQ.tagSet = univ.Sequence.tagSet.tagExplicitly(tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 14))
+AP_REQ.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('pvno', univ.Integer().subtype(subtypeSpec=constraint.SingleValueConstraint(5)).subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('msg-type', univ.Integer().subtype(subtypeSpec=constraint.SingleValueConstraint(14)).subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('ap-options', APOptions().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.NamedType('ticket', Ticket().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.NamedType('authenticator', EncryptedData().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4)))
+)
+
+
+class APOptionsValues(univ.BitString):
+ pass
+
+
+APOptionsValues.namedValues = namedval.NamedValues(
+ ('reserved', 0),
+ ('use-session-key', 1),
+ ('mutual-required', 2)
+)
+
+
+class APOptionsSequence(univ.Sequence):
+ pass
+
+
+APOptionsSequence.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('dummy', APOptionsValues().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
+
+class PADataType(Int32):
+ pass
+
+
+class PA_DATA(univ.Sequence):
+ pass
+
+
+PA_DATA.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('padata-type', PADataType().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('padata-value', univ.OctetString().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+)
+
+
+class KDC_REP(univ.Sequence):
+ pass
+
+
+KDC_REP.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('pvno', univ.Integer().subtype(subtypeSpec=constraint.SingleValueConstraint(5)).subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('msg-type', univ.Integer().subtype(subtypeSpec=constraint.SingleValueConstraint(11, 13)).subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('padata', univ.SequenceOf(componentType=PA_DATA()).subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.NamedType('crealm', Realm().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.NamedType('cname', PrincipalName().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4))),
+ namedtype.NamedType('ticket', Ticket().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 5))),
+ namedtype.NamedType('enc-part', EncryptedData().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 6)))
+)
+
+
+class AS_REP(KDC_REP):
+ pass
+
+
+AS_REP.tagSet = KDC_REP.tagSet.tagExplicitly(tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 11))
+
+
+class HostAddress(univ.Sequence):
+ pass
+
+
+HostAddress.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('addr-type', Int32().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('address', univ.OctetString().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class HostAddresses(univ.SequenceOf):
+ pass
+
+
+HostAddresses.componentType = HostAddress()
+
+
+class KDCOptions(KerberosFlags):
+ pass
+
+
+class KerberosTime(useful.GeneralizedTime):
+ pass
+
+
+class UInt32(univ.Integer):
+ pass
+
+
+UInt32.subtypeSpec = constraint.ValueRangeConstraint(0, 4294967295)
+
+
+class KDC_REQ_BODY(univ.Sequence):
+ pass
+
+
+KDC_REQ_BODY.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('kdc-options', KDCOptions().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('cname', PrincipalName().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
+ namedtype.NamedType('realm', Realm().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('sname', PrincipalName().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))),
+ namedtype.OptionalNamedType('from', KerberosTime().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))),
+ namedtype.NamedType('till', KerberosTime().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 5))),
+ namedtype.OptionalNamedType('rtime', KerberosTime().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 6))),
+ namedtype.NamedType('nonce', UInt32().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 7))),
+ namedtype.NamedType('etype', univ.SequenceOf(componentType=EncryptionType()).subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 8))),
+ namedtype.OptionalNamedType('addresses', HostAddresses().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 9))),
+ namedtype.OptionalNamedType('enc-authorization-data', EncryptedData().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 10))),
+ namedtype.OptionalNamedType('additional-tickets', univ.SequenceOf(componentType=Ticket()).subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 11)))
+)
+
+
+class KDC_REQ(univ.Sequence):
+ pass
+
+
+KDC_REQ.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('pvno', univ.Integer().subtype(subtypeSpec=constraint.SingleValueConstraint(5)).subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('msg-type', univ.Integer().subtype(subtypeSpec=constraint.SingleValueConstraint(10, 12)).subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('padata', univ.SequenceOf(componentType=PA_DATA()).subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.NamedType('req-body', KDC_REQ_BODY().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4)))
+)
+
+
+class AS_REQ(KDC_REQ):
+ pass
+
+
+AS_REQ.tagSet = KDC_REQ.tagSet.tagExplicitly(tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 10))
+
+
+ub_domain_name_length = univ.Integer(16)
+
+
+class AdministrationDomainName(univ.Choice):
+ pass
+
+
+AdministrationDomainName.tagSet = univ.Choice.tagSet.tagExplicitly(tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 2))
+AdministrationDomainName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('numeric', char.NumericString().subtype(subtypeSpec=constraint.ValueSizeConstraint(0, ub_domain_name_length))),
+ namedtype.NamedType('printable', char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(0, ub_domain_name_length)))
+)
+
+
+class AlgorithmIdentifier(univ.Sequence):
+ pass
+
+
+AlgorithmIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('algorithm', univ.ObjectIdentifier()),
+ namedtype.OptionalNamedType('parameters', univ.Any())
+)
+
+
+class DirectoryString(univ.Choice):
+ pass
+
+
+DirectoryString.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString', char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 256))),
+ namedtype.NamedType('printableString', char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 256))),
+ namedtype.NamedType('universalString', char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 256))),
+ namedtype.NamedType('utf8String', char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 256))),
+ namedtype.NamedType('bmpString', char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 256)))
+)
+
+
+class EDIPartyName(univ.Sequence):
+ pass
+
+
+EDIPartyName.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('nameAssigner', DirectoryString().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('partyName', DirectoryString().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
+)
+
+
+class AttributeType(univ.ObjectIdentifier):
+ pass
+
+
+class AttributeValue(univ.Any):
+ pass
+
+
+class AttributeTypeAndValue(univ.Sequence):
+ pass
+
+
+AttributeTypeAndValue.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type', AttributeType()),
+ namedtype.NamedType('value', AttributeValue())
+)
+
+
+class RelativeDistinguishedName(univ.SetOf):
+ pass
+
+
+RelativeDistinguishedName.componentType = AttributeTypeAndValue()
+RelativeDistinguishedName.subtypeSpec=constraint.ValueSizeConstraint(1, 256)
+
+
+class RDNSequence(univ.SequenceOf):
+ pass
+
+
+RDNSequence.componentType = RelativeDistinguishedName()
+
+
+class Name(univ.Choice):
+ pass
+
+
+Name.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('rdnSequence', RDNSequence())
+)
+
+
+ub_domain_defined_attribute_type_length = univ.Integer(8)
+
+
+ub_domain_defined_attribute_value_length = univ.Integer(128)
+
+
+class BuiltInDomainDefinedAttribute(univ.Sequence):
+ pass
+
+
+BuiltInDomainDefinedAttribute.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type', char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_type_length))),
+ namedtype.NamedType('value', char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_value_length)))
+)
+
+
+ub_domain_defined_attributes = univ.Integer(4)
+
+
+class BuiltInDomainDefinedAttributes(univ.SequenceOf):
+ pass
+
+
+BuiltInDomainDefinedAttributes.componentType = BuiltInDomainDefinedAttribute()
+BuiltInDomainDefinedAttributes.subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attributes)
+
+
+ub_country_name_alpha_length = univ.Integer(2)
+
+
+ub_country_name_numeric_length = univ.Integer(3)
+
+
+class CountryName(univ.Choice):
+ pass
+
+
+CountryName.tagSet = univ.Choice.tagSet.tagExplicitly(tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 1))
+CountryName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('x121-dcc-code', char.NumericString().subtype(subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_numeric_length, ub_country_name_numeric_length))),
+ namedtype.NamedType('iso-3166-alpha2-code', char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_alpha_length, ub_country_name_alpha_length)))
+)
+
+
+ub_x121_address_length = univ.Integer(16)
+
+
+class X121Address(char.NumericString):
+ pass
+
+
+X121Address.subtypeSpec = constraint.ValueSizeConstraint(1, ub_x121_address_length)
+
+
+class NetworkAddress(X121Address):
+ pass
+
+
+ub_numeric_user_id_length = univ.Integer(32)
+
+
+class NumericUserIdentifier(char.NumericString):
+ pass
+
+
+NumericUserIdentifier.subtypeSpec = constraint.ValueSizeConstraint(1, ub_numeric_user_id_length)
+
+
+ub_organization_name_length = univ.Integer(64)
+
+
+class OrganizationName(char.PrintableString):
+ pass
+
+
+OrganizationName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_organization_name_length)
+
+
+ub_organizational_unit_name_length = univ.Integer(32)
+
+
+class OrganizationalUnitName(char.PrintableString):
+ pass
+
+
+OrganizationalUnitName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_organizational_unit_name_length)
+
+
+ub_organizational_units = univ.Integer(4)
+
+
+class OrganizationalUnitNames(univ.SequenceOf):
+ pass
+
+
+OrganizationalUnitNames.componentType = OrganizationalUnitName()
+OrganizationalUnitNames.subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_units)
+
+
+ub_generation_qualifier_length = univ.Integer(3)
+
+
+ub_given_name_length = univ.Integer(16)
+
+
+ub_initials_length = univ.Integer(5)
+
+
+ub_surname_length = univ.Integer(40)
+
+
+class PersonalName(univ.Set):
+ pass
+
+
+PersonalName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('surname', char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_surname_length)).subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('given-name', char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_given_name_length)).subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('initials', char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_initials_length)).subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('generation-qualifier', char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_generation_qualifier_length)).subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
+)
+
+
+class PrivateDomainName(univ.Choice):
+ pass
+
+
+PrivateDomainName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('numeric', char.NumericString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_name_length))),
+ namedtype.NamedType('printable', char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_name_length)))
+)
+
+
+ub_terminal_id_length = univ.Integer(24)
+
+
+class TerminalIdentifier(char.PrintableString):
+ pass
+
+
+TerminalIdentifier.subtypeSpec = constraint.ValueSizeConstraint(1, ub_terminal_id_length)
+
+
+class BuiltInStandardAttributes(univ.Sequence):
+ pass
+
+
+BuiltInStandardAttributes.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('country-name', CountryName()),
+ namedtype.OptionalNamedType('administration-domain-name', AdministrationDomainName()),
+ namedtype.OptionalNamedType('network-address', NetworkAddress().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('terminal-identifier', TerminalIdentifier().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('private-domain-name', PrivateDomainName().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))),
+ namedtype.OptionalNamedType('organization-name', OrganizationName().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.OptionalNamedType('numeric-user-identifier', NumericUserIdentifier().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))),
+ namedtype.OptionalNamedType('personal-name', PersonalName().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5))),
+ namedtype.OptionalNamedType('organizational-unit-names', OrganizationalUnitNames().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 6)))
+)
+
+
+ub_extension_attributes = univ.Integer(256)
+
+
+class ExtensionAttribute(univ.Sequence):
+ pass
+
+
+ExtensionAttribute.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('extension-attribute-type', univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, ub_extension_attributes)).subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('extension-attribute-value', univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class ExtensionAttributes(univ.SetOf):
+ pass
+
+
+ExtensionAttributes.componentType = ExtensionAttribute()
+ExtensionAttributes.subtypeSpec=constraint.ValueSizeConstraint(1, ub_extension_attributes)
+
+
+class ORAddress(univ.Sequence):
+ pass
+
+
+ORAddress.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('built-in-standard-attributes', BuiltInStandardAttributes()),
+ namedtype.OptionalNamedType('built-in-domain-defined-attributes', BuiltInDomainDefinedAttributes()),
+ namedtype.OptionalNamedType('extension-attributes', ExtensionAttributes())
+)
+
+
+class OtherName(univ.Sequence):
+ pass
+
+
+OtherName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type-id', univ.ObjectIdentifier()),
+ namedtype.NamedType('value', univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
+
+class GeneralName(univ.Choice):
+ pass
+
+
+GeneralName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('otherName', OtherName().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('rfc822Name', char.IA5String().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('dNSName', char.IA5String().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.NamedType('x400Address', ORAddress().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))),
+ namedtype.NamedType('directoryName', Name().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4))),
+ namedtype.NamedType('ediPartyName', EDIPartyName().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5))),
+ namedtype.NamedType('uniformResourceIdentifier', char.IA5String().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 6))),
+ namedtype.NamedType('iPAddress', univ.OctetString().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 7))),
+ namedtype.NamedType('registeredID', univ.ObjectIdentifier().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 8)))
+)
+
+
+class GeneralNames(univ.SequenceOf):
+ pass
+
+
+GeneralNames.componentType = GeneralName()
+GeneralNames.subtypeSpec=constraint.ValueSizeConstraint(1, 256)
+
+
+class CertificateSerialNumber(univ.Integer):
+ pass
+
+
+class UniqueIdentifier(univ.BitString):
+ pass
+
+
+class IssuerSerial(univ.Sequence):
+ pass
+
+
+IssuerSerial.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuer', GeneralNames()),
+ namedtype.NamedType('serial', CertificateSerialNumber()),
+ namedtype.OptionalNamedType('issuerUID', UniqueIdentifier())
+)
+
+
+class ObjectDigestInfo(univ.Sequence):
+ pass
+
+
+ObjectDigestInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('digestedObjectType', univ.Enumerated(namedValues=namedval.NamedValues(('publicKey', 0), ('publicKeyCert', 1), ('otherObjectTypes', 2)))),
+ namedtype.OptionalNamedType('otherObjectTypeID', univ.ObjectIdentifier()),
+ namedtype.NamedType('digestAlgorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('objectDigest', univ.BitString())
+)
+
+
+class V2Form(univ.Sequence):
+ pass
+
+
+V2Form.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('issuerName', GeneralNames()),
+ namedtype.OptionalNamedType('baseCertificateID', IssuerSerial().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('objectDigestInfo', ObjectDigestInfo().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
+)
+
+
+class AttCertIssuer(univ.Choice):
+ pass
+
+
+AttCertIssuer.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('v1Form', GeneralNames()),
+ namedtype.NamedType('v2Form', V2Form().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
+)
+
+
+class AttCertValidityPeriod(univ.Sequence):
+ pass
+
+
+AttCertValidityPeriod.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('notBeforeTime', useful.GeneralizedTime()),
+ namedtype.NamedType('notAfterTime', useful.GeneralizedTime())
+)
+
+
+class AttCertVersion(univ.Integer):
+ pass
+
+
+AttCertVersion.namedValues = namedval.NamedValues(
+ ('v2', 1)
+)
+
+
+class AttCertVersionV1(univ.Integer):
+ pass
+
+
+AttCertVersionV1.namedValues = namedval.NamedValues(
+ ('v1', 0)
+)
+
+
+class Attribute(univ.Sequence):
+ pass
+
+
+Attribute.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type', AttributeType()),
+ namedtype.NamedType('values', univ.SetOf(componentType=AttributeValue()))
+)
+
+
+class Extension(univ.Sequence):
+ pass
+
+
+Extension.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('extnID', univ.ObjectIdentifier()),
+ namedtype.DefaultedNamedType('critical', univ.Boolean().subtype(value=0)),
+ namedtype.NamedType('extnValue', univ.OctetString())
+)
+
+
+class Extensions(univ.SequenceOf):
+ pass
+
+
+Extensions.componentType = Extension()
+Extensions.subtypeSpec=constraint.ValueSizeConstraint(1, 256)
+
+
+class Holder(univ.Sequence):
+ pass
+
+
+Holder.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('baseCertificateID', IssuerSerial().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('entityName', GeneralNames().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('objectDigestInfo', ObjectDigestInfo().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)))
+)
+
+
+# X.509 attribute-certificate structures (cf. RFC 5755 / RFC 3281).  As
+# everywhere in this generated module, each pyasn1 type is declared with an
+# empty body and its componentType is attached afterwards, so forward
+# references resolve.
+class AttributeCertificateInfo(univ.Sequence):
+ pass
+
+
+AttributeCertificateInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', AttCertVersion()),
+ namedtype.NamedType('holder', Holder()),
+ namedtype.NamedType('issuer', AttCertIssuer()),
+ namedtype.NamedType('signature', AlgorithmIdentifier()),
+ namedtype.NamedType('serialNumber', CertificateSerialNumber()),
+ namedtype.NamedType('attrCertValidityPeriod', AttCertValidityPeriod()),
+ namedtype.NamedType('attributes', univ.SequenceOf(componentType=Attribute())),
+ namedtype.OptionalNamedType('issuerUniqueID', UniqueIdentifier()),
+ namedtype.OptionalNamedType('extensions', Extensions())
+)
+
+
+# AttributeCertificate ::= SEQUENCE { acinfo, signatureAlgorithm, signatureValue }
+class AttributeCertificate(univ.Sequence):
+ pass
+
+
+AttributeCertificate.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('acinfo', AttributeCertificateInfo()),
+ namedtype.NamedType('signatureAlgorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('signatureValue', univ.BitString())
+)
+
+
+# Deprecated v1 attribute-certificate form, kept for the CMS
+# CertificateChoices 'v1AttrCert' alternative below.
+class AttributeCertificateInfoV1(univ.Sequence):
+ pass
+
+
+AttributeCertificateInfoV1.componentType = namedtype.NamedTypes(
+ # NOTE(review): DEFAULT value=1 here, but RFC 3281 defines
+ # AttCertVersionV1 DEFAULT v1 where v1 = 0 -- confirm the non-zero
+ # default is intentional.
+ namedtype.DefaultedNamedType('version', AttCertVersionV1().subtype(value=1)),
+ namedtype.NamedType('subject', univ.Choice(componentType=namedtype.NamedTypes(
+ namedtype.NamedType('baseCertificateID', IssuerSerial().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('subjectName', GeneralNames().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+ ))
+ ),
+ namedtype.NamedType('issuer', GeneralNames()),
+ namedtype.NamedType('signature', AlgorithmIdentifier()),
+ namedtype.NamedType('serialNumber', CertificateSerialNumber()),
+ namedtype.NamedType('attCertValidityPeriod', AttCertValidityPeriod()),
+ namedtype.NamedType('attributes', univ.SequenceOf(componentType=Attribute())),
+ namedtype.OptionalNamedType('issuerUniqueID', UniqueIdentifier()),
+ namedtype.OptionalNamedType('extensions', Extensions())
+)
+
+
+class AttributeCertificateV1(univ.Sequence):
+ pass
+
+
+AttributeCertificateV1.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('acInfo', AttributeCertificateInfoV1()),
+ namedtype.NamedType('signatureAlgorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('signature', univ.BitString())
+)
+
+
+# The v2 attribute certificate is structurally identical to
+# AttributeCertificate, hence a plain subclass.
+class AttributeCertificateV2(AttributeCertificate):
+ pass
+
+
+# Kerberos authorization-data (ad-type) code points.  The negative entries
+# appear to be implementation-private/legacy assignments -- informational
+# only; verify against the Heimdal/MIT registries if relied upon.
+class AuthDataTypeValues(univ.Integer):
+ pass
+
+
+AuthDataTypeValues.namedValues = namedval.NamedValues(
+ ('kRB5-AUTHDATA-IF-RELEVANT', 1),
+ ('kRB5-AUTHDATA-INTENDED-FOR-SERVER', 2),
+ ('kRB5-AUTHDATA-INTENDED-FOR-APPLICATION-CLASS', 3),
+ ('kRB5-AUTHDATA-KDC-ISSUED', 4),
+ ('kRB5-AUTHDATA-AND-OR', 5),
+ ('kRB5-AUTHDATA-MANDATORY-TICKET-EXTENSIONS', 6),
+ ('kRB5-AUTHDATA-IN-TICKET-EXTENSIONS', 7),
+ ('kRB5-AUTHDATA-MANDATORY-FOR-KDC', 8),
+ ('kRB5-AUTHDATA-INITIAL-VERIFIED-CAS', 9),
+ ('kRB5-AUTHDATA-OSF-DCE', 64),
+ ('kRB5-AUTHDATA-SESAME', 65),
+ ('kRB5-AUTHDATA-OSF-DCE-PKI-CERTID', 66),
+ ('kRB5-AUTHDATA-WIN2K-PAC', 128),
+ ('kRB5-AUTHDATA-GSS-API-ETYPE-NEGOTIATION', 129),
+ ('kRB5-AUTHDATA-SIGNTICKET-OLDER', -17),
+ ('kRB5-AUTHDATA-SIGNTICKET-OLD', 142),
+ ('kRB5-AUTHDATA-SIGNTICKET', 512)
+)
+
+
+# Wrapper SEQUENCE holding a single [0]-tagged value; presumably used to get
+# an explicitly tagged encoding of an ad-type integer -- confirm with callers.
+class AuthDataTypeSequence(univ.Sequence):
+ pass
+
+
+AuthDataTypeSequence.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('dummy', AuthDataTypeValues().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
+
+# PKINIT structures (cf. RFC 4556): client nonce, authenticator and AuthPack,
+# plus the pre-RFC Windows 2000 variants used by older KDCs.
+class DHNonce(univ.OctetString):
+ pass
+
+
+class PKAuthenticator(univ.Sequence):
+ pass
+
+
+PKAuthenticator.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('cusec', univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, 999999)).subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('ctime', KerberosTime().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('nonce', univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, 4294967295)).subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('paChecksum', univ.OctetString().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.OptionalNamedType('freshnessToken', univ.OctetString().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4)))
+)
+
+
+# X.509 SubjectPublicKeyInfo (algorithm + raw key bits).
+class SubjectPublicKeyInfo(univ.Sequence):
+ pass
+
+
+SubjectPublicKeyInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('algorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('subjectPublicKey', univ.BitString())
+)
+
+
+class AuthPack(univ.Sequence):
+ pass
+
+
+AuthPack.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('pkAuthenticator', PKAuthenticator().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('clientPublicValue', SubjectPublicKeyInfo().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
+ namedtype.OptionalNamedType('supportedCMSTypes', univ.SequenceOf(componentType=AlgorithmIdentifier()).subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('clientDHNonce', DHNonce().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
+)
+
+
+# Windows-2000-era PKINIT authenticator: carries kdcName/kdcRealm and allows
+# a signed 32-bit nonce, unlike the RFC 4556 form above.
+class PKAuthenticator_Win2k(univ.Sequence):
+ pass
+
+
+PKAuthenticator_Win2k.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('kdcName', PrincipalName().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('kdcRealm', Realm().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('cusec', univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, 4294967295)).subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.NamedType('ctime', KerberosTime().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.NamedType('nonce', univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(-2147483648, 2147483647)).subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4)))
+)
+
+
+class AuthPack_Win2k(univ.Sequence):
+ pass
+
+
+AuthPack_Win2k.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('pkAuthenticator', PKAuthenticator_Win2k().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
+)
+
+
+# EncryptionKey ::= SEQUENCE { keytype [0], keyvalue [1] } (cf. RFC 4120).
+class EncryptionKey(univ.Sequence):
+ pass
+
+
+EncryptionKey.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('keytype', EncryptionType().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('keyvalue', univ.OctetString().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+# Microseconds ::= INTEGER (0..999999)
+class Microseconds(univ.Integer):
+ pass
+
+
+Microseconds.subtypeSpec = constraint.ValueRangeConstraint(0, 999999)
+
+
+# AP-REQ Authenticator, tagged [APPLICATION 2] (cf. RFC 4120 5.5.1).
+class Authenticator(univ.Sequence):
+ pass
+
+
+Authenticator.tagSet = univ.Sequence.tagSet.tagExplicitly(tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 2))
+Authenticator.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('authenticator-vno', univ.Integer().subtype(subtypeSpec=constraint.SingleValueConstraint(5)).subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('crealm', Realm().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('cname', PrincipalName().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))),
+ namedtype.OptionalNamedType('cksum', Checksum().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))),
+ namedtype.NamedType('cusec', Microseconds().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))),
+ namedtype.NamedType('ctime', KerberosTime().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 5))),
+ namedtype.OptionalNamedType('subkey', EncryptionKey().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 6))),
+ namedtype.OptionalNamedType('seq-number', UInt32().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 7))),
+ namedtype.OptionalNamedType('authorization-data', AuthorizationData().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 8)))
+)
+
+
+# CMS helper types (cf. RFC 5652): attribute sets, version numbers, and the
+# shared X.509 Time/Validity/Version building blocks.
+class CMSAttributes(univ.SetOf):
+ pass
+
+
+CMSAttributes.componentType = Attribute()
+
+
+# CBC IV parameter carried as an OCTET STRING.
+class CMSCBCParameter(univ.OctetString):
+ pass
+
+
+class CMSVersion(univ.Integer):
+ pass
+
+
+CMSVersion.namedValues = namedval.NamedValues(
+ ('v0', 0),
+ ('v1', 1),
+ ('v2', 2),
+ ('v3', 3),
+ ('v4', 4),
+ ('v5', 5)
+)
+
+
+class SerialNumber(univ.Integer):
+ pass
+
+
+# Legacy CRL entry form: UTCTime-only revocationDate, no entry extensions.
+class CRLEntry(univ.Sequence):
+ pass
+
+
+CRLEntry.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('userCertificate', SerialNumber()),
+ namedtype.NamedType('revocationDate', useful.UTCTime())
+)
+
+
+# Time ::= CHOICE { utcTime, generalTime } as in X.509.
+class Time(univ.Choice):
+ pass
+
+
+Time.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('utcTime', useful.UTCTime()),
+ namedtype.NamedType('generalTime', useful.GeneralizedTime())
+)
+
+
+class Validity(univ.Sequence):
+ pass
+
+
+Validity.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('notBefore', Time()),
+ namedtype.NamedType('notAfter', Time())
+)
+
+
+# Certificate version: v1(0), v2(1), v3(2).
+class Version(univ.Integer):
+ pass
+
+
+Version.namedValues = namedval.NamedValues(
+ ('v1', 0),
+ ('v2', 1),
+ ('v3', 2)
+)
+
+
+# X.509 certificate structures (cf. RFC 5280) plus the PKCS#6
+# ExtendedCertificate kept for the CMS CertificateChoices below.
+class TBSCertificate(univ.Sequence):
+ pass
+
+
+TBSCertificate.componentType = namedtype.NamedTypes(
+ # NOTE(review): DEFAULT value=1 encodes 'v2'; RFC 5280 specifies
+ # DEFAULT v1 (0) -- confirm this deviation is intentional.
+ namedtype.DefaultedNamedType('version', Version().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)).subtype(value=1)),
+ namedtype.NamedType('serialNumber', CertificateSerialNumber()),
+ namedtype.NamedType('signature', AlgorithmIdentifier()),
+ namedtype.NamedType('issuer', Name()),
+ namedtype.NamedType('validity', Validity()),
+ namedtype.NamedType('subject', Name()),
+ namedtype.NamedType('subjectPublicKeyInfo', SubjectPublicKeyInfo()),
+ namedtype.OptionalNamedType('issuerUniqueID', UniqueIdentifier().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('subjectUniqueID', UniqueIdentifier().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('extensions', Extensions().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
+)
+
+
+class Certificate(univ.Sequence):
+ pass
+
+
+Certificate.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('tbsCertificate', TBSCertificate()),
+ namedtype.NamedType('signatureAlgorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('signatureValue', univ.BitString())
+)
+
+
+# SET SIZE (1..256) OF Attribute.
+class UnauthAttributes(univ.SetOf):
+ pass
+
+
+UnauthAttributes.componentType = Attribute()
+UnauthAttributes.subtypeSpec=constraint.ValueSizeConstraint(1, 256)
+
+
+class ExtendedCertificateInfo(univ.Sequence):
+ pass
+
+
+ExtendedCertificateInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.NamedType('certificate', Certificate()),
+ namedtype.NamedType('attributes', UnauthAttributes())
+)
+
+
+class Signature(univ.BitString):
+ pass
+
+
+class SignatureAlgorithmIdentifier(AlgorithmIdentifier):
+ pass
+
+
+class ExtendedCertificate(univ.Sequence):
+ pass
+
+
+ExtendedCertificate.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('extendedCertificateInfo', ExtendedCertificateInfo()),
+ namedtype.NamedType('signatureAlgorithm', SignatureAlgorithmIdentifier()),
+ namedtype.NamedType('signature', Signature())
+)
+
+
+# CMS certificate/CRL container types (cf. RFC 5652 section 10.2).
+class OtherCertificateFormat(univ.Sequence):
+ pass
+
+
+OtherCertificateFormat.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('otherCertFormat', univ.ObjectIdentifier()),
+ namedtype.NamedType('otherCert', univ.Any())
+)
+
+
+# CertificateChoices: untagged Certificate plus implicitly tagged legacy and
+# attribute-certificate alternatives.
+class CertificateChoices(univ.Choice):
+ pass
+
+
+CertificateChoices.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certificate', Certificate()),
+ namedtype.NamedType('extendedCertificate', ExtendedCertificate().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('v1AttrCert', AttributeCertificateV1().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
+ namedtype.NamedType('v2AttrCert', AttributeCertificateV2().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))),
+ namedtype.NamedType('other', OtherCertificateFormat().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3)))
+)
+
+
+# X.509 TBSCertList; the revokedCertificates entry SEQUENCE is inlined
+# anonymously rather than named.
+class TBSCertList(univ.Sequence):
+ pass
+
+
+TBSCertList.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('version', Version()),
+ namedtype.NamedType('signature', AlgorithmIdentifier()),
+ namedtype.NamedType('issuer', Name()),
+ namedtype.NamedType('thisUpdate', Time()),
+ namedtype.OptionalNamedType('nextUpdate', Time()),
+ namedtype.OptionalNamedType('revokedCertificates', univ.SequenceOf(componentType=univ.Sequence(componentType=namedtype.NamedTypes(
+ namedtype.NamedType('userCertificate', CertificateSerialNumber()),
+ namedtype.NamedType('revocationDate', Time()),
+ namedtype.OptionalNamedType('crlEntryExtensions', Extensions())
+ ))
+ )),
+ namedtype.OptionalNamedType('crlExtensions', Extensions().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
+
+class CertificateList(univ.Sequence):
+ pass
+
+
+CertificateList.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('tbsCertList', TBSCertList()),
+ namedtype.NamedType('signatureAlgorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('signatureValue', univ.BitString())
+)
+
+
+# Legacy unsigned CRL form built on CRLEntry (UTCTime-only timestamps).
+class CertificateRevocationList(univ.Sequence):
+ pass
+
+
+CertificateRevocationList.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('signature', AlgorithmIdentifier()),
+ namedtype.NamedType('issuer', Name()),
+ namedtype.NamedType('lastUpdate', useful.UTCTime()),
+ namedtype.NamedType('nextUpdate', useful.UTCTime()),
+ namedtype.OptionalNamedType('revokedCertificates', univ.SequenceOf(componentType=CRLEntry()))
+)
+
+
+class CertificateRevocationLists(univ.SetOf):
+ pass
+
+
+CertificateRevocationLists.componentType = CertificateRevocationList()
+
+
+class CertificateSet(univ.SetOf):
+ pass
+
+
+CertificateSet.componentType = CertificateChoices()
+
+
+# Microsoft set/change-password request body (cf. RFC 3244): new password
+# plus optional target principal/realm.
+class ChangePasswdDataMS(univ.Sequence):
+ pass
+
+
+ChangePasswdDataMS.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('newpasswd', univ.OctetString().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('targname', PrincipalName().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
+ namedtype.OptionalNamedType('targrealm', Realm().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+)
+
+
+# Kerberos checksum-type code points; negative entries appear to be
+# private/legacy assignments.
+class ChecksumTypeValues(univ.Integer):
+ pass
+
+
+ChecksumTypeValues.namedValues = namedval.NamedValues(
+ ('kRB5-CKSUMTYPE-NONE', 0),
+ ('kRB5-CKSUMTYPE-CRC32', 1),
+ ('kRB5-CKSUMTYPE-RSA-MD4', 2),
+ ('kRB5-CKSUMTYPE-RSA-MD4-DES', 3),
+ ('kRB5-CKSUMTYPE-DES-MAC', 4),
+ ('kRB5-CKSUMTYPE-DES-MAC-K', 5),
+ ('kRB5-CKSUMTYPE-RSA-MD4-DES-K', 6),
+ ('kRB5-CKSUMTYPE-RSA-MD5', 7),
+ ('kRB5-CKSUMTYPE-RSA-MD5-DES', 8),
+ ('kRB5-CKSUMTYPE-RSA-MD5-DES3', 9),
+ ('kRB5-CKSUMTYPE-SHA1-OTHER', 10),
+ ('kRB5-CKSUMTYPE-HMAC-SHA1-DES3', 12),
+ ('kRB5-CKSUMTYPE-SHA1', 14),
+ ('kRB5-CKSUMTYPE-HMAC-SHA1-96-AES-128', 15),
+ ('kRB5-CKSUMTYPE-HMAC-SHA1-96-AES-256', 16),
+ ('kRB5-CKSUMTYPE-GSSAPI', 32771),
+ ('kRB5-CKSUMTYPE-HMAC-MD5', -138),
+ ('kRB5-CKSUMTYPE-HMAC-MD5-ENC', -1138)
+)
+
+
+# Wrapper SEQUENCE giving an explicitly [0]-tagged checksum-type value,
+# mirroring AuthDataTypeSequence above.
+class ChecksumTypeSequence(univ.Sequence):
+ pass
+
+
+ChecksumTypeSequence.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('dummy', ChecksumTypeValues().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
+
+# CMS ContentInfo and related algorithm-identifier aliases, plus the
+# Diffie-Hellman domain-parameter structures used by PKINIT.
+class ContentEncryptionAlgorithmIdentifier(AlgorithmIdentifier):
+ pass
+
+
+class ContentType(univ.ObjectIdentifier):
+ pass
+
+
+class ContentInfo(univ.Sequence):
+ pass
+
+
+ContentInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('contentType', ContentType()),
+ namedtype.OptionalNamedType('content', univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
+
+class DHPublicKey(univ.Integer):
+ pass
+
+
+# PKINIT KDC DH reply: implicitly [0]-tagged signed data plus an optional
+# explicitly [1]-tagged server nonce.
+class DHRepInfo(univ.Sequence):
+ pass
+
+
+DHRepInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('dhSignedData', univ.OctetString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('serverDHNonce', DHNonce().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class DigestAlgorithmIdentifier(AlgorithmIdentifier):
+ pass
+
+
+class DigestAlgorithmIdentifiers(univ.SetOf):
+ pass
+
+
+DigestAlgorithmIdentifiers.componentType = DigestAlgorithmIdentifier()
+
+
+# DH parameter validation seed/counter (cf. RFC 3279 section 2.3.3).
+class ValidationParms(univ.Sequence):
+ pass
+
+
+ValidationParms.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('seed', univ.BitString()),
+ namedtype.NamedType('pgenCounter', univ.Integer())
+)
+
+
+class DomainParameters(univ.Sequence):
+ pass
+
+
+DomainParameters.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('p', univ.Integer()),
+ namedtype.NamedType('g', univ.Integer()),
+ namedtype.OptionalNamedType('q', univ.Integer()),
+ namedtype.OptionalNamedType('j', univ.Integer()),
+ namedtype.OptionalNamedType('validationParms', ValidationParms())
+)
+
+
+# Pre-authentication hint lists ETYPE-INFO and ETYPE-INFO2 (cf. RFC 4120):
+# the KDC advertises supported etypes and salt/s2k parameters.
+class ETYPE_INFO_ENTRY(univ.Sequence):
+ pass
+
+
+ETYPE_INFO_ENTRY.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('etype', EncryptionType().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('salt', univ.OctetString().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class ETYPE_INFO(univ.SequenceOf):
+ pass
+
+
+ETYPE_INFO.componentType = ETYPE_INFO_ENTRY()
+
+
+# ETYPE-INFO2 entry: salt is a KerberosString (not OCTET STRING) and an
+# opaque string-to-key parameter blob may follow.
+class ETYPE_INFO2_ENTRY(univ.Sequence):
+ pass
+
+
+ETYPE_INFO2_ENTRY.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('etype', EncryptionType().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('salt', KerberosString().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('s2kparams', univ.OctetString().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+)
+
+
+class ETYPE_INFO2(univ.SequenceOf):
+ pass
+
+
+ETYPE_INFO2.componentType = ETYPE_INFO2_ENTRY()
+ETYPE_INFO2.subtypeSpec=constraint.ValueSizeConstraint(1, 256)
+
+
+# EncAPRepPart, tagged [APPLICATION 27] (cf. RFC 4120 5.5.2).
+class EncAPRepPart(univ.Sequence):
+ pass
+
+
+EncAPRepPart.tagSet = univ.Sequence.tagSet.tagExplicitly(tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 27))
+EncAPRepPart.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('ctime', KerberosTime().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('cusec', Microseconds().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('subkey', EncryptionKey().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))),
+ namedtype.OptionalNamedType('seq-number', UInt32().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
+)
+
+
+# LastReq: SEQUENCE OF anonymous { lr-type [0], lr-value [1] } pairs.
+class LastReq(univ.SequenceOf):
+ pass
+
+
+LastReq.componentType = univ.Sequence(componentType=namedtype.NamedTypes(
+ namedtype.NamedType('lr-type', Int32().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('lr-value', KerberosTime().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+))
+
+
+# METHOD-DATA ::= SEQUENCE OF PA-DATA.
+class METHOD_DATA(univ.SequenceOf):
+ pass
+
+
+METHOD_DATA.componentType = PA_DATA()
+
+
+class TicketFlags(KerberosFlags):
+ pass
+
+
+# Common body of the encrypted AS/TGS reply part; the application tag
+# (25 or 26) is added only on the EncASRepPart/EncTGSRepPart subclasses.
+class EncKDCRepPart(univ.Sequence):
+ pass
+
+
+EncKDCRepPart.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('key', EncryptionKey().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('last-req', LastReq().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('nonce', UInt32().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('key-expiration', KerberosTime().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.NamedType('flags', TicketFlags().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))),
+ namedtype.NamedType('authtime', KerberosTime().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 5))),
+ namedtype.OptionalNamedType('starttime', KerberosTime().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 6))),
+ namedtype.NamedType('endtime', KerberosTime().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 7))),
+ namedtype.OptionalNamedType('renew-till', KerberosTime().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 8))),
+ namedtype.NamedType('srealm', Realm().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 9))),
+ namedtype.NamedType('sname', PrincipalName().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 10))),
+ namedtype.OptionalNamedType('caddr', HostAddresses().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 11))),
+ # [12] encrypted-pa-data is an extension beyond base RFC 4120 --
+ # presumably the FAST/referrals extension; confirm against callers.
+ namedtype.OptionalNamedType('encrypted-pa-data', METHOD_DATA().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 12)))
+)
+
+
+class EncASRepPart(EncKDCRepPart):
+ pass
+
+
+# AS reply variant: adds the [APPLICATION 25] tag.
+EncASRepPart.tagSet = EncKDCRepPart.tagSet.tagExplicitly(tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 25))
+
+
+# KRB-CRED / KRB-PRIV encrypted parts (cf. RFC 4120 5.8, 5.7).
+class KrbCredInfo(univ.Sequence):
+ pass
+
+
+KrbCredInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('key', EncryptionKey().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('prealm', Realm().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('pname', PrincipalName().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))),
+ namedtype.OptionalNamedType('flags', TicketFlags().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.OptionalNamedType('authtime', KerberosTime().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))),
+ namedtype.OptionalNamedType('starttime', KerberosTime().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 5))),
+ namedtype.OptionalNamedType('endtime', KerberosTime().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 6))),
+ namedtype.OptionalNamedType('renew-till', KerberosTime().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 7))),
+ namedtype.OptionalNamedType('srealm', Realm().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 8))),
+ namedtype.OptionalNamedType('sname', PrincipalName().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 9))),
+ namedtype.OptionalNamedType('caddr', HostAddresses().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 10)))
+)
+
+
+# EncKrbCredPart, tagged [APPLICATION 29].
+class EncKrbCredPart(univ.Sequence):
+ pass
+
+
+EncKrbCredPart.tagSet = univ.Sequence.tagSet.tagExplicitly(tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 29))
+EncKrbCredPart.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('ticket-info', univ.SequenceOf(componentType=KrbCredInfo()).subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('nonce', UInt32().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('timestamp', KerberosTime().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('usec', Microseconds().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.OptionalNamedType('s-address', HostAddress().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4))),
+ namedtype.OptionalNamedType('r-address', HostAddress().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5)))
+)
+
+
+# EncKrbPrivPart, tagged [APPLICATION 28]; s-address is mandatory here.
+class EncKrbPrivPart(univ.Sequence):
+ pass
+
+
+EncKrbPrivPart.tagSet = univ.Sequence.tagSet.tagExplicitly(tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 28))
+EncKrbPrivPart.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('user-data', univ.OctetString().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('timestamp', KerberosTime().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('usec', Microseconds().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('seq-number', UInt32().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.NamedType('s-address', HostAddress().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4))),
+ namedtype.OptionalNamedType('r-address', HostAddress().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5)))
+)
+
+
+class EncTGSRepPart(EncKDCRepPart):
+ pass
+
+
+# TGS reply variant of EncKDCRepPart: adds the [APPLICATION 26] tag.
+EncTGSRepPart.tagSet = EncKDCRepPart.tagSet.tagExplicitly(tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 26))
+
+
+# TransitedEncoding: realm-transit list with its encoding type (RFC 4120).
+class TransitedEncoding(univ.Sequence):
+ pass
+
+
+TransitedEncoding.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('tr-type', Int32().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('contents', univ.OctetString().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+# EncTicketPart, tagged [APPLICATION 3] (cf. RFC 4120 5.3).
+class EncTicketPart(univ.Sequence):
+ pass
+
+
+EncTicketPart.tagSet = univ.Sequence.tagSet.tagExplicitly(tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 3))
+EncTicketPart.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('flags', TicketFlags().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('key', EncryptionKey().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
+ namedtype.NamedType('crealm', Realm().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.NamedType('cname', PrincipalName().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))),
+ namedtype.NamedType('transited', TransitedEncoding().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4))),
+ namedtype.NamedType('authtime', KerberosTime().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 5))),
+ namedtype.OptionalNamedType('starttime', KerberosTime().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 6))),
+ namedtype.NamedType('endtime', KerberosTime().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 7))),
+ namedtype.OptionalNamedType('renew-till', KerberosTime().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 8))),
+ namedtype.OptionalNamedType('caddr', HostAddresses().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 9))),
+ namedtype.OptionalNamedType('authorization-data', AuthorizationData().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 10)))
+)
+
+
+# CMS EncapsulatedContentInfo (RFC 5652 5.2): eContent is explicitly
+# [0]-tagged and optional.
+class EncapsulatedContentInfo(univ.Sequence):
+ pass
+
+
+EncapsulatedContentInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('eContentType', ContentType()),
+ namedtype.OptionalNamedType('eContent', univ.OctetString().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
+
+class EncryptedContent(univ.OctetString):
+ pass
+
+
+# CMS EncryptedContentInfo: note the ciphertext is IMPLICITLY [0]-tagged.
+class EncryptedContentInfo(univ.Sequence):
+ pass
+
+
+EncryptedContentInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('contentType', ContentType()),
+ namedtype.NamedType('contentEncryptionAlgorithm', ContentEncryptionAlgorithmIdentifier()),
+ namedtype.OptionalNamedType('encryptedContent', EncryptedContent().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
+
+class EncryptedKey(univ.OctetString):
+ pass
+
+
+# Kerberos encryption-type (etype) code points; negative entries appear to
+# be private/legacy assignments.
+class EncryptionTypeValues(univ.Integer):
+ pass
+
+
+EncryptionTypeValues.namedValues = namedval.NamedValues(
+ ('kRB5-ENCTYPE-NULL', 0),
+ ('kRB5-ENCTYPE-DES-CBC-CRC', 1),
+ ('kRB5-ENCTYPE-DES-CBC-MD4', 2),
+ ('kRB5-ENCTYPE-DES-CBC-MD5', 3),
+ ('kRB5-ENCTYPE-DES3-CBC-MD5', 5),
+ ('kRB5-ENCTYPE-OLD-DES3-CBC-SHA1', 7),
+ ('kRB5-ENCTYPE-SIGN-DSA-GENERATE', 8),
+ ('kRB5-ENCTYPE-ENCRYPT-RSA-PRIV', 9),
+ ('kRB5-ENCTYPE-ENCRYPT-RSA-PUB', 10),
+ ('kRB5-ENCTYPE-DES3-CBC-SHA1', 16),
+ ('kRB5-ENCTYPE-AES128-CTS-HMAC-SHA1-96', 17),
+ ('kRB5-ENCTYPE-AES256-CTS-HMAC-SHA1-96', 18),
+ ('kRB5-ENCTYPE-ARCFOUR-HMAC-MD5', 23),
+ ('kRB5-ENCTYPE-ARCFOUR-HMAC-MD5-56', 24),
+ ('kRB5-ENCTYPE-ENCTYPE-PK-CROSS', 48),
+ ('kRB5-ENCTYPE-ARCFOUR-MD4', -128),
+ ('kRB5-ENCTYPE-ARCFOUR-HMAC-OLD', -133),
+ ('kRB5-ENCTYPE-ARCFOUR-HMAC-OLD-EXP', -135),
+ ('kRB5-ENCTYPE-DUMMY', -1111)
+)
+
+
+# Wrapper SEQUENCE giving an explicitly [0]-tagged etype value, mirroring
+# AuthDataTypeSequence/ChecksumTypeSequence above.
+class EncryptionTypeSequence(univ.Sequence):
+ pass
+
+
+EncryptionTypeSequence.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('dummy', EncryptionTypeValues().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
+
+# CMS revocation-information container types (cf. RFC 5652 10.2.1).
+class OtherRevocationInfoFormat(univ.Sequence):
+ pass
+
+
+OtherRevocationInfoFormat.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('otherRevInfoFormat', univ.ObjectIdentifier()),
+ namedtype.NamedType('otherRevInfo', univ.Any())
+)
+
+
+class RevocationInfoChoice(univ.Choice):
+ pass
+
+
+RevocationInfoChoice.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('crl', CertificateList()),
+ namedtype.NamedType('other', OtherRevocationInfoFormat().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
+)
+
+
+class RevocationInfoChoices(univ.SetOf):
+ pass
+
+
+RevocationInfoChoices.componentType = RevocationInfoChoice()
+
+
+# OriginatorInfo for CMS EnvelopedData: both members implicitly tagged
+# and optional.
+class OriginatorInfo(univ.Sequence):
+ pass
+
+
+OriginatorInfo.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('certs', CertificateSet().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('crls', RevocationInfoChoices().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+# CMS KEK (pre-shared key-encryption key) recipient structures
+# (cf. RFC 5652 6.2.3).
+class OtherKeyAttribute(univ.Sequence):
+ pass
+
+
+OtherKeyAttribute.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('keyAttrId', univ.ObjectIdentifier()),
+ namedtype.OptionalNamedType('keyAttr', univ.Any())
+)
+
+
+class KEKIdentifier(univ.Sequence):
+ pass
+
+
+KEKIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('keyIdentifier', univ.OctetString()),
+ namedtype.OptionalNamedType('date', useful.GeneralizedTime()),
+ namedtype.OptionalNamedType('other', OtherKeyAttribute())
+)
+
+
+class KeyEncryptionAlgorithmIdentifier(AlgorithmIdentifier):
+ pass
+
+
+class KEKRecipientInfo(univ.Sequence):
+ pass
+
+
+KEKRecipientInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.NamedType('kekid', KEKIdentifier()),
+ namedtype.NamedType('keyEncryptionAlgorithm', KeyEncryptionAlgorithmIdentifier()),
+ namedtype.NamedType('encryptedKey', EncryptedKey())
+)
+
+
+# CMS recipient/originator identification types used by the key-agreement
+# recipient info (cf. RFC 5652 6.2.2).
+class IssuerAndSerialNumber(univ.Sequence):
+ pass
+
+
+IssuerAndSerialNumber.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuer', Name()),
+ namedtype.NamedType('serialNumber', CertificateSerialNumber())
+)
+
+
+class OriginatorPublicKey(univ.Sequence):
+ pass
+
+
+OriginatorPublicKey.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('algorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('publicKey', univ.BitString())
+)
+
+
+class SubjectKeyIdentifier(univ.OctetString):
+ pass
+
+
+class OriginatorIdentifierOrKey(univ.Choice):
+ pass
+
+
+OriginatorIdentifierOrKey.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuerAndSerialNumber', IssuerAndSerialNumber()),
+ namedtype.NamedType('subjectKeyIdentifier', SubjectKeyIdentifier().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('originatorKey', OriginatorPublicKey().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
+)
+
+
+class RecipientKeyIdentifier(univ.Sequence):
+ pass
+
+
+RecipientKeyIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('subjectKeyIdentifier', SubjectKeyIdentifier()),
+ namedtype.OptionalNamedType('date', useful.GeneralizedTime()),
+ namedtype.OptionalNamedType('other', OtherKeyAttribute())
+)
+
+
+# CHOICE: either issuer+serial, or an implicitly [0]-tagged key identifier.
+class KeyAgreeRecipientIdentifier(univ.Choice):
+ pass
+
+
+KeyAgreeRecipientIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuerAndSerialNumber', IssuerAndSerialNumber()),
+ namedtype.NamedType('rKeyId', RecipientKeyIdentifier().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
+)
+
+
+# CMS key-agreement recipient info and its per-recipient encrypted keys
+# (cf. RFC 5652 6.2.2).
+class RecipientEncryptedKey(univ.Sequence):
+ pass
+
+
+RecipientEncryptedKey.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('rid', KeyAgreeRecipientIdentifier()),
+ namedtype.NamedType('encryptedKey', EncryptedKey())
+)
+
+
+class RecipientEncryptedKeys(univ.SequenceOf):
+ pass
+
+
+RecipientEncryptedKeys.componentType = RecipientEncryptedKey()
+
+
+class UserKeyingMaterial(univ.OctetString):
+ pass
+
+
+class KeyAgreeRecipientInfo(univ.Sequence):
+ pass
+
+
+KeyAgreeRecipientInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.NamedType('originator', OriginatorIdentifierOrKey().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('ukm', UserKeyingMaterial().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('keyEncryptionAlgorithm', KeyEncryptionAlgorithmIdentifier()),
+ namedtype.NamedType('recipientEncryptedKeys', RecipientEncryptedKeys())
+)
+
+
+# CHOICE: issuer+serial, or explicitly [0]-tagged subject key identifier.
+class RecipientIdentifier(univ.Choice):
+ pass
+
+
+RecipientIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuerAndSerialNumber', IssuerAndSerialNumber()),
+ namedtype.NamedType('subjectKeyIdentifier', SubjectKeyIdentifier().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
+
+class KeyTransRecipientInfo(univ.Sequence):
+ pass
+
+
+KeyTransRecipientInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.NamedType('rid', RecipientIdentifier()),
+ namedtype.NamedType('keyEncryptionAlgorithm', KeyEncryptionAlgorithmIdentifier()),
+ namedtype.NamedType('encryptedKey', EncryptedKey())
+)
+
+
+# --- Remaining RecipientInfo alternatives and EnvelopedData (RFC 5652 s6). ---
+class OtherRecipientInfo(univ.Sequence):
+ pass
+
+
+OtherRecipientInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('oriType', univ.ObjectIdentifier()),
+ namedtype.NamedType('oriValue', univ.Any())
+)
+
+
+class KeyDerivationAlgorithmIdentifier(AlgorithmIdentifier):
+ pass
+
+
+class PasswordRecipientInfo(univ.Sequence):
+ pass
+
+
+PasswordRecipientInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.OptionalNamedType('keyDerivationAlgorithm', KeyDerivationAlgorithmIdentifier().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('keyEncryptionAlgorithm', KeyEncryptionAlgorithmIdentifier()),
+ namedtype.NamedType('encryptedKey', EncryptedKey())
+)
+
+
+# CHOICE over all recipient-info variants; 'ktri' (key transport) is the
+# untagged default alternative, the rest carry context tags [1]..[4].
+class RecipientInfo(univ.Choice):
+ pass
+
+
+RecipientInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('ktri', KeyTransRecipientInfo()),
+ namedtype.NamedType('kari', KeyAgreeRecipientInfo().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
+ namedtype.NamedType('kekri', KEKRecipientInfo().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))),
+ namedtype.NamedType('pwri', PasswordRecipientInfo().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))),
+ namedtype.NamedType('ori', OtherRecipientInfo().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4)))
+)
+
+
+class RecipientInfos(univ.SetOf):
+ pass
+
+
+RecipientInfos.componentType = RecipientInfo()
+RecipientInfos.subtypeSpec=constraint.ValueSizeConstraint(1, 256)
+
+
+class UnprotectedAttributes(univ.SetOf):
+ pass
+
+
+UnprotectedAttributes.componentType = Attribute()
+UnprotectedAttributes.subtypeSpec=constraint.ValueSizeConstraint(1, 256)
+
+
+# Top-level CMS EnvelopedData structure.
+class EnvelopedData(univ.Sequence):
+ pass
+
+
+EnvelopedData.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.OptionalNamedType('originatorInfo', OriginatorInfo().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('recipientInfos', RecipientInfos()),
+ namedtype.NamedType('encryptedContentInfo', EncryptedContentInfo()),
+ namedtype.OptionalNamedType('unprotectedAttrs', UnprotectedAttributes().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+# Legacy PKCS#7 certificate choice (extendedCertificate is obsolete).
+class ExtendedCertificateOrCertificate(univ.Choice):
+ pass
+
+
+ExtendedCertificateOrCertificate.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certificate', Certificate()),
+ namedtype.NamedType('extendedCertificate', ExtendedCertificate().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
+)
+
+
+class ExtendedCertificatesAndCertificates(univ.SetOf):
+ pass
+
+
+ExtendedCertificatesAndCertificates.componentType = ExtendedCertificateOrCertificate()
+
+
+# FAST option bits (RFC 6113 section 5.4.2); bit positions, not values.
+class FastOptions(univ.BitString):
+ pass
+
+
+FastOptions.namedValues = namedval.NamedValues(
+ ('reserved', 0),
+ ('hide-client-names', 1),
+ ('kdc-follow-referrals', 16)
+)
+
+
+# PKINIT KDC Diffie-Hellman key info (RFC 4556 section 3.2.3.1).
+class KDCDHKeyInfo(univ.Sequence):
+ pass
+
+
+KDCDHKeyInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('subjectPublicKey', univ.BitString().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('nonce', univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, 4294967295)).subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('dhKeyExpiration', KerberosTime().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+)
+
+
+# KDCOptions flag bit names (RFC 4120 section 5.4.1).
+class KDCOptionsValues(univ.BitString):
+ pass
+
+
+KDCOptionsValues.namedValues = namedval.NamedValues(
+ ('reserved', 0),
+ ('forwardable', 1),
+ ('forwarded', 2),
+ ('proxiable', 3),
+ ('proxy', 4),
+ ('allow-postdate', 5),
+ ('postdated', 6),
+ ('unused7', 7),
+ ('renewable', 8),
+ ('unused9', 9),
+ ('unused10', 10),
+ ('opt-hardware-auth', 11),
+ ('unused12', 12),
+ ('unused13', 13),
+ ('cname-in-addl-tkt', 14),
+ ('canonicalize', 15),
+ ('disable-transited-check', 26),
+ ('renewable-ok', 27),
+ ('enc-tkt-in-skey', 28),
+ ('renew', 30),
+ ('validate', 31)
+)
+
+
+# Wrapper sequence used for testing the tagged encoding of the flags.
+class KDCOptionsSequence(univ.Sequence):
+ pass
+
+
+KDCOptionsSequence.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('dummy', KDCOptionsValues().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
+
+# --- Microsoft Kerberos extensions (MS-KILE authorization-data types). ---
+class KERB_AD_RESTRICTION_ENTRY(univ.Sequence):
+ pass
+
+
+KERB_AD_RESTRICTION_ENTRY.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('restriction-type', Int32().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('restriction', univ.OctetString().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class KerbErrorDataType(univ.Integer):
+ pass
+
+
+# NOTE: tags start at [1]; there is deliberately no [0] component here.
+class KERB_ERROR_DATA(univ.Sequence):
+ pass
+
+
+KERB_ERROR_DATA.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('data-type', KerbErrorDataType().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('data-value', univ.OctetString().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+)
+
+
+class KERB_KEY_LIST_REP(univ.SequenceOf):
+ pass
+
+
+KERB_KEY_LIST_REP.componentType = EncryptionKey()
+
+
+class KERB_KEY_LIST_REQ(univ.SequenceOf):
+ pass
+
+
+KERB_KEY_LIST_REQ.componentType = EncryptionType()
+
+
+class KERB_LOCAL(univ.OctetString):
+ pass
+
+
+# PA-PAC-REQUEST pre-auth data: client asks for / declines a PAC.
+class KERB_PA_PAC_REQUEST(univ.Sequence):
+ pass
+
+
+KERB_PA_PAC_REQUEST.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('include-pac', univ.Boolean().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
+
+# --- Top-level Kerberos messages (RFC 4120 section 5). Each carries an
+# APPLICATION-class tag equal to its msg-type number. ---
+class KRB_CRED(univ.Sequence):
+ pass
+
+
+KRB_CRED.tagSet = univ.Sequence.tagSet.tagExplicitly(tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 22))
+KRB_CRED.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('pvno', univ.Integer().subtype(subtypeSpec=constraint.SingleValueConstraint(5)).subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('msg-type', univ.Integer().subtype(subtypeSpec=constraint.SingleValueConstraint(22)).subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('tickets', univ.SequenceOf(componentType=Ticket()).subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.NamedType('enc-part', EncryptedData().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3)))
+)
+
+
+class KRB_ERROR(univ.Sequence):
+ pass
+
+
+KRB_ERROR.tagSet = univ.Sequence.tagSet.tagExplicitly(tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 30))
+KRB_ERROR.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('pvno', univ.Integer().subtype(subtypeSpec=constraint.SingleValueConstraint(5)).subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('msg-type', univ.Integer().subtype(subtypeSpec=constraint.SingleValueConstraint(30)).subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('ctime', KerberosTime().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('cusec', Microseconds().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.NamedType('stime', KerberosTime().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))),
+ namedtype.NamedType('susec', Microseconds().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 5))),
+ namedtype.NamedType('error-code', Int32().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 6))),
+ namedtype.OptionalNamedType('crealm', Realm().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 7))),
+ namedtype.OptionalNamedType('cname', PrincipalName().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 8))),
+ namedtype.NamedType('realm', Realm().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 9))),
+ namedtype.NamedType('sname', PrincipalName().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 10))),
+ namedtype.OptionalNamedType('e-text', KerberosString().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 11))),
+ namedtype.OptionalNamedType('e-data', univ.OctetString().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 12)))
+)
+
+
+class KRB_PRIV(univ.Sequence):
+ pass
+
+
+# NOTE: context tag [2] is intentionally absent in KRB-PRIV (RFC 4120 s5.7.1).
+KRB_PRIV.tagSet = univ.Sequence.tagSet.tagExplicitly(tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 21))
+KRB_PRIV.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('pvno', univ.Integer().subtype(subtypeSpec=constraint.SingleValueConstraint(5)).subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('msg-type', univ.Integer().subtype(subtypeSpec=constraint.SingleValueConstraint(21)).subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('enc-part', EncryptedData().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3)))
+)
+
+
+class KRB_SAFE_BODY(univ.Sequence):
+ pass
+
+
+KRB_SAFE_BODY.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('user-data', univ.OctetString().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('timestamp', KerberosTime().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('usec', Microseconds().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('seq-number', UInt32().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.NamedType('s-address', HostAddress().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4))),
+ namedtype.OptionalNamedType('r-address', HostAddress().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5)))
+)
+
+
+class KRB_SAFE(univ.Sequence):
+ pass
+
+
+KRB_SAFE.tagSet = univ.Sequence.tagSet.tagExplicitly(tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 20))
+KRB_SAFE.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('pvno', univ.Integer().subtype(subtypeSpec=constraint.SingleValueConstraint(5)).subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('msg-type', univ.Integer().subtype(subtypeSpec=constraint.SingleValueConstraint(20)).subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('safe-body', KRB_SAFE_BODY().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))),
+ namedtype.NamedType('cksum', Checksum().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3)))
+)
+
+
+# PKINIT KRB5PrincipalName (RFC 4556): realm + principal name pair.
+class KRB5PrincipalName(univ.Sequence):
+ pass
+
+
+KRB5PrincipalName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('realm', Realm().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('principalName', PrincipalName().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
+)
+
+
+class KerbErrorDataTypeValues(univ.Integer):
+ pass
+
+
+KerbErrorDataTypeValues.namedValues = namedval.NamedValues(
+ ('kERB-AP-ERR-TYPE-SKEW-RECOVERY', 2),
+ ('kERB-ERR-TYPE-EXTENDED', 3)
+)
+
+
+class KerbErrorDataTypeSequence(univ.Sequence):
+ pass
+
+
+KerbErrorDataTypeSequence.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('dummy', KerbErrorDataTypeValues().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
+
+# --- FAST (RFC 6113) pre-authentication framework structures. ---
+class KrbFastArmor(univ.Sequence):
+ pass
+
+
+KrbFastArmor.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('armor-type', Int32().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('armor-value', univ.OctetString().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class KrbFastArmoredRep(univ.Sequence):
+ pass
+
+
+KrbFastArmoredRep.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('enc-fast-rep', EncryptedData().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
+)
+
+
+class KrbFastArmoredReq(univ.Sequence):
+ pass
+
+
+KrbFastArmoredReq.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('armor', KrbFastArmor().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('req-checksum', Checksum().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
+ namedtype.NamedType('enc-fast-req', EncryptedData().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)))
+)
+
+
+class KrbFastFinished(univ.Sequence):
+ pass
+
+
+KrbFastFinished.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('timestamp', KerberosTime().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('usec', Int32().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('crealm', Realm().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.NamedType('cname', PrincipalName().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))),
+ namedtype.NamedType('ticket-checksum', Checksum().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4)))
+)
+
+
+class KrbFastReq(univ.Sequence):
+ pass
+
+
+KrbFastReq.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('fast-options', FastOptions().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('padata', univ.SequenceOf(componentType=PA_DATA()).subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('req-body', KDC_REQ_BODY().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)))
+)
+
+
+class KrbFastResponse(univ.Sequence):
+ pass
+
+
+KrbFastResponse.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('padata', univ.SequenceOf(componentType=PA_DATA()).subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('strengthen-key', EncryptionKey().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
+ namedtype.OptionalNamedType('finished', KrbFastFinished().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))),
+ namedtype.NamedType('nonce', UInt32().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
+)
+
+
+# Microsoft UPN subjectAltName payload type (UTF8 user principal name).
+class MS_UPN_SAN(char.UTF8String):
+ pass
+
+
+class MessageDigest(univ.OctetString):
+ pass
+
+
+# Kerberos msg-type values (RFC 4120 section 5.10).
+class MessageTypeValues(univ.Integer):
+ pass
+
+
+MessageTypeValues.namedValues = namedval.NamedValues(
+ ('krb-as-req', 10),
+ ('krb-as-rep', 11),
+ ('krb-tgs-req', 12),
+ ('krb-tgs-rep', 13),
+ ('krb-ap-req', 14),
+ ('krb-ap-rep', 15),
+ ('krb-safe', 20),
+ ('krb-priv', 21),
+ ('krb-cred', 22),
+ ('krb-error', 30)
+)
+
+
+class MessageTypeSequence(univ.Sequence):
+ pass
+
+
+MessageTypeSequence.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('dummy', MessageTypeValues().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
+
+# Principal name-type values (RFC 4120 s6.2 plus Microsoft/Heimdal
+# private negative values).
+class NameTypeValues(univ.Integer):
+ pass
+
+
+NameTypeValues.namedValues = namedval.NamedValues(
+ ('kRB5-NT-UNKNOWN', 0),
+ ('kRB5-NT-PRINCIPAL', 1),
+ ('kRB5-NT-SRV-INST', 2),
+ ('kRB5-NT-SRV-HST', 3),
+ ('kRB5-NT-SRV-XHST', 4),
+ ('kRB5-NT-UID', 5),
+ ('kRB5-NT-X500-PRINCIPAL', 6),
+ ('kRB5-NT-SMTP-NAME', 7),
+ ('kRB5-NT-ENTERPRISE-PRINCIPAL', 10),
+ ('kRB5-NT-WELLKNOWN', 11),
+ ('kRB5-NT-ENT-PRINCIPAL-AND-ID', -130),
+ ('kRB5-NT-MS-PRINCIPAL', -128),
+ ('kRB5-NT-MS-PRINCIPAL-AND-ID', -129)
+)
+
+
+class NameTypeSequence(univ.Sequence):
+ pass
+
+
+NameTypeSequence.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('dummy', NameTypeValues().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
+
+# --- Pre-authentication data payloads (RFC 4120, RFC 6113, RFC 4556). ---
+class PA_ENC_TIMESTAMP(EncryptedData):
+ pass
+
+
+class PA_ENC_TS_ENC(univ.Sequence):
+ pass
+
+
+PA_ENC_TS_ENC.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('patimestamp', KerberosTime().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('pausec', Microseconds().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class PA_FX_FAST_REPLY(univ.Choice):
+ pass
+
+
+PA_FX_FAST_REPLY.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('armored-data', KrbFastArmoredRep().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
+)
+
+
+class PA_FX_FAST_REQUEST(univ.Choice):
+ pass
+
+
+PA_FX_FAST_REQUEST.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('armored-data', KrbFastArmoredReq().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
+)
+
+
+class PACOptionFlags(KerberosFlags):
+ pass
+
+
+class PA_PAC_OPTIONS(univ.Sequence):
+ pass
+
+
+PA_PAC_OPTIONS.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('options', PACOptionFlags().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
+
+# PKINIT AS reply (RFC 4556 s3.2.3); note implicit vs explicit tagging mix.
+class PA_PK_AS_REP(univ.Choice):
+ pass
+
+
+PA_PK_AS_REP.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('dhInfo', DHRepInfo().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('encKeyPack', univ.OctetString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+# Windows 2000 variant of the PKINIT AS reply.
+class PA_PK_AS_REP_Win2k(univ.Choice):
+ pass
+
+
+PA_PK_AS_REP_Win2k.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('dhSignedData', univ.OctetString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('encKeyPack', univ.OctetString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class PA_PK_AS_REQ(univ.Sequence):
+ pass
+
+
+PA_PK_AS_REQ.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('signedAuthPack', univ.OctetString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('trustedCertifiers', univ.SequenceOf(componentType=ExternalPrincipalIdentifier()).subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('kdcPkId', univ.OctetString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+)
+
+
+class TrustedCA_Win2k(univ.Choice):
+ pass
+
+
+TrustedCA_Win2k.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('caName', univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('issuerAndSerial', IssuerAndSerialNumber().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)))
+)
+
+
+class PA_PK_AS_REQ_Win2k(univ.Sequence):
+ pass
+
+
+PA_PK_AS_REQ_Win2k.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('signedAuthPack', univ.OctetString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('trustedCertifiers', univ.SequenceOf(componentType=TrustedCA_Win2k()).subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('kdcCert', univ.OctetString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.OptionalNamedType('encryptionCert', univ.OctetString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4)))
+)
+
+
+# S4U2Self (protocol transition) pre-auth data (MS-SFU section 2.2.1).
+class PA_S4U2Self(univ.Sequence):
+ pass
+
+
+PA_S4U2Self.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('name', PrincipalName().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('realm', Realm().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('cksum', Checksum().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))),
+ namedtype.NamedType('auth', KerberosString().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
+)
+
+
+class PA_SUPPORTED_ENCTYPES(Int32):
+ pass
+
+
+class PACOptionFlagsValues(univ.BitString):
+ pass
+
+
+PACOptionFlagsValues.namedValues = namedval.NamedValues(
+ ('claims', 0),
+ ('branch-aware', 1),
+ ('forward-to-full-dc', 2),
+ ('resource-based-constrained-delegation', 3)
+)
+
+
+class PACOptionFlagsSequence(univ.Sequence):
+ pass
+
+
+PACOptionFlagsSequence.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('dummy', PACOptionFlagsValues().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
+
+class PADataTypeValues(univ.Integer):
+ pass
+
+
+PADataTypeValues.namedValues = namedval.NamedValues(
+ ('kRB5-PADATA-NONE', 0),
+ ('kRB5-PADATA-KDC-REQ', 1),
+ ('kRB5-PADATA-ENC-TIMESTAMP', 2),
+ ('kRB5-PADATA-PW-SALT', 3),
+ ('kRB5-PADATA-ENC-UNIX-TIME', 5),
+ ('kRB5-PADATA-SANDIA-SECUREID', 6),
+ ('kRB5-PADATA-SESAME', 7),
+ ('kRB5-PADATA-OSF-DCE', 8),
+ ('kRB5-PADATA-CYBERSAFE-SECUREID', 9),
+ ('kRB5-PADATA-AFS3-SALT', 10),
+ ('kRB5-PADATA-ETYPE-INFO', 11),
+ ('kRB5-PADATA-SAM-CHALLENGE', 12),
+ ('kRB5-PADATA-SAM-RESPONSE', 13),
+ ('kRB5-PADATA-PK-AS-REQ-19', 14),
+ ('kRB5-PADATA-PK-AS-REP-19', 15),
+ ('kRB5-PADATA-PK-AS-REQ', 16),
+ ('kRB5-PADATA-PK-AS-REP', 17),
+ ('kRB5-PADATA-PA-PK-OCSP-RESPONSE', 18),
+ ('kRB5-PADATA-ETYPE-INFO2', 19),
+ ('kRB5-PADATA-SVR-REFERRAL-INFO', 20),
+ ('kRB5-PADATA-SAM-REDIRECT', 21),
+ ('kRB5-PADATA-GET-FROM-TYPED-DATA', 22),
+ ('kRB5-PADATA-SAM-ETYPE-INFO', 23),
+ ('kRB5-PADATA-SERVER-REFERRAL', 25),
+ ('kRB5-PADATA-ALT-PRINC', 24),
+ ('kRB5-PADATA-SAM-CHALLENGE2', 30),
+ ('kRB5-PADATA-SAM-RESPONSE2', 31),
+ ('kRB5-PA-EXTRA-TGT', 41),
+ ('kRB5-PADATA-TD-KRB-PRINCIPAL', 102),
+ ('kRB5-PADATA-PK-TD-TRUSTED-CERTIFIERS', 104),
+ ('kRB5-PADATA-PK-TD-CERTIFICATE-INDEX', 105),
+ ('kRB5-PADATA-TD-APP-DEFINED-ERROR', 106),
+ ('kRB5-PADATA-TD-REQ-NONCE', 107),
+ ('kRB5-PADATA-TD-REQ-SEQ', 108),
+ ('kRB5-PADATA-PA-PAC-REQUEST', 128),
+ ('kRB5-PADATA-FOR-USER', 129),
+ ('kRB5-PADATA-FOR-X509-USER', 130),
+ ('kRB5-PADATA-FOR-CHECK-DUPS', 131),
+ ('kRB5-PADATA-AS-CHECKSUM', 132),
+ ('kRB5-PADATA-FX-COOKIE', 133),
+ ('kRB5-PADATA-AUTHENTICATION-SET', 134),
+ ('kRB5-PADATA-AUTH-SET-SELECTED', 135),
+ ('kRB5-PADATA-FX-FAST', 136),
+ ('kRB5-PADATA-FX-ERROR', 137),
+ ('kRB5-PADATA-ENCRYPTED-CHALLENGE', 138),
+ ('kRB5-PADATA-OTP-CHALLENGE', 141),
+ ('kRB5-PADATA-OTP-REQUEST', 142),
+ ('kBB5-PADATA-OTP-CONFIRM', 143),
+ ('kRB5-PADATA-OTP-PIN-CHANGE', 144),
+ ('kRB5-PADATA-EPAK-AS-REQ', 145),
+ ('kRB5-PADATA-EPAK-AS-REP', 146),
+ ('kRB5-PADATA-PKINIT-KX', 147),
+ ('kRB5-PADATA-PKU2U-NAME', 148),
+ ('kRB5-PADATA-REQ-ENC-PA-REP', 149),
+ ('kRB5-PADATA-AS-FRESHNESS', 150),
+ ('kRB5-PADATA-SUPPORTED-ETYPES', 165),
+ ('kRB5-PADATA-PAC-OPTIONS', 167),
+ ('kRB5-PADATA-GSS', 655)
+)
+
+
+class PADataTypeSequence(univ.Sequence):
+ pass
+
+
+PADataTypeSequence.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('dummy', PADataTypeValues().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
+
+# PKINIT reply key package (RFC 4556 s3.2.3.2): new AS reply key + checksum.
+class ReplyKeyPack(univ.Sequence):
+ pass
+
+
+ReplyKeyPack.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('replyKey', EncryptionKey().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('asChecksum', Checksum().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
+)
+
+
+# Windows 2000 PKINIT variant: nonce instead of an AS checksum.
+class ReplyKeyPack_Win2k(univ.Sequence):
+ pass
+
+
+ReplyKeyPack_Win2k.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('replyKey', EncryptionKey().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('nonce', univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(-2147483648, 2147483647)).subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class SignatureValue(univ.OctetString):
+ pass
+
+
+class SignedAttributes(univ.SetOf):
+ pass
+
+
+SignedAttributes.componentType = Attribute()
+SignedAttributes.subtypeSpec=constraint.ValueSizeConstraint(1, 256)
+
+
+class SignerIdentifier(univ.Choice):
+ pass
+
+
+SignerIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuerAndSerialNumber', IssuerAndSerialNumber()),
+ namedtype.NamedType('subjectKeyIdentifier', SubjectKeyIdentifier().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
+
+class UnsignedAttributes(univ.SetOf):
+ pass
+
+
+UnsignedAttributes.componentType = Attribute()
+UnsignedAttributes.subtypeSpec=constraint.ValueSizeConstraint(1, 256)
+
+
+# CMS per-signer information (RFC 5652 section 5.3).
+class SignerInfo(univ.Sequence):
+ pass
+
+
+SignerInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.NamedType('sid', SignerIdentifier()),
+ namedtype.NamedType('digestAlgorithm', DigestAlgorithmIdentifier()),
+ namedtype.OptionalNamedType('signedAttrs', SignedAttributes().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('signatureAlgorithm', SignatureAlgorithmIdentifier()),
+ namedtype.NamedType('signature', SignatureValue()),
+ namedtype.OptionalNamedType('unsignedAttrs', UnsignedAttributes().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class SignerInfos(univ.SetOf):
+ pass
+
+
+SignerInfos.componentType = SignerInfo()
+
+
+# CMS SignedData (RFC 5652 section 5.1).
+class SignedData(univ.Sequence):
+ pass
+
+
+SignedData.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.NamedType('digestAlgorithms', DigestAlgorithmIdentifiers()),
+ namedtype.NamedType('encapContentInfo', EncapsulatedContentInfo()),
+ namedtype.OptionalNamedType('certificates', CertificateSet().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('crls', RevocationInfoChoices().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('signerInfos', SignerInfos())
+)
+
+
+class Version_RFC2315(univ.Integer):
+ pass
+
+
+# Older PKCS#7 (RFC 2315) SignedData: 'contentInfo' instead of
+# 'encapContentInfo', kept for interop with Windows producers.
+class SignedData_RFC2315(univ.Sequence):
+ pass
+
+
+SignedData_RFC2315.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', Version_RFC2315()),
+ namedtype.NamedType('digestAlgorithms', DigestAlgorithmIdentifiers()),
+ namedtype.NamedType('contentInfo', ContentInfo()),
+ namedtype.OptionalNamedType('certificates', CertificateSet().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('crls', RevocationInfoChoices().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('signerInfos', SignerInfos())
+)
+
+
+class SubjectAltName(GeneralNames):
+ pass
+
+
+# --- PKINIT typed-data error payloads (RFC 4556 section 3.2.2). ---
+class TD_DH_PARAMETERS(univ.SequenceOf):
+ pass
+
+
+TD_DH_PARAMETERS.componentType = AlgorithmIdentifier()
+
+
+class TD_INVALID_CERTIFICATES(univ.SequenceOf):
+ pass
+
+
+TD_INVALID_CERTIFICATES.componentType = ExternalPrincipalIdentifier()
+
+
+class TD_TRUSTED_CERTIFIERS(univ.SequenceOf):
+ pass
+
+
+TD_TRUSTED_CERTIFIERS.componentType = ExternalPrincipalIdentifier()
+
+
+# TGS-REP/TGS-REQ reuse the shared KDC_REP/KDC_REQ layout, differing only
+# in their APPLICATION tags (13 and 12, matching their msg-type numbers).
+class TGS_REP(KDC_REP):
+ pass
+
+
+TGS_REP.tagSet = KDC_REP.tagSet.tagExplicitly(tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 13))
+
+
+class TGS_REQ(KDC_REQ):
+ pass
+
+
+TGS_REQ.tagSet = KDC_REQ.tagSet.tagExplicitly(tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 12))
+
+
+class TYPED_DATA(univ.SequenceOf):
+ pass
+
+
+TYPED_DATA.componentType = univ.Sequence(componentType=namedtype.NamedTypes(
+ namedtype.NamedType('data-type', Int32().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('data-value', univ.OctetString().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+))
+
+TYPED_DATA.subtypeSpec=constraint.ValueSizeConstraint(1, 256)
+
+
+# TicketFlags bit names (RFC 4120 s5.3; enc-pa-rep from RFC 6806).
+class TicketFlagsValues(univ.BitString):
+ pass
+
+
+TicketFlagsValues.namedValues = namedval.NamedValues(
+ ('reserved', 0),
+ ('forwardable', 1),
+ ('forwarded', 2),
+ ('proxiable', 3),
+ ('proxy', 4),
+ ('may-postdate', 5),
+ ('postdated', 6),
+ ('invalid', 7),
+ ('renewable', 8),
+ ('initial', 9),
+ ('pre-authent', 10),
+ ('hw-authent', 11),
+ ('transited-policy-checked', 12),
+ ('ok-as-delegate', 13),
+ ('enc-pa-rep', 15)
+)
+
+
+class TicketFlagsSequence(univ.Sequence):
+ pass
+
+
+TicketFlagsSequence.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('dummy', TicketFlagsValues().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
+
+# --- Object identifiers used by PKINIT/CMS; _OID builds an OID from
+# components, where a leading OID argument acts as a prefix. ---
+nistAlgorithms = _OID(2, 16, 840, 1, 101, 3, 4)
+
+
+aes = _OID(nistAlgorithms, 1)
+
+
+# 2.16.840.1.101.3.4.1.42 — AES-256-CBC.
+aes256_CBC_PAD = _OID(aes, 42)
+
+
+rsadsi = _OID(1, 2, 840, 113549)
+
+
+encryptionAlgorithm = _OID(rsadsi, 3)
+
+
+# 1.2.840.113549.3.7 — triple-DES CBC.
+des_EDE3_CBC = _OID(encryptionAlgorithm, 7)
+
+
+dhpublicnumber = _OID(1, 2, 840, 10046, 2, 1)
+
+
+id_ce = _OID(2, 5, 29)
+
+
+id_ce_subjectAltName = _OID(id_ce, 17)
+
+
+id_contentType = _OID(1, 2, 840, 113549, 1, 9, 3)
+
+
+id_data = _OID(1, 2, 840, 113549, 1, 7, 1)
+
+
+id_envelopedData = _OID(1, 2, 840, 113549, 1, 7, 3)
+
+
+id_krb5 = _OID(1, 3, 6, 1, 5, 2)
+
+
+id_messageDigest = _OID(1, 2, 840, 113549, 1, 9, 4)
+
+
+id_pkcs1_sha256WithRSAEncryption = _OID(1, 2, 840, 113549, 1, 1, 11)
+
+
+# PKINIT arc (RFC 4556): authData=1, DHKeyData=2, rkeyData=3, KPKdc=5.
+id_pkinit = _OID(1, 3, 6, 1, 5, 2, 3)
+
+
+id_pkinit_DHKeyData = _OID(id_pkinit, 2)
+
+
+id_pkinit_authData = _OID(id_pkinit, 1)
+
+
+# Microsoft UPN subjectAltName OID.
+id_pkinit_ms_san = _OID(1, 3, 6, 1, 4, 1, 311, 20, 2, 3)
+
+
+id_pkinit_rkeyData = _OID(id_pkinit, 3)
+
+
+id_sha1 = _OID(1, 3, 14, 3, 2, 26)
+
+
+id_sha512 = _OID(2, 16, 840, 1, 101, 3, 4, 2, 3)
+
+
+id_signedData = _OID(1, 2, 840, 113549, 1, 7, 2)
+
+
+kdc_authentication = _OID(id_pkinit, 5)
+
+
+md2 = _OID(1, 2, 840, 113549, 2, 2)
+
+
+md5 = _OID(1, 2, 840, 113549, 2, 5)
+
+
+rsaEncryption = _OID(1, 2, 840, 113549, 1, 1, 1)
+
+
+sha1WithRSAEncryption = _OID(1, 2, 840, 113549, 1, 1, 5)
+
+
+smartcard_logon = _OID(1, 3, 6, 1, 4, 1, 311, 20, 2, 2)
+
+
+# Microsoft NTDS CA security extension (szOID_NTDS_CA_SECURITY_EXT) and
+# its objectSid component, used for certificate-to-account strong mapping.
+szOID_NTDS_CA_SECURITY_EXT = _OID(1, 3, 6, 1, 4, 1, 311, 25, 2)
+
+
+szOID_NTDS_OBJECTSID = _OID(1, 3, 6, 1, 4, 1, 311, 25, 2, 1)
+
+
+# --- X.520/X.411 upper-bound constants (as imported by RFC 5280 annex). ---
+ub_common_name = univ.Integer(64)
+
+
+ub_common_name_length = univ.Integer(64)
+
+
+ub_e163_4_number_length = univ.Integer(15)
+
+
+ub_e163_4_sub_address_length = univ.Integer(40)
+
+
+ub_emailaddress_length = univ.Integer(255)
+
+
+ub_integer_options = univ.Integer(256)
+
+
+ub_locality_name = univ.Integer(128)
+
+
+ub_match = univ.Integer(128)
+
+
+ub_name = univ.Integer(32768)
+
+
+ub_organization_name = univ.Integer(64)
+
+
+ub_organizational_unit_name = univ.Integer(64)
+
+
+ub_pds_name_length = univ.Integer(16)
+
+
+ub_pds_parameter_length = univ.Integer(30)
+
+
+ub_pds_physical_address_lines = univ.Integer(6)
+
+
+ub_postal_code_length = univ.Integer(16)
+
+
+ub_pseudonym = univ.Integer(128)
+
+
+ub_serial_number = univ.Integer(64)
+
+
+ub_state_name = univ.Integer(128)
+
+
+ub_title = univ.Integer(64)
+
+
+ub_unformatted_address_length = univ.Integer(180)
+
+
diff --git a/python/samba/tests/krb5/rodc_tests.py b/python/samba/tests/krb5/rodc_tests.py
new file mode 100755
index 0000000..71ef603
--- /dev/null
+++ b/python/samba/tests/krb5/rodc_tests.py
@@ -0,0 +1,77 @@
+#!/usr/bin/env python3
+# Unix SMB/CIFS implementation.
+# Copyright (C) Stefan Metzmacher 2020
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import sys
+import os
+
+# Make the in-tree Samba python modules importable when the script is run
+# from the source tree, and keep stdout/stderr unbuffered.
+sys.path.insert(0, "bin/python")
+os.environ["PYTHONUNBUFFERED"] = "1"
+
+from samba.tests.krb5.kdc_base_test import KDCBaseTest
+
+# Debug toggles copied into each test case by setUp().
+global_asn1_print = False
+global_hexdump = False
+
+
+class RodcKerberosTests(KDCBaseTest):
+    """Tests for Kerberos tickets issued by an RODC krbtgt account."""
+
+    def setUp(self):
+        """Apply the module-level ASN.1-print/hexdump debug settings."""
+        super().setUp()
+        self.do_asn1_print = global_asn1_print
+        self.do_hexdump = global_hexdump
+
+    # Ensure that an RODC correctly issues tickets signed with its krbtgt key
+    # and including the RODCIdentifier.
+    def test_rodc_ticket_signature(self):
+        """Verify PAC checksums on a TGT and a service ticket from the RODC."""
+        # Both accounts must be allowed to replicate to (and be revealed
+        # to) the RODC so it can issue tickets for them.
+        user_creds = self.get_cached_creds(
+            account_type=self.AccountType.USER,
+            opts={
+                'allowed_replication': True,
+                'revealed_to_rodc': True
+            })
+        target_creds = self.get_cached_creds(
+            account_type=self.AccountType.COMPUTER,
+            opts={
+                'allowed_replication': True,
+                'revealed_to_rodc': True
+            })
+
+        krbtgt_creds = self.get_rodc_krbtgt_creds()
+        rodc_key = self.TicketDecryptionKey_from_creds(krbtgt_creds)
+
+        # Get a TGT from the RODC.
+        tgt = self.get_tgt(user_creds, to_rodc=True)
+
+        # Ensure the PAC contains the expected checksums.
+        self.verify_ticket(tgt, rodc_key, service_ticket=False)
+
+        # Get a service ticket from the RODC.
+        service_ticket = self.get_service_ticket(tgt, target_creds,
+                                                 to_rodc=True)
+
+        # Ensure the PAC contains the expected checksums.
+        self.verify_ticket(service_ticket, rodc_key, service_ticket=True,
+                           expect_ticket_checksum=True,
+                           expect_full_checksum=True)
+
+if __name__ == "__main__":
+ global_asn1_print = False
+ global_hexdump = False
+ import unittest
+ unittest.main()
diff --git a/python/samba/tests/krb5/s4u_tests.py b/python/samba/tests/krb5/s4u_tests.py
new file mode 100755
index 0000000..b91c412
--- /dev/null
+++ b/python/samba/tests/krb5/s4u_tests.py
@@ -0,0 +1,1838 @@
+#!/usr/bin/env python3
+# Unix SMB/CIFS implementation.
+# Copyright (C) Stefan Metzmacher 2020
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import sys
+import os
+
+# Make the in-tree Samba python modules importable when the script is run
+# from the source tree, and keep stdout/stderr unbuffered.
+sys.path.insert(0, "bin/python")
+os.environ["PYTHONUNBUFFERED"] = "1"
+
+import functools
+import time
+
+from samba import dsdb, ntstatus
+from samba.dcerpc import krb5pac, lsa, security
+
+from samba.tests import env_get_var_value
+from samba.tests.krb5.kcrypto import Cksumtype, Enctype
+from samba.tests.krb5.kdc_base_test import KDCBaseTest
+from samba.tests.krb5.raw_testcase import (
+ RawKerberosTest,
+ RodcPacEncryptionKey,
+ ZeroedChecksumKey
+)
+from samba.tests.krb5.rfc4120_constants import (
+ AD_IF_RELEVANT,
+ AES256_CTS_HMAC_SHA1_96,
+ ARCFOUR_HMAC_MD5,
+ KDC_ERR_BADMATCH,
+ KDC_ERR_BADOPTION,
+ KDC_ERR_BAD_INTEGRITY,
+ KDC_ERR_GENERIC,
+ KDC_ERR_INAPP_CKSUM,
+ KDC_ERR_MODIFIED,
+ KDC_ERR_SUMTYPE_NOSUPP,
+ KDC_ERR_TGT_REVOKED,
+ KU_AS_REP_ENC_PART,
+ KU_PA_ENC_TIMESTAMP,
+ KU_TGS_REP_ENC_PART_SUB_KEY,
+ KU_TGS_REQ_AUTH_DAT_SESSION,
+ KU_TGS_REQ_AUTH_DAT_SUBKEY,
+ NT_PRINCIPAL,
+)
+import samba.tests.krb5.rfc4120_pyasn1 as krb5_asn1
+
+# Shorthand for the SID-type enumeration used in expected_groups tuples below.
+SidType = RawKerberosTest.SidType
+
+# Debug toggles copied into each test case by setUp().
+global_asn1_print = False
+global_hexdump = False
+
+
+class S4UKerberosTests(KDCBaseTest):
+
+    # Default group-attribute flags expected on PAC group SIDs in these tests.
+    default_attrs = security.SE_GROUP_DEFAULT_FLAGS
+
+    def setUp(self):
+        """Apply the module-level ASN.1-print/hexdump debug settings."""
+        super().setUp()
+        self.do_asn1_print = global_asn1_print
+        self.do_hexdump = global_hexdump
+
+    def _test_s4u2self(self, pa_s4u2self_ctype=None):
+        """Obtain a TGT via an AS exchange and then issue an S4U2Self TGS-REQ.
+
+        :param pa_s4u2self_ctype: optional checksum type to use for the
+            PA-FOR-USER padata (None lets PA_S4U2Self_create choose).
+        :return: the msg-type of the final reply (13 for a TGS-REP,
+            30 for a KRB-ERROR), which callers assert on.
+        """
+        service_creds = self.get_service_creds()
+        service = service_creds.get_username()
+        realm = service_creds.get_realm()
+
+        cname = self.PrincipalName_create(name_type=1, names=[service])
+        sname = self.PrincipalName_create(name_type=2, names=["krbtgt", realm])
+
+        till = self.get_KerberosTime(offset=36000)
+
+        kdc_options = krb5_asn1.KDCOptions('forwardable')
+        padata = None
+
+        etypes = (18, 17, 23)
+
+        # First AS-REQ carries no preauth data; the KDC is expected to
+        # answer with KRB-ERROR (msg-type 30), error-code 25
+        # (KDC_ERR_PREAUTH_REQUIRED), whose e-data tells us which etypes
+        # and salts to use.
+        req = self.AS_REQ_create(padata=padata,
+                                 kdc_options=str(kdc_options),
+                                 cname=cname,
+                                 realm=realm,
+                                 sname=sname,
+                                 from_time=None,
+                                 till_time=till,
+                                 renew_time=None,
+                                 nonce=0x7fffffff,
+                                 etypes=etypes,
+                                 addresses=None,
+                                 additional_tickets=None)
+        rep = self.send_recv_transaction(req)
+        self.assertIsNotNone(rep)
+
+        self.assertEqual(rep['msg-type'], 30)
+        self.assertEqual(rep['error-code'], 25)
+        rep_padata = self.der_decode(
+            rep['e-data'], asn1Spec=krb5_asn1.METHOD_DATA())
+
+        # Pick out the ETYPE-INFO2 (padata-type 19) entry.
+        # NOTE(review): if no such entry is present, etype_info2 is unbound
+        # below and a NameError results rather than a clean assertion.
+        for pa in rep_padata:
+            if pa['padata-type'] == 19:
+                etype_info2 = pa['padata-value']
+                break
+
+        etype_info2 = self.der_decode(
+            etype_info2, asn1Spec=krb5_asn1.ETYPE_INFO2())
+
+        key = self.PasswordKey_from_etype_info2(service_creds, etype_info2[0])
+
+        # Build PA-ENC-TIMESTAMP (padata-type 2) preauth from the derived key.
+        (patime, pausec) = self.get_KerberosTimeWithUsec()
+        pa_ts = self.PA_ENC_TS_ENC_create(patime, pausec)
+        pa_ts = self.der_encode(pa_ts, asn1Spec=krb5_asn1.PA_ENC_TS_ENC())
+
+        pa_ts = self.EncryptedData_create(key, KU_PA_ENC_TIMESTAMP, pa_ts)
+        pa_ts = self.der_encode(pa_ts, asn1Spec=krb5_asn1.EncryptedData())
+
+        pa_ts = self.PA_DATA_create(2, pa_ts)
+
+        kdc_options = krb5_asn1.KDCOptions('forwardable')
+        padata = [pa_ts]
+
+        # Second AS-REQ, now with preauth; expect an AS-REP (msg-type 11).
+        req = self.AS_REQ_create(padata=padata,
+                                 kdc_options=str(kdc_options),
+                                 cname=cname,
+                                 realm=realm,
+                                 sname=sname,
+                                 from_time=None,
+                                 till_time=till,
+                                 renew_time=None,
+                                 nonce=0x7fffffff,
+                                 etypes=etypes,
+                                 addresses=None,
+                                 additional_tickets=None)
+        rep = self.send_recv_transaction(req)
+        self.assertIsNotNone(rep)
+
+        msg_type = rep['msg-type']
+        self.assertEqual(msg_type, 11)
+
+        enc_part2 = key.decrypt(KU_AS_REP_ENC_PART, rep['enc-part']['cipher'])
+        # MIT KDC encodes both EncASRepPart and EncTGSRepPart with
+        # application tag 26
+        try:
+            enc_part2 = self.der_decode(
+                enc_part2, asn1Spec=krb5_asn1.EncASRepPart())
+        except Exception:
+            enc_part2 = self.der_decode(
+                enc_part2, asn1Spec=krb5_asn1.EncTGSRepPart())
+
+        # S4U2Self Request
+        sname = cname
+
+        for_user_name = env_get_var_value('FOR_USER')
+        uname = self.PrincipalName_create(name_type=1, names=[for_user_name])
+
+        kdc_options = krb5_asn1.KDCOptions('forwardable')
+        till = self.get_KerberosTime(offset=36000)
+        ticket = rep['ticket']
+        ticket_session_key = self.EncryptionKey_import(enc_part2['key'])
+        pa_s4u = self.PA_S4U2Self_create(name=uname, realm=realm,
+                                         tgt_session_key=ticket_session_key,
+                                         ctype=pa_s4u2self_ctype)
+        padata = [pa_s4u]
+
+        subkey = self.RandomKey(ticket_session_key.etype)
+
+        (ctime, cusec) = self.get_KerberosTimeWithUsec()
+
+        req = self.TGS_REQ_create(padata=padata,
+                                  cusec=cusec,
+                                  ctime=ctime,
+                                  ticket=ticket,
+                                  kdc_options=str(kdc_options),
+                                  cname=cname,
+                                  realm=realm,
+                                  sname=sname,
+                                  from_time=None,
+                                  till_time=till,
+                                  renew_time=None,
+                                  nonce=0x7ffffffe,
+                                  etypes=etypes,
+                                  addresses=None,
+                                  EncAuthorizationData=None,
+                                  EncAuthorizationData_key=None,
+                                  additional_tickets=None,
+                                  ticket_session_key=ticket_session_key,
+                                  authenticator_subkey=subkey)
+        rep = self.send_recv_transaction(req)
+        self.assertIsNotNone(rep)
+
+        # On success (TGS-REP, 13) decrypt the reply just to prove the
+        # subkey works; the decoded part is not otherwise inspected.
+        msg_type = rep['msg-type']
+        if msg_type == 13:
+            enc_part2 = subkey.decrypt(
+                KU_TGS_REP_ENC_PART_SUB_KEY, rep['enc-part']['cipher'])
+            enc_part2 = self.der_decode(
+                enc_part2, asn1Spec=krb5_asn1.EncTGSRepPart())
+
+        return msg_type
+
+ # Using the checksum type from the tgt_session_key happens to work
+ # everywhere
+    def test_s4u2self(self):
+        """S4U2Self with the default checksum type must yield a TGS-REP (13)."""
+        msg_type = self._test_s4u2self()
+        self.assertEqual(msg_type, 13)
+
+ # Per spec, the checksum of PA-FOR-USER is HMAC_MD5, see [MS-SFU] 2.2.1
+    def test_s4u2self_hmac_md5_checksum(self):
+        """S4U2Self with the spec-mandated HMAC-MD5 checksum must succeed."""
+        msg_type = self._test_s4u2self(pa_s4u2self_ctype=Cksumtype.HMAC_MD5)
+        self.assertEqual(msg_type, 13)
+
+    def test_s4u2self_md5_unkeyed_checksum(self):
+        """An unkeyed MD5 PA-FOR-USER checksum must be rejected (KRB-ERROR)."""
+        msg_type = self._test_s4u2self(pa_s4u2self_ctype=Cksumtype.MD5)
+        self.assertEqual(msg_type, 30)
+
+    def test_s4u2self_sha1_unkeyed_checksum(self):
+        """An unkeyed SHA1 PA-FOR-USER checksum must be rejected (KRB-ERROR)."""
+        msg_type = self._test_s4u2self(pa_s4u2self_ctype=Cksumtype.SHA1)
+        self.assertEqual(msg_type, 30)
+
+    def test_s4u2self_crc32_unkeyed_checksum(self):
+        """An unkeyed CRC32 PA-FOR-USER checksum must be rejected (KRB-ERROR)."""
+        msg_type = self._test_s4u2self(pa_s4u2self_ctype=Cksumtype.CRC32)
+        self.assertEqual(msg_type, 30)
+
+    def _run_s4u2self_test(self, kdc_dict):
+        """Run one parameterised S4U2Self exchange.
+
+        :param kdc_dict: dict of scenario parameters; every key is consumed
+            via pop() and the dict must be empty at the end (asserted), so
+            unknown keys in a test case are caught.
+        """
+        client_opts = kdc_dict.pop('client_opts', None)
+        client_creds = self.get_cached_creds(
+            account_type=self.AccountType.USER,
+            opts=client_opts)
+
+        service_opts = kdc_dict.pop('service_opts', None)
+        service_creds = self.get_cached_creds(
+            account_type=self.AccountType.COMPUTER,
+            opts=service_opts)
+
+        # Optionally tamper with the service's TGT (e.g. strip its PAC).
+        service_tgt = self.get_tgt(service_creds)
+        modify_service_tgt_fn = kdc_dict.pop('modify_service_tgt_fn', None)
+        if modify_service_tgt_fn is not None:
+            service_tgt = modify_service_tgt_fn(service_tgt)
+
+        client_name = client_creds.get_username()
+        client_cname = self.PrincipalName_create(name_type=NT_PRINCIPAL,
+                                                 names=[client_name])
+
+        # Default sname is host/<computer name without trailing '$'>.
+        service_name = kdc_dict.pop('service_name', None)
+        if service_name is None:
+            service_name = service_creds.get_username()[:-1]
+        service_sname = self.PrincipalName_create(name_type=NT_PRINCIPAL,
+                                                  names=['host', service_name])
+
+        realm = client_creds.get_realm()
+
+        expected_flags = kdc_dict.pop('expected_flags', None)
+        if expected_flags is not None:
+            expected_flags = krb5_asn1.TicketFlags(expected_flags)
+
+        unexpected_flags = kdc_dict.pop('unexpected_flags', None)
+        if unexpected_flags is not None:
+            unexpected_flags = krb5_asn1.TicketFlags(unexpected_flags)
+
+        expected_error_mode = kdc_dict.pop('expected_error_mode', 0)
+        expect_status = kdc_dict.pop('expect_status', None)
+        expected_status = kdc_dict.pop('expected_status', None)
+        if expected_error_mode:
+            check_error_fn = self.generic_check_kdc_error
+            check_rep_fn = None
+        else:
+            check_error_fn = None
+            check_rep_fn = self.generic_check_kdc_rep
+
+        # No current S4U2Self case passes these; guard against silent misuse.
+        self.assertIsNone(expect_status)
+        self.assertIsNone(expected_status)
+
+        kdc_options = kdc_dict.pop('kdc_options', '0')
+        kdc_options = krb5_asn1.KDCOptions(kdc_options)
+
+        service_decryption_key = self.TicketDecryptionKey_from_creds(
+            service_creds)
+
+        authenticator_subkey = self.RandomKey(Enctype.AES256)
+
+        etypes = kdc_dict.pop('etypes', (AES256_CTS_HMAC_SHA1_96,
+                                         ARCFOUR_HMAC_MD5))
+
+        expect_edata = kdc_dict.pop('expect_edata', None)
+        expected_groups = kdc_dict.pop('expected_groups', None)
+        unexpected_groups = kdc_dict.pop('unexpected_groups', None)
+
+        # Padata generator invoked by the exchange machinery to attach the
+        # PA-FOR-USER (S4U2Self) entry to the TGS-REQ.
+        def generate_s4u2self_padata(_kdc_exchange_dict,
+                                     _callback_dict,
+                                     req_body):
+            pa_s4u = self.PA_S4U2Self_create(
+                name=client_cname,
+                realm=realm,
+                tgt_session_key=service_tgt.session_key,
+                ctype=None)
+
+            return [pa_s4u], req_body
+
+        kdc_exchange_dict = self.tgs_exchange_dict(
+            expected_crealm=realm,
+            expected_cname=client_cname,
+            expected_srealm=realm,
+            expected_sname=service_sname,
+            expected_account_name=client_name,
+            expected_groups=expected_groups,
+            unexpected_groups=unexpected_groups,
+            expected_sid=client_creds.get_sid(),
+            expected_flags=expected_flags,
+            unexpected_flags=unexpected_flags,
+            ticket_decryption_key=service_decryption_key,
+            expect_ticket_checksum=True,
+            generate_padata_fn=generate_s4u2self_padata,
+            check_error_fn=check_error_fn,
+            check_rep_fn=check_rep_fn,
+            check_kdc_private_fn=self.generic_check_kdc_private,
+            expected_error_mode=expected_error_mode,
+            expect_status=expect_status,
+            expected_status=expected_status,
+            tgt=service_tgt,
+            authenticator_subkey=authenticator_subkey,
+            kdc_options=str(kdc_options),
+            expect_edata=expect_edata)
+
+        self._generic_kdc_exchange(kdc_exchange_dict,
+                                   cname=None,
+                                   realm=realm,
+                                   sname=service_sname,
+                                   etypes=etypes)
+
+        if not expected_error_mode:
+            # Check that the ticket contains a PAC.
+            ticket = kdc_exchange_dict['rep_ticket_creds']
+
+            pac = self.get_ticket_pac(ticket)
+            self.assertIsNotNone(pac)
+
+        # Ensure we used all the parameters given to us.
+        self.assertEqual({}, kdc_dict)
+
+ # Test performing an S4U2Self operation with a forwardable ticket. The
+ # resulting ticket should have the 'forwardable' flag set.
+    def test_s4u2self_forwardable(self):
+        """S4U2Self with a forwardable service TGT yields a forwardable ticket."""
+        self._run_s4u2self_test(
+            {
+                'client_opts': {
+                    'not_delegated': False
+                },
+                'kdc_options': 'forwardable',
+                'modify_service_tgt_fn': functools.partial(
+                    self.set_ticket_forwardable, flag=True),
+                'expected_flags': 'forwardable'
+            })
+
+ # Test performing an S4U2Self operation with a forwardable ticket that does
+ # not contain a PAC. The request should fail.
+    def test_s4u2self_no_pac(self):
+        """S4U2Self with a PAC-less service TGT fails with KDC_ERR_TGT_REVOKED."""
+        def forwardable_no_pac(ticket):
+            # Make the TGT forwardable, then strip its PAC.
+            ticket = self.set_ticket_forwardable(ticket, flag=True)
+            return self.remove_ticket_pac(ticket)
+
+        self._run_s4u2self_test(
+            {
+                'expected_error_mode': KDC_ERR_TGT_REVOKED,
+                'client_opts': {
+                    'not_delegated': False
+                },
+                'kdc_options': 'forwardable',
+                'modify_service_tgt_fn': forwardable_no_pac,
+                'expected_flags': 'forwardable',
+                'expect_edata': False
+            })
+
+ # Test performing an S4U2Self operation without requesting a forwardable
+ # ticket. The resulting ticket should not have the 'forwardable' flag set.
+    def test_s4u2self_without_forwardable(self):
+        """Without the 'forwardable' KDC option the ticket must not be forwardable."""
+        self._run_s4u2self_test(
+            {
+                'client_opts': {
+                    'not_delegated': False
+                },
+                'modify_service_tgt_fn': functools.partial(
+                    self.set_ticket_forwardable, flag=True),
+                'unexpected_flags': 'forwardable'
+            })
+
+ # Do an S4U2Self with a non-forwardable TGT. The 'forwardable' flag should
+ # not be set on the ticket.
+    def test_s4u2self_not_forwardable(self):
+        """A non-forwardable service TGT must not yield a forwardable ticket."""
+        self._run_s4u2self_test(
+            {
+                'client_opts': {
+                    'not_delegated': False
+                },
+                'kdc_options': 'forwardable',
+                'modify_service_tgt_fn': functools.partial(
+                    self.set_ticket_forwardable, flag=False),
+                'unexpected_flags': 'forwardable'
+            })
+
+ # Do an S4U2Self with the not_delegated flag set on the client. The
+ # 'forwardable' flag should not be set on the ticket.
+    def test_s4u2self_client_not_delegated(self):
+        """A client marked not_delegated must not get a forwardable ticket."""
+        self._run_s4u2self_test(
+            {
+                'client_opts': {
+                    'not_delegated': True
+                },
+                'kdc_options': 'forwardable',
+                'modify_service_tgt_fn': functools.partial(
+                    self.set_ticket_forwardable, flag=True),
+                'unexpected_flags': 'forwardable'
+            })
+
+ # Do an S4U2Self with a service not trusted to authenticate for delegation,
+ # but having an empty msDS-AllowedToDelegateTo attribute. The 'forwardable'
+ # flag should be set on the ticket.
+    def test_s4u2self_not_trusted_empty_allowed(self):
+        """Untrusted service with empty msDS-AllowedToDelegateTo: forwardable."""
+        self._run_s4u2self_test(
+            {
+                'client_opts': {
+                    'not_delegated': False
+                },
+                'service_opts': {
+                    'trusted_to_auth_for_delegation': False,
+                    'delegation_to_spn': ()
+                },
+                'kdc_options': 'forwardable',
+                'modify_service_tgt_fn': functools.partial(
+                    self.set_ticket_forwardable, flag=True),
+                'expected_flags': 'forwardable'
+            })
+
+ # Do an S4U2Self with a service not trusted to authenticate for delegation
+ # and having a non-empty msDS-AllowedToDelegateTo attribute. The
+ # 'forwardable' flag should not be set on the ticket.
+    def test_s4u2self_not_trusted_nonempty_allowed(self):
+        """Untrusted service with non-empty AllowedToDelegateTo: not forwardable."""
+        self._run_s4u2self_test(
+            {
+                'client_opts': {
+                    'not_delegated': False
+                },
+                'service_opts': {
+                    'trusted_to_auth_for_delegation': False,
+                    'delegation_to_spn': ('test',)
+                },
+                'kdc_options': 'forwardable',
+                'modify_service_tgt_fn': functools.partial(
+                    self.set_ticket_forwardable, flag=True),
+                'unexpected_flags': 'forwardable'
+            })
+
+ # Do an S4U2Self with a service trusted to authenticate for delegation and
+ # having an empty msDS-AllowedToDelegateTo attribute. The 'forwardable'
+ # flag should be set on the ticket.
+    def test_s4u2self_trusted_empty_allowed(self):
+        """Trusted service with empty AllowedToDelegateTo: forwardable."""
+        self._run_s4u2self_test(
+            {
+                'client_opts': {
+                    'not_delegated': False
+                },
+                'service_opts': {
+                    'trusted_to_auth_for_delegation': True,
+                    'delegation_to_spn': ()
+                },
+                'kdc_options': 'forwardable',
+                'modify_service_tgt_fn': functools.partial(
+                    self.set_ticket_forwardable, flag=True),
+                'expected_flags': 'forwardable'
+            })
+
+ # Do an S4U2Self with a service trusted to authenticate for delegation and
+ # having a non-empty msDS-AllowedToDelegateTo attribute. The 'forwardable'
+ # flag should be set on the ticket.
+    def test_s4u2self_trusted_nonempty_allowed(self):
+        """Trusted service with non-empty AllowedToDelegateTo: forwardable."""
+        self._run_s4u2self_test(
+            {
+                'client_opts': {
+                    'not_delegated': False
+                },
+                'service_opts': {
+                    'trusted_to_auth_for_delegation': True,
+                    'delegation_to_spn': ('test',)
+                },
+                'kdc_options': 'forwardable',
+                'modify_service_tgt_fn': functools.partial(
+                    self.set_ticket_forwardable, flag=True),
+                'expected_flags': 'forwardable'
+            })
+
+ # Do an S4U2Self with the sname in the request different to that of the
+ # service. We expect an error.
+    def test_s4u2self_wrong_sname(self):
+        """An sname belonging to a different account fails with KDC_ERR_BADMATCH."""
+        # Borrow another computer account's name to use as the mismatched
+        # sname in the request.
+        other_creds = self.get_cached_creds(
+            account_type=self.AccountType.COMPUTER,
+            opts={
+                'trusted_to_auth_for_delegation': True,
+                'id': 0
+            })
+        other_sname = other_creds.get_username()[:-1]
+
+        self._run_s4u2self_test(
+            {
+                'expected_error_mode': KDC_ERR_BADMATCH,
+                'expect_edata': False,
+                'client_opts': {
+                    'not_delegated': False
+                },
+                'service_opts': {
+                    'trusted_to_auth_for_delegation': True
+                },
+                'service_name': other_sname,
+                'kdc_options': 'forwardable',
+                'modify_service_tgt_fn': functools.partial(
+                    self.set_ticket_forwardable, flag=True)
+            })
+
+ # Do an S4U2Self where the service does not require authorization data. The
+ # resulting ticket should still contain a PAC.
+    def test_s4u2self_no_auth_data_required(self):
+        """Even with no_auth_data_required the S4U2Self ticket keeps its PAC."""
+        self._run_s4u2self_test(
+            {
+                'client_opts': {
+                    'not_delegated': False
+                },
+                'service_opts': {
+                    'trusted_to_auth_for_delegation': True,
+                    'no_auth_data_required': True
+                },
+                'kdc_options': 'forwardable',
+                'modify_service_tgt_fn': functools.partial(
+                    self.set_ticket_forwardable, flag=True),
+                'expected_flags': 'forwardable'
+            })
+
+ # Do an S4U2Self and check that the service asserted identity is part of
+ # the sids.
+    def test_s4u2self_asserted_identity(self):
+        """The PAC must carry the *service* asserted-identity SID, not the
+        authentication-authority one (the ... allows further group SIDs)."""
+        self._run_s4u2self_test(
+            {
+                'client_opts': {
+                    'not_delegated': False
+                },
+                'expected_groups': {
+                    (security.SID_SERVICE_ASSERTED_IDENTITY,
+                     SidType.EXTRA_SID,
+                     self.default_attrs),
+                    ...
+                },
+                'unexpected_groups': {
+                    security.SID_AUTHENTICATION_AUTHORITY_ASSERTED_IDENTITY,
+                },
+            })
+
+    def _run_delegation_test(self, kdc_dict):
+        """Run one parameterised S4U2Proxy (constrained delegation / RBCD)
+        exchange, optionally preceded by an S4U2Self step.
+
+        :param kdc_dict: dict of scenario parameters; every key is consumed
+            via pop() and the dict must be empty at the end (asserted).
+        """
+        s4u2self = kdc_dict.pop('s4u2self', False)
+
+        authtime_delay = kdc_dict.pop('authtime_delay', 0)
+
+        client_opts = kdc_dict.pop('client_opts', None)
+        client_creds = self.get_cached_creds(
+            account_type=self.AccountType.USER,
+            opts=client_opts)
+
+        sid = client_creds.get_sid()
+
+        service1_opts = kdc_dict.pop('service1_opts', {})
+        service2_opts = kdc_dict.pop('service2_opts', {})
+
+        # Exactly one (or neither) delegation mechanism may be selected:
+        # classic constrained delegation (service1 -> service2 via SPN) or
+        # resource-based constrained delegation (service2 allows service1).
+        allow_delegation = kdc_dict.pop('allow_delegation', False)
+        allow_rbcd = kdc_dict.pop('allow_rbcd', False)
+        self.assertFalse(allow_delegation and allow_rbcd)
+
+        if allow_rbcd:
+            service1_creds = self.get_cached_creds(
+                account_type=self.AccountType.COMPUTER,
+                opts=service1_opts)
+
+            self.assertNotIn('delegation_from_dn', service2_opts)
+            service2_opts['delegation_from_dn'] = str(service1_creds.get_dn())
+
+            service2_creds = self.get_cached_creds(
+                account_type=self.AccountType.COMPUTER,
+                opts=service2_opts)
+        else:
+            service2_creds = self.get_cached_creds(
+                account_type=self.AccountType.COMPUTER,
+                opts=service2_opts)
+
+            if allow_delegation:
+                self.assertNotIn('delegation_to_spn', service1_opts)
+                service1_opts['delegation_to_spn'] = service2_creds.get_spn()
+
+            service1_creds = self.get_cached_creds(
+                account_type=self.AccountType.COMPUTER,
+                opts=service1_opts)
+
+        service1_tgt = self.get_tgt(service1_creds)
+        self.assertElementPresent(service1_tgt.ticket_private, 'authtime')
+        service1_tgt_authtime = self.getElementValue(service1_tgt.ticket_private, 'authtime')
+
+        client_username = client_creds.get_username()
+        client_realm = client_creds.get_realm()
+        client_cname = self.PrincipalName_create(name_type=NT_PRINCIPAL,
+                                                 names=[client_username])
+
+        service1_name = service1_creds.get_username()[:-1]
+        service1_realm = service1_creds.get_realm()
+        service1_service = 'host'
+        service1_sname = self.PrincipalName_create(
+            name_type=NT_PRINCIPAL, names=[service1_service,
+                                           service1_name])
+        service1_decryption_key = self.TicketDecryptionKey_from_creds(
+            service1_creds)
+
+        expect_pac = kdc_dict.pop('expect_pac', True)
+
+        expected_groups = kdc_dict.pop('expected_groups', None)
+        unexpected_groups = kdc_dict.pop('unexpected_groups', None)
+
+        client_tkt_options = kdc_dict.pop('client_tkt_options', 'forwardable')
+        expected_flags = krb5_asn1.TicketFlags(client_tkt_options)
+
+        etypes = kdc_dict.pop('etypes', (AES256_CTS_HMAC_SHA1_96,
+                                         ARCFOUR_HMAC_MD5))
+
+        if s4u2self:
+            # First obtain the client service ticket via S4U2Self rather
+            # than via the client's own TGT.
+            self.assertEqual(authtime_delay, 0)
+
+            def generate_s4u2self_padata(_kdc_exchange_dict,
+                                         _callback_dict,
+                                         req_body):
+                pa_s4u = self.PA_S4U2Self_create(
+                    name=client_cname,
+                    realm=client_realm,
+                    tgt_session_key=service1_tgt.session_key,
+                    ctype=None)
+
+                return [pa_s4u], req_body
+
+            s4u2self_expected_flags = krb5_asn1.TicketFlags('forwardable')
+            s4u2self_unexpected_flags = krb5_asn1.TicketFlags('0')
+
+            s4u2self_kdc_options = krb5_asn1.KDCOptions('forwardable')
+
+            s4u2self_authenticator_subkey = self.RandomKey(Enctype.AES256)
+            s4u2self_kdc_exchange_dict = self.tgs_exchange_dict(
+                expected_crealm=client_realm,
+                expected_cname=client_cname,
+                expected_srealm=service1_realm,
+                expected_sname=service1_sname,
+                expected_account_name=client_username,
+                expected_groups=expected_groups,
+                unexpected_groups=unexpected_groups,
+                expected_sid=sid,
+                expected_flags=s4u2self_expected_flags,
+                unexpected_flags=s4u2self_unexpected_flags,
+                ticket_decryption_key=service1_decryption_key,
+                generate_padata_fn=generate_s4u2self_padata,
+                check_rep_fn=self.generic_check_kdc_rep,
+                check_kdc_private_fn=self.generic_check_kdc_private,
+                tgt=service1_tgt,
+                authenticator_subkey=s4u2self_authenticator_subkey,
+                kdc_options=str(s4u2self_kdc_options),
+                expect_edata=False)
+
+            self._generic_kdc_exchange(s4u2self_kdc_exchange_dict,
+                                       cname=None,
+                                       realm=service1_realm,
+                                       sname=service1_sname,
+                                       etypes=etypes)
+
+            client_service_tkt = s4u2self_kdc_exchange_dict['rep_ticket_creds']
+        else:
+            # Optionally delay so the client ticket's authtime differs from
+            # the service TGT's; fresh=True bypasses the credential cache.
+            if authtime_delay != 0:
+                time.sleep(authtime_delay)
+                fresh = True
+            else:
+                fresh = False
+
+            client_tgt = self.get_tgt(client_creds,
+                                      kdc_options=client_tkt_options,
+                                      expected_flags=expected_flags,
+                                      fresh=fresh)
+            client_service_tkt = self.get_service_ticket(
+                client_tgt,
+                service1_creds,
+                kdc_options=client_tkt_options,
+                expected_flags=expected_flags,
+                fresh=fresh)
+
+        # Optionally tamper with the client service ticket (e.g. remove the
+        # PAC or inject an existing S4U_DELEGATION_INFO).
+        modify_client_tkt_fn = kdc_dict.pop('modify_client_tkt_fn', None)
+        if modify_client_tkt_fn is not None:
+            client_service_tkt = modify_client_tkt_fn(client_service_tkt)
+
+        self.assertElementPresent(client_service_tkt.ticket_private, 'authtime')
+        expected_authtime = self.getElementValue(client_service_tkt.ticket_private, 'authtime')
+        if authtime_delay > 1:
+            self.assertNotEqual(expected_authtime, service1_tgt_authtime)
+
+        # The client's service ticket rides along as the additional ticket
+        # of the S4U2Proxy TGS-REQ.
+        additional_tickets = [client_service_tkt.ticket]
+
+        modify_service_tgt_fn = kdc_dict.pop('modify_service_tgt_fn', None)
+        if modify_service_tgt_fn is not None:
+            service1_tgt = modify_service_tgt_fn(service1_tgt)
+
+        kdc_options = kdc_dict.pop('kdc_options', None)
+        if kdc_options is None:
+            kdc_options = str(krb5_asn1.KDCOptions('cname-in-addl-tkt'))
+
+        service2_name = service2_creds.get_username()[:-1]
+        service2_realm = service2_creds.get_realm()
+        service2_service = 'host'
+        service2_sname = self.PrincipalName_create(
+            name_type=NT_PRINCIPAL, names=[service2_service,
+                                           service2_name])
+        service2_decryption_key = self.TicketDecryptionKey_from_creds(
+            service2_creds)
+        service2_etypes = service2_creds.tgs_supported_enctypes
+
+        expected_error_mode = kdc_dict.pop('expected_error_mode')
+        expect_status = kdc_dict.pop('expect_status', None)
+        expected_status = kdc_dict.pop('expected_status', None)
+        if expected_error_mode:
+            check_error_fn = self.generic_check_kdc_error
+            check_rep_fn = None
+        else:
+            check_error_fn = None
+            check_rep_fn = self.generic_check_kdc_rep
+
+            self.assertIsNone(expect_status)
+            self.assertIsNone(expected_status)
+
+        expect_edata = kdc_dict.pop('expect_edata', None)
+        if expect_edata is not None:
+            self.assertTrue(expected_error_mode)
+
+        pac_options = kdc_dict.pop('pac_options', None)
+
+        use_authenticator_subkey = kdc_dict.pop('use_authenticator_subkey', True)
+        if use_authenticator_subkey:
+            authenticator_subkey = self.RandomKey(Enctype.AES256)
+        else:
+            authenticator_subkey = None
+
+        expected_proxy_target = service2_creds.get_spn()
+
+        # service1 is always appended to whatever transited services the
+        # scenario pre-seeded (e.g. via add_delegation_info).
+        expected_transited_services = kdc_dict.pop(
+            'expected_transited_services', [])
+
+        transited_service = f'host/{service1_name}@{service1_realm}'
+        expected_transited_services.append(transited_service)
+
+        kdc_exchange_dict = self.tgs_exchange_dict(
+            expected_crealm=client_realm,
+            expected_cname=client_cname,
+            expected_srealm=service2_realm,
+            expected_sname=service2_sname,
+            expected_account_name=client_username,
+            expected_groups=expected_groups,
+            unexpected_groups=unexpected_groups,
+            expected_sid=sid,
+            expected_supported_etypes=service2_etypes,
+            ticket_decryption_key=service2_decryption_key,
+            check_error_fn=check_error_fn,
+            check_rep_fn=check_rep_fn,
+            check_kdc_private_fn=self.generic_check_kdc_private,
+            expected_error_mode=expected_error_mode,
+            expect_status=expect_status,
+            expected_status=expected_status,
+            callback_dict={},
+            tgt=service1_tgt,
+            authenticator_subkey=authenticator_subkey,
+            kdc_options=kdc_options,
+            pac_options=pac_options,
+            expect_edata=expect_edata,
+            expected_proxy_target=expected_proxy_target,
+            expected_transited_services=expected_transited_services,
+            expect_pac=expect_pac)
+
+        EncAuthorizationData = kdc_dict.pop('enc-authorization-data', None)
+
+        # Key usage depends on whether the authorization data is sealed
+        # with the authenticator subkey or the ticket session key.
+        if EncAuthorizationData is not None:
+            if authenticator_subkey is not None:
+                EncAuthorizationData_key = authenticator_subkey
+                EncAuthorizationData_usage = KU_TGS_REQ_AUTH_DAT_SUBKEY
+            else:
+                EncAuthorizationData_key = client_service_tkt.session_key
+                EncAuthorizationData_usage = KU_TGS_REQ_AUTH_DAT_SESSION
+        else:
+            EncAuthorizationData_key = None
+            EncAuthorizationData_usage = None
+
+        self._generic_kdc_exchange(kdc_exchange_dict,
+                                   cname=None,
+                                   realm=service2_realm,
+                                   sname=service2_sname,
+                                   etypes=etypes,
+                                   additional_tickets=additional_tickets,
+                                   EncAuthorizationData=EncAuthorizationData,
+                                   EncAuthorizationData_key=EncAuthorizationData_key,
+                                   EncAuthorizationData_usage=EncAuthorizationData_usage)
+
+        if not expected_error_mode:
+            # Check whether the ticket contains a PAC.
+            ticket = kdc_exchange_dict['rep_ticket_creds']
+            self.assertElementEqual(ticket.ticket_private, 'authtime', expected_authtime)
+            pac = self.get_ticket_pac(ticket, expect_pac=expect_pac)
+            # The ticket's authorization-data should contain one element for
+            # the PAC (if any) plus each requested EncAuthorizationData item.
+            ticket_auth_data = ticket.ticket_private.get('authorization-data')
+            expected_num_ticket_auth_data = 0
+            if expect_pac:
+                self.assertIsNotNone(pac)
+                expected_num_ticket_auth_data += 1
+            else:
+                self.assertIsNone(pac)
+
+            if EncAuthorizationData is not None:
+                expected_num_ticket_auth_data += len(EncAuthorizationData)
+
+            if expected_num_ticket_auth_data == 0:
+                self.assertIsNone(ticket_auth_data)
+            else:
+                self.assertIsNotNone(ticket_auth_data)
+                self.assertEqual(len(ticket_auth_data),
+                                 expected_num_ticket_auth_data)
+
+            if EncAuthorizationData is not None:
+                # Round-trip the requested authorization data through
+                # DER so the comparison uses canonical representations.
+                enc_ad_plain = self.der_encode(
+                    EncAuthorizationData,
+                    asn1Spec=krb5_asn1.AuthorizationData())
+                req_EncAuthorizationData = self.der_decode(
+                    enc_ad_plain,
+                    asn1Spec=krb5_asn1.AuthorizationData())
+
+                rep_EncAuthorizationData = ticket_auth_data.copy()
+                if expect_pac:
+                    rep_EncAuthorizationData.pop(0)
+                self.assertEqual(rep_EncAuthorizationData, req_EncAuthorizationData)
+
+        # Ensure we used all the parameters given to us.
+        self.assertEqual({}, kdc_dict)
+
+    def skip_unless_fl2008(self):
+        """Skip the current test if the domain functional level is below 2008."""
+        samdb = self.get_samdb()
+        functional_level = self.get_domain_functional_level(samdb)
+
+        if functional_level < dsdb.DS_DOMAIN_FUNCTION_2008:
+            self.skipTest('RBCD requires FL2008')
+
+    def test_constrained_delegation(self):
+        """Basic constrained delegation (S4U2Proxy) must succeed."""
+        # Test constrained delegation.
+        self._run_delegation_test(
+            {
+                'expected_error_mode': 0,
+                'allow_delegation': True
+            })
+
+    def test_constrained_delegation_authtime(self):
+        """Delegated ticket keeps the client's authtime (delayed client tickets)."""
+        # Test constrained delegation.
+        self._run_delegation_test(
+            {
+                'expected_error_mode': 0,
+                'allow_delegation': True,
+                'authtime_delay': 2,
+            })
+
+    def test_constrained_delegation_with_enc_auth_data_subkey(self):
+        """Delegation with enc-authorization-data sealed by the subkey."""
+        # Test constrained delegation.
+        # Build an AD-IF-RELEVANT container holding two opaque
+        # authorization-data elements (types 777 and 999).
+        EncAuthorizationData = []
+        relevant_elems = []
+        auth_data777 = self.AuthorizationData_create(777, b'AuthorizationData777')
+        relevant_elems.append(auth_data777)
+        auth_data999 = self.AuthorizationData_create(999, b'AuthorizationData999')
+        relevant_elems.append(auth_data999)
+        ad_relevant = self.der_encode(relevant_elems, asn1Spec=krb5_asn1.AD_IF_RELEVANT())
+        ad_data = self.AuthorizationData_create(AD_IF_RELEVANT, ad_relevant)
+        EncAuthorizationData.append(ad_data)
+        self._run_delegation_test(
+            {
+                'expected_error_mode': 0,
+                'allow_delegation': True,
+                'enc-authorization-data': EncAuthorizationData,
+            })
+
+    def test_constrained_delegation_with_enc_auth_data_no_subkey(self):
+        """Delegation with enc-authorization-data sealed by the session key."""
+        # Test constrained delegation.
+        # Same payload as the subkey variant, but with
+        # use_authenticator_subkey disabled so the session key seals it.
+        EncAuthorizationData = []
+        relevant_elems = []
+        auth_data777 = self.AuthorizationData_create(777, b'AuthorizationData777')
+        relevant_elems.append(auth_data777)
+        auth_data999 = self.AuthorizationData_create(999, b'AuthorizationData999')
+        relevant_elems.append(auth_data999)
+        ad_relevant = self.der_encode(relevant_elems, asn1Spec=krb5_asn1.AD_IF_RELEVANT())
+        ad_data = self.AuthorizationData_create(AD_IF_RELEVANT, ad_relevant)
+        EncAuthorizationData.append(ad_data)
+        self._run_delegation_test(
+            {
+                'expected_error_mode': 0,
+                'allow_delegation': True,
+                'enc-authorization-data': EncAuthorizationData,
+                'use_authenticator_subkey': False,
+            })
+
+    def test_constrained_delegation_authentication_asserted_identity(self):
+        """PAC must carry the authentication-authority asserted-identity SID."""
+        # Test constrained delegation and check asserted identity is the
+        # authentication authority. Note that we should always find this
+        # SID for all the requests. Just S4U2Self will have a different SID.
+        self._run_delegation_test(
+            {
+                'expected_error_mode': 0,
+                'allow_delegation': True,
+                'expected_groups': {
+                    (security.SID_AUTHENTICATION_AUTHORITY_ASSERTED_IDENTITY,
+                     SidType.EXTRA_SID,
+                     self.default_attrs),
+                    ...
+                },
+                'unexpected_groups': {
+                    security.SID_SERVICE_ASSERTED_IDENTITY,
+                },
+            })
+
+    def test_constrained_delegation_service_asserted_identity(self):
+        """S4U2Proxy after S4U2Self: PAC carries the service asserted-identity SID."""
+        # Test constrained delegation and check asserted identity is the
+        # service sid is there. This is a S4U2Proxy + S4U2Self test.
+        self._run_delegation_test(
+            {
+                'expected_error_mode': 0,
+                'allow_delegation': True,
+                's4u2self': True,
+                'service1_opts': {
+                    'trusted_to_auth_for_delegation': True,
+                },
+                'expected_groups': {
+                    (security.SID_SERVICE_ASSERTED_IDENTITY,
+                     SidType.EXTRA_SID,
+                     self.default_attrs),
+                    ...
+                },
+                'unexpected_groups': {
+                    security.SID_AUTHENTICATION_AUTHORITY_ASSERTED_IDENTITY,
+                },
+            })
+
+    def test_constrained_delegation_no_auth_data_required(self):
+        """Delegation to a no_auth_data_required service yields a PAC-less ticket."""
+        # Test constrained delegation.
+        self._run_delegation_test(
+            {
+                'expected_error_mode': 0,
+                'allow_delegation': True,
+                'service2_opts': {
+                    'no_auth_data_required': True
+                },
+                'expect_pac': False
+            })
+
+ def test_constrained_delegation_existing_delegation_info(self):
+ # Test constrained delegation with an existing S4U_DELEGATION_INFO
+ # structure in the PAC.
+
+ services = ['service1', 'service2', 'service3']
+
+ self._run_delegation_test(
+ {
+ 'expected_error_mode': 0,
+ 'allow_delegation': True,
+ 'modify_client_tkt_fn': functools.partial(
+ self.add_delegation_info, services=services),
+ 'expected_transited_services': services
+ })
+
+    def test_constrained_delegation_not_allowed(self):
+        # Test constrained delegation when the delegating service does not
+        # allow it.
+        self._run_delegation_test(
+            {
+                'expected_error_mode': KDC_ERR_BADOPTION,
+                # We aren’t particular about whether or not we get an NTSTATUS,
+                # but if one is returned it should be NT_STATUS_NOT_SUPPORTED.
+                'expect_status': None,
+                'expected_status': ntstatus.NT_STATUS_NOT_SUPPORTED,
+                'allow_delegation': False
+            })
+
+ def test_constrained_delegation_no_client_pac(self):
+ # Test constrained delegation when the client service ticket does not
+ # contain a PAC.
+ self._run_delegation_test(
+ {
+ 'expected_error_mode': (KDC_ERR_MODIFIED,
+ KDC_ERR_TGT_REVOKED),
+ 'allow_delegation': True,
+ 'modify_client_tkt_fn': self.remove_ticket_pac,
+ 'expect_edata': False
+ })
+
+ def test_constrained_delegation_no_service_pac(self):
+ # Test constrained delegation when the service TGT does not contain a
+ # PAC.
+ self._run_delegation_test(
+ {
+ 'expected_error_mode': KDC_ERR_TGT_REVOKED,
+ 'allow_delegation': True,
+ 'modify_service_tgt_fn': self.remove_ticket_pac,
+ 'expect_edata': False
+ })
+
+ def test_constrained_delegation_no_client_pac_no_auth_data_required(self):
+ # Test constrained delegation when the client service ticket does not
+ # contain a PAC.
+ self._run_delegation_test(
+ {
+ 'expected_error_mode': (KDC_ERR_MODIFIED,
+ KDC_ERR_BADOPTION,
+ KDC_ERR_TGT_REVOKED),
+ 'allow_delegation': True,
+ 'modify_client_tkt_fn': self.remove_ticket_pac,
+ 'expect_edata': False,
+ 'service2_opts': {
+ 'no_auth_data_required': True
+ }
+ })
+
+ def test_constrained_delegation_no_service_pac_no_auth_data_required(self):
+ # Test constrained delegation when the service TGT does not contain a
+ # PAC.
+ self._run_delegation_test(
+ {
+ 'expected_error_mode': KDC_ERR_TGT_REVOKED,
+ 'allow_delegation': True,
+ 'modify_service_tgt_fn': self.remove_ticket_pac,
+ 'service2_opts': {
+ 'no_auth_data_required': True
+ },
+ 'expect_pac': False,
+ 'expect_edata': False
+ })
+
+ def test_constrained_delegation_non_forwardable(self):
+ # Test constrained delegation with a non-forwardable ticket.
+ self._run_delegation_test(
+ {
+ 'expected_error_mode': KDC_ERR_BADOPTION,
+ # We aren’t particular about whether or not we get an NTSTATUS.
+ 'expect_status': None,
+ 'expected_status': ntstatus.NT_STATUS_ACCOUNT_RESTRICTION,
+ 'allow_delegation': True,
+ 'modify_client_tkt_fn': functools.partial(
+ self.set_ticket_forwardable, flag=False)
+ })
+
+ def test_constrained_delegation_pac_options_rbcd(self):
+ # Test constrained delegation, but with the RBCD bit set in the PAC
+ # options.
+ self._run_delegation_test(
+ {
+ 'expected_error_mode': 0,
+ 'pac_options': '0001', # supports RBCD
+ 'allow_delegation': True
+ })
+
+ def test_rbcd(self):
+ # Test resource-based constrained delegation.
+ self.skip_unless_fl2008()
+
+ self._run_delegation_test(
+ {
+ 'expected_error_mode': 0,
+ 'allow_rbcd': True,
+ 'pac_options': '0001', # supports RBCD
+ })
+
+ def test_rbcd_no_auth_data_required(self):
+ self.skip_unless_fl2008()
+
+ self._run_delegation_test(
+ {
+ 'expected_error_mode': 0,
+ 'allow_rbcd': True,
+ 'pac_options': '0001', # supports RBCD
+ 'service2_opts': {
+ 'no_auth_data_required': True
+ },
+ 'expect_pac': False
+ })
+
+ def test_rbcd_existing_delegation_info(self):
+ self.skip_unless_fl2008()
+
+ # Test constrained delegation with an existing S4U_DELEGATION_INFO
+ # structure in the PAC.
+
+ services = ['service1', 'service2', 'service3']
+
+ self._run_delegation_test(
+ {
+ 'expected_error_mode': 0,
+ 'allow_rbcd': True,
+ 'pac_options': '0001', # supports RBCD
+ 'modify_client_tkt_fn': functools.partial(
+ self.add_delegation_info, services=services),
+ 'expected_transited_services': services
+ })
+
+ def test_rbcd_not_allowed(self):
+ # Test resource-based constrained delegation when the target service
+ # does not allow it.
+ self._run_delegation_test(
+ {
+ 'expected_error_mode': KDC_ERR_BADOPTION,
+ # We aren’t particular about whether or not we get an NTSTATUS.
+ 'expect_status': None,
+ 'expected_status': ntstatus.NT_STATUS_NOT_FOUND,
+ 'allow_rbcd': False,
+ 'pac_options': '0001' # supports RBCD
+ })
+
+ def test_rbcd_no_client_pac_a(self):
+ self.skip_unless_fl2008()
+
+ # Test constrained delegation when the client service ticket does not
+ # contain a PAC, and an empty msDS-AllowedToDelegateTo attribute.
+ self._run_delegation_test(
+ {
+ 'expected_error_mode': (KDC_ERR_MODIFIED,
+ KDC_ERR_TGT_REVOKED),
+ # We aren’t particular about whether or not we get an NTSTATUS.
+ 'expect_status': None,
+ 'expected_status': ntstatus.NT_STATUS_NOT_SUPPORTED,
+ 'allow_rbcd': True,
+ 'pac_options': '0001', # supports RBCD
+ 'modify_client_tkt_fn': self.remove_ticket_pac
+ })
+
+ def test_rbcd_no_client_pac_b(self):
+ self.skip_unless_fl2008()
+
+ # Test constrained delegation when the client service ticket does not
+ # contain a PAC, and a non-empty msDS-AllowedToDelegateTo attribute.
+ self._run_delegation_test(
+ {
+ 'expected_error_mode': (KDC_ERR_MODIFIED,
+ KDC_ERR_TGT_REVOKED),
+ # We aren’t particular about whether or not we get an NTSTATUS.
+ 'expect_status': None,
+ 'expected_status': ntstatus.NT_STATUS_NO_MATCH,
+ 'allow_rbcd': True,
+ 'pac_options': '0001', # supports RBCD
+ 'modify_client_tkt_fn': self.remove_ticket_pac,
+ 'service1_opts': {
+ 'delegation_to_spn': ('host/test')
+ }
+ })
+
+ def test_rbcd_no_service_pac(self):
+ self.skip_unless_fl2008()
+
+ # Test constrained delegation when the service TGT does not contain a
+ # PAC.
+ self._run_delegation_test(
+ {
+ 'expected_error_mode': KDC_ERR_TGT_REVOKED,
+ 'allow_rbcd': True,
+ 'pac_options': '0001', # supports RBCD
+ 'modify_service_tgt_fn': self.remove_ticket_pac,
+ 'expect_edata': False
+ })
+
+ def test_rbcd_no_client_pac_no_auth_data_required_a(self):
+ self.skip_unless_fl2008()
+
+ # Test constrained delegation when the client service ticket does not
+ # contain a PAC, and an empty msDS-AllowedToDelegateTo attribute.
+ self._run_delegation_test(
+ {
+ 'expected_error_mode': (KDC_ERR_MODIFIED,
+ KDC_ERR_TGT_REVOKED),
+ # We aren’t particular about whether or not we get an NTSTATUS.
+ 'expect_status': None,
+ 'expected_status': ntstatus.NT_STATUS_NOT_SUPPORTED,
+ 'allow_rbcd': True,
+ 'pac_options': '0001', # supports RBCD
+ 'modify_client_tkt_fn': self.remove_ticket_pac,
+ 'service2_opts': {
+ 'no_auth_data_required': True
+ }
+ })
+
+ def test_rbcd_no_client_pac_no_auth_data_required_b(self):
+ self.skip_unless_fl2008()
+
+ # Test constrained delegation when the client service ticket does not
+ # contain a PAC, and a non-empty msDS-AllowedToDelegateTo attribute.
+ self._run_delegation_test(
+ {
+ 'expected_error_mode': (KDC_ERR_MODIFIED,
+ KDC_ERR_TGT_REVOKED),
+ # We aren’t particular about whether or not we get an NTSTATUS.
+ 'expect_status': None,
+ 'expected_status': ntstatus.NT_STATUS_NO_MATCH,
+ 'allow_rbcd': True,
+ 'pac_options': '0001', # supports RBCD
+ 'modify_client_tkt_fn': self.remove_ticket_pac,
+ 'service1_opts': {
+ 'delegation_to_spn': ('host/test')
+ },
+ 'service2_opts': {
+ 'no_auth_data_required': True
+ }
+ })
+
+ def test_rbcd_no_service_pac_no_auth_data_required(self):
+ self.skip_unless_fl2008()
+
+ # Test constrained delegation when the service TGT does not contain a
+ # PAC.
+ self._run_delegation_test(
+ {
+ 'expected_error_mode': KDC_ERR_TGT_REVOKED,
+ 'allow_rbcd': True,
+ 'pac_options': '0001', # supports RBCD
+ 'modify_service_tgt_fn': self.remove_ticket_pac,
+ 'service2_opts': {
+ 'no_auth_data_required': True
+ },
+ 'expect_edata': False
+ })
+
+ def test_rbcd_non_forwardable(self):
+ self.skip_unless_fl2008()
+
+ # Test resource-based constrained delegation with a non-forwardable
+ # ticket.
+ self._run_delegation_test(
+ {
+ 'expected_error_mode': KDC_ERR_BADOPTION,
+ # We aren’t particular about whether or not we get an NTSTATUS.
+ 'expect_status': None,
+ 'expected_status': ntstatus.NT_STATUS_ACCOUNT_RESTRICTION,
+ 'allow_rbcd': True,
+ 'pac_options': '0001', # supports RBCD
+ 'modify_client_tkt_fn': functools.partial(
+ self.set_ticket_forwardable, flag=False)
+ })
+
+ def test_rbcd_no_pac_options_a(self):
+ self.skip_unless_fl2008()
+
+ # Test resource-based constrained delegation without the RBCD bit set
+ # in the PAC options, and an empty msDS-AllowedToDelegateTo attribute.
+ self._run_delegation_test(
+ {
+ 'expected_error_mode': KDC_ERR_BADOPTION,
+ # We aren’t particular about whether or not we get an NTSTATUS.
+ 'expect_status': None,
+ 'expected_status': ntstatus.NT_STATUS_NOT_SUPPORTED,
+ 'allow_rbcd': True,
+ 'pac_options': '1' # does not support RBCD
+ })
+
+ def test_rbcd_no_pac_options_b(self):
+ self.skip_unless_fl2008()
+
+ # Test resource-based constrained delegation without the RBCD bit set
+ # in the PAC options, and a non-empty msDS-AllowedToDelegateTo
+ # attribute.
+ self._run_delegation_test(
+ {
+ 'expected_error_mode': KDC_ERR_BADOPTION,
+ # We aren’t particular about whether or not we get an NTSTATUS.
+ 'expect_status': None,
+ 'expected_status': ntstatus.NT_STATUS_NO_MATCH,
+ 'allow_rbcd': True,
+ 'pac_options': '1', # does not support RBCD
+ 'service1_opts': {
+ 'delegation_to_spn': ('host/test')
+ }
+ })
+
+    def test_bronze_bit_constrained_delegation_old_checksum(self):
+        # Attempt to modify the ticket without updating the PAC checksums
+        # ('Bronze Bit' style): flip a non-forwardable client ticket to
+        # forwardable while leaving the checksums stale, and expect the KDC
+        # to reject the tampered ticket.
+        self._run_delegation_test(
+            {
+                'expected_error_mode': (KDC_ERR_MODIFIED,
+                                        KDC_ERR_BAD_INTEGRITY),
+                'allow_delegation': True,
+                'client_tkt_options': '0',  # non-forwardable ticket
+                'modify_client_tkt_fn': functools.partial(
+                    self.set_ticket_forwardable,
+                    flag=True, update_pac_checksums=False),
+                'expect_edata': False
+            })
+
+ def test_bronze_bit_rbcd_old_checksum(self):
+ self.skip_unless_fl2008()
+
+ # Attempt to modify the ticket without updating the PAC checksums.
+ self._run_delegation_test(
+ {
+ 'expected_error_mode': (KDC_ERR_MODIFIED,
+ KDC_ERR_BAD_INTEGRITY),
+ # We aren’t particular about whether or not we get an NTSTATUS.
+ 'expect_status': None,
+ 'expected_status': ntstatus.NT_STATUS_NOT_SUPPORTED,
+ 'allow_rbcd': True,
+ 'pac_options': '0001', # supports RBCD
+ 'client_tkt_options': '0', # non-forwardable ticket
+ 'modify_client_tkt_fn': functools.partial(
+ self.set_ticket_forwardable,
+ flag=True, update_pac_checksums=False)
+ })
+
+ def test_constrained_delegation_missing_client_checksum(self):
+ # Present a user ticket without the required checksums.
+ for checksum in self.pac_checksum_types:
+ with self.subTest(checksum=checksum):
+ if checksum == krb5pac.PAC_TYPE_TICKET_CHECKSUM:
+ expected_error_mode = (KDC_ERR_MODIFIED,
+ KDC_ERR_BADOPTION)
+ else:
+ expected_error_mode = KDC_ERR_GENERIC
+
+ self._run_delegation_test(
+ {
+ 'expected_error_mode': expected_error_mode,
+ 'allow_delegation': True,
+ 'modify_client_tkt_fn': functools.partial(
+ self.remove_pac_checksum, checksum=checksum),
+ 'expect_edata': False
+ })
+
+ def test_constrained_delegation_missing_service_checksum(self):
+ # Present the service's ticket without the required checksums.
+ for checksum in (krb5pac.PAC_TYPE_SRV_CHECKSUM,
+ krb5pac.PAC_TYPE_KDC_CHECKSUM):
+ with self.subTest(checksum=checksum):
+ self._run_delegation_test(
+ {
+ 'expected_error_mode': KDC_ERR_GENERIC,
+ # We aren’t particular about whether or not we get an
+ # NTSTATUS.
+ 'expect_status': None,
+ 'expected_status':
+ ntstatus.NT_STATUS_INSUFFICIENT_RESOURCES,
+ 'allow_delegation': True,
+ 'modify_service_tgt_fn': functools.partial(
+ self.remove_pac_checksum, checksum=checksum)
+ })
+
+ def test_rbcd_missing_client_checksum(self):
+ self.skip_unless_fl2008()
+
+ # Present a user ticket without the required checksums.
+ for checksum in self.pac_checksum_types:
+ with self.subTest(checksum=checksum):
+ if checksum == krb5pac.PAC_TYPE_TICKET_CHECKSUM:
+ expected_error_mode = (KDC_ERR_MODIFIED,
+ KDC_ERR_BADOPTION)
+ else:
+ expected_error_mode = KDC_ERR_GENERIC
+
+ self._run_delegation_test(
+ {
+ 'expected_error_mode': expected_error_mode,
+ # We aren’t particular about whether or not we get an
+ # NTSTATUS.
+ 'expect_status': None,
+ 'expected_status':
+ ntstatus.NT_STATUS_NOT_SUPPORTED,
+ 'allow_rbcd': True,
+ 'pac_options': '0001', # supports RBCD
+ 'modify_client_tkt_fn': functools.partial(
+ self.remove_pac_checksum, checksum=checksum)
+ })
+
+ def test_rbcd_missing_service_checksum(self):
+ self.skip_unless_fl2008()
+
+ # Present the service's ticket without the required checksums.
+ for checksum in (krb5pac.PAC_TYPE_SRV_CHECKSUM,
+ krb5pac.PAC_TYPE_KDC_CHECKSUM):
+ with self.subTest(checksum=checksum):
+ self._run_delegation_test(
+ {
+ 'expected_error_mode': KDC_ERR_GENERIC,
+ # We aren’t particular about whether or not we get an
+ # NTSTATUS.
+ 'expect_status': None,
+ 'expected_status':
+ ntstatus.NT_STATUS_INSUFFICIENT_RESOURCES,
+ 'allow_rbcd': True,
+ 'pac_options': '0001', # supports RBCD
+ 'modify_service_tgt_fn': functools.partial(
+ self.remove_pac_checksum, checksum=checksum)
+ })
+
+ def test_constrained_delegation_zeroed_client_checksum(self):
+ # Present a user ticket with invalid checksums.
+ for checksum in self.pac_checksum_types:
+ with self.subTest(checksum=checksum):
+ self._run_delegation_test(
+ {
+ 'expected_error_mode': (KDC_ERR_MODIFIED,
+ KDC_ERR_BAD_INTEGRITY),
+ 'allow_delegation': True,
+ 'modify_client_tkt_fn': functools.partial(
+ self.zeroed_pac_checksum, checksum=checksum),
+ 'expect_edata': False
+ })
+
+ def test_constrained_delegation_zeroed_service_checksum(self):
+ # Present the service's ticket with invalid checksums.
+ for checksum in self.pac_checksum_types:
+ with self.subTest(checksum=checksum):
+ if checksum == krb5pac.PAC_TYPE_SRV_CHECKSUM:
+ expected_error_mode = (KDC_ERR_MODIFIED,
+ KDC_ERR_BAD_INTEGRITY)
+ # We aren’t particular about whether or not we get an
+ # NTSTATUS.
+ expect_status = None
+ expected_status = ntstatus.NT_STATUS_WRONG_PASSWORD
+ else:
+ expected_error_mode = 0
+ expect_status = None
+ expected_status = None
+
+ self._run_delegation_test(
+ {
+ 'expected_error_mode': expected_error_mode,
+ 'expect_status': expect_status,
+ 'expected_status': expected_status,
+ 'allow_delegation': True,
+ 'modify_service_tgt_fn': functools.partial(
+ self.zeroed_pac_checksum, checksum=checksum)
+ })
+
+ def test_rbcd_zeroed_client_checksum(self):
+ self.skip_unless_fl2008()
+
+ # Present a user ticket with invalid checksums.
+ for checksum in self.pac_checksum_types:
+ with self.subTest(checksum=checksum):
+ self._run_delegation_test(
+ {
+ 'expected_error_mode': (KDC_ERR_MODIFIED,
+ KDC_ERR_BAD_INTEGRITY),
+ # We aren’t particular about whether or not we get an
+ # NTSTATUS.
+ 'expect_status': None,
+ 'expected_status':
+ ntstatus.NT_STATUS_NOT_SUPPORTED,
+ 'allow_rbcd': True,
+ 'pac_options': '0001', # supports RBCD
+ 'modify_client_tkt_fn': functools.partial(
+ self.zeroed_pac_checksum, checksum=checksum)
+ })
+
+ def test_rbcd_zeroed_service_checksum(self):
+ self.skip_unless_fl2008()
+
+ # Present the service's ticket with invalid checksums.
+ for checksum in self.pac_checksum_types:
+ with self.subTest(checksum=checksum):
+ if checksum == krb5pac.PAC_TYPE_SRV_CHECKSUM:
+ expected_error_mode = (KDC_ERR_MODIFIED,
+ KDC_ERR_BAD_INTEGRITY)
+ # We aren’t particular about whether or not we get an
+ # NTSTATUS.
+ expect_status = None
+ expected_status = ntstatus.NT_STATUS_WRONG_PASSWORD
+ else:
+ expected_error_mode = 0
+ expect_status = None
+ expected_status = None
+
+ self._run_delegation_test(
+ {
+ 'expected_error_mode': expected_error_mode,
+ 'expect_status': expect_status,
+ 'expected_status': expected_status,
+ 'allow_rbcd': True,
+ 'pac_options': '0001', # supports RBCD
+ 'modify_service_tgt_fn': functools.partial(
+ self.zeroed_pac_checksum, checksum=checksum)
+ })
+
+    # Checksum types that are not keyed with a Kerberos key; exercised by
+    # the unkeyed-checksum tests above, which expect the KDC to reject PAC
+    # checksums produced with these.
+    unkeyed_ctypes = {Cksumtype.MD5, Cksumtype.SHA1, Cksumtype.CRC32}
+
+ def test_constrained_delegation_unkeyed_client_checksum(self):
+ # Present a user ticket with invalid checksums.
+ for checksum in self.pac_checksum_types:
+ for ctype in self.unkeyed_ctypes:
+ with self.subTest(checksum=checksum, ctype=ctype):
+ if (checksum == krb5pac.PAC_TYPE_SRV_CHECKSUM
+ and ctype == Cksumtype.SHA1):
+ expected_error_mode = (KDC_ERR_SUMTYPE_NOSUPP,
+ KDC_ERR_INAPP_CKSUM)
+ else:
+ expected_error_mode = (KDC_ERR_GENERIC,
+ KDC_ERR_INAPP_CKSUM)
+
+ self._run_delegation_test(
+ {
+ 'expected_error_mode': expected_error_mode,
+ 'allow_delegation': True,
+ 'modify_client_tkt_fn': functools.partial(
+ self.unkeyed_pac_checksum,
+ checksum=checksum, ctype=ctype),
+ 'expect_edata': False
+ })
+
+ def test_constrained_delegation_unkeyed_service_checksum(self):
+ # Present the service's ticket with invalid checksums.
+ for checksum in self.pac_checksum_types:
+ for ctype in self.unkeyed_ctypes:
+ with self.subTest(checksum=checksum, ctype=ctype):
+ if checksum == krb5pac.PAC_TYPE_SRV_CHECKSUM:
+ # We aren’t particular about whether or not we get an
+ # NTSTATUS.
+ expect_status = None
+ if ctype == Cksumtype.SHA1:
+ expected_error_mode = (KDC_ERR_SUMTYPE_NOSUPP,
+ KDC_ERR_INAPP_CKSUM)
+ expected_status = ntstatus.NT_STATUS_LOGON_FAILURE
+ else:
+ expected_error_mode = (KDC_ERR_GENERIC,
+ KDC_ERR_INAPP_CKSUM)
+ expected_status = (
+ ntstatus.NT_STATUS_INSUFFICIENT_RESOURCES)
+ else:
+ expected_error_mode = 0
+ expect_status = None
+ expected_status = None
+
+ self._run_delegation_test(
+ {
+ 'expected_error_mode': expected_error_mode,
+ 'expect_status': expect_status,
+ 'expected_status': expected_status,
+ 'allow_delegation': True,
+ 'modify_service_tgt_fn': functools.partial(
+ self.unkeyed_pac_checksum,
+ checksum=checksum, ctype=ctype)
+ })
+
+ def test_rbcd_unkeyed_client_checksum(self):
+ self.skip_unless_fl2008()
+
+ # Present a user ticket with invalid checksums.
+ for checksum in self.pac_checksum_types:
+ for ctype in self.unkeyed_ctypes:
+ with self.subTest(checksum=checksum, ctype=ctype):
+ if (checksum == krb5pac.PAC_TYPE_SRV_CHECKSUM
+ and ctype == Cksumtype.SHA1):
+ expected_error_mode = (KDC_ERR_SUMTYPE_NOSUPP,
+ KDC_ERR_INAPP_CKSUM)
+ else:
+ expected_error_mode = (KDC_ERR_GENERIC,
+ KDC_ERR_INAPP_CKSUM)
+
+ self._run_delegation_test(
+ {
+ 'expected_error_mode': expected_error_mode,
+ # We aren’t particular about whether or not we get
+ # an NTSTATUS.
+ 'expect_status': None,
+ 'expected_status':
+ ntstatus.NT_STATUS_NOT_SUPPORTED,
+ 'allow_rbcd': True,
+ 'pac_options': '0001', # supports RBCD
+ 'modify_client_tkt_fn': functools.partial(
+ self.unkeyed_pac_checksum,
+ checksum=checksum, ctype=ctype)
+ })
+
+ def test_rbcd_unkeyed_service_checksum(self):
+ self.skip_unless_fl2008()
+
+ # Present the service's ticket with invalid checksums.
+ for checksum in self.pac_checksum_types:
+ for ctype in self.unkeyed_ctypes:
+ with self.subTest(checksum=checksum, ctype=ctype):
+ if checksum == krb5pac.PAC_TYPE_SRV_CHECKSUM:
+ # We aren’t particular about whether or not we get an
+ # NTSTATUS.
+ expect_status = None
+ if ctype == Cksumtype.SHA1:
+ expected_error_mode = (KDC_ERR_SUMTYPE_NOSUPP,
+ KDC_ERR_INAPP_CKSUM)
+ expected_status = ntstatus.NT_STATUS_LOGON_FAILURE
+ else:
+ expected_error_mode = (KDC_ERR_GENERIC,
+ KDC_ERR_INAPP_CKSUM)
+ expected_status = (
+ ntstatus.NT_STATUS_INSUFFICIENT_RESOURCES)
+ else:
+ expected_error_mode = 0
+ expect_status = None
+ expected_status = None
+
+ self._run_delegation_test(
+ {
+ 'expected_error_mode': expected_error_mode,
+ 'expect_status': expect_status,
+ 'expected_status': expected_status,
+ 'allow_rbcd': True,
+ 'pac_options': '0001', # supports RBCD
+ 'modify_service_tgt_fn': functools.partial(
+ self.unkeyed_pac_checksum,
+ checksum=checksum, ctype=ctype)
+ })
+
+    def test_constrained_delegation_rc4_client_checksum(self):
+        # Present a user ticket with RC4 checksums.
+        samdb = self.get_samdb()
+        functional_level = self.get_domain_functional_level(samdb)
+
+        # At functional level 2008 or above the RC4 checksums are rejected;
+        # below that the delegation is expected to succeed.
+        if functional_level >= dsdb.DS_DOMAIN_FUNCTION_2008:
+            expected_error_mode = (KDC_ERR_GENERIC,
+                                   KDC_ERR_INAPP_CKSUM)
+            expect_edata = False
+        else:
+            expected_error_mode = 0
+            expect_edata = None
+
+        self._run_delegation_test(
+            {
+                'expected_error_mode': expected_error_mode,
+                'allow_delegation': True,
+                'modify_client_tkt_fn': self.rc4_pac_checksums,
+                'expect_edata': expect_edata,
+            })
+
+ def test_rbcd_rc4_client_checksum(self):
+ self.skip_unless_fl2008()
+
+ # Present a user ticket with RC4 checksums.
+ expected_error_mode = (KDC_ERR_GENERIC,
+ KDC_ERR_BADOPTION)
+
+ self._run_delegation_test(
+ {
+ 'expected_error_mode': expected_error_mode,
+ # We aren’t particular about whether or not we get an NTSTATUS.
+ 'expect_status': None,
+ 'expected_status': ntstatus.NT_STATUS_NOT_SUPPORTED,
+ 'allow_rbcd': True,
+ 'pac_options': '0001', # supports RBCD
+ 'modify_client_tkt_fn': self.rc4_pac_checksums,
+ })
+
+ def test_constrained_delegation_rodc_issued(self):
+ self._run_delegation_test(
+ {
+ # Test that RODC-issued constrained delegation tickets are
+ # accepted.
+ 'expected_error_mode': 0,
+ 'allow_delegation': True,
+ # Both tickets must be signed by the same RODC.
+ 'modify_client_tkt_fn': self.signed_by_rodc,
+ 'modify_service_tgt_fn': self.issued_by_rodc,
+ 'client_opts': {
+ 'allowed_replication_mock': True,
+ 'revealed_to_mock_rodc': True,
+ },
+ 'service1_opts': {
+ 'allowed_replication_mock': True,
+ 'revealed_to_mock_rodc': True,
+ },
+ })
+
+ def test_rbcd_rodc_issued(self):
+ self.skip_unless_fl2008()
+
+ self._run_delegation_test(
+ {
+ # Test that RODC-issued constrained delegation tickets are
+ # accepted.
+ 'expected_error_mode': 0,
+ 'allow_rbcd': True,
+ 'pac_options': '0001', # supports RBCD
+ # Both tickets must be signed by the same RODC.
+ 'modify_client_tkt_fn': self.signed_by_rodc,
+ 'modify_service_tgt_fn': self.issued_by_rodc,
+ 'client_opts': {
+ 'allowed_replication_mock': True,
+ 'revealed_to_mock_rodc': True,
+ },
+ 'service1_opts': {
+ 'allowed_replication_mock': True,
+ 'revealed_to_mock_rodc': True,
+ },
+ })
+
+    def remove_pac_checksum(self, ticket, checksum):
+        """Regenerate *ticket* with the PAC checksum of type *checksum*
+        omitted."""
+        checksum_keys = self.get_krbtgt_checksum_key()
+
+        return self.modified_ticket(ticket,
+                                    checksum_keys=checksum_keys,
+                                    include_checksums={checksum: False})
+
+    def zeroed_pac_checksum(self, ticket, checksum):
+        """Regenerate *ticket* with the PAC checksum of type *checksum*
+        produced by a ZeroedChecksumKey, i.e. an invalid checksum."""
+        krbtgt_creds = self.get_krbtgt_creds()
+        krbtgt_key = self.TicketDecryptionKey_from_creds(krbtgt_creds)
+
+        server_key = ticket.decryption_key
+
+        checksum_keys = {
+            krb5pac.PAC_TYPE_SRV_CHECKSUM: server_key,
+            krb5pac.PAC_TYPE_KDC_CHECKSUM: krbtgt_key,
+            krb5pac.PAC_TYPE_TICKET_CHECKSUM: krbtgt_key,
+        }
+
+        # The server checksum is keyed with the service key; the KDC and
+        # ticket checksums with the krbtgt key.
+        if checksum == krb5pac.PAC_TYPE_SRV_CHECKSUM:
+            zeroed_key = server_key
+        else:
+            zeroed_key = krbtgt_key
+
+        checksum_keys[checksum] = ZeroedChecksumKey(zeroed_key.key,
+                                                    zeroed_key.kvno)
+
+        return self.modified_ticket(ticket,
+                                    checksum_keys=checksum_keys,
+                                    include_checksums={checksum: True})
+
+    def unkeyed_pac_checksum(self, ticket, checksum, ctype):
+        """Regenerate *ticket* with the PAC checksum of type *checksum*
+        computed using the (unkeyed) checksum type *ctype*."""
+        krbtgt_creds = self.get_krbtgt_creds()
+        krbtgt_key = self.TicketDecryptionKey_from_creds(krbtgt_creds)
+
+        server_key = ticket.decryption_key
+
+        checksum_keys = {
+            krb5pac.PAC_TYPE_SRV_CHECKSUM: server_key,
+            krb5pac.PAC_TYPE_KDC_CHECKSUM: krbtgt_key,
+            krb5pac.PAC_TYPE_TICKET_CHECKSUM: krbtgt_key,
+            krb5pac.PAC_TYPE_FULL_CHECKSUM: krbtgt_key,
+        }
+
+        # Make a copy of the existing key and change the ctype.
+        key = checksum_keys[checksum]
+        new_key = RodcPacEncryptionKey(key.key, key.kvno)
+        new_key.ctype = ctype
+        checksum_keys[checksum] = new_key
+
+        return self.modified_ticket(ticket,
+                                    checksum_keys=checksum_keys,
+                                    include_checksums={checksum: True})
+
+    def rc4_pac_checksums(self, ticket):
+        """Regenerate *ticket* with the KDC, ticket and full PAC checksums
+        keyed with the krbtgt's RC4 key; the server checksum keeps the
+        service key."""
+        krbtgt_creds = self.get_krbtgt_creds()
+        rc4_krbtgt_key = self.TicketDecryptionKey_from_creds(
+            krbtgt_creds, etype=Enctype.RC4)
+
+        server_key = ticket.decryption_key
+
+        checksum_keys = {
+            krb5pac.PAC_TYPE_SRV_CHECKSUM: server_key,
+            krb5pac.PAC_TYPE_KDC_CHECKSUM: rc4_krbtgt_key,
+            krb5pac.PAC_TYPE_TICKET_CHECKSUM: rc4_krbtgt_key,
+            krb5pac.PAC_TYPE_FULL_CHECKSUM: rc4_krbtgt_key,
+        }
+
+        # Force all four checksums to be (re)generated.
+        include_checksums = {
+            krb5pac.PAC_TYPE_SRV_CHECKSUM: True,
+            krb5pac.PAC_TYPE_KDC_CHECKSUM: True,
+            krb5pac.PAC_TYPE_TICKET_CHECKSUM: True,
+            krb5pac.PAC_TYPE_FULL_CHECKSUM: True,
+        }
+
+        return self.modified_ticket(ticket,
+                                    checksum_keys=checksum_keys,
+                                    include_checksums=include_checksums)
+
+    def add_delegation_info(self, ticket, *, services):
+        """Regenerate *ticket* with a PAC_CONSTRAINED_DELEGATION buffer
+        added to its PAC, listing *services* as the transited services.
+
+        Asserts that the PAC does not already contain such a buffer.
+        """
+        def modify_pac_fn(pac):
+            pac_buffers = pac.buffers
+            # There must not already be a delegation-info buffer.
+            self.assertNotIn(krb5pac.PAC_TYPE_CONSTRAINED_DELEGATION,
+                             (buffer.type for buffer in pac_buffers))
+
+            transited_services = list(map(lsa.String, services))
+
+            delegation = krb5pac.PAC_CONSTRAINED_DELEGATION()
+            delegation.proxy_target = lsa.String('test_proxy_target')
+            delegation.transited_services = transited_services
+            delegation.num_transited_services = len(transited_services)
+
+            info = krb5pac.PAC_CONSTRAINED_DELEGATION_CTR()
+            info.info = delegation
+
+            pac_buffer = krb5pac.PAC_BUFFER()
+            pac_buffer.type = krb5pac.PAC_TYPE_CONSTRAINED_DELEGATION
+            pac_buffer.info = info
+
+            pac_buffers.append(pac_buffer)
+
+            # Keep the buffer count in step with the buffer list.
+            pac.buffers = pac_buffers
+            pac.num_buffers += 1
+
+            return pac
+
+        checksum_keys = self.get_krbtgt_checksum_key()
+
+        return self.modified_ticket(ticket,
+                                    checksum_keys=checksum_keys,
+                                    modify_pac_fn=modify_pac_fn)
+
+    def set_ticket_forwardable(self, ticket, flag, update_pac_checksums=True):
+        """Regenerate *ticket* with its 'forwardable' flag set to *flag*.
+
+        If *update_pac_checksums* is False, the PAC checksums are left
+        stale so the modification should be detectable by the KDC.
+        """
+        modify_fn = functools.partial(self.modify_ticket_flag,
+                                      flag='forwardable',
+                                      value=flag)
+
+        if update_pac_checksums:
+            checksum_keys = self.get_krbtgt_checksum_key()
+        else:
+            checksum_keys = None
+
+        return self.modified_ticket(ticket,
+                                    modify_fn=modify_fn,
+                                    checksum_keys=checksum_keys,
+                                    update_pac_checksums=update_pac_checksums)
+
+    def remove_ticket_pac(self, ticket):
+        """Regenerate *ticket* with its PAC removed entirely."""
+        return self.modified_ticket(ticket,
+                                    exclude_pac=True)
+
+
+if __name__ == "__main__":
+    # Run the tests without ASN.1 pretty-printing or hexdump debug output.
+    global_asn1_print = False
+    global_hexdump = False
+    import unittest
+    unittest.main()
diff --git a/python/samba/tests/krb5/salt_tests.py b/python/samba/tests/krb5/salt_tests.py
new file mode 100755
index 0000000..fcda533
--- /dev/null
+++ b/python/samba/tests/krb5/salt_tests.py
@@ -0,0 +1,469 @@
+#!/usr/bin/env python3
+# Unix SMB/CIFS implementation.
+# Copyright (C) Stefan Metzmacher 2020
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import sys
+import os
+
+sys.path.insert(0, "bin/python")
+os.environ["PYTHONUNBUFFERED"] = "1"
+
+import ldb
+
+from samba.tests.krb5.as_req_tests import AsReqBaseTest
+import samba.tests.krb5.kcrypto as kcrypto
+
+global_asn1_print = False
+global_hexdump = False
+
+
+class SaltTests(AsReqBaseTest):
+
+ def setUp(self):
+ super().setUp()
+ self.do_asn1_print = global_asn1_print
+ self.do_hexdump = global_hexdump
+
+ def _get_creds(self, *,
+ account_type,
+ opts=None):
+ try:
+ return self.get_cached_creds(
+ account_type=account_type,
+ opts=opts)
+ except ldb.LdbError:
+ self.fail()
+
+ def _run_salt_test(self, client_creds):
+ expected_salt = self.get_salt(client_creds)
+ self.assertIsNotNone(expected_salt)
+
+ etype_info2 = self._run_as_req_enc_timestamp(client_creds)
+
+ self.assertEqual(etype_info2[0]['etype'], kcrypto.Enctype.AES256)
+ self.assertEqual(etype_info2[0]['salt'], expected_salt)
+
+ def test_salt_at_user(self):
+ client_creds = self._get_creds(
+ account_type=self.AccountType.USER,
+ opts={'name_suffix': 'foo@bar'})
+ self._run_as_req_enc_timestamp(client_creds)
+
+ def test_salt_at_mac(self):
+ client_creds = self._get_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={'name_suffix': 'foo@bar'})
+ self._run_as_req_enc_timestamp(client_creds)
+
+ def test_salt_at_managed_service(self):
+ client_creds = self._get_creds(
+ account_type=self.AccountType.MANAGED_SERVICE,
+ opts={'name_suffix': 'foo@bar'})
+ self._run_as_req_enc_timestamp(client_creds)
+
+ def test_salt_at_case_user(self):
+ client_creds = self._get_creds(
+ account_type=self.AccountType.USER,
+ opts={'name_suffix': 'Foo@bar'})
+ self._run_as_req_enc_timestamp(client_creds)
+
+ def test_salt_at_case_mac(self):
+ client_creds = self._get_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={'name_suffix': 'Foo@bar'})
+ self._run_as_req_enc_timestamp(client_creds)
+
+ def test_salt_at_case_managed_service(self):
+ client_creds = self._get_creds(
+ account_type=self.AccountType.MANAGED_SERVICE,
+ opts={'name_suffix': 'Foo@bar'})
+ self._run_as_req_enc_timestamp(client_creds)
+
+ def test_salt_double_at_user(self):
+ client_creds = self._get_creds(
+ account_type=self.AccountType.USER,
+ opts={'name_suffix': 'foo@@bar'})
+ self._run_as_req_enc_timestamp(client_creds)
+
+ def test_salt_double_at_mac(self):
+ client_creds = self._get_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={'name_suffix': 'foo@@bar'})
+ self._run_as_req_enc_timestamp(client_creds)
+
+ def test_salt_double_at_managed_service(self):
+ client_creds = self._get_creds(
+ account_type=self.AccountType.MANAGED_SERVICE,
+ opts={'name_suffix': 'foo@@bar'})
+ self._run_as_req_enc_timestamp(client_creds)
+
+ def test_salt_at_start_user(self):
+ client_creds = self._get_creds(
+ account_type=self.AccountType.USER,
+ opts={'name_prefix': '@foo'})
+ self._run_as_req_enc_timestamp(client_creds)
+
+ def test_salt_at_start_mac(self):
+ client_creds = self._get_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={'name_prefix': '@foo'})
+ self._run_as_req_enc_timestamp(client_creds)
+
+ def test_salt_at_start_managed_service(self):
+ client_creds = self._get_creds(
+ account_type=self.AccountType.MANAGED_SERVICE,
+ opts={'name_prefix': '@foo'})
+ self._run_as_req_enc_timestamp(client_creds)
+
+ def test_salt_at_end_user(self):
+ client_creds = self._get_creds(
+ account_type=self.AccountType.USER,
+ opts={'name_suffix': 'foo@'})
+ self._run_as_req_enc_timestamp(client_creds)
+
+ def test_salt_at_end_mac(self):
+ client_creds = self._get_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={'name_suffix': 'foo@'})
+ self._run_as_req_enc_timestamp(client_creds)
+
+ def test_salt_at_end_managed_service(self):
+ client_creds = self._get_creds(
+ account_type=self.AccountType.MANAGED_SERVICE,
+ opts={'name_suffix': 'foo@',
+ 'add_dollar': True})
+ self._run_as_req_enc_timestamp(client_creds)
+
+ def test_salt_at_end_no_dollar_mac(self):
+ client_creds = self._get_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={'name_suffix': 'foo@',
+ 'add_dollar': False})
+ self._run_as_req_enc_timestamp(client_creds)
+
+ def test_salt_at_end_add_dollar_managed_service(self):
+ client_creds = self._get_creds(
+ account_type=self.AccountType.MANAGED_SERVICE,
+ opts={'name_suffix': 'foo@',
+ 'add_dollar': True})
+ self._run_as_req_enc_timestamp(client_creds)
+
+ def test_salt_no_dollar_mac(self):
+ client_creds = self._get_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={'add_dollar': False})
+ self._run_as_req_enc_timestamp(client_creds)
+
+ def test_salt_add_dollar_managed_service(self):
+ client_creds = self._get_creds(
+ account_type=self.AccountType.MANAGED_SERVICE,
+ opts={'add_dollar': True})
+ self._run_as_req_enc_timestamp(client_creds)
+
+ def test_salt_dollar_mid_mac(self):
+ client_creds = self._get_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={'name_suffix': 'foo$bar',
+ 'add_dollar': False})
+ self._run_as_req_enc_timestamp(client_creds)
+
+ def test_salt_dollar_mid_managed_service(self):
+ client_creds = self._get_creds(
+ account_type=self.AccountType.MANAGED_SERVICE,
+ opts={'name_suffix': 'foo$bar',
+ 'add_dollar': True})
+ self._run_as_req_enc_timestamp(client_creds)
+
+ def test_salt_dollar_user(self):
+ client_creds = self._get_creds(
+ account_type=self.AccountType.USER,
+ opts={'name_suffix': 'foo$bar'})
+ self._run_as_req_enc_timestamp(client_creds)
+
+ def test_salt_dollar_mac(self):
+ client_creds = self._get_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={'name_suffix': 'foo$bar'})
+ self._run_as_req_enc_timestamp(client_creds)
+
+ def test_salt_dollar_managed_service(self):
+ client_creds = self._get_creds(
+ account_type=self.AccountType.MANAGED_SERVICE,
+ opts={'name_suffix': 'foo$bar'})
+ self._run_as_req_enc_timestamp(client_creds)
+
+ def test_salt_dollar_end_user(self):
+ client_creds = self._get_creds(
+ account_type=self.AccountType.USER,
+ opts={'name_suffix': 'foo$'})
+ self._run_as_req_enc_timestamp(client_creds)
+
+ def test_salt_dollar_end_mac(self):
+ client_creds = self._get_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={'name_suffix': 'foo$'})
+ self._run_as_req_enc_timestamp(client_creds)
+
+ def test_salt_dollar_end_managed_service(self):
+ client_creds = self._get_creds(
+ account_type=self.AccountType.MANAGED_SERVICE,
+ opts={'name_suffix': 'foo$'})
+ self._run_as_req_enc_timestamp(client_creds)
+
+ def test_salt_upn_user(self):
+ client_creds = self._get_creds(
+ account_type=self.AccountType.USER,
+ opts={'upn': 'foo0'})
+ self._run_as_req_enc_timestamp(client_creds)
+
+ def test_salt_upn_mac(self):
+ client_creds = self._get_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={'upn': 'foo1'})
+ self._run_as_req_enc_timestamp(client_creds)
+
+ def test_salt_upn_managed_service(self):
+ client_creds = self._get_creds(
+ account_type=self.AccountType.MANAGED_SERVICE,
+ opts={'upn': 'foo24'})
+ self._run_as_req_enc_timestamp(client_creds)
+
+ def test_salt_upn_host_user(self):
+ client_creds = self._get_creds(
+ account_type=self.AccountType.USER,
+ opts={'upn': 'host/foo2'})
+ self._run_as_req_enc_timestamp(client_creds)
+
+ def test_salt_upn_host_mac(self):
+ client_creds = self._get_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={'upn': 'host/foo3'})
+ self._run_as_req_enc_timestamp(client_creds)
+
+ def test_salt_upn_host_managed_service(self):
+ client_creds = self._get_creds(
+ account_type=self.AccountType.MANAGED_SERVICE,
+ opts={'upn': 'host/foo25'})
+ self._run_as_req_enc_timestamp(client_creds)
+
+ def test_salt_upn_realm_user(self):
+ realm = self.get_samdb().domain_dns_name()
+ client_creds = self._get_creds(
+ account_type=self.AccountType.USER,
+ opts={'upn': 'foo4@' + realm})
+ self._run_as_req_enc_timestamp(client_creds)
+
+ def test_salt_upn_realm_mac(self):
+ realm = self.get_samdb().domain_dns_name()
+ client_creds = self._get_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={'upn': 'foo5@' + realm})
+ self._run_as_req_enc_timestamp(client_creds)
+
+ def test_salt_upn_realm_managed_service(self):
+ realm = self.get_samdb().domain_dns_name()
+ client_creds = self._get_creds(
+ account_type=self.AccountType.MANAGED_SERVICE,
+ opts={'upn': 'foo26@' + realm})
+ self._run_as_req_enc_timestamp(client_creds)
+
+ def test_salt_upn_host_realm_user(self):
+ realm = self.get_samdb().domain_dns_name()
+ client_creds = self._get_creds(
+ account_type=self.AccountType.USER,
+ opts={'upn': 'host/foo6@' + realm})
+ self._run_as_req_enc_timestamp(client_creds)
+
+ def test_salt_upn_host_realm_mac(self):
+ realm = self.get_samdb().domain_dns_name()
+ client_creds = self._get_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={'upn': 'host/foo7@' + realm})
+ self._run_as_req_enc_timestamp(client_creds)
+
+ def test_salt_upn_host_realm_managed_service(self):
+ realm = self.get_samdb().domain_dns_name()
+ client_creds = self._get_creds(
+ account_type=self.AccountType.MANAGED_SERVICE,
+ opts={'upn': 'host/foo27@' + realm})
+ self._run_as_req_enc_timestamp(client_creds)
+
+ def test_salt_upn_dollar_realm_user(self):
+ realm = self.get_samdb().domain_dns_name()
+ client_creds = self._get_creds(
+ account_type=self.AccountType.USER,
+ opts={'upn': 'foo8$@' + realm})
+ self._run_as_req_enc_timestamp(client_creds)
+
+ def test_salt_upn_dollar_realm_mac(self):
+ realm = self.get_samdb().domain_dns_name()
+ client_creds = self._get_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={'upn': 'foo9$@' + realm})
+ self._run_as_req_enc_timestamp(client_creds)
+
+ def test_salt_upn_dollar_realm_managed_service(self):
+ realm = self.get_samdb().domain_dns_name()
+ client_creds = self._get_creds(
+ account_type=self.AccountType.MANAGED_SERVICE,
+ opts={'upn': 'foo28$@' + realm})
+ self._run_as_req_enc_timestamp(client_creds)
+
+ def test_salt_upn_host_dollar_realm_user(self):
+ realm = self.get_samdb().domain_dns_name()
+ client_creds = self._get_creds(
+ account_type=self.AccountType.USER,
+ opts={'upn': 'host/foo10$@' + realm})
+ self._run_as_req_enc_timestamp(client_creds)
+
+ def test_salt_upn_host_dollar_realm_mac(self):
+ realm = self.get_samdb().domain_dns_name()
+ client_creds = self._get_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={'upn': 'host/foo11$@' + realm})
+ self._run_as_req_enc_timestamp(client_creds)
+
+ def test_salt_upn_host_dollar_realm_managed_service(self):
+ realm = self.get_samdb().domain_dns_name()
+ client_creds = self._get_creds(
+ account_type=self.AccountType.MANAGED_SERVICE,
+ opts={'upn': 'host/foo29$@' + realm})
+ self._run_as_req_enc_timestamp(client_creds)
+
+ def test_salt_upn_other_realm_user(self):
+ client_creds = self._get_creds(
+ account_type=self.AccountType.USER,
+ opts={'upn': 'foo12@other.realm'})
+ self._run_as_req_enc_timestamp(client_creds)
+
+ def test_salt_upn_other_realm_mac(self):
+ client_creds = self._get_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={'upn': 'foo13@other.realm'})
+ self._run_as_req_enc_timestamp(client_creds)
+
+ def test_salt_upn_other_realm_managed_service(self):
+ client_creds = self._get_creds(
+ account_type=self.AccountType.MANAGED_SERVICE,
+ opts={'upn': 'foo30@other.realm'})
+ self._run_as_req_enc_timestamp(client_creds)
+
+ def test_salt_upn_host_other_realm_user(self):
+ client_creds = self._get_creds(
+ account_type=self.AccountType.USER,
+ opts={'upn': 'host/foo14@other.realm'})
+ self._run_as_req_enc_timestamp(client_creds)
+
+ def test_salt_upn_host_other_realm_mac(self):
+ client_creds = self._get_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={'upn': 'host/foo15@other.realm'})
+ self._run_as_req_enc_timestamp(client_creds)
+
+ def test_salt_upn_host_other_realm_managed_service(self):
+ client_creds = self._get_creds(
+ account_type=self.AccountType.MANAGED_SERVICE,
+ opts={'upn': 'host/foo31@other.realm'})
+ self._run_as_req_enc_timestamp(client_creds)
+
+ def test_salt_upn_case_user(self):
+ client_creds = self._get_creds(
+ account_type=self.AccountType.USER,
+ opts={'upn': 'Foo16'})
+ self._run_as_req_enc_timestamp(client_creds)
+
+ def test_salt_upn_case_mac(self):
+ client_creds = self._get_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={'upn': 'Foo17'})
+ self._run_as_req_enc_timestamp(client_creds)
+
+ def test_salt_upn_case_managed_service(self):
+ client_creds = self._get_creds(
+ account_type=self.AccountType.MANAGED_SERVICE,
+ opts={'upn': 'Foo32'})
+ self._run_as_req_enc_timestamp(client_creds)
+
+ def test_salt_upn_dollar_mid_realm_user(self):
+ realm = self.get_samdb().domain_dns_name()
+ client_creds = self._get_creds(
+ account_type=self.AccountType.USER,
+ opts={'upn': 'foo$18@' + realm})
+ self._run_as_req_enc_timestamp(client_creds)
+
+ def test_salt_upn_dollar_mid_realm_mac(self):
+ realm = self.get_samdb().domain_dns_name()
+ client_creds = self._get_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={'upn': 'foo$19@' + realm})
+ self._run_as_req_enc_timestamp(client_creds)
+
+ def test_salt_upn_dollar_mid_realm_managed_service(self):
+ realm = self.get_samdb().domain_dns_name()
+ client_creds = self._get_creds(
+ account_type=self.AccountType.MANAGED_SERVICE,
+ opts={'upn': 'foo$33@' + realm})
+ self._run_as_req_enc_timestamp(client_creds)
+
+ def test_salt_upn_host_dollar_mid_realm_user(self):
+ realm = self.get_samdb().domain_dns_name()
+ client_creds = self._get_creds(
+ account_type=self.AccountType.USER,
+ opts={'upn': 'host/foo$20@' + realm})
+ self._run_as_req_enc_timestamp(client_creds)
+
+ def test_salt_upn_host_dollar_mid_realm_mac(self):
+ realm = self.get_samdb().domain_dns_name()
+ client_creds = self._get_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={'upn': 'host/foo$21@' + realm})
+ self._run_as_req_enc_timestamp(client_creds)
+
+ def test_salt_upn_host_dollar_mid_realm_managed_service(self):
+ realm = self.get_samdb().domain_dns_name()
+ client_creds = self._get_creds(
+ account_type=self.AccountType.MANAGED_SERVICE,
+ opts={'upn': 'host/foo$34@' + realm})
+ self._run_as_req_enc_timestamp(client_creds)
+
+ def test_salt_upn_at_realm_user(self):
+ realm = self.get_samdb().domain_dns_name()
+ client_creds = self._get_creds(
+ account_type=self.AccountType.USER,
+ opts={'upn': 'foo22@bar@' + realm})
+ self._run_as_req_enc_timestamp(client_creds)
+
+ def test_salt_upn_at_realm_mac(self):
+ realm = self.get_samdb().domain_dns_name()
+ client_creds = self._get_creds(
+ account_type=self.AccountType.COMPUTER,
+ opts={'upn': 'foo23@bar@' + realm})
+ self._run_as_req_enc_timestamp(client_creds)
+
+ def test_salt_upn_at_realm_managed_service(self):
+ realm = self.get_samdb().domain_dns_name()
+ client_creds = self._get_creds(
+ account_type=self.AccountType.MANAGED_SERVICE,
+ opts={'upn': 'foo35@bar@' + realm})
+ self._run_as_req_enc_timestamp(client_creds)
+
+
+if __name__ == "__main__":
+ global_asn1_print = False
+ global_hexdump = False
+ import unittest
+ unittest.main()
diff --git a/python/samba/tests/krb5/simple_tests.py b/python/samba/tests/krb5/simple_tests.py
new file mode 100755
index 0000000..81587bb
--- /dev/null
+++ b/python/samba/tests/krb5/simple_tests.py
@@ -0,0 +1,185 @@
+#!/usr/bin/env python3
+# Unix SMB/CIFS implementation.
+# Copyright (C) Stefan Metzmacher 2020
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import sys
+import os
+
+sys.path.insert(0, "bin/python")
+os.environ["PYTHONUNBUFFERED"] = "1"
+
+from samba.tests.krb5.raw_testcase import RawKerberosTest
+from samba.tests.krb5.rfc4120_constants import (
+ KU_AS_REP_ENC_PART,
+ KU_PA_ENC_TIMESTAMP,
+ KU_TGS_REP_ENC_PART_SUB_KEY,
+)
+import samba.tests.krb5.rfc4120_pyasn1 as krb5_asn1
+
+global_asn1_print = False
+global_hexdump = False
+
+
+class SimpleKerberosTests(RawKerberosTest):
+
+ def setUp(self):
+ super().setUp()
+ self.do_asn1_print = global_asn1_print
+ self.do_hexdump = global_hexdump
+
+ def test_simple(self):
+ user_creds = self.get_user_creds()
+ user = user_creds.get_username()
+ krbtgt_creds = self.get_krbtgt_creds(require_keys=False)
+ krbtgt_account = krbtgt_creds.get_username()
+ realm = krbtgt_creds.get_realm()
+
+ cname = self.PrincipalName_create(name_type=1, names=[user])
+ sname = self.PrincipalName_create(name_type=2, names=[krbtgt_account, realm])
+
+ till = self.get_KerberosTime(offset=36000)
+
+ kdc_options = krb5_asn1.KDCOptions('forwardable')
+ padata = None
+
+ etypes = (18, 17, 23)
+
+ req = self.AS_REQ_create(padata=padata,
+ kdc_options=str(kdc_options),
+ cname=cname,
+ realm=realm,
+ sname=sname,
+ from_time=None,
+ till_time=till,
+ renew_time=None,
+ nonce=0x7fffffff,
+ etypes=etypes,
+ addresses=None,
+ additional_tickets=None)
+ rep = self.send_recv_transaction(req)
+ self.assertIsNotNone(rep)
+
+ self.assertEqual(rep['msg-type'], 30)
+ self.assertEqual(rep['error-code'], 25)
+ rep_padata = self.der_decode(
+ rep['e-data'], asn1Spec=krb5_asn1.METHOD_DATA())
+
+ for pa in rep_padata:
+ if pa['padata-type'] == 19:
+ etype_info2 = pa['padata-value']
+ break
+
+ etype_info2 = self.der_decode(
+ etype_info2, asn1Spec=krb5_asn1.ETYPE_INFO2())
+
+ key = self.PasswordKey_from_etype_info2(user_creds, etype_info2[0])
+
+ (patime, pausec) = self.get_KerberosTimeWithUsec()
+ pa_ts = self.PA_ENC_TS_ENC_create(patime, pausec)
+ pa_ts = self.der_encode(pa_ts, asn1Spec=krb5_asn1.PA_ENC_TS_ENC())
+
+ pa_ts = self.EncryptedData_create(key, KU_PA_ENC_TIMESTAMP, pa_ts)
+ pa_ts = self.der_encode(pa_ts, asn1Spec=krb5_asn1.EncryptedData())
+
+ pa_ts = self.PA_DATA_create(2, pa_ts)
+
+ kdc_options = krb5_asn1.KDCOptions('forwardable')
+ padata = [pa_ts]
+
+ req = self.AS_REQ_create(padata=padata,
+ kdc_options=str(kdc_options),
+ cname=cname,
+ realm=realm,
+ sname=sname,
+ from_time=None,
+ till_time=till,
+ renew_time=None,
+ nonce=0x7fffffff,
+ etypes=etypes,
+ addresses=None,
+ additional_tickets=None)
+ rep = self.send_recv_transaction(req)
+ self.assertIsNotNone(rep)
+
+ msg_type = rep['msg-type']
+ self.assertEqual(msg_type, 11)
+
+ enc_part2 = key.decrypt(KU_AS_REP_ENC_PART, rep['enc-part']['cipher'])
+
+ # MIT KDC encodes both EncASRepPart and EncTGSRepPart with
+ # application tag 26
+ try:
+ enc_part2 = self.der_decode(
+ enc_part2, asn1Spec=krb5_asn1.EncASRepPart())
+ except Exception:
+ enc_part2 = self.der_decode(
+ enc_part2, asn1Spec=krb5_asn1.EncTGSRepPart())
+
+ # TGS Request
+ service_creds = self.get_service_creds(allow_missing_password=True)
+ service_name = service_creds.get_username()
+
+ sname = self.PrincipalName_create(
+ name_type=2, names=["host", service_name])
+ kdc_options = krb5_asn1.KDCOptions('forwardable')
+ till = self.get_KerberosTime(offset=36000)
+ ticket = rep['ticket']
+ ticket_session_key = self.EncryptionKey_import(enc_part2['key'])
+ padata = []
+
+ subkey = self.RandomKey(ticket_session_key.etype)
+
+ (ctime, cusec) = self.get_KerberosTimeWithUsec()
+
+ req = self.TGS_REQ_create(padata=padata,
+ cusec=cusec,
+ ctime=ctime,
+ ticket=ticket,
+ kdc_options=str(kdc_options),
+ cname=cname,
+ realm=realm,
+ sname=sname,
+ from_time=None,
+ till_time=till,
+ renew_time=None,
+ nonce=0x7ffffffe,
+ etypes=etypes,
+ addresses=None,
+ EncAuthorizationData=None,
+ EncAuthorizationData_key=None,
+ additional_tickets=None,
+ ticket_session_key=ticket_session_key,
+ authenticator_subkey=subkey)
+ rep = self.send_recv_transaction(req)
+ self.assertIsNotNone(rep)
+
+ msg_type = rep['msg-type']
+ self.assertEqual(msg_type, 13)
+
+ enc_part2 = subkey.decrypt(
+ KU_TGS_REP_ENC_PART_SUB_KEY, rep['enc-part']['cipher'])
+ enc_part2 = self.der_decode(
+ enc_part2, asn1Spec=krb5_asn1.EncTGSRepPart())
+
+ return
+
+
+if __name__ == "__main__":
+ global_asn1_print = False
+ global_hexdump = False
+ import unittest
+ unittest.main()
diff --git a/python/samba/tests/krb5/spn_tests.py b/python/samba/tests/krb5/spn_tests.py
new file mode 100755
index 0000000..5bcc0bd
--- /dev/null
+++ b/python/samba/tests/krb5/spn_tests.py
@@ -0,0 +1,212 @@
+#!/usr/bin/env python3
+# Unix SMB/CIFS implementation.
+# Copyright (C) Stefan Metzmacher 2020
+# Copyright (C) 2020 Catalyst.Net Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import sys
+import os
+
+sys.path.insert(0, "bin/python")
+os.environ["PYTHONUNBUFFERED"] = "1"
+
+from samba.tests import DynamicTestCase
+
+import ldb
+
+from samba.tests.krb5.kdc_base_test import KDCBaseTest
+from samba.tests.krb5.raw_testcase import KerberosCredentials
+from samba.tests.krb5.rfc4120_constants import (
+ AES256_CTS_HMAC_SHA1_96,
+ ARCFOUR_HMAC_MD5,
+ KDC_ERR_S_PRINCIPAL_UNKNOWN,
+ NT_PRINCIPAL,
+)
+
+global_asn1_print = False
+global_hexdump = False
+
+
+@DynamicTestCase
+class SpnTests(KDCBaseTest):
+ test_account_types = {
+ 'computer': KDCBaseTest.AccountType.COMPUTER,
+ 'server': KDCBaseTest.AccountType.SERVER,
+ 'rodc': KDCBaseTest.AccountType.RODC
+ }
+ test_spns = {
+ '2_part': 'ldap/{{account}}',
+ '3_part_our_domain': 'ldap/{{account}}/{netbios_domain_name}',
+ '3_part_our_realm': 'ldap/{{account}}/{dns_domain_name}',
+ '3_part_not_our_realm': 'ldap/{{account}}/test',
+ '3_part_instance': 'ldap/{{account}}:test/{dns_domain_name}'
+ }
+
+ @classmethod
+ def setUpClass(cls):
+ super().setUpClass()
+
+ cls._mock_rodc_creds = None
+
+ @classmethod
+ def setUpDynamicTestCases(cls):
+ for account_type_name, account_type in cls.test_account_types.items():
+ for spn_name, spn in cls.test_spns.items():
+ tname = f'{spn_name}_spn_{account_type_name}'
+ targs = (account_type, spn)
+ cls.generate_dynamic_test('test_spn', tname, *targs)
+
+ def _test_spn_with_args(self, account_type, spn):
+ target_creds = self._get_creds(account_type)
+ spn = self._format_spn(spn, target_creds)
+
+ sname = self.PrincipalName_create(name_type=NT_PRINCIPAL,
+ names=spn.split('/'))
+
+ client_creds = self.get_client_creds()
+ tgt = self.get_tgt(client_creds)
+
+ samdb = self.get_samdb()
+ netbios_domain_name = samdb.domain_netbios_name()
+ dns_domain_name = samdb.domain_dns_name()
+
+ subkey = self.RandomKey(tgt.session_key.etype)
+
+ etypes = (AES256_CTS_HMAC_SHA1_96, ARCFOUR_HMAC_MD5,)
+
+ if account_type is self.AccountType.SERVER:
+ ticket_etype = AES256_CTS_HMAC_SHA1_96
+ else:
+ ticket_etype = None
+ decryption_key = self.TicketDecryptionKey_from_creds(
+ target_creds, etype=ticket_etype)
+
+ if (spn.count('/') > 1
+ and (spn.endswith(netbios_domain_name)
+ or spn.endswith(dns_domain_name))
+ and account_type is not self.AccountType.SERVER
+ and account_type is not self.AccountType.RODC):
+ expected_error_mode = KDC_ERR_S_PRINCIPAL_UNKNOWN
+ check_error_fn = self.generic_check_kdc_error
+ check_rep_fn = None
+ else:
+ expected_error_mode = 0
+ check_error_fn = None
+ check_rep_fn = self.generic_check_kdc_rep
+
+ kdc_exchange_dict = self.tgs_exchange_dict(
+ expected_crealm=tgt.crealm,
+ expected_cname=tgt.cname,
+ expected_srealm=tgt.srealm,
+ expected_sname=sname,
+ ticket_decryption_key=decryption_key,
+ check_rep_fn=check_rep_fn,
+ check_error_fn=check_error_fn,
+ check_kdc_private_fn=self.generic_check_kdc_private,
+ expected_error_mode=expected_error_mode,
+ tgt=tgt,
+ authenticator_subkey=subkey,
+ kdc_options='0',
+ expect_edata=False)
+
+ self._generic_kdc_exchange(kdc_exchange_dict,
+ cname=None,
+ realm=tgt.srealm,
+ sname=sname,
+ etypes=etypes)
+
+ def setUp(self):
+ super().setUp()
+ self.do_asn1_print = global_asn1_print
+ self.do_hexdump = global_hexdump
+
+ def _format_spns(self, spns, creds=None):
+ return map(lambda spn: self._format_spn(spn, creds), spns)
+
+ def _format_spn(self, spn, creds=None):
+ samdb = self.get_samdb()
+
+ spn = spn.format(netbios_domain_name=samdb.domain_netbios_name(),
+ dns_domain_name=samdb.domain_dns_name())
+
+ if creds is not None:
+ account_name = creds.get_username()
+ spn = spn.format(account=account_name)
+
+ return spn
+
+ def _get_creds(self, account_type):
+ spns = self._format_spns(self.test_spns.values())
+
+ if account_type is self.AccountType.RODC:
+ creds = self._mock_rodc_creds
+ if creds is None:
+ creds = self._get_mock_rodc_creds(spns)
+ type(self)._mock_rodc_creds = creds
+ else:
+ creds = self.get_cached_creds(
+ account_type=account_type,
+ opts={
+ 'spn': spns
+ })
+
+ return creds
+
+ def _get_mock_rodc_creds(self, spns):
+ rodc_ctx = self.get_mock_rodc_ctx()
+
+ for spn in spns:
+ spn = spn.format(account=rodc_ctx.myname)
+ if spn not in rodc_ctx.SPNs:
+ rodc_ctx.SPNs.append(spn)
+
+ samdb = self.get_samdb()
+ rodc_dn = ldb.Dn(samdb, rodc_ctx.acct_dn)
+
+ msg = ldb.Message(rodc_dn)
+ msg['servicePrincipalName'] = ldb.MessageElement(
+ rodc_ctx.SPNs,
+ ldb.FLAG_MOD_REPLACE,
+ 'servicePrincipalName')
+ samdb.modify(msg)
+
+ creds = KerberosCredentials()
+ creds.guess(self.get_lp())
+ creds.set_realm(rodc_ctx.realm.upper())
+ creds.set_domain(rodc_ctx.domain_name)
+ creds.set_password(rodc_ctx.acct_pass)
+ creds.set_username(rodc_ctx.myname)
+ creds.set_workstation(rodc_ctx.samname)
+ creds.set_dn(rodc_dn)
+ creds.set_spn(rodc_ctx.SPNs)
+
+ res = samdb.search(base=rodc_dn,
+ scope=ldb.SCOPE_BASE,
+ attrs=['msDS-KeyVersionNumber'])
+ kvno = int(res[0].get('msDS-KeyVersionNumber', idx=0))
+ creds.set_kvno(kvno)
+
+ keys = self.get_keys(creds)
+ self.creds_set_keys(creds, keys)
+
+ return creds
+
+
+if __name__ == "__main__":
+ global_asn1_print = False
+ global_hexdump = False
+ import unittest
+ unittest.main()
diff --git a/python/samba/tests/krb5/test_ccache.py b/python/samba/tests/krb5/test_ccache.py
new file mode 100755
index 0000000..6413bfa
--- /dev/null
+++ b/python/samba/tests/krb5/test_ccache.py
@@ -0,0 +1,173 @@
+#!/usr/bin/env python3
+# Unix SMB/CIFS implementation.
+# Copyright (C) Stefan Metzmacher 2020
+# Copyright (C) 2021 Catalyst.Net Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import sys
+import os
+
+sys.path.insert(0, "bin/python")
+os.environ["PYTHONUNBUFFERED"] = "1"
+
+import ldb
+
+from ldb import SCOPE_SUBTREE
+from samba import NTSTATUSError, gensec
+from samba.auth import AuthContext
+from samba.dcerpc import security
+from samba.ndr import ndr_unpack
+from samba.ntstatus import NT_STATUS_NO_IMPERSONATION_TOKEN
+
+from samba.tests.krb5.kdc_base_test import KDCBaseTest
+
+global_asn1_print = False
+global_hexdump = False
+
+
+class CcacheTests(KDCBaseTest):
+ """Test for authentication using Kerberos credentials stored in a
+ credentials cache file.
+ """
+
+ def test_ccache(self):
+ self._run_ccache_test()
+
+ def test_ccache_rename(self):
+ self._run_ccache_test(rename=True)
+
+ def test_ccache_no_pac(self):
+ self._run_ccache_test(include_pac=False,
+ expect_anon=True, allow_error=True)
+
+ def _run_ccache_test(self, rename=False, include_pac=True,
+ expect_anon=False, allow_error=False):
+ # Create a user account and a machine account, along with a Kerberos
+ # credentials cache file where the service ticket authenticating the
+ # user are stored.
+
+ mach_name = "ccachemac"
+ service = "host"
+
+ samdb = self.get_samdb()
+
+ # Create the user account.
+ user_credentials = self.get_cached_creds(
+ account_type=self.AccountType.USER,
+ use_cache=False)
+ user_name = user_credentials.get_username()
+
+ # Create the machine account.
+ (mach_credentials, _) = self.create_account(
+ samdb,
+ mach_name,
+ account_type=self.AccountType.COMPUTER,
+ spn="%s/%s" % (service,
+ mach_name))
+
+ # Talk to the KDC to obtain the service ticket, which gets placed into
+ # the cache. The machine account name has to match the name in the
+ # ticket, to ensure that the krbtgt ticket doesn't also need to be
+ # stored.
+ (creds, cachefile) = self.create_ccache_with_user(user_credentials,
+ mach_credentials,
+ pac=include_pac)
+ # Remove the cached credentials file.
+ self.addCleanup(os.remove, cachefile.name)
+
+ # Retrieve the user account's SID.
+ ldb_res = samdb.search(scope=SCOPE_SUBTREE,
+ expression="(sAMAccountName=%s)" % user_name,
+ attrs=["objectSid"])
+ self.assertEqual(1, len(ldb_res))
+ sid = ndr_unpack(security.dom_sid, ldb_res[0]["objectSid"][0])
+
+ if rename:
+ # Rename the account.
+
+ new_name = self.get_new_username()
+
+ msg = ldb.Message(user_credentials.get_dn())
+ msg['sAMAccountName'] = ldb.MessageElement(new_name,
+ ldb.FLAG_MOD_REPLACE,
+ 'sAMAccountName')
+ samdb.modify(msg)
+
+ # Authenticate in-process to the machine account using the user's
+ # cached credentials.
+
+ lp = self.get_lp()
+ lp.set('server role', 'active directory domain controller')
+
+ settings = {}
+ settings["lp_ctx"] = lp
+ settings["target_hostname"] = mach_name
+
+ gensec_client = gensec.Security.start_client(settings)
+ gensec_client.set_credentials(creds)
+ gensec_client.want_feature(gensec.FEATURE_SEAL)
+ gensec_client.start_mech_by_sasl_name("GSSAPI")
+
+ auth_context = AuthContext(lp_ctx=lp, ldb=samdb, methods=[])
+
+ gensec_server = gensec.Security.start_server(settings, auth_context)
+ gensec_server.set_credentials(mach_credentials)
+
+ gensec_server.start_mech_by_sasl_name("GSSAPI")
+
+ client_finished = False
+ server_finished = False
+ server_to_client = b''
+
+ # Operate as both the client and the server to verify the user's
+ # credentials.
+ while not client_finished or not server_finished:
+ if not client_finished:
+ print("running client gensec_update")
+ (client_finished, client_to_server) = gensec_client.update(
+ server_to_client)
+ if not server_finished:
+ print("running server gensec_update")
+ (server_finished, server_to_client) = gensec_server.update(
+ client_to_server)
+
+ # Ensure that the first SID contained within the obtained security
+ # token is the SID of the user we created.
+
+ # Retrieve the SIDs from the security token.
+ try:
+ session = gensec_server.session_info()
+ except NTSTATUSError as e:
+ if not allow_error:
+ self.fail()
+
+ enum, _ = e.args
+ self.assertEqual(NT_STATUS_NO_IMPERSONATION_TOKEN, enum)
+ return
+
+ token = session.security_token
+ token_sids = token.sids
+ self.assertGreater(len(token_sids), 0)
+
+ # Ensure that they match.
+ self.assertEqual(sid, token_sids[0])
+
+
+if __name__ == "__main__":
+ global_asn1_print = False
+ global_hexdump = False
+ import unittest
+ unittest.main()
diff --git a/python/samba/tests/krb5/test_idmap_nss.py b/python/samba/tests/krb5/test_idmap_nss.py
new file mode 100755
index 0000000..1ee0201
--- /dev/null
+++ b/python/samba/tests/krb5/test_idmap_nss.py
@@ -0,0 +1,232 @@
+#!/usr/bin/env python3
+# Unix SMB/CIFS implementation.
+# Copyright (C) Stefan Metzmacher 2020
+# Copyright (C) 2021 Catalyst.Net Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import sys
+import os
+
+sys.path.insert(0, 'bin/python')
+os.environ['PYTHONUNBUFFERED'] = '1'
+
+from ldb import SCOPE_SUBTREE
+from samba import NTSTATUSError
+from samba.credentials import DONT_USE_KERBEROS
+from samba.dcerpc import security
+from samba.ndr import ndr_unpack
+from samba.ntstatus import (
+ NT_STATUS_NO_IMPERSONATION_TOKEN,
+ NT_STATUS_LOGON_FAILURE
+)
+from samba.samba3 import libsmb_samba_internal as libsmb
+from samba.samba3 import param as s3param
+
+from samba.tests.krb5.kdc_base_test import KDCBaseTest
+
+global_asn1_print = False
+global_hexdump = False
+
+
+class IdmapNssTests(KDCBaseTest):
+
+ mappeduser_uid = 0xffff - 14
+ mappeduser_sid = security.dom_sid(f'S-1-22-1-{mappeduser_uid}')
+ unmappeduser_uid = 0xffff - 15
+ unmappeduser_sid = security.dom_sid(f'S-1-22-1-{unmappeduser_uid}')
+
+ def get_mapped_creds(self,
+ allow_missing_password=False,
+ allow_missing_keys=True):
+ c = self._get_krb5_creds(prefix='MAPPED',
+ allow_missing_password=allow_missing_password,
+ allow_missing_keys=allow_missing_keys)
+ c.set_workstation('')
+ return c
+
+ def get_unmapped_creds(self,
+ allow_missing_password=False,
+ allow_missing_keys=True):
+ c = self._get_krb5_creds(prefix='UNMAPPED',
+ allow_missing_password=allow_missing_password,
+ allow_missing_keys=allow_missing_keys)
+ c.set_workstation('')
+ return c
+
+ def get_invalid_creds(self,
+ allow_missing_password=False,
+ allow_missing_keys=True):
+ c = self._get_krb5_creds(prefix='INVALID',
+ allow_missing_password=allow_missing_password,
+ allow_missing_keys=allow_missing_keys)
+ c.set_workstation('')
+ return c
+
+ # Expect a mapping to the local user SID.
+ def test_mapped_user_kerberos(self):
+ user_creds = self.get_mapped_creds()
+ self._run_idmap_nss_test(user_creds, use_kerberos=True,
+ expected_first_sid=self.mappeduser_sid,
+ expected_uid=self.mappeduser_uid)
+
+ # Expect a mapping to the local user SID.
+ def test_mapped_user_ntlm(self):
+ user_creds = self.get_mapped_creds()
+ self._run_idmap_nss_test(user_creds, use_kerberos=False,
+ expected_first_sid=self.mappeduser_sid,
+ expected_uid=self.mappeduser_uid)
+
+ def test_mapped_user_no_pac_kerberos(self):
+ user_creds = self.get_mapped_creds()
+ self._run_idmap_nss_test(
+ user_creds, use_kerberos=True, remove_pac=True,
+ expected_error=NT_STATUS_NO_IMPERSONATION_TOKEN)
+
+ def test_unmapped_user_kerberos(self):
+ user_creds = self.get_unmapped_creds()
+ self._run_idmap_nss_test(user_creds, use_kerberos=True,
+ expected_additional_sid=self.unmappeduser_sid,
+ expected_uid=self.unmappeduser_uid)
+
+ def test_unmapped_user_ntlm(self):
+ user_creds = self.get_unmapped_creds()
+ self._run_idmap_nss_test(user_creds, use_kerberos=False,
+ expected_additional_sid=self.unmappeduser_sid,
+ expected_uid=self.unmappeduser_uid)
+
+ def test_unmapped_user_no_pac_kerberos(self):
+ user_creds = self.get_unmapped_creds()
+ self._run_idmap_nss_test(
+ user_creds, use_kerberos=True, remove_pac=True,
+ expected_error=NT_STATUS_NO_IMPERSONATION_TOKEN)
+
+ def test_invalid_user_kerberos(self):
+ user_creds = self.get_invalid_creds()
+ self._run_idmap_nss_test(user_creds, use_kerberos=True,
+ expected_error=NT_STATUS_LOGON_FAILURE)
+
+ def test_invalid_user_ntlm(self):
+ user_creds = self.get_invalid_creds()
+ self._run_idmap_nss_test(user_creds, use_kerberos=False,
+ expected_error=NT_STATUS_LOGON_FAILURE)
+
+ def test_invalid_user_no_pac_kerberos(self):
+ user_creds = self.get_invalid_creds()
+ self._run_idmap_nss_test(
+ user_creds, use_kerberos=True, remove_pac=True,
+ expected_error=NT_STATUS_NO_IMPERSONATION_TOKEN)
+
+ def _run_idmap_nss_test(self, user_creds,
+ use_kerberos,
+ remove_pac=False,
+ expected_error=None,
+ expected_first_sid=None,
+ expected_additional_sid=None,
+ expected_uid=None):
+ if expected_first_sid is not None:
+ self.assertIsNotNone(expected_uid)
+ if expected_additional_sid is not None:
+ self.assertIsNotNone(expected_uid)
+ if expected_uid is not None:
+ self.assertIsNone(expected_error)
+
+ if not use_kerberos:
+ self.assertFalse(remove_pac)
+
+ samdb = self.get_samdb()
+
+ server_name = self.host
+ service = 'cifs'
+ share = 'tmp'
+
+ server_creds = self.get_server_creds()
+
+ if expected_first_sid is None:
+ # Retrieve the user account's SID.
+ user_name = user_creds.get_username()
+ res = samdb.search(scope=SCOPE_SUBTREE,
+ expression=f'(sAMAccountName={user_name})',
+ attrs=['objectSid'])
+ self.assertEqual(1, len(res))
+
+ expected_first_sid = ndr_unpack(security.dom_sid,
+ res[0].get('objectSid', idx=0))
+
+ if use_kerberos:
+ # Talk to the KDC to obtain the service ticket, which gets placed
+ # into the cache. The machine account name has to match the name in
+ # the ticket, to ensure that the krbtgt ticket doesn't also need to
+ # be stored.
+ creds, cachefile = self.create_ccache_with_user(
+ user_creds,
+ server_creds,
+ service,
+ server_name,
+ pac=not remove_pac)
+
+ # Remove the cached creds file.
+ self.addCleanup(os.remove, cachefile.name)
+
+ # Set the Kerberos 5 creds cache environment variable. This is
+ # required because the codepath that gets run (gse_krb5) looks for
+ # it in here and not in the creds object.
+ krb5_ccname = os.environ.get('KRB5CCNAME', '')
+ self.addCleanup(os.environ.__setitem__, 'KRB5CCNAME', krb5_ccname)
+ os.environ['KRB5CCNAME'] = 'FILE:' + cachefile.name
+ else:
+ creds = user_creds
+ creds.set_kerberos_state(DONT_USE_KERBEROS)
+
+ # Connect to a share and retrieve the user SID.
+ s3_lp = s3param.get_context()
+ s3_lp.load(self.get_lp().configfile)
+
+ min_protocol = s3_lp.get('client min protocol')
+ self.addCleanup(s3_lp.set, 'client min protocol', min_protocol)
+ s3_lp.set('client min protocol', 'NT1')
+
+ max_protocol = s3_lp.get('client max protocol')
+ self.addCleanup(s3_lp.set, 'client max protocol', max_protocol)
+ s3_lp.set('client max protocol', 'NT1')
+
+ try:
+ conn = libsmb.Conn(server_name, share, lp=s3_lp, creds=creds)
+ except NTSTATUSError as e:
+ enum, _ = e.args
+ self.assertEqual(expected_error, enum)
+ return
+ else:
+ self.assertIsNone(expected_error)
+
+ uid, gid, gids, sids, guest = conn.posix_whoami()
+
+ # Ensure that they match.
+ self.assertEqual(expected_first_sid, sids[0])
+ self.assertNotIn(expected_first_sid, sids[1:-1])
+
+ if expected_additional_sid:
+ self.assertNotEqual(expected_additional_sid, sids[0])
+ self.assertIn(expected_additional_sid, sids)
+
+ self.assertIsNotNone(expected_uid)
+ self.assertEqual(expected_uid, uid)
+
+
+if __name__ == '__main__':
+ global_asn1_print = False
+ global_hexdump = False
+ import unittest
+ unittest.main()
diff --git a/python/samba/tests/krb5/test_ldap.py b/python/samba/tests/krb5/test_ldap.py
new file mode 100755
index 0000000..eaf79e7
--- /dev/null
+++ b/python/samba/tests/krb5/test_ldap.py
@@ -0,0 +1,168 @@
+#!/usr/bin/env python3
+# Unix SMB/CIFS implementation.
+# Copyright (C) Stefan Metzmacher 2020
+# Copyright (C) 2021 Catalyst.Net Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import sys
+import os
+
+sys.path.insert(0, "bin/python")
+os.environ["PYTHONUNBUFFERED"] = "1"
+
+import ldb
+
+from ldb import LdbError, ERR_OPERATIONS_ERROR, SCOPE_BASE, SCOPE_SUBTREE
+from samba.dcerpc import security
+from samba.ndr import ndr_unpack
+from samba.samdb import SamDB
+from samba import credentials
+
+from samba.tests.krb5.kdc_base_test import KDCBaseTest
+
+global_asn1_print = False
+global_hexdump = False
+
+
+class LdapTests(KDCBaseTest):
+ """Test for LDAP authentication using Kerberos credentials stored in a
+ credentials cache file.
+ """
+
+ def test_ldap(self):
+ self._run_ldap_test()
+
+ def test_ldap_rename(self):
+ self._run_ldap_test(rename=True)
+
+ def test_ldap_no_pac(self):
+ self._run_ldap_test(include_pac=False,
+ expect_anon=True, allow_error=True)
+
+ def _run_ldap_test(self, rename=False, include_pac=True,
+ expect_anon=False, allow_error=False):
+ # Create a user account and a machine account, along with a Kerberos
+ # credentials cache file where the service ticket authenticating the
+ # user is stored.
+
+ samdb = self.get_samdb()
+
+ mach_name = samdb.host_dns_name()
+ service = "ldap"
+
+ # Create the user account.
+ user_credentials = self.get_cached_creds(
+ account_type=self.AccountType.USER,
+ use_cache=False)
+ user_name = user_credentials.get_username()
+
+ mach_credentials = self.get_dc_creds()
+
+ # Talk to the KDC to obtain the service ticket, which gets placed into
+ # the cache. The machine account name has to match the name in the
+ # ticket, to ensure that the krbtgt ticket doesn't also need to be
+ # stored.
+ (creds, cachefile) = self.create_ccache_with_user(user_credentials,
+ mach_credentials,
+ service,
+ mach_name,
+ pac=include_pac)
+ # Remove the cached credentials file.
+ self.addCleanup(os.remove, cachefile.name)
+
+ # Retrieve the user account's SID.
+ ldb_res = samdb.search(scope=SCOPE_SUBTREE,
+ expression="(sAMAccountName=%s)" % user_name,
+ attrs=["objectSid"])
+ self.assertEqual(1, len(ldb_res))
+ sid = ndr_unpack(security.dom_sid, ldb_res[0]["objectSid"][0])
+
+ if rename:
+ # Rename the account.
+
+ new_name = self.get_new_username()
+
+ msg = ldb.Message(user_credentials.get_dn())
+ msg['sAMAccountName'] = ldb.MessageElement(new_name,
+ ldb.FLAG_MOD_REPLACE,
+ 'sAMAccountName')
+ samdb.modify(msg)
+
+ # Authenticate in-process to the machine account using the user's
+ # cached credentials.
+
+ # Connect to the machine account and retrieve the user SID.
+ try:
+ ldb_as_user = SamDB(url="ldap://%s" % mach_name,
+ credentials=creds,
+ lp=self.get_lp())
+ except LdbError as e:
+ if not allow_error:
+ self.fail()
+
+ enum, estr = e.args
+ self.assertEqual(ERR_OPERATIONS_ERROR, enum)
+ self.assertIn('NT_STATUS_NO_IMPERSONATION_TOKEN', estr)
+ return
+
+ ldb_res = ldb_as_user.search('',
+ scope=SCOPE_BASE,
+ attrs=["tokenGroups"])
+ self.assertEqual(1, len(ldb_res))
+
+ token_groups = ldb_res[0]["tokenGroups"]
+ token_sid = ndr_unpack(security.dom_sid, token_groups[0])
+
+ if expect_anon:
+ # Ensure we got an anonymous token.
+ self.assertEqual(security.SID_NT_ANONYMOUS, str(token_sid))
+ token_sid = ndr_unpack(security.dom_sid, token_groups[1])
+ self.assertEqual(security.SID_NT_NETWORK, str(token_sid))
+ if len(token_groups) >= 3:
+ token_sid = ndr_unpack(security.dom_sid, token_groups[2])
+ self.assertEqual(security.SID_NT_THIS_ORGANISATION,
+ str(token_sid))
+ else:
+ # Ensure that they match.
+ self.assertEqual(sid, token_sid)
+
+ def test_ldap_anonymous(self):
+ samdb = self.get_samdb()
+ mach_name = samdb.host_dns_name()
+
+ anon_creds = credentials.Credentials()
+ anon_creds.set_anonymous()
+
+ # Connect to the machine account and retrieve the user SID.
+ ldb_as_user = SamDB(url="ldap://%s" % mach_name,
+ credentials=anon_creds,
+ lp=self.get_lp())
+ ldb_res = ldb_as_user.search('',
+ scope=SCOPE_BASE,
+ attrs=["tokenGroups"])
+ self.assertEqual(1, len(ldb_res))
+
+ # Ensure we got an anonymous token.
+ token_sid = ndr_unpack(security.dom_sid, ldb_res[0]["tokenGroups"][0])
+ self.assertEqual(security.SID_NT_ANONYMOUS, str(token_sid))
+ self.assertEqual(len(ldb_res[0]["tokenGroups"]), 1)
+
+
+if __name__ == "__main__":
+ global_asn1_print = False
+ global_hexdump = False
+ import unittest
+ unittest.main()
diff --git a/python/samba/tests/krb5/test_min_domain_uid.py b/python/samba/tests/krb5/test_min_domain_uid.py
new file mode 100755
index 0000000..9cabb7c
--- /dev/null
+++ b/python/samba/tests/krb5/test_min_domain_uid.py
@@ -0,0 +1,122 @@
+#!/usr/bin/env python3
+# Unix SMB/CIFS implementation.
+# Copyright (C) Samuel Cabrero 2021
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import sys
+import os
+
+sys.path.insert(0, "bin/python")
+os.environ["PYTHONUNBUFFERED"] = "1"
+
+import pwd
+import ctypes
+
+from samba.tests import env_get_var_value
+from samba.samba3 import libsmb_samba_internal as libsmb
+from samba.samba3 import param as s3param
+from samba import NTSTATUSError, ntstatus
+
+from samba.tests.krb5.kdc_base_test import KDCBaseTest
+from samba.credentials import MUST_USE_KERBEROS, DONT_USE_KERBEROS
+
+class SmbMinDomainUid(KDCBaseTest):
+ """Test for SMB authorization without NSS winbind. In such setup domain
+ accounts are mapped to local accounts using the 'username map' option.
+ """
+
+ def setUp(self):
+ super().setUp()
+
+ # Create a user account, along with a Kerberos credentials cache file
+ # where the service ticket authenticating the user is stored.
+ self.samdb = self.get_samdb()
+
+ self.mach_name = env_get_var_value('SERVER')
+ self.user_name = "root"
+ self.service = "cifs"
+ self.share = "tmp"
+
+ # Create the user account.
+ (self.user_creds, _) = self.create_account(self.samdb, self.user_name)
+
+ # Build the global inject file path
+ server_conf = env_get_var_value('SMB_CONF_PATH')
+ server_conf_dir = os.path.dirname(server_conf)
+ self.global_inject = os.path.join(server_conf_dir, "global_inject.conf")
+
+ def _test_min_uid(self, creds):
+ # Assert unix root uid is less than 'idmap config ADDOMAIN' minimum
+ s3_lp = s3param.get_context()
+ s3_lp.load(self.get_lp().configfile)
+
+ domain_range = s3_lp.get("idmap config * : range").split('-')
+ domain_range_low = int(domain_range[0])
+ unix_root_pw = pwd.getpwnam(self.user_name)
+ self.assertLess(unix_root_pw.pw_uid, domain_range_low)
+ self.assertLess(unix_root_pw.pw_gid, domain_range_low)
+
+ conn = libsmb.Conn(self.mach_name, self.share, lp=s3_lp, creds=creds)
+ # Disconnect
+ conn = None
+
+ # Restrict access to local root account uid
+ with open(self.global_inject, 'w') as f:
+ f.write("min domain uid = %s\n" % (unix_root_pw.pw_uid + 1))
+
+ with self.assertRaises(NTSTATUSError) as cm:
+ conn = libsmb.Conn(self.mach_name,
+ self.share,
+ lp=s3_lp,
+ creds=creds)
+ code = ctypes.c_uint32(cm.exception.args[0]).value
+ self.assertEqual(code, ntstatus.NT_STATUS_INVALID_TOKEN)
+
+ # check that the local root account uid is now allowed
+ with open(self.global_inject, 'w') as f:
+ f.write("min domain uid = %s\n" % unix_root_pw.pw_uid)
+
+ conn = libsmb.Conn(self.mach_name, self.share, lp=s3_lp, creds=creds)
+ # Disconnect
+ del conn
+
+ with open(self.global_inject, 'w') as f:
+ f.truncate()
+
+ def test_min_domain_uid_krb5(self):
+ krb5_state = self.user_creds.get_kerberos_state()
+ self.user_creds.set_kerberos_state(MUST_USE_KERBEROS)
+ ret = self._test_min_uid(self.user_creds)
+ self.user_creds.set_kerberos_state(krb5_state)
+ return ret
+
+ def test_min_domain_uid_ntlmssp(self):
+ krb5_state = self.user_creds.get_kerberos_state()
+ self.user_creds.set_kerberos_state(DONT_USE_KERBEROS)
+ ret = self._test_min_uid(self.user_creds)
+ self.user_creds.set_kerberos_state(krb5_state)
+ return ret
+
+ def tearDown(self):
+ # Ensure no leftovers in global inject file
+ with open(self.global_inject, 'w') as f:
+ f.truncate()
+
+ super().tearDown()
+
+if __name__ == "__main__":
+ import unittest
+ unittest.main()
diff --git a/python/samba/tests/krb5/test_rpc.py b/python/samba/tests/krb5/test_rpc.py
new file mode 100755
index 0000000..6faf2a0
--- /dev/null
+++ b/python/samba/tests/krb5/test_rpc.py
@@ -0,0 +1,138 @@
+#!/usr/bin/env python3
+# Unix SMB/CIFS implementation.
+# Copyright (C) Stefan Metzmacher 2020
+# Copyright (C) 2021 Catalyst.Net Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import sys
+import os
+
+sys.path.insert(0, "bin/python")
+os.environ["PYTHONUNBUFFERED"] = "1"
+
+import ldb
+
+from samba import NTSTATUSError, credentials
+from samba.dcerpc import lsa
+from samba.ntstatus import (
+ NT_STATUS_ACCESS_DENIED,
+ NT_STATUS_NO_IMPERSONATION_TOKEN
+)
+
+from samba.tests.krb5.kdc_base_test import KDCBaseTest
+
+global_asn1_print = False
+global_hexdump = False
+
+
+class RpcTests(KDCBaseTest):
+ """Test for RPC authentication using Kerberos credentials stored in a
+ credentials cache file.
+ """
+
+ def test_rpc(self):
+ self._run_rpc_test()
+
+ def test_rpc_rename(self):
+ self._run_rpc_test(rename=True)
+
+ def test_rpc_no_pac(self):
+ self._run_rpc_test(include_pac=False,
+ expect_anon=True, allow_error=True)
+
+ def _run_rpc_test(self, rename=False, include_pac=True,
+ expect_anon=False, allow_error=False):
+ # Create a user account and a machine account, along with a Kerberos
+ # credentials cache file where the service ticket authenticating the
+ # user is stored.
+
+ samdb = self.get_samdb()
+
+ mach_name = self.host
+ service = "cifs"
+
+ # Create the user account.
+ user_credentials = self.get_cached_creds(
+ account_type=self.AccountType.USER,
+ use_cache=False)
+ user_name = user_credentials.get_username()
+
+ mach_credentials = self.get_server_creds()
+
+ # Talk to the KDC to obtain the service ticket, which gets placed into
+ # the cache. The machine account name has to match the name in the
+ # ticket, to ensure that the krbtgt ticket doesn't also need to be
+ # stored.
+ (creds, cachefile) = self.create_ccache_with_user(user_credentials,
+ mach_credentials,
+ service,
+ mach_name,
+ pac=include_pac)
+ # Remove the cached credentials file.
+ self.addCleanup(os.remove, cachefile.name)
+
+ if rename:
+ # Rename the account.
+
+ new_name = self.get_new_username()
+
+ msg = ldb.Message(user_credentials.get_dn())
+ msg['sAMAccountName'] = ldb.MessageElement(new_name,
+ ldb.FLAG_MOD_REPLACE,
+ 'sAMAccountName')
+ samdb.modify(msg)
+
+ # Authenticate in-process to the machine account using the user's
+ # cached credentials.
+
+ binding_str = "ncacn_np:%s[\\pipe\\lsarpc]" % mach_name
+ try:
+ conn = lsa.lsarpc(binding_str, self.get_lp(), creds)
+ except NTSTATUSError as e:
+ if not allow_error:
+ self.fail()
+
+ enum, _ = e.args
+ self.assertIn(enum, {NT_STATUS_ACCESS_DENIED,
+ NT_STATUS_NO_IMPERSONATION_TOKEN})
+ return
+
+ (account_name, _) = conn.GetUserName(None, None, None)
+
+ if expect_anon:
+ self.assertNotEqual(user_name, account_name.string)
+ else:
+ self.assertEqual(user_name, account_name.string)
+
+ def test_rpc_anonymous(self):
+ mach_name = self.host
+
+ anon_creds = credentials.Credentials()
+ anon_creds.set_anonymous()
+
+ binding_str = "ncacn_np:%s[\\pipe\\lsarpc]" % mach_name
+ conn = lsa.lsarpc(binding_str, self.get_lp(), anon_creds)
+
+ (account_name, _) = conn.GetUserName(None, None, None)
+
+ self.assertEqual('ANONYMOUS LOGON', account_name.string.upper())
+
+
+if __name__ == "__main__":
+ global_asn1_print = False
+ global_hexdump = False
+ import unittest
+ unittest.main()
diff --git a/python/samba/tests/krb5/test_smb.py b/python/samba/tests/krb5/test_smb.py
new file mode 100755
index 0000000..f0a82a4
--- /dev/null
+++ b/python/samba/tests/krb5/test_smb.py
@@ -0,0 +1,153 @@
+#!/usr/bin/env python3
+# Unix SMB/CIFS implementation.
+# Copyright (C) Stefan Metzmacher 2020
+# Copyright (C) 2021 Catalyst.Net Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import sys
+import os
+
+sys.path.insert(0, "bin/python")
+os.environ["PYTHONUNBUFFERED"] = "1"
+
+import ldb
+
+from ldb import SCOPE_SUBTREE
+from samba import NTSTATUSError
+from samba.dcerpc import security
+from samba.ndr import ndr_unpack
+from samba.ntstatus import NT_STATUS_NO_IMPERSONATION_TOKEN
+from samba.samba3 import libsmb_samba_internal as libsmb
+from samba.samba3 import param as s3param
+
+from samba.tests.krb5.kdc_base_test import KDCBaseTest
+
+global_asn1_print = False
+global_hexdump = False
+
+
+class SmbTests(KDCBaseTest):
+ """Test for SMB authentication using Kerberos credentials stored in a
+ credentials cache file.
+ """
+
+ def test_smb(self):
+ self._run_smb_test()
+
+ def test_smb_rename(self):
+ self._run_smb_test(rename=True)
+
+ def test_smb_no_pac(self):
+ self._run_smb_test(include_pac=False,
+ expect_error=True)
+
+ def _run_smb_test(self, rename=False, include_pac=True,
+ expect_error=False):
+ # Create a user account and a machine account, along with a Kerberos
+ # credentials cache file where the service ticket authenticating the
+ # user is stored.
+
+ samdb = self.get_samdb()
+
+ mach_name = samdb.host_dns_name()
+ service = "cifs"
+ share = "tmp"
+
+ # Create the user account.
+ user_credentials = self.get_cached_creds(
+ account_type=self.AccountType.USER,
+ use_cache=False)
+ user_name = user_credentials.get_username()
+
+ mach_credentials = self.get_dc_creds()
+
+
+
+ # Talk to the KDC to obtain the service ticket, which gets placed into
+ # the cache. The machine account name has to match the name in the
+ # ticket, to ensure that the krbtgt ticket doesn't also need to be
+ # stored.
+ (creds, cachefile) = self.create_ccache_with_user(user_credentials,
+ mach_credentials,
+ service,
+ mach_name,
+ pac=include_pac)
+ # Remove the cached credentials file.
+ self.addCleanup(os.remove, cachefile.name)
+
+ # Retrieve the user account's SID.
+ ldb_res = samdb.search(scope=SCOPE_SUBTREE,
+ expression="(sAMAccountName=%s)" % user_name,
+ attrs=["objectSid"])
+ self.assertEqual(1, len(ldb_res))
+ sid = ndr_unpack(security.dom_sid, ldb_res[0]["objectSid"][0])
+
+ if rename:
+ # Rename the account.
+
+ new_name = self.get_new_username()
+
+ msg = ldb.Message(user_credentials.get_dn())
+ msg['sAMAccountName'] = ldb.MessageElement(new_name,
+ ldb.FLAG_MOD_REPLACE,
+ 'sAMAccountName')
+ samdb.modify(msg)
+
+ # Set the Kerberos 5 credentials cache environment variable. This is
+ # required because the codepath that gets run (gse_krb5) looks for it
+ # in here and not in the credentials object.
+ krb5_ccname = os.environ.get("KRB5CCNAME", "")
+ self.addCleanup(os.environ.__setitem__, "KRB5CCNAME", krb5_ccname)
+ os.environ["KRB5CCNAME"] = "FILE:" + cachefile.name
+
+ # Authenticate in-process to the machine account using the user's
+ # cached credentials.
+
+ # Connect to a share and retrieve the user SID.
+ s3_lp = s3param.get_context()
+ s3_lp.load(self.get_lp().configfile)
+
+ min_protocol = s3_lp.get("client min protocol")
+ self.addCleanup(s3_lp.set, "client min protocol", min_protocol)
+ s3_lp.set("client min protocol", "NT1")
+
+ max_protocol = s3_lp.get("client max protocol")
+ self.addCleanup(s3_lp.set, "client max protocol", max_protocol)
+ s3_lp.set("client max protocol", "NT1")
+
+ try:
+ conn = libsmb.Conn(mach_name, share, lp=s3_lp, creds=creds)
+ except NTSTATUSError as e:
+ if not expect_error:
+ self.fail()
+
+ enum, _ = e.args
+ self.assertEqual(NT_STATUS_NO_IMPERSONATION_TOKEN, enum)
+ return
+ else:
+ self.assertFalse(expect_error)
+
+ (uid, gid, gids, sids, guest) = conn.posix_whoami()
+
+ # Ensure that they match.
+ self.assertEqual(sid, sids[0])
+
+
+if __name__ == "__main__":
+ global_asn1_print = False
+ global_hexdump = False
+ import unittest
+ unittest.main()
diff --git a/python/samba/tests/krb5/xrealm_tests.py b/python/samba/tests/krb5/xrealm_tests.py
new file mode 100755
index 0000000..70e06f8
--- /dev/null
+++ b/python/samba/tests/krb5/xrealm_tests.py
@@ -0,0 +1,187 @@
+#!/usr/bin/env python3
+# Unix SMB/CIFS implementation.
+# Copyright (C) Stefan Metzmacher 2020
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import sys
+import os
+
+sys.path.insert(0, "bin/python")
+os.environ["PYTHONUNBUFFERED"] = "1"
+
+from samba.tests.krb5.raw_testcase import RawKerberosTest
+from samba.tests.krb5.rfc4120_constants import (
+ KU_PA_ENC_TIMESTAMP,
+ KU_AS_REP_ENC_PART,
+ KU_TGS_REP_ENC_PART_SUB_KEY,
+)
+import samba.tests.krb5.rfc4120_pyasn1 as krb5_asn1
+import samba.tests
+
+global_asn1_print = False
+global_hexdump = False
+
+
+class XrealmKerberosTests(RawKerberosTest):
+
+ def setUp(self):
+ super().setUp()
+ self.do_asn1_print = global_asn1_print
+ self.do_hexdump = global_hexdump
+
+ def test_xrealm(self):
+ user_creds = self.get_user_creds()
+ user = user_creds.get_username()
+ realm = user_creds.get_realm()
+
+ cname = self.PrincipalName_create(name_type=1, names=[user])
+ sname = self.PrincipalName_create(name_type=2, names=["krbtgt", realm])
+
+ till = self.get_KerberosTime(offset=36000)
+
+ kdc_options = krb5_asn1.KDCOptions('forwardable')
+ padata = None
+
+ etypes = (18, 17, 23)
+
+ req = self.AS_REQ_create(padata=padata,
+ kdc_options=str(kdc_options),
+ cname=cname,
+ realm=realm,
+ sname=sname,
+ from_time=None,
+ till_time=till,
+ renew_time=None,
+ nonce=0x7fffffff,
+ etypes=etypes,
+ addresses=None,
+ additional_tickets=None)
+ rep = self.send_recv_transaction(req)
+ self.assertIsNotNone(rep)
+
+ self.assertEqual(rep['msg-type'], 30)
+ self.assertEqual(rep['error-code'], 25)
+ rep_padata = self.der_decode(
+ rep['e-data'], asn1Spec=krb5_asn1.METHOD_DATA())
+
+ for pa in rep_padata:
+ if pa['padata-type'] == 19:
+ etype_info2 = pa['padata-value']
+ break
+
+ etype_info2 = self.der_decode(
+ etype_info2, asn1Spec=krb5_asn1.ETYPE_INFO2())
+
+ key = self.PasswordKey_from_etype_info2(user_creds, etype_info2[0])
+
+ (patime, pausec) = self.get_KerberosTimeWithUsec()
+ pa_ts = self.PA_ENC_TS_ENC_create(patime, pausec)
+ pa_ts = self.der_encode(pa_ts, asn1Spec=krb5_asn1.PA_ENC_TS_ENC())
+
+ pa_ts = self.EncryptedData_create(key, KU_PA_ENC_TIMESTAMP, pa_ts)
+ pa_ts = self.der_encode(pa_ts, asn1Spec=krb5_asn1.EncryptedData())
+
+ pa_ts = self.PA_DATA_create(2, pa_ts)
+
+ kdc_options = krb5_asn1.KDCOptions('forwardable')
+ padata = [pa_ts]
+
+ req = self.AS_REQ_create(padata=padata,
+ kdc_options=str(kdc_options),
+ cname=cname,
+ realm=realm,
+ sname=sname,
+ from_time=None,
+ till_time=till,
+ renew_time=None,
+ nonce=0x7fffffff,
+ etypes=etypes,
+ addresses=None,
+ additional_tickets=None)
+ rep = self.send_recv_transaction(req)
+ self.assertIsNotNone(rep)
+
+ msg_type = rep['msg-type']
+ self.assertEqual(msg_type, 11)
+
+ enc_part2 = key.decrypt(KU_AS_REP_ENC_PART, rep['enc-part']['cipher'])
+
+ # MIT KDC encodes both EncASRepPart and EncTGSRepPart with
+ # application tag 26
+ try:
+ enc_part2 = self.der_decode(
+ enc_part2, asn1Spec=krb5_asn1.EncASRepPart())
+ except Exception:
+ enc_part2 = self.der_decode(
+ enc_part2, asn1Spec=krb5_asn1.EncTGSRepPart())
+
+ # TGS Request (for cross-realm TGT)
+ trust_realm = samba.tests.env_get_var_value('TRUST_REALM')
+ sname = self.PrincipalName_create(
+ name_type=2, names=["krbtgt", trust_realm])
+
+ kdc_options = krb5_asn1.KDCOptions('forwardable')
+ till = self.get_KerberosTime(offset=36000)
+ ticket = rep['ticket']
+ ticket_session_key = self.EncryptionKey_import(enc_part2['key'])
+ padata = []
+
+ subkey = self.RandomKey(ticket_session_key.etype)
+
+ (ctime, cusec) = self.get_KerberosTimeWithUsec()
+
+ req = self.TGS_REQ_create(padata=padata,
+ cusec=cusec,
+ ctime=ctime,
+ ticket=ticket,
+ kdc_options=str(kdc_options),
+ cname=cname,
+ realm=realm,
+ sname=sname,
+ from_time=None,
+ till_time=till,
+ renew_time=None,
+ nonce=0x7ffffffe,
+ etypes=etypes,
+ addresses=None,
+ EncAuthorizationData=None,
+ EncAuthorizationData_key=None,
+ additional_tickets=None,
+ ticket_session_key=ticket_session_key,
+ authenticator_subkey=subkey)
+ rep = self.send_recv_transaction(req)
+ self.assertIsNotNone(rep)
+
+ msg_type = rep['msg-type']
+ self.assertEqual(msg_type, 13)
+
+ enc_part2 = subkey.decrypt(
+ KU_TGS_REP_ENC_PART_SUB_KEY, rep['enc-part']['cipher'])
+ enc_part2 = self.der_decode(
+ enc_part2, asn1Spec=krb5_asn1.EncTGSRepPart())
+
+ # Check the forwardable flag
+ fwd_pos = len(tuple(krb5_asn1.TicketFlags('forwardable'))) - 1
+ assert(krb5_asn1.TicketFlags(enc_part2['flags'])[fwd_pos])
+
+ return
+
+
+if __name__ == "__main__":
+ global_asn1_print = False
+ global_hexdump = False
+ import unittest
+ unittest.main()
diff --git a/python/samba/tests/krb5_credentials.py b/python/samba/tests/krb5_credentials.py
new file mode 100644
index 0000000..192d787
--- /dev/null
+++ b/python/samba/tests/krb5_credentials.py
@@ -0,0 +1,111 @@
+# Integration tests for pycredentials
+#
+# Copyright (C) Catalyst IT Ltd. 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+from samba.tests import TestCase, delete_force
+import os
+
+import samba
+from samba.auth import system_session
+from samba.credentials import (
+ Credentials,
+)
+from samba.dsdb import (
+ UF_WORKSTATION_TRUST_ACCOUNT,
+ UF_PASSWD_NOTREQD,
+)
+from samba.samdb import SamDB
+
+"""KRB5 Integration tests for pycredentials.
+
+Separated from py_credentials so as to allow running against just one
+environment so we know the server that we add the user on will be our
+KDC
+
+"""
+
+MACHINE_NAME = "krb5credstest"
+
+
+class PyKrb5CredentialsTests(TestCase):
+
+ def setUp(self):
+ super().setUp()
+
+ self.server = os.environ["SERVER"]
+ self.domain = os.environ["DOMAIN"]
+ self.host = os.environ["SERVER_IP"]
+ self.lp = self.get_loadparm()
+
+ self.credentials = self.get_credentials()
+
+ self.session = system_session()
+ self.ldb = SamDB(url="ldap://%s" % self.host,
+ session_info=self.session,
+ credentials=self.credentials,
+ lp=self.lp)
+
+ self.create_machine_account()
+
+ def tearDown(self):
+ super().tearDown()
+ delete_force(self.ldb, self.machine_dn)
+
+ def test_get_named_ccache(self):
+ name = "MEMORY:py_creds_machine"
+ ccache = self.machine_creds.get_named_ccache(self.lp,
+ name)
+ self.assertEqual(ccache.get_name(), name)
+
+ def test_get_unnamed_ccache(self):
+ ccache = self.machine_creds.get_named_ccache(self.lp)
+ self.assertIsNotNone(ccache.get_name())
+
+ def test_set_named_ccache(self):
+ ccache = self.machine_creds.get_named_ccache(self.lp)
+
+ creds = Credentials()
+ creds.set_named_ccache(ccache.get_name())
+
+ ccache2 = creds.get_named_ccache(self.lp)
+ self.assertEqual(ccache.get_name(), ccache2.get_name())
+
+ #
+ # Create the machine account
+ def create_machine_account(self):
+ self.machine_pass = samba.generate_random_password(32, 32)
+ self.machine_name = MACHINE_NAME
+ self.machine_dn = "cn=%s,%s" % (self.machine_name, self.ldb.domain_dn())
+
+ # remove the account if it exists, this will happen if a previous test
+ # run failed
+ delete_force(self.ldb, self.machine_dn)
+ # get unicode str for both py2 and py3
+ pass_unicode = self.machine_pass.encode('utf-8').decode('utf-8')
+ utf16pw = u'"{0}"'.format(pass_unicode).encode('utf-16-le')
+ self.ldb.add({
+ "dn": self.machine_dn,
+ "objectclass": "computer",
+ "sAMAccountName": "%s$" % self.machine_name,
+ "userAccountControl":
+ str(UF_WORKSTATION_TRUST_ACCOUNT | UF_PASSWD_NOTREQD),
+ "unicodePwd": utf16pw})
+
+ self.machine_creds = Credentials()
+ self.machine_creds.guess(self.get_loadparm())
+ self.machine_creds.set_password(self.machine_pass)
+ self.machine_creds.set_username(self.machine_name + "$")
+ self.machine_creds.set_workstation(self.machine_name)
diff --git a/python/samba/tests/ldap_raw.py b/python/samba/tests/ldap_raw.py
new file mode 100644
index 0000000..548039f
--- /dev/null
+++ b/python/samba/tests/ldap_raw.py
@@ -0,0 +1,939 @@
+# Integration tests for the ldap server, using raw socket IO
+#
+# Tests for handling of malformed or large packets.
+#
+# Copyright (C) Catalyst.Net Ltd 2020
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import socket
+import ssl
+
+import samba.tests
+from samba.tests import TestCase
+
+
+#
+# LDAP Operations
+#
+DELETE = b'\x4a'
+DELETE_RES = b'\x6b'
+
+# Bind
+BIND = b'\x60'
+BIND_RES = b'\x61'
+SIMPLE_AUTH = b'\x80'
+SASL_AUTH = b'\xa3'
+
+# Search
+SEARCH = b'\x63'
+SEARCH_RES = b'\x64'
+EQUALS = b'\xa3'
+
+
+#
+# LDAP response codes.
+#
+SUCCESS = b'\x00'
+OPERATIONS_ERROR = b'\x01'
+INVALID_CREDENTIALS = b'\x31'
+INVALID_DN_SYNTAX = b'\x22'
+
+#
+# ASN.1 Element types
+#
+BOOLEAN = b'\x01'
+INTEGER = b'\x02'
+OCTET_STRING = b'\x04'
+NULL = b'\x05'
+ENUMERATED = b'\x0a'
+SEQUENCE = b'\x30'
+SET = b'\x31'
+
+
+#
+# ASN.1 Helper functions.
+#
+def encode_element(ber_type, data):
+ """ Encode an ASN.1 BER element. """
+ if data is None:
+ return ber_type + encode_length(0)
+ return ber_type + encode_length(len(data)) + data
+
+
+def encode_length(length):
+ """ Encode the length of an ASN.1 BER element. """
+
+ if length > 0xFFFFFF:
+ return b'\x84' + length.to_bytes(4, "big")
+ if length > 0xFFFF:
+ return b'\x83' + length.to_bytes(3, "big")
+ if length > 0xFF:
+ return b'\x82' + length.to_bytes(2, "big")
+ if length > 0x7F:
+ return b'\x81' + length.to_bytes(1, "big")
+ return length.to_bytes(1, "big")
+
+
+def encode_string(string):
+ """ Encode an octet string """
+ return encode_element(OCTET_STRING, string)
+
+
+def encode_boolean(boolean):
+ """ Encode a boolean value """
+ if boolean:
+ return encode_element(BOOLEAN, b'\xFF')
+ return encode_element(BOOLEAN, b'\x00')
+
+
+def encode_integer(integer):
+ """ Encode an integer value """
+ bit_len = integer.bit_length()
+ byte_len = (bit_len // 8) + 1
+ return encode_element(INTEGER, integer.to_bytes(byte_len, "big"))
+
+
+def encode_enumerated(enum):
+ """ Encode an enumerated value """
+ return encode_element(ENUMERATED, enum.to_bytes(1, "big"))
+
+
+def encode_sequence(sequence):
+ """ Encode a sequence """
+ return encode_element(SEQUENCE, sequence)
+
+
+def decode_element(data):
+ """
+ decode an ASN.1 element
+ """
+ if data is None:
+ return None
+
+ if len(data) < 2:
+ return None
+
+ ber_type = data[0:1]
+ enc = int.from_bytes(data[1:2], byteorder='big')
+ if enc & 0x80:
+ l_end = 2 + (enc & ~0x80)
+ length = int.from_bytes(data[2:l_end], byteorder='big')
+ element = data[l_end:l_end + length]
+ rest = data[l_end + length:]
+ else:
+ length = enc
+ element = data[2:2 + length]
+ rest = data[2 + length:]
+
+ return (ber_type, length, element, rest)
+
+
+class RawLdapTest(TestCase):
+ """
+ A raw Ldap Test case.
+    The ldap connections are made over ldaps (LDAP over TLS) on port 636
+
+ Uses the following environment variables:
+ SERVER
+ USERNAME
+ PASSWORD
+ DNSNAME
+ """
+
+ def setUp(self):
+ super().setUp()
+
+ self.host = samba.tests.env_get_var_value('SERVER')
+ self.port = 636
+ self.socket = None
+ self.user = samba.tests.env_get_var_value('USERNAME')
+ self.password = samba.tests.env_get_var_value('PASSWORD')
+ self.dns_name = samba.tests.env_get_var_value('DNSNAME')
+ self.connect()
+
+ def tearDown(self):
+ self.disconnect()
+ super().tearDown()
+
+ def disconnect(self):
+ """ Disconnect from and clean up the connection to the server """
+ if self.socket is None:
+ return
+ self.socket.close()
+ self.socket = None
+
+ def connect(self):
+ """ Establish an ldaps connection to the test server """
+ #
+ # Disable host name and certificate verification
+ context = ssl.create_default_context()
+ context.check_hostname = False
+ context.verify_mode = ssl.CERT_NONE
+
+ sock = None
+ try:
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ sock.settimeout(10)
+ sock.connect((self.host, self.port))
+ self.socket = context.wrap_socket(sock, server_hostname=self.host)
+ except socket.error:
+ sock.close()
+ if self.socket is not None:
+ self.socket.close()
+ raise
+
+ def send(self, req):
+ """ Send the request to the server """
+ try:
+ self.socket.sendall(req)
+ except socket.error:
+ self.disconnect()
+ raise
+
+ def recv(self, num_recv=0xffff, timeout=None):
+ """ receive an array of bytes from the server """
+ data = None
+ try:
+ if timeout is not None:
+ self.socket.settimeout(timeout)
+ data = self.socket.recv(num_recv, 0)
+ self.socket.settimeout(10)
+ if len(data) == 0:
+ self.disconnect()
+ return None
+ except socket.timeout:
+            # We ignore timeouts as the ldap server will drop the connection
+ # on the errors we're testing. So returning None on a timeout is
+ # the desired behaviour.
+ self.socket.settimeout(10)
+ except socket.error:
+ self.disconnect()
+ raise
+ return data
+
+ def bind(self):
+ """
+ Perform a simple bind
+ """
+
+ user = self.user.encode('UTF8')
+ ou = self.dns_name.replace('.', ',dc=').encode('UTF8')
+ dn = b'cn=' + user + b',cn=users,dc=' + ou
+
+ password = self.password.encode('UTF8')
+
+        # Lets build a simple bind request
+ bind = encode_integer(3) # ldap version
+ bind += encode_string(dn)
+ bind += encode_element(SIMPLE_AUTH, password)
+
+ bind_op = encode_element(BIND, bind)
+
+ msg_no = encode_integer(1)
+ packet = encode_sequence(msg_no + bind_op)
+
+ self.send(packet)
+ data = self.recv()
+ self.assertIsNotNone(data)
+
+ #
+ # Decode and validate the response
+
+ # Should be a sequence
+ (ber_type, length, element, rest) = decode_element(data)
+ self.assertEqual(SEQUENCE.hex(), ber_type.hex())
+ self.assertTrue(length > 0)
+ self.assertEqual(0, len(rest))
+
+ # message id should be 1
+ (ber_type, length, element, rest) = decode_element(element)
+ self.assertEqual(INTEGER.hex(), ber_type.hex())
+ msg_no = int.from_bytes(element, byteorder='big')
+ self.assertEqual(1, msg_no)
+ self.assertGreater(len(rest), 0)
+
+ # Should have a Bind response element
+ (ber_type, length, element, rest) = decode_element(rest)
+ self.assertEqual(BIND_RES.hex(), ber_type.hex())
+ self.assertEqual(0, len(rest))
+
+ # Check the response code
+ (ber_type, length, element, rest) = decode_element(element)
+ self.assertEqual(ENUMERATED.hex(), ber_type.hex())
+ self.assertEqual(SUCCESS.hex(), element.hex())
+ self.assertGreater(len(rest), 0)
+
+ def test_decode_element(self):
+ """ Tests for the decode_element method """
+
+ # Boolean true value
+ data = b'\x01\x01\xff'
+ (ber_type, length, element, rest) = decode_element(data)
+ self.assertEqual(BOOLEAN.hex(), ber_type.hex())
+ self.assertEqual(1, length)
+ self.assertEqual(b'\xff'.hex(), element.hex())
+ self.assertEqual(0, len(rest))
+
+ # Boolean false value
+ data = b'\x01\x01\x00'
+ (ber_type, length, element, rest) = decode_element(data)
+ self.assertEqual(BOOLEAN.hex(), ber_type.hex())
+ self.assertEqual(1, length)
+ self.assertEqual(b'\x00'.hex(), element.hex())
+ self.assertEqual(0, len(rest))
+
+ # Boolean true value with trailing data
+ data = b'\x01\x01\xff\x05\x00'
+ (ber_type, length, element, rest) = decode_element(data)
+ self.assertEqual(BOOLEAN.hex(), ber_type.hex())
+ self.assertEqual(1, length)
+ self.assertEqual(b'\xff'.hex(), element.hex())
+ self.assertEqual(b'\x05\x00'.hex(), rest.hex())
+
+ # Octet string byte length encoding
+ data = b'\x04\x02\xca\xfe\x05\x00'
+ (ber_type, length, element, rest) = decode_element(data)
+ self.assertEqual(OCTET_STRING.hex(), ber_type.hex())
+ self.assertEqual(2, length)
+ self.assertEqual(b'\xca\xfe'.hex(), element.hex())
+ self.assertEqual(b'\x05\x00'.hex(), rest.hex())
+
+ # Octet string 81 byte length encoding
+ data = b'\x04\x81\x02\xca\xfe\x05\x00'
+ (ber_type, length, element, rest) = decode_element(data)
+ self.assertEqual(OCTET_STRING.hex(), ber_type.hex())
+ self.assertEqual(2, length)
+ self.assertEqual(b'\xca\xfe'.hex(), element.hex())
+ self.assertEqual(b'\x05\x00'.hex(), rest.hex())
+
+ # Octet string 82 byte length encoding
+ data = b'\x04\x82\x00\x02\xca\xfe\x05\x00'
+ (ber_type, length, element, rest) = decode_element(data)
+ self.assertEqual(OCTET_STRING.hex(), ber_type.hex())
+ self.assertEqual(2, length)
+ self.assertEqual(b'\xca\xfe'.hex(), element.hex())
+ self.assertEqual(b'\x05\x00'.hex(), rest.hex())
+
+ # Octet string 85 byte length encoding
+ # For Samba we limit the length encoding to 4 bytes, but it's useful
+ # to be able to decode longer lengths in a test.
+ data = b'\x04\x85\x00\x00\x00\x00\x02\xca\xfe\x05\x00'
+ (ber_type, length, element, rest) = decode_element(data)
+ self.assertEqual(OCTET_STRING.hex(), ber_type.hex())
+ self.assertEqual(2, length)
+ self.assertEqual(b'\xca\xfe'.hex(), element.hex())
+ self.assertEqual(b'\x05\x00'.hex(), rest.hex())
+
+ def test_search_equals_maximum_permitted_size(self):
+ """
+ Check that an LDAP search request equal to the maximum size is accepted
+        This test is done on an authenticated connection so that the maximum
+ non search request is 16MiB.
+ """
+ self.bind()
+
+ # Lets build an ldap search packet to query the RootDSE
+ header = encode_string(None) # Base DN, ""
+ header += encode_enumerated(0) # Enumeration scope
+ header += encode_enumerated(0) # Enumeration dereference
+ header += encode_integer(0) # Integer size limit
+ header += encode_integer(0) # Integer time limit
+ header += encode_boolean(False) # Boolean attributes only
+
+ #
+ # build an equality search of the form x...x=y...y
+ # With the length of x...x and y...y chosen to generate an
+ # ldap request of 256000 bytes.
+ x = encode_string(b'x' * 127974)
+ y = encode_string(b'y' * 127979)
+ equals = encode_element(EQUALS, x + y)
+ trailer = encode_sequence(None)
+ search = encode_element(SEARCH, header + equals + trailer)
+
+ msg_no = encode_integer(2)
+ packet = encode_sequence(msg_no + search)
+ #
+ # The length of the packet should be equal to the
+ # Maximum length of a search query
+ self.assertEqual(256000, len(packet))
+
+ self.send(packet)
+ data = self.recv()
+ self.assertIsNotNone(data)
+
+ #
+ # Decode and validate the response
+
+ # Should be a sequence
+ (ber_type, length, element, rest) = decode_element(data)
+ self.assertEqual(SEQUENCE.hex(), ber_type.hex())
+ self.assertTrue(length > 0)
+ self.assertEqual(0, len(rest))
+
+ # message id should be 2
+ (ber_type, length, element, rest) = decode_element(element)
+ self.assertEqual(INTEGER.hex(), ber_type.hex())
+ msg_no = int.from_bytes(element, byteorder='big')
+ self.assertEqual(2, msg_no)
+ self.assertGreater(len(rest), 0)
+
+ # Should have a Search response element
+ (ber_type, length, element, rest) = decode_element(rest)
+ self.assertEqual(SEARCH_RES.hex(), ber_type.hex())
+ self.assertEqual(0, len(rest))
+
+ # Should have an empty matching DN
+ (ber_type, length, element, rest) = decode_element(element)
+ self.assertEqual(OCTET_STRING.hex(), ber_type.hex())
+ self.assertEqual(0, len(element))
+ self.assertGreater(len(rest), 0)
+
+ # Then a sequence of attribute sequences
+ (ber_type, length, element, rest) = decode_element(rest)
+ self.assertEqual(SEQUENCE.hex(), ber_type.hex())
+ self.assertEqual(0, len(rest))
+
+ # Check the first attribute sequence, it should be
+ # "configurationNamingContext"
+ # The remaining attribute sequences will be ignored but
+ # check that they exist.
+ (ber_type, length, element, rest) = decode_element(element)
+ self.assertEqual(SEQUENCE.hex(), ber_type.hex())
+ # Check that there are remaining attribute sequences.
+ self.assertGreater(len(rest), 0)
+
+ # Check the name of the first attribute
+ (ber_type, length, element, rest) = decode_element(element)
+ self.assertEqual(OCTET_STRING.hex(), ber_type.hex())
+ self.assertGreater(len(rest), 0)
+ self.assertEqual(b'configurationNamingContext', element)
+
+ # And check that there is an attribute value set
+ (ber_type, length, element, rest) = decode_element(rest)
+ self.assertEqual(SET.hex(), ber_type.hex())
+ self.assertGreater(len(element), 0)
+ self.assertEqual(0, len(rest))
+
+ def test_search_exceeds_maximum_permitted_size(self):
+ """
+ Test that a search query longer than the maximum permitted
+ size is rejected.
+        This test is done on an authenticated connection so that the maximum
+ non search request is 16MiB.
+ """
+
+ self.bind()
+
+ # Lets build an ldap search packet to query the RootDSE
+ header = encode_string(None) # Base DN, ""
+ header += encode_enumerated(0) # Enumeration scope
+ header += encode_enumerated(0) # Enumeration dereference
+ header += encode_integer(0) # Integer size limit
+ header += encode_integer(0) # Integer time limit
+ header += encode_boolean(False) # Boolean attributes only
+
+ #
+ # build an equality search of the form x...x=y...y
+ # With the length of x...x and y...y chosen to generate an
+ # ldap request of 256001 bytes.
+ x = encode_string(b'x' * 127979)
+ y = encode_string(b'y' * 127975)
+ equals = encode_element(EQUALS, x + y)
+ trailer = encode_sequence(None)
+ search = encode_element(SEARCH, header + equals + trailer)
+
+ msg_no = encode_integer(2)
+ packet = encode_sequence(msg_no + search)
+ #
+ # The length of the sequence data should be one greater than the
+ # Maximum length of a search query
+ self.assertEqual(256001, len(packet))
+
+ self.send(packet)
+ data = self.recv()
+ #
+ # The connection should be closed by the server and we should not
+ # see any data.
+ self.assertIsNone(data)
+
+ def test_simple_anonymous_bind(self):
+ """
+ Test a simple anonymous bind
+ """
+
+ # Lets build an anonymous simple bind request
+ bind = encode_integer(3) # ldap version
+ bind += encode_string(b'') # Empty name
+ bind += encode_element(SIMPLE_AUTH, b'') # Empty password
+
+ bind_op = encode_element(BIND, bind)
+
+ msg_no = encode_integer(1)
+ packet = encode_sequence(msg_no + bind_op)
+
+ self.send(packet)
+ data = self.recv()
+ self.assertIsNotNone(data)
+
+ #
+ # Decode and validate the response
+
+ # Should be a sequence
+ (ber_type, length, element, rest) = decode_element(data)
+ self.assertEqual(SEQUENCE.hex(), ber_type.hex())
+ self.assertTrue(length > 0)
+ self.assertEqual(0, len(rest))
+
+ # message id should be 1
+ (ber_type, length, element, rest) = decode_element(element)
+ self.assertEqual(INTEGER.hex(), ber_type.hex())
+ msg_no = int.from_bytes(element, byteorder='big')
+ self.assertEqual(1, msg_no)
+ self.assertGreater(len(rest), 0)
+
+ # Should have a Bind response element
+ (ber_type, length, element, rest) = decode_element(rest)
+ self.assertEqual(BIND_RES.hex(), ber_type.hex())
+ self.assertEqual(0, len(rest))
+
+ # Check the response code
+ (ber_type, length, element, rest) = decode_element(element)
+ self.assertEqual(ENUMERATED.hex(), ber_type.hex())
+ self.assertEqual(SUCCESS.hex(), element.hex())
+ self.assertGreater(len(rest), 0)
+
+ def test_simple_bind_at_limit(self):
+ """
+ Test a simple bind, with a large invalid
+ user name. As the resulting packet is equal
+ to the maximum unauthenticated packet size we should see
+ an INVALID_CREDENTIALS response
+ """
+
+ # Lets build a simple bind request
+ bind = encode_integer(3) # ldap version
+ bind += encode_string(b' ' * 255977) # large name
+ bind += encode_element(SIMPLE_AUTH, b'') # Empty password
+
+ bind_op = encode_element(BIND, bind)
+
+ msg_no = encode_integer(1)
+ packet = encode_sequence(msg_no + bind_op)
+ #
+ # The length of the sequence data should be equal to the maximum
+ # Unauthenticated packet length
+ self.assertEqual(256000, len(packet))
+
+ self.send(packet)
+ data = self.recv()
+ self.assertIsNotNone(data)
+
+ #
+ # Decode and validate the response
+
+ # Should be a sequence
+ (ber_type, length, element, rest) = decode_element(data)
+ self.assertEqual(SEQUENCE.hex(), ber_type.hex())
+ self.assertTrue(length > 0)
+ self.assertEqual(0, len(rest))
+
+ # message id should be 1
+ (ber_type, length, element, rest) = decode_element(element)
+ self.assertEqual(INTEGER.hex(), ber_type.hex())
+ msg_no = int.from_bytes(element, byteorder='big')
+ self.assertEqual(1, msg_no)
+ self.assertGreater(len(rest), 0)
+
+ # Should have a Bind response element
+ (ber_type, length, element, rest) = decode_element(rest)
+ self.assertEqual(BIND_RES.hex(), ber_type.hex())
+ self.assertEqual(0, len(rest))
+
+ # Check the response code
+ (ber_type, length, element, rest) = decode_element(element)
+ self.assertEqual(ENUMERATED.hex(), ber_type.hex())
+ self.assertEqual(INVALID_CREDENTIALS.hex(), element.hex())
+ self.assertGreater(len(rest), 0)
+
+ def test_simple_bind_gt_limit(self):
+ """
+ Test a simple bind, with a large invalid
+ user name. As the resulting packet is one greater than
+ the maximum unauthenticated packet size we should see
+ the connection reset.
+ """
+
+ # Lets build a simple bind request
+ bind = encode_integer(3) # ldap version
+ bind += encode_string(b' ' * 255978) # large name
+ bind += encode_element(SIMPLE_AUTH, b'') # Empty password
+
+ bind_op = encode_element(BIND, bind)
+
+ msg_no = encode_integer(1)
+ packet = encode_sequence(msg_no + bind_op)
+ #
+ # The length of the sequence data should be equal to the maximum
+ # Unauthenticated packet length
+ self.assertEqual(256001, len(packet))
+
+ self.send(packet)
+ data = self.recv()
+ self.assertIsNone(data)
+
+ def test_unauthenticated_delete_at_limit(self):
+ """
+ Test a delete, with a large invalid DN
+ As the resulting packet is equal to the maximum unauthenticated
+ packet size we should see an INVALID_DN_SYNTAX response
+ """
+
+ # Lets build a delete request, with a large invalid DN
+ dn = b' ' * 255987
+ del_op = encode_element(DELETE, dn)
+
+ msg_no = encode_integer(1)
+ packet = encode_sequence(msg_no + del_op)
+ #
+ # The length of the sequence data should be equal to the maximum
+ # Unauthenticated packet length
+ self.assertEqual(256000, len(packet))
+
+ self.send(packet)
+ data = self.recv()
+ self.assertIsNotNone(data)
+
+ #
+ # Decode and validate the response
+
+ # Should be a sequence
+ (ber_type, length, element, rest) = decode_element(data)
+ self.assertEqual(SEQUENCE.hex(), ber_type.hex())
+ self.assertTrue(length > 0)
+ self.assertEqual(0, len(rest))
+
+ # message id should be 1
+ (ber_type, length, element, rest) = decode_element(element)
+ self.assertEqual(INTEGER.hex(), ber_type.hex())
+ msg_no = int.from_bytes(element, byteorder='big')
+ self.assertEqual(1, msg_no)
+ self.assertGreater(len(rest), 0)
+
+ # Should have a delete response element
+ (ber_type, length, element, rest) = decode_element(rest)
+ self.assertEqual(DELETE_RES.hex(), ber_type.hex())
+ self.assertEqual(0, len(rest))
+
+ # Check the response code
+ (ber_type, length, element, rest) = decode_element(element)
+ self.assertEqual(ENUMERATED.hex(), ber_type.hex())
+ self.assertEqual(INVALID_DN_SYNTAX.hex(), element.hex())
+ self.assertGreater(len(rest), 0)
+
+ def test_unauthenticated_delete_gt_limit(self):
+ """
+ Test a delete, with a large invalid DN
+ As the resulting packet is greater than the maximum unauthenticated
+ packet size we should see a connection reset
+ """
+
+ # Lets build a delete request, with a large invalid DN
+ dn = b' ' * 255988
+ del_op = encode_element(DELETE, dn)
+
+ msg_no = encode_integer(1)
+ packet = encode_sequence(msg_no + del_op)
+ #
+        # The length of the sequence data should be one greater than the maximum
+ # unauthenticated packet length
+ self.assertEqual(256001, len(packet))
+
+ self.send(packet)
+ data = self.recv()
+ self.assertIsNone(data)
+
+ def test_authenticated_delete_at_limit(self):
+ """
+ Test a delete, with a large invalid DN
+ As the resulting packet is equal to the maximum authenticated
+ packet size we should see an INVALID_DN_SYNTAX response
+ """
+
+ # Lets build a delete request, with a large invalid DN
+ dn = b' ' * 16777203
+ del_op = encode_element(DELETE, dn)
+
+ self.bind()
+
+ msg_no = encode_integer(2)
+ packet = encode_sequence(msg_no + del_op)
+ #
+ # The length of the sequence data should be equal to the maximum
+ # authenticated packet length currently 16MiB
+ self.assertEqual(16 * 1024 * 1024, len(packet))
+
+ self.send(packet)
+ data = self.recv()
+ self.assertIsNotNone(data)
+
+ #
+ # Decode and validate the response
+
+ # Should be a sequence
+ (ber_type, length, element, rest) = decode_element(data)
+ self.assertEqual(SEQUENCE.hex(), ber_type.hex())
+ self.assertTrue(length > 0)
+ self.assertEqual(0, len(rest))
+
+ # message id should be 2
+ (ber_type, length, element, rest) = decode_element(element)
+ self.assertEqual(INTEGER.hex(), ber_type.hex())
+ msg_no = int.from_bytes(element, byteorder='big')
+ self.assertEqual(2, msg_no)
+ self.assertGreater(len(rest), 0)
+
+ # Should have a delete response element
+ (ber_type, length, element, rest) = decode_element(rest)
+ self.assertEqual(DELETE_RES.hex(), ber_type.hex())
+ self.assertEqual(0, len(rest))
+
+ # Check the response code
+ (ber_type, length, element, rest) = decode_element(element)
+ self.assertEqual(ENUMERATED.hex(), ber_type.hex())
+ self.assertEqual(INVALID_DN_SYNTAX.hex(), element.hex())
+ self.assertGreater(len(rest), 0)
+
+ def test_authenticated_delete_gt_limit(self):
+ """
+ Test a delete, with a large invalid DN
+ As the resulting packet is one greater than the maximum
+ authenticated packet size we should see a connection reset
+ """
+
+ # Lets build a delete request, with a large invalid DN
+ dn = b' ' * 16777204
+ del_op = encode_element(DELETE, dn)
+
+ self.bind()
+
+ msg_no = encode_integer(2)
+ packet = encode_sequence(msg_no + del_op)
+ #
+ # The length of the sequence data should be one greater than the
+ # maximum authenticated packet length currently 16MiB
+ self.assertEqual(16 * 1024 * 1024 + 1, len(packet))
+
+ self.send(packet)
+ data = self.recv()
+ self.assertIsNone(data)
+
+
+class RawCldapTest(TestCase):
+ """
+ A raw cldap Test case.
+ The ldap connections are made over UDP port 389
+
+ Uses the following environment variables:
+ SERVER
+ """
+
+ def setUp(self):
+ super().setUp()
+
+ self.host = samba.tests.env_get_var_value('SERVER')
+ self.port = 389
+ self.socket = None
+ self.connect()
+
+ def tearDown(self):
+ self.disconnect()
+ super().tearDown()
+
+ def disconnect(self):
+ """ Disconnect from and clean up the connection to the server """
+ if self.socket is None:
+ return
+ self.socket.close()
+ self.socket = None
+
+ def connect(self):
+ """ Establish an UDP connection to the test server """
+
+ try:
+ self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+ self.socket.settimeout(10)
+ self.socket.connect((self.host, self.port))
+ except socket.error:
+ if self.socket is not None:
+ self.socket.close()
+ raise
+
+ def send(self, req):
+ """ Send the request to the server """
+ try:
+ self.socket.sendall(req)
+ except socket.error:
+ self.disconnect()
+ raise
+
+ def recv(self, num_recv=0xffff, timeout=None):
+ """ receive an array of bytes from the server """
+ data = None
+ try:
+ if timeout is not None:
+ self.socket.settimeout(timeout)
+ data = self.socket.recv(num_recv, 0)
+ self.socket.settimeout(10)
+ if len(data) == 0:
+ self.disconnect()
+ return None
+ except socket.timeout:
+            # We ignore timeouts as the ldap server will drop the connection
+ # on the errors we're testing. So returning None on a timeout is
+ # the desired behaviour.
+ self.socket.settimeout(10)
+ except socket.error:
+ self.disconnect()
+ raise
+ return data
+
+ def test_search_equals_maximum_permitted_size(self):
+ """
+        Check that a CLDAP search request equal to the maximum size is
+ accepted
+ """
+
+ # Lets build an ldap search packet to query the RootDSE
+ header = encode_string(None) # Base DN, ""
+ header += encode_enumerated(0) # Enumeration scope
+ header += encode_enumerated(0) # Enumeration dereference
+ header += encode_integer(0) # Integer size limit
+ header += encode_integer(0) # Integer time limit
+ header += encode_boolean(False) # Boolean attributes only
+
+ #
+ # build an equality search of the form x...x=y...y
+ # With the length of x...x and y...y chosen to generate an
+ # cldap request of 4096 bytes.
+ x = encode_string(b'x' * 2027)
+ y = encode_string(b'y' * 2027)
+ equals = encode_element(EQUALS, x + y)
+ trailer = encode_sequence(None)
+ search = encode_element(SEARCH, header + equals + trailer)
+
+ msg_no = encode_integer(2)
+ packet = encode_sequence(msg_no + search)
+ #
+ # The length of the packet should be equal to the
+ # Maximum length of a cldap packet
+ self.assertEqual(4096, len(packet))
+
+ self.send(packet)
+ data = self.recv()
+ self.assertIsNotNone(data)
+
+ #
+ # Decode and validate the response
+
+ # Should be a sequence
+ (ber_type, length, element, rest) = decode_element(data)
+ self.assertEqual(SEQUENCE.hex(), ber_type.hex())
+ self.assertTrue(length > 0)
+ self.assertGreater(len(rest), 0)
+ # rest should contain a Search request done element, but it's
+ # not validated in this test.
+
+ # message id should be 2
+ (ber_type, length, element, rest) = decode_element(element)
+ self.assertEqual(INTEGER.hex(), ber_type.hex())
+ msg_no = int.from_bytes(element, byteorder='big')
+ self.assertEqual(2, msg_no)
+ self.assertGreater(len(rest), 0)
+
+ # Should have a Search response element
+ (ber_type, length, element, rest) = decode_element(rest)
+ self.assertEqual(SEARCH_RES.hex(), ber_type.hex())
+ self.assertEqual(0, len(rest))
+
+ # Should have an empty matching DN
+ (ber_type, length, element, rest) = decode_element(element)
+ self.assertEqual(OCTET_STRING.hex(), ber_type.hex())
+ self.assertEqual(0, len(element))
+ self.assertGreater(len(rest), 0)
+
+ # Then a sequence of attribute sequences
+ (ber_type, length, element, rest) = decode_element(rest)
+ self.assertEqual(SEQUENCE.hex(), ber_type.hex())
+ self.assertEqual(0, len(rest))
+
+ # Check the first attribute sequence, it should be
+ # "configurationNamingContext"
+ # The remaining attribute sequences will be ignored but
+ # check that they exist.
+ (ber_type, length, element, rest) = decode_element(element)
+ self.assertEqual(SEQUENCE.hex(), ber_type.hex())
+ # Check that there are remaining attribute sequences.
+ self.assertGreater(len(rest), 0)
+
+ # Check the name of the first attribute
+ (ber_type, length, element, rest) = decode_element(element)
+ self.assertEqual(OCTET_STRING.hex(), ber_type.hex())
+ self.assertGreater(len(rest), 0)
+ self.assertEqual(b'configurationNamingContext', element)
+
+ # And check that there is an attribute value set
+ (ber_type, length, element, rest) = decode_element(rest)
+ self.assertEqual(SET.hex(), ber_type.hex())
+ self.assertGreater(len(element), 0)
+ self.assertEqual(0, len(rest))
+
+ def test_search_exceeds_maximum_permitted_size(self):
+ """
+ Test that a cldap request longer than the maximum permitted
+ size is rejected.
+ """
+
+ # Lets build an ldap search packet to query the RootDSE
+ header = encode_string(None) # Base DN, ""
+ header += encode_enumerated(0) # Enumeration scope
+ header += encode_enumerated(0) # Enumeration dereference
+ header += encode_integer(0) # Integer size limit
+ header += encode_integer(0) # Integer time limit
+ header += encode_boolean(False) # Boolean attributes only
+
+ #
+ # build an equality search of the form x...x=y...y
+ # With the length of x...x and y...y chosen to generate an
+ # cldap request of 4097 bytes.
+ x = encode_string(b'x' * 2027)
+ y = encode_string(b'y' * 2028)
+ equals = encode_element(EQUALS, x + y)
+ trailer = encode_sequence(None)
+ search = encode_element(SEARCH, header + equals + trailer)
+
+ msg_no = encode_integer(2)
+ packet = encode_sequence(msg_no + search)
+ #
+ # The length of the sequence data should be one greater than the
+ # Maximum length of a cldap packet
+ self.assertEqual(4097, len(packet))
+
+ self.send(packet)
+ data = self.recv()
+ #
+ # The connection should be closed by the server and we should not
+ # see any data.
+ self.assertIsNone(data)
diff --git a/python/samba/tests/ldap_referrals.py b/python/samba/tests/ldap_referrals.py
new file mode 100644
index 0000000..406b196
--- /dev/null
+++ b/python/samba/tests/ldap_referrals.py
@@ -0,0 +1,87 @@
+# Test that ldap referral entries are created and formatted correctly
+#
+# Copyright (C) Andrew Bartlett 2019
+#
+# Based on Unit tests for the notification control
+# Copyright (C) Stefan Metzmacher 2016
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import optparse
+import os
+import sys
+
+import samba
+from samba.auth import system_session
+import samba.getopt as options
+from samba import ldb
+from samba.samdb import SamDB
+import samba.tests
+from samba.tests.subunitrun import SubunitOptions
+
+sys.path.insert(0, "bin/python")
+parser = optparse.OptionParser("ldap_referrals.py [options]")
+sambaopts = options.SambaOptions(parser)
+parser.add_option_group(sambaopts)
+parser.add_option_group(options.VersionOptions(parser))
+# use command line creds if available
+credopts = options.CredentialsOptions(parser)
+parser.add_option_group(credopts)
+subunitopts = SubunitOptions(parser)
+parser.add_option_group(subunitopts)
+opts, args = parser.parse_args()
+
+lp = sambaopts.get_loadparm()
+creds = credopts.get_credentials(lp)
+
+
+class LdapReferralTest(samba.tests.TestCase):
+
+ # The referral entries for an ldap request should have the ldap scheme
+ # i.e. they should all start with "ldap://"
+ def test_ldap_search(self):
+ server = os.environ["SERVER"]
+ url = "ldap://{0}".format(server)
+ db = SamDB(
+ url, credentials=creds, session_info=system_session(lp), lp=lp)
+ res = db.search(
+ base=db.domain_dn(),
+ expression="(objectClass=nonexistent)",
+ scope=ldb.SCOPE_SUBTREE,
+ attrs=["objectGUID", "samAccountName"])
+
+ referrals = res.referals
+ for referral in referrals:
+ self.assertTrue(
+ referral.startswith("ldap://"),
+ "{0} does not start with ldap://".format(referral))
+
+ # The referral entries for an ldaps request should have the ldaps scheme
+ # i.e. they should all start with "ldaps://"
+ def test_ldaps_search(self):
+ server = os.environ["SERVER"]
+ url = "ldaps://{0}".format(server)
+ db = SamDB(
+ url, credentials=creds, session_info=system_session(lp), lp=lp)
+ res = db.search(
+ base=db.domain_dn(),
+ expression="(objectClass=nonexistent)",
+ scope=ldb.SCOPE_SUBTREE,
+ attrs=["objectGUID", "samAccountName"])
+
+ referrals = res.referals
+ for referral in referrals:
+ self.assertTrue(
+ referral.startswith("ldaps://"),
+ "{0} does not start with ldaps://".format(referral))
diff --git a/python/samba/tests/ldap_spn.py b/python/samba/tests/ldap_spn.py
new file mode 100644
index 0000000..6ebdf8f
--- /dev/null
+++ b/python/samba/tests/ldap_spn.py
@@ -0,0 +1,924 @@
+# Unix SMB/CIFS implementation.
+#
+# Copyright 2021 (C) Catalyst IT Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+import sys
+import os
+import pprint
+import re
+from samba.samdb import SamDB
+from samba.auth import system_session
+import ldb
+from samba.sd_utils import SDUtils
+from samba.credentials import DONT_USE_KERBEROS, Credentials
+from samba.gensec import FEATURE_SEAL
+from samba.tests.subunitrun import SubunitOptions, TestProgram
+from samba.tests import TestCase, ldb_err
+from samba.tests import DynamicTestCase
+import samba.getopt as options
+import optparse
+from samba.colour import c_RED, c_GREEN, c_DARK_YELLOW
+from samba.dsdb import (
+ UF_SERVER_TRUST_ACCOUNT,
+ UF_TRUSTED_FOR_DELEGATION,
+)
+
+
+SPN_GUID = 'f3a64788-5306-11d1-a9c5-0000f80367c1'
+
+RELEVANT_ATTRS = {'dNSHostName',
+ 'servicePrincipalName',
+ 'sAMAccountName',
+ 'dn'}
+
+ok = True
+bad = False
+report = 'report'
+
+operr = ldb.ERR_OPERATIONS_ERROR
+denied = ldb.ERR_INSUFFICIENT_ACCESS_RIGHTS
+constraint = ldb.ERR_CONSTRAINT_VIOLATION
+exists = ldb.ERR_ENTRY_ALREADY_EXISTS
+
+add = ldb.FLAG_MOD_ADD
+replace = ldb.FLAG_MOD_REPLACE
+delete = ldb.FLAG_MOD_DELETE
+
+try:
+ breakpoint
+except NameError:
+ # for python <= 3.6
+ def breakpoint():
+ import pdb
+ pdb.set_trace()
+
+
+def init():
+ # This needs to happen before the class definition, and we put it
+ # in a function to keep the namespace clean.
+ global LP, CREDS, SERVER, REALM, COLOUR_TEXT, subunitopts, FILTER
+
+ parser = optparse.OptionParser(
+ "python3 ldap_spn.py <server> [options]")
+ sambaopts = options.SambaOptions(parser)
+ parser.add_option_group(sambaopts)
+
+ # use command line creds if available
+ credopts = options.CredentialsOptions(parser)
+ parser.add_option_group(credopts)
+ subunitopts = SubunitOptions(parser)
+ parser.add_option_group(subunitopts)
+
+ parser.add_option('--colour', action="store_true",
+ help="use colour text",
+ default=sys.stdout.isatty())
+
+ parser.add_option('--filter', help="only run tests matching this regex")
+
+ opts, args = parser.parse_args()
+ if len(args) != 1:
+ parser.print_usage()
+ sys.exit(1)
+
+ LP = sambaopts.get_loadparm()
+ CREDS = credopts.get_credentials(LP)
+ SERVER = args[0]
+ REALM = CREDS.get_realm()
+ COLOUR_TEXT = opts.colour
+ FILTER = opts.filter
+
+
+init()
+
+
+def colour_text(x, state=None):
+ if not COLOUR_TEXT:
+ return x
+ if state == 'error':
+ return c_RED(x)
+ if state == 'pass':
+ return c_GREEN(x)
+
+ return c_DARK_YELLOW(x)
+
+
+def get_samdb(creds=None):
+ if creds is None:
+ creds = CREDS
+ session = system_session()
+ else:
+ session = None
+
+ return SamDB(url=f"ldap://{SERVER}",
+ lp=LP,
+ session_info=session,
+ credentials=creds)
+
+
+def add_unpriv_user(samdb, ou, username,
+ writeable_objects=None,
+ password="samba123@"):
+ creds = Credentials()
+ creds.set_username(username)
+ creds.set_password(password)
+ creds.set_domain(CREDS.get_domain())
+ creds.set_realm(CREDS.get_realm())
+ creds.set_workstation(CREDS.get_workstation())
+ creds.set_gensec_features(CREDS.get_gensec_features() | FEATURE_SEAL)
+ creds.set_kerberos_state(DONT_USE_KERBEROS)
+ dnstr = f"CN={username},{ou}"
+
+ # like, WTF, samdb.newuser(), this is what you make us do.
+ short_ou = ou.split(',', 1)[0]
+
+ samdb.newuser(username, password, userou=short_ou)
+
+ if writeable_objects:
+ sd_utils = SDUtils(samdb)
+ sid = sd_utils.get_object_sid(dnstr)
+ for obj in writeable_objects:
+ mod = f"(OA;CI;WP;{ SPN_GUID };;{ sid })"
+ sd_utils.dacl_add_ace(obj, mod)
+
+ unpriv_samdb = get_samdb(creds=creds)
+ return unpriv_samdb
+
+
+class LdapSpnTestBase(TestCase):
+ _disabled = False
+
+ @classmethod
+ def setUpDynamicTestCases(cls):
+ if getattr(cls, '_disabled', False):
+ return
+ for doc, *rows in cls.cases:
+ if FILTER:
+ if not re.search(FILTER, doc):
+ continue
+ name = re.sub(r'\W+', '_', doc)
+ cls.generate_dynamic_test("test_spn", name, rows, doc)
+
+ def setup_objects(self, rows):
+ objects = set(r[0] for r in rows)
+ for name in objects:
+ if ':' in name:
+ objtype, name = name.split(':', 1)
+ else:
+ objtype = 'dc'
+ getattr(self, f'add_{objtype}')(name)
+
+ def setup_users(self, rows):
+ # When you are adding an SPN that aliases (or would be aliased
+ # by) another SPN on another object, you need to have write
+ # permission on that other object too.
+ #
+ # To test this negatively and positively, we need to have
+ # users with various combinations of write permission, which
+ # means fiddling with SDs on the objects.
+ #
+ # The syntax is:
+ # '' : user with no special permissions
+ # '*' : admin user
+ # 'A' : user can write to A only
+ # 'A,C' : user can write to A and C
+ # 'C,A' : same, but makes another user
+ self.userdbs = {
+ '*': self.samdb
+ }
+
+ permissions = set(r[2] for r in rows)
+ for p in permissions:
+ if p == '*':
+ continue
+ if p == '':
+ user = 'nobody'
+ writeable_objects = None
+ else:
+ user = 'writes_' + p.replace(",", '_')
+ writeable_objects = [self.objects[x][0] for x in p.split(',')]
+
+ self.userdbs[p] = add_unpriv_user(self.samdb, self.ou, user,
+ writeable_objects)
+
+ def _test_spn_with_args(self, rows, doc):
+ cdoc = colour_text(doc)
+ edoc = colour_text(doc, 'error')
+ pdoc = colour_text(doc, 'pass')
+
+ if COLOUR_TEXT:
+ sys.stderr.flush()
+ print('\n', c_DARK_YELLOW('#' * 10), f'starting «{cdoc}»\n')
+ sys.stdout.flush()
+
+ self.samdb = get_samdb()
+ self.base_dn = self.samdb.get_default_basedn()
+ self.short_id = self.id().rsplit('.', 1)[1][:63]
+ self.objects = {}
+ self.ou = f"OU={ self.short_id },{ self.base_dn }"
+ self.addCleanup(self.samdb.delete, self.ou, ["tree_delete:1"])
+ self.samdb.create_ou(self.ou)
+
+ self.setup_objects(rows)
+ self.setup_users(rows)
+
+ for i, row in enumerate(rows):
+ if len(row) == 5:
+ obj, data, rights, expected, op = row
+ else:
+ obj, data, rights, expected = row
+ op = ldb.FLAG_MOD_REPLACE
+
+ # We use this DB with possibly restricted rights for this row
+ samdb = self.userdbs[rights]
+
+ if ':' in obj:
+ objtype, obj = obj.split(':', 1)
+ else:
+ objtype = 'dc'
+
+ dn, dnsname = self.objects[obj]
+ m = {"dn": dn}
+
+ if isinstance(data, dict):
+ m.update(data)
+ else:
+ m['servicePrincipalName'] = data
+
+ # for python's sake (and our sanity) we try to ensure we
+ # have consistent canonical case in our attributes
+ keys = set(m.keys())
+ if not keys.issubset(RELEVANT_ATTRS):
+ raise ValueError(f"unexpected attr {keys - RELEVANT_ATTRS}. "
+ "Casefold typo?")
+
+ for k in ('dNSHostName', 'servicePrincipalName'):
+ if isinstance(m.get(k), str):
+ m[k] = m[k].format(dnsname=f"x.{REALM}")
+ elif isinstance(m.get(k), list):
+ m[k] = [x.format(dnsname=f"x.{REALM}") for x in m[k]]
+
+ msg = ldb.Message.from_dict(samdb, m, op)
+
+ if expected is bad:
+ try:
+ samdb.modify(msg)
+ except ldb.LdbError as e:
+ print(f"row {i+1} of '{pdoc}' failed as expected with "
+ f"{ldb_err(e)}\n")
+ continue
+ self.fail(f"row {i+1}: "
+ f"{rights} {pprint.pformat(m)} on {objtype} {obj} "
+ f"should fail ({edoc})")
+
+ elif expected is ok:
+ try:
+ samdb.modify(msg)
+ except ldb.LdbError as e:
+ self.fail(f"row {i+1} of {edoc} failed with {ldb_err(e)}:\n"
+ f"{rights} {pprint.pformat(m)} on {objtype} {obj}")
+
+ elif expected is report:
+ try:
+ self.samdb.modify(msg)
+ print(f"row {i+1} "
+ f"of '{cdoc}' {colour_text('SUCCEEDED', 'pass')}:\n"
+ f"{pprint.pformat(m)} on {obj}")
+ except ldb.LdbError as e:
+ print(f"row {i+1} "
+ f"of '{cdoc}' {colour_text('FAILED', 'error')} "
+ f"with {ldb_err(e)}:\n{pprint.pformat(m)} on {obj}")
+
+ elif expected is breakpoint:
+ try:
+ breakpoint()
+ samdb.modify(msg)
+ except ldb.LdbError as e:
+ print(f"row {i+1} of '{pdoc}' FAILED with {ldb_err(e)}\n")
+
+ else: # an ldb error number
+ try:
+ samdb.modify(msg)
+ except ldb.LdbError as e:
+ if e.args[0] == expected:
+ continue
+ self.fail(f"row {i+1} of '{edoc}' "
+ f"should have failed with {ldb_err(expected)}:\n"
+ f"not {ldb_err(e)}:\n"
+ f"{rights} {pprint.pformat(m)} on {objtype} {obj}")
+ self.fail(f"row {i+1} of '{edoc}' "
+ f"should have failed with {ldb_err(expected)}:\n"
+ f"{rights} {pprint.pformat(m)} on {objtype} {obj}")
+
+ def add_dc(self, name):
+ dn = f"CN={name},OU=Domain Controllers,{self.base_dn}"
+ dnsname = f"{name}.{REALM}".lower()
+ self.samdb.add({
+ "dn": dn,
+ "objectclass": "computer",
+ "userAccountControl": str(UF_SERVER_TRUST_ACCOUNT |
+ UF_TRUSTED_FOR_DELEGATION),
+ "dnsHostName": dnsname,
+ "carLicense": self.id()
+ })
+ self.addCleanup(self.remove_object, name)
+ self.objects[name] = (dn, dnsname)
+
+ def add_user(self, name):
+ dn = f"CN={name},{self.ou}"
+ self.samdb.add({
+ "dn": dn,
+ "name": name,
+ "samAccountName": name,
+ "objectclass": "user",
+ "carLicense": self.id()
+ })
+ self.addCleanup(self.remove_object, name)
+ self.objects[name] = (dn, None)
+
+ def remove_object(self, name):
+ dn, dnsname = self.objects.pop(name)
+ self.samdb.delete(dn)
+
+
+@DynamicTestCase
+class LdapSpnTest(LdapSpnTestBase):
+ """Make sure we can't add clashing servicePrincipalNames.
+
+ This would be possible using sPNMappings aliases — for example, if
+ the mapping maps host/ to cifs/, we should not be able to add
+ different addresses for each.
+ """
+
+ # default sPNMappings: host=alerter, appmgmt, cisvc, clipsrv,
+ # browser, dhcp, dnscache, replicator, eventlog, eventsystem,
+ # policyagent, oakley, dmserver, dns, mcsvc, fax, msiserver, ias,
+ # messenger, netlogon, netman, netdde, netddedsm, nmagent,
+ # plugplay, protectedstorage, rasman, rpclocator, rpc, rpcss,
+ # remoteaccess, rsvp, samss, scardsvr, scesrv, seclogon, scm,
+ # dcom, cifs, spooler, snmp, schedule, tapisrv, trksvr, trkwks,
+ # ups, time, wins, www, http, w3svc, iisadmin, msdtc
+ #
+ # I think in practice this is rarely if ever changed or added to.
+
+ cases = [
+ ("add one as admin",
+ ('A', 'host/{dnsname}', '*', ok),
+ ),
+ ("add one as rightful user",
+ ('A', 'host/{dnsname}', 'A', ok),
+ ),
+ ("attempt to add one as nobody",
+ ('A', 'host/{dnsname}', '', denied),
+ ),
+
+ ("add and replace as admin",
+ ('A', 'host/{dnsname}', '*', ok),
+ ('A', 'host/x.{dnsname}', '*', ok),
+ ),
+ ("replace as rightful user",
+ ('A', 'host/{dnsname}', 'A', ok),
+ ('A', 'host/x.{dnsname}', 'A', ok),
+ ),
+ ("attempt to replace one as nobody",
+ ('A', 'host/{dnsname}', '*', ok),
+ ('A', 'host/x.{dnsname}', '', denied),
+ ),
+
+ ("add second as admin",
+ ('A', 'host/{dnsname}', '*', ok),
+ ('A', 'host/x.{dnsname}', '*', ok, add),
+ ),
+ ("add second as rightful user",
+ ('A', 'host/{dnsname}', 'A', ok),
+ ('A', 'host/x.{dnsname}', 'A', ok, add),
+ ),
+ ("attempt to add second as nobody",
+ ('A', 'host/{dnsname}', '*', ok),
+ ('A', 'host/x.{dnsname}', '', denied, add),
+ ),
+
+ ("add the same one twice, simple duplicate error",
+ ('A', 'host/{dnsname}', '*', ok),
+ ('A', 'host/{dnsname}', '*', bad, add),
+ ),
+ ("simple duplicate attributes, as non-admin",
+ ('A', 'host/{dnsname}', '*', ok),
+ ('A', 'host/{dnsname}', 'A', bad, add),
+ ),
+
+ ("add the same one twice, identical duplicate",
+ ('A', 'host/{dnsname}', '*', ok),
+ ('A', 'host/{dnsname}', '*', bad, add),
+ ),
+
+ ("add a conflict, host first, as nobody",
+ ('A', 'host/z.{dnsname}', '*', ok),
+ ('B', 'cifs/z.{dnsname}', '', denied),
+ ),
+
+ ("add a conflict, service first, as nobody",
+ ('A', 'cifs/{dnsname}', '*', ok),
+ ('B', 'host/{dnsname}', '', denied),
+ ),
+
+
+ ("three way conflict, host first, as admin",
+ ('A', 'host/z.{dnsname}', '*', ok),
+ ('B', 'cifs/z.{dnsname}', '*', ok),
+ ('C', 'www/z.{dnsname}', '*', ok),
+ ),
+ ("three way conflict, host first, with sufficient rights",
+ ('A', 'host/z.{dnsname}', 'A', ok),
+ ('B', 'cifs/z.{dnsname}', 'B,A', ok),
+ ('C', 'www/z.{dnsname}', 'C,A', ok),
+ ),
+ ("three way conflict, host first, adding duplicate",
+ ('A', 'host/z.{dnsname}', 'A', ok),
+ ('B', 'cifs/z.{dnsname}', 'B,A', ok),
+ ('C', 'cifs/z.{dnsname}', 'C,A', bad),
+ ),
+ ("three way conflict, host first, adding duplicate, full rights",
+ ('A', 'host/z.{dnsname}', 'A', ok),
+ ('B', 'cifs/z.{dnsname}', 'B,A', ok),
+ ('C', 'cifs/z.{dnsname}', 'C,B,A', bad),
+ ),
+
+ ("three way conflict, host first, with other write rights",
+ ('A', 'host/z.{dnsname}', '*', ok),
+ ('B', 'cifs/z.{dnsname}', 'A,B', ok),
+ ('C', 'cifs/z.{dnsname}', 'A,B', bad),
+
+ ),
+ ("three way conflict, host first, as nobody",
+ ('A', 'host/z.{dnsname}', '*', ok),
+ ('B', 'cifs/z.{dnsname}', '*', ok),
+ ('C', 'www/z.{dnsname}', '', denied),
+ ),
+
+ ("three way conflict, services first, as admin",
+ ('A', 'cifs/{dnsname}', '*', ok),
+ ('B', 'www/{dnsname}', '*', ok),
+ ('C', 'host/{dnsname}', '*', constraint),
+ ),
+ ("three way conflict, services first, with service write rights",
+ ('A', 'cifs/{dnsname}', '*', ok),
+ ('B', 'www/{dnsname}', '*', ok),
+ ('C', 'host/{dnsname}', 'A,B', bad),
+ ),
+
+ ("three way conflict, service first, as nobody",
+ ('A', 'cifs/{dnsname}', '*', ok),
+ ('B', 'www/{dnsname}', '*', ok),
+ ('C', 'host/{dnsname}', '', denied),
+ ),
+ ("replace host before specific",
+ ('A', 'host/{dnsname}', '*', ok),
+ ('A', 'cifs/{dnsname}', '*', ok),
+ ),
+ ("replace host after specific, as nobody",
+ ('A', 'cifs/{dnsname}', '*', ok),
+ ('A', 'host/{dnsname}', '', denied),
+ ),
+
+ ("non-conflict host before specific",
+ ('A', 'host/{dnsname}', '*', ok),
+ ('A', 'cifs/{dnsname}', '*', ok, add),
+ ),
+ ("non-conflict host after specific",
+ ('A', 'cifs/{dnsname}', '*', ok),
+ ('A', 'host/{dnsname}', '*', ok, add),
+ ),
+ ("non-conflict host before specific, non-admin",
+ ('A', 'host/{dnsname}', 'A', ok),
+ ('A', 'cifs/{dnsname}', 'A', ok, add),
+ ),
+ ("non-conflict host after specific, as nobody",
+ ('A', 'cifs/{dnsname}', '*', ok),
+ ('A', 'host/{dnsname}', '', denied, add),
+ ),
+
+ ("add a conflict, host first on user, as admin",
+ ('user:C', 'host/{dnsname}', '*', ok),
+ ('B', 'cifs/{dnsname}', '*', ok),
+ ),
+ ("add a conflict, host first on user, host rights",
+ ('user:C', 'host/{dnsname}', '*', ok),
+ ('B', 'cifs/{dnsname}', 'C', denied),
+ ),
+ ("add a conflict, host first on user, both rights",
+ ('user:C', 'host/{dnsname}', '*', ok),
+ ('B', 'cifs/{dnsname}', 'B,C', ok),
+ ),
+ ("add a conflict, host first both on user",
+ ('user:C', 'host/{dnsname}', '*', ok),
+ ('user:D', 'www/{dnsname}', '*', ok),
+ ),
+ ("add a conflict, host first both on user, host rights",
+ ('user:C', 'host/{dnsname}', '*', ok),
+ ('user:D', 'www/{dnsname}', 'C', denied),
+ ),
+ ("add a conflict, host first both on user, both rights",
+ ('user:C', 'host/{dnsname}', '*', ok),
+ ('user:D', 'www/{dnsname}', 'C,D', ok),
+ ),
+ ("add a conflict, host first both on user, as nobody",
+ ('user:C', 'host/{dnsname}', '*', ok),
+ ('user:D', 'www/{dnsname}', '', denied),
+ ),
+ ("add a conflict, host first, with both write rights",
+ ('A', 'host/z.{dnsname}', '*', ok),
+ ('B', 'cifs/z.{dnsname}', 'A,B', ok),
+ ),
+
+ ("add a conflict, host first, second on user, as admin",
+ ('A', 'host/{dnsname}', '*', ok),
+ ('user:D', 'cifs/{dnsname}', '*', ok),
+ ),
+ ("add a conflict, host first, second on user, with rights",
+ ('A', 'host/{dnsname}', '*', ok),
+ ('user:D', 'cifs/{dnsname}', 'A,D', ok),
+ ),
+
+ ("nonsense SPNs, part 1, as admin",
+ ('A', 'a-b-c/{dnsname}', '*', ok),
+ ('A', 'rrrrrrrrrrrrr /{dnsname}', '*', ok),
+ ),
+ ("nonsense SPNs, part 1, as user",
+ ('A', 'a-b-c/{dnsname}', 'A', ok),
+ ('A', 'rrrrrrrrrrrrr /{dnsname}', 'A', ok),
+ ),
+ ("nonsense SPNs, part 1, as nobody",
+ ('A', 'a-b-c/{dnsname}', '', denied),
+ ('A', 'rrrrrrrrrrrrr /{dnsname}', '', denied),
+ ),
+
+ ("add a conflict, using port",
+ ('A', 'dns/{dnsname}', '*', ok),
+ ('B', 'dns/{dnsname}:53', '*', ok),
+ ),
+ ("add a conflict, using port, port first",
+ ('user:C', 'dns/{dnsname}:53', '*', ok),
+ ('user:D', 'dns/{dnsname}', '*', ok),
+ ),
+ ("three part spns",
+ ('A', {'dNSHostName': '{dnsname}'}, '*', ok),
+ ('A', 'cifs/{dnsname}/DomainDNSZones.{dnsname}', '*', ok),
+ ('B', 'cifs/{dnsname}/DomainDNSZones.{dnsname}', '*', constraint),
+ ('A', {'dNSHostName': 'y.{dnsname}'}, '*', ok),
+ ('B', 'cifs/{dnsname}/DomainDNSZones.{dnsname}', '*', ok),
+ ('B', 'cifs/y.{dnsname}/DomainDNSZones.{dnsname}', '*', constraint),
+ ),
+ ("three part nonsense spns",
+ ('A', {'dNSHostName': 'bean'}, '*', ok),
+ ('A', 'cifs/bean/DomainDNSZones.bean', '*', ok),
+ ('B', 'cifs/bean/DomainDNSZones.bean', '*', constraint),
+ ('A', {'dNSHostName': 'y.bean'}, '*', ok),
+ ('B', 'cifs/bean/DomainDNSZones.bean', '*', ok),
+ ('B', 'cifs/y.bean/DomainDNSZones.bean', '*', constraint),
+ ('C', 'host/bean/bean', '*', ok),
+ ),
+
+ ("one part spns (no slashes)",
+ ('A', '{dnsname}', '*', constraint),
+ ('B', 'cifs', '*', constraint),
+ ('B', 'cifs/', '*', ok),
+ ('B', ' ', '*', constraint),
+ ('user:C', 'host', '*', constraint),
+ ),
+
+ ("dodgy spns",
+ # These tests pass on Windows. An SPN must have one or two
+ # slashes, with at least one character before the first one,
+ # UNLESS the first slash is followed by a good enough service
+ # name (e.g. "/host/x.y" rather than "sdfsd/x.y").
+ ('A', '\\/{dnsname}', '*', ok),
+ ('B', 'cifs/\\\\{dnsname}', '*', ok),
+ ('B', r'cifs/\\\{dnsname}', '*', ok),
+ ('B', r'cifs/\\\{dnsname}/', '*', ok),
+ ('A', r'cīfs/\\\{dnsname}/', '*', constraint), # 'ī' maps to 'i'
+ # on the next two, full-width solidus (U+FF0F) does not work
+ # as '/'.
+ ('A', 'cifs/sfic', '*', constraint, add),
+ ('A', r'cifs/\\\{dnsname}', '*', constraint, add),
+ ('B', '\n', '*', constraint),
+ ('B', '\n/\n', '*', ok),
+ ('B', '\n/\n/\n', '*', ok),
+ ('B', '\n/\n/\n/\n', '*', constraint),
+ ('B', ' /* and so on */ ', '*', ok, add),
+ ('B', r'¯\_(ツ)_/¯', '*', ok, add), # ¯\_(ツ)_/¯
+ # つ is hiragana for katakana ツ, so the next one fails for
+ # something analogous to casefold reasons.
+ ('A', r'¯\_(つ)_/¯', '*', constraint),
+ ('A', r'¯\_(㋡)_/¯', '*', constraint), # circled ツ
+ ('B', '//', '*', constraint), # all can't be empty,
+ ('B', ' //', '*', ok), # service can be space
+ ('B', '/host/{dnsname}', '*', ok), # or empty if others aren't
+ ('B', '/host/x.y.z', '*', ok),
+ ('B', '/ /x.y.z', '*', ok),
+ ('B', ' / / ', '*', ok),
+ ('user:C', b'host/', '*', ok),
+ ('user:C', ' /host', '*', ok), # service is ' ' (space)
+ ('B', ' /host', '*', constraint), # already on C
+ ('B', ' /HōST', '*', constraint), # ō equiv to O
+ ('B', ' /ħØşt', '*', constraint), # maps to ' /host'
+ ('B', ' /H0ST', '*', ok), # 0 is zero
+ ('B', ' /НoST', '*', ok), # Cyrillic Н (~N)
+ ('B', ' /host', '*', ok), # two space
+ ('B', '\u00a0/host', '*', ok), # non-breaking space
+ ('B', ' 2/HōST/⌷[ ][]¨(', '*', ok),
+ ('B', ' (//)', '*', ok, add),
+ ('B', ' ///', '*', constraint),
+ ('B', r' /\//', '*', constraint), # escape doesn't help
+ ('B', ' /\\//', '*', constraint), # double escape doesn't help
+ ('B', r'\//', '*', ok),
+ ('A', r'\\/\\/', '*', ok),
+ ('B', '|//|', '*', ok, add),
+ ('B', r'\/\/\\', '*', ok, add),
+
+ ('A', ':', '*', constraint),
+ ('A', ':/:', '*', ok),
+ ('A', ':/:80', '*', ok), # port number syntax is not special
+ ('A', ':/:( ツ', '*', ok),
+ ('A', ':/:/:', '*', ok),
+ ('B', b'cifs/\x11\xaa\xbb\xcc\\example.com', '*', ok),
+ ('A', b':/\xcc\xcc\xcc\xcc', '*', ok),
+ ('A', b':/b\x00/b/b/b', '*', ok), # string handling truncates at \x00
+ ('A', b'a@b/a@b/a@b', '*', ok),
+ ('A', b'a/a@b/a@b', '*', ok),
+ ),
+ ("empty part spns (consecutive slashes)",
+ ('A', 'cifs//{dnsname}', '*', ok),
+ ('B', 'cifs//{dnsname}', '*', bad), # should clash with line 1
+ ('B', 'cifs/zzzy.{dnsname}/', '*', ok),
+ ('B', '/host/zzzy.{dnsname}', '*', ok),
+ ),
+ ("too many spn parts",
+ ('A', 'cifs/{dnsname}/{dnsname}/{dnsname}', '*', bad),
+ ('A', {'dNSHostName': 'y.{dnsname}'}, '*', ok),
+ ('B', 'cifs/{dnsname}/{dnsname}/', '*', bad),
+ ('B', 'cifs/y.{dnsname}/{dnsname}/toop', '*', bad),
+ ('B', 'host/{dnsname}/a/b/c', '*', bad),
+ ),
+ ("add a conflict, host first, as admin",
+ ('A', 'host/z.{dnsname}', '*', ok),
+ ('B', 'cifs/z.{dnsname}', '*', ok),
+ ),
+ ("add a conflict, host first, with host write rights",
+ ('A', 'host/z.{dnsname}', '*', ok),
+ ('B', 'cifs/z.{dnsname}', 'A', denied),
+ ),
+ ("add a conflict, service first, with service write rights",
+ ('A', 'cifs/{dnsname}', '*', ok),
+ ('B', 'host/{dnsname}', 'A', denied),
+ ),
+ ("adding dNSHostName after cifs with no old dNSHostName",
+ ('A', 'cifs/{dnsname}', '*', ok),
+ ('A', {'dNSHostName': 'y.{dnsname}'}, '*', ok),
+ ('B', 'cifs/{dnsname}', '*', constraint),
+ ('B', 'cifs/y.{dnsname}', '*', ok),
+ ('B', 'host/y.{dnsname}', '*', ok),
+ ),
+ ("changing dNSHostName after cifs",
+ ('A', {'dNSHostName': '{dnsname}'}, '*', ok),
+ ('A', 'cifs/{dnsname}', '*', ok),
+ ('A', {'dNSHostName': 'y.{dnsname}'}, '*', ok),
+ ('B', 'cifs/{dnsname}', '*', ok),
+ ('B', 'cifs/y.{dnsname}', '*', bad),
+ ('B', 'host/y.{dnsname}', '*', bad),
+ ),
+ ]
+
+
+@DynamicTestCase
+class LdapSpnSambaOnlyTest(LdapSpnTestBase):
+ # We don't run these ones outside of selftest, where we are
+ # probably testing against Windows and these are known failures.
+ _disabled = 'SAMBA_SELFTEST' not in os.environ
+ cases = [
+ ("add a conflict, host first, with service write rights",
+ ('A', 'host/z.{dnsname}', '*', ok),
+ ('B', 'cifs/z.{dnsname}', 'B', denied),
+ ),
+ ("add a conflict, service first, with host write rights",
+ ('A', 'cifs/{dnsname}', '*', ok),
+ ('B', 'host/{dnsname}', 'B', constraint),
+ ),
+ ("add a conflict, service first, as admin",
+ ('A', 'cifs/{dnsname}', '*', ok),
+ ('B', 'host/{dnsname}', '*', constraint),
+ ),
+ ("add a conflict, service first, with both write rights",
+ ('A', 'cifs/{dnsname}', '*', ok),
+ ('B', 'host/{dnsname}', 'A,B', constraint),
+ ),
+ ("add a conflict, host first both on user, service rights",
+ ('user:C', 'host/{dnsname}', '*', ok),
+ ('user:D', 'www/{dnsname}', 'D', denied),
+ ),
+ ("add a conflict, along with a re-added SPN",
+ ('A', 'cifs/{dnsname}', '*', ok),
+ ('B', 'cifs/heeble.example.net', 'B', ok),
+ ('B', ['cifs/heeble.example.net', 'host/{dnsname}'], 'B', constraint),
+ ),
+
+ ("changing dNSHostName after host",
+ ('A', {'dNSHostName': '{dnsname}'}, '*', ok),
+ ('A', 'host/{dnsname}', '*', ok),
+ ('A', {'dNSHostName': 'y.{dnsname}'}, '*', ok),
+ ('B', 'cifs/{dnsname}', 'B', ok), # no clash with A
+ ('B', 'cifs/y.{dnsname}', 'B', bad), # should clash with A
+ ('B', 'host/y.{dnsname}', '*', bad),
+ ),
+
+ ("mystery dnsname clash, host first",
+ ('user:C', 'host/heeble.example.net', '*', ok),
+ ('user:D', 'www/heeble.example.net', '*', ok),
+ ),
+ ("mystery dnsname clash, www first",
+ ('user:D', 'www/heeble.example.net', '*', ok),
+ ('user:C', 'host/heeble.example.net', '*', constraint),
+ ),
+ ("replace as admin",
+ ('A', 'cifs/{dnsname}', '*', ok),
+ ('A', 'host/{dnsname}', '*', ok),
+ ('A', 'cifs/{dnsname}', '*', ok),
+ ),
+ ("replace as non-admin with rights",
+ ('A', 'cifs/{dnsname}', '*', ok),
+ ('A', 'host/{dnsname}', 'A', ok),
+ ('A', 'cifs/{dnsname}', 'A', ok),
+ ),
+ ("replace vial delete as non-admin with rights",
+ ('A', 'cifs/{dnsname}', '*', ok),
+ ('A', 'host/{dnsname}', 'A', ok),
+ ('A', 'host/{dnsname}', 'A', ok, delete),
+ ('A', 'cifs/{dnsname}', 'A', ok, add),
+ ),
+ ("replace as non-admin without rights",
+ ('B', 'cifs/b', '*', ok),
+ ('A', 'cifs/{dnsname}', '*', ok),
+ ('A', 'host/{dnsname}', 'B', denied),
+ ('A', 'cifs/{dnsname}', 'B', denied),
+ ),
+ ("replace as nobody",
+ ('B', 'cifs/b', '*', ok),
+ ('A', 'cifs/{dnsname}', '*', ok),
+ ('A', 'host/{dnsname}', '', denied),
+ ('A', 'cifs/{dnsname}', '', denied),
+ ),
+ ("accumulate and delete as admin",
+ ('A', 'cifs/{dnsname}', '*', ok),
+ ('A', 'host/{dnsname}', '*', ok, add),
+ ('A', 'www/{dnsname}', '*', ok, add),
+ ('A', 'www/...', '*', ok, add),
+ ('A', 'host/...', '*', ok, add),
+ ('A', 'www/{dnsname}', '*', ok, delete),
+ ('A', 'host/{dnsname}', '*', ok, delete),
+ ('A', 'host/{dnsname}', '*', ok, add),
+ ('A', 'www/{dnsname}', '*', ok, add),
+ ('A', 'host/...', '*', ok, delete),
+ ),
+ ("accumulate and delete with user rights",
+ ('A', 'cifs/{dnsname}', '*', ok),
+ ('A', 'host/{dnsname}', 'A', ok, add),
+ ('A', 'www/{dnsname}', 'A', ok, add),
+ ('A', 'www/...', 'A', ok, add),
+ ('A', 'host/...', 'A', ok, add),
+ ('A', 'www/{dnsname}', 'A', ok, delete),
+ ('A', 'host/{dnsname}', 'A', ok, delete),
+ ('A', 'host/{dnsname}', 'A', ok, add),
+ ('A', 'www/{dnsname}', 'A', ok, add),
+ ('A', 'host/...', 'A', ok, delete),
+ ),
+ ("three way conflict, host first, with partial write rights",
+ ('A', 'host/z.{dnsname}', 'A', ok),
+ ('B', 'cifs/z.{dnsname}', 'B', denied),
+ ('C', 'www/z.{dnsname}', 'C', denied),
+ ),
+ ("three way conflict, host first, with partial write rights 2",
+ ('A', 'host/z.{dnsname}', 'A', ok),
+ ('B', 'cifs/z.{dnsname}', 'B', bad),
+ ('C', 'www/z.{dnsname}', 'C,A', ok),
+ ),
+
+ ("three way conflict sandwich, sufficient rights",
+ ('B', 'host/{dnsname}', 'B', ok),
+ ('A', 'cifs/{dnsname}', 'A,B', ok),
+ # the replaces don't fail even though they appear to affect A
+ # and B, because they are effectively no-ops, leaving
+ # everything as it was before.
+ ('A', 'cifs/{dnsname}', 'A', ok),
+ ('B', 'host/{dnsname}', 'B', ok),
+ ('C', 'www/{dnsname}', 'A,B,C', ok),
+ ('C', 'www/{dnsname}', 'B,C', ok),
+ # because B already has host/, C doesn't matter
+ ('B', 'host/{dnsname}', 'A,B', ok),
+ # removing host (via replace) frees others, needs B only
+ ('B', 'ldap/{dnsname}', 'B', ok),
+ ('C', 'www/{dnsname}', 'C', ok),
+ ('A', 'cifs/{dnsname}', 'A', ok),
+
+ # re-adding host is now impossible while A and C have {dnsname} spns
+ ('B', 'host/{dnsname}', '*', bad),
+ ('B', 'host/{dnsname}', 'A,B,C', bad),
+ # so let's remove those... (not needing B rights)
+ ('C', 'www/{dnsname}', 'C', ok, delete),
+ ('A', 'cifs/{dnsname}', 'A', ok, delete),
+ # and now we can add host/ again
+ ('B', 'host/{dnsname}', 'B', ok),
+ ('C', 'www/{dnsname}', 'B,C', ok, add),
+ ('A', 'cifs/{dnsname}', 'A,B', ok),
+ ),
+ ("three way conflict, service first, with all write rights",
+ ('A', 'cifs/{dnsname}', '*', ok),
+ ('B', 'www/{dnsname}', 'A,B,C', ok),
+ ('C', 'host/{dnsname}', 'A,B,C', bad),
+ ),
+ ("three way conflict, service first, just sufficient rights",
+ ('A', 'cifs/{dnsname}', 'A', ok),
+ ('B', 'www/{dnsname}', 'B', ok),
+ ('C', 'host/{dnsname}', 'A,B,C', bad),
+ ),
+
+ ("three way conflict, service first, with host write rights",
+ ('A', 'cifs/{dnsname}', '*', ok),
+ ('B', 'www/{dnsname}', '*', ok),
+ ('C', 'host/{dnsname}', 'C', bad),
+ ),
+ ("three way conflict, service first, with both write rights",
+ ('A', 'cifs/{dnsname}', '*', ok),
+ ('A', 'cifs/{dnsname}', '*', ok, delete),
+ ('A', 'www/{dnsname}', 'A,B,C', ok),
+ ('B', 'host/{dnsname}', 'A,B', bad),
+ ('A', 'www/{dnsname}', 'A', ok, delete),
+ ('B', 'host/{dnsname}', 'A,B', ok),
+ ('C', 'cifs/{dnsname}', 'C', bad),
+ ('C', 'cifs/{dnsname}', 'B,C', ok),
+ ),
+ ("three way conflict, services first, with partial rights",
+ ('A', 'cifs/{dnsname}', 'A,C', ok),
+ ('B', 'www/{dnsname}', '*', ok),
+ ('C', 'host/{dnsname}', 'A,C', bad),
+ ),
+ ]
+
+
+@DynamicTestCase
+class LdapSpnAmbitiousTest(LdapSpnTestBase):
+ _disabled = True
+ cases = [
+ ("add a conflict with port, host first both on user",
+ ('user:C', 'host/{dnsname}', '*', ok),
+ ('user:D', 'www/{dnsname}:80', '*', bad),
+ ),
+ # see https://bugzilla.samba.org/show_bug.cgi?id=8929
+ ("add the same one twice, case-insensitive duplicate",
+ ('A', 'host/{dnsname}', '*', ok),
+ ('A', 'Host/{dnsname}', '*', bad, add),
+ ),
+ ("special SPN",
+ # should fail because we don't have all the DSA infrastructure
+ ('A', ("E3514235-4B06-11D1-AB04-00C04FC2DCD2/"
+ "75b84f00-a81b-4a19-8ef2-8e483cccff11/"
+ "{dnsname}"), '*', constraint)
+ ),
+ ("single part SPNs matching sAMAccountName",
+ # setting them both together is allegedly a MacOS behaviour,
+ # but all we get from Windows is a mysterious NO_SUCH_OBJECT.
+ ('user:A', {'sAMAccountName': 'A',
+ 'servicePrincipalName': 'A'}, '*', ldb.ERR_NO_SUCH_OBJECT),
+ ('user:B', {'sAMAccountName': 'B'}, '*', ok),
+ ('user:B', {'servicePrincipalName': 'B'}, '*', constraint),
+ ('user:C', {'servicePrincipalName': 'C'}, '*', constraint),
+ ('user:C', {'sAMAccountName': 'C'}, '*', ok),
+ ),
+ ("three part spns with dnsHostName",
+ ('A', {'dNSHostName': '{dnsname}'}, '*', ok),
+ ('A', 'cifs/{dnsname}/DomainDNSZones.{dnsname}', '*', ok),
+ ('A', {'dNSHostName': 'y.{dnsname}'}, '*', ok),
+ ('B', 'cifs/{dnsname}/DomainDNSZones.{dnsname}', '*', ok),
+ ('B', 'cifs/y.{dnsname}/DomainDNSZones.{dnsname}', '*', constraint),
+ ('C', 'host/{y.dnsname}/{y.dnsname}', '*', constraint),
+ ('A', 'host/y.{dnsname}/{dnsname}', '*', constraint),
+ ),
+ ]
+
+
+def main():
+ TestProgram(module=__name__, opts=subunitopts)
+
+main()
diff --git a/python/samba/tests/ldap_upn_sam_account.py b/python/samba/tests/ldap_upn_sam_account.py
new file mode 100644
index 0000000..ab4c389
--- /dev/null
+++ b/python/samba/tests/ldap_upn_sam_account.py
@@ -0,0 +1,510 @@
+# Unix SMB/CIFS implementation.
+#
+# Copyright 2021 (C) Catalyst IT Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+import os
+import sys
+from samba.samdb import SamDB
+from samba.auth import system_session
+import ldb
+from samba.tests.subunitrun import SubunitOptions, TestProgram
+from samba.tests import TestCase, ldb_err
+from samba.tests import DynamicTestCase
+import samba.getopt as options
+import optparse
+from samba.colour import c_DARK_YELLOW
+import re
+import pprint
+from samba.dsdb import (
+ UF_SERVER_TRUST_ACCOUNT,
+ UF_TRUSTED_FOR_DELEGATION,
+)
+
+
+# bad sAMAccountName characters from [MS-SAMR]
+# "3.1.1.6 Attribute Constraints for Originating Updates"
+BAD_SAM_CHARS = (''.join(chr(x) for x in range(0, 32)) +
+ '"/\\[]:|<>+=;?,*')
+
+# 0x7f is *said* to be bad, but turns out to be fine.
+ALLEGED_BAD_SAM_CHARS = chr(127)
+
+LATIN1_BAD_CHARS = set([chr(x) for x in range(129, 160)] +
+ list("ªºÿ") +
+ [chr(x) for x in range(0xc0, 0xc6)] +
+ [chr(x) for x in range(0xc7, 0xd7)] +
+ [chr(x) for x in range(0xd8, 0xde)] +
+ [chr(x) for x in range(0xe0, 0xe6)] +
+ [chr(x) for x in range(0xe7, 0xf7)] +
+ [chr(x) for x in range(0xf8, 0xfe)])
+
+
+LATIN_EXTENDED_A_NO_CLASH = {306, 307, 330, 331, 338, 339, 358, 359, 383}
+
+#XXX does '\x00' just truncate the string though?
+#XXX elsewhere we see "[\\\"|,/:<>+=;?*']" with "'"
+
+
+## UPN limits
+# max length 1024 UTF-8 bytes, following "rfc822"
+# for o365 sync https://docs.microsoft.com/en-us/microsoft-365/enterprise/prepare-for-directory-synchronization?view=o365-worldwide
+# max length is 113 [64 before @] "@" [48 after @]
+# invalid chars: '\\%&*+/=?{}|<>();:,[]"'
+# allowed chars: A – Z, a - z, 0 – 9, ' . - _ ! # ^ ~
+# "Letters with diacritical marks, such as umlauts, accents, and tildes, are invalid characters."
+#
+# "@" can't be first
+# "The username cannot end with a period (.), an ampersand (&), a space, or an at sign (@)."
+#
+
+# per RFC 822, «"a b" @ example.org» is a syntactically valid address
+
+
+ok = True
+bad = False
+report = 'report'
+exists = ldb.ERR_ENTRY_ALREADY_EXISTS
+
+
+if sys.stdout.isatty():
+ c_doc = c_DARK_YELLOW
+else:
+ c_doc = lambda x: x
+
+
+def get_samdb():
+ return SamDB(url=f"ldap://{SERVER}",
+ lp=LP,
+ session_info=system_session(),
+ credentials=CREDS)
+
+
+def format(s):
+ if type(s) is str:
+ s = s.format(realm=REALM.upper(),
+ lrealm=REALM.lower(),
+ other_realm=(REALM + ".another.example.net"))
+ return s
+
+
+class LdapUpnSamTestBase(TestCase):
+ """Make sure we can't add userPrincipalNames or sAMAccountNames that
+ implicitly collide.
+ """
+ _disabled = False
+
+ @classmethod
+ def setUpDynamicTestCases(cls):
+ if getattr(cls, '_disabled', False):
+ return
+ for doc, *rows in cls.cases:
+ name = re.sub(r'\W+', '_', doc)
+ cls.generate_dynamic_test("test_upn_sam", name, rows, doc)
+
+ def setup_objects(self, rows):
+ objects = set(r[0] for r in rows)
+ for name in objects:
+ if ':' in name:
+ objtype, name = name.split(':', 1)
+ else:
+ objtype = 'user'
+ getattr(self, f'add_{objtype}')(name)
+ self.addCleanup(self.remove_object, name)
+
+ def _test_upn_sam_with_args(self, rows, doc):
+ self.setup_objects(rows)
+ cdoc = c_doc(doc)
+
+ for i, row in enumerate(rows):
+ if len(row) == 4:
+ obj, data, expected, op = row
+ else:
+ obj, data, expected = row
+ op = ldb.FLAG_MOD_REPLACE
+
+ dn, dnsname = self.objects[obj]
+ sam, upn = None, None
+ if isinstance(data, dict):
+ sam = data.get('sam')
+ upn = data.get('upn')
+ elif isinstance(data, str):
+ if '@' in data:
+ upn = data
+ else:
+ sam = data
+ else: # bytes
+ if b'@' in data:
+ upn = data
+ else:
+ sam = data
+
+ m = {"dn": dn}
+
+ if upn is not None:
+ m["userPrincipalName"] = format(upn)
+
+ if sam is not None:
+ m["sAMAccountName"] = format(sam)
+
+ msg = ldb.Message.from_dict(self.samdb, m, op)
+
+ if expected is bad:
+ try:
+ self.samdb.modify(msg)
+ except ldb.LdbError as e:
+ print(f"row {i+1} of '{cdoc}' failed as expected with "
+ f"{ldb_err(e)}\n")
+ continue
+ self.fail(f"row {i+1} of '{cdoc}' should have failed:\n"
+ f"{pprint.pformat(m)} on {obj}")
+ elif expected is ok:
+ try:
+ self.samdb.modify(msg)
+ except ldb.LdbError as e:
+ raise AssertionError(
+ f"row {i+1} of '{cdoc}' failed with {ldb_err(e)}:\n"
+ f"{pprint.pformat(m)} on {obj}") from None
+ elif expected is report:
+ try:
+ self.samdb.modify(msg)
+ print(f"row {i+1} of '{cdoc}' SUCCEEDED:\n"
+ f"{pprint.pformat(m)} on {obj}")
+ except ldb.LdbError as e:
+ print(f"row {i+1} of '{cdoc}' FAILED "
+ f"with {ldb_err(e)}:\n"
+ f"{pprint.pformat(m)} on {obj}")
+
+ else:
+ try:
+ self.samdb.modify(msg)
+ except ldb.LdbError as e:
+ if hasattr(expected, '__contains__'):
+ if e.args[0] in expected:
+ continue
+
+ if e.args[0] == expected:
+ continue
+
+ self.fail(f"row {i+1} of '{cdoc}' "
+ f"should have failed with {ldb_err(expected)} "
+ f"but instead failed with {ldb_err(e)}:\n"
+ f"{pprint.pformat(m)} on {obj}")
+ self.fail(f"row {i+1} of '{cdoc}' "
+ f"should have failed with {ldb_err(expected)}:\n"
+ f"{pprint.pformat(m)} on {obj}")
+
+ def add_dc(self, name):
+ dn = f"CN={name},OU=Domain Controllers,{self.base_dn}"
+ dnsname = f"{name}.{REALM}".lower()
+ self.samdb.add({
+ "dn": dn,
+ "objectclass": "computer",
+ "userAccountControl": str(UF_SERVER_TRUST_ACCOUNT |
+ UF_TRUSTED_FOR_DELEGATION),
+ "dnsHostName": dnsname,
+ "carLicense": self.id()
+ })
+ self.objects[name] = (dn, dnsname)
+
+ def add_user(self, name):
+ dn = f"CN={name},{self.ou}"
+ self.samdb.add({
+ "dn": dn,
+ "name": name,
+ "objectclass": "user",
+ "carLicense": self.id()
+ })
+ self.objects[name] = (dn, None)
+
+ def remove_object(self, name):
+ dn, dnsname = self.objects.pop(name)
+ self.samdb.delete(dn)
+
+ def setUp(self):
+ super().setUp()
+ self.samdb = get_samdb()
+ self.base_dn = self.samdb.get_default_basedn()
+ self.short_id = self.id().rsplit('.', 1)[1][:63]
+ self.objects = {}
+ self.ou = f"OU={ self.short_id },{ self.base_dn }"
+ self.addCleanup(self.samdb.delete, self.ou, ["tree_delete:1"])
+ self.samdb.add({"dn": self.ou, "objectclass": "organizationalUnit"})
+
+
+@DynamicTestCase
+class LdapUpnSamTest(LdapUpnSamTestBase):
+ cases = [
+ # The structure is
+ # ( «documentation/message that becomes test name»,
+ # («short object id», «upn or sam or mapping», «expected»),
+ # («short object id», «upn or sam or mapping», «expected»),
+ # ...,
+ # )
+ #
+ # where the first item is a one line string explaining the
+ # test, and subsequent items describe database modifications,
+ # to be applied in series.
+ #
+ # First is a short ID, which maps to an object DN. Second is
+ # either a string or a dictionary.
+ #
+ # * If a string, if it contains '@', it is a UPN, otherwise a
+ # samaccountname.
+ #
+ # * If a dictionary, it is a mapping of some of ['sam', 'upn']
+ # to strings (in this way, you can add two attributes in one
+ # message, or attempt a samaccountname with '@').
+ #
+ # expected can be «ok», «bad» (mapped to True and False,
+ # respectively), or a specific LDB error code, if that exact
+ # exception is wanted.
+ ("add good UPN",
+ ('A', 'a@{realm}', ok),
+ ),
+ ("add the same upn to different objects",
+ ('A', 'a@{realm}', ok),
+ ('B', 'a@{realm}', ldb.ERR_CONSTRAINT_VIOLATION),
+ ('B', 'a@{lrealm}', ldb.ERR_CONSTRAINT_VIOLATION), # lowercase realm
+ ),
+ ("replace UPN with itself",
+ ('A', 'a@{realm}', ok),
+ ('A', 'a@{realm}', ok),
+ ('A', 'a@{lrealm}', ok),
+ ),
+ ("replace SAM with itself",
+ ('A', 'a', ok),
+ ('A', 'a', ok),
+ ),
+ ("replace UPN realm",
+ ('A', 'a@{realm}', ok),
+ ('A', 'a@{other_realm}', ok),
+ ),
+ ("matching SAM and UPN",
+ ('A', 'a', ok),
+ ('A', 'a@{realm}', ok),
+ ),
+ ("matching SAM and UPN, other realm",
+ ('A', 'a', ok),
+ ('A', 'a@{other_realm}', ok),
+ ),
+ ("matching SAM and UPN, single message",
+ ('A', {'sam': 'a', 'upn': 'a@{realm}'}, ok),
+ ('A', {'sam': 'a', 'upn': 'a@{other_realm}'}, ok),
+ ),
+ ("different objects, different realms",
+ ('A', 'a@{realm}', ok),
+ ('B', 'a@{other_realm}', ok),
+ ),
+ ("different objects, same UPN, different case",
+ ('A', 'a@{realm}', ok),
+ ('B', 'A@{realm}', ldb.ERR_CONSTRAINT_VIOLATION),
+ ),
+ ("different objects, SAM after UPN",
+ ('A', 'a@{realm}', ok),
+ ('B', 'a', ldb.ERR_CONSTRAINT_VIOLATION),
+ ),
+ ("different objects, SAM before UPN",
+ ('A', 'a', ok),
+ ('B', 'a@{realm}', exists),
+ ),
+ ("different objects, SAM account clash",
+ ('A', 'a', ok),
+ ('B', 'a', exists),
+ ),
+ ("different objects, SAM account clash, different case",
+ ('A', 'a', ok),
+ ('B', 'A', exists),
+ ),
+ ("two way clash",
+ ('A', {'sam': 'x', 'upn': 'y@{realm}'}, ok),
+ # The sam account raises EXISTS while the UPN raises
+ # CONSTRAINT_VIOLATION. We don't really care in which order
+ # they are checked, so either error is ok.
+ ('B', {'sam': 'y', 'upn': 'x@{realm}'},
+ (exists, ldb.ERR_CONSTRAINT_VIOLATION)),
+ ),
+ ("two way clash, other realm",
+ ('A', {'sam': 'x', 'upn': 'y@{other_realm}'}, ok),
+ ('B', {'sam': 'y', 'upn': 'x@{other_realm}'}, ok),
+ ),
+ # UPN versions of bad sam account names
+ ("UPN clash on other realm",
+ ('A', 'a@x.x', ok),
+ ('B', 'a@x.x', ldb.ERR_CONSTRAINT_VIOLATION),
+ ),
+ ("UPN same but for trailing spaces",
+ ('A', 'a@{realm}', ok),
+ ('B', 'a @{realm}', ok),
+ ),
+ # UPN has no at
+ ("UPN has no at",
+ ('A', {'upn': 'noat'}, ok),
+ ('B', {'upn': 'noat'}, ldb.ERR_CONSTRAINT_VIOLATION),
+ ('C', {'upn': 'NOAT'}, ldb.ERR_CONSTRAINT_VIOLATION),
+ ),
+ # UPN has non-ascii at, followed by real at.
+ ("UPN with non-ascii at vs real at",
+ ('A', {'upn': 'smallat﹫{realm}'}, ok),
+ ('B', {'upn': 'smallat@{realm}'}, ok),
+ ('C', {'upn': 'tagat\U000e0040{realm}'}, ok),
+ ('D', {'upn': 'tagat@{realm}'}, ok),
+ ),
+ ("UPN with unicode at vs real at, real at first",
+ ('B', {'upn': 'smallat@{realm}'}, ok),
+ ('A', {'upn': 'smallat﹫{realm}'}, ok),
+ ('D', {'upn': 'tagat@{realm}'}, ok),
+ ('C', {'upn': 'tagat\U000e0040{realm}'}, ok),
+ ),
+ ("UPN username too long",
+ # SPN soft limit 20; hard limit 256, overall UPN 1024
+ ('A', 'a' * 25 + '@b.c', ok),
+ ('A', 'a' * 65 + '@b.c', ok), # Azure AD limit is 64
+ ('A', 'a' * 257 + '@b.c', ok), # 256 is sam account name limit
+ ),
+ ("sam account name 20 long",
+ # SPN soft limit 20
+ ('A', 'a' * 20, ok),
+ ),
+ ("UPN has two at signs",
+ ('A', 'a@{realm}', ok),
+ ('A', 'a@{realm}@{realm}', ok),
+ ('A', 'a@a.b', ok),
+ ('A', 'a@a@a.b', ok),
+ ),
+ ("SAM has at signs clashing upn second, non-realm",
+ ('A', {'sam': 'a@a.b'}, ok),
+ ('B', 'a@a.b@a.b', ok), # UPN won't clash with SAM, because the realm part differs
+ ),
+ ("SAM has at signs clashing upn second",
+ ('A', {'sam': 'a@{realm}'}, ok),
+ ('B', 'a@{realm}@{realm}', bad), # UPN would clash with SAM
+ ),
+ ("SAM has at signs clashing upn first",
+ ('B', 'a@{realm}@{realm}', ok),
+ ('A', {'sam': 'a@{realm}'}, bad),
+ ),
+ ("spaces around at",
+ ('A', 'a name @ {realm}', ok),
+ ('B', 'a name @ {realm}', ldb.ERR_CONSTRAINT_VIOLATION),
+ ('B', 'a name @{realm}', ok), # because realm looks different
+ ('C', 'a name@{realm}', ok),
+ ('D', 'a name', ldb.ERR_CONSTRAINT_VIOLATION),
+ ('D', 'a name ', (exists, ldb.ERR_CONSTRAINT_VIOLATION)), # matches B
+ ),
+ ("SAM starts with at",
+ ('A', {'sam': '@{realm}'}, ok),
+ ('B', {'sam': '@a'}, ok),
+ ('C', {'sam': '@{realm}'}, exists),
+ ('C', {'sam': '@a'}, exists),
+ ('C', {'upn': '@{realm}@{realm}'}, bad),
+ ('C', {'upn': '@a@{realm}'}, bad),
+ ),
+ ("UPN starts with at",
+ ('A', {'upn': '@{realm}'}, ok),
+ ('B', {'upn': '@a@{realm}'}, ok),
+ ('C', {'upn': '@{realm}'}, bad),
+ ('C', {'sam': '@a'}, bad),
+ ),
+ ("SAM ends with at",
+ ('A', {'sam': '{realm}@'}, ok),
+ ('B', {'sam': 'a@'}, ok),
+ ('C', {'sam': '{realm}@'}, exists),
+ ('C', {'sam': 'a@'}, exists),
+ ('C', {'upn': 'a@@{realm}'}, bad),
+ ('C', {'upn': '{realm}@@{realm}'}, bad),
+ ),
+ ("UPN ends with at",
+ ('A', {'upn': '{realm}@'}, ok),
+ ('B', {'upn': '@a@{realm}@'}, ok),
+ ('C', {'upn': '{realm}@'}, bad),
+ ('C', {'sam': '@a@{realm}'}, ok), # not like B, because other realm
+ ),
+ ]
+
+
+@DynamicTestCase
+class LdapUpnSamSambaOnlyTest(LdapUpnSamTestBase):
+ # We don't run these ones outside of selftest, where we are
+ # probably testing against Windows and these are known failures.
+ _disabled = 'SAMBA_SELFTEST' not in os.environ
+ cases = [
+ ("sam account name too long",
+ # SAM account name soft limit is 20
+ ('A', 'a' * 19, ok),
+ ('A', 'a' * 20, ok),
+ ('A', 'a' * 65, ok),
+ ('A', 'a' * 255, ok),
+ ('A', 'a' * 256, ok),
+ ('A', 'a' * 257, ldb.ERR_INVALID_ATTRIBUTE_SYNTAX),
+ ),
+ ("UPN username too long",
+ ('A', 'a' * 254 + '@' + 'b.c' * 257,
+ ldb.ERR_INVALID_ATTRIBUTE_SYNTAX), # 1024 is alleged UPN limit
+ ),
+ ("UPN same but for internal spaces",
+ ('A', 'a b@x.x', ok),
+ ('B', 'a b@x.x', ldb.ERR_CONSTRAINT_VIOLATION),
+ ),
+ ("SAM contains delete",
+ # forbidden according to documentation, but works in practice on Windows
+ ('A', 'a\x7f', ldb.ERR_CONSTRAINT_VIOLATION),
+ ('A', 'a\x7f'.encode(), ldb.ERR_CONSTRAINT_VIOLATION),
+ ('A', 'a\x7fb', ldb.ERR_CONSTRAINT_VIOLATION),
+ ('A', 'a\x7fb'.encode(), ldb.ERR_CONSTRAINT_VIOLATION),
+ ('A', '\x7fb', ldb.ERR_CONSTRAINT_VIOLATION),
+ ('A', '\x7fb'.encode(), ldb.ERR_CONSTRAINT_VIOLATION),
+ ),
+ # The wide at symbol ('@' U+FF20) does not count as '@' for Samba
+ # so it will look like a string with no @s.
+ ("UPN with unicode wide at vs real at",
+ ('A', {'upn': 'wideat@{realm}'}, ok),
+ ('B', {'upn': 'wideat@{realm}'}, ok),
+ ),
+ ("UPN with real at vs wide at",
+ ('B', {'upn': 'wideat@{realm}'}, ok),
+ ('A', {'upn': 'wideat@{realm}'}, ok)
+ ),
+ ]
+
+
+def main():
+ global LP, CREDS, SERVER, REALM
+
+ parser = optparse.OptionParser(
+ "python3 ldap_upn_sam_account.py <server> [options]")
+ sambaopts = options.SambaOptions(parser)
+ parser.add_option_group(sambaopts)
+
+ # use command line creds if available
+ credopts = options.CredentialsOptions(parser)
+ parser.add_option_group(credopts)
+ subunitopts = SubunitOptions(parser)
+ parser.add_option_group(subunitopts)
+
+ opts, args = parser.parse_args()
+ if len(args) != 1:
+ parser.print_usage()
+ sys.exit(1)
+
+ LP = sambaopts.get_loadparm()
+ CREDS = credopts.get_credentials(LP)
+ SERVER = args[0]
+ REALM = CREDS.get_realm()
+
+ TestProgram(module=__name__, opts=subunitopts)
+
+main()
diff --git a/python/samba/tests/ldap_whoami.py b/python/samba/tests/ldap_whoami.py
new file mode 100644
index 0000000..607ebce
--- /dev/null
+++ b/python/samba/tests/ldap_whoami.py
@@ -0,0 +1,38 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Volker Lendecke <vl@samba.org> 2023
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from samba import Ldb,tests
+from samba.samba3 import param as s3param
+from samba import credentials
+import os
+
+class LdapWhoami(tests.TestCase):
+ def test_ldap_whoami(self):
+ lp = s3param.get_context()
+ lp.load(os.getenv("SERVERCONFFILE"))
+
+ domain=os.getenv("DOMAIN")
+ username=os.getenv("DC_USERNAME")
+
+ creds = credentials.Credentials()
+ creds.guess(lp)
+ creds.set_domain(domain)
+ creds.set_username(username)
+ creds.set_password(os.getenv("DC_PASSWORD"))
+
+ l=Ldb(f'ldap://{os.getenv("DC_SERVER_IP")}/', credentials=creds, lp=lp)
+ w=l.whoami()
+ self.assertEqual(w,f'u:{domain}\\{username}')
diff --git a/python/samba/tests/libsmb-basic.py b/python/samba/tests/libsmb-basic.py
new file mode 100644
index 0000000..20d3dd4
--- /dev/null
+++ b/python/samba/tests/libsmb-basic.py
@@ -0,0 +1,268 @@
+# Unix SMB/CIFS implementation.
+# Copyright Volker Lendecke <vl@samba.org> 2012
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for samba.samba3.libsmb."""
+
+from samba.samba3 import libsmb_samba_internal as libsmb
+from samba.dcerpc import security
+from samba import NTSTATUSError,ntstatus
+from samba.ntstatus import NT_STATUS_DELETE_PENDING
+from samba.credentials import SMB_ENCRYPTION_REQUIRED
+import samba.tests.libsmb
+import threading
+import sys
+import random
+
+
+class LibsmbTestCase(samba.tests.libsmb.LibsmbTests):
+
+ class OpenClose(threading.Thread):
+
+ def __init__(self, conn, filename, num_ops):
+ threading.Thread.__init__(self)
+ self.conn = conn
+ self.filename = filename
+ self.num_ops = num_ops
+ self.exc = False
+
+ def run(self):
+ c = self.conn
+ try:
+ for i in range(self.num_ops):
+ f = c.create(self.filename, CreateDisposition=3,
+ DesiredAccess=security.SEC_STD_DELETE)
+ c.delete_on_close(f, True)
+ c.close(f)
+ except Exception:
+ self.exc = sys.exc_info()
+
+ def test_OpenClose(self):
+
+ c = libsmb.Conn(
+ self.server_ip,
+ "tmp",
+ self.lp,
+ self.creds,
+ multi_threaded=True,
+ force_smb1=True)
+
+ mythreads = []
+
+ for i in range(3):
+ t = LibsmbTestCase.OpenClose(c, "test" + str(i), 10)
+ mythreads.append(t)
+
+ for t in mythreads:
+ t.start()
+
+ for t in mythreads:
+ t.join()
+ if t.exc:
+ raise t.exc[0](t.exc[1])
+
+ def test_SMB3EncryptionRequired(self):
+ test_dir = 'testing_%d' % random.randint(0, 0xFFFF)
+
+ self.creds.set_smb_encryption(SMB_ENCRYPTION_REQUIRED)
+
+ c = libsmb.Conn(self.server_ip, "tmp", self.lp, self.creds)
+
+ c.mkdir(test_dir)
+ c.rmdir(test_dir)
+
+ def test_SMB1EncryptionRequired(self):
+ test_dir = 'testing_%d' % random.randint(0, 0xFFFF)
+
+ self.creds.set_smb_encryption(SMB_ENCRYPTION_REQUIRED)
+
+ c = libsmb.Conn(
+ self.server_ip,
+ "tmp",
+ self.lp,
+ self.creds,
+ force_smb1=True)
+
+ c.mkdir(test_dir)
+ c.rmdir(test_dir)
+
+ def test_RenameDstDelOnClose(self):
+
+ dstdir = "\\dst-subdir"
+
+ c1 = libsmb.Conn(self.server_ip, "tmp", self.lp, self.creds)
+ c2 = libsmb.Conn(self.server_ip, "tmp", self.lp, self.creds)
+
+ try:
+ c1.deltree(dstdir)
+ except:
+ pass
+
+ c1.mkdir(dstdir)
+ dnum = c1.create(dstdir, DesiredAccess=security.SEC_STD_DELETE)
+ c1.delete_on_close(dnum,1)
+ c2.savefile("\\src.txt", b"Content")
+
+ with self.assertRaises(NTSTATUSError) as cm:
+ c2.rename("\\src.txt", dstdir + "\\dst.txt")
+ if (cm.exception.args[0] != NT_STATUS_DELETE_PENDING):
+ raise AssertionError("Rename must fail with DELETE_PENDING")
+
+ c1.delete_on_close(dnum,0)
+ c1.close(dnum)
+
+ try:
+ c1.deltree(dstdir)
+ c1.unlink("\\src.txt")
+ except:
+ pass
+
+ def test_libsmb_CreateContexts(self):
+ c = libsmb.Conn(self.server_ip, "tmp", self.lp, self.creds)
+ cc_in = [(libsmb.SMB2_CREATE_TAG_MXAC, b'')]
+ fnum,cr,cc = c.create_ex("",CreateContexts=cc_in)
+ self.assertEqual(
+ cr['file_attributes'] & libsmb.FILE_ATTRIBUTE_DIRECTORY,
+ libsmb.FILE_ATTRIBUTE_DIRECTORY)
+ self.assertEqual(cc[0][0],libsmb.SMB2_CREATE_TAG_MXAC)
+ self.assertEqual(len(cc[0][1]),8)
+ c.close(fnum)
+
+ def test_libsmb_TortureCaseSensitivity(self):
+ testdir = "test_libsmb_torture_case_sensitivity"
+ filename = "file"
+ filepath = testdir + "/" + filename
+
+ c = libsmb.Conn(self.server_ip, "tmp", self.lp, self.creds)
+
+ try:
+ c.deltree(testdir)
+ except:
+ pass
+
+ c.mkdir(testdir)
+
+ try:
+ # Now check for all possible upper-/lowercase combinations:
+ # - testdir/file
+ # - TESTDIR/file
+ # - testdir/FILE
+ # - TESTDIR/FILE
+
+ dircases = [testdir, testdir, testdir.upper(), testdir.upper()]
+ filecases = [filename, filename.upper(), filename, filename.upper()]
+ tcases = [{'dir':dir, 'file':file} for dir,file in zip(dircases,filecases)]
+
+ for tcase in tcases:
+ testpath = tcase['dir'] + "/" + tcase['file']
+
+ # Create the testfile
+ h = c.create(filepath,
+ DesiredAccess=security.SEC_FILE_ALL,
+ CreateDisposition=libsmb.FILE_OPEN_IF)
+ c.close(h)
+
+ # Open
+ c.loadfile(testpath)
+
+ # Search
+ ls = [f['name'] for f in c.list(tcase['dir'], mask=tcase['file'])]
+ self.assertIn(filename, ls, msg='When searching for "%s" not found in "%s"' % (tcase['file'], tcase['dir']))
+
+ # Rename
+ c.rename(testpath, tcase['dir'] + "/tmp")
+ c.rename(tcase['dir'] + "/TMP", filepath)
+ c.loadfile(testpath)
+
+ # Delete
+ c.unlink(testpath)
+
+ finally:
+ c.deltree(testdir)
+
+ def test_libsmb_TortureDirCaseSensitive(self):
+ c = libsmb.Conn(self.server_ip, "lowercase", self.lp, self.creds)
+ c.mkdir("subdir")
+ c.mkdir("subdir/b")
+ ret = c.chkpath("SubDir/b")
+ c.rmdir("subdir/b")
+ c.rmdir("subdir")
+ self.assertTrue(ret)
+
+ def test_libsmb_shadow_depot(self):
+ c = libsmb.Conn(self.server_ip, "shadow_depot", self.lp, self.creds)
+ try:
+ fnum=c.create("x:y",CreateDisposition=libsmb.FILE_CREATE)
+ c.close(fnum)
+ except:
+ self.fail()
+ finally:
+ # "c" might have crashed, get a new connection
+ c1 = libsmb.Conn(self.server_ip, "shadow_depot", self.lp, self.creds)
+ c1.unlink("x")
+ c1 = None
+
+ def test_gencache_pollution_bz15481(self):
+ c = libsmb.Conn(self.server_ip, "tmp", self.lp, self.creds)
+ fh = c.create("file",
+ DesiredAccess=security.SEC_STD_DELETE,
+ CreateDisposition=libsmb.FILE_CREATE)
+
+ # prime the gencache File->file
+ fh_upper = c.create("File",
+ DesiredAccess=security.SEC_FILE_READ_ATTRIBUTE,
+ CreateDisposition=libsmb.FILE_OPEN)
+ c.close(fh_upper)
+
+ c.delete_on_close(fh, 1)
+ c.close(fh)
+
+ fh = c.create("File",
+ DesiredAccess=security.SEC_STD_DELETE,
+ CreateDisposition=libsmb.FILE_CREATE)
+
+ directory = c.list("\\", "File")
+
+ c.delete_on_close(fh, 1)
+ c.close(fh)
+
+ # Without the bugfix for 15481 we get 'file' not 'File'
+ self.assertEqual(directory[0]['name'], 'File')
+
+ def test_stream_close_with_full_information(self):
+ c = libsmb.Conn(self.server_ip, "streams_xattr", self.lp, self.creds)
+
+ try:
+ c.deltree("teststreams")
+ except:
+ pass
+
+ c.mkdir("teststreams")
+ fh = c.create("teststreams\\stream_full_close_info.txt:Stream",
+ DesiredAccess=security.SEC_STD_DELETE,
+ CreateDisposition=libsmb.FILE_CREATE)
+ c.delete_on_close(fh, 1)
+
+ try:
+ c.close(fh, libsmb.SMB2_CLOSE_FLAGS_FULL_INFORMATION)
+ except:
+ self.fail()
+
+ c.deltree("teststreams")
+
+if __name__ == "__main__":
+ import unittest
+ unittest.main()
diff --git a/python/samba/tests/libsmb.py b/python/samba/tests/libsmb.py
new file mode 100644
index 0000000..cb632d0
--- /dev/null
+++ b/python/samba/tests/libsmb.py
@@ -0,0 +1,55 @@
+# Unix SMB/CIFS implementation.
+# Copyright Volker Lendecke <vl@samba.org> 2012
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for samba.samba3.libsmb."""
+
+from samba.samba3 import libsmb_samba_internal as libsmb
+from samba.dcerpc import security
+from samba.samba3 import param as s3param
+from samba import credentials
+from samba import (ntstatus,NTSTATUSError)
+import samba.tests
+import os
+
+class LibsmbTests(samba.tests.TestCase):
+
+ def setUp(self):
+ self.lp = s3param.get_context()
+ self.lp.load(samba.tests.env_get_var_value("SMB_CONF_PATH"))
+
+ self.creds = credentials.Credentials()
+ self.creds.guess(self.lp)
+ self.creds.set_domain(samba.tests.env_get_var_value("DOMAIN"))
+ self.creds.set_username(samba.tests.env_get_var_value("USERNAME"))
+ self.creds.set_password(samba.tests.env_get_var_value("PASSWORD"))
+
+ # Build the global inject file path
+ server_conf = samba.tests.env_get_var_value("SERVERCONFFILE")
+ server_conf_dir = os.path.dirname(server_conf)
+ self.global_inject = os.path.join(server_conf_dir, "global_inject.conf")
+
+ self.server_ip = samba.tests.env_get_var_value("SERVER_IP")
+
+ def clean_file(self, conn, filename):
+ try:
+ conn.unlink(filename)
+ except NTSTATUSError as e:
+ if e.args[0] == ntstatus.NT_STATUS_FILE_IS_A_DIRECTORY:
+ conn.rmdir(filename)
+ elif not (e.args[0] == ntstatus.NT_STATUS_OBJECT_NAME_NOT_FOUND or
+ e.args[0] == ntstatus.NT_STATUS_OBJECT_PATH_NOT_FOUND):
+ raise
diff --git a/python/samba/tests/loadparm.py b/python/samba/tests/loadparm.py
new file mode 100644
index 0000000..fc79e28
--- /dev/null
+++ b/python/samba/tests/loadparm.py
@@ -0,0 +1,84 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Andrew Bartlett <abartlet@samba.org>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from samba.tests import TestCaseInTempDir
+from samba import param
+import os
+
+# the python bindings for LoadParm objects map (by default) to a single global
+# object in the underlying C code. E.g. if we create 2 different LoadParm
+# objects in python, really they're just the same object underneath.
+
+
+class LoadParmTest(TestCaseInTempDir):
+
+ def test_global_loadparm(self):
+ # create 2 different Loadparm objects (which are really the same
+ # object underneath)
+ lp1 = param.LoadParm()
+ lp2 = param.LoadParm()
+
+ # we can prove this by setting a value on lp1 and assert that the
+ # change is also reflected on lp2
+ lp1_realm = "JUST.A.TEST"
+ self.assertNotEqual(lp2.get('realm'), lp1_realm)
+ lp1.set('realm', lp1_realm)
+ self.assertEqual(lp1.get('realm'), lp1_realm)
+ self.assertEqual(lp2.get('realm'), lp1_realm)
+
+ def touch_temp_file(self, filename):
+ filepath = os.path.join(self.tempdir, filename)
+ open(filepath, 'a').close()
+ # delete the file once the test completes
+ self.addCleanup(os.remove, filepath)
+ return filepath
+
+ def test_non_global_loadparm(self):
+ # create a empty smb.conf file
+ smb_conf = self.touch_temp_file("smb.conf")
+
+ # we can create a non-global Loadparm that overrides the default
+ # behaviour and creates a separate underlying object
+ lp1 = param.LoadParm()
+ lp2 = param.LoadParm(filename_for_non_global_lp=smb_conf)
+
+ # setting a value for the global LP does not affect the non-global LP
+ lp1_realm = "JUST.A.TEST"
+ self.assertNotEqual(lp2.get('realm'), lp1_realm)
+ lp1.set('realm', lp1_realm)
+ self.assertEqual(lp1.get('realm'), lp1_realm)
+ self.assertNotEqual(lp2.get('realm'), lp1_realm)
+
+ # and vice versa
+ lp2_realm = "TEST.REALM.LP2"
+ lp2.set('realm', lp2_realm)
+ self.assertEqual(lp2.get('realm'), lp2_realm)
+ self.assertEqual(lp1.get('realm'), lp1_realm)
+
+ def test_non_global_loadparm_bad_path(self):
+ non_existent_file = os.path.join(self.tempdir, 'not-there')
+
+ # we can create a non-global Loadparm that overrides the default
+ # behaviour and creates a separate underlying object
+ self.assertRaises(ValueError,
+ param.LoadParm,
+ filename_for_non_global_lp=non_existent_file)
+
+ # still shouldn't be there
+ self.assertRaises(ValueError,
+ param.LoadParm,
+ non_existent_file)
diff --git a/python/samba/tests/logfiles.py b/python/samba/tests/logfiles.py
new file mode 100644
index 0000000..38c3804
--- /dev/null
+++ b/python/samba/tests/logfiles.py
@@ -0,0 +1,381 @@
+# Unix SMB/CIFS implementation.
+#
+# Copyright (C) Catalyst.Net Ltd. 2022
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+import subprocess
+import os
+from samba.tests import TestCaseInTempDir
+from pprint import pprint
+
+HERE = os.path.dirname(__file__)
+S4_SERVER = os.path.join(HERE, '../../../../bin/test_s4_logging')
+S3_SERVER = os.path.join(HERE, '../../../../bin/test_s3_logging')
+
+CLASS_LIST = ["all", "tdb", "printdrivers", "lanman", "smb",
+              "rpc_parse", "rpc_srv", "rpc_cli", "passdb", "sam", "auth",
+              "winbind", "vfs", "idmap", "quota", "acls", "locking", "msdfs",
+              "dmapi", "registry", "scavenger", "dns", "ldb", "tevent",
+              "auth_audit", "auth_json_audit", "kerberos", "drs_repl",
+              "smb2", "smb2_credits", "dsdb_audit", "dsdb_json_audit",
+              "dsdb_password_audit", "dsdb_password_json_audit",
+              "dsdb_transaction_audit", "dsdb_transaction_json_audit",
+              "dsdb_group_audit", "dsdb_group_json_audit"]  # known debug classes, in index order
+
+
+CLASS_CODES = {k: i for i, k in enumerate(CLASS_LIST)}  # class name -> numeric index printed by the logger
+
+
+class S4LoggingTests(TestCaseInTempDir):
+    server = S4_SERVER  # helper binary under test; the S3 subclass below overrides this
+    def _write_smb_conf(self,
+                        default_level=2,
+                        default_file="default",
+                        mapping=()):  # mapping: (debug_class, level or None, file or None) triples
+        self.smbconf = os.path.join(self.tempdir, "smb.conf")
+
+        with open(self.smbconf, "w") as f:
+            f.write('[global]\n')
+            if default_file is not None:
+                dest = os.path.join(self.tempdir,
+                                    default_file)
+                f.write(f"    log file = {dest}\n")
+
+            f.write("    log level = ")
+            if default_level:  # falsy (0/None) means: emit no default level
+                f.write(f"{default_level}")
+
+            for dbg_class, log_level, log_file in mapping:
+                f.write(' ')  # tokens are space-separated on the single 'log level' line
+                f.write(dbg_class)
+                if log_level is not None:
+                    f.write(f':{log_level}')
+                if log_file is not None:
+                    dest = os.path.join(self.tempdir,
+                                        log_file)
+
+                    f.write(f'@{dest}')
+            f.write('\n')
+        self.addCleanup(os.unlink, self.smbconf)
+
+    def _extract_log_level_line(self, new_level=2):
+        # extricate the 'log level' line from the smb.conf, returning
+        # the value, and replacing the log level line with something
+        # innocuous (or removing it entirely when new_level is None).
+        smbconf2 = self.smbconf + 'new'
+        with open(self.smbconf) as f:
+            with open(smbconf2, 'w') as f2:
+                for line in f:
+                    if 'log level' in line:
+                        debug_arg = line.split('=', 1)[1].strip()
+                        if new_level is not None:
+                            f2.write(f'    log level = {new_level}\n')
+                    else:
+                        f2.write(line)
+        os.replace(smbconf2, self.smbconf)
+        return debug_arg  # NameError if no 'log level' line existed -- callers always write one
+
+    def _get_expected_strings(self, mapping,
+                              level_filter,
+                              default_file='default',
+                              file_filter=None):
+        default = os.path.join(self.tempdir, default_file)
+        expected = {default: []}  # log file path -> lines the logger should emit there
+        # this kind of thing:
+        #  " logging for 'dns' [21], at level 4"
+        for dbg_class, log_level, log_file in mapping:
+            if log_file is None:
+                log_file = default_file  # None in a mapping entry means the default log file
+
+            f = os.path.join(self.tempdir, log_file)
+            expected.setdefault(f, [])
+            if log_level < level_filter:
+                continue  # configured level too low: the message is filtered out
+            if file_filter not in (None, log_file):
+                continue
+            s = (f" logging for '{dbg_class}' [{CLASS_CODES[dbg_class]}], "
+                 f"at level {level_filter}")
+            expected[f].append(s)
+
+        return expected
+
+    def _run_s4_logger(self, log_level, *extra_args):
+        cmd = [self.server,  # emits one ' logging for ...' line per debug class
+               '-s', self.smbconf,
+               '-L', str(log_level),
+               *extra_args]
+
+        p = subprocess.run(cmd,
+                           stdout=subprocess.PIPE,
+                           stderr=subprocess.PIPE)
+        self.assertEqual(p.returncode, 0,
+                         f"'{' '.join(cmd)}' failed ({p.returncode})")
+
+        return p.stdout.decode(), p.stderr.decode()  # (stdout, stderr) as text
+
+    def assert_string_contains(self, string, expected_lines,
+                               filename=None):  # filename is only used to label failures
+        expected_lines = set(expected_lines)
+        string_lines = set(string.split('\n'))
+        present_lines = string_lines & expected_lines
+        if present_lines != expected_lines:
+            if filename:
+                print(filename)
+            print("expected %d lines, found %d" %
+                  (len(expected_lines), len(present_lines)))
+            print("missing lines:")
+            pprint(expected_lines - present_lines)
+            raise AssertionError("missing lines")
+
+    def assert_file_contains(self, filename, expected_lines):  # file variant of assert_string_contains
+        with open(filename) as f:
+            string = f.read()
+        self.assert_string_contains(string, expected_lines, filename)
+
+    def assert_n_known_lines_string(self, string, n):
+        count = string.count("logging for '")  # deliberately no leading space: matches stripped stderr lines
+        if count != n:
+            raise AssertionError(
+                f"string has {count} lines, expected {n}")
+
+    def assert_n_known_lines(self, filename, n):
+        with open(filename) as f:
+            string = f.read()
+        count = string.count(" logging for '")  # leading space is present in log-file output
+        if count != n:
+            raise AssertionError(
+                f"(unknown) has {count} lines, expected {n}")  # NOTE(review): "(unknown)" looks like a placeholder for {filename}
+
+    def assert_unlink_expected_strings(self, expected_strings):
+        for k, v in expected_strings.items():  # k: log file path, v: expected lines
+            if not os.path.exists(k):
+                self.fail(f"{k} does not exist")
+            self.assert_file_contains(k, v)
+            self.assert_n_known_lines(k, len(v))  # exactly these lines, no extras
+            os.unlink(k)
+
+    def test_each_to_its_own(self):
+        level = 4
+        mapping = [(x, level, x) for x in CLASS_LIST]  # every class logs to its own file
+        expected_strings = self._get_expected_strings(mapping, level)
+
+        self._write_smb_conf(mapping=mapping)
+        stdout, stderr = self._run_s4_logger(level)
+        self.assert_unlink_expected_strings(expected_strings)
+
+    def test_all_to_one(self):
+        level = 4
+        dest = 'everything'
+        mapping = [(x, level, dest) for x in CLASS_LIST]  # all classes share one file
+        expected_strings = self._get_expected_strings(mapping, level)
+
+        self._write_smb_conf(mapping=mapping)
+        stdout, stderr = self._run_s4_logger(level)
+        self.assert_unlink_expected_strings(expected_strings)
+
+    def test_bifurcate(self):
+        level = 4
+        dests = ['even', 'odd']
+        mapping = [(x, level + 1, dests[i & 1])  # split classes by parity of index
+                   for i, x in enumerate(CLASS_LIST)]
+        expected_strings = self._get_expected_strings(mapping, level)
+
+        self._write_smb_conf(mapping=mapping)
+        stdout, stderr = self._run_s4_logger(level)
+        self.assert_unlink_expected_strings(expected_strings)
+
+    def test_bifurcate_level_out_of_range(self):
+        # nothing will be logged, because we're logging at too high a
+        # level.
+        level = 4
+        dests = ['even', 'odd']
+        mapping = [(x, level - 1, dests[i & 1])  # configured below the message level
+                   for i, x in enumerate(CLASS_LIST)]
+        expected_strings = self._get_expected_strings(mapping, level)
+
+        self._write_smb_conf(mapping=mapping)
+        stdout, stderr = self._run_s4_logger(level)
+        self.assert_unlink_expected_strings(expected_strings)
+
+    def test_bifurcate_misc_log_level(self):
+        # We are sending even numbers to default and odd numbers to
+        # 'odd', at various levels, depending on mod 3. Like this:
+        #
+        # log level = 2 all:5 \
+        #       tdb:4@odd \
+        #       printdrivers:3 \
+        #       lanman:5@odd \
+        #       smb:4 \
+        #       rpc_parse:3@odd \
+        #       rpc_srv:5 ...
+        #
+        # Therefore, 'default' should get classes that are (0 or 4) % 6
+        # and 'odd' should get classes that are (1 or 3) % 6.
+
+        level = 4
+        dests = [None, 'odd']
+        mapping = []
+        for i, x in enumerate(CLASS_LIST):
+            parity = i & 1
+            log_level = level + 1 - (i % 3)  # cycles through level+1, level, level-1
+            mapping.append((x, log_level, dests[parity]))
+
+        expected_strings = self._get_expected_strings(mapping, level)
+
+        self._write_smb_conf(mapping=mapping)
+        stdout, stderr = self._run_s4_logger(level)
+        self.assert_unlink_expected_strings(expected_strings)
+
+    def test_all_different_ways_cmdline_d(self):
+        level = 4
+        dests = [None, 'a', 'b', 'c']
+        mapping = []
+        seed = 123  # deterministic pseudo-random assignment of levels/files
+        for i, x in enumerate(CLASS_LIST):
+            d = seed & 3
+            seed = seed * 17 + 1
+            log_level = seed % 10
+            seed &= 0xff
+            mapping.append((x, log_level, dests[d]))
+
+        expected_strings = self._get_expected_strings(mapping, level)
+
+        self._write_smb_conf(mapping=mapping)
+        debug_arg = self._extract_log_level_line(26)  # pass the composite spec via -d, not smb.conf
+
+        stdout, stderr = self._run_s4_logger(level, '-d', debug_arg)
+        self.assert_unlink_expected_strings(expected_strings)
+
+    def test_all_different_ways_cmdline_d_interactive(self):
+        level = 4
+        dests = [None, 'a', 'b', 'c']
+        mapping = []
+        seed = 1234  # deterministic pseudo-random assignment, different from the test above
+        for i, x in enumerate(CLASS_LIST):
+            d = seed & 3
+            seed = seed * 13 + 1
+            log_level = seed % 10
+            seed &= 0xff
+            mapping.append((x, log_level, dests[d]))
+
+        expected_strings = self._get_expected_strings(mapping, level)
+
+        self._write_smb_conf(mapping=mapping)
+        debug_arg = self._extract_log_level_line(None)  # None: drop the smb.conf line entirely
+        stdout, stderr = self._run_s4_logger(level, '-d', debug_arg, '-i')
+        expected_lines = []
+        for v in expected_strings.values():
+            # stderr doesn't end up with leading ' '
+            expected_lines.extend([x.strip() for x in v])
+
+        self.assert_string_contains(stderr, expected_lines)
+        self.assert_n_known_lines_string(stderr, len(expected_lines))
+
+    def test_only_some_level_0(self):
+        # running the logger with -L 0 makes the log messages run at
+        # level 0 (i.e. DBG_ERR), so we always see them in default,
+        # even though smb.conf doesn't ask.
+        mapping = [(x, 3, ['default', 'bees']['b' in x])
+                   for x in CLASS_LIST]
+        expected_strings = self._get_expected_strings(mapping, 0)
+        self._write_smb_conf(mapping=[x for x in mapping if x[2] == 'bees'])
+        stdout, stderr = self._run_s4_logger(0)
+        self.assert_unlink_expected_strings(expected_strings)
+
+    def test_only_some_level_3(self):
+        # here, we're expecting the unmentioned non-b classes to just
+        # disappear.
+        level = 3
+        mapping = [(x, level, 'bees') for x in CLASS_LIST if 'b' in x]
+        expected_strings = self._get_expected_strings(mapping, level)
+        self._write_smb_conf(mapping=[x for x in mapping if x[2] == 'bees'])
+        stdout, stderr = self._run_s4_logger(level)
+        self.assert_unlink_expected_strings(expected_strings)
+
+    def test_none(self):
+        level = 4
+        mapping = []  # no per-class configuration at all
+        expected_strings = self._get_expected_strings(mapping, level)
+        self._write_smb_conf(mapping=mapping)
+        stdout, stderr = self._run_s4_logger(level)
+        self.assert_unlink_expected_strings(expected_strings)
+
+    def test_none_high_default(self):
+        # We set the default level to 5 and do nothing else special,
+        # which means we need a different mapping for the smb.conf
+        # than the expected strings.
+        level = 4
+        mapping = [(x, 5, 'default') for x in CLASS_LIST]
+        expected_strings = self._get_expected_strings(mapping, level)
+        # note the empty mapping in smb.conf
+        self._write_smb_conf(mapping=[], default_level=5)
+        stdout, stderr = self._run_s4_logger(level)
+        self.assert_unlink_expected_strings(expected_strings)
+
+    def test_none_high_cmdline_d(self):
+        # We set the default level to 2, but run the 'server' with -d 10.
+        level = 4
+        mapping = [(x, 10, 'default') for x in CLASS_LIST]
+        expected_strings = self._get_expected_strings(mapping, level)
+        # note the empty mapping in smb.conf
+        self._write_smb_conf(mapping=[])
+        stdout, stderr = self._run_s4_logger(level, '-d', '10')
+        self.assert_unlink_expected_strings(expected_strings)
+
+    def test_interactive_high_default_simple(self):
+        # running with -i should send everything to stderr.
+        level = 4
+        mapping = [(x, 5, 'default') for x in CLASS_LIST]
+        expected_strings = self._get_expected_strings(mapping, level)
+        self._write_smb_conf(mapping=[], default_level=5)
+        stdout, stderr = self._run_s4_logger(level, '-i')
+        expected_lines = []
+        for v in expected_strings.values():
+            # stderr doesn't end up with leading ' '
+            expected_lines.extend([x.strip() for x in v])
+
+        self.assert_string_contains(stderr, expected_lines)
+
+    def test_interactive_complex_smb_conf(self):
+        # running with -i should send everything to stderr. The
+        # smb.conf will set the levels, but the target files are
+        # overridden.
+        # (this is the test_bifurcate_misc_log_level() smb.conf).
+        level = 4
+        dests = [None, 'odd']
+        mapping = []
+        for i, x in enumerate(CLASS_LIST):
+            parity = i & 1
+            log_level = level + 1 - (i % 3)
+            mapping.append((x, log_level, dests[parity]))
+
+        expected_strings = self._get_expected_strings(mapping, level)
+
+        self._write_smb_conf(mapping=mapping)
+        stdout, stderr = self._run_s4_logger(level, '-i')
+        expected_lines = []
+        for v in expected_strings.values():
+            # stderr doesn't end up with leading ' '
+            expected_lines.extend([x.strip() for x in v])
+
+        self.assert_string_contains(stderr, expected_lines)
+
+
+class S3LoggingTests(S4LoggingTests):
+    server = S3_SERVER  # inherits all tests; only the logger binary differs
+    # These tests were developed for testing the test_logger when
+    # linked against CMDLINE_S4 (see lib/util/wscript_build), but can
+    # also run when linked against CMDLINE_S3.
diff --git a/python/samba/tests/lsa_string.py b/python/samba/tests/lsa_string.py
new file mode 100644
index 0000000..bcc76b5
--- /dev/null
+++ b/python/samba/tests/lsa_string.py
@@ -0,0 +1,68 @@
+# Tests for lsa.String helpers in source4/librpc/ndr/py_lsa.c
+#
+# Copyright (C) Catalyst IT Ltd. 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from samba.tests import TestCase
+from samba.dcerpc import lsa
+from samba.ndr import ndr_pack, ndr_unpack
+"""
+Tests for the C helper functions in source4/librpc/ndr/py_lsa.c
+for samba.dcerpc.lsa.String
+"""
+
+
+class LsaStringTests(TestCase):
+
+    def test_default_constructor(self):
+        s = lsa.String()
+        self.assertEqual(None, s.string)
+        self.assertEqual(0, s.size)
+        self.assertEqual(0, s.length)
+
+    def test_string_constructor(self):
+        CONTENT = "The content string"
+        s = lsa.String(CONTENT)
+        self.assertEqual(CONTENT, s.string)
+
+        # These should be zero
+        self.assertEqual(0, s.size)
+        self.assertEqual(0, s.length)
+
+        packed = ndr_pack(s)
+        unpacked = ndr_unpack(lsa.String, packed)
+
+        # Original object should be unchanged
+        self.assertEqual(0, s.size)
+        self.assertEqual(0, s.length)
+
+        # But they should be correct in the unpacked object
+        self.assertEqual(36, unpacked.size)  # 18 chars as UTF-16 = 36 bytes
+        self.assertEqual(36, unpacked.length)
+
+    def test_repr(self):
+        # test an empty string
+        self.assertEqual("lsaString(None)", repr(lsa.String()))
+        # and one with contents
+        self.assertEqual("lsaString('Hello world')",
+                         repr(lsa.String("Hello world")))
+
+    def test_to_string(self):
+        # test an empty string
+        self.assertEqual("", str(lsa.String()))
+        # and one with contents
+        self.assertEqual("Hello world",
+                         str(lsa.String("Hello world")))
diff --git a/python/samba/tests/messaging.py b/python/samba/tests/messaging.py
new file mode 100644
index 0000000..0cadd0d
--- /dev/null
+++ b/python/samba/tests/messaging.py
@@ -0,0 +1,174 @@
+# -*- coding: utf-8 -*-
+#
+# Unix SMB/CIFS implementation.
+# Copyright © Jelmer Vernooij <jelmer@samba.org> 2008
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for samba.messaging."""
+import samba
+from samba.messaging import Messaging
+from samba.tests import TestCase
+import time
+from samba.ndr import ndr_print
+from samba.dcerpc import server_id
+import random
+import os
+
+
+class MessagingTests(TestCase):
+
+    def get_context(self, *args, **kwargs):
+        kwargs['lp_ctx'] = samba.tests.env_loadparm()  # always use the test environment's loadparm
+        return Messaging(*args, **kwargs)
+
+    def test_register(self):
+        x = self.get_context()
+
+        def callback():
+            pass
+        callback_and_context = (callback, None)  # register() takes a (callable, context) pair
+        msg_type = x.register(callback_and_context)
+        self.addCleanup(x.deregister, callback_and_context, msg_type)
+        self.assertTrue(isinstance(msg_type, int))
+
+    def test_all_servers(self):
+        x = self.get_context()
+        self.assertTrue(isinstance(x.irpc_all_servers(), list))
+
+    def test_by_name(self):
+        x = self.get_context()
+        for name in x.irpc_all_servers():
+            self.assertTrue(isinstance(x.irpc_servers_byname(name.name), list))
+
+    def test_unknown_name(self):
+        x = self.get_context()
+        self.assertRaises(KeyError,
+                          x.irpc_servers_byname, "samba.messaging test NONEXISTING")
+
+    def test_assign_server_id(self):
+        x = self.get_context()
+        self.assertTrue(isinstance(x.server_id, server_id.server_id))
+
+    def test_add_remove_name(self):
+        x = self.get_context()
+        name = "samba.messaging test-%d" % random.randint(1, 1000000)  # avoid clashes between runs
+        x.irpc_add_name(name)
+        name_list = x.irpc_servers_byname(name)
+        self.assertEqual(len(name_list), 1)
+        self.assertEqual(ndr_print(x.server_id),
+                         ndr_print(name_list[0]))
+        x.irpc_remove_name(name)
+        self.assertRaises(KeyError,
+                          x.irpc_servers_byname, name)
+
+    def test_ping_speed(self):
+        got_ping = {"count": 0}  # dicts so the nested callbacks can mutate the counts
+        got_pong = {"count": 0}
+        timeout = False
+
+        msg_pong = 0
+        msg_ping = 0
+
+        server_ctx = self.get_context((0, 1))
+
+        def ping_callback(got_ping, msg_type, src, data):
+            got_ping["count"] += 1
+            server_ctx.send(src, msg_pong, data)  # echo the payload back as a pong
+
+        ping_callback_and_context = (ping_callback, got_ping)
+        msg_ping = server_ctx.register(ping_callback_and_context)
+        self.addCleanup(server_ctx.deregister,
+                        ping_callback_and_context,
+                        msg_ping)
+
+        def pong_callback(got_pong, msg_type, src, data):
+            got_pong["count"] += 1
+
+        client_ctx = self.get_context((0, 2))
+        pong_callback_and_context = (pong_callback, got_pong)
+        msg_pong = client_ctx.register(pong_callback_and_context)
+        self.addCleanup(client_ctx.deregister,
+                        pong_callback_and_context,
+                        msg_pong)
+
+        # Try both server_id forms (structure and tuple)
+        client_ctx.send((0, 1), msg_ping, "testing")
+
+        client_ctx.send((0, 1), msg_ping, "testing2")
+
+        start_time = time.time()
+
+        # NOTE WELL: If debugging this with GDB, then the timeout will
+        # fire while you are trying to understand it.
+
+        while (got_ping["count"] < 2 or got_pong["count"] < 2) and not timeout:
+            client_ctx.loop_once(0.1)
+            server_ctx.loop_once(0.1)
+            if time.time() - start_time > 1:
+                timeout = True
+
+        self.assertEqual(got_ping["count"], 2)
+        self.assertEqual(got_pong["count"], 2)
+
+    def test_pid_defaulting(self):
+        got_ping = {"count": 0}
+        got_pong = {"count": 0}
+        timeout = False
+
+        msg_pong = 0
+        msg_ping = 0
+
+        pid = os.getpid()
+        server_ctx = self.get_context((pid, 1))  # explicit (pid, task_id) form
+
+        def ping_callback(got_ping, msg_type, src, data):
+            got_ping["count"] += 1
+            server_ctx.send(src, msg_pong, data)
+
+        ping_callback_and_context = (ping_callback, got_ping)
+        msg_ping = server_ctx.register(ping_callback_and_context)
+        self.addCleanup(server_ctx.deregister,
+                        ping_callback_and_context,
+                        msg_ping)
+
+        def pong_callback(got_pong, msg_type, src, data):
+            got_pong["count"] += 1
+
+        client_ctx = self.get_context((2,))  # 1-tuple: the pid component should default
+        pong_callback_and_context = (pong_callback, got_pong)
+        msg_pong = client_ctx.register(pong_callback_and_context)
+        self.addCleanup(client_ctx.deregister,
+                        pong_callback_and_context,
+                        msg_pong)
+
+        # Try one and two element tuple forms
+        client_ctx.send((pid, 1), msg_ping, "testing")
+
+        client_ctx.send((1,), msg_ping, "testing2")
+
+        start_time = time.time()
+
+        # NOTE WELL: If debugging this with GDB, then the timeout will
+        # fire while you are trying to understand it.
+
+        while (got_ping["count"] < 2 or got_pong["count"] < 2) and not timeout:
+            client_ctx.loop_once(0.1)
+            server_ctx.loop_once(0.1)
+            if time.time() - start_time > 1:
+                timeout = True
+
+        self.assertEqual(got_ping["count"], 2)
+        self.assertEqual(got_pong["count"], 2)
diff --git a/python/samba/tests/ndr/gkdi.py b/python/samba/tests/ndr/gkdi.py
new file mode 100755
index 0000000..fc88df9
--- /dev/null
+++ b/python/samba/tests/ndr/gkdi.py
@@ -0,0 +1,397 @@
+#!/usr/bin/env python3
+# Unix SMB/CIFS implementation.
+# Copyright (C) Catalyst.Net Ltd 2023
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+#
+
+import sys
+import os
+
+sys.path.insert(0, "bin/python")
+os.environ["PYTHONUNBUFFERED"] = "1"
+
+from samba.dcerpc import gkdi, misc
+from samba.ndr import ndr_pack, ndr_unpack
+import samba.tests
+
+
+def utf16_encoded_len(s: str) -> int:
+    """Return the number of bytes required to encode a string as
+    null-terminated UTF-16."""
+    if "\x00" in s:
+        raise ValueError("string contains an embedded null")
+
+    return len(s.encode("utf-16-le")) + 2  # +2 for the two-byte null terminator
+
+
+class KeyEnvelopeTests(samba.tests.TestCase):
+ key_envelope_blob = (
+ b"\x01\x00\x00\x00KDSK\x02\x00\x00\x00j\x01\x00\x00\x01\x00\x00\x00"
+ b"\x0e\x00\x00\x001\"\x92\x9d'\xaf;\xb7\x10V\xae\xb1\x8e\xec\xa7\x1a"
+ b"\x00\x00\x00\x00\x18\x00\x00\x00\x18\x00\x00\x00e\x00x\x00a\x00m\x00"
+ b"p\x00l\x00e\x00.\x00c\x00o\x00m\x00\x00\x00e\x00x\x00a\x00m\x00p\x00l\x00"
+ b"e\x00.\x00c\x00o\x00m\x00\x00\x00"
+ )
+
+ root_key_id = misc.GUID("9d922231-af27-b73b-1056-aeb18eeca71a")
+
+ domain_name = "example.com"
+ forest_name = "example.com"
+
+    def test_unpack(self):
+        """Unpack a GKDI Key Envelope blob and check its fields."""
+
+        envelope = ndr_unpack(gkdi.KeyEnvelope, self.key_envelope_blob)
+
+        self.assertEqual(1, envelope.version)
+        self.assertEqual(int.from_bytes(b"KDSK", byteorder="little"), envelope.magic)
+        self.assertEqual(gkdi.ENVELOPE_FLAG_KEY_MAY_ENCRYPT_NEW_DATA, envelope.flags)
+
+        self.assertEqual(362, envelope.l0_index)
+        self.assertEqual(1, envelope.l1_index)
+        self.assertEqual(14, envelope.l2_index)
+
+        self.assertEqual(self.root_key_id, envelope.root_key_id)
+
+        self.assertEqual(0, envelope.additional_info_len)
+        self.assertFalse(envelope.additional_info)  # this vector carries no additional info
+
+        self.assertEqual(self.domain_name, envelope.domain_name)
+        self.assertEqual(utf16_encoded_len(self.domain_name), envelope.domain_name_len)
+        self.assertEqual(self.forest_name, envelope.forest_name)
+        self.assertEqual(utf16_encoded_len(self.forest_name), envelope.forest_name_len)
+
+    def test_pack(self):
+        """Create a GKDI Key Envelope object and test that it packs to the
+        blob we expect."""
+
+        envelope = gkdi.KeyEnvelope()
+
+        envelope.version = 1
+        envelope.flags = gkdi.ENVELOPE_FLAG_KEY_MAY_ENCRYPT_NEW_DATA
+
+        envelope.l0_index = 362
+        envelope.l1_index = 1
+        envelope.l2_index = 14
+
+        envelope.root_key_id = self.root_key_id
+
+        envelope.additional_info = []
+        envelope.additional_info_len = 0
+
+        envelope.domain_name = self.domain_name
+        envelope.forest_name = self.forest_name  # name *_len fields left unset; packing evidently derives them
+
+        self.assertEqual(self.key_envelope_blob, ndr_pack(envelope))
+
+
+class GroupKeyEnvelopeTests(samba.tests.TestCase):
+ group_key_envelope_blob = (
+ b"\x01\x00\x00\x00KDSK\x00\x00\x00\x00j\x01\x00\x00\x01\x00\x00\x00"
+ b"\x0e\x00\x00\x00\x8c\xc4\x8c\xdevp\x94\x97\x05m\x897{Z\x80R&\x00\x00\x00"
+ b"\x1e\x00\x00\x00\x06\x00\x00\x00\x0c\x02\x00\x00\x00\x02\x00\x00"
+ b"\x00\x08\x00\x00@\x00\x00\x00@\x00\x00\x00\x18\x00\x00\x00\x18\x00\x00\x00"
+ b"S\x00P\x008\x000\x000\x00_\x001\x000\x008\x00_\x00C\x00T\x00R\x00_\x00"
+ b"H\x00M\x00A\x00C\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x0e\x00"
+ b"\x00\x00\x00\x00\x00\x00S\x00H\x00A\x005\x001\x002\x00\x00\x00D\x00H\x00"
+ b"\x00\x00\x0c\x02\x00\x00DHPM\x00\x01\x00\x00\x87\xa8\xe6\x1d\xb4\xb6"
+ b"f<\xff\xbb\xd1\x9ce\x19Y\x99\x8c\xee\xf6\x08f\r\xd0\xf2],\xee\xd4C^"
+ b";\x00\xe0\r\xf8\xf1\xd6\x19W\xd4\xfa\xf7\xdfEa\xb2\xaa0\x16\xc3\xd9\x114\t"
+ b"o\xaa;\xf4)m\x83\x0e\x9a| \x9e\x0cd\x97Qz\xbdZ\x8a\x9d0k\xcfg\xed\x91\xf9"
+ b'\xe6r[GX\xc0"\xe0\xb1\xefBu\xbf{l[\xfc\x11\xd4_\x90\x88\xb9A\xf5N\xb1\xe5'
+ b"\x9b\xb8\xbc9\xa0\xbf\x120\x7f\\O\xdbp\xc5\x81\xb2?v\xb6:\xca\xe1\xca\xa6"
+ b"\xb7\x90-RRg5H\x8a\x0e\xf1<m\x9aQ\xbf\xa4\xab:\xd84w\x96RM\x8e\xf6\xa1"
+ b"g\xb5\xa4\x18%\xd9g\xe1D\xe5\x14\x05d%\x1c\xca\xcb\x83\xe6\xb4"
+ b"\x86\xf6\xb3\xca?yqP`&\xc0\xb8W\xf6\x89\x96(V\xde\xd4\x01\n\xbd\x0b"
+ b"\xe6!\xc3\xa3\x96\nT\xe7\x10\xc3u\xf2cu\xd7\x01A\x03\xa4\xb5C0\xc1\x98"
+ b"\xaf\x12a\x16\xd2'n\x11q_i8w\xfa\xd7\xef\t\xca\xdb\tJ\xe9\x1e\x1a"
+ b"\x15\x97?\xb3,\x9bs\x13M\x0b.wPf`\xed\xbdHL\xa7\xb1\x8f!\xef T\x07\xf4"
+ b"y:\x1a\x0b\xa1%\x10\xdb\xc1Pw\xbeF?\xffO\xedJ\xac\x0b\xb5U\xbe:l\x1b\x0ck"
+ b"G\xb1\xbc7s\xbf~\x8cob\x90\x12(\xf8\xc2\x8c\xbb\x18\xa5Z\xe3\x13A\x00"
+ b"\ne\x01\x96\xf91\xc7zW\xf2\xdd\xf4c\xe5\xe9\xec\x14Kw}\xe6*\xaa\xb8"
+ b"\xa8b\x8a\xc3v\xd2\x82\xd6\xed8d\xe6y\x82B\x8e\xbc\x83\x1d\x144\x8fo/"
+ b"\x91\x93\xb5\x04Z\xf2vqd\xe1\xdf\xc9g\xc1\xfb?.U\xa4\xbd\x1b\xff\xe8;"
+ b"\x9c\x80\xd0R\xb9\x85\xd1\x82\xea\n\xdb*;s\x13\xd3\xfe\x14\xc8HK\x1e\x05%"
+ b"\x88\xb9\xb7\xd2\xbb\xd2\xdf\x01a\x99\xec\xd0n\x15W\xcd\t\x15\xb35;\xbbd\xe0"
+ b"\xec7\x7f\xd0(7\r\xf9+R\xc7\x89\x14(\xcd\xc6~\xb6\x18KR=\x1d\xb2F\xc3/c"
+ b"\x07\x84\x90\xf0\x0e\xf8\xd6G\xd1H\xd4yTQ^#'\xcf\xef\x98\xc5\x82fKL\x0fl\xc4"
+ b"\x16Ye\x00x\x00a\x00m\x00p\x00l\x00e\x00.\x00c\x00o\x00m\x00\x00\x00e\x00"
+ b"x\x00a\x00m\x00p\x00l\x00e\x00.\x00c\x00o\x00m\x00\x00\x00D\x12\x1e\r[y"
+ b'\xf4\x91\x92\xf4\xb8\xff\xc7;\x03@|Xs\xda\x051\xf9"A\xd6\xc1\x1c\xceA'
+ b"\xa5\x05\x11\x84\x8f\xe3q\x81\xda\t\xcb\"\x8e\xbd\xa9p'\x0fM\xd6"
+ b"\xe8\xa1E\x00\x8b\xc1\x8bw\x91\xac{\x1d\x8d\xba\x03P\x13-\xa5\xf2\xfc\x94<'"
+ b"\xf3\xf6\x08\x17\xe3\xb4c\xd4\xc6\x08\xec\r\x03\x0e\xcd\xfdD\xe2\xbf\x90"
+ b"\xeai\xb6\xb1x\xa9s\x88w\xeci\xf9\xb5\xc1\xc43\x1a4^\x0f\xfd\xa0He"
+ b"(\x93\x95\x10\xc0\x85\xcb\x041D"
+ )
+
+ root_key_id = misc.GUID("de8cc48c-7076-9794-056d-89377b5a8052")
+
+ kdf_algorithm = "SP800_108_CTR_HMAC"
+
+ kdf_parameters = (
+ b"\x00\x00\x00\x00\x01\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00S\x00H\x00"
+ b"A\x005\x001\x002\x00\x00\x00"
+ )
+
+ secret_agreement_algorithm = "DH"
+
+ secret_agreement_parameters = (
+ b"\x0c\x02\x00\x00DHPM\x00\x01\x00\x00\x87\xa8\xe6\x1d\xb4\xb6f<"
+ b"\xff\xbb\xd1\x9ce\x19Y\x99\x8c\xee\xf6\x08f\r\xd0\xf2],\xee\xd4C^;\x00"
+ b"\xe0\r\xf8\xf1\xd6\x19W\xd4\xfa\xf7\xdfEa\xb2\xaa0\x16\xc3\xd9\x114\to\xaa"
+ b";\xf4)m\x83\x0e\x9a| \x9e\x0cd\x97Qz\xbdZ\x8a\x9d0k\xcfg\xed\x91\xf9\xe6r"
+ b'[GX\xc0"\xe0\xb1\xefBu\xbf{l[\xfc\x11\xd4_\x90\x88\xb9A\xf5N\xb1\xe5\x9b\xb8'
+ b"\xbc9\xa0\xbf\x120\x7f\\O\xdbp\xc5\x81\xb2?v\xb6:\xca\xe1\xca\xa6\xb7\x90"
+ b"-RRg5H\x8a\x0e\xf1<m\x9aQ\xbf\xa4\xab:\xd84w\x96RM\x8e\xf6\xa1g\xb5"
+ b"\xa4\x18%\xd9g\xe1D\xe5\x14\x05d%\x1c\xca\xcb\x83\xe6\xb4\x86\xf6\xb3\xca?y"
+ b"qP`&\xc0\xb8W\xf6\x89\x96(V\xde\xd4\x01\n\xbd\x0b\xe6!\xc3\xa3\x96\n"
+ b"T\xe7\x10\xc3u\xf2cu\xd7\x01A\x03\xa4\xb5C0\xc1\x98\xaf\x12a\x16\xd2'n\x11q_"
+ b"i8w\xfa\xd7\xef\t\xca\xdb\tJ\xe9\x1e\x1a\x15\x97?\xb3,\x9bs\x13M\x0b.wPf"
+ b"`\xed\xbdHL\xa7\xb1\x8f!\xef T\x07\xf4y:\x1a\x0b\xa1%\x10\xdb\xc1Pw\xbeF?"
+ b"\xffO\xedJ\xac\x0b\xb5U\xbe:l\x1b\x0ckG\xb1\xbc7s\xbf~\x8cob\x90\x12(\xf8"
+ b"\xc2\x8c\xbb\x18\xa5Z\xe3\x13A\x00\ne\x01\x96\xf91\xc7zW\xf2\xdd\xf4c\xe5"
+ b"\xe9\xec\x14Kw}\xe6*\xaa\xb8\xa8b\x8a\xc3v\xd2\x82\xd6\xed8d\xe6y\x82"
+ b"B\x8e\xbc\x83\x1d\x144\x8fo/\x91\x93\xb5\x04Z\xf2vqd\xe1\xdf\xc9g\xc1\xfb?.U"
+ b"\xa4\xbd\x1b\xff\xe8;\x9c\x80\xd0R\xb9\x85\xd1\x82\xea\n\xdb*;s"
+ b"\x13\xd3\xfe\x14\xc8HK\x1e\x05%\x88\xb9\xb7\xd2\xbb\xd2\xdf\x01a\x99"
+ b"\xec\xd0n\x15W\xcd\t\x15\xb35;\xbbd\xe0\xec7\x7f\xd0(7\r\xf9+R\xc7\x89\x14("
+ b"\xcd\xc6~\xb6\x18KR=\x1d\xb2F\xc3/c\x07\x84\x90\xf0\x0e\xf8\xd6G\xd1H\xd4yTQ"
+ b"^#'\xcf\xef\x98\xc5\x82fKL\x0fl\xc4\x16Y"
+ )
+
+ domain_name = "example.com"
+ forest_name = "example.com"
+
+ l1_key = (
+ b'D\x12\x1e\r[y\xf4\x91\x92\xf4\xb8\xff\xc7;\x03@|Xs\xda\x051\xf9"'
+ b'A\xd6\xc1\x1c\xceA\xa5\x05\x11\x84\x8f\xe3q\x81\xda\t\xcb"\x8e\xbd'
+ b"\xa9p'\x0fM\xd6\xe8\xa1E\x00\x8b\xc1\x8bw\x91\xac{\x1d\x8d\xba"
+ )
+
+ l2_key = (
+ b"\x03P\x13-\xa5\xf2\xfc\x94<'\xf3\xf6\x08\x17\xe3\xb4c\xd4\xc6\x08"
+ b"\xec\r\x03\x0e\xcd\xfdD\xe2\xbf\x90\xeai\xb6\xb1x\xa9s\x88w\xeci\xf9\xb5\xc1"
+ b"\xc43\x1a4^\x0f\xfd\xa0He(\x93\x95\x10\xc0\x85\xcb\x041D"
+ )
+
+    def test_unpack(self):
+        """Unpack a GKDI Group Key Envelope blob and check its fields."""
+
+        envelope = ndr_unpack(gkdi.GroupKeyEnvelope, self.group_key_envelope_blob)
+
+        self.assertEqual(1, envelope.version)
+        self.assertEqual(int.from_bytes(b"KDSK", byteorder="little"), envelope.magic)
+        self.assertEqual(0, envelope.flags)
+
+        self.assertEqual(362, envelope.l0_index)
+        self.assertEqual(1, envelope.l1_index)
+        self.assertEqual(14, envelope.l2_index)
+
+        self.assertEqual(self.root_key_id, envelope.root_key_id)
+
+        self.assertEqual(512, envelope.private_key_len)  # NOTE(review): 512/2048 suggest bit lengths -- confirm against MS-GKDI
+        self.assertEqual(2048, envelope.public_key_len)
+
+        self.assertEqual(self.kdf_algorithm, envelope.kdf_algorithm)
+        self.assertEqual(
+            utf16_encoded_len(self.kdf_algorithm), envelope.kdf_algorithm_len
+        )
+        self.assertEqual(len(self.kdf_parameters), envelope.kdf_parameters_len)
+        self.assertEqual(list(self.kdf_parameters), envelope.kdf_parameters)
+
+        self.assertEqual(
+            utf16_encoded_len(self.secret_agreement_algorithm),
+            envelope.secret_agreement_algorithm_len,
+        )
+        self.assertEqual(
+            self.secret_agreement_algorithm, envelope.secret_agreement_algorithm
+        )
+        self.assertEqual(
+            len(self.secret_agreement_parameters),
+            envelope.secret_agreement_parameters_len,
+        )
+        self.assertEqual(
+            list(self.secret_agreement_parameters), envelope.secret_agreement_parameters
+        )
+
+        self.assertEqual(self.domain_name, envelope.domain_name)
+        self.assertEqual(utf16_encoded_len(self.domain_name), envelope.domain_name_len)
+        self.assertEqual(self.forest_name, envelope.forest_name)
+        self.assertEqual(utf16_encoded_len(self.forest_name), envelope.forest_name_len)
+
+        self.assertEqual(len(self.l1_key), envelope.l1_key_len)
+        self.assertEqual(list(self.l1_key), envelope.l1_key)
+        self.assertEqual(len(self.l2_key), envelope.l2_key_len)
+        self.assertEqual(list(self.l2_key), envelope.l2_key)
+
+    def test_pack(self):
+        """Create a GKDI Group Key Envelope object and test that it packs to the
+        blob we expect."""
+
+        envelope = gkdi.GroupKeyEnvelope()
+
+        envelope.version = 1
+        envelope.flags = 0
+
+        envelope.l0_index = 362
+        envelope.l1_index = 1
+        envelope.l2_index = 14
+
+        envelope.root_key_id = self.root_key_id
+
+        envelope.private_key_len = 512
+        envelope.public_key_len = 2048
+
+        envelope.kdf_algorithm = self.kdf_algorithm
+
+        envelope.kdf_parameters = list(self.kdf_parameters)
+        envelope.kdf_parameters_len = len(self.kdf_parameters)
+
+        envelope.secret_agreement_algorithm = self.secret_agreement_algorithm
+
+        envelope.secret_agreement_parameters = list(self.secret_agreement_parameters)
+        envelope.secret_agreement_parameters_len = len(self.secret_agreement_parameters)
+
+        envelope.domain_name = self.domain_name
+        envelope.forest_name = self.forest_name  # name *_len fields left unset, as in KeyEnvelopeTests
+
+        envelope.l1_key = list(self.l1_key)
+        envelope.l1_key_len = len(self.l1_key)
+
+        envelope.l2_key = list(self.l2_key)
+        envelope.l2_key_len = len(self.l2_key)
+
+        self.assertEqual(self.group_key_envelope_blob, ndr_pack(envelope))
+
+
+class KdfParametersTests(samba.tests.TestCase):
+ kdf_parameters_blob = (
+ b"\x00\x00\x00\x00\x01\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00S\x00H\x00"
+ b"A\x005\x001\x002\x00\x00\x00"
+ )
+
+ hash_algorithm = "SHA512"
+
+ def test_unpack(self):
+ """Unpack a GKDI KDF Parameters blob and check its fields."""
+
+ kdf_parameters = ndr_unpack(gkdi.KdfParameters, self.kdf_parameters_blob)
+
+ self.assertEqual(0, kdf_parameters.padding_0)
+ self.assertEqual(1, kdf_parameters.padding_1)
+ self.assertEqual(0, kdf_parameters.padding_2)
+
+ self.assertEqual(self.hash_algorithm, kdf_parameters.hash_algorithm)
+ self.assertEqual(
+ utf16_encoded_len(self.hash_algorithm), kdf_parameters.hash_algorithm_len
+ )
+
+ def test_pack(self):
+ """Create a GKDI KDF Parameters object and test that it packs to the
+ blob we expect."""
+
+ kdf_parameters = gkdi.KdfParameters()
+ kdf_parameters.hash_algorithm = self.hash_algorithm
+
+ self.assertEqual(self.kdf_parameters_blob, ndr_pack(kdf_parameters))
+
+
+class FfcDhParametersTests(samba.tests.TestCase):
+ ffc_dh_parameters_blob = (
+ b"\x0c\x02\x00\x00DHPM\x00\x01\x00\x00\x87\xa8\xe6\x1d\xb4\xb6f<"
+ b"\xff\xbb\xd1\x9ce\x19Y\x99\x8c\xee\xf6\x08f\r\xd0\xf2],\xee\xd4C^;\x00"
+ b"\xe0\r\xf8\xf1\xd6\x19W\xd4\xfa\xf7\xdfEa\xb2\xaa0\x16\xc3\xd9\x114\to\xaa"
+ b";\xf4)m\x83\x0e\x9a| \x9e\x0cd\x97Qz\xbdZ\x8a\x9d0k\xcfg\xed\x91\xf9\xe6r"
+ b'[GX\xc0"\xe0\xb1\xefBu\xbf{l[\xfc\x11\xd4_\x90\x88\xb9A\xf5N\xb1\xe5\x9b\xb8'
+ b"\xbc9\xa0\xbf\x120\x7f\\O\xdbp\xc5\x81\xb2?v\xb6:\xca\xe1\xca\xa6\xb7\x90"
+ b"-RRg5H\x8a\x0e\xf1<m\x9aQ\xbf\xa4\xab:\xd84w\x96RM\x8e\xf6\xa1g\xb5"
+ b"\xa4\x18%\xd9g\xe1D\xe5\x14\x05d%\x1c\xca\xcb\x83\xe6\xb4\x86\xf6\xb3\xca?y"
+ b"qP`&\xc0\xb8W\xf6\x89\x96(V\xde\xd4\x01\n\xbd\x0b\xe6!\xc3\xa3\x96\n"
+ b"T\xe7\x10\xc3u\xf2cu\xd7\x01A\x03\xa4\xb5C0\xc1\x98\xaf\x12a\x16\xd2'n\x11q_"
+ b"i8w\xfa\xd7\xef\t\xca\xdb\tJ\xe9\x1e\x1a\x15\x97?\xb3,\x9bs\x13M\x0b.wPf"
+ b"`\xed\xbdHL\xa7\xb1\x8f!\xef T\x07\xf4y:\x1a\x0b\xa1%\x10\xdb\xc1Pw\xbeF?"
+ b"\xffO\xedJ\xac\x0b\xb5U\xbe:l\x1b\x0ckG\xb1\xbc7s\xbf~\x8cob\x90\x12(\xf8"
+ b"\xc2\x8c\xbb\x18\xa5Z\xe3\x13A\x00\ne\x01\x96\xf91\xc7zW\xf2\xdd\xf4c\xe5"
+ b"\xe9\xec\x14Kw}\xe6*\xaa\xb8\xa8b\x8a\xc3v\xd2\x82\xd6\xed8d\xe6y\x82"
+ b"B\x8e\xbc\x83\x1d\x144\x8fo/\x91\x93\xb5\x04Z\xf2vqd\xe1\xdf\xc9g\xc1\xfb?.U"
+ b"\xa4\xbd\x1b\xff\xe8;\x9c\x80\xd0R\xb9\x85\xd1\x82\xea\n\xdb*;s"
+ b"\x13\xd3\xfe\x14\xc8HK\x1e\x05%\x88\xb9\xb7\xd2\xbb\xd2\xdf\x01a\x99"
+ b"\xec\xd0n\x15W\xcd\t\x15\xb35;\xbbd\xe0\xec7\x7f\xd0(7\r\xf9+R\xc7\x89\x14("
+ b"\xcd\xc6~\xb6\x18KR=\x1d\xb2F\xc3/c\x07\x84\x90\xf0\x0e\xf8\xd6G\xd1H\xd4yTQ"
+ b"^#'\xcf\xef\x98\xc5\x82fKL\x0fl\xc4\x16Y"
+ )
+
+ field_order = (
+ b"\x87\xa8\xe6\x1d\xb4\xb6f<\xff\xbb\xd1\x9ce\x19Y\x99\x8c\xee\xf6\x08"
+ b"f\r\xd0\xf2],\xee\xd4C^;\x00\xe0\r\xf8\xf1\xd6\x19W\xd4\xfa\xf7\xdfE"
+ b"a\xb2\xaa0\x16\xc3\xd9\x114\to\xaa;\xf4)m\x83\x0e\x9a| \x9e\x0cd\x97Qz\xbd"
+ b'Z\x8a\x9d0k\xcfg\xed\x91\xf9\xe6r[GX\xc0"\xe0\xb1\xefBu\xbf{l[\xfc\x11'
+ b"\xd4_\x90\x88\xb9A\xf5N\xb1\xe5\x9b\xb8\xbc9\xa0\xbf\x120\x7f\\O\xdbp\xc5"
+ b"\x81\xb2?v\xb6:\xca\xe1\xca\xa6\xb7\x90-RRg5H\x8a\x0e\xf1<m\x9aQ\xbf\xa4\xab"
+ b":\xd84w\x96RM\x8e\xf6\xa1g\xb5\xa4\x18%\xd9g\xe1D\xe5\x14\x05d%"
+ b"\x1c\xca\xcb\x83\xe6\xb4\x86\xf6\xb3\xca?yqP`&\xc0\xb8W\xf6\x89\x96(V"
+ b"\xde\xd4\x01\n\xbd\x0b\xe6!\xc3\xa3\x96\nT\xe7\x10\xc3u\xf2cu\xd7\x01A\x03"
+ b"\xa4\xb5C0\xc1\x98\xaf\x12a\x16\xd2'n\x11q_i8w\xfa\xd7\xef\t\xca\xdb\tJ\xe9"
+ b"\x1e\x1a\x15\x97"
+ )
+
+ generator = (
+ b"?\xb3,\x9bs\x13M\x0b.wPf`\xed\xbdHL\xa7\xb1\x8f!\xef T\x07\xf4y:"
+ b"\x1a\x0b\xa1%\x10\xdb\xc1Pw\xbeF?\xffO\xedJ\xac\x0b\xb5U\xbe:l\x1b\x0ckG\xb1"
+ b"\xbc7s\xbf~\x8cob\x90\x12(\xf8\xc2\x8c\xbb\x18\xa5Z\xe3\x13A\x00\ne"
+ b"\x01\x96\xf91\xc7zW\xf2\xdd\xf4c\xe5\xe9\xec\x14Kw}\xe6*\xaa\xb8\xa8b"
+ b"\x8a\xc3v\xd2\x82\xd6\xed8d\xe6y\x82B\x8e\xbc\x83\x1d\x144\x8fo/\x91\x93"
+ b"\xb5\x04Z\xf2vqd\xe1\xdf\xc9g\xc1\xfb?.U\xa4\xbd\x1b\xff\xe8;\x9c\x80"
+ b"\xd0R\xb9\x85\xd1\x82\xea\n\xdb*;s\x13\xd3\xfe\x14\xc8HK\x1e\x05%\x88\xb9"
+ b"\xb7\xd2\xbb\xd2\xdf\x01a\x99\xec\xd0n\x15W\xcd\t\x15\xb35;\xbbd\xe0\xec7"
+ b"\x7f\xd0(7\r\xf9+R\xc7\x89\x14(\xcd\xc6~\xb6\x18KR=\x1d\xb2F\xc3/c\x07\x84"
+ b"\x90\xf0\x0e\xf8\xd6G\xd1H\xd4yTQ^#'\xcf\xef\x98\xc5\x82fKL\x0fl\xc4\x16Y"
+ )
+
+ def test_unpack(self):
+ """Unpack a GKDI FFC DH Parameters blob and check its fields."""
+
+ ffc_dh_parameters = ndr_unpack(
+ gkdi.FfcDhParameters, self.ffc_dh_parameters_blob
+ )
+
+ self.assertEqual(len(self.ffc_dh_parameters_blob), ffc_dh_parameters.length)
+ self.assertEqual(
+ int.from_bytes(b"DHPM", byteorder="little"), ffc_dh_parameters.magic
+ )
+
+ self.assertEqual(len(self.field_order), ffc_dh_parameters.key_length)
+ self.assertEqual(list(self.field_order), ffc_dh_parameters.field_order)
+ self.assertEqual(list(self.generator), ffc_dh_parameters.generator)
+
+ def test_pack(self):
+ """Create a GKDI FFC DH Parameters object and test that it packs to the
+ blob we expect."""
+
+ ffc_dh_parameters = gkdi.FfcDhParameters()
+
+ ffc_dh_parameters.field_order = list(self.field_order)
+ ffc_dh_parameters.generator = list(self.generator)
+ self.assertEqual(len(self.field_order), len(self.generator))
+ ffc_dh_parameters.key_length = len(self.field_order)
+
+ self.assertEqual(self.ffc_dh_parameters_blob, ndr_pack(ffc_dh_parameters))
+
+
+if __name__ == "__main__":
+ import unittest
+
+ unittest.main()
diff --git a/python/samba/tests/ndr/gmsa.py b/python/samba/tests/ndr/gmsa.py
new file mode 100755
index 0000000..16398af
--- /dev/null
+++ b/python/samba/tests/ndr/gmsa.py
@@ -0,0 +1,99 @@
+#!/usr/bin/env python3
+# Unix SMB/CIFS implementation.
+# Copyright (C) Catalyst.Net Ltd 2023
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import sys
+import os
+
+sys.path.insert(0, 'bin/python')
+os.environ['PYTHONUNBUFFERED'] = '1'
+
+from samba.dcerpc import gmsa
+from samba.ndr import ndr_pack, ndr_unpack
+import samba.tests
+
+
+class GmsaTests(samba.tests.TestCase):
+ managed_password_blob = (
+ b'\x01\x00\x00\x00"\x01\x00\x00\x10\x00\x00\x00\x12\x01\x1a\x01g\x86W\xa1'
+ b'\x13nT\x7fF\xeey\x88\xc8\x08\xd9\x04\xed\x0eK\x05\x92\xf8\x9e\xb8+\xd2\x92h'
+ b'Xg\xc3\x11\x9d\xd6\xea\xae\xf5\x81\n\x1a\xa4\xe0\x8eI|\xc3\x11c'
+ b'\xb2\xe7\x99\xe6\xeaf\xe3\x02,\x10\x0b\xf5\x95\x85\xa3FBt\xeb\xad$\x88\xfc('
+ b'\xac\xbd\x10\xa9\xb4M\xdeCjm5\xff\xf0\xe9Z\xe7\x906\t\xe8%"\n\xd3\r\xb6\xa8k'
+ b'\xb5D\xfa4\x0f\x86M-8\x95\x19=@\x07\xdfrG\x8dq\xce?x\x9b\xb19\xc4\xc1\xcf'
+ b"\xfdm9\x94\x8c\n\xfaje\xe3\xf5\xf8\xf9\r\x8cp\xf7',\xe6Z?c'\x93\xeb\x0eF"
+ b'\x97\xe5v\xc2\x1f6\xacU\xf4\x16z"\xb4\xeb\xb2Y<-"\xdcJ\xc8\xd4\xcaE_)\x9a'
+ b'\x18+\x8dM\x8d\xd1#-\xde\x1e\xfe:\xca\xf1K\x13tS\x19_EE_]H\xa0\xc4A'
+ b'\x91;\x80\xf9MF\x96\xb1q7\x9bZ\xc3\xb0,P\x1c\xf8\xe1kC\xbe\xac\xa5"cA\x1d'
+ b'\\\xf7r\xe7c\xe8\xd2\x9ap\xa1)>r\x18\xa1\xe3\x00\x00t\x95\x01i\x80\x17'
+ b'\x00\x00t71\xb6\x7f\x17\x00\x00'
+ )
+
+ current_password = (
+ b'g\x86W\xa1\x13nT\x7fF\xeey\x88\xc8\x08\xd9\x04\xed\x0eK\x05\x92\xf8\x9e\xb8'
+ b'+\xd2\x92hXg\xc3\x11\x9d\xd6\xea\xae\xf5\x81\n\x1a\xa4\xe0\x8eI|\xc3\x11c'
+ b'\xb2\xe7\x99\xe6\xeaf\xe3\x02,\x10\x0b\xf5\x95\x85\xa3FBt\xeb\xad$\x88\xfc('
+ b'\xac\xbd\x10\xa9\xb4M\xdeCjm5\xff\xf0\xe9Z\xe7\x906\t\xe8%"\n\xd3\r\xb6\xa8k'
+ b'\xb5D\xfa4\x0f\x86M-8\x95\x19=@\x07\xdfrG\x8dq\xce?x\x9b\xb19\xc4\xc1\xcf'
+ b"\xfdm9\x94\x8c\n\xfaje\xe3\xf5\xf8\xf9\r\x8cp\xf7',\xe6Z?c'\x93\xeb\x0eF"
+ b'\x97\xe5v\xc2\x1f6\xacU\xf4\x16z"\xb4\xeb\xb2Y<-"\xdcJ\xc8\xd4\xcaE_)\x9a'
+ b'\x18+\x8dM\x8d\xd1#-\xde\x1e\xfe:\xca\xf1K\x13tS\x19_EE_]H\xa0\xc4A'
+ b'\x91;\x80\xf9MF\x96\xb1q7\x9bZ\xc3\xb0,P\x1c\xf8\xe1kC\xbe\xac\xa5"cA\x1d'
+ b'\\\xf7r\xe7c\xe8\xd2\x9ap\xa1)>r\x18\xa1\xe3'
+ )
+
+ query_interval = 0x178069019574
+ unchanged_interval = 0x177fb6313774
+
+ def test_managed_password_blob_unpack(self):
+ """Unpack a GMSA Managed Password blob and check its fields."""
+
+ managed_password = ndr_unpack(gmsa.MANAGEDPASSWORD_BLOB,
+ self.managed_password_blob)
+
+ self.assertEqual(1, managed_password.version)
+ self.assertEqual(0, managed_password.reserved)
+ self.assertEqual(len(self.managed_password_blob),
+ managed_password.length)
+
+ self.assertEqual(self.current_password,
+ managed_password.passwords.current)
+ self.assertIsNone(managed_password.passwords.previous)
+
+ self.assertEqual(self.query_interval,
+ managed_password.passwords.query_interval)
+ self.assertEqual(self.unchanged_interval,
+ managed_password.passwords.unchanged_interval)
+
+ def test_managed_password_blob_pack(self):
+ """Create a GMSA Managed Password blob and test that it packs to the
+ blob we expect."""
+
+ managed_password = gmsa.MANAGEDPASSWORD_BLOB()
+
+ managed_password.passwords.current = self.current_password
+ managed_password.passwords.query_interval = self.query_interval
+ managed_password.passwords.unchanged_interval = self.unchanged_interval
+
+ self.assertEqual(self.managed_password_blob,
+ ndr_pack(managed_password))
+
+
+if __name__ == '__main__':
+ import unittest
+
+ unittest.main()
diff --git a/python/samba/tests/ndr/wbint.py b/python/samba/tests/ndr/wbint.py
new file mode 100644
index 0000000..d967165
--- /dev/null
+++ b/python/samba/tests/ndr/wbint.py
@@ -0,0 +1,139 @@
+# -*- coding: utf-8 -*-
+
+# Unix SMB/CIFS implementation.
+# Copyright © Andrew Bartlett <abartlet@samba.org> 2021
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+
+"""Test of Network Data Representation (NDR) marshalling and unmarshalling."""
+import samba
+import samba.tests
+import samba.ndr as ndr
+from samba.dcerpc import winbind, security, lsa
+
+class NdrTestCase(samba.tests.TestCase):
+ def test_wbint_Principal(self):
+ x = winbind.wbint_Principal()
+
+ x.sid = security.dom_sid(security.SID_NT_SCHANNEL_AUTHENTICATION)
+
+ x.type = lsa.SID_NAME_USER
+
+ x.name = "fred"
+
+ b = ndr.ndr_pack(x)
+
+ y = ndr.ndr_unpack(winbind.wbint_Principal, b)
+
+ self.assertEqual(x.sid, y.sid)
+ self.assertEqual(x.type, y.type)
+ self.assertEqual(x.name, y.name)
+
+ def test_wbint_Principal_null_name(self):
+ x = winbind.wbint_Principal()
+
+ x.sid = security.dom_sid(security.SID_NT_SCHANNEL_AUTHENTICATION)
+
+ x.type = lsa.SID_NAME_USER
+
+ x.name = None
+
+ b = ndr.ndr_pack(x)
+
+ y = ndr.ndr_unpack(winbind.wbint_Principal, b)
+
+ self.assertEqual(x.sid, y.sid)
+ self.assertEqual(x.type, y.type)
+ self.assertEqual(x.name, y.name)
+
+ def test_wbint_Principals(self):
+
+ principals = []
+
+ for i in range(0, 10):
+ x = winbind.wbint_Principal()
+
+ x.sid = security.dom_sid(security.SID_NT_SCHANNEL_AUTHENTICATION)
+
+ x.type = lsa.SID_NAME_USER
+
+ x.name = None
+
+ principals.append(x)
+
+ wb_principals = winbind.wbint_Principals()
+ wb_principals.num_principals = 10
+ wb_principals.principals = principals
+
+ b = ndr.ndr_pack(wb_principals)
+
+ unpacked_principals = ndr.ndr_unpack(winbind.wbint_Principals,
+ b)
+
+ self.assertEqual(wb_principals.num_principals,
+ unpacked_principals.num_principals)
+
+ for i in range(0, 10):
+ x = principals[i]
+ y = unpacked_principals.principals[i]
+ self.assertEqual(x.sid, y.sid)
+ self.assertEqual(x.type, y.type)
+ self.assertEqual(x.name, y.name)
+
+ def test_wbint_10_Principals(self):
+ num = 10
+ (principals, unpacked_principals) = self._test_wbint_Principals(num)
+
+ for i in range(0, num):
+ x = principals[i]
+ y = unpacked_principals.principals[i]
+ self.assertEqual(x.sid, y.sid)
+ self.assertEqual(x.type, y.type)
+ self.assertEqual(x.name, y.name)
+
+ def test_wbint_max_token_Principals(self):
+ self._test_wbint_Principals(samba._glue.ndr_token_max_list_size()+1)
+
+ def _test_wbint_Principals(self, num):
+
+ principals = []
+ for i in range(0, num):
+ x = winbind.wbint_Principal()
+
+ x.sid = security.dom_sid(security.SID_NT_SCHANNEL_AUTHENTICATION + "-%d" % num)
+
+ x.type = lsa.SID_NAME_USER
+
+ x.name = "fred%d" % num
+
+ principals.append(x)
+
+ wb_principals = winbind.wbint_Principals()
+ wb_principals.num_principals = num
+ wb_principals.principals = principals
+
+ b = ndr.ndr_pack(wb_principals)
+
+ try:
+ unpacked_principals = ndr.ndr_unpack(winbind.wbint_Principals,
+ b)
+ except RuntimeError as e:
+ self.fail(e)
+
+ self.assertEqual(wb_principals.num_principals,
+ unpacked_principals.num_principals)
+
+ return (principals, unpacked_principals)
diff --git a/python/samba/tests/net_join.py b/python/samba/tests/net_join.py
new file mode 100644
index 0000000..5bc3a1a
--- /dev/null
+++ b/python/samba/tests/net_join.py
@@ -0,0 +1,63 @@
+# Unix SMB/CIFS implementation.
+#
+# Copyright (C) Catalyst.Net Ltd. 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""
+Confirm that net.join_member works
+"""
+
+import samba.tests
+import os
+from samba.net import Net, LIBNET_JOIN_AUTOMATIC
+from samba.credentials import DONT_USE_KERBEROS
+from samba import NTSTATUSError, ntstatus
+import ctypes
+
+
+class NetJoinTests(samba.tests.TestCaseInTempDir):
+
+ def setUp(self):
+ super().setUp()
+ self.domain = os.environ["DOMAIN"]
+ self.server = os.environ["SERVER"]
+ self.lp = self.get_loadparm()
+ self.lp.set("private dir", self.tempdir)
+ self.lp.set("lock dir", self.tempdir)
+ self.lp.set("state directory", self.tempdir)
+
+ def test_net_join(self):
+ netbios_name = "NetJoinTest"
+ machinepass = "abcdefghij"
+ creds = self.insta_creds(template=self.get_credentials(),
+ kerberos_state=DONT_USE_KERBEROS)
+
+ net = Net(creds, self.lp, server=self.server)
+
+ # NOTE WELL: We must not run more than one successful
+ # net.join_member per file (process), as the shared
+ # secrets.ldb handle will be kept between runs.
+ try:
+ (join_password, sid, domain_name) = net.join_member(
+ self.domain, netbios_name, LIBNET_JOIN_AUTOMATIC,
+ machinepass=machinepass)
+ except NTSTATUSError as e:
+ code = ctypes.c_uint32(e.args[0]).value
+ if code == ntstatus.NT_STATUS_CONNECTION_DISCONNECTED:
+ self.fail("Connection failure")
+ raise
+ os.unlink(os.path.join(self.tempdir, "secrets.ldb"))
+ pass
diff --git a/python/samba/tests/net_join_no_spnego.py b/python/samba/tests/net_join_no_spnego.py
new file mode 100644
index 0000000..d0da28f
--- /dev/null
+++ b/python/samba/tests/net_join_no_spnego.py
@@ -0,0 +1,90 @@
+# Unix SMB/CIFS implementation.
+#
+# Copyright (C) Catalyst.Net Ltd. 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""
+Detect a null pointer exception in source3/smbd/sessetup.c.
+"""
+
+import samba.tests
+import os
+from samba.net import Net, LIBNET_JOIN_AUTOMATIC
+from samba.credentials import DONT_USE_KERBEROS
+from samba import NTSTATUSError, ntstatus
+import ctypes
+
+
+class NetJoinNoSpnegoTests(samba.tests.TestCaseInTempDir):
+
+ def setUp(self):
+ super().setUp()
+ self.domain = os.environ["DOMAIN"]
+ self.server = os.environ["SERVER"]
+ self.lp = self.get_loadparm()
+ self.lp.set("private dir", self.tempdir)
+ self.lp.set("lock dir", self.tempdir)
+ self.lp.set("state directory", self.tempdir)
+
+ def test_net_join_no_spnego(self):
+ self.lp.set("client ipc max protocol", "NT1")
+ self.lp.set("client use spnego", "no")
+ netbios_name = "NetJoinNoSpnego"
+ machinepass = "abcdefghij"
+ creds = self.insta_creds(template=self.get_credentials(),
+ kerberos_state=DONT_USE_KERBEROS)
+
+ net = Net(creds, self.lp, server=self.server)
+
+ try:
+ (join_password, sid, domain_name) = net.join_member(
+ self.domain, netbios_name, LIBNET_JOIN_AUTOMATIC,
+ machinepass=machinepass)
+ except NTSTATUSError as e:
+ code = ctypes.c_uint32(e.args[0]).value
+ if code == ntstatus.NT_STATUS_CONNECTION_DISCONNECTED:
+ self.fail("Connection failure")
+ elif code == ntstatus.NT_STATUS_ACCESS_DENIED:
+ return
+ else:
+ raise
+ self.fail("Should have rejected NTLMv2 without SPNEGO")
+
+ def test_net_join_no_spnego_ntlmv1(self):
+ self.lp.set("client ipc max protocol", "NT1")
+ self.lp.set("client use spnego", "no")
+ self.lp.set("client ntlmv2 auth", "no")
+ netbios_name = "NetJoinNoSpnego"
+ machinepass = "abcdefghij"
+ creds = self.insta_creds(template=self.get_credentials(),
+ kerberos_state=DONT_USE_KERBEROS)
+
+ net = Net(creds, self.lp, server=self.server)
+
+ # NOTE WELL: We must not run more than one successful
+ # net.join_member per file (process), as the shared
+ # secrets.ldb handle will be kept between runs.
+ try:
+ (join_password, sid, domain_name) = net.join_member(
+ self.domain, netbios_name, LIBNET_JOIN_AUTOMATIC,
+ machinepass=machinepass)
+ except NTSTATUSError as e:
+ code = ctypes.c_uint32(e.args[0]).value
+ if code == ntstatus.NT_STATUS_CONNECTION_DISCONNECTED:
+ self.fail("Connection failure")
+ raise
+ os.unlink(os.path.join(self.tempdir, "secrets.ldb"))
+ pass
diff --git a/python/samba/tests/netbios.py b/python/samba/tests/netbios.py
new file mode 100644
index 0000000..0358b0b
--- /dev/null
+++ b/python/samba/tests/netbios.py
@@ -0,0 +1,65 @@
+# -*- coding: utf-8 -*-
+# Unix SMB/CIFS implementation. Tests for netbios py module
+# Copyright (C) Noel Power <noel.power@suse.com> 2018
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import samba
+import os
+from samba import netbios
+
+
+class NetBiosTests(samba.tests.TestCase):
+ def setUp(self):
+ super().setUp()
+ self.n = netbios.Node()
+ self.ifc = os.environ["SERVER_IP"]
+ self.dc = os.environ["DC_NETBIOSNAME"]
+
+ def test_query_name(self):
+ (reply_from, names, addresses) = self.n.query_name(self.dc, self.ifc, timeout=4)
+ assert reply_from == self.ifc
+ assert names[0] == self.dc
+ assert addresses[0] == self.ifc
+
+ def test_name_status(self):
+ (reply_from, name, name_list) = self.n.name_status(self.dc, self.ifc, timeout=4)
+ assert reply_from == self.ifc
+ assert name[0] == self.dc
+ assert len(name_list) > 0
+
+ def test_register_name(self):
+ address = '127.0.0.3'
+ (reply_from, name, reply_address, code) = self.n.register_name((self.dc, 0x20), address, self.ifc, multi_homed=False, timeout=4)
+ assert reply_from == self.ifc
+ assert name[0] == self.dc
+ assert reply_address == address
+ assert code == 6
+
+ def disabled_test_refresh(self):
+        # Can't get the test below to work; disabling it for now.
+ address = '127.0.0.3'
+ res = self.n.refresh_name((self.dc, 0x20), address, self.ifc, timeout=10)
+
+
+class ValidNetbiosNameTests(samba.tests.TestCase):
+
+ def test_valid(self):
+ self.assertTrue(samba.valid_netbios_name("FOO"))
+
+ def test_too_long(self):
+ self.assertFalse(samba.valid_netbios_name("FOO" * 10))
+
+ def test_invalid_characters(self):
+ self.assertFalse(samba.valid_netbios_name("*BLA"))
diff --git a/python/samba/tests/netcmd.py b/python/samba/tests/netcmd.py
new file mode 100644
index 0000000..63f204a
--- /dev/null
+++ b/python/samba/tests/netcmd.py
@@ -0,0 +1,165 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2009-2011
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for samba.netcmd."""
+
+import os
+import tempfile
+
+from io import StringIO
+from samba.netcmd import Command
+from samba.netcmd.testparm import cmd_testparm
+from samba.netcmd.main import cmd_sambatool
+import samba.tests
+
+
+class NetCmdTestCase(samba.tests.TestCaseInTempDir):
+
+ def run_netcmd(self, cmd_klass, args, retcode=0):
+ cmd = cmd_klass(outf=StringIO(), errf=StringIO())
+ cmd.command_name = "apricots"
+ try:
+ retval = cmd._run(*args)
+ except Exception as e:
+ cmd.show_command_error(e)
+ retval = 1
+ self.assertEqual(retcode, retval)
+ return cmd.outf.getvalue(), cmd.errf.getvalue()
+
+ def iter_all_subcommands(self):
+ todo = list(cmd_sambatool.subcommands.items())
+ while todo:
+ (path, cmd) = todo.pop()
+ yield path, cmd
+ subcmds = getattr(cmd, "subcommands", {})
+ todo.extend([(path + " " + k, v) for (k, v) in
+ subcmds.items()])
+
+
+class TestParmTests(NetCmdTestCase):
+
+ def setUp(self):
+ super().setUp()
+
+ # Override these global parameters in case their default values are
+ # invalid.
+ contents = """[global]
+ netbios name = test
+ lock dir = /
+ pid directory = /
+[tmp]
+ path = /
+"""
+ self.smbconf = self.create_smbconf(contents)
+
+ def create_smbconf(self, contents):
+ smbconf = tempfile.NamedTemporaryFile(mode='w',
+ dir=self.tempdir,
+ delete=False)
+ self.addCleanup(os.remove, smbconf.name)
+
+ try:
+ smbconf.write(contents)
+ finally:
+ smbconf.close()
+
+ return smbconf
+
+ def test_no_client_ip(self):
+ out, err = self.run_netcmd(cmd_testparm, ["--client-name=foo"],
+ retcode=-1)
+ self.assertEqual("", out)
+ self.assertEqual(
+ "ERROR: Both a DNS name and an IP address are "
+ "required for the host access check\n", err)
+
+ def test_section(self):
+ # We don't get an opportunity to verify the output, as the parameters
+ # are dumped directly to stdout, so just check the return code.
+ self.run_netcmd(cmd_testparm,
+ ["--configfile=%s" % self.smbconf.name,
+ "--section-name=tmp"],
+ retcode=None)
+
+ def test_section_globals(self):
+ # We can have '[global]' and '[globals]'
+ for name in ['global', 'globals']:
+ self.run_netcmd(cmd_testparm,
+ [f"--configfile={self.smbconf.name}",
+ f"--section-name={name}"],
+ retcode=None)
+
+ def test_no_such_section(self):
+ out, err = self.run_netcmd(cmd_testparm,
+ ["--configfile=%s" % self.smbconf.name,
+ "--section-name=foo"],
+ retcode=-1)
+ # Ensure all exceptions are caught.
+ self.assertEqual("", out)
+ self.assertNotIn("uncaught exception", err)
+
+ out, err = self.run_netcmd(cmd_testparm,
+ ["--configfile=%s" % self.smbconf.name,
+ "--section-name=foo",
+ "--parameter-name=foo"],
+ retcode=-1)
+ # Ensure all exceptions are caught.
+ self.assertEqual("", out)
+ self.assertNotIn("uncaught exception", err)
+
+ def test_no_such_parameter(self):
+ out, err = self.run_netcmd(cmd_testparm,
+ ["--configfile=%s" % self.smbconf.name,
+ "--section-name=tmp",
+ "--parameter-name=foo"],
+ retcode=-1)
+ # Ensure all exceptions are caught.
+ self.assertEqual("", out)
+ self.assertNotIn("uncaught exception", err)
+
+
+class CommandTests(NetCmdTestCase):
+
+ def test_description(self):
+ class cmd_foo(Command):
+ """Mydescription"""
+ self.assertEqual("Mydescription", cmd_foo().short_description)
+
+ def test_name(self):
+ class cmd_foo(Command):
+ pass
+ self.assertEqual("foo", cmd_foo().name)
+
+ def test_synopsis_everywhere(self):
+ missing = []
+ for path, cmd in self.iter_all_subcommands():
+ if cmd.synopsis is None:
+ missing.append(path)
+ if missing:
+ self.fail("The following commands do not have a synopsis set: %r" %
+ missing)
+
+ def test_short_description_everywhere(self):
+ missing = []
+ for path, cmd in self.iter_all_subcommands():
+ if cmd.short_description is None:
+ missing.append(path)
+ if not missing:
+ return
+ self.fail(
+ "The following commands do not have a short description set: %r" %
+ missing)
diff --git a/python/samba/tests/netlogonsvc.py b/python/samba/tests/netlogonsvc.py
new file mode 100644
index 0000000..a509930
--- /dev/null
+++ b/python/samba/tests/netlogonsvc.py
@@ -0,0 +1,66 @@
+# Tests to check the netlogon service is only running when it's required
+#
+# Copyright (C) Catalyst IT Ltd. 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+from samba.tests import TestCase
+import os
+
+from samba.credentials import Credentials
+from samba.dcerpc import netlogon
+from samba import NTSTATUSError, ntstatus
+import ctypes
+
+"""
+Tests whether the netlogon service is running
+"""
+
+
+class NetlogonServiceTests(TestCase):
+
+ def setUp(self):
+ super().setUp()
+
+ self.server = os.environ["SERVER"]
+ self.lp = self.get_loadparm()
+ self.creds = Credentials()
+
+ # prefer the DC user/password in environments that have it
+ if "DC_USERNAME" in os.environ and "DC_PASSWORD" in os.environ:
+ self.creds.set_username(os.environ["DC_USERNAME"])
+ self.creds.set_password(os.environ["DC_PASSWORD"])
+ else:
+ self.creds.set_username(os.environ["USERNAME"])
+ self.creds.set_password(os.environ["PASSWORD"])
+
+ self.creds.guess(self.lp)
+
+ def test_have_netlogon_connection(self):
+ try:
+ c = self.get_netlogon_connection()
+ self.assertIsNotNone(c)
+ except NTSTATUSError as e:
+ # On non-DC test environments, netlogon should not be running on
+ # the server, so we expect the test to fail here
+ enum = ctypes.c_uint32(e.args[0]).value
+ if enum == ntstatus.NT_STATUS_OBJECT_NAME_NOT_FOUND:
+ self.fail("netlogon service is not running")
+ else:
+ raise
+
+ # Establish netlogon connection over NP
+ def get_netlogon_connection(self):
+ return netlogon.netlogon("ncacn_np:%s[seal]" % self.server, self.lp,
+ self.creds)
diff --git a/python/samba/tests/ntacls.py b/python/samba/tests/ntacls.py
new file mode 100644
index 0000000..0b7963d
--- /dev/null
+++ b/python/samba/tests/ntacls.py
@@ -0,0 +1,87 @@
+# Unix SMB/CIFS implementation. Tests for ntacls manipulation
+# Copyright (C) Matthieu Patou <mat@matws.net> 2009-2010
+# Copyright (C) Andrew Bartlett 2012
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for samba.ntacls."""
+
+import os
+
+from samba.ntacls import setntacl, getntacl, XattrBackendError
+from samba.param import LoadParm
+from samba.dcerpc import security
+from samba.tests import TestCaseInTempDir, SkipTest
+from samba.auth_util import system_session_unix
+
+NTACL_SDDL = "O:S-1-5-21-2212615479-2695158682-2101375467-512G:S-1-5-21-2212615479-2695158682-2101375467-513D:(A;OICI;FA;;;S-1-5-21-2212615479-2695158682-2101375467-512)"
+DOMAIN_SID = "S-1-5-21-2212615479-2695158682-2101375467"
+
+
+class NtaclsTests(TestCaseInTempDir):
+
+ def setUp(self):
+ super().setUp()
+ self.tempf = os.path.join(self.tempdir, "test")
+ open(self.tempf, 'w').write("empty")
+ self.session_info = system_session_unix()
+
+ def tearDown(self):
+ os.unlink(self.tempf)
+ super().tearDown()
+
+ def test_setntacl(self):
+ lp = LoadParm()
+ open(self.tempf, 'w').write("empty")
+ lp.set("posix:eadb", os.path.join(self.tempdir, "eadbtest.tdb"))
+ setntacl(lp, self.tempf, NTACL_SDDL, DOMAIN_SID, self.session_info)
+ os.unlink(os.path.join(self.tempdir, "eadbtest.tdb"))
+
+ def test_setntacl_getntacl(self):
+ lp = LoadParm()
+ open(self.tempf, 'w').write("empty")
+ lp.set("posix:eadb", os.path.join(self.tempdir, "eadbtest.tdb"))
+ setntacl(lp, self.tempf, NTACL_SDDL, DOMAIN_SID, self.session_info)
+ facl = getntacl(lp, self.tempf, self.session_info)
+ anysid = security.dom_sid(security.SID_NT_SELF)
+ self.assertEqual(facl.as_sddl(anysid), NTACL_SDDL)
+ os.unlink(os.path.join(self.tempdir, "eadbtest.tdb"))
+
+ def test_setntacl_getntacl_param(self):
+ lp = LoadParm()
+ open(self.tempf, 'w').write("empty")
+ setntacl(lp, self.tempf, NTACL_SDDL, DOMAIN_SID, self.session_info, "tdb",
+ os.path.join(self.tempdir, "eadbtest.tdb"))
+ facl = getntacl(lp, self.tempf, self.session_info, "tdb", os.path.join(
+ self.tempdir, "eadbtest.tdb"))
+ domsid = security.dom_sid(security.SID_NT_SELF)
+ self.assertEqual(facl.as_sddl(domsid), NTACL_SDDL)
+ os.unlink(os.path.join(self.tempdir, "eadbtest.tdb"))
+
+ def test_setntacl_invalidbackend(self):
+ lp = LoadParm()
+ open(self.tempf, 'w').write("empty")
+ self.assertRaises(XattrBackendError, setntacl, lp, self.tempf,
+ NTACL_SDDL, DOMAIN_SID, self.session_info, "ttdb",
+ os.path.join(self.tempdir, "eadbtest.tdb"))
+
+ def test_setntacl_forcenative(self):
+ if os.getuid() == 0:
+ raise SkipTest("Running test as root, test skipped")
+ lp = LoadParm()
+ open(self.tempf, 'w').write("empty")
+ lp.set("posix:eadb", os.path.join(self.tempdir, "eadbtest.tdb"))
+ self.assertRaises(Exception, setntacl, lp, self.tempf, NTACL_SDDL,
+ DOMAIN_SID, self.session_info, "native")
diff --git a/python/samba/tests/ntacls_backup.py b/python/samba/tests/ntacls_backup.py
new file mode 100644
index 0000000..d4e4294
--- /dev/null
+++ b/python/samba/tests/ntacls_backup.py
@@ -0,0 +1,198 @@
+# Unix SMB/CIFS implementation. Tests for ntacls manipulation
+# Copyright (C) Andrew Bartlett 2018
+# Copyright (C) Joe Guo <joeg@catalyst.net.nz> 2018
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for samba ntacls backup"""
+import os
+
+from samba.samba3 import libsmb_samba_internal as libsmb
+from samba.samba3 import smbd
+from samba import samdb
+from samba import ntacls
+
+from samba.auth import system_session
+from samba.auth_util import system_session_unix
+from samba.dcerpc import security
+from samba.tests import env_loadparm
+from samba.tests.smbd_base import SmbdBaseTests
+
+
class NtaclsBackupRestoreTests(SmbdBaseTests):
    """
    Tests for NTACLs backup and restore.

    Each test works against a live AD DC (``SERVER`` env var) and a test
    share (``test1``), creating a small file tree over SMB, snapshotting
    its NT ACLs, and verifying the backup/restore round trip.
    """

    def setUp(self):
        super().setUp()

        self.server = os.environ["SERVER"]  # addc
        samdb_url = 'ldap://' + self.server

        self.service = 'test1'  # service/share to test
        # root path for service
        self.service_root = os.path.join(
            os.environ["LOCAL_PATH"], self.service)

        self.smb_conf_path = os.environ['SMB_CONF_PATH']
        self.creds = self.insta_creds(template=self.get_credentials())

        # LDAP connection to the DC, used by backup_restore below.
        self.samdb_conn = samdb.SamDB(
            url=samdb_url, session_info=system_session(),
            credentials=self.creds, lp=env_loadparm())

        self.dom_sid = security.dom_sid(self.samdb_conn.get_domain_sid())

        # helper will load conf into lp, that's how smbd can find services.
        self.ntacls_helper = ntacls.NtaclsHelper(self.service,
                                                 self.smb_conf_path,
                                                 self.dom_sid)
        self.lp = self.ntacls_helper.lp

        # Authenticated SMB connection to the share under test.
        self.smb_conn = libsmb.Conn(
            self.server, self.service, lp=self.lp, creds=self.creds)

        self.smb_helper = ntacls.SMBHelper(self.smb_conn, self.dom_sid)

        self.tarfile_path = os.path.join(self.tempdir,
                                         'ntacls-backup.tar.gz')

        # an example file tree
        self.tree = {
            'file0.txt': b'test file0',
            'dir1': {
                'file1.txt': b'test file1',
                'dir2': {}  # an empty dir in dir
            },
        }

        # Start from a clean slate, then create the tree over SMB.
        self._delete_tarfile()
        self.smb_helper.delete_tree()

        self.smb_helper.create_tree(self.tree)
        self._check_tree()
        # keep a copy of ntacls after tree just created
        self.original_ntacls = self.smb_helper.get_ntacls()

    def tearDown(self):
        # Remove the tarball and the remote tree so tests do not leak
        # state into each other.
        self._delete_tarfile()
        self.smb_helper.delete_tree()
        super().tearDown()

    def _delete_tarfile(self):
        """Remove the backup tarball, ignoring a missing file."""
        try:
            os.remove(self.tarfile_path)
        except OSError:
            pass

    def _check_tarfile(self):
        """Assert the backup tarball was written to disk."""
        self.assertTrue(os.path.isfile(self.tarfile_path))

    def _check_tree(self):
        """Assert the remote tree matches the expected ``self.tree``."""
        actual_tree = self.smb_helper.get_tree()
        self.assertDictEqual(self.tree, actual_tree)

    def test_smbd_mkdir(self):
        """
        A smoke test for smbd.mkdir API
        """

        dirpath = os.path.join(self.service_root, 'a-dir')
        smbd.mkdir(dirpath, system_session_unix(), self.service)
        mode = os.stat(dirpath).st_mode

        # This works in conjunction with the TEST_UMASK in smbd_base
        # to ensure that permissions are not related to the umask
        # but instead the smb.conf settings
        self.assertEqual(mode & 0o777, 0o755)
        self.assertTrue(os.path.isdir(dirpath))

    def test_smbd_create_file(self):
        """
        A smoke test for smbd.create_file and smbd.unlink API
        """

        filepath = os.path.join(self.service_root, 'a-file')
        smbd.create_file(filepath, system_session_unix(), self.service)
        self.assertTrue(os.path.isfile(filepath))

        mode = os.stat(filepath).st_mode

        # This works in conjunction with the TEST_UMASK in smbd_base
        # to ensure that permissions are not related to the umask
        # but instead the smb.conf settings
        self.assertEqual(mode & 0o777, 0o644)

        # As well as checking that unlink works, this removes the
        # fake xattrs from the dev/inode based DB
        smbd.unlink(filepath, system_session_unix(), self.service)
        self.assertFalse(os.path.isfile(filepath))

    def test_compare_getntacl(self):
        """
        Ntacls get from different ways should be the same
        """

        file_name = 'file0.txt'
        file_path = os.path.join(self.service_root, file_name)

        # Via the SMB client connection...
        sd0 = self.smb_helper.get_acl(file_name, as_sddl=True)

        # ...via smbd's VFS (indirect access)...
        sd1 = self.ntacls_helper.getntacl(
            file_path, system_session_unix(), as_sddl=True, direct_db_access=False)

        # ...and via direct xattr/db access.
        sd2 = self.ntacls_helper.getntacl(
            file_path, system_session_unix(), as_sddl=True, direct_db_access=True)

        self.assertEqual(sd0, sd1)
        self.assertEqual(sd1, sd2)

    def test_backup_online(self):
        """
        Backup service online, delete files, restore and check.
        """
        ntacls.backup_online(
            self.smb_conn, self.tarfile_path, self.dom_sid)
        self._check_tarfile()

        self.smb_helper.delete_tree()
        ntacls.backup_restore(
            self.tarfile_path, self.service_root,
            self.samdb_conn, self.smb_conf_path)
        self._check_tree()

        # compare ntacls after restored
        self.assertDictEqual(
            self.original_ntacls, self.smb_helper.get_ntacls())

    def test_backup_offline(self):
        """
        Backup service offline, delete files, restore and check.
        """
        ntacls.backup_offline(
            self.service_root, self.tarfile_path,
            self.smb_conf_path, self.dom_sid)
        self._check_tarfile()

        self.smb_helper.delete_tree()
        ntacls.backup_restore(
            self.tarfile_path, self.service_root,
            self.samdb_conn, self.smb_conf_path)
        self._check_tree()

        # compare ntacls after restored
        self.assertDictEqual(
            self.original_ntacls, self.smb_helper.get_ntacls())
diff --git a/python/samba/tests/ntlm_auth.py b/python/samba/tests/ntlm_auth.py
new file mode 100644
index 0000000..fc5fa20
--- /dev/null
+++ b/python/samba/tests/ntlm_auth.py
@@ -0,0 +1,342 @@
+# Unix SMB/CIFS implementation.
+#
+# Copyright (C) Samuel Cabrero <scabrero@suse.de> 2018
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import os
+from subprocess import Popen, PIPE
+from samba.tests import BlackboxProcessError
+from samba.tests.ntlm_auth_base import NTLMAuthTestCase
+from samba.common import get_string
+
class NTLMAuthHelpersTests(NTLMAuthTestCase):
    """Black-box tests for the ntlm_auth helper protocols.

    Most tests drive a client helper against a server helper via
    ``run_helper`` (see NTLMAuthTestCase); the rest feed the squid /
    ntlm-server-1 protocols to a single ntlm_auth process directly.
    """

    def setUp(self):
        super().setUp()
        self.username = os.environ["DC_USERNAME"]
        self.password = os.environ["DC_PASSWORD"]
        self.domain = os.environ["DOMAIN"]
        # Resolve the user's SID via winbind; it is used as a group SID
        # for the --require-membership-of tests below.
        out = get_string(self.check_output("wbinfo -n %s" % self.username))
        self.group_sid = out.split(" ")[0]
        self.assertTrue(self.group_sid.startswith("S-1-5-21-"))
        # Truncating the SID yields a SID the user is not a member of.
        self.bad_group_sid = self.group_sid[:-2]

    def test_specified_domain(self):
        """ ntlm_auth with specified domain """

        username = "foo"
        password = "secret"
        domain = "FOO"

        ret = self.run_helper(client_username=username,
                              client_password=password,
                              client_domain=domain,
                              server_username=username,
                              server_password=password,
                              server_domain=domain,
                              server_use_winbind=False)
        self.assertTrue(ret)

        # Repeat with a mixed-case domain: domain matching must be
        # case-insensitive.
        username = "foo"
        password = "secret"
        domain = "fOo"

        ret = self.run_helper(client_username=username,
                              client_password=password,
                              client_domain=domain,
                              server_username=username,
                              server_password=password,
                              server_domain=domain,
                              server_use_winbind=False)
        self.assertTrue(ret)

    def test_against_winbind(self):
        """ ntlm_auth against winbindd """

        ret = self.run_helper(client_username=self.username,
                              client_password=self.password,
                              client_domain=self.domain,
                              server_use_winbind=True)
        self.assertTrue(ret)

    def test_ntlmssp_gss_spnego(self):
        """ ntlm_auth with NTLMSSP client and gss-spnego server """

        username = "foo"
        password = "secret"
        domain = "fOo"

        ret = self.run_helper(client_username=username,
                              client_password=password,
                              client_domain=domain,
                              server_username=username,
                              server_password=password,
                              server_domain=domain,
                              client_helper="ntlmssp-client-1",
                              server_helper="gss-spnego",
                              server_use_winbind=False)
        self.assertTrue(ret)

    def test_gss_spnego(self):
        """ ntlm_auth with NTLMSSP gss-spnego-client and gss-spnego server """

        username = "foo"
        password = "secret"
        domain = "fOo"

        ret = self.run_helper(client_username=username,
                              client_password=password,
                              client_domain=domain,
                              server_username=username,
                              server_password=password,
                              server_domain=domain,
                              client_helper="gss-spnego-client",
                              server_helper="gss-spnego",
                              server_use_winbind=False)
        self.assertTrue(ret)

    def test_gss_spnego_winbind(self):
        """ ntlm_auth with NTLMSSP gss-spnego-client and gss-spnego server
        against winbind """

        ret = self.run_helper(client_username=self.username,
                              client_password=self.password,
                              client_domain=self.domain,
                              client_helper="gss-spnego-client",
                              server_helper="gss-spnego",
                              server_use_winbind=True)
        self.assertTrue(ret)

    def test_ntlmssp_gss_spnego_cached_creds(self):
        """ ntlm_auth with NTLMSSP client and gss-spnego server against
        winbind with cached credentials """

        # Prime winbind's credential cache first via wbinfo --ccache-save
        # (%% escapes the literal '%' separating username and password).
        param = "--ccache-save=%s%s%s%%%s" % (self.domain,
                                              self.winbind_separator,
                                              self.username,
                                              self.password)
        cache_cmd = ["wbinfo",
                     param]
        self.check_exit_code(cache_cmd, 0)

        ret = self.run_helper(client_username=self.username,
                              client_password=self.password,
                              client_domain=self.domain,
                              client_use_cached_creds=True,
                              client_helper="ntlmssp-client-1",
                              server_helper="gss-spnego",
                              server_use_winbind=True)
        self.assertTrue(ret)

    def test_require_membership(self):
        """ ntlm_auth against winbindd with require-membership-of """

        # Membership of the user's own SID must succeed...
        ret = self.run_helper(client_username=self.username,
                              client_password=self.password,
                              client_domain=self.domain,
                              require_membership=self.group_sid,
                              server_use_winbind=True)
        self.assertTrue(ret)

        # ...and a SID the user does not hold must fail.
        ret = self.run_helper(client_username=self.username,
                              client_password=self.password,
                              client_domain=self.domain,
                              require_membership=self.bad_group_sid,
                              server_use_winbind=True)
        self.assertFalse(ret)

    def test_require_membership_gss_spnego(self):
        """ ntlm_auth with NTLMSSP gss-spnego-client and gss-spnego server
        against winbind with require-membership-of """

        ret = self.run_helper(client_username=self.username,
                              client_password=self.password,
                              client_domain=self.domain,
                              require_membership=self.group_sid,
                              client_helper="gss-spnego-client",
                              server_helper="gss-spnego",
                              server_use_winbind=True)
        self.assertTrue(ret)

        ret = self.run_helper(client_username=self.username,
                              client_password=self.password,
                              client_domain=self.domain,
                              require_membership=self.bad_group_sid,
                              client_helper="gss-spnego-client",
                              server_helper="gss-spnego",
                              server_use_winbind=True)
        self.assertFalse(ret)

    def test_plaintext_with_membership(self):
        """ ntlm_auth plaintext authentication with require-membership-of """

        # squid-2.5-basic reads "DOMAIN<sep>user password\n" on stdin and
        # answers OK/ERR on stdout.
        proc = Popen([self.ntlm_auth_path,
                      "--require-membership-of", self.group_sid,
                      "--helper-protocol", "squid-2.5-basic"],
                     stdout=PIPE, stdin=PIPE, stderr=PIPE)
        creds = "%s%s%s %s\n" % (self.domain, self.winbind_separator,
                                 self.username,
                                 self.password)
        (out, err) = proc.communicate(input=creds.encode('utf-8'))
        self.assertEqual(proc.returncode, 0)
        self.assertTrue(out.startswith(b"OK\n"))

        # Check membership failure
        proc = Popen([self.ntlm_auth_path,
                      "--require-membership-of", self.bad_group_sid,
                      "--helper-protocol", "squid-2.5-basic"],
                     stdout=PIPE, stdin=PIPE, stderr=PIPE)
        creds = "%s%s%s %s\n" % (self.domain,
                                 self.winbind_separator,
                                 self.username,
                                 self.password)
        (out, err) = proc.communicate(input=creds.encode('utf-8'))
        self.assertEqual(proc.returncode, 0)
        self.assertTrue(out.startswith(b"ERR\n"))

    def test_ntlm_server_1_with_fixed_password(self):
        """ ntlm_auth ntlm-server-1 with fixed password """

        # A known-good challenge/response pair for password "SecREt01".
        ntlm_cmds = [
            "LANMAN-Challenge: 0123456789abcdef",
            "NT-Response: 25a98c1c31e81847466b29b2df4680f39958fb8c213a9cc6",
            "NT-Domain: TEST",
            "Username: testuser",
            "Request-User-Session-Key: Yes",
            ".\n" ]

        proc = Popen([self.ntlm_auth_path,
                      "--password", "SecREt01",
                      "--helper-protocol", "ntlm-server-1"],
                     stdout=PIPE, stdin=PIPE, stderr=PIPE)
        buf = "\n".join(ntlm_cmds)
        (out, err) = proc.communicate(input=buf.encode('utf-8'))
        self.assertEqual(proc.returncode, 0)

        lines = out.split(b"\n")

        self.assertEqual(len(lines), 4)
        self.assertEqual(lines[0], b"Authenticated: Yes")
        self.assertEqual(
            lines[1], b"User-Session-Key: 3F373EA8E4AF954F14FAA506F8EEBDC4")
        self.assertEqual(lines[2], b".")
        self.assertEqual(lines[3], b"")

        # Break the password with a leading A on the challenge
        ntlm_cmds[0] = "LANMAN-Challenge: A123456789abcdef"

        proc = Popen([self.ntlm_auth_path,
                      "--password", "SecREt01",
                      "--helper-protocol", "ntlm-server-1"],
                     stdout=PIPE, stdin=PIPE, stderr=PIPE)
        buf = "\n".join(ntlm_cmds)
        (out, err) = proc.communicate(input=buf.encode('utf-8'))
        self.assertEqual(proc.returncode, 0)

        lines = out.split(b"\n")
        self.assertEqual(len(lines), 5)
        self.assertEqual(lines[0], b"Authenticated: No")

    def test_ntlm_server_1_with_plaintext_winbind(self):
        """ ntlm_auth ntlm-server-1 with plaintext password against winbind """

        ntlm_cmds = [
            "Password: %s" % self.password,
            "NT-Domain: %s" % self.domain,
            "Username: %s" % self.username,
            "Request-User-Session-Key: Yes",
            ".\n" ]

        proc = Popen([self.ntlm_auth_path,
                      "--require-membership-of", self.group_sid,
                      "--helper-protocol", "ntlm-server-1"],
                     stdout=PIPE, stdin=PIPE, stderr=PIPE)
        buf = "\n".join(ntlm_cmds)
        (out, err) = proc.communicate(input=buf.encode('utf-8'))
        self.assertEqual(proc.returncode, 0)

        lines = out.split(b"\n")

        self.assertEqual(len(lines), 3)
        self.assertEqual(lines[0], b"Authenticated: Yes")
        self.assertEqual(lines[1], b".")
        self.assertEqual(lines[2], b"")

        # Check membership failure

        proc = Popen([self.ntlm_auth_path,
                      "--require-membership-of", self.bad_group_sid,
                      "--helper-protocol", "ntlm-server-1"],
                     stdout=PIPE, stdin=PIPE, stderr=PIPE)
        buf = "\n".join(ntlm_cmds)
        (out, err) = proc.communicate(input=buf.encode('utf-8'))
        self.assertEqual(proc.returncode, 0)

        lines = out.split(b"\n")

        self.assertEqual(len(lines), 3)
        self.assertEqual(lines[0], b"Authenticated: No")
        self.assertEqual(lines[1], b".")
        self.assertEqual(lines[2], b"")

    def test_ntlm_server_1_with_incorrect_password_winbind(self):
        """ ntlm_auth ntlm-server-1 with incorrect fixed password against
        winbind """

        # The fixed response below cannot match the real DC password.
        ntlm_cmds = [
            "LANMAN-Challenge: 0123456789abcdef",
            "NT-Response: 25a98c1c31e81847466b29b2df4680f39958fb8c213a9cc6",
            "NT-Domain: %s" % self.domain,
            "Username: %s" % self.username,
            "Request-User-Session-Key: Yes",
            ".\n" ]

        proc = Popen([self.ntlm_auth_path,
                      "--helper-protocol", "ntlm-server-1"],
                     stdout=PIPE, stdin=PIPE, stderr=PIPE)
        buf = "\n".join(ntlm_cmds)
        (out, err) = proc.communicate(input=buf.encode('utf-8'))
        self.assertEqual(proc.returncode, 0)

        lines = out.split(b"\n")

        self.assertEqual(len(lines), 5)
        self.assertEqual(lines[0], b"Authenticated: No")

    def test_diagnostics(self):
        """ ntlm_auth diagnostics """
        cmd_line = [self.ntlm_auth_path,
                    "--username", self.username,
                    "--password", self.password,
                    "--domain", self.domain,
                    "--diagnostics"]
        try:
            self.check_exit_code(cmd_line, 0)
        except BlackboxProcessError as e:
            self.fail(e)

    def test_diagnostics_lm(self):
        """ ntlm_auth diagnostics """
        # Same as above but also requesting the LM session key.
        cmd_line = [self.ntlm_auth_path,
                    "--username", self.username,
                    "--password", self.password,
                    "--domain", self.domain,
                    "--diagnostics",
                    "--request-lm-key"]
        try:
            self.check_exit_code(cmd_line, 0)
        except BlackboxProcessError as e:
            self.fail(e)
diff --git a/python/samba/tests/ntlm_auth_base.py b/python/samba/tests/ntlm_auth_base.py
new file mode 100644
index 0000000..993ac12
--- /dev/null
+++ b/python/samba/tests/ntlm_auth_base.py
@@ -0,0 +1,210 @@
+# Unix SMB/CIFS implementation.
+# A test for the ntlm_auth tool
+# Copyright (C) Kai Blin <kai@samba.org> 2008
+# Copyright (C) Samuel Cabrero <scabrero@suse.de> 2018
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+"""Test ntlm_auth
+This test program will start ntlm_auth with the given command line switches and
+see if it will get the expected results.
+"""
+
+import os
+import samba
+import subprocess
+from samba.tests import BlackboxTestCase
+
class NTLMAuthTestCase(BlackboxTestCase):
    """Base class that pipes an ntlm_auth client helper into an ntlm_auth
    server helper and reports whether authentication succeeded."""

    def setUp(self):
        super().setUp()
        bindir = os.path.normpath(os.getenv("BINDIR", "./bin"))
        self.ntlm_auth_path = os.path.join(bindir, 'ntlm_auth')
        self.lp = samba.tests.env_loadparm()
        self.winbind_separator = self.lp.get('winbind separator')

    def readLine(self, text_stream):
        """Read one newline-terminated line and return it without the
        newline; raise if the stream ended first (helper died or
        protocol desync)."""
        buf = text_stream.readline()
        newline = buf.find('\n')
        if newline == -1:
            raise Exception("Failed to read line")
        return buf[:newline]

    def writeLine(self, text_stream, buf):
        """Write *buf* plus a trailing newline to a helper's stdin."""
        text_stream.write(buf)
        text_stream.write("\n")

    def run_helper(self,
                   client_username=None,
                   client_password=None,
                   client_domain=None,
                   client_use_cached_creds=False,
                   server_username=None,
                   server_password=None,
                   server_domain=None,
                   client_helper="ntlmssp-client-1",
                   server_helper="squid-2.5-ntlmssp",
                   server_use_winbind=False,
                   require_membership=None,
                   target_hostname=None,
                   target_service=None):
        """Run one client/server helper exchange.

        Spawns two ntlm_auth processes (client and server side) and
        shuttles the base64 protocol tokens between them according to
        the chosen helper-protocol pair.  Returns True if the server
        side reported successful authentication, False otherwise.
        Raises on unsupported helper combinations or protocol errors.
        """
        self.assertTrue(os.access(self.ntlm_auth_path, os.X_OK))

        if client_username is None:
            raise Exception("client_username required")

        # Client helper args
        client_args = []
        client_args.append(self.ntlm_auth_path)
        client_args.append("--helper-protocol=%s" % client_helper)
        client_args.append("--username=%s" % client_username)
        if client_domain:
            client_args.append("--domain=%s" % client_domain)
        if client_use_cached_creds:
            client_args.append("--use-cached-creds")
        else:
            if client_password is None:
                raise Exception("client_password required")
            client_args.append("--password=%s" % client_password)
        if target_service:
            client_args.append("--target-service=%s" % target_service)
        if target_hostname:
            client_args.append("--target-hostname=%s" % target_hostname)
        client_args.append("--configfile=%s" % self.lp.configfile)

        # Server helper args
        server_args = []
        server_args.append(self.ntlm_auth_path)
        server_args.append("--helper-protocol=%s" % server_helper)
        server_args.append("--configfile=%s" % self.lp.configfile)
        if not server_use_winbind:
            if server_username is None or server_password is None or server_domain is None:
                raise Exception("Server credentials required if not using winbind")
            server_args.append("--username=%s" % server_username)
            server_args.append("--password=%s" % server_password)
            server_args.append("--domain=%s" % server_domain)
            if require_membership is not None:
                raise Exception("Server must be using winbind for require-membership-of")
        else:
            if require_membership is not None:
                server_args.append("--require-membership-of=%s" % require_membership)

        # Run helpers.  bufsize=0 + universal_newlines gives unbuffered
        # text-mode pipes, needed for the line-oriented protocol.
        result = False
        server_proc = subprocess.Popen(server_args, stdout=subprocess.PIPE, stdin=subprocess.PIPE, bufsize=0, universal_newlines=True)
        client_proc = subprocess.Popen(client_args, stdout=subprocess.PIPE, stdin=subprocess.PIPE, bufsize=0, universal_newlines=True)

        try:
            if client_helper == "ntlmssp-client-1" and server_helper == "squid-2.5-ntlmssp":
                # YR -> negotiate; TT -> challenge; AF/KK -> authenticate.
                self.writeLine(client_proc.stdin, "YR")
                buf = self.readLine(client_proc.stdout)
                self.assertTrue(buf.startswith("YR "))

                self.writeLine(server_proc.stdin, buf)
                buf = self.readLine(server_proc.stdout)
                self.assertTrue(buf.startswith("TT "))

                self.writeLine(client_proc.stdin, buf)
                buf = self.readLine(client_proc.stdout)
                self.assertTrue(buf.startswith("AF "))

                # Client sends 'AF <base64 blob>' but server
                # expects 'KK <base64 blob>'
                buf = buf.replace("AF", "KK", 1)

                self.writeLine(server_proc.stdin, buf)
                buf = self.readLine(server_proc.stdout)
                result = buf.startswith("AF ")
            elif client_helper == "ntlmssp-client-1" and server_helper == "gss-spnego":
                self.writeLine(client_proc.stdin, "YR")
                buf = self.readLine(client_proc.stdout)
                self.assertTrue(buf.startswith("YR "))

                self.writeLine(server_proc.stdin, buf)
                buf = self.readLine(server_proc.stdout)
                self.assertTrue(buf.startswith("TT "))

                self.writeLine(client_proc.stdin, buf)
                buf = self.readLine(client_proc.stdout)
                self.assertTrue(buf.startswith("AF "))

                # Client sends 'AF <base64 blob>' but server expects 'KK <base64 blob>'
                buf = buf.replace("AF", "KK", 1)

                self.writeLine(server_proc.stdin, buf)
                buf = self.readLine(server_proc.stdout)
                # gss-spnego reports success as 'AF * <blob>'
                result = buf.startswith("AF * ")
            elif client_helper == "gss-spnego-client" and server_helper == "gss-spnego":
                # SPNEGO needs a variable number of round trips, so the
                # exchange is driven as a loop until AF/NA is seen.
                self.writeLine(server_proc.stdin, "YR")
                buf = self.readLine(server_proc.stdout)

                while True:
                    if (buf.startswith("NA * ")):
                        result = False
                        break

                    self.assertTrue(buf.startswith("AF ") or buf.startswith("TT "))

                    self.writeLine(client_proc.stdin, buf)
                    buf = self.readLine(client_proc.stdout)

                    if buf.startswith("AF"):
                        result = True
                        break

                    self.assertTrue(buf.startswith("AF ") or buf.startswith("KK ") or buf.startswith("TT "))

                    self.writeLine(server_proc.stdin, buf)
                    buf = self.readLine(server_proc.stdout)

                    if buf.startswith("AF * "):
                        result = True
                        break
            else:
                self.fail("Helper protocols not handled")

            # On success, also exercise the session-key (GK) and
            # negotiated-flags (GF) queries where supported.
            if result is True and client_helper == "ntlmssp-client-1":
                self.writeLine(client_proc.stdin, "GK")
                buf = self.readLine(client_proc.stdout)
                self.assertTrue(buf.startswith("GK "))

                self.writeLine(client_proc.stdin, "GF")
                buf = self.readLine(client_proc.stdout)
                self.assertTrue(buf.startswith("GF "))

            if result is True and server_helper == "squid-2.5-ntlmssp":
                self.writeLine(server_proc.stdin, "GK")
                buf = self.readLine(server_proc.stdout)
                self.assertTrue(buf.startswith("GK "))

                self.writeLine(server_proc.stdin, "GF")
                buf = self.readLine(server_proc.stdout)
                self.assertTrue(buf.startswith("GF "))

            # Closing stdin lets each helper exit cleanly.
            client_proc.stdin.close()
            client_proc.wait()
            self.assertEqual(client_proc.returncode, 0)

            server_proc.stdin.close()
            server_proc.wait()
            self.assertEqual(server_proc.returncode, 0)

            return result
        except:
            # Bare except is deliberate: kill both children on ANY error
            # (including assertion failures) and re-raise unchanged.
            client_proc.kill()
            client_proc.wait()
            server_proc.kill()
            server_proc.wait()
            raise
diff --git a/python/samba/tests/ntlm_auth_krb5.py b/python/samba/tests/ntlm_auth_krb5.py
new file mode 100644
index 0000000..db57400
--- /dev/null
+++ b/python/samba/tests/ntlm_auth_krb5.py
@@ -0,0 +1,83 @@
+# Unix SMB/CIFS implementation.
+#
+# Copyright (C) Samuel Cabrero <scabrero@suse.de> 2018
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import os
+import samba
+from subprocess import Popen, PIPE
+from samba.tests.ntlm_auth_base import NTLMAuthTestCase
+
class NTLMAuthKerberosTests(NTLMAuthTestCase):
    """Kerberos-flavoured ntlm_auth tests using pre-provisioned ccaches."""

    def setUp(self):
        super().setUp()
        # Two credential caches prepared by the 'ktest' selftest
        # environment (old and current format/ticket).
        self.old_ccache = os.path.join(os.environ["SELFTEST_PREFIX"],
                                       "ktest", "krb5_ccache-2")
        self.ccache = os.path.join(os.environ["SELFTEST_PREFIX"],
                                   "ktest", "krb5_ccache-3")

    def test_krb5_gss_spnego_client_gss_spnego_server(self):
        """ ntlm_auth with krb5 gss-spnego-client and gss-spnego server """

        # NOTE(review): KRB5CCNAME is set but never restored, so it leaks
        # into subsequent tests in this process — TODO confirm the test
        # harness isolates the environment.
        # Username/password are dummies: the ticket in the ccache is what
        # actually authenticates.
        os.environ["KRB5CCNAME"] = self.old_ccache
        ret = self.run_helper(client_username="foo",
                              client_password="secret",
                              client_domain="FOO",
                              target_hostname=os.environ["SERVER"],
                              target_service="host",
                              client_helper="gss-spnego-client",
                              server_helper="gss-spnego",
                              server_use_winbind=True)
        self.assertTrue(ret)

        os.environ["KRB5CCNAME"] = self.ccache
        ret = self.run_helper(client_username="foo",
                              client_password="secret",
                              client_domain="FOO",
                              target_hostname=os.environ["SERVER"],
                              target_service="host",
                              client_helper="gss-spnego-client",
                              server_helper="gss-spnego",
                              server_use_winbind=True)
        self.assertTrue(ret)

    def test_krb5_invalid_keytab(self):
        """ ntlm_auth with krb5 and an invalid keytab """

        # A nonexistent dedicated keytab must not crash the helper: it
        # should still exit 0 after the YR exchange.
        dedicated_keytab = "FILE:%s.%s" % (
            self.old_ccache, "keytab-does-not-exists")
        proc = Popen([self.ntlm_auth_path,
                      "--helper-protocol", "gss-spnego",
                      "--option", "security=ads",
                      "--option", "kerberosmethod=dedicatedkeytab",
                      "--option", "dedicatedkeytabfile=%s" % dedicated_keytab],
                     stdout=PIPE, stdin=PIPE, stderr=PIPE)
        buf = "YR\n"
        (out, err) = proc.communicate(input=buf.encode('utf-8'))
        self.assertEqual(proc.returncode, 0)

        dedicated_keytab = "FILE:%s.%s" % (
            self.ccache, "keytab-does-not-exists")
        proc = Popen([self.ntlm_auth_path,
                      "--helper-protocol", "gss-spnego",
                      "--option", "security=ads",
                      "--option", "kerberosmethod=dedicatedkeytab",
                      "--option", "dedicatedkeytabfile=%s" % dedicated_keytab],
                     stdout=PIPE, stdin=PIPE, stderr=PIPE)
        buf = "YR\n"
        (out, err) = proc.communicate(input=buf.encode('utf-8'))
        self.assertEqual(proc.returncode, 0)
diff --git a/python/samba/tests/ntlmdisabled.py b/python/samba/tests/ntlmdisabled.py
new file mode 100644
index 0000000..405f47d
--- /dev/null
+++ b/python/samba/tests/ntlmdisabled.py
@@ -0,0 +1,84 @@
+# Tests basic behaviour when NTLM is disabled
+#
+# Copyright (C) Catalyst IT Ltd. 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+from samba.tests import TestCase
+import os
+
+import samba
+from samba.credentials import Credentials, DONT_USE_KERBEROS, MUST_USE_KERBEROS
+
+from samba import NTSTATUSError, ntstatus
+import ctypes
+
+from samba.dcerpc import srvsvc, samr, lsa
+
+"""
+Tests behaviour when NTLM is disabled
+"""
+
+
class NtlmDisabledTests(TestCase):
    """Check that NTLM-dependent operations behave as expected; each test
    fails explicitly if the server reports NT_STATUS_NTLM_BLOCKED."""

    def setUp(self):
        super().setUp()

        self.lp = self.get_loadparm()
        self.server = os.getenv("SERVER")

        self.creds = Credentials()
        self.creds.guess(self.lp)
        self.creds.set_username(os.getenv("USERNAME"))
        self.creds.set_domain(self.server)
        self.creds.set_password(os.getenv("PASSWORD"))
        # Force NTLM for the connection tests.
        self.creds.set_kerberos_state(DONT_USE_KERBEROS)

    def test_ntlm_connection(self):
        """An NTLM-authenticated srvsvc connection should be possible."""
        try:
            conn = srvsvc.srvsvc("ncacn_np:%s[smb2,ntlm]" % self.server, self.lp, self.creds)

            self.assertIsNotNone(conn)
        except NTSTATUSError as e:
            # NTLM might be blocked on this server
            # NTSTATUS arrives as a (possibly negative) int; convert to
            # the unsigned value for comparison with ntstatus constants.
            enum = ctypes.c_uint32(e.args[0]).value
            if enum == ntstatus.NT_STATUS_NTLM_BLOCKED:
                self.fail("NTLM is disabled on this server")
            else:
                raise

    def test_samr_change_password(self):
        """ChangePasswordUser2 (an NTLM-based operation) must not be
        rejected with NTLM_BLOCKED, even over a Kerberos connection."""
        self.creds.set_kerberos_state(MUST_USE_KERBEROS)
        conn = samr.samr("ncacn_np:%s[krb5,seal,smb2]" % os.getenv("SERVER"))

        # we want to check whether this gets rejected outright because NTLM is
        # disabled, so we don't actually need to encrypt a valid password here
        server = lsa.String()
        server.string = self.server
        username = lsa.String()
        username.string = os.getenv("USERNAME")

        try:
            conn.ChangePasswordUser2(server, username, None, None, True, None, None)
        except NTSTATUSError as e:
            # changing passwords should be rejected when NTLM is disabled
            enum = ctypes.c_uint32(e.args[0]).value
            if enum == ntstatus.NT_STATUS_NTLM_BLOCKED:
                self.fail("NTLM is disabled on this server")
            elif enum == ntstatus.NT_STATUS_WRONG_PASSWORD:
                # expected error case when NTLM is enabled
                pass
            else:
                raise
diff --git a/python/samba/tests/pam_winbind.py b/python/samba/tests/pam_winbind.py
new file mode 100644
index 0000000..708f408
--- /dev/null
+++ b/python/samba/tests/pam_winbind.py
@@ -0,0 +1,72 @@
+# Unix SMB/CIFS implementation.
+#
+# Copyright (C) 2017 Andreas Schneider <asn@samba.org>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import samba.tests
+import pypamtest
+import os
+
+
class SimplePamTests(samba.tests.TestCase):
    """PAM authentication tests against the pam_winbind 'samba' service."""

    def _pam_authenticate(self, password, expected_rc):
        """Run one PAM authenticate conversation and check its result.

        Builds the "DOMAIN/USERNAME" account name from the environment
        (bare USERNAME when DOMAIN is empty), feeds *password* to the
        'samba' PAM stack and asserts pam returns *expected_rc* with a
        non-None result object.  Factors out the conversation that was
        previously duplicated three times across the test methods.
        """
        domain = os.environ["DOMAIN"]
        username = os.environ["USERNAME"]
        if domain != "":
            unix_username = "%s/%s" % (domain, username)
        else:
            unix_username = "%s" % username

        tc = pypamtest.TestCase(pypamtest.PAMTEST_AUTHENTICATE, expected_rc)
        try:
            res = pypamtest.run_pamtest(unix_username, "samba", [tc], [password])
        except pypamtest.PamTestError as e:
            raise AssertionError(str(e))

        self.assertTrue(res is not None)

    def test_authenticate(self):
        """The correct password must authenticate successfully."""
        self._pam_authenticate(os.environ["PASSWORD"], 0)  # PAM_SUCCESS

    def test_authenticate_error(self):
        """A wrong password must fail with PAM_AUTH_ERR and must not lock
        the account out."""
        self._pam_authenticate("WrongPassword", 7)  # PAM_AUTH_ERR

        # Authenticate again to check that we are not locked out with just one
        # failed login
        self._pam_authenticate(os.environ["PASSWORD"], 0)  # PAM_SUCCESS
diff --git a/python/samba/tests/pam_winbind_chauthtok.py b/python/samba/tests/pam_winbind_chauthtok.py
new file mode 100644
index 0000000..c1d569b
--- /dev/null
+++ b/python/samba/tests/pam_winbind_chauthtok.py
@@ -0,0 +1,42 @@
+# Unix SMB/CIFS implementation.
+#
+# Copyright (C) 2017 Andreas Schneider <asn@samba.org>
+# Copyright (C) 2018 Mathieu Parent <math.parent@gmail.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import samba.tests
+import pypamtest
+import os
+
+class PamChauthtokTests(samba.tests.TestCase):
+ def test_chauthtok(self):
+ domain = os.environ["DOMAIN"]
+ username = os.environ["USERNAME"]
+ password = os.environ["PASSWORD"]
+ newpassword = os.environ["NEWPASSWORD"]
+ if domain != "":
+ unix_username = "%s/%s" % (domain, username)
+ else:
+ unix_username = "%s" % username
+ expected_rc = 0 # PAM_SUCCESS
+
+ tc = pypamtest.TestCase(pypamtest.PAMTEST_CHAUTHTOK, expected_rc)
+ try:
+ res = pypamtest.run_pamtest(unix_username, "samba", [tc], [password, newpassword, newpassword])
+ except pypamtest.PamTestError as e:
+ raise AssertionError(str(e))
+
+ self.assertTrue(res is not None)
diff --git a/python/samba/tests/pam_winbind_setcred.py b/python/samba/tests/pam_winbind_setcred.py
new file mode 100644
index 0000000..055eac2
--- /dev/null
+++ b/python/samba/tests/pam_winbind_setcred.py
@@ -0,0 +1,56 @@
+# Unix SMB/CIFS implementation.
+#
+# Copyright (C) 2022 Samuel Cabrero <scabrero@samba.org>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import samba.tests
+import pypamtest
+import os
+
+class PamChauthtokTests(samba.tests.TestCase):
+ def test_setcred_delete_cred(self):
+ domain = os.environ["DOMAIN"]
+ username = os.environ["USERNAME"]
+ password = os.environ["PASSWORD"]
+
+ if domain != "":
+ unix_username = "%s/%s" % (domain, username)
+ else:
+ unix_username = "%s" % username
+ expected_rc = 0 # PAM_SUCCESS
+
+ tc = pypamtest.TestCase(pypamtest.PAMTEST_AUTHENTICATE, expected_rc)
+ tc1 = pypamtest.TestCase(pypamtest.PAMTEST_GETENVLIST, expected_rc)
+ tc2 = pypamtest.TestCase(pypamtest.PAMTEST_KEEPHANDLE, expected_rc)
+ try:
+ res = pypamtest.run_pamtest(unix_username, "samba", [tc, tc1, tc2], [password])
+ except pypamtest.PamTestError as e:
+ raise AssertionError(str(e))
+
+ self.assertTrue(res is not None)
+
+ ccache = tc1.pam_env["KRB5CCNAME"]
+ ccache = ccache[ccache.index(":") + 1:]
+ self.assertTrue(os.path.exists(ccache))
+
+ handle = tc2.pam_handle
+ tc3 = pypamtest.TestCase(pypamtest.PAMTEST_SETCRED, expected_rc, pypamtest.PAMTEST_FLAG_DELETE_CRED)
+ try:
+ res = pypamtest.run_pamtest(unix_username, "samba", [tc3], handle=handle)
+ except pypamtest.PamTestError as e:
+ raise AssertionError(str(e))
+
+ self.assertFalse(os.path.exists(ccache))
diff --git a/python/samba/tests/pam_winbind_warn_pwd_expire.py b/python/samba/tests/pam_winbind_warn_pwd_expire.py
new file mode 100644
index 0000000..56f5da9
--- /dev/null
+++ b/python/samba/tests/pam_winbind_warn_pwd_expire.py
@@ -0,0 +1,52 @@
+# Unix SMB/CIFS implementation.
+#
+# Copyright (C) 2017 Andreas Schneider <asn@samba.org>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import samba.tests
+import pypamtest
+import os
+
+
+class PasswordExpirePamTests(samba.tests.TestCase):
+ def test_auth_expire_warning(self):
+ domain = os.environ["DOMAIN"]
+ username = os.environ["USERNAME"]
+ password = os.environ["PASSWORD"]
+ warn_pwd_expire = int(os.environ["WARN_PWD_EXPIRE"])
+ if domain != "":
+ unix_username = "%s/%s" % (domain, username)
+ else:
+ unix_username = "%s" % username
+ expected_rc = 0 # PAM_SUCCESS
+
+ tc = pypamtest.TestCase(pypamtest.PAMTEST_AUTHENTICATE, expected_rc)
+ try:
+ res = pypamtest.run_pamtest(unix_username, "samba", [tc], [password])
+ except pypamtest.PamTestError as e:
+ raise AssertionError(str(e))
+
+ self.assertTrue(res is not None)
+ if warn_pwd_expire == 0:
+ self.assertTrue(res.info == ())
+ elif warn_pwd_expire == 50:
+ # This is needed as otherwise a build started around
+ # midnight can fail
+ if (res.info[0] != u"Your password will expire in 41 days.\n") and \
+ (res.info[0] != u"Your password will expire in 43 days.\n"):
+ self.assertEqual(res.info[0], u"Your password will expire in 42 days.\n")
+ else:
+ self.assertEqual(warn_pwd_expire, 0)
diff --git a/python/samba/tests/param.py b/python/samba/tests/param.py
new file mode 100644
index 0000000..7c45d91
--- /dev/null
+++ b/python/samba/tests/param.py
@@ -0,0 +1,107 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for samba.param."""
+
+from samba import param
+import samba.tests
+import os
+
+
+class LoadParmTestCase(samba.tests.TestCaseInTempDir):
+
+ def setUp(self):
+ super().setUp()
+ self.tempf = os.path.join(self.tempdir, "test")
+ open(self.tempf, 'w').write("empty")
+
+ def tearDown(self):
+ os.unlink(self.tempf)
+ super().tearDown()
+
+ def test_init(self):
+ file = param.LoadParm()
+ self.assertTrue(file is not None)
+
+ def test_length(self):
+ file = param.LoadParm()
+ self.assertEqual(0, len(file))
+
+ def test_set_workgroup(self):
+ file = param.LoadParm()
+ file.set("workgroup", "bla")
+ self.assertEqual("BLA", file.get("workgroup"))
+
+ def test_is_mydomain(self):
+ file = param.LoadParm()
+ file.set("workgroup", "bla")
+ self.assertTrue(file.is_mydomain("BLA"))
+ self.assertFalse(file.is_mydomain("FOOBAR"))
+
+ def test_is_myname(self):
+ file = param.LoadParm()
+ file.set("netbios name", "bla")
+ self.assertTrue(file.is_myname("BLA"))
+ self.assertFalse(file.is_myname("FOOBAR"))
+
+ def test_load_default(self):
+ file = param.LoadParm()
+ file.load_default()
+
+ def test_section_nonexistent(self):
+ samba_lp = param.LoadParm()
+ samba_lp.load_default()
+ self.assertRaises(KeyError, samba_lp.__getitem__, "nonexistent")
+
+ def test_log_level(self):
+ samba_lp = param.LoadParm()
+ samba_lp.set("log level", "5 auth:4")
+ self.assertEqual(5, samba_lp.log_level())
+
+ def test_dump(self):
+ samba_lp = param.LoadParm()
+ # Just test successful method execution (outputs to stdout)
+ self.assertEqual(None, samba_lp.dump())
+
+ def test_dump_to_file(self):
+ samba_lp = param.LoadParm()
+ self.assertEqual(None, samba_lp.dump(False, self.tempf))
+ content = open(self.tempf, 'r').read()
+ self.assertIn('[global]', content)
+ self.assertIn('interfaces', content)
+
+ def test_dump_a_parameter(self):
+ samba_lp = param.LoadParm()
+ samba_lp.load_default()
+ # Just test successful method execution
+ self.assertEqual(None, samba_lp.dump_a_parameter('interfaces'))
+
+ def test_dump_a_parameter_to_file(self):
+ samba_lp = param.LoadParm()
+ samba_lp.load_default()
+ self.assertEqual(None,
+ samba_lp.dump_a_parameter('interfaces',
+ 'global',
+ self.tempf))
+ content = open(self.tempf, 'r').read()
+ self.assertIn('10.53.57.', content)
+
+ def test_samdb_url(self):
+ samba_lp = param.LoadParm()
+ samdb_url = samba_lp.samdb_url()
+ self.assertTrue(samdb_url.startswith('tdb://'))
+ self.assertTrue(samdb_url.endswith('/sam.ldb'))
diff --git a/python/samba/tests/password_hash.py b/python/samba/tests/password_hash.py
new file mode 100644
index 0000000..1b7af7d
--- /dev/null
+++ b/python/samba/tests/password_hash.py
@@ -0,0 +1,335 @@
+# Tests for source4/dsdb/samdb/ldb_modules/password_hash.c
+#
+# Copyright (C) Catalyst IT Ltd. 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""
+Base class for tests for source4/dsdb/samdb/ldb_modules/password_hash.c
+"""
+
+from samba.credentials import Credentials
+from samba.samdb import SamDB
+from samba.auth import system_session
+from samba.tests import TestCase
+from samba.ndr import ndr_unpack
+from samba.dcerpc import drsblobs
+from samba.dcerpc.samr import DOMAIN_PASSWORD_STORE_CLEARTEXT
+from samba.dsdb import UF_ENCRYPTED_TEXT_PASSWORD_ALLOWED
+from samba.tests import delete_force
+from samba.tests.password_test import PasswordCommon
+import ldb
+import samba
+import binascii
+from hashlib import md5
+import crypt
+
+
+USER_NAME = "PasswordHashTestUser"
+USER_PASS = samba.generate_random_password(32, 32)
+UPN = "PWHash@User.Principle"
+
+# Get named package from the passed supplemental credentials
+#
+# returns the package and its position within the supplemental credentials
+
+
+def get_package(sc, name):
+ if sc is None:
+ return None
+
+ idx = 0
+ for p in sc.sub.packages:
+ idx += 1
+ if name == p.name:
+ return (idx, p)
+
+ return None
+
+# Calculate the MD5 password digest from the supplied user, realm and password
+#
+
+
+def calc_digest(user, realm, password):
+
+ data = "%s:%s:%s" % (user, realm, password)
+ if isinstance(data, str):
+ data = data.encode('utf8')
+
+ return md5(data).hexdigest()
+
+
+class PassWordHashTests(TestCase):
+
+ def setUp(self):
+ self.lp = samba.tests.env_loadparm()
+ super().setUp()
+
+ def set_store_cleartext(self, cleartext):
+ # get the current pwdProperties
+ pwdProperties = self.ldb.get_pwdProperties()
+ # update the clear-text properties flag
+ props = int(pwdProperties)
+ if cleartext:
+ props |= DOMAIN_PASSWORD_STORE_CLEARTEXT
+ else:
+ props &= ~DOMAIN_PASSWORD_STORE_CLEARTEXT
+ self.ldb.set_pwdProperties(str(props))
+
+ # Add a user to ldb, this will exercise the password_hash code
+ # and calculate the appropriate supplemental credentials
+ def add_user(self, options=None, clear_text=False, ldb=None):
+ # set any needed options
+ if options is not None:
+ for (option, value) in options:
+ self.lp.set(option, value)
+
+ if ldb is None:
+ self.creds = Credentials()
+ self.session = system_session()
+ self.creds.guess(self.lp)
+ self.session = system_session()
+ self.ldb = SamDB(session_info=self.session,
+ credentials=self.creds,
+ lp=self.lp)
+ else:
+ self.ldb = ldb
+
+ res = self.ldb.search(base=self.ldb.get_config_basedn(),
+ expression="ncName=%s" % self.ldb.get_default_basedn(),
+ attrs=["nETBIOSName"])
+ self.netbios_domain = str(res[0]["nETBIOSName"][0])
+ self.dns_domain = self.ldb.domain_dns_name()
+
+ # Gets back the basedn
+ base_dn = self.ldb.domain_dn()
+
+ # Gets back the configuration basedn
+ configuration_dn = self.ldb.get_config_basedn().get_linearized()
+
+ # permit password changes during this test
+ PasswordCommon.allow_password_changes(self, self.ldb)
+
+ self.base_dn = self.ldb.domain_dn()
+
+ account_control = 0
+ if clear_text:
+ # Restore the current domain setting on exit.
+ pwdProperties = self.ldb.get_pwdProperties()
+ self.addCleanup(self.ldb.set_pwdProperties, pwdProperties)
+ # Update the domain setting
+ self.set_store_cleartext(clear_text)
+ account_control |= UF_ENCRYPTED_TEXT_PASSWORD_ALLOWED
+
+ # (Re)adds the test user USER_NAME with password USER_PASS
+ # and userPrincipalName UPN
+ delete_force(self.ldb, "cn=" + USER_NAME + ",cn=users," + self.base_dn)
+ self.ldb.add({
+ "dn": "cn=" + USER_NAME + ",cn=users," + self.base_dn,
+ "objectclass": "user",
+ "sAMAccountName": USER_NAME,
+ "userPassword": USER_PASS,
+ "userPrincipalName": UPN,
+ "userAccountControl": str(account_control)
+ })
+
+ # Get the supplemental credentials for the user under test
+ def get_supplemental_creds(self):
+ base = "cn=" + USER_NAME + ",cn=users," + self.base_dn
+ res = self.ldb.search(scope=ldb.SCOPE_BASE,
+ base=base,
+ attrs=["supplementalCredentials"])
+ self.assertIs(True, len(res) > 0)
+ obj = res[0]
+ sc_blob = obj["supplementalCredentials"][0]
+ sc = ndr_unpack(drsblobs.supplementalCredentialsBlob, sc_blob)
+ return sc
+
+ # Calculate and validate a Wdigest value
+ def check_digest(self, user, realm, password, digest):
+ expected = calc_digest(user, realm, password)
+ actual = binascii.hexlify(bytearray(digest)).decode('utf8')
+ error = "Digest expected[%s], actual[%s], " \
+ "user[%s], realm[%s], pass[%s]" % \
+ (expected, actual, user, realm, password)
+ self.assertEqual(expected, actual, error)
+
+ # Check all of the 29 expected WDigest values
+ #
+ def check_wdigests(self, digests):
+
+ self.assertEqual(29, digests.num_hashes)
+
+ # Using the n-1 pattern in the array indexes to make it easier
+ # to check the tests against the spec and the samba-tool user tests.
+ self.check_digest(USER_NAME,
+ self.netbios_domain,
+ USER_PASS,
+ digests.hashes[1 - 1].hash)
+ self.check_digest(USER_NAME.lower(),
+ self.netbios_domain.lower(),
+ USER_PASS,
+ digests.hashes[2 - 1].hash)
+ self.check_digest(USER_NAME.upper(),
+ self.netbios_domain.upper(),
+ USER_PASS,
+ digests.hashes[3 - 1].hash)
+ self.check_digest(USER_NAME,
+ self.netbios_domain.upper(),
+ USER_PASS,
+ digests.hashes[4 - 1].hash)
+ self.check_digest(USER_NAME,
+ self.netbios_domain.lower(),
+ USER_PASS,
+ digests.hashes[5 - 1].hash)
+ self.check_digest(USER_NAME.upper(),
+ self.netbios_domain.lower(),
+ USER_PASS,
+ digests.hashes[6 - 1].hash)
+ self.check_digest(USER_NAME.lower(),
+ self.netbios_domain.upper(),
+ USER_PASS,
+ digests.hashes[7 - 1].hash)
+ self.check_digest(USER_NAME,
+ self.dns_domain,
+ USER_PASS,
+ digests.hashes[8 - 1].hash)
+ self.check_digest(USER_NAME.lower(),
+ self.dns_domain.lower(),
+ USER_PASS,
+ digests.hashes[9 - 1].hash)
+ self.check_digest(USER_NAME.upper(),
+ self.dns_domain.upper(),
+ USER_PASS,
+ digests.hashes[10 - 1].hash)
+ self.check_digest(USER_NAME,
+ self.dns_domain.upper(),
+ USER_PASS,
+ digests.hashes[11 - 1].hash)
+ self.check_digest(USER_NAME,
+ self.dns_domain.lower(),
+ USER_PASS,
+ digests.hashes[12 - 1].hash)
+ self.check_digest(USER_NAME.upper(),
+ self.dns_domain.lower(),
+ USER_PASS,
+ digests.hashes[13 - 1].hash)
+ self.check_digest(USER_NAME.lower(),
+ self.dns_domain.upper(),
+ USER_PASS,
+ digests.hashes[14 - 1].hash)
+ self.check_digest(UPN,
+ "",
+ USER_PASS,
+ digests.hashes[15 - 1].hash)
+ self.check_digest(UPN.lower(),
+ "",
+ USER_PASS,
+ digests.hashes[16 - 1].hash)
+ self.check_digest(UPN.upper(),
+ "",
+ USER_PASS,
+ digests.hashes[17 - 1].hash)
+
+ name = "%s\\%s" % (self.netbios_domain, USER_NAME)
+ self.check_digest(name,
+ "",
+ USER_PASS,
+ digests.hashes[18 - 1].hash)
+
+ name = "%s\\%s" % (self.netbios_domain.lower(), USER_NAME.lower())
+ self.check_digest(name,
+ "",
+ USER_PASS,
+ digests.hashes[19 - 1].hash)
+
+ name = "%s\\%s" % (self.netbios_domain.upper(), USER_NAME.upper())
+ self.check_digest(name,
+ "",
+ USER_PASS,
+ digests.hashes[20 - 1].hash)
+ self.check_digest(USER_NAME,
+ "Digest",
+ USER_PASS,
+ digests.hashes[21 - 1].hash)
+ self.check_digest(USER_NAME.lower(),
+ "Digest",
+ USER_PASS,
+ digests.hashes[22 - 1].hash)
+ self.check_digest(USER_NAME.upper(),
+ "Digest",
+ USER_PASS,
+ digests.hashes[23 - 1].hash)
+ self.check_digest(UPN,
+ "Digest",
+ USER_PASS,
+ digests.hashes[24 - 1].hash)
+ self.check_digest(UPN.lower(),
+ "Digest",
+ USER_PASS,
+ digests.hashes[25 - 1].hash)
+ self.check_digest(UPN.upper(),
+ "Digest",
+ USER_PASS,
+ digests.hashes[26 - 1].hash)
+ name = "%s\\%s" % (self.netbios_domain, USER_NAME)
+ self.check_digest(name,
+ "Digest",
+ USER_PASS,
+ digests.hashes[27 - 1].hash)
+
+ name = "%s\\%s" % (self.netbios_domain.lower(), USER_NAME.lower())
+ self.check_digest(name,
+ "Digest",
+ USER_PASS,
+ digests.hashes[28 - 1].hash)
+
+ name = "%s\\%s" % (self.netbios_domain.upper(), USER_NAME.upper())
+ self.check_digest(name,
+ "Digest",
+ USER_PASS,
+ digests.hashes[29 - 1].hash)
+
+ def checkUserPassword(self, up, expected):
+
+ # Check we've received the correct number of hashes
+ self.assertEqual(len(expected), up.num_hashes)
+
+ i = 0
+ for (tag, alg, rounds) in expected:
+ self.assertEqual(tag, up.hashes[i].scheme)
+
+ data = up.hashes[i].value.decode('utf8').split("$")
+ # Check we got the expected crypt algorithm
+ self.assertEqual(alg, data[1])
+
+ if rounds is None:
+ cmd = "$%s$%s" % (alg, data[2])
+ else:
+ cmd = "$%s$rounds=%d$%s" % (alg, rounds, data[3])
+
+ # Calculate the expected hash value
+ expected = crypt.crypt(USER_PASS, cmd)
+ self.assertEqual(expected, up.hashes[i].value.decode('utf8'))
+ i += 1
+
+ # Check that the correct nt_hash was stored for userPassword
+ def checkNtHash(self, password, nt_hash):
+ creds = Credentials()
+ creds.set_anonymous()
+ creds.set_password(password)
+ expected = creds.get_nt_hash()
+ actual = bytearray(nt_hash)
+ self.assertEqual(expected, actual)
diff --git a/python/samba/tests/password_hash_fl2003.py b/python/samba/tests/password_hash_fl2003.py
new file mode 100644
index 0000000..ff9b237
--- /dev/null
+++ b/python/samba/tests/password_hash_fl2003.py
@@ -0,0 +1,196 @@
+# Tests for source4/dsdb/samdb/ldb_modules/password_hash.c
+#
+# Copyright (C) Catalyst IT Ltd. 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""
+Tests for source4/dsdb/samdb/ldb_modules/password_hash.c
+
+These tests need to be run in an environment in which
+io->ac->gpg_key_ids == NULL, so that the gpg supplemental credentials
+are not generated. And also need to be in an environment with a
+functional level less than 2008 to ensure the kerberos newer keys are not
+generated
+"""
+
+from samba.tests.password_hash import (
+ PassWordHashTests,
+ get_package,
+ USER_PASS
+)
+from samba.ndr import ndr_unpack
+from samba.dcerpc import drsblobs
+import binascii
+
+
+class PassWordHashFl2003Tests(PassWordHashTests):
+
+ def test_default_supplementalCredentials(self):
+ self.add_user(options=[("password hash userPassword schemes", "")])
+
+ sc = self.get_supplemental_creds()
+
+ # Check that we got all the expected supplemental credentials
+ # And they are in the expected order.
+ size = len(sc.sub.packages)
+ self.assertEqual(3, size)
+
+ (pos, package) = get_package(sc, "Primary:Kerberos")
+ self.assertEqual(1, pos)
+ self.assertEqual("Primary:Kerberos", package.name)
+
+ (pos, package) = get_package(sc, "Packages")
+ self.assertEqual(2, pos)
+ self.assertEqual("Packages", package.name)
+
+ (pos, package) = get_package(sc, "Primary:WDigest")
+ self.assertEqual(3, pos)
+ self.assertEqual("Primary:WDigest", package.name)
+
+ # Check that the WDigest values are correct.
+ #
+ digests = ndr_unpack(drsblobs.package_PrimaryWDigestBlob,
+ binascii.a2b_hex(package.data))
+ self.check_wdigests(digests)
+
+ def test_userPassword_sha256(self):
+ self.add_user(options=[("password hash userPassword schemes",
+ "CryptSHA256")])
+
+ sc = self.get_supplemental_creds()
+
+ # Check that we got all the expected supplemental credentials
+ # And they are in the expected order.
+ size = len(sc.sub.packages)
+ self.assertEqual(4, size)
+
+ (pos, package) = get_package(sc, "Primary:Kerberos")
+ self.assertEqual(1, pos)
+ self.assertEqual("Primary:Kerberos", package.name)
+
+ (pos, wd_package) = get_package(sc, "Primary:WDigest")
+ self.assertEqual(2, pos)
+ self.assertEqual("Primary:WDigest", wd_package.name)
+
+ (pos, package) = get_package(sc, "Packages")
+ self.assertEqual(3, pos)
+ self.assertEqual("Packages", package.name)
+
+ (pos, up_package) = get_package(sc, "Primary:userPassword")
+ self.assertEqual(4, pos)
+ self.assertEqual("Primary:userPassword", up_package.name)
+
+ # Check that the WDigest values are correct.
+ #
+ digests = ndr_unpack(drsblobs.package_PrimaryWDigestBlob,
+ binascii.a2b_hex(wd_package.data))
+ self.check_wdigests(digests)
+
+ # Check that the userPassword hashes are computed correctly
+ #
+ up = ndr_unpack(drsblobs.package_PrimaryUserPasswordBlob,
+ binascii.a2b_hex(up_package.data))
+
+ self.checkUserPassword(up, [("{CRYPT}", "5", None)])
+ self.checkNtHash(USER_PASS, up.current_nt_hash.hash)
+
+ def test_supplementalCredentials_cleartext(self):
+ self.add_user(clear_text=True,
+ options=[("password hash userPassword schemes", "")])
+
+ sc = self.get_supplemental_creds()
+
+ # Check that we got all the expected supplemental credentials
+ # And they are in the expected order.
+ size = len(sc.sub.packages)
+ self.assertEqual(4, size)
+
+ (pos, package) = get_package(sc, "Primary:Kerberos")
+ self.assertEqual(1, pos)
+ self.assertEqual("Primary:Kerberos", package.name)
+
+ (pos, wd_package) = get_package(sc, "Primary:WDigest")
+ self.assertEqual(2, pos)
+ self.assertEqual("Primary:WDigest", wd_package.name)
+
+ (pos, package) = get_package(sc, "Packages")
+ self.assertEqual(3, pos)
+ self.assertEqual("Packages", package.name)
+
+ (pos, ct_package) = get_package(sc, "Primary:CLEARTEXT")
+ self.assertEqual(4, pos)
+ self.assertEqual("Primary:CLEARTEXT", ct_package.name)
+
+ # Check that the WDigest values are correct.
+ #
+ digests = ndr_unpack(drsblobs.package_PrimaryWDigestBlob,
+ binascii.a2b_hex(wd_package.data))
+ self.check_wdigests(digests)
+
+ # Check the clear text value is correct.
+ ct = ndr_unpack(drsblobs.package_PrimaryCLEARTEXTBlob,
+ binascii.a2b_hex(ct_package.data))
+ self.assertEqual(USER_PASS.encode('utf-16-le'), ct.cleartext)
+
+ def test_userPassword_cleartext_sha512(self):
+ self.add_user(clear_text=True,
+ options=[("password hash userPassword schemes",
+ "CryptSHA512:rounds=10000")])
+
+ sc = self.get_supplemental_creds()
+
+ # Check that we got all the expected supplemental credentials
+ # And they are in the expected order.
+ size = len(sc.sub.packages)
+ self.assertEqual(5, size)
+
+ (pos, package) = get_package(sc, "Primary:Kerberos")
+ self.assertEqual(1, pos)
+ self.assertEqual("Primary:Kerberos", package.name)
+
+ (pos, wd_package) = get_package(sc, "Primary:WDigest")
+ self.assertEqual(2, pos)
+ self.assertEqual("Primary:WDigest", wd_package.name)
+
+ (pos, ct_package) = get_package(sc, "Primary:CLEARTEXT")
+ self.assertEqual(3, pos)
+ self.assertEqual("Primary:CLEARTEXT", ct_package.name)
+
+ (pos, package) = get_package(sc, "Packages")
+ self.assertEqual(4, pos)
+ self.assertEqual("Packages", package.name)
+
+ (pos, up_package) = get_package(sc, "Primary:userPassword")
+ self.assertEqual(5, pos)
+ self.assertEqual("Primary:userPassword", up_package.name)
+
+ # Check that the WDigest values are correct.
+ #
+ digests = ndr_unpack(drsblobs.package_PrimaryWDigestBlob,
+ binascii.a2b_hex(wd_package.data))
+ self.check_wdigests(digests)
+
+ # Check the clear text value is correct.
+ ct = ndr_unpack(drsblobs.package_PrimaryCLEARTEXTBlob,
+ binascii.a2b_hex(ct_package.data))
+ self.assertEqual(USER_PASS.encode('utf-16-le'), ct.cleartext)
+
+ # Check that the userPassword hashes are computed correctly
+ #
+ up = ndr_unpack(drsblobs.package_PrimaryUserPasswordBlob,
+ binascii.a2b_hex(up_package.data))
+ self.checkUserPassword(up, [("{CRYPT}", "6", 10000)])
+ self.checkNtHash(USER_PASS, up.current_nt_hash.hash)
diff --git a/python/samba/tests/password_hash_fl2008.py b/python/samba/tests/password_hash_fl2008.py
new file mode 100644
index 0000000..c1ceeb7
--- /dev/null
+++ b/python/samba/tests/password_hash_fl2008.py
@@ -0,0 +1,207 @@
+# Tests for source4/dsdb/samdb/ldb_modules/password_hash.c
+#
+# Copyright (C) Catalyst IT Ltd. 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""
+Tests for source4/dsdb/samdb/ldb_modules/password_hash.c
+
+These tests need to be run in an environment in which
+io->ac->gpg_key_ids == NULL, so that the gpg supplemental credentials
+are not generated. And also need to be in an environment with a
+functional level of 2008 or greater to ensure the kerberos newer keys are
+generated
+"""
+from samba.tests.password_hash import (
+ PassWordHashTests,
+ get_package,
+ USER_PASS
+)
+from samba.ndr import ndr_unpack
+from samba.dcerpc import drsblobs
+import binascii
+
+
+class PassWordHashFl2008Tests(PassWordHashTests):
+
+ def test_default_supplementalCredentials(self):
+ self.add_user()
+
+ sc = self.get_supplemental_creds()
+
+ # Check that we got all the expected supplemental credentials
+ # And they are in the expected order.
+ size = len(sc.sub.packages)
+ self.assertEqual(4, size)
+ (pos, package) = get_package(sc, "Primary:Kerberos-Newer-Keys")
+ self.assertEqual(1, pos)
+ self.assertEqual("Primary:Kerberos-Newer-Keys", package.name)
+
+ (pos, package) = get_package(sc, "Primary:Kerberos")
+ self.assertEqual(2, pos)
+ self.assertEqual("Primary:Kerberos", package.name)
+
+ (pos, package) = get_package(sc, "Packages")
+ self.assertEqual(3, pos)
+ self.assertEqual("Packages", package.name)
+
+ (pos, package) = get_package(sc, "Primary:WDigest")
+ self.assertEqual(4, pos)
+ self.assertEqual("Primary:WDigest", package.name)
+
+ # Check that the WDigest values are correct.
+ #
+ digests = ndr_unpack(drsblobs.package_PrimaryWDigestBlob,
+ binascii.a2b_hex(package.data))
+ self.check_wdigests(digests)
+
+ def test_userPassword_sha512(self):
+ self.add_user(options=[("password hash userPassword schemes",
+ "CryptSHA512")])
+
+ sc = self.get_supplemental_creds()
+
+ # Check that we got all the expected supplemental credentials
+ # And they are in the expected order.
+ size = len(sc.sub.packages)
+ self.assertEqual(5, size)
+
+ (pos, package) = get_package(sc, "Primary:Kerberos-Newer-Keys")
+ self.assertEqual(1, pos)
+ self.assertEqual("Primary:Kerberos-Newer-Keys", package.name)
+
+ (pos, package) = get_package(sc, "Primary:Kerberos")
+ self.assertEqual(2, pos)
+ self.assertEqual("Primary:Kerberos", package.name)
+
+ (pos, wp_package) = get_package(sc, "Primary:WDigest")
+ self.assertEqual(3, pos)
+ self.assertEqual("Primary:WDigest", wp_package.name)
+
+ (pos, package) = get_package(sc, "Packages")
+ self.assertEqual(4, pos)
+ self.assertEqual("Packages", package.name)
+
+ (pos, up_package) = get_package(sc, "Primary:userPassword")
+ self.assertEqual(5, pos)
+ self.assertEqual("Primary:userPassword", up_package.name)
+
+ # Check that the WDigest values are correct.
+ #
+ digests = ndr_unpack(drsblobs.package_PrimaryWDigestBlob,
+ binascii.a2b_hex(wp_package.data))
+ self.check_wdigests(digests)
+
+ # Check that the userPassword hashes are computed correctly
+ #
+ up = ndr_unpack(drsblobs.package_PrimaryUserPasswordBlob,
+ binascii.a2b_hex(up_package.data))
+ self.checkUserPassword(up, [("{CRYPT}", "6", None)])
+ self.checkNtHash(USER_PASS, up.current_nt_hash.hash)
+
+ def test_supplementalCredentials_cleartext(self):
+ self.add_user(clear_text=True)
+
+ sc = self.get_supplemental_creds()
+
+ # Check that we got all the expected supplemental credentials
+ # And they are in the expected order.
+ size = len(sc.sub.packages)
+ self.assertEqual(5, size)
+ (pos, package) = get_package(sc, "Primary:Kerberos-Newer-Keys")
+ self.assertEqual(1, pos)
+ self.assertEqual("Primary:Kerberos-Newer-Keys", package.name)
+
+ (pos, package) = get_package(sc, "Primary:Kerberos")
+ self.assertEqual(2, pos)
+ self.assertEqual("Primary:Kerberos", package.name)
+
+ (pos, wd_package) = get_package(sc, "Primary:WDigest")
+ self.assertEqual(3, pos)
+ self.assertEqual("Primary:WDigest", wd_package.name)
+
+ (pos, package) = get_package(sc, "Packages")
+ self.assertEqual(4, pos)
+ self.assertEqual("Packages", package.name)
+
+ (pos, ct_package) = get_package(sc, "Primary:CLEARTEXT")
+ self.assertEqual(5, pos)
+ self.assertEqual("Primary:CLEARTEXT", ct_package.name)
+
+ # Check that the WDigest values are correct.
+ #
+ digests = ndr_unpack(drsblobs.package_PrimaryWDigestBlob,
+ binascii.a2b_hex(wd_package.data))
+ self.check_wdigests(digests)
+
+ # Check the clear text value is correct.
+ ct = ndr_unpack(drsblobs.package_PrimaryCLEARTEXTBlob,
+ binascii.a2b_hex(ct_package.data))
+ self.assertEqual(USER_PASS.encode('utf-16-le'), ct.cleartext)
+
+ def test_userPassword_cleartext_sha256(self):
+ self.add_user(clear_text=True,
+ options=[("password hash userPassword schemes",
+ "CryptSHA256:rounds=5000")])
+
+ sc = self.get_supplemental_creds()
+
+ # Check that we got all the expected supplemental credentials
+ # And they are in the expected order.
+ size = len(sc.sub.packages)
+ self.assertEqual(6, size)
+
+ (pos, package) = get_package(sc, "Primary:Kerberos-Newer-Keys")
+ self.assertEqual(1, pos)
+ self.assertEqual("Primary:Kerberos-Newer-Keys", package.name)
+
+ (pos, package) = get_package(sc, "Primary:Kerberos")
+ self.assertEqual(2, pos)
+ self.assertEqual("Primary:Kerberos", package.name)
+
+ (pos, wd_package) = get_package(sc, "Primary:WDigest")
+ self.assertEqual(3, pos)
+ self.assertEqual("Primary:WDigest", wd_package.name)
+
+ (pos, ct_package) = get_package(sc, "Primary:CLEARTEXT")
+ self.assertEqual(4, pos)
+ self.assertEqual("Primary:CLEARTEXT", ct_package.name)
+
+ (pos, package) = get_package(sc, "Packages")
+ self.assertEqual(5, pos)
+ self.assertEqual("Packages", package.name)
+
+ (pos, up_package) = get_package(sc, "Primary:userPassword")
+ self.assertEqual(6, pos)
+ self.assertEqual("Primary:userPassword", up_package.name)
+
+ # Check that the WDigest values are correct.
+ #
+ digests = ndr_unpack(drsblobs.package_PrimaryWDigestBlob,
+ binascii.a2b_hex(wd_package.data))
+ self.check_wdigests(digests)
+
+ # Check the clear text value is correct.
+ ct = ndr_unpack(drsblobs.package_PrimaryCLEARTEXTBlob,
+ binascii.a2b_hex(ct_package.data))
+ self.assertEqual(USER_PASS.encode('utf-16-le'), ct.cleartext)
+
+ # Check that the userPassword hashes are computed correctly
+ #
+ up = ndr_unpack(drsblobs.package_PrimaryUserPasswordBlob,
+ binascii.a2b_hex(up_package.data))
+ self.checkUserPassword(up, [("{CRYPT}", "5", 5000)])
+ self.checkNtHash(USER_PASS, up.current_nt_hash.hash)
diff --git a/python/samba/tests/password_hash_gpgme.py b/python/samba/tests/password_hash_gpgme.py
new file mode 100644
index 0000000..9c0a511
--- /dev/null
+++ b/python/samba/tests/password_hash_gpgme.py
@@ -0,0 +1,293 @@
+# Tests for source4/dsdb/samdb/ldb_modules/password_hash.c
+#
+# Copyright (C) Catalyst IT Ltd. 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""
+Tests for source4/dsdb/samdb/ldb_modules/password_hash.c
+These tests need to be run in an environment in which
+io->ac->gpg_key_ids != NULL, so that the gpg supplemental credentials
+are generated. The functional level needs to be >= 2008 so that the
+kerberos newer keys are generated.
+"""
+
+
+from samba.tests.password_hash import (
+ PassWordHashTests,
+ get_package,
+ USER_PASS,
+ USER_NAME
+)
+from samba.ndr import ndr_unpack
+from samba.dcerpc import drsblobs
+import binascii
+from samba.tests.pso import PasswordSettings
+import samba
+
+
+class PassWordHashGpgmeTests(PassWordHashTests):
+
+ def test_default_supplementalCredentials(self):
+ self.add_user()
+ if not self.lp.get("password hash gpg key ids"):
+ self.skipTest("No password hash gpg key ids, " +
+ "Primary:SambaGPG will not be generated")
+
+ sc = self.get_supplemental_creds()
+
+ # Check that we got all the expected supplemental credentials
+ # And they are in the expected order.
+ size = len(sc.sub.packages)
+ self.assertEqual(5, size)
+ (pos, package) = get_package(sc, "Primary:Kerberos-Newer-Keys")
+ self.assertEqual(1, pos)
+ self.assertEqual("Primary:Kerberos-Newer-Keys", package.name)
+
+ (pos, package) = get_package(sc, "Primary:Kerberos")
+ self.assertEqual(2, pos)
+ self.assertEqual("Primary:Kerberos", package.name)
+
+ (pos, wd_package) = get_package(sc, "Primary:WDigest")
+ self.assertEqual(3, pos)
+ self.assertEqual("Primary:WDigest", wd_package.name)
+
+ (pos, package) = get_package(sc, "Packages")
+ self.assertEqual(4, pos)
+ self.assertEqual("Packages", package.name)
+
+ (pos, package) = get_package(sc, "Primary:SambaGPG")
+ self.assertEqual(5, pos)
+ self.assertEqual("Primary:SambaGPG", package.name)
+
+ # Check that the WDigest values are correct.
+ #
+ digests = ndr_unpack(drsblobs.package_PrimaryWDigestBlob,
+ binascii.a2b_hex(wd_package.data))
+ self.check_wdigests(digests)
+
+ def test_supplementalCredentials_cleartext(self):
+ self.add_user(clear_text=True)
+ if not self.lp.get("password hash gpg key ids"):
+ self.skipTest("No password hash gpg key ids, " +
+ "Primary:SambaGPG will not be generated")
+
+ sc = self.get_supplemental_creds()
+
+ # Check that we got all the expected supplemental credentials
+ # And they are in the expected order.
+ size = len(sc.sub.packages)
+ self.assertEqual(6, size)
+ (pos, package) = get_package(sc, "Primary:Kerberos-Newer-Keys")
+ self.assertEqual(1, pos)
+ self.assertEqual("Primary:Kerberos-Newer-Keys", package.name)
+
+ (pos, package) = get_package(sc, "Primary:Kerberos")
+ self.assertEqual(2, pos)
+ self.assertEqual("Primary:Kerberos", package.name)
+
+ (pos, wd_package) = get_package(sc, "Primary:WDigest")
+ self.assertEqual(3, pos)
+ self.assertEqual("Primary:WDigest", wd_package.name)
+
+ (pos, ct_package) = get_package(sc, "Primary:CLEARTEXT")
+ self.assertEqual(4, pos)
+ self.assertEqual("Primary:CLEARTEXT", ct_package.name)
+
+ (pos, package) = get_package(sc, "Packages")
+ self.assertEqual(5, pos)
+ self.assertEqual("Packages", package.name)
+
+ (pos, package) = get_package(sc, "Primary:SambaGPG")
+ self.assertEqual(6, pos)
+ self.assertEqual("Primary:SambaGPG", package.name)
+
+ # Check that the WDigest values are correct.
+ #
+ digests = ndr_unpack(drsblobs.package_PrimaryWDigestBlob,
+ binascii.a2b_hex(wd_package.data))
+ self.check_wdigests(digests)
+
+ # Check the clear text value is correct.
+ ct = ndr_unpack(drsblobs.package_PrimaryCLEARTEXTBlob,
+ binascii.a2b_hex(ct_package.data))
+ self.assertEqual(USER_PASS.encode('utf-16-le'), ct.cleartext)
+
+ def assert_cleartext(self, expect_cleartext, password=None):
+ """Checks cleartext is (or isn't) returned as expected"""
+ sc = self.get_supplemental_creds()
+ if expect_cleartext:
+ (pos, ct_package) = get_package(sc, "Primary:CLEARTEXT")
+ self.assertTrue(ct_package is not None, "Failed to retrieve cleartext")
+
+ # Check the clear-text value is correct.
+ ct = ndr_unpack(drsblobs.package_PrimaryCLEARTEXTBlob,
+ binascii.a2b_hex(ct_package.data))
+ self.assertEqual(password.encode('utf-16-le'), ct.cleartext)
+ else:
+ ct_package = get_package(sc, "Primary:CLEARTEXT")
+ self.assertTrue(ct_package is None,
+ "Got cleartext when we shouldn't have")
+
+ def test_supplementalCredentials_cleartext_pso(self):
+ """Checks that a PSO's cleartext setting can override the domain's"""
+
+ # create a user that stores plain-text passwords
+ self.add_user(clear_text=True)
+
+ # check that clear-text is present in the supplementary-credentials
+ self.assert_cleartext(expect_cleartext=True, password=USER_PASS)
+
+ # create a PSO overriding the plain-text setting & apply it to the user
+ no_plaintext_pso = PasswordSettings("no-plaintext-PSO", self.ldb,
+ precedence=200,
+ store_plaintext=False)
+ self.addCleanup(self.ldb.delete, no_plaintext_pso.dn)
+ userdn = "cn=" + USER_NAME + ",cn=users," + self.base_dn
+ no_plaintext_pso.apply_to(userdn)
+
+ # set the password to update the cleartext password stored
+ new_password = samba.generate_random_password(32, 32)
+ self.ldb.setpassword("(sAMAccountName=%s)" % USER_NAME, new_password)
+
+ # this time cleartext shouldn't be in the supplementary creds
+ self.assert_cleartext(expect_cleartext=False)
+
+ # unapply PSO, update password, and check we get the cleartext again
+ no_plaintext_pso.unapply(userdn)
+ new_password = samba.generate_random_password(32, 32)
+ self.ldb.setpassword("(sAMAccountName=%s)" % USER_NAME, new_password)
+ self.assert_cleartext(expect_cleartext=True, password=new_password)
+
+ # Now update the domain setting and check we no longer get cleartext
+ self.set_store_cleartext(False)
+ new_password = samba.generate_random_password(32, 32)
+ self.ldb.setpassword("(sAMAccountName=%s)" % USER_NAME, new_password)
+ self.assert_cleartext(expect_cleartext=False)
+
+ # create a PSO overriding the domain setting & apply it to the user
+ plaintext_pso = PasswordSettings("plaintext-PSO", self.ldb,
+ precedence=100, store_plaintext=True)
+ self.addCleanup(self.ldb.delete, plaintext_pso.dn)
+ plaintext_pso.apply_to(userdn)
+ new_password = samba.generate_random_password(32, 32)
+ self.ldb.setpassword("(sAMAccountName=%s)" % USER_NAME, new_password)
+ self.assert_cleartext(expect_cleartext=True, password=new_password)
+
+ def test_userPassword_multiple_hashes(self):
+ self.add_user(options=[(
+ "password hash userPassword schemes",
+ "CryptSHA512 CryptSHA256 CryptSHA512")])
+
+ sc = self.get_supplemental_creds()
+
+ # Check that we got all the expected supplemental credentials
+ # And they are in the expected order.
+ size = len(sc.sub.packages)
+ self.assertEqual(6, size)
+
+ (pos, package) = get_package(sc, "Primary:Kerberos-Newer-Keys")
+ self.assertEqual(1, pos)
+ self.assertEqual("Primary:Kerberos-Newer-Keys", package.name)
+
+ (pos, package) = get_package(sc, "Primary:Kerberos")
+ self.assertEqual(2, pos)
+ self.assertEqual("Primary:Kerberos", package.name)
+
+ (pos, wp_package) = get_package(sc, "Primary:WDigest")
+ self.assertEqual(3, pos)
+ self.assertEqual("Primary:WDigest", wp_package.name)
+
+ (pos, up_package) = get_package(sc, "Primary:userPassword")
+ self.assertEqual(4, pos)
+ self.assertEqual("Primary:userPassword", up_package.name)
+
+ (pos, package) = get_package(sc, "Packages")
+ self.assertEqual(5, pos)
+ self.assertEqual("Packages", package.name)
+
+ (pos, package) = get_package(sc, "Primary:SambaGPG")
+ self.assertEqual(6, pos)
+ self.assertEqual("Primary:SambaGPG", package.name)
+
+ # Check that the WDigest values are correct.
+ #
+ digests = ndr_unpack(drsblobs.package_PrimaryWDigestBlob,
+ binascii.a2b_hex(wp_package.data))
+ self.check_wdigests(digests)
+
+ # Check that the userPassword hashes are computed correctly
+ # Expect three hashes to be calculated
+ up = ndr_unpack(drsblobs.package_PrimaryUserPasswordBlob,
+ binascii.a2b_hex(up_package.data))
+ self.checkUserPassword(up, [
+ ("{CRYPT}", "6", None),
+ ("{CRYPT}", "5", None),
+ ("{CRYPT}", "6", None)
+ ])
+ self.checkNtHash(USER_PASS, up.current_nt_hash.hash)
+
+ def test_userPassword_multiple_hashes_rounds_specified(self):
+ self.add_user(options=[(
+ "password hash userPassword schemes",
+ "CryptSHA512:rounds=5120 CryptSHA256:rounds=2560 CryptSHA512:rounds=5122")])
+
+ sc = self.get_supplemental_creds()
+
+ # Check that we got all the expected supplemental credentials
+ # And they are in the expected order.
+ size = len(sc.sub.packages)
+ self.assertEqual(6, size)
+
+ (pos, package) = get_package(sc, "Primary:Kerberos-Newer-Keys")
+ self.assertEqual(1, pos)
+ self.assertEqual("Primary:Kerberos-Newer-Keys", package.name)
+
+ (pos, package) = get_package(sc, "Primary:Kerberos")
+ self.assertEqual(2, pos)
+ self.assertEqual("Primary:Kerberos", package.name)
+
+ (pos, wp_package) = get_package(sc, "Primary:WDigest")
+ self.assertEqual(3, pos)
+ self.assertEqual("Primary:WDigest", wp_package.name)
+
+ (pos, up_package) = get_package(sc, "Primary:userPassword")
+ self.assertEqual(4, pos)
+ self.assertEqual("Primary:userPassword", up_package.name)
+
+ (pos, package) = get_package(sc, "Packages")
+ self.assertEqual(5, pos)
+ self.assertEqual("Packages", package.name)
+
+ (pos, package) = get_package(sc, "Primary:SambaGPG")
+ self.assertEqual(6, pos)
+ self.assertEqual("Primary:SambaGPG", package.name)
+
+ # Check that the WDigest values are correct.
+ #
+ digests = ndr_unpack(drsblobs.package_PrimaryWDigestBlob,
+ binascii.a2b_hex(wp_package.data))
+ self.check_wdigests(digests)
+
+ # Check that the userPassword hashes are computed correctly
+ # Expect three hashes to be calculated
+ up = ndr_unpack(drsblobs.package_PrimaryUserPasswordBlob,
+ binascii.a2b_hex(up_package.data))
+ self.checkUserPassword(up, [
+ ("{CRYPT}", "6", 5120),
+ ("{CRYPT}", "5", 2560),
+ ("{CRYPT}", "6", 5122)
+ ])
+ self.checkNtHash(USER_PASS, up.current_nt_hash.hash)
diff --git a/python/samba/tests/password_hash_ldap.py b/python/samba/tests/password_hash_ldap.py
new file mode 100644
index 0000000..2657e75
--- /dev/null
+++ b/python/samba/tests/password_hash_ldap.py
@@ -0,0 +1,129 @@
+# Tests for source4/dsdb/samdb/ldb_modules/password_hash.c
+#
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""
+Tests for source4/dsdb/samdb/ldb_modules/password_hash.c
+
+These tests are designed to also run against Windows to confirm the values
+returned from Windows.
+
+To run against Windows:
+Set the following environment variables:
+ PASSWORD=Administrator password
+ USERNAME=Administrator
+ SMB_CONF_PATH=/dev/null
+ PYTHONPATH=bin/python
+ SERVER=Windows server IP
+
+ /usr/bin/python source4/scripting/bin/subunitrun
+ samba.tests.password_hash_ldap.PassWordHashLDAPTests
+ -U"Administrator%adminpassword"
+"""
+
+from samba.tests.password_hash import (
+ PassWordHashTests,
+ get_package,
+ USER_NAME,
+)
+from samba.samdb import SamDB
+from samba.ndr import ndr_unpack
+from samba.dcerpc import drsblobs, drsuapi, misc
+from samba import drs_utils, net
+from samba.credentials import Credentials
+import binascii
+import os
+
+
+def attid_equal(a1, a2):
+ return (a1 & 0xffffffff) == (a2 & 0xffffffff)
+
+
+class PassWordHashLDAPTests(PassWordHashTests):
+
+ # Get the supplemental credentials for the user under test
+ def get_supplemental_creds_drs(self):
+ binding_str = "ncacn_ip_tcp:%s[seal]" % os.environ["SERVER"]
+ dn = "cn=" + USER_NAME + ",cn=users," + self.base_dn
+ drs = drsuapi.drsuapi(binding_str, self.get_loadparm(), self.creds)
+ (drs_handle, supported_extensions) = drs_utils.drs_DsBind(drs)
+
+ req8 = drsuapi.DsGetNCChangesRequest8()
+
+ null_guid = misc.GUID()
+ req8.destination_dsa_guid = null_guid
+ req8.source_dsa_invocation_id = null_guid
+ req8.naming_context = drsuapi.DsReplicaObjectIdentifier()
+ req8.naming_context.dn = dn
+
+ req8.highwatermark = drsuapi.DsReplicaHighWaterMark()
+ req8.highwatermark.tmp_highest_usn = 0
+ req8.highwatermark.reserved_usn = 0
+ req8.highwatermark.highest_usn = 0
+ req8.uptodateness_vector = None
+ req8.replica_flags = (drsuapi.DRSUAPI_DRS_INIT_SYNC |
+ drsuapi.DRSUAPI_DRS_PER_SYNC |
+ drsuapi.DRSUAPI_DRS_GET_ANC |
+ drsuapi.DRSUAPI_DRS_NEVER_SYNCED |
+ drsuapi.DRSUAPI_DRS_WRIT_REP)
+ req8.max_object_count = 402
+ req8.max_ndr_size = 402116
+ req8.extended_op = drsuapi.DRSUAPI_EXOP_REPL_OBJ
+ req8.fsmo_info = 0
+ req8.partial_attribute_set = None
+ req8.partial_attribute_set_ex = None
+ req8.mapping_ctr.num_mappings = 0
+ req8.mapping_ctr.mappings = None
+ (level, ctr) = drs.DsGetNCChanges(drs_handle, 8, req8)
+
+ obj_item = ctr.first_object
+ obj = obj_item.object
+
+ sc_blob = None
+
+ for i in range(0, obj.attribute_ctr.num_attributes):
+ attr = obj.attribute_ctr.attributes[i]
+ if attid_equal(attr.attid,
+ drsuapi.DRSUAPI_ATTID_supplementalCredentials):
+ net_ctx = net.Net(self.creds)
+ net_ctx.replicate_decrypt(drs, attr, 0)
+ sc_blob = attr.value_ctr.values[0].blob
+
+ sc = ndr_unpack(drsblobs.supplementalCredentialsBlob, sc_blob)
+ return sc
+
+ def test_wDigest_supplementalCredentials(self):
+ self.creds = Credentials()
+ self.creds.set_username(os.environ["USERNAME"])
+ self.creds.set_password(os.environ["PASSWORD"])
+ self.creds.guess(self.lp)
+ ldb = SamDB("ldap://" + os.environ["SERVER"],
+ credentials=self.creds,
+ lp=self.lp)
+
+ self.add_user(ldb=ldb)
+
+ sc = self.get_supplemental_creds_drs()
+
+ (pos, package) = get_package(sc, "Primary:WDigest")
+ self.assertEqual("Primary:WDigest", package.name)
+
+ # Check that the WDigest values are correct.
+ #
+ digests = ndr_unpack(drsblobs.package_PrimaryWDigestBlob,
+ binascii.a2b_hex(package.data))
+ self.check_wdigests(digests)
diff --git a/python/samba/tests/password_quality.py b/python/samba/tests/password_quality.py
new file mode 100644
index 0000000..4ecf34a
--- /dev/null
+++ b/python/samba/tests/password_quality.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+
+# Unix SMB/CIFS implementation.
+# Copyright (C) Catalyst IT Ltd. 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for the python wrapper of the check_password_quality function
+"""
+
+from samba import check_password_quality
+from samba.tests import TestCase
+
+
+class PasswordQualityTests(TestCase):
+ def test_check_password_quality(self):
+ self.assertFalse(check_password_quality(""),
+ "empty password")
+ self.assertFalse(check_password_quality("a"),
+ "one char password")
+ self.assertFalse(check_password_quality("aaaaaaaaaaaa"),
+ "same char password")
+ self.assertFalse(check_password_quality("BLA"),
+ "multiple upcases password")
+ self.assertFalse(check_password_quality("123"),
+ "digits only")
+ self.assertFalse(check_password_quality("matthiéu"),
+ "not enough high symbols")
+ self.assertFalse(check_password_quality("abcdééàçè"),
+ "only lower case")
+ self.assertFalse(check_password_quality("abcdééàçè+"),
+ "only lower and symbols")
+ self.assertTrue(check_password_quality("abcdééàçè+ढ"),
+ "valid")
+ self.assertTrue(check_password_quality("ç+ढ"),
+ "valid")
+ self.assertTrue(check_password_quality("A2e"),
+ "valid")
+ self.assertTrue(check_password_quality("BA2eLi443"),
+ "valid")
diff --git a/python/samba/tests/password_test.py b/python/samba/tests/password_test.py
new file mode 100644
index 0000000..ba9f065
--- /dev/null
+++ b/python/samba/tests/password_test.py
@@ -0,0 +1,59 @@
+# -*- coding: utf-8 -*-
+#
+# Common functionality for all password change tests
+#
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2018
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import samba.tests
+
+
+class PasswordCommon:
+
+ @staticmethod
+ def allow_password_changes(testcase, samdb):
+ """Updates the DC to allow password changes during the current test"""
+
+ # Get the old "dSHeuristics" if it was set
+ dsheuristics = samdb.get_dsheuristics()
+
+ # Reset the "dSHeuristics" as they were before
+ testcase.addCleanup(samdb.set_dsheuristics, dsheuristics)
+
+ # Set the "dSHeuristics" to activate the correct "userPassword" behaviour
+ samdb.set_dsheuristics("000000001")
+
+ # Get the old "minPwdAge"
+ minPwdAge = samdb.get_minPwdAge()
+
+ # Reset the "minPwdAge" as it was before
+ testcase.addCleanup(samdb.set_minPwdAge, minPwdAge)
+
+ # Set it temporarily to "0"
+ samdb.set_minPwdAge("0")
+
+
+class PasswordTestCase(samba.tests.TestCase):
+
+ # this requires that an LDB connection has already been setup (so is not
+ # part of the inherited setUp())
+ def allow_password_changes(self, samdb=None):
+ """Updates the DC to allow password changes during the current test"""
+
+ if samdb is None:
+ samdb = self.ldb
+
+ PasswordCommon.allow_password_changes(self, samdb)
diff --git a/python/samba/tests/policy.py b/python/samba/tests/policy.py
new file mode 100644
index 0000000..4029150
--- /dev/null
+++ b/python/samba/tests/policy.py
@@ -0,0 +1,34 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2010
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for the libpolicy Python bindings.
+
+"""
+
+from samba.tests import TestCase
+from samba import policy
+
+
+class PolicyTests(TestCase):
+
+ def test_get_gpo_flags(self):
+ self.assertEqual(["GPO_FLAG_USER_DISABLE"],
+ policy.get_gpo_flags(policy.GPO_FLAG_USER_DISABLE))
+
+ def test_get_gplink_options(self):
+ self.assertEqual(["GPLINK_OPT_DISABLE"],
+ policy.get_gplink_options(policy.GPLINK_OPT_DISABLE))
diff --git a/python/samba/tests/posixacl.py b/python/samba/tests/posixacl.py
new file mode 100644
index 0000000..a82fa76
--- /dev/null
+++ b/python/samba/tests/posixacl.py
@@ -0,0 +1,878 @@
+# Unix SMB/CIFS implementation. Tests for NT and posix ACL manipulation
+# Copyright (C) Matthieu Patou <mat@matws.net> 2009-2010
+# Copyright (C) Andrew Bartlett 2012
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for the Samba3 NT -> posix ACL layer"""
+
+from samba.ntacls import setntacl, getntacl, checkset_backend
+from samba.dcerpc import security, smb_acl, idmap
+from samba.tests.smbd_base import SmbdBaseTests
+from samba import provision
+import os
+from samba.samba3 import smbd, passdb
+from samba.samba3 import param as s3param
+from samba import auth
+from samba.samdb import SamDB
+from samba.auth_util import system_session_unix
+from errno import ENODATA
+
+DOM_SID = "S-1-5-21-2212615479-2695158682-2101375467"
+ACL = "O:S-1-5-21-2212615479-2695158682-2101375467-512G:S-1-5-21-2212615479-2695158682-2101375467-513D:(A;OICI;FA;;;S-1-5-21-2212615479-2695158682-2101375467-512)"
+
+
+class PosixAclMappingTests(SmbdBaseTests):
+
+ def setUp(self):
+ super().setUp()
+ s3conf = s3param.get_context()
+ s3conf.load(self.get_loadparm().configfile)
+ s3conf.set("xattr_tdb:file", os.path.join(self.tempdir, "xattr.tdb"))
+ self.lp = s3conf
+ self.tempf = os.path.join(self.tempdir, "test")
+ open(self.tempf, 'w').write("empty")
+ self.samdb = SamDB(lp=self.lp, session_info=auth.system_session())
+
+ def tearDown(self):
+ smbd.unlink(self.tempf, self.get_session_info())
+ os.unlink(os.path.join(self.tempdir, "xattr.tdb"))
+ super().tearDown()
+
+ def get_session_info(self, domsid=DOM_SID):
+ """
+ Get session_info for setntacl.
+ """
+ return system_session_unix()
+
+ def print_posix_acl(self, posix_acl):
+ aclstr = ""
+ for entry in posix_acl.acl:
+ aclstr += "a_type: %d\n" % entry.a_type +\
+ "a_perm: %o\n" % entry.a_perm
+ if entry.a_type == smb_acl.SMB_ACL_USER:
+ aclstr += "uid: %d\n" % entry.info.uid
+ if entry.a_type == smb_acl.SMB_ACL_GROUP:
+ aclstr += "gid: %d\n" % entry.info.gid
+ return aclstr
+
+ def test_setntacl(self):
+ acl = ACL
+ setntacl(self.lp, self.tempf, acl, DOM_SID,
+ self.get_session_info(), use_ntvfs=False)
+
+ def test_setntacl_smbd_getntacl(self):
+ acl = ACL
+ setntacl(self.lp, self.tempf, acl, DOM_SID,
+ self.get_session_info(), use_ntvfs=True)
+ facl = getntacl(self.lp, self.tempf, self.get_session_info(), direct_db_access=True)
+ anysid = security.dom_sid(security.SID_NT_SELF)
+ self.assertEqual(facl.as_sddl(anysid), acl)
+
+ def test_setntacl_smbd_setposixacl_getntacl(self):
+ acl = ACL
+ setntacl(self.lp, self.tempf, acl, DOM_SID,
+ self.get_session_info(), use_ntvfs=True)
+
+ # This will invalidate the ACL, as we have a hook!
+ smbd.set_simple_acl(self.tempf, 0o640, self.get_session_info())
+
+ # However, this only asks the xattr
+ with self.assertRaises(OSError) as cm:
+ getntacl(self.lp, self.tempf, self.get_session_info(),
+ direct_db_access=True)
+
+ self.assertEqual(cm.exception.errno, ENODATA)
+
+ def test_setntacl_invalidate_getntacl(self):
+ acl = ACL
+ setntacl(self.lp, self.tempf, acl, DOM_SID,
+ self.get_session_info(), use_ntvfs=True)
+
+ # This should invalidate the ACL, as we include the posix ACL in the hash
+ (backend_obj, dbname) = checkset_backend(self.lp, None, None)
+ backend_obj.wrap_setxattr(dbname,
+ self.tempf, "system.fake_access_acl", b"")
+
+ # however, as this is direct DB access, we do not notice it
+ facl = getntacl(self.lp, self.tempf, self.get_session_info(), direct_db_access=True)
+ anysid = security.dom_sid(security.SID_NT_SELF)
+ self.assertEqual(acl, facl.as_sddl(anysid))
+
+ def test_setntacl_invalidate_getntacl_smbd(self):
+ acl = ACL
+ setntacl(self.lp, self.tempf, acl, DOM_SID,
+ self.get_session_info(), use_ntvfs=False)
+
+ # This should invalidate the ACL, as we include the posix ACL in the hash
+ (backend_obj, dbname) = checkset_backend(self.lp, None, None)
+ backend_obj.wrap_setxattr(dbname,
+ self.tempf, "system.fake_access_acl", b"")
+
+ # the hash would break, and we return an ACL based only on the mode, except we set the ACL using the 'ntvfs' mode that doesn't include a hash
+ facl = getntacl(self.lp, self.tempf, self.get_session_info())
+ anysid = security.dom_sid(security.SID_NT_SELF)
+ self.assertEqual(acl, facl.as_sddl(anysid))
+
+ def test_setntacl_smbd_invalidate_getntacl_smbd(self):
+ acl = ACL
+ simple_acl_from_posix = "O:S-1-5-21-2212615479-2695158682-2101375467-512G:S-1-5-21-2212615479-2695158682-2101375467-513D:(A;;FA;;;S-1-5-21-2212615479-2695158682-2101375467-512)(A;;0x1200a9;;;S-1-5-21-2212615479-2695158682-2101375467-513)(A;;;;;WD)"
+ os.chmod(self.tempf, 0o750)
+ setntacl(self.lp, self.tempf, acl, DOM_SID,
+ self.get_session_info(), use_ntvfs=False)
+
+ # This should invalidate the ACL, as we include the posix ACL in the hash
+ (backend_obj, dbname) = checkset_backend(self.lp, None, None)
+ backend_obj.wrap_setxattr(dbname,
+ self.tempf, "system.fake_access_acl", b"")
+
+ # the hash will break, and we return an ACL based only on the mode
+ facl = getntacl(self.lp, self.tempf, self.get_session_info(), direct_db_access=False)
+ anysid = security.dom_sid(security.SID_NT_SELF)
+ self.assertEqual(simple_acl_from_posix, facl.as_sddl(anysid))
+
+ def test_setntacl_getntacl_smbd(self):
+ acl = ACL
+ setntacl(self.lp, self.tempf, acl, DOM_SID,
+ self.get_session_info(), use_ntvfs=True)
+ facl = getntacl(self.lp, self.tempf, self.get_session_info(), direct_db_access=False)
+ anysid = security.dom_sid(security.SID_NT_SELF)
+ self.assertEqual(facl.as_sddl(anysid), acl)
+
+ def test_setntacl_smbd_getntacl_smbd(self):
+ acl = ACL
+ setntacl(self.lp, self.tempf, acl, DOM_SID,
+ self.get_session_info(), use_ntvfs=False)
+ facl = getntacl(self.lp, self.tempf, self.get_session_info(), direct_db_access=False)
+ anysid = security.dom_sid(security.SID_NT_SELF)
+ self.assertEqual(facl.as_sddl(anysid), acl)
+
+ def test_setntacl_smbd_setposixacl_getntacl_smbd(self):
+ acl = ACL
+ simple_acl_from_posix = "O:S-1-5-21-2212615479-2695158682-2101375467-512G:S-1-5-21-2212615479-2695158682-2101375467-513D:(A;;0x1f019f;;;S-1-5-21-2212615479-2695158682-2101375467-512)(A;;FR;;;S-1-5-21-2212615479-2695158682-2101375467-513)(A;;;;;WD)"
+ setntacl(self.lp, self.tempf, acl, DOM_SID,
+ self.get_session_info(), use_ntvfs=False)
+ # This invalidates the hash of the NT acl just set because there is a hook in the posix ACL set code
+ smbd.set_simple_acl(self.tempf, 0o640, self.get_session_info())
+ facl = getntacl(self.lp, self.tempf, self.get_session_info(), direct_db_access=False)
+ anysid = security.dom_sid(security.SID_NT_SELF)
+ self.assertEqual(simple_acl_from_posix, facl.as_sddl(anysid))
+
+ def test_setntacl_smbd_setposixacl_group_getntacl_smbd(self):
+ acl = ACL
+ BA_sid = security.dom_sid(security.SID_BUILTIN_ADMINISTRATORS)
+ simple_acl_from_posix = "O:S-1-5-21-2212615479-2695158682-2101375467-512G:S-1-5-21-2212615479-2695158682-2101375467-513D:(A;;0x1f019f;;;S-1-5-21-2212615479-2695158682-2101375467-512)(A;;FR;;;BA)(A;;FR;;;S-1-5-21-2212615479-2695158682-2101375467-513)(A;;;;;WD)"
+ setntacl(self.lp, self.tempf, acl, DOM_SID,
+ self.get_session_info(), use_ntvfs=False)
+ # This invalidates the hash of the NT acl just set because there is a hook in the posix ACL set code
+ s4_passdb = passdb.PDB(self.lp.get("passdb backend"))
+ (BA_gid, BA_type) = s4_passdb.sid_to_id(BA_sid)
+ smbd.set_simple_acl(self.tempf, 0o640, self.get_session_info(), BA_gid)
+
+ # This should re-calculate an ACL based on the posix details
+ facl = getntacl(self.lp, self.tempf, self.get_session_info(), direct_db_access=False)
+ anysid = security.dom_sid(security.SID_NT_SELF)
+ self.assertEqual(simple_acl_from_posix, facl.as_sddl(anysid))
+
+ def test_setntacl_smbd_getntacl_smbd_gpo(self):
+ acl = "O:DAG:DUD:P(A;OICI;FA;;;DA)(A;OICI;FA;;;EA)(A;OICIIO;FA;;;CO)(A;OICI;FA;;;DA)(A;OICI;FA;;;SY)(A;OICI;0x1200a9;;;AU)(A;OICI;0x1200a9;;;ED)S:AI(OU;CIIDSA;WP;f30e3bbe-9ff0-11d1-b603-0000f80367c1;bf967aa5-0de6-11d0-a285-00aa003049e2;WD)(OU;CIIDSA;WP;f30e3bbf-9ff0-11d1-b603-0000f80367c1;bf967aa5-0de6-11d0-a285-00aa003049e2;WD)"
+ setntacl(self.lp, self.tempf, acl, DOM_SID,
+ self.get_session_info(), use_ntvfs=False)
+ facl = getntacl(self.lp, self.tempf, self.get_session_info(), direct_db_access=False)
+ domsid = security.dom_sid(DOM_SID)
+ self.assertEqual(facl.as_sddl(domsid), acl)
+
+ def test_setntacl_getposixacl(self):
+ acl = ACL
+ setntacl(self.lp, self.tempf, acl, DOM_SID,
+ self.get_session_info(), use_ntvfs=False)
+ facl = getntacl(self.lp, self.tempf, self.get_session_info())
+ anysid = security.dom_sid(security.SID_NT_SELF)
+ self.assertEqual(facl.as_sddl(anysid), acl)
+ posix_acl = smbd.get_sys_acl(self.tempf, smb_acl.SMB_ACL_TYPE_ACCESS, self.get_session_info())
+
+ def test_setposixacl_getntacl(self):
+ smbd.set_simple_acl(self.tempf, 0o750, self.get_session_info())
+ # We don't expect the xattr to be filled in in this case
+ with self.assertRaises(OSError) as cm:
+ getntacl(self.lp, self.tempf, self.get_session_info())
+
+ self.assertEqual(cm.exception.errno, ENODATA)
+
+ def test_setposixacl_getntacl_smbd(self):
+ s4_passdb = passdb.PDB(self.lp.get("passdb backend"))
+ group_SID = s4_passdb.gid_to_sid(os.stat(self.tempf).st_gid)
+ user_SID = s4_passdb.uid_to_sid(os.stat(self.tempf).st_uid)
+ smbd.set_simple_acl(self.tempf, 0o640, self.get_session_info())
+ facl = getntacl(self.lp, self.tempf, self.get_session_info(), direct_db_access=False)
+ acl = "O:%sG:%sD:(A;;0x1f019f;;;%s)(A;;FR;;;%s)(A;;;;;WD)" % (user_SID, group_SID, user_SID, group_SID)
+ anysid = security.dom_sid(security.SID_NT_SELF)
+ self.assertEqual(acl, facl.as_sddl(anysid))
+
+ def test_setposixacl_dir_getntacl_smbd(self):
+ s4_passdb = passdb.PDB(self.lp.get("passdb backend"))
+ user_SID = s4_passdb.uid_to_sid(os.stat(self.tempdir).st_uid)
+ BA_sid = security.dom_sid(security.SID_BUILTIN_ADMINISTRATORS)
+ s4_passdb = passdb.PDB(self.lp.get("passdb backend"))
+ (BA_id, BA_type) = s4_passdb.sid_to_id(BA_sid)
+ self.assertEqual(BA_type, idmap.ID_TYPE_BOTH)
+ SO_sid = security.dom_sid(security.SID_BUILTIN_SERVER_OPERATORS)
+ (SO_id, SO_type) = s4_passdb.sid_to_id(SO_sid)
+ self.assertEqual(SO_type, idmap.ID_TYPE_BOTH)
+ smbd.chown(self.tempdir, BA_id, SO_id, self.get_session_info())
+ smbd.set_simple_acl(self.tempdir, 0o750, self.get_session_info())
+ facl = getntacl(self.lp, self.tempdir, self.get_session_info(), direct_db_access=False)
+ acl = "O:BAG:SOD:(A;;FA;;;BA)(A;;0x1200a9;;;SO)(A;;;;;WD)(A;OICIIO;FA;;;CO)(A;OICIIO;0x1200a9;;;CG)(A;OICIIO;0x1200a9;;;WD)"
+
+ anysid = security.dom_sid(security.SID_NT_SELF)
+ self.assertEqual(acl, facl.as_sddl(anysid))
+
+ def test_setposixacl_group_getntacl_smbd(self):
+ BA_sid = security.dom_sid(security.SID_BUILTIN_ADMINISTRATORS)
+ s4_passdb = passdb.PDB(self.lp.get("passdb backend"))
+ (BA_gid, BA_type) = s4_passdb.sid_to_id(BA_sid)
+ group_SID = s4_passdb.gid_to_sid(os.stat(self.tempf).st_gid)
+ user_SID = s4_passdb.uid_to_sid(os.stat(self.tempf).st_uid)
+ self.assertEqual(BA_type, idmap.ID_TYPE_BOTH)
+ smbd.set_simple_acl(self.tempf, 0o640, self.get_session_info(), BA_gid)
+ facl = getntacl(self.lp, self.tempf, self.get_session_info(), direct_db_access=False)
+ domsid = passdb.get_global_sam_sid()
+ acl = "O:%sG:%sD:(A;;0x1f019f;;;%s)(A;;FR;;;BA)(A;;FR;;;%s)(A;;;;;WD)" % (user_SID, group_SID, user_SID, group_SID)
+ anysid = security.dom_sid(security.SID_NT_SELF)
+ self.assertEqual(acl, facl.as_sddl(anysid))
+
+    def test_setposixacl_getposixacl(self):
+        """Round-trip a 0o640 simple ACL on a file through the system ACL API."""
+        smbd.set_simple_acl(self.tempf, 0o640, self.get_session_info())
+        posix_acl = smbd.get_sys_acl(self.tempf, smb_acl.SMB_ACL_TYPE_ACCESS, self.get_session_info())
+        # Expect exactly: USER_OBJ, GROUP_OBJ, OTHER, MASK
+        self.assertEqual(posix_acl.count, 4, self.print_posix_acl(posix_acl))
+
+        self.assertEqual(posix_acl.acl[0].a_type, smb_acl.SMB_ACL_USER_OBJ)
+        self.assertEqual(posix_acl.acl[0].a_perm, 6)
+
+        self.assertEqual(posix_acl.acl[1].a_type, smb_acl.SMB_ACL_GROUP_OBJ)
+        self.assertEqual(posix_acl.acl[1].a_perm, 4)
+
+        self.assertEqual(posix_acl.acl[2].a_type, smb_acl.SMB_ACL_OTHER)
+        self.assertEqual(posix_acl.acl[2].a_perm, 0)
+
+        self.assertEqual(posix_acl.acl[3].a_type, smb_acl.SMB_ACL_MASK)
+        self.assertEqual(posix_acl.acl[3].a_perm, 7)
+
+    def test_setposixacl_dir_getposixacl(self):
+        """Round-trip a 0o750 simple ACL on a directory through the system ACL API."""
+        smbd.set_simple_acl(self.tempdir, 0o750, self.get_session_info())
+        posix_acl = smbd.get_sys_acl(self.tempdir, smb_acl.SMB_ACL_TYPE_ACCESS, self.get_session_info())
+        # Expect exactly: USER_OBJ, GROUP_OBJ, OTHER, MASK
+        self.assertEqual(posix_acl.count, 4, self.print_posix_acl(posix_acl))
+
+        self.assertEqual(posix_acl.acl[0].a_type, smb_acl.SMB_ACL_USER_OBJ)
+        self.assertEqual(posix_acl.acl[0].a_perm, 7)
+
+        self.assertEqual(posix_acl.acl[1].a_type, smb_acl.SMB_ACL_GROUP_OBJ)
+        self.assertEqual(posix_acl.acl[1].a_perm, 5)
+
+        self.assertEqual(posix_acl.acl[2].a_type, smb_acl.SMB_ACL_OTHER)
+        self.assertEqual(posix_acl.acl[2].a_perm, 0)
+
+        self.assertEqual(posix_acl.acl[3].a_type, smb_acl.SMB_ACL_MASK)
+        self.assertEqual(posix_acl.acl[3].a_perm, 7)
+
+    def test_setposixacl_group_getposixacl(self):
+        """set_simple_acl with an extra group adds a named GROUP entry to the ACL."""
+        BA_sid = security.dom_sid(security.SID_BUILTIN_ADMINISTRATORS)
+        s4_passdb = passdb.PDB(self.lp.get("passdb backend"))
+        (BA_gid, BA_type) = s4_passdb.sid_to_id(BA_sid)
+        self.assertEqual(BA_type, idmap.ID_TYPE_BOTH)
+        smbd.set_simple_acl(self.tempf, 0o670, self.get_session_info(), BA_gid)
+        posix_acl = smbd.get_sys_acl(self.tempf, smb_acl.SMB_ACL_TYPE_ACCESS, self.get_session_info())
+
+        # Expect: USER_OBJ, GROUP_OBJ, OTHER, named GROUP (BA), MASK
+        self.assertEqual(posix_acl.count, 5, self.print_posix_acl(posix_acl))
+
+        self.assertEqual(posix_acl.acl[0].a_type, smb_acl.SMB_ACL_USER_OBJ)
+        self.assertEqual(posix_acl.acl[0].a_perm, 6)
+
+        self.assertEqual(posix_acl.acl[1].a_type, smb_acl.SMB_ACL_GROUP_OBJ)
+        self.assertEqual(posix_acl.acl[1].a_perm, 7)
+
+        self.assertEqual(posix_acl.acl[2].a_type, smb_acl.SMB_ACL_OTHER)
+        self.assertEqual(posix_acl.acl[2].a_perm, 0)
+
+        self.assertEqual(posix_acl.acl[3].a_type, smb_acl.SMB_ACL_GROUP)
+        self.assertEqual(posix_acl.acl[3].a_perm, 7)
+        self.assertEqual(posix_acl.acl[3].info.gid, BA_gid)
+
+        self.assertEqual(posix_acl.acl[4].a_type, smb_acl.SMB_ACL_MASK)
+        self.assertEqual(posix_acl.acl[4].a_perm, 7)
+
+ def test_setntacl_sysvol_check_getposixacl(self):
+ acl = provision.SYSVOL_ACL
+ domsid = passdb.get_global_sam_sid()
+ session_info = self.get_session_info(domsid)
+ setntacl(self.lp, self.tempf, acl, str(domsid),
+ session_info, use_ntvfs=False)
+ facl = getntacl(self.lp, self.tempf, session_info)
+ self.assertEqual(facl.as_sddl(domsid), acl)
+ posix_acl = smbd.get_sys_acl(self.tempf, smb_acl.SMB_ACL_TYPE_ACCESS, session_info)
+
+ nwrap_module_so_path = os.getenv('NSS_WRAPPER_MODULE_SO_PATH')
+ nwrap_module_fn_prefix = os.getenv('NSS_WRAPPER_MODULE_FN_PREFIX')
+
+ nwrap_winbind_active = (nwrap_module_so_path != "" and
+ nwrap_module_fn_prefix == "winbind")
+ is_user_session = not session_info.security_token.is_system()
+
+ LA_sid = security.dom_sid(str(domsid) + "-" + str(security.DOMAIN_RID_ADMINISTRATOR))
+ BA_sid = security.dom_sid(security.SID_BUILTIN_ADMINISTRATORS)
+ SO_sid = security.dom_sid(security.SID_BUILTIN_SERVER_OPERATORS)
+ SY_sid = security.dom_sid(security.SID_NT_SYSTEM)
+ AU_sid = security.dom_sid(security.SID_NT_AUTHENTICATED_USERS)
+
+ s4_passdb = passdb.PDB(self.lp.get("passdb backend"))
+
+ # These assertions correct for current ad_dc selftest
+ # configuration. When other environments have a broad range of
+ # groups mapped via passdb, we can relax some of these checks
+ (LA_uid, LA_type) = s4_passdb.sid_to_id(LA_sid)
+ self.assertEqual(LA_type, idmap.ID_TYPE_UID)
+ (BA_gid, BA_type) = s4_passdb.sid_to_id(BA_sid)
+ self.assertEqual(BA_type, idmap.ID_TYPE_BOTH)
+ (SO_gid, SO_type) = s4_passdb.sid_to_id(SO_sid)
+ self.assertEqual(SO_type, idmap.ID_TYPE_BOTH)
+ (SY_gid, SY_type) = s4_passdb.sid_to_id(SY_sid)
+ self.assertEqual(SO_type, idmap.ID_TYPE_BOTH)
+ (AU_gid, AU_type) = s4_passdb.sid_to_id(AU_sid)
+ self.assertEqual(AU_type, idmap.ID_TYPE_BOTH)
+
+ self.assertEqual(posix_acl.count, 13, self.print_posix_acl(posix_acl))
+
+ self.assertEqual(posix_acl.acl[0].a_type, smb_acl.SMB_ACL_GROUP)
+ self.assertEqual(posix_acl.acl[0].a_perm, 7)
+ self.assertEqual(posix_acl.acl[0].info.gid, BA_gid)
+
+ self.assertEqual(posix_acl.acl[1].a_type, smb_acl.SMB_ACL_USER)
+ if nwrap_winbind_active or is_user_session:
+ self.assertEqual(posix_acl.acl[1].a_perm, 7)
+ else:
+ self.assertEqual(posix_acl.acl[1].a_perm, 6)
+ self.assertEqual(posix_acl.acl[1].info.uid, LA_uid)
+
+ self.assertEqual(posix_acl.acl[2].a_type, smb_acl.SMB_ACL_OTHER)
+ self.assertEqual(posix_acl.acl[2].a_perm, 0)
+
+ self.assertEqual(posix_acl.acl[3].a_type, smb_acl.SMB_ACL_USER_OBJ)
+ if nwrap_winbind_active or is_user_session:
+ self.assertEqual(posix_acl.acl[3].a_perm, 7)
+ else:
+ self.assertEqual(posix_acl.acl[3].a_perm, 6)
+
+ self.assertEqual(posix_acl.acl[4].a_type, smb_acl.SMB_ACL_USER)
+ self.assertEqual(posix_acl.acl[4].a_perm, 7)
+ self.assertEqual(posix_acl.acl[4].info.uid, BA_gid)
+
+ self.assertEqual(posix_acl.acl[5].a_type, smb_acl.SMB_ACL_GROUP_OBJ)
+ self.assertEqual(posix_acl.acl[5].a_perm, 7)
+
+ self.assertEqual(posix_acl.acl[6].a_type, smb_acl.SMB_ACL_USER)
+ self.assertEqual(posix_acl.acl[6].a_perm, 5)
+ self.assertEqual(posix_acl.acl[6].info.uid, SO_gid)
+
+ self.assertEqual(posix_acl.acl[7].a_type, smb_acl.SMB_ACL_GROUP)
+ self.assertEqual(posix_acl.acl[7].a_perm, 5)
+ self.assertEqual(posix_acl.acl[7].info.gid, SO_gid)
+
+ self.assertEqual(posix_acl.acl[8].a_type, smb_acl.SMB_ACL_USER)
+ self.assertEqual(posix_acl.acl[8].a_perm, 7)
+ self.assertEqual(posix_acl.acl[8].info.uid, SY_gid)
+
+ self.assertEqual(posix_acl.acl[9].a_type, smb_acl.SMB_ACL_GROUP)
+ self.assertEqual(posix_acl.acl[9].a_perm, 7)
+ self.assertEqual(posix_acl.acl[9].info.gid, SY_gid)
+
+ self.assertEqual(posix_acl.acl[10].a_type, smb_acl.SMB_ACL_USER)
+ self.assertEqual(posix_acl.acl[10].a_perm, 5)
+ self.assertEqual(posix_acl.acl[10].info.uid, AU_gid)
+
+ self.assertEqual(posix_acl.acl[11].a_type, smb_acl.SMB_ACL_GROUP)
+ self.assertEqual(posix_acl.acl[11].a_perm, 5)
+ self.assertEqual(posix_acl.acl[11].info.gid, AU_gid)
+
+ self.assertEqual(posix_acl.acl[12].a_type, smb_acl.SMB_ACL_MASK)
+ self.assertEqual(posix_acl.acl[12].a_perm, 7)
+
+ # check that it matches:
+ # user::rwx
+ # user:root:rwx (selftest user actually)
+ # group::rwx
+ # group:Local Admins:rwx
+ # group:3000000:r-x
+ # group:3000001:rwx
+ # group:3000002:r-x
+ # mask::rwx
+ # other::---
+
+ # This is in this order in the NDR smb_acl(not re-orderded for display)
+ # a_type: GROUP
+ # a_perm: 7
+ # uid: -1
+ # gid: 10
+ # a_type: USER
+ # a_perm: 6
+ # uid: 0 (selftest user actually)
+ # gid: -1
+ # a_type: OTHER
+ # a_perm: 0
+ # uid: -1
+ # gid: -1
+ # a_type: USER_OBJ
+ # a_perm: 6
+ # uid: -1
+ # gid: -1
+ # a_type: GROUP_OBJ
+ # a_perm: 7
+ # uid: -1
+ # gid: -1
+ # a_type: GROUP
+ # a_perm: 5
+ # uid: -1
+ # gid: 3000020
+ # a_type: GROUP
+ # a_perm: 7
+ # uid: -1
+ # gid: 3000000
+ # a_type: GROUP
+ # a_perm: 5
+ # uid: -1
+ # gid: 3000001
+ # a_type: MASK
+ # a_perm: 7
+ # uid: -1
+ # gid: -1
+
+ def test_setntacl_sysvol_dir_check_getposixacl(self):
+ acl = provision.SYSVOL_ACL
+ domsid = passdb.get_global_sam_sid()
+ session_info = self.get_session_info(domsid)
+ setntacl(self.lp, self.tempdir, acl, str(domsid),
+ session_info, use_ntvfs=False)
+ facl = getntacl(self.lp, self.tempdir, session_info)
+ self.assertEqual(facl.as_sddl(domsid), acl)
+ posix_acl = smbd.get_sys_acl(self.tempdir, smb_acl.SMB_ACL_TYPE_ACCESS, session_info)
+
+ LA_sid = security.dom_sid(str(domsid) + "-" + str(security.DOMAIN_RID_ADMINISTRATOR))
+ BA_sid = security.dom_sid(security.SID_BUILTIN_ADMINISTRATORS)
+ SO_sid = security.dom_sid(security.SID_BUILTIN_SERVER_OPERATORS)
+ SY_sid = security.dom_sid(security.SID_NT_SYSTEM)
+ AU_sid = security.dom_sid(security.SID_NT_AUTHENTICATED_USERS)
+
+ s4_passdb = passdb.PDB(self.lp.get("passdb backend"))
+
+ # These assertions correct for current ad_dc selftest
+ # configuration. When other environments have a broad range of
+ # groups mapped via passdb, we can relax some of these checks
+ (LA_uid, LA_type) = s4_passdb.sid_to_id(LA_sid)
+ self.assertEqual(LA_type, idmap.ID_TYPE_UID)
+ (BA_gid, BA_type) = s4_passdb.sid_to_id(BA_sid)
+ self.assertEqual(BA_type, idmap.ID_TYPE_BOTH)
+ (SO_gid, SO_type) = s4_passdb.sid_to_id(SO_sid)
+ self.assertEqual(SO_type, idmap.ID_TYPE_BOTH)
+ (SY_gid, SY_type) = s4_passdb.sid_to_id(SY_sid)
+ self.assertEqual(SO_type, idmap.ID_TYPE_BOTH)
+ (AU_gid, AU_type) = s4_passdb.sid_to_id(AU_sid)
+ self.assertEqual(AU_type, idmap.ID_TYPE_BOTH)
+
+ self.assertEqual(posix_acl.count, 13, self.print_posix_acl(posix_acl))
+
+ self.assertEqual(posix_acl.acl[0].a_type, smb_acl.SMB_ACL_GROUP)
+ self.assertEqual(posix_acl.acl[0].a_perm, 7)
+ self.assertEqual(posix_acl.acl[0].info.gid, BA_gid)
+
+ self.assertEqual(posix_acl.acl[1].a_type, smb_acl.SMB_ACL_USER)
+ self.assertEqual(posix_acl.acl[1].a_perm, 7)
+ self.assertEqual(posix_acl.acl[1].info.uid, LA_uid)
+
+ self.assertEqual(posix_acl.acl[2].a_type, smb_acl.SMB_ACL_OTHER)
+ self.assertEqual(posix_acl.acl[2].a_perm, 0)
+
+ self.assertEqual(posix_acl.acl[3].a_type, smb_acl.SMB_ACL_USER_OBJ)
+ self.assertEqual(posix_acl.acl[3].a_perm, 7)
+
+ self.assertEqual(posix_acl.acl[4].a_type, smb_acl.SMB_ACL_USER)
+ self.assertEqual(posix_acl.acl[4].a_perm, 7)
+ self.assertEqual(posix_acl.acl[4].info.uid, BA_gid)
+
+ self.assertEqual(posix_acl.acl[5].a_type, smb_acl.SMB_ACL_GROUP_OBJ)
+ self.assertEqual(posix_acl.acl[5].a_perm, 7)
+
+ self.assertEqual(posix_acl.acl[6].a_type, smb_acl.SMB_ACL_USER)
+ self.assertEqual(posix_acl.acl[6].a_perm, 5)
+ self.assertEqual(posix_acl.acl[6].info.uid, SO_gid)
+
+ self.assertEqual(posix_acl.acl[7].a_type, smb_acl.SMB_ACL_GROUP)
+ self.assertEqual(posix_acl.acl[7].a_perm, 5)
+ self.assertEqual(posix_acl.acl[7].info.gid, SO_gid)
+
+ self.assertEqual(posix_acl.acl[8].a_type, smb_acl.SMB_ACL_USER)
+ self.assertEqual(posix_acl.acl[8].a_perm, 7)
+ self.assertEqual(posix_acl.acl[8].info.uid, SY_gid)
+
+ self.assertEqual(posix_acl.acl[9].a_type, smb_acl.SMB_ACL_GROUP)
+ self.assertEqual(posix_acl.acl[9].a_perm, 7)
+ self.assertEqual(posix_acl.acl[9].info.gid, SY_gid)
+
+ self.assertEqual(posix_acl.acl[10].a_type, smb_acl.SMB_ACL_USER)
+ self.assertEqual(posix_acl.acl[10].a_perm, 5)
+ self.assertEqual(posix_acl.acl[10].info.uid, AU_gid)
+
+ self.assertEqual(posix_acl.acl[11].a_type, smb_acl.SMB_ACL_GROUP)
+ self.assertEqual(posix_acl.acl[11].a_perm, 5)
+ self.assertEqual(posix_acl.acl[11].info.gid, AU_gid)
+
+ self.assertEqual(posix_acl.acl[12].a_type, smb_acl.SMB_ACL_MASK)
+ self.assertEqual(posix_acl.acl[12].a_perm, 7)
+
+ # check that it matches:
+ # user::rwx
+ # user:root:rwx (selftest user actually)
+ # group::rwx
+ # group:3000000:rwx
+ # group:3000001:r-x
+ # group:3000002:rwx
+ # group:3000003:r-x
+ # mask::rwx
+ # other::---
+
+ def test_setntacl_policies_dir_check_getposixacl(self):
+ acl = provision.POLICIES_ACL
+ domsid = passdb.get_global_sam_sid()
+ session_info = self.get_session_info(domsid)
+ setntacl(self.lp, self.tempdir, acl, str(domsid),
+ session_info, use_ntvfs=False)
+ facl = getntacl(self.lp, self.tempdir, session_info)
+ self.assertEqual(facl.as_sddl(domsid), acl)
+ posix_acl = smbd.get_sys_acl(self.tempdir, smb_acl.SMB_ACL_TYPE_ACCESS, session_info)
+
+ LA_sid = security.dom_sid(str(domsid) + "-" + str(security.DOMAIN_RID_ADMINISTRATOR))
+ BA_sid = security.dom_sid(security.SID_BUILTIN_ADMINISTRATORS)
+ SO_sid = security.dom_sid(security.SID_BUILTIN_SERVER_OPERATORS)
+ SY_sid = security.dom_sid(security.SID_NT_SYSTEM)
+ AU_sid = security.dom_sid(security.SID_NT_AUTHENTICATED_USERS)
+ PA_sid = security.dom_sid(str(domsid) + "-" + str(security.DOMAIN_RID_POLICY_ADMINS))
+
+ s4_passdb = passdb.PDB(self.lp.get("passdb backend"))
+
+ # These assertions correct for current ad_dc selftest
+ # configuration. When other environments have a broad range of
+ # groups mapped via passdb, we can relax some of these checks
+ (LA_uid, LA_type) = s4_passdb.sid_to_id(LA_sid)
+ self.assertEqual(LA_type, idmap.ID_TYPE_UID)
+ (BA_gid, BA_type) = s4_passdb.sid_to_id(BA_sid)
+ self.assertEqual(BA_type, idmap.ID_TYPE_BOTH)
+ (SO_gid, SO_type) = s4_passdb.sid_to_id(SO_sid)
+ self.assertEqual(SO_type, idmap.ID_TYPE_BOTH)
+ (SY_gid, SY_type) = s4_passdb.sid_to_id(SY_sid)
+ self.assertEqual(SO_type, idmap.ID_TYPE_BOTH)
+ (AU_gid, AU_type) = s4_passdb.sid_to_id(AU_sid)
+ self.assertEqual(AU_type, idmap.ID_TYPE_BOTH)
+ (PA_gid, PA_type) = s4_passdb.sid_to_id(PA_sid)
+ self.assertEqual(PA_type, idmap.ID_TYPE_BOTH)
+
+ self.assertEqual(posix_acl.count, 15, self.print_posix_acl(posix_acl))
+
+ self.assertEqual(posix_acl.acl[0].a_type, smb_acl.SMB_ACL_GROUP)
+ self.assertEqual(posix_acl.acl[0].a_perm, 7)
+ self.assertEqual(posix_acl.acl[0].info.gid, BA_gid)
+
+ self.assertEqual(posix_acl.acl[1].a_type, smb_acl.SMB_ACL_USER)
+ self.assertEqual(posix_acl.acl[1].a_perm, 7)
+ self.assertEqual(posix_acl.acl[1].info.uid, LA_uid)
+
+ self.assertEqual(posix_acl.acl[2].a_type, smb_acl.SMB_ACL_OTHER)
+ self.assertEqual(posix_acl.acl[2].a_perm, 0)
+
+ self.assertEqual(posix_acl.acl[3].a_type, smb_acl.SMB_ACL_USER_OBJ)
+ self.assertEqual(posix_acl.acl[3].a_perm, 7)
+
+ self.assertEqual(posix_acl.acl[4].a_type, smb_acl.SMB_ACL_USER)
+ self.assertEqual(posix_acl.acl[4].a_perm, 7)
+ self.assertEqual(posix_acl.acl[4].info.uid, BA_gid)
+
+ self.assertEqual(posix_acl.acl[5].a_type, smb_acl.SMB_ACL_GROUP_OBJ)
+ self.assertEqual(posix_acl.acl[5].a_perm, 7)
+
+ self.assertEqual(posix_acl.acl[6].a_type, smb_acl.SMB_ACL_USER)
+ self.assertEqual(posix_acl.acl[6].a_perm, 5)
+ self.assertEqual(posix_acl.acl[6].info.uid, SO_gid)
+
+ self.assertEqual(posix_acl.acl[7].a_type, smb_acl.SMB_ACL_GROUP)
+ self.assertEqual(posix_acl.acl[7].a_perm, 5)
+ self.assertEqual(posix_acl.acl[7].info.gid, SO_gid)
+
+ self.assertEqual(posix_acl.acl[8].a_type, smb_acl.SMB_ACL_USER)
+ self.assertEqual(posix_acl.acl[8].a_perm, 7)
+ self.assertEqual(posix_acl.acl[8].info.uid, SY_gid)
+
+ self.assertEqual(posix_acl.acl[9].a_type, smb_acl.SMB_ACL_GROUP)
+ self.assertEqual(posix_acl.acl[9].a_perm, 7)
+ self.assertEqual(posix_acl.acl[9].info.gid, SY_gid)
+
+ self.assertEqual(posix_acl.acl[10].a_type, smb_acl.SMB_ACL_USER)
+ self.assertEqual(posix_acl.acl[10].a_perm, 5)
+ self.assertEqual(posix_acl.acl[10].info.uid, AU_gid)
+
+ self.assertEqual(posix_acl.acl[11].a_type, smb_acl.SMB_ACL_GROUP)
+ self.assertEqual(posix_acl.acl[11].a_perm, 5)
+ self.assertEqual(posix_acl.acl[11].info.gid, AU_gid)
+
+ self.assertEqual(posix_acl.acl[12].a_type, smb_acl.SMB_ACL_USER)
+ self.assertEqual(posix_acl.acl[12].a_perm, 7)
+ self.assertEqual(posix_acl.acl[12].info.uid, PA_gid)
+
+ self.assertEqual(posix_acl.acl[13].a_type, smb_acl.SMB_ACL_GROUP)
+ self.assertEqual(posix_acl.acl[13].a_perm, 7)
+ self.assertEqual(posix_acl.acl[13].info.gid, PA_gid)
+
+ self.assertEqual(posix_acl.acl[14].a_type, smb_acl.SMB_ACL_MASK)
+ self.assertEqual(posix_acl.acl[14].a_perm, 7)
+
+ # check that it matches:
+ # user::rwx
+ # user:root:rwx (selftest user actually)
+ # group::rwx
+ # group:3000000:rwx
+ # group:3000001:r-x
+ # group:3000002:rwx
+ # group:3000003:r-x
+ # group:3000004:rwx
+ # mask::rwx
+ # other::---
+
+ def test_setntacl_policies_check_getposixacl(self):
+ acl = provision.POLICIES_ACL
+
+ domsid = passdb.get_global_sam_sid()
+ session_info = self.get_session_info(domsid)
+ setntacl(self.lp, self.tempf, acl, str(domsid),
+ session_info, use_ntvfs=False)
+ facl = getntacl(self.lp, self.tempf, session_info)
+ self.assertEqual(facl.as_sddl(domsid), acl)
+ posix_acl = smbd.get_sys_acl(self.tempf, smb_acl.SMB_ACL_TYPE_ACCESS, session_info)
+
+ nwrap_module_so_path = os.getenv('NSS_WRAPPER_MODULE_SO_PATH')
+ nwrap_module_fn_prefix = os.getenv('NSS_WRAPPER_MODULE_FN_PREFIX')
+
+ nwrap_winbind_active = (nwrap_module_so_path != "" and
+ nwrap_module_fn_prefix == "winbind")
+ is_user_session = not session_info.security_token.is_system()
+
+ LA_sid = security.dom_sid(str(domsid) + "-" + str(security.DOMAIN_RID_ADMINISTRATOR))
+ BA_sid = security.dom_sid(security.SID_BUILTIN_ADMINISTRATORS)
+ SO_sid = security.dom_sid(security.SID_BUILTIN_SERVER_OPERATORS)
+ SY_sid = security.dom_sid(security.SID_NT_SYSTEM)
+ AU_sid = security.dom_sid(security.SID_NT_AUTHENTICATED_USERS)
+ PA_sid = security.dom_sid(str(domsid) + "-" + str(security.DOMAIN_RID_POLICY_ADMINS))
+
+ s4_passdb = passdb.PDB(self.lp.get("passdb backend"))
+
+ # These assertions correct for current ad_dc selftest
+ # configuration. When other environments have a broad range of
+ # groups mapped via passdb, we can relax some of these checks
+ (LA_uid, LA_type) = s4_passdb.sid_to_id(LA_sid)
+ self.assertEqual(LA_type, idmap.ID_TYPE_UID)
+ (BA_gid, BA_type) = s4_passdb.sid_to_id(BA_sid)
+ self.assertEqual(BA_type, idmap.ID_TYPE_BOTH)
+ (SO_gid, SO_type) = s4_passdb.sid_to_id(SO_sid)
+ self.assertEqual(SO_type, idmap.ID_TYPE_BOTH)
+ (SY_gid, SY_type) = s4_passdb.sid_to_id(SY_sid)
+ self.assertEqual(SO_type, idmap.ID_TYPE_BOTH)
+ (AU_gid, AU_type) = s4_passdb.sid_to_id(AU_sid)
+ self.assertEqual(AU_type, idmap.ID_TYPE_BOTH)
+ (PA_gid, PA_type) = s4_passdb.sid_to_id(PA_sid)
+ self.assertEqual(PA_type, idmap.ID_TYPE_BOTH)
+
+ self.assertEqual(posix_acl.count, 15, self.print_posix_acl(posix_acl))
+
+ self.assertEqual(posix_acl.acl[0].a_type, smb_acl.SMB_ACL_GROUP)
+ self.assertEqual(posix_acl.acl[0].a_perm, 7)
+ self.assertEqual(posix_acl.acl[0].info.gid, BA_gid)
+
+ self.assertEqual(posix_acl.acl[1].a_type, smb_acl.SMB_ACL_USER)
+ if nwrap_winbind_active or is_user_session:
+ self.assertEqual(posix_acl.acl[1].a_perm, 7)
+ else:
+ self.assertEqual(posix_acl.acl[1].a_perm, 6)
+ self.assertEqual(posix_acl.acl[1].info.uid, LA_uid)
+
+ self.assertEqual(posix_acl.acl[2].a_type, smb_acl.SMB_ACL_OTHER)
+ self.assertEqual(posix_acl.acl[2].a_perm, 0)
+
+ self.assertEqual(posix_acl.acl[3].a_type, smb_acl.SMB_ACL_USER_OBJ)
+ if nwrap_winbind_active or is_user_session:
+ self.assertEqual(posix_acl.acl[3].a_perm, 7)
+ else:
+ self.assertEqual(posix_acl.acl[3].a_perm, 6)
+
+ self.assertEqual(posix_acl.acl[4].a_type, smb_acl.SMB_ACL_USER)
+ self.assertEqual(posix_acl.acl[4].a_perm, 7)
+ self.assertEqual(posix_acl.acl[4].info.uid, BA_gid)
+
+ self.assertEqual(posix_acl.acl[5].a_type, smb_acl.SMB_ACL_GROUP_OBJ)
+ self.assertEqual(posix_acl.acl[5].a_perm, 7)
+
+ self.assertEqual(posix_acl.acl[6].a_type, smb_acl.SMB_ACL_USER)
+ self.assertEqual(posix_acl.acl[6].a_perm, 5)
+ self.assertEqual(posix_acl.acl[6].info.uid, SO_gid)
+
+ self.assertEqual(posix_acl.acl[7].a_type, smb_acl.SMB_ACL_GROUP)
+ self.assertEqual(posix_acl.acl[7].a_perm, 5)
+ self.assertEqual(posix_acl.acl[7].info.gid, SO_gid)
+
+ self.assertEqual(posix_acl.acl[8].a_type, smb_acl.SMB_ACL_USER)
+ self.assertEqual(posix_acl.acl[8].a_perm, 7)
+ self.assertEqual(posix_acl.acl[8].info.uid, SY_gid)
+
+ self.assertEqual(posix_acl.acl[9].a_type, smb_acl.SMB_ACL_GROUP)
+ self.assertEqual(posix_acl.acl[9].a_perm, 7)
+ self.assertEqual(posix_acl.acl[9].info.gid, SY_gid)
+
+ self.assertEqual(posix_acl.acl[10].a_type, smb_acl.SMB_ACL_USER)
+ self.assertEqual(posix_acl.acl[10].a_perm, 5)
+ self.assertEqual(posix_acl.acl[10].info.uid, AU_gid)
+
+ self.assertEqual(posix_acl.acl[11].a_type, smb_acl.SMB_ACL_GROUP)
+ self.assertEqual(posix_acl.acl[11].a_perm, 5)
+ self.assertEqual(posix_acl.acl[11].info.gid, AU_gid)
+
+ self.assertEqual(posix_acl.acl[12].a_type, smb_acl.SMB_ACL_USER)
+ self.assertEqual(posix_acl.acl[12].a_perm, 7)
+ self.assertEqual(posix_acl.acl[12].info.uid, PA_gid)
+
+ self.assertEqual(posix_acl.acl[13].a_type, smb_acl.SMB_ACL_GROUP)
+ self.assertEqual(posix_acl.acl[13].a_perm, 7)
+ self.assertEqual(posix_acl.acl[13].info.gid, PA_gid)
+
+ self.assertEqual(posix_acl.acl[14].a_type, smb_acl.SMB_ACL_MASK)
+ self.assertEqual(posix_acl.acl[14].a_perm, 7)
+
+ # check that it matches:
+ # user::rwx
+ # user:root:rwx (selftest user actually)
+ # group::rwx
+ # group:Local Admins:rwx
+ # group:3000000:r-x
+ # group:3000001:rwx
+ # group:3000002:r-x
+ # group:3000003:rwx
+ # mask::rwx
+ # other::---
+
+ # This is in this order in the NDR smb_acl(not re-orderded for display)
+ # a_type: GROUP
+ # a_perm: 7
+ # uid: -1
+ # gid: 10
+ # a_type: USER
+ # a_perm: 6
+ # uid: 0 (selftest user actually)
+ # gid: -1
+ # a_type: OTHER
+ # a_perm: 0
+ # uid: -1
+ # gid: -1
+ # a_type: USER_OBJ
+ # a_perm: 6
+ # uid: -1
+ # gid: -1
+ # a_type: GROUP_OBJ
+ # a_perm: 7
+ # uid: -1
+ # gid: -1
+ # a_type: GROUP
+ # a_perm: 5
+ # uid: -1
+ # gid: 3000020
+ # a_type: GROUP
+ # a_perm: 7
+ # uid: -1
+ # gid: 3000000
+ # a_type: GROUP
+ # a_perm: 5
+ # uid: -1
+ # gid: 3000001
+ # a_type: GROUP
+ # a_perm: 7
+ # uid: -1
+ # gid: 3000003
+ # a_type: MASK
+ # a_perm: 7
+ # uid: -1
+ # gid: -1
+
+
+class SessionedPosixAclMappingTests(PosixAclMappingTests):
+    """
+    Re-run the PosixAclMappingTests suite using an authenticated
+    (non-system) user session instead of the default session.
+    """
+
+    def get_session_info(self, domsid=DOM_SID):
+        """
+        Get session_info for setntacl.
+
+        Returns an Administrator user session built from the local sam,
+        unless *domsid* is foreign to the local db, in which case a faked
+        admin session for that sid is returned instead.
+        """
+        if str(domsid) != str(self.samdb.get_domain_sid()):
+            # fake it with admin session as domsid is not in local db
+            admin_session = auth.admin_session(self.lp, str(domsid))
+            auth.session_info_fill_unix(admin_session,
+                                        lp_ctx=self.lp,
+                                        user_name="Administrator")
+            return admin_session
+
+        dn = '<SID={0}-{1}>'.format(domsid, security.DOMAIN_RID_ADMINISTRATOR)
+        flags = (auth.AUTH_SESSION_INFO_DEFAULT_GROUPS |
+                 auth.AUTH_SESSION_INFO_AUTHENTICATED |
+                 auth.AUTH_SESSION_INFO_SIMPLE_PRIVILEGES)
+        user_session = auth.user_session(self.samdb,
+                                         lp_ctx=self.lp,
+                                         dn=dn,
+                                         session_info_flags=flags)
+        auth.session_info_fill_unix(user_session,
+                                    lp_ctx=self.lp,
+                                    user_name="Administrator")
+        return user_session
+
+
+class UnixSessionedPosixAclMappingTests(PosixAclMappingTests):
+    """
+    Re-run the PosixAclMappingTests suite with a unix-token-filled
+    user session.
+
+    NOTE(review): this currently builds the same session as
+    SessionedPosixAclMappingTests — confirm whether the two variants
+    were meant to differ.
+    """
+
+    def get_session_info(self, domsid=DOM_SID):
+        """
+        Get session_info for setntacl.
+        """
+        if str(domsid) != str(self.samdb.get_domain_sid()):
+            # fake it with admin session as domsid is not in local db
+            admin_session = auth.admin_session(self.lp, str(domsid))
+            auth.session_info_fill_unix(admin_session,
+                                        lp_ctx=self.lp,
+                                        user_name="Administrator")
+            return admin_session
+
+        dn = '<SID={0}-{1}>'.format(domsid, security.DOMAIN_RID_ADMINISTRATOR)
+        flags = (auth.AUTH_SESSION_INFO_DEFAULT_GROUPS |
+                 auth.AUTH_SESSION_INFO_AUTHENTICATED |
+                 auth.AUTH_SESSION_INFO_SIMPLE_PRIVILEGES)
+
+        session = auth.user_session(self.samdb, lp_ctx=self.lp, dn=dn,
+                                    session_info_flags=flags)
+        auth.session_info_fill_unix(session,
+                                    lp_ctx=self.lp,
+                                    user_name="Administrator")
+        return session
diff --git a/python/samba/tests/prefork_restart.py b/python/samba/tests/prefork_restart.py
new file mode 100644
index 0000000..6c11200
--- /dev/null
+++ b/python/samba/tests/prefork_restart.py
@@ -0,0 +1,462 @@
+# Tests for process restarting in the pre-fork process model
+#
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2018
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+"""Tests process restarting in the pre-fork process model.
+   NOTE: As this test kills samba processes it won't play nicely with other
+   tests, so it needs to be run in its own environment.
+"""
+
+
+import os
+import signal
+import time
+
+import samba
+from samba.tests import TestCase, delete_force
+from samba.dcerpc import echo, netlogon
+from samba.messaging import Messaging
+from samba.samdb import SamDB
+from samba.credentials import Credentials, DONT_USE_KERBEROS
+from samba.common import get_string
+from samba.dsdb import (
+ UF_WORKSTATION_TRUST_ACCOUNT,
+ UF_PASSWD_NOTREQD)
+from samba.dcerpc.misc import SEC_CHAN_WKSTA
+from samba.auth import system_session
+
+NUM_WORKERS = 4
+MACHINE_NAME = "PFRS"
+
+
+class PreforkProcessRestartTests(TestCase):
+
+    def setUp(self):
+        """Create the messaging context used to query irpc registrations."""
+        super().setUp()
+        lp_ctx = self.get_loadparm()
+        self.msg_ctx = Messaging(lp_ctx=lp_ctx)
+
+ def get_process_data(self):
+ services = self.msg_ctx.irpc_all_servers()
+
+ processes = []
+ for service in services:
+ for id in service.ids:
+ processes.append((service.name, id.pid))
+ return processes
+
+    def get_process(self, name):
+        """Return the pid registered for *name*, or None if not registered."""
+        processes = self.get_process_data()
+        for pname, pid in processes:
+            if name == pname:
+                return pid
+        return None
+
+    def get_worker_pids(self, name, workers):
+        """Return the pids of the *workers* (a count) prefork workers of *name*,
+        failing the test if any worker is not registered."""
+        pids = []
+        for x in range(workers):
+            process_name = "prefork-worker-{0}-{1}".format(name, x)
+            pids.append(self.get_process(process_name))
+            self.assertIsNotNone(pids[x])
+        return pids
+
+    def wait_for_workers(self, name, workers):
+        """Wait until each worker in *workers* (list of old pids) has restarted."""
+        num_workers = len(workers)
+        for x in range(num_workers):
+            process_name = "prefork-worker-{0}-{1}".format(name, x)
+            self.wait_for_process(process_name, workers[x], 0, 1, 30)
+
+    def wait_for_process(self, name, pid, initial_delay, wait, timeout):
+        """Poll until process *name* is registered with a pid other than *pid*.
+
+        Sleeps *initial_delay* seconds first, then re-checks every *wait*
+        seconds, failing the test after roughly *timeout* seconds.
+        """
+        time.sleep(initial_delay)
+        delay = initial_delay
+        while delay < timeout:
+            p = self.get_process(name)
+            if p is not None and p != pid:
+                # process has restarted
+                return
+            time.sleep(wait)
+            delay += wait
+        self.fail("Times out after {0} seconds waiting for {1} to restart".
+                  format(delay, name))
+
+ def check_for_duplicate_processes(self):
+ processes = self.get_process_data()
+ process_map = {}
+ for name, p in processes:
+ if (name.startswith("prefork-") or
+ name.endswith("_server") or
+ name.endswith("srv")):
+
+ if name in process_map:
+ if p != process_map[name]:
+ self.fail(
+ "Duplicate process for {0}, pids {1} and {2}".
+ format(name, p, process_map[name]))
+
+    def simple_bind(self):
+        """Open an LDAPS connection with a simple bind; connection kept on self.samdb."""
+        creds = self.insta_creds(template=self.get_credentials())
+        creds.set_bind_dn("%s\\%s" % (creds.get_domain(),
+                                      creds.get_username()))
+
+        self.samdb = SamDB(url="ldaps://%s" % os.environ["SERVER"],
+                           lp=self.get_loadparm(),
+                           credentials=creds)
+
+    def rpc_echo(self):
+        """Exercise the RPC service with a local rpcecho EchoData round trip."""
+        conn = echo.rpcecho("ncalrpc:", self.get_loadparm())
+        self.assertEqual([1, 2, 3], conn.EchoData([1, 2, 3]))
+
+    def netlogon(self):
+        """Exercise netlogon: create a machine account, open a schannel
+        netlogon connection with its credentials, then delete the account."""
+        server = os.environ["SERVER"]
+        host = os.environ["SERVER_IP"]
+        lp = self.get_loadparm()
+
+        credentials = self.get_credentials()
+
+        session = system_session()
+        ldb = SamDB(url="ldap://%s" % host,
+                    session_info=session,
+                    credentials=credentials,
+                    lp=lp)
+        machine_pass = samba.generate_random_password(32, 32)
+        machine_name = MACHINE_NAME
+        machine_dn = "cn=%s,%s" % (machine_name, ldb.domain_dn())
+
+        # remove any leftover account from a previous run
+        delete_force(ldb, machine_dn)
+
+        utf16pw = ('"%s"' % get_string(machine_pass)).encode('utf-16-le')
+        ldb.add({
+            "dn": machine_dn,
+            "objectclass": "computer",
+            "sAMAccountName": "%s$" % machine_name,
+            "userAccountControl":
+                str(UF_WORKSTATION_TRUST_ACCOUNT | UF_PASSWD_NOTREQD),
+            "unicodePwd": utf16pw})
+
+        machine_creds = Credentials()
+        machine_creds.guess(lp)
+        machine_creds.set_secure_channel_type(SEC_CHAN_WKSTA)
+        machine_creds.set_kerberos_state(DONT_USE_KERBEROS)
+        machine_creds.set_password(machine_pass)
+        machine_creds.set_username(machine_name + "$")
+        machine_creds.set_workstation(machine_name)
+
+        netlogon.netlogon(
+            "ncacn_ip_tcp:%s[schannel,seal]" % server,
+            lp,
+            machine_creds)
+
+        delete_force(ldb, machine_dn)
+
+    def test_ldap_master_restart(self):
+        """Kill the ldap prefork master and check master and workers restart."""
+        # check ldap connection, do a simple bind
+        self.simple_bind()
+
+        # get ldap master process
+        pid = self.get_process("prefork-master-ldap")
+        self.assertIsNotNone(pid)
+
+        # Get the worker processes
+        workers = self.get_worker_pids("ldap", NUM_WORKERS)
+
+        # kill it
+        os.kill(pid, signal.SIGTERM)
+
+        # wait for the process to restart
+        self.wait_for_process("prefork-master-ldap", pid, 1, 1, 30)
+
+        # restarting the master restarts the workers as well, so make sure
+        # they have finished restarting
+        self.wait_for_workers("ldap", workers)
+
+        # get ldap master process
+        new_pid = self.get_process("prefork-master-ldap")
+        self.assertIsNotNone(new_pid)
+
+        # check that the pid has changed
+        self.assertNotEqual(pid, new_pid)
+
+        # check that the worker processes have restarted
+        new_workers = self.get_worker_pids("ldap", NUM_WORKERS)
+        for x in range(NUM_WORKERS):
+            self.assertNotEqual(workers[x], new_workers[x])
+
+        # check that the previous server entries have been removed.
+        self.check_for_duplicate_processes()
+
+        # check ldap connection, another simple bind
+        self.simple_bind()
+
+ def test_ldap_worker_restart(self):
+ # check ldap connection, do a simple bind
+ self.simple_bind()
+
+ # get ldap master process
+ pid = self.get_process("prefork-master-ldap")
+ self.assertIsNotNone(pid)
+
+ # Get the worker processes
+ workers = self.get_worker_pids("ldap", NUM_WORKERS)
+
+ # kill worker 0
+ os.kill(workers[0], signal.SIGTERM)
+
+ # wait for the process to restart
+ self.wait_for_process("prefork-worker-ldap-0", pid, 1, 1, 30)
+
+ # get ldap master process
+ new_pid = self.get_process("prefork-master-ldap")
+ self.assertIsNotNone(new_pid)
+
+ # check that the pid has not changed
+ self.assertEqual(pid, new_pid)
+
+ # check that the worker processes have restarted
+ new_workers = self.get_worker_pids("ldap", NUM_WORKERS)
+ # process 0 should have a new pid the others should be unchanged
+ self.assertNotEqual(workers[0], new_workers[0])
+ self.assertEqual(workers[1], new_workers[1])
+ self.assertEqual(workers[2], new_workers[2])
+ self.assertEqual(workers[3], new_workers[3])
+
+ # check that the previous server entries have been removed.
+ self.check_for_duplicate_processes()
+
+ # check ldap connection, another simple bind
+ self.simple_bind()
+
+ #
+ # Kill all the ldap worker processes and ensure that they are restarted
+ # correctly
+ #
+    def test_ldap_all_workers_restart(self):
+        """Kill every ldap worker and check they all restart under the same master."""
+        # check ldap connection, do a simple bind
+        self.simple_bind()
+
+        # get ldap master process
+        pid = self.get_process("prefork-master-ldap")
+        self.assertIsNotNone(pid)
+
+        # Get the worker processes
+        workers = self.get_worker_pids("ldap", NUM_WORKERS)
+
+        # kill all the worker processes
+        for x in workers:
+            os.kill(x, signal.SIGTERM)
+
+        # wait for the worker processes to restart
+        self.wait_for_workers("ldap", workers)
+
+        # get ldap master process
+        new_pid = self.get_process("prefork-master-ldap")
+        self.assertIsNotNone(new_pid)
+
+        # check that the pid has not changed
+        self.assertEqual(pid, new_pid)
+
+        # check that the worker processes have restarted
+        new_workers = self.get_worker_pids("ldap", NUM_WORKERS)
+        for x in range(NUM_WORKERS):
+            self.assertNotEqual(workers[x], new_workers[x])
+
+        # check that the previous server entries have been removed.
+        self.check_for_duplicate_processes()
+
+        # check ldap connection, another simple bind
+        self.simple_bind()
+
+ def test_rpc_master_restart(self):
+ # check rpc connection, make a rpc echo request
+ self.rpc_echo()
+
+ # get rpc master process
+ pid = self.get_process("prefork-master-rpc")
+ self.assertIsNotNone(pid)
+
+ # Get the worker processes
+ workers = self.get_worker_pids("rpc", NUM_WORKERS)
+
+ # kill it
+ os.kill(pid, signal.SIGTERM)
+
+ # wait for the process to restart
+ self.wait_for_process("prefork-master-rpc", pid, 1, 1, 30)
+
+ # wait for workers to restart as well
+ self.wait_for_workers("rpc", workers)
+
+ # get rpc master process
+ new_pid = self.get_process("prefork-master-rpc")
+ self.assertIsNotNone(new_pid)
+
+ # check that the pid has changed
+ self.assertNotEqual(pid, new_pid)
+
+ # check that the worker processes have restarted
+ new_workers = self.get_worker_pids("rpc", NUM_WORKERS)
+ for x in range(NUM_WORKERS):
+ self.assertNotEqual(workers[x], new_workers[x])
+
+ # check that the previous server entries have been removed.
+ self.check_for_duplicate_processes()
+
+ # check rpc connection, another rpc echo request
+ self.rpc_echo()
+
+ def test_rpc_worker_zero_restart(self):
+ # check rpc connection, make a rpc echo request and a netlogon request
+ self.rpc_echo()
+ self.netlogon()
+
+ # get rpc master process
+ pid = self.get_process("prefork-master-rpc")
+ self.assertIsNotNone(pid)
+
+ # Get the worker processes
+ workers = self.get_worker_pids("rpc", NUM_WORKERS)
+
+ # kill worker 0
+ os.kill(workers[0], signal.SIGTERM)
+
+ # wait for the process to restart
+ self.wait_for_process("prefork-worker-rpc-0", workers[0], 1, 1, 30)
+
+ # get rpc master process
+ new_pid = self.get_process("prefork-master-rpc")
+ self.assertIsNotNone(new_pid)
+
+ # check that the pid has not changed
+ self.assertEqual(pid, new_pid)
+
+ # check that the worker processes have restarted
+ new_workers = self.get_worker_pids("rpc", NUM_WORKERS)
+ # process 0 should have a new pid the others should be unchanged
+ self.assertNotEqual(workers[0], new_workers[0])
+ self.assertEqual(workers[1], new_workers[1])
+ self.assertEqual(workers[2], new_workers[2])
+ self.assertEqual(workers[3], new_workers[3])
+
+ # check that the previous server entries have been removed.
+ self.check_for_duplicate_processes()
+
+ # check rpc connection, another rpc echo request, and netlogon request
+ self.rpc_echo()
+ self.netlogon()
+
+ def test_rpc_all_workers_restart(self):
+ # check rpc connection, make a rpc echo request, and a netlogon request
+ self.rpc_echo()
+ self.netlogon()
+
+ # get rpc master process
+ pid = self.get_process("prefork-master-rpc")
+ self.assertIsNotNone(pid)
+
+ # Get the worker processes
+ workers = self.get_worker_pids("rpc", NUM_WORKERS)
+
+ # kill all the worker processes
+ for x in workers:
+ os.kill(x, signal.SIGTERM)
+
+ # wait for the worker processes to restart
+ for x in range(NUM_WORKERS):
+ self.wait_for_process(
+ "prefork-worker-rpc-{0}".format(x), workers[x], 0, 1, 30)
+
+ # get rpc master process
+ new_pid = self.get_process("prefork-master-rpc")
+ self.assertIsNotNone(new_pid)
+
+ # check that the pid has not changed
+ self.assertEqual(pid, new_pid)
+
+ # check that the worker processes have restarted
+ new_workers = self.get_worker_pids("rpc", NUM_WORKERS)
+ for x in range(NUM_WORKERS):
+ self.assertNotEqual(workers[x], new_workers[x])
+
+ # check that the previous server entries have been removed.
+ self.check_for_duplicate_processes()
+
+ # check rpc connection, another rpc echo request and netlogon
+ self.rpc_echo()
+ self.netlogon()
+
+ def test_master_restart_backoff(self):
+
+ # get echo master process
+ pid = self.get_process("prefork-master-echo")
+ self.assertIsNotNone(pid)
+
+ #
+ # Check that the processes get backed off as expected
+ #
+ # have prefork backoff increment = 5
+ # prefork maximum backoff = 10
+ backoff_increment = 5
+ for expected in [0, 5, 10, 10]:
+ # Get the worker processes
+ workers = self.get_worker_pids("kdc", NUM_WORKERS)
+
+ process = self.get_process("prefork-master-echo")
+ os.kill(process, signal.SIGTERM)
+ # wait for the process to restart
+ start = time.time()
+ self.wait_for_process("prefork-master-echo", process, 0, 1, 30)
+ # wait for the workers to restart as well
+ self.wait_for_workers("echo", workers)
+ end = time.time()
+ duration = end - start
+
+ # process restart will take some time. Check that the elapsed
+ # duration falls somewhere in the expected range, i.e. we haven't
+ # taken longer than the backoff increment
+ self.assertLess(duration, expected + backoff_increment)
+ self.assertGreaterEqual(duration, expected)
+
+ # check that the worker processes have restarted
+ new_workers = self.get_worker_pids("echo", NUM_WORKERS)
+ for x in range(NUM_WORKERS):
+ self.assertNotEqual(workers[x], new_workers[x])
+
+ # check that the previous server entries have been removed.
+ self.check_for_duplicate_processes()
+
+ def test_worker_restart_backoff(self):
+ #
+ # Check that the processes get backed off as expected
+ #
+ # have prefork backoff increment = 5
+ # prefork maximum backoff = 10
+ backoff_increment = 5
+ for expected in [0, 5, 10, 10]:
+ process = self.get_process("prefork-worker-echo-2")
+ self.assertIsNotNone(process)
+ os.kill(process, signal.SIGTERM)
+ # wait for the process to restart
+ start = time.time()
+ self.wait_for_process("prefork-worker-echo-2", process, 0, 1, 30)
+ end = time.time()
+ duration = end - start
+
+ # process restart will take some time. Check that the elapsed
+ # duration falls somewhere in the expected range, i.e. we haven't
+ # taken longer than the backoff increment
+ self.assertLess(duration, expected + backoff_increment)
+ self.assertGreaterEqual(duration, expected)
+
+ self.check_for_duplicate_processes()
diff --git a/python/samba/tests/process_limits.py b/python/samba/tests/process_limits.py
new file mode 100644
index 0000000..e980005
--- /dev/null
+++ b/python/samba/tests/process_limits.py
@@ -0,0 +1,70 @@
+# Tests for limiting processes forked on accept by the standard process model
+#
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2018
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+"""Tests limits on processes forked by fork on accept in the standard process
+ model.
+ NOTE: This test runs in an environment with an artificially low setting for
+ smbd max processes
+"""
+
+
+import os
+from samba.tests import TestCase
+from samba.samdb import SamDB
+from ldb import LdbError, ERR_OPERATIONS_ERROR
+
+
+class StandardModelProcessLimitTests(TestCase):
+
+ def simple_bind(self):
+ creds = self.insta_creds(template=self.get_credentials())
+ creds.set_bind_dn("%s\\%s" % (creds.get_domain(),
+ creds.get_username()))
+
+ return SamDB(url="ldaps://%s" % os.environ["SERVER"],
+ lp=self.get_loadparm(),
+ credentials=creds)
+
+ def test_process_limits(self):
+ creds = self.insta_creds(template=self.get_credentials())
+ creds.set_bind_dn("%s\\%s" % (creds.get_domain(),
+ creds.get_username()))
+
+ connections = []
+ try:
+ # Open a series of LDAP connections, the maximum number of
+ # active connections should be 20, so the 21st should fail.
+ # But as it is possible that there may be other processes holding
+ # connections, need to allow for earlier connection failures.
+ for _ in range(21):
+ connections.append(self.simple_bind())
+ self.fail(
+ "Processes not limited, able to make more than 20 connections")
+ except LdbError as e:
+ (errno, estr) = e.args
+ if errno != ERR_OPERATIONS_ERROR:
+ raise
+ if not (estr.endswith("NT_STATUS_CONNECTION_DISCONNECTED") or
+ estr.endswith("NT_STATUS_CONNECTION_RESET")):
+ raise
+ pass
+ #
+ # Clean up the connections we've just opened, by deleting the
+ # connection in python. This should invoke the talloc destructor to
+ # release any resources and close the actual connection to the server.
+ for c in connections:
+ del c
diff --git a/python/samba/tests/provision.py b/python/samba/tests/provision.py
new file mode 100644
index 0000000..b87a9a8
--- /dev/null
+++ b/python/samba/tests/provision.py
@@ -0,0 +1,201 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007-2012
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for samba.provision."""
+
+import os
+from samba.provision import (
+ ProvisionNames,
+ ProvisionPaths,
+ ProvisionResult,
+ determine_netbios_name,
+ sanitize_server_role,
+ setup_secretsdb,
+ findnss,
+)
+import samba.tests
+from samba.tests import env_loadparm, TestCase
+
+
+def create_dummy_secretsdb(path, lp=None):
+ """Create a dummy secrets database for use in tests.
+
+ :param path: Path to store the secrets db
+ :param lp: Optional loadparm context. A simple one will
+ be generated if not specified.
+ """
+ if lp is None:
+ lp = env_loadparm()
+ paths = ProvisionPaths()
+ paths.secrets = path
+ paths.private_dir = os.path.dirname(path)
+ paths.binddns_dir = os.path.dirname(path)
+ paths.keytab = "no.keytab"
+ paths.dns_keytab = "no.dns.keytab"
+ secrets_ldb = setup_secretsdb(paths, None, lp=lp)
+ secrets_ldb.transaction_commit()
+ return secrets_ldb
+
+
+class ProvisionTestCase(samba.tests.TestCaseInTempDir):
+ """Some simple tests for individual functions in the provisioning code.
+ """
+
+ def test_setup_secretsdb(self):
+ path = os.path.join(self.tempdir, "secrets.ldb")
+ paths = ProvisionPaths()
+ secrets_tdb_path = os.path.join(self.tempdir, "secrets.tdb")
+ paths.secrets = path
+ paths.private_dir = os.path.dirname(path)
+ paths.binddns_dir = os.path.dirname(path)
+ paths.keytab = "no.keytab"
+ paths.dns_keytab = "no.dns.keytab"
+ ldb = setup_secretsdb(paths, None, lp=env_loadparm())
+ try:
+ self.assertEqual("LSA Secrets",
+ ldb.searchone(basedn="CN=LSA Secrets", attribute="CN").decode('utf8'))
+ finally:
+ del ldb
+ os.unlink(path)
+ if os.path.exists(secrets_tdb_path):
+ os.unlink(secrets_tdb_path)
+
+
+class FindNssTests(TestCase):
+ """Test findnss() function."""
+
+ def test_nothing(self):
+ def x(y):
+ raise KeyError
+ self.assertRaises(KeyError, findnss, x, [])
+
+ def test_first(self):
+ self.assertEqual("bla", findnss(lambda x: "bla", ["bla"]))
+
+ def test_skip_first(self):
+ def x(y):
+ if y != "bla":
+ raise KeyError
+ return "ha"
+ self.assertEqual("ha", findnss(x, ["bloe", "bla"]))
+
+
+class Disabled(object):
+
+ def test_setup_templatesdb(self):
+ raise NotImplementedError(self.test_setup_templatesdb)
+
+ def test_setup_registry(self):
+ raise NotImplementedError(self.test_setup_registry)
+
+ def test_setup_samdb_rootdse(self):
+ raise NotImplementedError(self.test_setup_samdb_rootdse)
+
+ def test_setup_samdb_partitions(self):
+ raise NotImplementedError(self.test_setup_samdb_partitions)
+
+ def test_provision_dns(self):
+ raise NotImplementedError(self.test_provision_dns)
+
+ def test_provision_ldapbase(self):
+ raise NotImplementedError(self.test_provision_ldapbase)
+
+ def test_provision_guess(self):
+ raise NotImplementedError(self.test_provision_guess)
+
+ def test_join_domain(self):
+ raise NotImplementedError(self.test_join_domain)
+
+
+class SanitizeServerRoleTests(TestCase):
+
+ def test_same(self):
+ self.assertEqual("standalone server",
+ sanitize_server_role("standalone server"))
+ self.assertEqual("member server",
+ sanitize_server_role("member server"))
+
+ def test_invalid(self):
+ self.assertRaises(ValueError, sanitize_server_role, "foo")
+
+ def test_valid(self):
+ self.assertEqual(
+ "standalone server",
+ sanitize_server_role("ROLE_STANDALONE"))
+ self.assertEqual(
+ "standalone server",
+ sanitize_server_role("standalone"))
+ self.assertEqual(
+ "active directory domain controller",
+ sanitize_server_role("domain controller"))
+
+
+class DummyLogger(object):
+
+ def __init__(self):
+ self.entries = []
+
+ def info(self, text, *args):
+ self.entries.append(("INFO", text % args))
+
+
+class ProvisionResultTests(TestCase):
+
+ def report_logger(self, result):
+ logger = DummyLogger()
+ result.report_logger(logger)
+ return logger.entries
+
+ def base_result(self):
+ result = ProvisionResult()
+ result.server_role = "domain controller"
+ result.names = ProvisionNames()
+ result.names.hostname = "hostnaam"
+ result.names.domain = "DOMEIN"
+ result.names.dnsdomain = "dnsdomein"
+ result.domainsid = "S1-1-1"
+ result.paths = ProvisionPaths()
+ return result
+
+ def test_basic_report_logger(self):
+ result = self.base_result()
+ entries = self.report_logger(result)
+ self.assertEqual(entries, [
+ ('INFO', 'Once the above files are installed, your Samba AD server '
+ 'will be ready to use'),
+ ('INFO', 'Server Role: domain controller'),
+ ('INFO', 'Hostname: hostnaam'),
+ ('INFO', 'NetBIOS Domain: DOMEIN'),
+ ('INFO', 'DNS Domain: dnsdomein'),
+ ('INFO', 'DOMAIN SID: S1-1-1')])
+
+ def test_report_logger_adminpass(self):
+ result = self.base_result()
+ result.adminpass_generated = True
+ result.adminpass = "geheim"
+ entries = self.report_logger(result)
+ self.assertEqual(entries[1],
+ ("INFO", 'Admin password: geheim'))
+
+
+class DetermineNetbiosNameTests(TestCase):
+
+ def test_limits_to_15(self):
+ self.assertEqual("A" * 15, determine_netbios_name("a" * 30))
+
+ def test_strips_invalid(self):
+ self.assertEqual("BLABLA", determine_netbios_name("bla/bla"))
diff --git a/python/samba/tests/pso.py b/python/samba/tests/pso.py
new file mode 100644
index 0000000..4038ee7
--- /dev/null
+++ b/python/samba/tests/pso.py
@@ -0,0 +1,272 @@
+#
+# Helper classes for testing Password Settings Objects.
+#
+# This also tests the default password complexity (i.e. pwdProperties),
+# minPwdLength, pwdHistoryLength settings as a side-effect.
+#
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2018
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import ldb
+from ldb import FLAG_MOD_DELETE, FLAG_MOD_ADD, FLAG_MOD_REPLACE
+from samba.dcerpc.samr import (DOMAIN_PASSWORD_COMPLEX,
+ DOMAIN_PASSWORD_STORE_CLEARTEXT)
+
+
+class TestUser:
+ def __init__(self, username, samdb, userou=None):
+ initial_password = "Initial12#"
+ self.name = username
+ self.ldb = samdb
+ self.dn = "CN=%s,%s,%s" % (username, (userou or "CN=Users"),
+ self.ldb.domain_dn())
+
+ # store all passwords that have ever been used for this user, as well
+ # as a pwd_history that more closely resembles the history on the DC
+ self.all_old_passwords = [initial_password]
+ self.pwd_history = [initial_password]
+ self.ldb.newuser(username, initial_password, userou=userou)
+ self.ldb.enable_account("(sAMAccountName=%s)" % username)
+ self.last_pso = None
+
+ def old_invalid_passwords(self, hist_len):
+ """Returns the expected password history for the DC"""
+ if hist_len == 0:
+ return []
+
+ # return the last n items in the list
+ return self.pwd_history[-hist_len:]
+
+ def old_valid_passwords(self, hist_len):
+ """Returns old passwords that fall outside the DC's expected history"""
+ # if PasswordHistoryLength is zero, any previous password can be valid
+ if hist_len == 0:
+ return self.all_old_passwords[:]
+
+ # just exclude our pwd_history if there's not much in it. This can
+ # happen if we've been using a lower PasswordHistoryLength setting
+ # previously
+ hist_len = min(len(self.pwd_history), hist_len)
+
+ # return any passwords up to the nth-from-last item
+ return self.all_old_passwords[:-hist_len]
+
+ def update_pwd_history(self, new_password):
+ """Updates the user's password history to reflect a password change"""
+ # we maintain 2 lists: all passwords the user has ever had, and an
+ # effective password-history that should roughly mirror the DC.
+ # pwd_history_change() handles the corner-case where we need to
+ # truncate password-history due to PasswordHistoryLength settings
+ # changes
+ if new_password in self.all_old_passwords:
+ self.all_old_passwords.remove(new_password)
+ self.all_old_passwords.append(new_password)
+
+ if new_password in self.pwd_history:
+ self.pwd_history.remove(new_password)
+ self.pwd_history.append(new_password)
+
+ def get_resultant_PSO(self):
+ """Returns the DN of the applicable PSO, or None if none applies"""
+ res = self.ldb.search(self.dn, attrs=['msDS-ResultantPSO'])
+
+ if 'msDS-ResultantPSO' in res[0]:
+ return str(res[0]['msDS-ResultantPSO'][0])
+ else:
+ return None
+
+ def get_password(self):
+ """Returns the user's current password"""
+ # current password is the last item in the list
+ return self.all_old_passwords[-1]
+
+ def set_password(self, new_password):
+ """Attempts to change a user's password"""
+ ldif = """
+dn: %s
+changetype: modify
+delete: userPassword
+userPassword: %s
+add: userPassword
+userPassword: %s
+""" % (self.dn, self.get_password(), new_password)
+ # this modify will throw an exception if new_password doesn't meet the
+ # PSO constraints (which the test code catches if it's expected to
+ # fail)
+ self.ldb.modify_ldif(ldif)
+ self.update_pwd_history(new_password)
+
+ def pwd_history_change(self, old_hist_len, new_hist_len):
+ """
+ Updates the effective password history, to reflect changes on the DC.
+ When the PasswordHistoryLength applied to a user changes from a low
+ setting (e.g. 2) to a higher setting (e.g. 4), passwords #3 and #4
+ won't actually have been stored on the DC, so we need to make sure they
+ are removed from our mirror pwd_history list.
+ """
+
+ # our list may have been tracking more passwords than the DC actually
+ # stores. Truncate the list now to match what the DC currently has
+ hist_len = min(new_hist_len, old_hist_len)
+ if hist_len == 0:
+ self.pwd_history = []
+ elif hist_len < len(self.pwd_history):
+ self.pwd_history = self.pwd_history[-hist_len:]
+
+ # corner-case where history-length goes from zero to non-zero. Windows
+ # counts the current password as being in the history even before it
+ # changes (Samba only counts it from the next change onwards). We don't
+ # exercise this in the PSO tests due to this discrepancy, but the
+ # following check will support the Windows behaviour
+ if old_hist_len == 0 and new_hist_len > 0:
+ self.pwd_history = [self.get_password()]
+
+ def set_primary_group(self, group_dn):
+ """Sets a user's primaryGroupID to be that of the specified group"""
+
+ # get the primaryGroupToken of the group
+ res = self.ldb.search(base=group_dn, attrs=["primaryGroupToken"],
+ scope=ldb.SCOPE_BASE)
+ group_id = res[0]["primaryGroupToken"]
+
+ # set primaryGroupID attribute of the user to that group
+ m = ldb.Message()
+ m.dn = ldb.Dn(self.ldb, self.dn)
+ m["primaryGroupID"] = ldb.MessageElement(group_id, FLAG_MOD_REPLACE,
+ "primaryGroupID")
+ self.ldb.modify(m)
+
+
+class PasswordSettings:
+ def default_settings(self, samdb):
+ """
+ Returns an object representing the default password settings that will
+ take effect (i.e. when no other Fine-Grained Password Policy applies)
+ """
+ pw_attrs = ["minPwdAge", "lockoutDuration", "lockOutObservationWindow",
+ "lockoutThreshold", "maxPwdAge", "minPwdAge",
+ "minPwdLength", "pwdHistoryLength", "pwdProperties"]
+ res = samdb.search(samdb.domain_dn(), scope=ldb.SCOPE_BASE,
+ attrs=pw_attrs)
+
+ self.name = "Defaults"
+ self.dn = None
+ self.ldb = samdb
+ self.precedence = 0
+ self.complexity = \
+ int(res[0]["pwdProperties"][0]) & DOMAIN_PASSWORD_COMPLEX
+ self.store_plaintext = \
+ int(res[0]["pwdProperties"][0]) & DOMAIN_PASSWORD_STORE_CLEARTEXT
+ self.password_len = int(res[0]["minPwdLength"][0])
+ self.lockout_attempts = int(res[0]["lockoutThreshold"][0])
+ self.history_len = int(res[0]["pwdHistoryLength"][0])
+ # convert to time in secs
+ self.lockout_duration = int(res[0]["lockoutDuration"][0]) / -int(1e7)
+ self.lockout_window =\
+ int(res[0]["lockOutObservationWindow"][0]) / -int(1e7)
+ self.password_age_min = int(res[0]["minPwdAge"][0]) / -int(1e7)
+ self.password_age_max = int(res[0]["maxPwdAge"][0]) / -int(1e7)
+
+ def __init__(self, name, samdb, precedence=10, complexity=True,
+ password_len=10, lockout_attempts=0, lockout_duration=5,
+ password_age_min=0, password_age_max=60 * 60 * 24 * 30,
+ history_len=2, store_plaintext=False, container=None):
+
+ # if no PSO was specified, return an object representing the global
+ # password settings (i.e. the default settings, if no PSO trumps them)
+ if name is None:
+ return self.default_settings(samdb)
+
+ # only PSOs in the Password Settings Container are considered. You can
+ # create PSOs outside of this container, but it's not recommended
+ if container is None:
+ base_dn = samdb.domain_dn()
+ container = "CN=Password Settings Container,CN=System,%s" % base_dn
+
+ self.name = name
+ self.dn = "CN=%s,%s" % (name, container)
+ self.ldb = samdb
+ self.precedence = precedence
+ self.complexity = complexity
+ self.store_plaintext = store_plaintext
+ self.password_len = password_len
+ self.lockout_attempts = lockout_attempts
+ self.history_len = history_len
+ # times in secs
+ self.lockout_duration = lockout_duration
+ # lockout observation-window must be <= lockout-duration (the existing
+ # lockout tests just use the same value for both settings)
+ self.lockout_window = lockout_duration
+ self.password_age_min = password_age_min
+ self.password_age_max = password_age_max
+
+ # add the PSO to the DB
+ self.ldb.add_ldif(self.get_ldif())
+
+ def get_ldif(self):
+ complexity_str = "TRUE" if self.complexity else "FALSE"
+ plaintext_str = "TRUE" if self.store_plaintext else "FALSE"
+
+ # timestamps here are in units of -100 nano-seconds
+ lockout_duration = -int(self.lockout_duration * (1e7))
+ lockout_window = -int(self.lockout_window * (1e7))
+ min_age = -int(self.password_age_min * (1e7))
+ max_age = -int(self.password_age_max * (1e7))
+
+ # all the following fields are mandatory for the PSO object
+ ldif = """
+dn: {0}
+objectClass: msDS-PasswordSettings
+msDS-PasswordSettingsPrecedence: {1}
+msDS-PasswordReversibleEncryptionEnabled: {2}
+msDS-PasswordHistoryLength: {3}
+msDS-PasswordComplexityEnabled: {4}
+msDS-MinimumPasswordLength: {5}
+msDS-MinimumPasswordAge: {6}
+msDS-MaximumPasswordAge: {7}
+msDS-LockoutThreshold: {8}
+msDS-LockoutObservationWindow: {9}
+msDS-LockoutDuration: {10}
+""".format(self.dn, self.precedence, plaintext_str, self.history_len,
+ complexity_str, self.password_len, min_age, max_age,
+ self.lockout_attempts, lockout_window, lockout_duration)
+
+ return ldif
+
+ def apply_to(self, user_group, operation=FLAG_MOD_ADD):
+ """Updates this Password Settings Object to apply to a user or group"""
+ m = ldb.Message()
+ m.dn = ldb.Dn(self.ldb, self.dn)
+ m["msDS-PSOAppliesTo"] = ldb.MessageElement(user_group, operation,
+ "msDS-PSOAppliesTo")
+ self.ldb.modify(m)
+
+ def unapply(self, user_group):
+ """Updates this PSO to no longer apply to a user or group"""
+ # just delete the msDS-PSOAppliesTo attribute (instead of adding it)
+ self.apply_to(user_group, operation=FLAG_MOD_DELETE)
+
+ def set_precedence(self, new_precedence, samdb=None):
+ if samdb is None:
+ samdb = self.ldb
+ ldif = """
+dn: %s
+changetype: modify
+replace: msDS-PasswordSettingsPrecedence
+msDS-PasswordSettingsPrecedence: %u
+""" % (self.dn, new_precedence)
+ samdb.modify_ldif(ldif)
+ self.precedence = new_precedence
diff --git a/python/samba/tests/py_credentials.py b/python/samba/tests/py_credentials.py
new file mode 100644
index 0000000..3db7282
--- /dev/null
+++ b/python/samba/tests/py_credentials.py
@@ -0,0 +1,677 @@
+# Integration tests for pycredentials
+#
+# Copyright (C) Catalyst IT Ltd. 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+from samba.tests import TestCase, delete_force
+import os
+
+import ldb
+
+import samba
+from samba.auth import system_session
+from samba.credentials import (
+ Credentials,
+ CLI_CRED_NTLMv2_AUTH,
+ CLI_CRED_NTLM_AUTH,
+ DONT_USE_KERBEROS)
+from samba.dcerpc import lsa, netlogon, ntlmssp, security, srvsvc
+from samba.dcerpc.netlogon import (
+ netr_Authenticator,
+ netr_WorkstationInformation,
+ MSV1_0_ALLOW_MSVCHAPV2
+)
+from samba.dcerpc.misc import SEC_CHAN_WKSTA
+from samba.dsdb import (
+ UF_WORKSTATION_TRUST_ACCOUNT,
+ UF_PASSWD_NOTREQD,
+ UF_NORMAL_ACCOUNT)
+from samba.ndr import ndr_pack, ndr_unpack
+from samba.samdb import SamDB
+from samba import NTSTATUSError, ntstatus
+from samba.common import get_string
+from samba.sd_utils import SDUtils
+
+import ctypes
+
+
+"""
+Integration tests for pycredentials
+"""
+
+MACHINE_NAME = "PCTM"
+USER_NAME = "PCTU"
+
+class PyCredentialsTests(TestCase):
+
+ def setUp(self):
+ super().setUp()
+
+ self.server = os.environ["SERVER"]
+ self.domain = os.environ["DOMAIN"]
+ self.host = os.environ["SERVER_IP"]
+ self.lp = self.get_loadparm()
+
+ self.credentials = self.get_credentials()
+
+ self.session = system_session()
+ self.ldb = SamDB(url="ldap://%s" % self.host,
+ session_info=self.session,
+ credentials=self.credentials,
+ lp=self.lp)
+
+ self.create_machine_account()
+ self.create_user_account()
+
+ def tearDown(self):
+ super().tearDown()
+ delete_force(self.ldb, self.machine_dn)
+ delete_force(self.ldb, self.user_dn)
+
+ # Until a successful netlogon connection has been established there will
+ # not be a valid authenticator associated with the credentials
+ # and new_client_authenticator should throw a ValueError
+ def test_no_netlogon_connection(self):
+ self.assertRaises(ValueError,
+ self.machine_creds.new_client_authenticator)
+
+ # Once a netlogon connection has been established,
+ # new_client_authenticator should return a value
+ #
+ def test_have_netlogon_connection(self):
+ c = self.get_netlogon_connection()
+ a = self.machine_creds.new_client_authenticator()
+ self.assertIsNotNone(a)
+
+ # Get an authenticator and use it on a sequence of operations requiring
+ # an authenticator
+ def test_client_authenticator(self):
+ c = self.get_netlogon_connection()
+ (authenticator, subsequent) = self.get_authenticator(c)
+ self.do_NetrLogonSamLogonWithFlags(c, authenticator, subsequent)
+ (authenticator, subsequent) = self.get_authenticator(c)
+ self.do_NetrLogonGetDomainInfo(c, authenticator, subsequent)
+ (authenticator, subsequent) = self.get_authenticator(c)
+ self.do_NetrLogonGetDomainInfo(c, authenticator, subsequent)
+ (authenticator, subsequent) = self.get_authenticator(c)
+ self.do_NetrLogonGetDomainInfo(c, authenticator, subsequent)
+
+ # Test using LogonGetDomainInfo to update dNSHostName to an allowed value.
+ def test_set_dns_hostname_valid(self):
+ c = self.get_netlogon_connection()
+ authenticator, subsequent = self.get_authenticator(c)
+
+ domain_hostname = self.ldb.domain_dns_name()
+
+ new_dns_hostname = f'{self.machine_name}.{domain_hostname}'
+ new_dns_hostname = new_dns_hostname.encode('utf-8')
+
+ query = netr_WorkstationInformation()
+ query.os_name = lsa.String('some OS')
+ query.dns_hostname = new_dns_hostname
+
+ c.netr_LogonGetDomainInfo(
+ server_name=self.server,
+ computer_name=self.user_creds.get_workstation(),
+ credential=authenticator,
+ return_authenticator=subsequent,
+ level=1,
+ query=query)
+
+ # Check the result.
+
+ res = self.ldb.search(self.machine_dn,
+ scope=ldb.SCOPE_BASE,
+ attrs=['dNSHostName'])
+ self.assertEqual(1, len(res))
+
+ got_dns_hostname = res[0].get('dNSHostName', idx=0)
+ self.assertEqual(new_dns_hostname, got_dns_hostname)
+
+ # Test using LogonGetDomainInfo to update dNSHostName to an allowed value,
+ # when we are denied the right to do so.
+ def test_set_dns_hostname_valid_denied(self):
+ c = self.get_netlogon_connection()
+ authenticator, subsequent = self.get_authenticator(c)
+
+ res = self.ldb.search(self.machine_dn,
+ scope=ldb.SCOPE_BASE,
+ attrs=['objectSid'])
+ self.assertEqual(1, len(res))
+
+ machine_sid = ndr_unpack(security.dom_sid,
+ res[0].get('objectSid', idx=0))
+
+ sd_utils = SDUtils(self.ldb)
+
+ # Deny Validated Write and Write Property.
+ mod = (f'(OD;;SWWP;{security.GUID_DRS_DNS_HOST_NAME};;'
+ f'{machine_sid})')
+ sd_utils.dacl_add_ace(self.machine_dn, mod)
+
+ domain_hostname = self.ldb.domain_dns_name()
+
+ new_dns_hostname = f'{self.machine_name}.{domain_hostname}'
+ new_dns_hostname = new_dns_hostname.encode('utf-8')
+
+ query = netr_WorkstationInformation()
+ query.os_name = lsa.String('some OS')
+ query.dns_hostname = new_dns_hostname
+
+ c.netr_LogonGetDomainInfo(
+ server_name=self.server,
+ computer_name=self.user_creds.get_workstation(),
+ credential=authenticator,
+ return_authenticator=subsequent,
+ level=1,
+ query=query)
+
+ # Check the result.
+
+ res = self.ldb.search(self.machine_dn,
+ scope=ldb.SCOPE_BASE,
+ attrs=['dNSHostName'])
+ self.assertEqual(1, len(res))
+
+ got_dns_hostname = res[0].get('dNSHostName', idx=0)
+ self.assertEqual(new_dns_hostname, got_dns_hostname)
+
+    # Ensure we can't use LogonGetDomainInfo to update dNSHostName to an
+    # invalid value, even with Validated Write.
+    def test_set_dns_hostname_invalid_validated_write(self):
+        c = self.get_netlogon_connection()
+        authenticator, subsequent = self.get_authenticator(c)
+
+        # Fetch the machine account's SID; it is the trustee of the ACE
+        # added below.
+        res = self.ldb.search(self.machine_dn,
+                              scope=ldb.SCOPE_BASE,
+                              attrs=['objectSid'])
+        self.assertEqual(1, len(res))
+
+        machine_sid = ndr_unpack(security.dom_sid,
+                                 res[0].get('objectSid', idx=0))
+
+        sd_utils = SDUtils(self.ldb)
+
+        # Grant Validated Write.
+        mod = (f'(OA;;SW;{security.GUID_DRS_DNS_HOST_NAME};;'
+               f'{machine_sid})')
+        sd_utils.dacl_add_ace(self.machine_dn, mod)
+
+        # A hostname without the domain's DNS suffix is not valid.
+        new_dns_hostname = b'invalid'
+
+        query = netr_WorkstationInformation()
+        query.os_name = lsa.String('some OS')
+        query.dns_hostname = new_dns_hostname
+
+        c.netr_LogonGetDomainInfo(
+            server_name=self.server,
+            computer_name=self.user_creds.get_workstation(),
+            credential=authenticator,
+            return_authenticator=subsequent,
+            level=1,
+            query=query)
+
+        # Check the result.
+
+        res = self.ldb.search(self.machine_dn,
+                              scope=ldb.SCOPE_BASE,
+                              attrs=['dNSHostName'])
+        self.assertEqual(1, len(res))
+
+        # The invalid value must have been rejected, so the attribute is
+        # still unset.
+        got_dns_hostname = res[0].get('dNSHostName', idx=0)
+        self.assertIsNone(got_dns_hostname)
+
+    # Ensure we can't use LogonGetDomainInfo to update dNSHostName to an
+    # invalid value, even with Write Property.
+    def test_set_dns_hostname_invalid_write_property(self):
+        c = self.get_netlogon_connection()
+        authenticator, subsequent = self.get_authenticator(c)
+
+        # Fetch the machine account's SID; it is the trustee of the ACE
+        # added below.
+        res = self.ldb.search(self.machine_dn,
+                              scope=ldb.SCOPE_BASE,
+                              attrs=['objectSid'])
+        self.assertEqual(1, len(res))
+
+        machine_sid = ndr_unpack(security.dom_sid,
+                                 res[0].get('objectSid', idx=0))
+
+        sd_utils = SDUtils(self.ldb)
+
+        # Grant Write Property.
+        mod = (f'(OA;;WP;{security.GUID_DRS_DNS_HOST_NAME};;'
+               f'{machine_sid})')
+        sd_utils.dacl_add_ace(self.machine_dn, mod)
+
+        # A hostname without the domain's DNS suffix is not valid.
+        new_dns_hostname = b'invalid'
+
+        query = netr_WorkstationInformation()
+        query.os_name = lsa.String('some OS')
+        query.dns_hostname = new_dns_hostname
+
+        c.netr_LogonGetDomainInfo(
+            server_name=self.server,
+            computer_name=self.user_creds.get_workstation(),
+            credential=authenticator,
+            return_authenticator=subsequent,
+            level=1,
+            query=query)
+
+        # Check the result.
+
+        res = self.ldb.search(self.machine_dn,
+                              scope=ldb.SCOPE_BASE,
+                              attrs=['dNSHostName'])
+        self.assertEqual(1, len(res))
+
+        # The invalid value must have been rejected, so the attribute is
+        # still unset.
+        got_dns_hostname = res[0].get('dNSHostName', idx=0)
+        self.assertIsNone(got_dns_hostname)
+
+ # Show we can't use LogonGetDomainInfo to set the dNSHostName to just the
+ # machine name.
+ def test_set_dns_hostname_to_machine_name(self):
+ c = self.get_netlogon_connection()
+ authenticator, subsequent = self.get_authenticator(c)
+
+ new_dns_hostname = self.machine_name.encode('utf-8')
+
+ query = netr_WorkstationInformation()
+ query.os_name = lsa.String('some OS')
+ query.dns_hostname = new_dns_hostname
+
+ c.netr_LogonGetDomainInfo(
+ server_name=self.server,
+ computer_name=self.user_creds.get_workstation(),
+ credential=authenticator,
+ return_authenticator=subsequent,
+ level=1,
+ query=query)
+
+ # Check the result.
+
+ res = self.ldb.search(self.machine_dn,
+ scope=ldb.SCOPE_BASE,
+ attrs=['dNSHostName'])
+ self.assertEqual(1, len(res))
+
+ got_dns_hostname = res[0].get('dNSHostName', idx=0)
+ self.assertIsNone(got_dns_hostname)
+
+    # Show we can't use LogonGetDomainInfo to set dNSHostName with an invalid
+    # suffix.
+    def test_set_dns_hostname_invalid_suffix(self):
+        c = self.get_netlogon_connection()
+        authenticator, subsequent = self.get_authenticator(c)
+
+        domain_hostname = self.ldb.domain_dns_name()
+
+        # Insert an extra label so the name is *not* directly under the
+        # domain's DNS name.
+        new_dns_hostname = f'{self.machine_name}.foo.{domain_hostname}'
+        new_dns_hostname = new_dns_hostname.encode('utf-8')
+
+        query = netr_WorkstationInformation()
+        query.os_name = lsa.String('some OS')
+        query.dns_hostname = new_dns_hostname
+
+        c.netr_LogonGetDomainInfo(
+            server_name=self.server,
+            computer_name=self.user_creds.get_workstation(),
+            credential=authenticator,
+            return_authenticator=subsequent,
+            level=1,
+            query=query)
+
+        # Check the result.
+
+        res = self.ldb.search(self.machine_dn,
+                              scope=ldb.SCOPE_BASE,
+                              attrs=['dNSHostName'])
+        self.assertEqual(1, len(res))
+
+        # The update must have been rejected, so the attribute is still
+        # unset.
+        got_dns_hostname = res[0].get('dNSHostName', idx=0)
+        self.assertIsNone(got_dns_hostname)
+
+    # Test that setting the HANDLES_SPN_UPDATE flag inhibits the dNSHostName
+    # update, but other attributes are still updated.
+    def test_set_dns_hostname_with_flag(self):
+        c = self.get_netlogon_connection()
+        authenticator, subsequent = self.get_authenticator(c)
+
+        domain_hostname = self.ldb.domain_dns_name()
+
+        # This value would be valid, were it not for the flag below.
+        new_dns_hostname = f'{self.machine_name}.{domain_hostname}'
+        new_dns_hostname = new_dns_hostname.encode('utf-8')
+
+        operating_system = 'some OS'
+
+        query = netr_WorkstationInformation()
+        query.os_name = lsa.String(operating_system)
+
+        # The client claims it will handle SPN (and dNSHostName) updates
+        # itself, so the server must not apply the hostname.
+        query.dns_hostname = new_dns_hostname
+        query.workstation_flags = netlogon.NETR_WS_FLAG_HANDLES_SPN_UPDATE
+
+        c.netr_LogonGetDomainInfo(
+            server_name=self.server,
+            computer_name=self.user_creds.get_workstation(),
+            credential=authenticator,
+            return_authenticator=subsequent,
+            level=1,
+            query=query)
+
+        # Check the result.
+
+        res = self.ldb.search(self.machine_dn,
+                              scope=ldb.SCOPE_BASE,
+                              attrs=['dNSHostName',
+                                     'operatingSystem'])
+        self.assertEqual(1, len(res))
+
+        # dNSHostName was inhibited by the flag...
+        got_dns_hostname = res[0].get('dNSHostName', idx=0)
+        self.assertIsNone(got_dns_hostname)
+
+        # ...but operatingSystem was still updated.
+        got_os = res[0].get('operatingSystem', idx=0)
+        self.assertEqual(operating_system.encode('utf-8'), got_os)
+
+    def test_SamLogonEx(self):
+        # A plain NTLMv2 network logon via SamLogonEx should succeed;
+        # NT_STATUS_WRONG_PASSWORD would indicate a credential-handling bug.
+        c = self.get_netlogon_connection()
+
+        logon = samlogon_logon_info(self.domain,
+                                    self.machine_name,
+                                    self.user_creds)
+
+        logon_level = netlogon.NetlogonNetworkTransitiveInformation
+        validation_level = netlogon.NetlogonValidationSamInfo4
+        netr_flags = 0
+
+        try:
+            c.netr_LogonSamLogonEx(self.server,
+                                   self.user_creds.get_workstation(),
+                                   logon_level,
+                                   logon,
+                                   validation_level,
+                                   netr_flags)
+        except NTSTATUSError as e:
+            # NTSTATUS codes are unsigned 32-bit; normalise for comparison.
+            enum = ctypes.c_uint32(e.args[0]).value
+            if enum == ntstatus.NT_STATUS_WRONG_PASSWORD:
+                self.fail("got wrong password error")
+            else:
+                raise
+
+ def test_SamLogonEx_no_domain(self):
+ c = self.get_netlogon_connection()
+
+ self.user_creds.set_domain('')
+
+ logon = samlogon_logon_info(self.domain,
+ self.machine_name,
+ self.user_creds)
+
+ logon_level = netlogon.NetlogonNetworkTransitiveInformation
+ validation_level = netlogon.NetlogonValidationSamInfo4
+ netr_flags = 0
+
+ try:
+ c.netr_LogonSamLogonEx(self.server,
+ self.user_creds.get_workstation(),
+ logon_level,
+ logon,
+ validation_level,
+ netr_flags)
+ except NTSTATUSError as e:
+ enum = ctypes.c_uint32(e.args[0]).value
+ if enum == ntstatus.NT_STATUS_WRONG_PASSWORD:
+ self.fail("got wrong password error")
+ else:
+ self.fail("got unexpected error" + str(e))
+
+ def test_SamLogonExNTLM(self):
+ c = self.get_netlogon_connection()
+
+ logon = samlogon_logon_info(self.domain,
+ self.machine_name,
+ self.user_creds,
+ flags=CLI_CRED_NTLM_AUTH)
+
+ logon_level = netlogon.NetlogonNetworkTransitiveInformation
+ validation_level = netlogon.NetlogonValidationSamInfo4
+ netr_flags = 0
+
+ try:
+ c.netr_LogonSamLogonEx(self.server,
+ self.user_creds.get_workstation(),
+ logon_level,
+ logon,
+ validation_level,
+ netr_flags)
+ except NTSTATUSError as e:
+ enum = ctypes.c_uint32(e.args[0]).value
+ if enum == ntstatus.NT_STATUS_WRONG_PASSWORD:
+ self.fail("got wrong password error")
+ else:
+ raise
+
+    def test_SamLogonExMSCHAPv2(self):
+        # Like the NTLM test, but with the MSV1_0_ALLOW_MSVCHAPV2 parameter
+        # control set on the identity info.
+        c = self.get_netlogon_connection()
+
+        logon = samlogon_logon_info(self.domain,
+                                    self.machine_name,
+                                    self.user_creds,
+                                    flags=CLI_CRED_NTLM_AUTH)
+
+        logon.identity_info.parameter_control = MSV1_0_ALLOW_MSVCHAPV2
+
+        logon_level = netlogon.NetlogonNetworkTransitiveInformation
+        validation_level = netlogon.NetlogonValidationSamInfo4
+        netr_flags = 0
+
+        try:
+            c.netr_LogonSamLogonEx(self.server,
+                                   self.user_creds.get_workstation(),
+                                   logon_level,
+                                   logon,
+                                   validation_level,
+                                   netr_flags)
+        except NTSTATUSError as e:
+            # NTSTATUS codes are unsigned 32-bit; normalise for comparison.
+            enum = ctypes.c_uint32(e.args[0]).value
+            if enum == ntstatus.NT_STATUS_WRONG_PASSWORD:
+                self.fail("got wrong password error")
+            else:
+                raise
+
+    # Test Credentials.encrypt_netr_crypt_password
+    # By performing a NetrServerPasswordSet2
+    # And the logging on using the new password.
+
+    def test_encrypt_netr_password(self):
+        # Change the password
+        self.do_Netr_ServerPasswordSet2()
+        # Now use the new password to perform an operation; a successful
+        # srvsvc connection proves the password change took effect.
+        srvsvc.srvsvc("ncacn_np:%s" % (self.server),
+                      self.lp,
+                      self.machine_creds)
+
+    # Change the current machine account password with a
+    # netr_ServerPasswordSet2 call.
+
+    def do_Netr_ServerPasswordSet2(self):
+        c = self.get_netlogon_connection()
+        (authenticator, subsequent) = self.get_authenticator(c)
+        PWD_LEN = 32
+        DATA_LEN = 512
+        newpass = samba.generate_random_password(PWD_LEN, PWD_LEN)
+        encoded = newpass.encode('utf-16-le')
+        pwd_len = len(encoded)
+        # The wire format is a fixed 512-byte buffer with the password
+        # right-aligned and random filler in front; 'length' records the
+        # real password length.
+        filler = [x if isinstance(x, int) else ord(x) for x in os.urandom(DATA_LEN - pwd_len)]
+        pwd = netlogon.netr_CryptPassword()
+        pwd.length = pwd_len
+        pwd.data = filler + [x if isinstance(x, int) else ord(x) for x in encoded]
+        # Encrypt in place with the schannel session key (the API under test).
+        self.machine_creds.encrypt_netr_crypt_password(pwd)
+        c.netr_ServerPasswordSet2(self.server,
+                                  self.machine_creds.get_workstation(),
+                                  SEC_CHAN_WKSTA,
+                                  self.machine_name,
+                                  authenticator,
+                                  pwd)
+
+        # Keep our local credentials in sync with the server-side change.
+        self.machine_pass = newpass
+        self.machine_creds.set_password(newpass)
+
+ # Establish sealed schannel netlogon connection over TCP/IP
+ #
+ def get_netlogon_connection(self):
+ return netlogon.netlogon("ncacn_ip_tcp:%s[schannel,seal]" % self.server,
+ self.lp,
+ self.machine_creds)
+
+    #
+    # Create the machine account
+    def create_machine_account(self):
+        self.machine_pass = samba.generate_random_password(32, 32)
+        self.machine_name = MACHINE_NAME
+        self.machine_dn = "cn=%s,%s" % (self.machine_name, self.ldb.domain_dn())
+
+        # remove the account if it exists, this will happen if a previous test
+        # run failed
+        delete_force(self.ldb, self.machine_dn)
+
+        # unicodePwd must be the quoted password in UTF-16-LE.
+        utf16pw = ('"%s"' % get_string(self.machine_pass)).encode('utf-16-le')
+        self.ldb.add({
+            "dn": self.machine_dn,
+            "objectclass": "computer",
+            "sAMAccountName": "%s$" % self.machine_name,
+            "userAccountControl":
+                str(UF_WORKSTATION_TRUST_ACCOUNT | UF_PASSWD_NOTREQD),
+            "unicodePwd": utf16pw})
+
+        # Build matching workstation credentials for schannel connections.
+        self.machine_creds = Credentials()
+        self.machine_creds.guess(self.get_loadparm())
+        self.machine_creds.set_secure_channel_type(SEC_CHAN_WKSTA)
+        self.machine_creds.set_kerberos_state(DONT_USE_KERBEROS)
+        self.machine_creds.set_password(self.machine_pass)
+        self.machine_creds.set_username(self.machine_name + "$")
+        self.machine_creds.set_workstation(self.machine_name)
+
+ #
+ # Create a test user account
+ def create_user_account(self):
+ self.user_pass = samba.generate_random_password(32, 32)
+ self.user_name = USER_NAME
+ self.user_dn = "cn=%s,%s" % (self.user_name, self.ldb.domain_dn())
+
+ # remove the account if it exists, this will happen if a previous test
+ # run failed
+ delete_force(self.ldb, self.user_dn)
+
+ utf16pw = ('"%s"' % get_string(self.user_pass)).encode('utf-16-le')
+ self.ldb.add({
+ "dn": self.user_dn,
+ "objectclass": "user",
+ "sAMAccountName": "%s" % self.user_name,
+ "userAccountControl": str(UF_NORMAL_ACCOUNT),
+ "unicodePwd": utf16pw})
+
+ self.user_creds = Credentials()
+ self.user_creds.guess(self.get_loadparm())
+ self.user_creds.set_password(self.user_pass)
+ self.user_creds.set_username(self.user_name)
+ self.user_creds.set_workstation(self.machine_name)
+ pass
+
+ #
+ # Get the authenticator from the machine creds.
+ def get_authenticator(self, c):
+ auth = self.machine_creds.new_client_authenticator()
+ current = netr_Authenticator()
+ current.cred.data = [x if isinstance(x, int) else ord(x) for x in auth["credential"]]
+ current.timestamp = auth["timestamp"]
+
+ subsequent = netr_Authenticator()
+ return (current, subsequent)
+
+    def do_NetrLogonSamLogonWithFlags(self, c, current, subsequent):
+        # Perform a network logon for the test user over connection c,
+        # using explicit authenticators (unlike the SamLogonEx variants).
+        logon = samlogon_logon_info(self.domain,
+                                    self.machine_name,
+                                    self.user_creds)
+
+        logon_level = netlogon.NetlogonNetworkTransitiveInformation
+        validation_level = netlogon.NetlogonValidationSamInfo4
+        netr_flags = 0
+        c.netr_LogonSamLogonWithFlags(self.server,
+                                      self.user_creds.get_workstation(),
+                                      current,
+                                      subsequent,
+                                      logon_level,
+                                      logon,
+                                      validation_level,
+                                      netr_flags)
+
+    def do_NetrLogonGetDomainInfo(self, c, current, subsequent):
+        # Issue a LogonGetDomainInfo call at level 2 (lsa policy info)
+        # with an empty workstation query.
+        query = netr_WorkstationInformation()
+
+        c.netr_LogonGetDomainInfo(self.server,
+                                  self.user_creds.get_workstation(),
+                                  current,
+                                  subsequent,
+                                  2,
+                                  query)
+
+#
+# Build the logon data required by NetrLogonSamLogonWithFlags
+
+
+def samlogon_logon_info(domain_name, computer_name, creds,
+                        flags=CLI_CRED_NTLMv2_AUTH):
+
+    target_info_blob = samlogon_target(domain_name, computer_name)
+
+    # Fixed 8-byte server challenge; the test only needs determinism.
+    challenge = b"abcdefgh"
+    # User account under test
+    response = creds.get_ntlm_response(flags=flags,
+                                       challenge=challenge,
+                                       target_info=target_info_blob)
+
+    logon = netlogon.netr_NetworkInfo()
+
+    # The pidl bindings want lists of ints; elements may be bytes or str
+    # depending on the Python version/binding, so coerce each one.
+    logon.challenge = [x if isinstance(x, int) else ord(x) for x in challenge]
+    logon.nt = netlogon.netr_ChallengeResponse()
+    logon.nt.length = len(response["nt_response"])
+    logon.nt.data = [x if isinstance(x, int) else ord(x) for x in response["nt_response"]]
+    logon.identity_info = netlogon.netr_IdentityInfo()
+
+    (username, domain) = creds.get_ntlm_username_domain()
+    logon.identity_info.domain_name.string = domain
+    logon.identity_info.account_name.string = username
+    logon.identity_info.workstation.string = creds.get_workstation()
+
+    return logon
+
+#
+# Build the samlogon target info.
+
+
+def samlogon_target(domain_name, computer_name):
+ target_info = ntlmssp.AV_PAIR_LIST()
+ target_info.count = 3
+ computername = ntlmssp.AV_PAIR()
+ computername.AvId = ntlmssp.MsvAvNbComputerName
+ computername.Value = computer_name
+
+ domainname = ntlmssp.AV_PAIR()
+ domainname.AvId = ntlmssp.MsvAvNbDomainName
+ domainname.Value = domain_name
+
+ eol = ntlmssp.AV_PAIR()
+ eol.AvId = ntlmssp.MsvAvEOL
+ target_info.pair = [domainname, computername, eol]
+
+ return ndr_pack(target_info)
diff --git a/python/samba/tests/registry.py b/python/samba/tests/registry.py
new file mode 100644
index 0000000..8154ae3
--- /dev/null
+++ b/python/samba/tests/registry.py
@@ -0,0 +1,79 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for samba.registry."""
+
+import os
+from samba import registry
+import samba.tests
+from samba import WERRORError
+from subprocess import Popen, PIPE
+
+
+class HelperTests(samba.tests.TestCase):
+
+    def test_predef_to_name(self):
+        # 0x80000002 is the well-known predefined handle for HKLM.
+        self.assertEqual("HKEY_LOCAL_MACHINE",
+                         registry.get_predef_name(0x80000002))
+
+    def test_str_regtype(self):
+        # Registry value type 4 is REG_DWORD.
+        self.assertEqual("REG_DWORD", registry.str_regtype(4))
+
+
+class HiveTests(samba.tests.TestCaseInTempDir):
+
+    def setUp(self):
+        super().setUp()
+        self.hive_path = os.path.join(self.tempdir, "ldb_new.ldb")
+        self.hive = registry.open_ldb(self.hive_path)
+
+    def tearDown(self):
+        # Drop the hive reference before unlinking so the ldb file is closed.
+        del self.hive
+        os.unlink(self.hive_path)
+        super().tearDown()
+
+    def test_ldb_new(self):
+        self.assertTrue(self.hive is not None)
+
+    def test_set_value(self):
+        self.assertIsNone(self.hive.set_value('foo1', 1, 'bar1'))
+
+    def test_flush(self):
+        self.assertIsNone(self.hive.set_value('foo2', 1, 'bar2'))
+        self.assertIsNone(self.hive.flush())
+
+        # Prefer the in-tree tdbdump binary when running from a build dir.
+        tdbdump_tool = 'tdbdump'
+        if os.path.isfile('bin/tdbdump'):
+            tdbdump_tool = 'bin/tdbdump'
+
+        # Verify the value actually reached the on-disk tdb.
+        proc = Popen([tdbdump_tool, self.hive_path], stdout=PIPE, stderr=PIPE)
+        tdb_dump, err = proc.communicate()
+        self.assertTrue(b'DN=VALUE=FOO2,HIVE=NONE' in tdb_dump)
+
+    def test_del_value(self):
+        self.assertIsNone(self.hive.set_value('foo3', 1, 'bar3'))
+        self.assertIsNone(self.hive.del_value('foo3'))
+
+    def test_del_nonexisting_value(self):
+        # Deleting a value that was never set must raise WERRORError.
+        self.assertRaises(WERRORError, self.hive.del_value, 'foo4')
+
+
+class RegistryTests(samba.tests.TestCase):
+
+    def test_new(self):
+        # Constructing a Registry object should simply succeed.
+        self.registry = registry.Registry()
+        self.assertIsNotNone(self.registry)
diff --git a/python/samba/tests/reparsepoints.py b/python/samba/tests/reparsepoints.py
new file mode 100644
index 0000000..cb7421d
--- /dev/null
+++ b/python/samba/tests/reparsepoints.py
@@ -0,0 +1,241 @@
+# Unix SMB/CIFS implementation.
+# Copyright Volker Lendecke <vl@samba.org> 2022
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from samba.samba3 import libsmb_samba_internal as libsmb
+from samba import (ntstatus,NTSTATUSError)
+from samba.dcerpc import security as sec
+from samba import reparse_symlink
+import samba.tests.libsmb
+
+class ReparsePoints(samba.tests.libsmb.LibsmbTests):
+
+    def connection(self):
+        # Open a fresh SMB connection to the share named by $SHARENAME;
+        # $SMB1 (optional) forces an SMB1 connection.
+        share = samba.tests.env_get_var_value("SHARENAME")
+        smb1 = samba.tests.env_get_var_value("SMB1", allow_missing=True)
+        conn = libsmb.Conn(
+            self.server_ip,
+            share,
+            self.lp,
+            self.creds,
+            force_smb1=smb1)
+        return conn
+
+    def clean_file(self, conn, filename):
+        # Best-effort delete: a file that is already absent (or is an
+        # unhandled reparse point) is fine; any other error is real.
+        try:
+            conn.unlink(filename)
+        except NTSTATUSError as e:
+            err = e.args[0]
+            ok = (err == ntstatus.NT_STATUS_OBJECT_NAME_NOT_FOUND)
+            ok |= (err == ntstatus.NT_STATUS_OBJECT_PATH_NOT_FOUND)
+            ok |= (err == ntstatus.NT_STATUS_IO_REPARSE_TAG_NOT_HANDLED)
+            if not ok:
+                raise
+
+    def test_error_not_a_reparse_point(self):
+        # GET_REPARSE_POINT on a plain file must fail with
+        # NT_STATUS_NOT_A_REPARSE_POINT.
+        conn = self.connection()
+        filename = 'reparse'
+        self.clean_file(conn, filename)
+
+        fd = conn.create(
+            filename,
+            DesiredAccess=sec.SEC_FILE_WRITE_ATTRIBUTE,
+            CreateDisposition=libsmb.FILE_CREATE)
+
+        with self.assertRaises(NTSTATUSError) as e:
+            conn.fsctl(fd, libsmb.FSCTL_GET_REPARSE_POINT, b'', 1024)
+
+        self.assertEqual(e.exception.args[0],
+                         ntstatus.NT_STATUS_NOT_A_REPARSE_POINT)
+
+        conn.close(fd)
+
+        self.clean_file(conn, filename)
+
+    def test_create_reparse(self):
+        conn = self.connection()
+        filename = 'reparse'
+        self.clean_file(conn, filename)
+
+        fd = conn.create(
+            filename,
+            DesiredAccess=sec.SEC_FILE_WRITE_ATTRIBUTE,
+            CreateDisposition=libsmb.FILE_CREATE)
+
+        # An empty buffer is rejected with INVALID_BUFFER_SIZE...
+        with self.assertRaises(NTSTATUSError) as e:
+            conn.fsctl(fd, libsmb.FSCTL_SET_REPARSE_POINT, b'', 0)
+
+        self.assertEqual(e.exception.args[0],
+                         ntstatus.NT_STATUS_INVALID_BUFFER_SIZE)
+
+        # ...while buffers shorter than a reparse header are rejected with
+        # IO_REPARSE_DATA_INVALID.
+        for i in range(1,15):
+            with self.assertRaises(NTSTATUSError) as e:
+                conn.fsctl(fd, libsmb.FSCTL_SET_REPARSE_POINT, i * b'0', 0)
+
+            self.assertEqual(e.exception.args[0],
+                             ntstatus.NT_STATUS_IO_REPARSE_DATA_INVALID)
+
+        # Create a syntactically valid [MS-FSCC] 2.1.2.2 REPARSE_DATA_BUFFER
+        b = reparse_symlink.put(0x80000025, 0, b'asdfasdfasdfasdfasdfasdf')
+
+        # Show that SET_REPARSE_POINT does exact length checks
+
+        with self.assertRaises(NTSTATUSError) as e:
+            conn.fsctl(fd, libsmb.FSCTL_SET_REPARSE_POINT, b + b'0', 0)
+        self.assertEqual(e.exception.args[0],
+                         ntstatus.NT_STATUS_IO_REPARSE_DATA_INVALID)
+
+        with self.assertRaises(NTSTATUSError) as e:
+            conn.fsctl(fd, libsmb.FSCTL_SET_REPARSE_POINT, b[:-1], 0)
+        self.assertEqual(e.exception.args[0],
+                         ntstatus.NT_STATUS_IO_REPARSE_DATA_INVALID)
+
+        # Setting a valid buffer, and then re-setting with a different tag,
+        # must both succeed.
+        conn.fsctl(fd, libsmb.FSCTL_SET_REPARSE_POINT, b, 0)
+        b = reparse_symlink.put(0x80000026, 0, b'asdfasdfasdfasdfasdfasdf')
+        conn.fsctl(fd, libsmb.FSCTL_SET_REPARSE_POINT, b, 0)
+
+        # NOTE(review): unlike the other tests, fd is never closed and the
+        # file is not cleaned up here — confirm whether that is intentional.
+
+    # Show that we can write to a reparse point when opened properly
+    def test_write_reparse(self):
+        conn = self.connection()
+        filename = 'reparse'
+        self.clean_file(conn, filename)
+
+        fd = conn.create(
+            filename,
+            DesiredAccess=sec.SEC_FILE_WRITE_ATTRIBUTE,
+            CreateDisposition=libsmb.FILE_CREATE)
+        b = reparse_symlink.put(0x80000025, 0, b'asdfasdfasdfasdfasdfasdf')
+        conn.fsctl(fd, libsmb.FSCTL_SET_REPARSE_POINT, b, 0)
+        conn.close(fd)
+
+        # Re-open with FILE_OPEN_REPARSE_POINT so the open targets the
+        # reparse point itself, and check the attribute is reported.
+        fd,cr,_ = conn.create_ex(
+            filename,
+            DesiredAccess=sec.SEC_FILE_WRITE_DATA|sec.SEC_STD_DELETE,
+            CreateOptions=libsmb.FILE_OPEN_REPARSE_POINT,
+            CreateDisposition=libsmb.FILE_OPEN)
+        self.assertEqual(
+            cr['file_attributes'] & libsmb.FILE_ATTRIBUTE_REPARSE_POINT,
+            libsmb.FILE_ATTRIBUTE_REPARSE_POINT)
+
+        conn.write(fd, b'x', 1)
+
+        conn.delete_on_close(fd, 1)
+        conn.close(fd)
+
+    # Show that directories can carry reparse points
+
+    def test_create_reparse_directory(self):
+        conn = self.connection()
+        dirname = "reparse_dir"
+        filename = f'{dirname}\\file.txt'
+
+        self.clean_file(conn, filename)
+        self.clean_file(conn, dirname)
+
+        dir_fd = conn.create(
+            dirname,
+            DesiredAccess=sec.SEC_FILE_WRITE_ATTRIBUTE|
+            sec.SEC_STD_DELETE,
+            CreateDisposition=libsmb.FILE_CREATE,
+            CreateOptions=libsmb.FILE_DIRECTORY_FILE)
+        b = reparse_symlink.put(0x80000025, 0, b'asdfasdfasdfasdfasdfasdf')
+        conn.fsctl(dir_fd, libsmb.FSCTL_SET_REPARSE_POINT, b, 0)
+
+        # Creating a file below a reparse-point directory must fail with
+        # IO_REPARSE_TAG_NOT_HANDLED.
+        with self.assertRaises(NTSTATUSError) as e:
+            fd = conn.create(
+                filename,
+                DesiredAccess=sec.SEC_STD_DELETE,
+                CreateDisposition=libsmb.FILE_CREATE)
+
+        self.assertEqual(e.exception.args[0],
+                         ntstatus.NT_STATUS_IO_REPARSE_TAG_NOT_HANDLED)
+
+        conn.delete_on_close(dir_fd, 1)
+        conn.close(dir_fd)
+
+    # Only empty directories can carry reparse points
+
+    def test_create_reparse_nonempty_directory(self):
+        conn = self.connection()
+        dirname = "reparse_dir"
+        filename = f'{dirname}\\file.txt'
+
+        self.clean_file(conn, filename)
+        self.clean_file(conn, dirname)
+
+        dir_fd = conn.create(
+            dirname,
+            DesiredAccess=sec.SEC_FILE_WRITE_ATTRIBUTE|
+            sec.SEC_STD_DELETE,
+            CreateDisposition=libsmb.FILE_CREATE,
+            CreateOptions=libsmb.FILE_DIRECTORY_FILE)
+        fd = conn.create(
+            filename,
+            DesiredAccess=sec.SEC_FILE_WRITE_ATTRIBUTE|
+            sec.SEC_STD_DELETE,
+            CreateDisposition=libsmb.FILE_CREATE)
+
+        # Setting a reparse point on the now-nonempty directory should fail
+        # with DIRECTORY_NOT_EMPTY (tolerated as a server-dependent result).
+        b = reparse_symlink.put(0x80000025, 0, b'asdf')
+        try:
+            conn.fsctl(dir_fd, libsmb.FSCTL_SET_REPARSE_POINT, b, 0)
+        except NTSTATUSError as e:
+            err = e.args[0]
+            ok = (err == ntstatus.NT_STATUS_DIRECTORY_NOT_EMPTY)
+            if not ok:
+                raise
+
+        conn.delete_on_close(fd, 1)
+        conn.close(fd)
+        conn.delete_on_close(dir_fd, 1)
+        conn.close(dir_fd)
+
+    # Show that reparse point opens respect share modes
+
+    def test_reparse_share_modes(self):
+        conn = self.connection()
+        filename = 'reparse'
+        self.clean_file(conn, filename)
+
+        fd = conn.create(
+            filename,
+            DesiredAccess=sec.SEC_FILE_WRITE_ATTRIBUTE,
+            CreateDisposition=libsmb.FILE_CREATE)
+        b = reparse_symlink.put(0x80000025, 0, b'asdfasdfasdfasdfasdfasdf')
+        conn.fsctl(fd, libsmb.FSCTL_SET_REPARSE_POINT, b, 0)
+        conn.close(fd)
+
+        # First open grants no share access...
+        fd1 = conn.create(
+            filename,
+            DesiredAccess=sec.SEC_FILE_READ_DATA|sec.SEC_STD_DELETE,
+            CreateDisposition=libsmb.FILE_OPEN,
+            CreateOptions=libsmb.FILE_OPEN_REPARSE_POINT)
+
+        # ...so a second open must hit a sharing violation.
+        with self.assertRaises(NTSTATUSError) as e:
+            fd2 = conn.create(
+                filename,
+                DesiredAccess=sec.SEC_FILE_READ_DATA,
+                CreateDisposition=libsmb.FILE_OPEN,
+                CreateOptions=libsmb.FILE_OPEN_REPARSE_POINT)
+
+        self.assertEqual(e.exception.args[0],
+                         ntstatus.NT_STATUS_SHARING_VIOLATION)
+
+        conn.delete_on_close(fd1, 1)
+        conn.close(fd1)
+
+if __name__ == '__main__':
+    # Allow running this test file directly.
+    import unittest
+    unittest.main()
diff --git a/python/samba/tests/s3_net_join.py b/python/samba/tests/s3_net_join.py
new file mode 100644
index 0000000..101f4b1
--- /dev/null
+++ b/python/samba/tests/s3_net_join.py
@@ -0,0 +1,77 @@
+# Unix SMB/CIFS implementation.
+#
+# Copyright (C) David Mulder <dmulder@samba.org> 2020
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""
+Confirm that net_s3.join_member works
+"""
+
+import samba.tests
+import os
+from samba.net_s3 import Net as s3_Net
+from samba.credentials import DONT_USE_KERBEROS
+from samba.samba3 import param as s3param
+from samba import WERRORError
+
+
+def rm(rmdir):
+ for f in os.listdir(rmdir):
+ if os.path.isdir(os.path.join(rmdir, f)):
+ rm(os.path.join(rmdir, f))
+ os.rmdir(os.path.join(rmdir, f))
+ else:
+ os.unlink(os.path.join(rmdir, f))
+
+class NetS3JoinTests(samba.tests.TestCaseInTempDir):
+
+ def setUp(self):
+ super().setUp()
+ self.realm = os.environ["REALM"]
+ self.domain = os.environ["DOMAIN"]
+ self.server = os.environ["SERVER"]
+ self.lp = self.get_loadparm()
+
+ def test_net_join(self):
+ netbios_name = "S3NetJoinTest"
+ machinepass = "abcdefghij"
+ creds = self.insta_creds(template=self.get_credentials(),
+ kerberos_state=DONT_USE_KERBEROS)
+ s3_lp = s3param.get_context()
+ s3_lp.load(self.lp.configfile)
+
+ s3_lp.set('realm', self.realm)
+ s3_lp.set('workgroup', self.domain)
+ s3_lp.set("private dir", self.tempdir)
+ s3_lp.set("lock dir", self.tempdir)
+ s3_lp.set("state directory", self.tempdir)
+ s3_lp.set('server role', 'member server')
+ net = s3_Net(creds, s3_lp, server=self.server)
+
+ try:
+ (domain_sid, domain_name) = net.join_member(netbios_name,
+ machinepass=machinepass)
+ except WERRORError as e:
+ self.fail('Join failed: %s' % e.args[1])
+ raise
+
+ try:
+ ret = net.leave()
+ except WERRORError as e:
+ self.fail('Leave failed: %s' % e.args[1])
+ raise
+ self.assertTrue(ret, 'Leave failed!')
+ rm(self.tempdir)
diff --git a/python/samba/tests/s3idmapdb.py b/python/samba/tests/s3idmapdb.py
new file mode 100644
index 0000000..ca16786
--- /dev/null
+++ b/python/samba/tests/s3idmapdb.py
@@ -0,0 +1,57 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for samba.samba3."""
+
+from samba.samba3 import IdmapDatabase
+from samba.tests import TestCase
+import os
+
+# Locate the samba3 test data directory; the relative depth differs between
+# in-tree and installed runs.
+# NOTE(review): if neither candidate exists, DATADIR is left pointing at the
+# last (nonexistent) path and setUp fails later with a less obvious error.
+for p in ["../../../../../testdata/samba3", "../../../../testdata/samba3"]:
+    DATADIR = os.path.join(os.path.dirname(__file__), p)
+    if os.path.exists(DATADIR):
+        break
+
+
+class IdmapDbTestCase(TestCase):
+
+    def setUp(self):
+        super().setUp()
+        # Open the pre-generated winbindd_idmap fixture read from DATADIR.
+        self.idmapdb = IdmapDatabase(os.path.join(DATADIR,
+                                                  "winbindd_idmap"))
+
+    def test_user_hwm(self):
+        # High-water marks baked into the fixture database.
+        self.assertEqual(10000, self.idmapdb.get_user_hwm())
+
+    def test_group_hwm(self):
+        self.assertEqual(10002, self.idmapdb.get_group_hwm())
+
+    def test_uids(self):
+        self.assertEqual(1, len(list(self.idmapdb.uids())))
+
+    def test_gids(self):
+        self.assertEqual(3, len(list(self.idmapdb.gids())))
+
+    def test_get_user_sid(self):
+        self.assertEqual(b"S-1-5-21-58189338-3053988021-627566699-501", self.idmapdb.get_user_sid(65534))
+
+    def test_get_group_sid(self):
+        self.assertEqual(b"S-1-5-21-2447931902-1787058256-3961074038-3007", self.idmapdb.get_group_sid(10001))
+
+    def tearDown(self):
+        self.idmapdb.close()
+        super().tearDown()
diff --git a/python/samba/tests/s3param.py b/python/samba/tests/s3param.py
new file mode 100644
index 0000000..263ea21
--- /dev/null
+++ b/python/samba/tests/s3param.py
@@ -0,0 +1,50 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for samba.samba3.param"""
+
+from samba.samba3 import param as s3param
+from samba.tests import TestCaseInTempDir
+import os
+
+
+# Locate the samba3 test data directory; the relative depth differs between
+# in-tree and installed runs.
+# NOTE(review): if neither candidate exists, DATADIR is left pointing at the
+# last (nonexistent) path and setUp fails later with a less obvious error.
+for p in ["../../../../../testdata/samba3", "../../../../testdata/samba3"]:
+    DATADIR = os.path.join(os.path.dirname(__file__), p)
+    if os.path.exists(DATADIR):
+        break
+
+
+class ParamTestCase(TestCaseInTempDir):
+
+    def setUp(self):
+        super().setUp()
+        # Copy the fixture tree into the temp dir so tests can't modify
+        # the shared test data.
+        os.system("cp -r %s %s" % (DATADIR, self.tempdir))
+        datadir = os.path.join(self.tempdir, "samba3")
+
+        self.lp = s3param.get_context()
+        self.lp.load(os.path.join(datadir, "smb.conf"))
+
+    def tearDown(self):
+        # NOTE(review): assigning [] presumably just drops the loadparm
+        # reference so the context is released — confirm; None would be
+        # the more conventional sentinel.
+        self.lp = []
+        os.system("rm -rf %s" % os.path.join(self.tempdir, "samba3"))
+        super().tearDown()
+
+    def test_param(self):
+        # Values baked into the fixture smb.conf.
+        self.assertEqual("BEDWYR", self.lp.get("netbios name"))
+        self.assertEqual("SAMBA", self.lp.get("workgroup"))
+        self.assertEqual("USER", self.lp.get("security"))
+        self.assertEqual("/mnt/cd1", self.lp.get("path", "cd1"))
diff --git a/python/samba/tests/s3passdb.py b/python/samba/tests/s3passdb.py
new file mode 100644
index 0000000..b584e07
--- /dev/null
+++ b/python/samba/tests/s3passdb.py
@@ -0,0 +1,138 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for samba.s3passdb"""
+
+from samba.samba3 import passdb
+from samba.samba3 import param as s3param
+from samba.tests import TestCaseInTempDir
+from samba.dcerpc.security import dom_sid
+import os
+
+
# The samba3 testdata sits at different depths in source and build
# trees; try each known location and keep the first that exists.
_CANDIDATES = ("../../../../../testdata/samba3",
               "../../../../testdata/samba3")
for _rel in _CANDIDATES:
    DATADIR = os.path.join(os.path.dirname(__file__), _rel)
    if os.path.exists(DATADIR):
        break
+
+
class PassdbTestCase(TestCaseInTempDir):
    """Tests for the Samba3 passdb wrapper against the tdbsam testdata."""

    def setUp(self):
        """Copy the samba3 testdata into tempdir and open a tdbsam PDB."""
        super().setUp()
        import shutil  # local import: avoids touching the module's imports
        # shutil.copytree replaces os.system("cp -r ..."): no shell is
        # involved, paths with spaces are safe, and errors raise instead
        # of being silently ignored.
        datadir = os.path.join(self.tempdir, "samba3")
        shutil.copytree(DATADIR, datadir)

        self.lp = s3param.get_context()
        self.lp.load(os.path.join(datadir, "smb.conf"))
        # Point every state directory at the copied testdata so the PDB
        # backend only touches files inside tempdir.
        self.lp.set("private dir", datadir)
        self.lp.set("state directory", datadir)
        self.lp.set("lock directory", datadir)
        self.lp.set("cache directory", datadir)
        passdb.set_secrets_dir(datadir)
        self.pdb = passdb.PDB("tdbsam")

    def tearDown(self):
        import shutil
        # Drop references first so the underlying tdb files are released
        # before the directory is removed.
        self.lp = []
        self.pdb = []
        shutil.rmtree(os.path.join(self.tempdir, "samba3"), ignore_errors=True)
        super().tearDown()

    def test_policy(self):
        """Account policy values match those stored in the testdata."""
        policy = self.pdb.get_account_policy()
        self.assertEqual(0, policy['bad lockout attempt'])
        self.assertEqual(-1, policy['disconnect time'])
        self.assertEqual(0, policy['lockout duration'])
        self.assertEqual(999999999, policy['maximum password age'])
        self.assertEqual(0, policy['minimum password age'])
        self.assertEqual(5, policy['min password length'])
        self.assertEqual(0, policy['password history'])
        self.assertEqual(0, policy['refuse machine password change'])
        self.assertEqual(0, policy['reset count minutes'])
        self.assertEqual(0, policy['user must logon to change password'])

    def test_get_sid(self):
        """The domain SID comes from the copied secrets store."""
        domain_sid = passdb.get_global_sam_sid()
        self.assertEqual(dom_sid("S-1-5-21-2470180966-3899876309-2637894779"), domain_sid)

    def test_usernames(self):
        """All three users of the testdata tdbsam are enumerated."""
        userlist = self.pdb.search_users(0)
        self.assertEqual(3, len(userlist))

    def test_getuser(self):
        """Every field of the 'root' sample user round-trips correctly."""
        user = self.pdb.getsampwnam("root")

        self.assertEqual(16, user.acct_ctrl)
        self.assertEqual("", user.acct_desc)
        self.assertEqual(0, user.bad_password_count)
        self.assertEqual(0, user.bad_password_time)
        self.assertEqual(0, user.code_page)
        self.assertEqual(0, user.country_code)
        self.assertEqual("", user.dir_drive)
        self.assertEqual("BEDWYR", user.domain)
        self.assertEqual("root", user.full_name)
        self.assertEqual(dom_sid('S-1-5-21-2470180966-3899876309-2637894779-513'), user.group_sid)
        self.assertEqual("\\\\BEDWYR\\root", user.home_dir)
        self.assertEqual([-1 for i in range(21)], user.hours)
        self.assertEqual(21, user.hours_len)
        self.assertEqual(9223372036854775807, user.kickoff_time)
        self.assertEqual(None, user.lanman_passwd)
        self.assertEqual(9223372036854775807, user.logoff_time)
        self.assertEqual(0, user.logon_count)
        self.assertEqual(168, user.logon_divs)
        self.assertEqual("", user.logon_script)
        self.assertEqual(0, user.logon_time)
        self.assertEqual("", user.munged_dial)
        self.assertEqual(b'\x87\x8d\x80\x14`l\xda)gzD\xef\xa15?\xc7', user.nt_passwd)
        self.assertEqual("", user.nt_username)
        self.assertEqual(1125418267, user.pass_can_change_time)
        self.assertEqual(1125418267, user.pass_last_set_time)
        self.assertEqual(2125418266, user.pass_must_change_time)
        self.assertEqual(None, user.plaintext_passwd)
        self.assertEqual("\\\\BEDWYR\\root\\profile", user.profile_path)
        self.assertEqual(None, user.pw_history)
        self.assertEqual(dom_sid("S-1-5-21-2470180966-3899876309-2637894779-1000"), user.user_sid)
        self.assertEqual("root", user.username)
        self.assertEqual("", user.workstations)

    def test_group_length(self):
        """All 13 group mappings of the testdata are enumerated."""
        grouplist = self.pdb.enum_group_mapping()
        self.assertEqual(13, len(grouplist))

    def test_get_group(self):
        """A well-known builtin group can be fetched by SID."""
        group = self.pdb.getgrsid(dom_sid("S-1-5-32-544"))
        self.assertEqual("Administrators", group.nt_name)
        self.assertEqual(-1, group.gid)
        self.assertEqual(5, group.sid_name_use)

    def test_groupsids(self):
        """The builtin well-known SIDs appear in the group mapping."""
        grouplist = self.pdb.enum_group_mapping()
        sids = []
        for g in grouplist:
            sids.append(str(g.sid))
        self.assertTrue("S-1-5-32-544" in sids)
        self.assertTrue("S-1-5-32-545" in sids)
        self.assertTrue("S-1-5-32-546" in sids)
        self.assertTrue("S-1-5-32-548" in sids)
        self.assertTrue("S-1-5-32-549" in sids)
        self.assertTrue("S-1-5-32-550" in sids)
        self.assertTrue("S-1-5-32-551" in sids)

    def test_alias_length(self):
        """Exactly one alias exists in the testdata."""
        aliaslist = self.pdb.search_aliases()
        self.assertEqual(1, len(aliaslist))
        self.assertEqual("Jelmers NT Group", aliaslist[0]['account_name'])
diff --git a/python/samba/tests/s3registry.py b/python/samba/tests/s3registry.py
new file mode 100644
index 0000000..1932d06
--- /dev/null
+++ b/python/samba/tests/s3registry.py
@@ -0,0 +1,53 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for samba.samba3."""
+
+from samba.samba3 import Registry
+from samba.tests import TestCase
+import os
+
+
# Find the samba3 testdata directory: its relative depth differs between
# source and build layouts, so both candidates are probed in order.
for _path in ("../../../../../testdata/samba3",
              "../../../../testdata/samba3"):
    DATADIR = os.path.join(os.path.dirname(__file__), _path)
    if os.path.exists(DATADIR):
        break
+
+
class RegistryTestCase(TestCase):
    """Tests for reading a Samba3 registry.tdb via samba.samba3.Registry."""

    def setUp(self):
        super().setUp()
        self.registry = Registry(os.path.join(DATADIR, "registry"))

    def tearDown(self):
        self.registry.close()
        super().tearDown()

    def test_length(self):
        """The testdata registry contains exactly 28 keys."""
        self.assertEqual(28, len(self.registry))

    def test_keys(self):
        """HKLM is present among the stored keys."""
        self.assertTrue(b"HKLM" in self.registry.keys())

    def test_subkeys(self):
        """HKLM carries exactly the SOFTWARE and SYSTEM subkeys."""
        self.assertEqual([b"SOFTWARE", b"SYSTEM"],
                         self.registry.subkeys(b"HKLM"))

    def test_values(self):
        """The eventlog service key exposes its two expected values."""
        expected = {
            b'DisplayName': (1, b'E\x00v\x00e\x00n\x00t\x00 \x00L\x00o\x00g\x00\x00\x00'),
            b'ErrorControl': (4, b'\x01\x00\x00\x00'),
        }
        self.assertEqual(
            expected,
            self.registry.values(b"HKLM/SYSTEM/CURRENTCONTROLSET/SERVICES/EVENTLOG"))
diff --git a/python/samba/tests/s3windb.py b/python/samba/tests/s3windb.py
new file mode 100644
index 0000000..62f429c
--- /dev/null
+++ b/python/samba/tests/s3windb.py
@@ -0,0 +1,45 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for samba.samba3."""
+
+from samba.samba3 import WinsDatabase
+from samba.tests import TestCase
+import os
+
+
# Resolve the samba3 testdata directory; probe the two known depths
# (source tree vs. build tree) and keep the first existing one.
_SEARCH_PATHS = ("../../../../../testdata/samba3",
                 "../../../../testdata/samba3")
for _sp in _SEARCH_PATHS:
    DATADIR = os.path.join(os.path.dirname(__file__), _sp)
    if os.path.exists(DATADIR):
        break
+
+
class WinsDatabaseTestCase(TestCase):
    """Tests for reading a Samba3 wins.dat via samba.samba3.WinsDatabase."""

    def setUp(self):
        super().setUp()
        self.winsdb = WinsDatabase(os.path.join(DATADIR, "wins.dat"))

    def tearDown(self):
        self.winsdb.close()
        super().tearDown()

    def test_length(self):
        """All 22 entries of the sample database are visible."""
        self.assertEqual(22, len(self.winsdb))

    def test_first_entry(self):
        """ADMINISTRATOR#03 maps to its (expiry, addresses, flags) triple."""
        expected = (1124185120, ["192.168.1.5"], 0x64)
        self.assertEqual(expected, self.winsdb["ADMINISTRATOR#03"])
diff --git a/python/samba/tests/safe_tarfile.py b/python/samba/tests/safe_tarfile.py
new file mode 100644
index 0000000..1f2cb03
--- /dev/null
+++ b/python/samba/tests/safe_tarfile.py
@@ -0,0 +1,81 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Douglas Bagnall <douglas.bagnall@catalyst.net.nz>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+import tarfile
+from samba import safe_tarfile
+
+import os
+from samba.tests import TestCaseInTempDir
+
+
def filterer(prefix):
    """Return a tarfile add() filter that prepends *prefix* to member names.

    Used to build deliberately malicious archives (e.g. '../' or '/'
    prefixed members) for the safe_tarfile tests.
    """
    def add_prefix(info):
        info.name = prefix + info.name
        return info
    return add_prefix
+
+
class SafeTarFileTestCase(TestCaseInTempDir):
    """safe_tarfile must refuse to extract archives whose member names
    would place files outside the destination (CVE-2007-4559 style)."""

    def _make_tar(self, prefix):
        """Build a one-member tar whose member name starts with *prefix*.

        Creates 'x' and 'tar.tar' inside tempdir (callers remove them via
        self.rm_files) and returns the path of the tar file. Context
        managers replace the original bare open()/close() calls so the
        handles are closed even on error.
        """
        filename = os.path.join(self.tempdir, 'x')
        tarname = os.path.join(self.tempdir, 'tar.tar')
        with open(filename, 'w') as f:
            f.write('x')
        with tarfile.open(tarname, 'w') as tf:
            tf.add(filename, filter=filterer(prefix))
        return tarname

    def test_dots(self):
        """A '../'-prefixed member must not escape the destination."""
        tarname = self._make_tar('../../')
        stf = safe_tarfile.open(tarname)

        # If we have data_filter, we have a patched python to address
        # CVE-2007-4559.
        if hasattr(tarfile, "data_filter"):
            self.assertRaises(tarfile.OutsideDestinationError,
                              stf.extractall,
                              tarname)
        else:
            self.assertRaises(tarfile.ExtractError,
                              stf.extractall,
                              tarname)
        self.rm_files('x', 'tar.tar')

    def test_slash(self):
        """An absolute ('/'-prefixed) member name must be rejected."""
        tarname = self._make_tar('/')
        stf = safe_tarfile.open(tarname)

        # If we have data_filter, we have a patched python to address
        # CVE-2007-4559.
        if hasattr(tarfile, "data_filter"):
            self.assertRaises(NotADirectoryError,
                              stf.extractall,
                              tarname)
        else:
            self.assertRaises(tarfile.ExtractError,
                              stf.extractall,
                              tarname)
        self.rm_files('x', 'tar.tar')
diff --git a/python/samba/tests/samba3sam.py b/python/samba/tests/samba3sam.py
new file mode 100644
index 0000000..1f88602
--- /dev/null
+++ b/python/samba/tests/samba3sam.py
@@ -0,0 +1,1125 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2005-2008
+# Copyright (C) Martin Kuehl <mkhl@samba.org> 2006
+#
+# This is a Python port of the original in testprogs/ejs/samba3sam.js
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+"""Tests for the samba3sam LDB module, which maps Samba3 LDAP to AD LDAP."""
+
+import os
+import ldb
+from ldb import SCOPE_DEFAULT, SCOPE_BASE
+from samba import Ldb, substitute_var
+from samba.tests import TestCaseInTempDir, env_loadparm
+import samba.dcerpc.security
+import samba.ndr
+from samba.auth import system_session
+from operator import attrgetter
+
+
def read_datafile(filename):
    """Return the text contents of *filename* from the samba3 testdata dir.

    The testdata directory sits at a different depth relative to this
    module in source and build trees, so both locations are probed and
    the first existing one wins.
    """
    paths = ["../../../../../testdata/samba3",
             "../../../../testdata/samba3"]
    for p in paths:
        datadir = os.path.join(os.path.dirname(__file__), p)
        if os.path.exists(datadir):
            break
    # Use a context manager so the handle is closed promptly; the
    # original open(...).read() leaked it until garbage collection.
    with open(os.path.join(datadir, filename), 'r') as f:
        return f.read()
+
+
def ldb_debug(l, text):
    """Debug callback for ldb: print the message; the ldb handle is unused."""
    del l  # present only to satisfy the callback signature
    print(text)
+
+
class MapBaseTestCase(TestCaseInTempDir):
    """Base test case for mapping tests."""

    def setup_modules(self, ldb, s3, s4):
        # Configure the samba3sam mapping on *ldb*: entries under the
        # samba4 base DN are mapped onto the samba3 partition.
        ldb.add({"dn": "@MAP=samba3sam",
                 "@FROM": s4.basedn,
                 "@TO": "sambaDomainName=TESTS," + s3.basedn})

        # Module stack used when the database is next connected.
        ldb.add({"dn": "@MODULES",
                 "@LIST": "rootdse,dsdb_paged_results,server_sort,asq,samldb,password_hash,operational,objectguid,rdn_name,samba3sam,samba3sid,show_deleted_ignore,dsdb_flags_ignore,partition"})

        # Declare both partitions; @ATTRIBUTES/@INDEXLIST changes are
        # replicated to each.
        ldb.add({"dn": "@PARTITION",
                 "partition": ["%s" % (s4.basedn_casefold),
                               "%s" % (s3.basedn_casefold)],
                 "replicateEntries": ["@ATTRIBUTES", "@INDEXLIST"],
                 "modules": "*:"})

    def setUp(self):
        # loadparm must be configured before the base-class setUp runs.
        self.lp = env_loadparm()
        self.lp.set("workgroup", "TESTS")
        self.lp.set("netbios name", "TESTS")
        super().setUp()

        # DN builders: samba3 entries live below sambaDomainName=TESTS,
        # samba4 entries sit directly under the base DN.
        def make_dn(basedn, rdn):
            return "%s,sambaDomainName=TESTS,%s" % (rdn, basedn)

        def make_s4dn(basedn, rdn):
            return "%s,%s" % (rdn, basedn)

        self.ldbfile = os.path.join(self.tempdir, "sam.ldb")
        self.ldburl = "tdb://" + self.ldbfile

        tempdir = self.tempdir  # captured by the Target class below

        class Target:
            """Simple helper class that contains data for a specific SAM
            connection."""

            def __init__(self, basedn, dn, lp):
                self.db = Ldb(lp=lp, session_info=system_session())
                self.db.set_opaque("skip_allocate_sids", "true")
                self.basedn = basedn
                self.basedn_casefold = ldb.Dn(self.db, basedn).get_casefold()
                self.substvars = {"BASEDN": self.basedn}
                self.file = os.path.join(tempdir, "%s.ldb" % self.basedn_casefold)
                self.url = "tdb://" + self.file
                self._dn = dn  # DN-building strategy (make_dn / make_s4dn)

            def dn(self, rdn):
                return self._dn(self.basedn, rdn)

            def connect(self):
                return self.db.connect(self.url)

            def setup_data(self, path):
                # Load an LDIF file from the testdata directory.
                self.add_ldif(read_datafile(path))

            def subst(self, text):
                # Expand ${BASEDN}-style variables in LDIF text.
                return substitute_var(text, self.substvars)

            def add_ldif(self, ldif):
                self.db.add_ldif(self.subst(ldif))

            def modify_ldif(self, ldif):
                self.db.modify_ldif(self.subst(ldif))

        self.samba4 = Target("dc=vernstok,dc=nl", make_s4dn, self.lp)
        self.samba3 = Target("cn=Samba3Sam", make_dn, self.lp)

        self.samba3.connect()
        self.samba4.connect()

    def tearDown(self):
        os.unlink(self.ldbfile)
        os.unlink(self.samba3.file)
        os.unlink(self.samba4.file)
        # NOTE(review): the partition module appears to create a "<db>.d"
        # metadata store next to the main ldb file -- remove it if present.
        pdir = "%s.d" % self.ldbfile
        mdata = os.path.join(pdir, "metadata.tdb")
        if os.path.exists(mdata):
            os.unlink(mdata)
            os.rmdir(pdir)
        super().tearDown()

    def assertSidEquals(self, text, ndr_sid):
        # Compare an NDR-packed SID attribute value against its string form.
        sid_obj1 = samba.ndr.ndr_unpack(samba.dcerpc.security.dom_sid,
                                        ndr_sid[0])
        sid_obj2 = samba.dcerpc.security.dom_sid(text)
        self.assertEqual(sid_obj1, sid_obj2)
+
+
class Samba3SamTestCase(MapBaseTestCase):
    """Search/modify tests against the samba3sam map with sample data loaded."""

    def setUp(self):
        super().setUp()
        # Provision through a throw-away connection; NOTE(review): the
        # reconnect below presumably makes the @MODULES configuration take
        # effect -- confirm against ldb module semantics.
        ldb = Ldb(self.ldburl, lp=self.lp, session_info=system_session())
        ldb.set_opaque("skip_allocate_sids", "true")
        self.samba3.setup_data("samba3.ldif")
        ldif = read_datafile("provision_samba3sam.ldif")
        ldb.add_ldif(self.samba4.subst(ldif))
        self.setup_modules(ldb, self.samba3, self.samba4)
        del ldb
        self.ldb = Ldb(self.ldburl, lp=self.lp, session_info=system_session())
        self.ldb.set_opaque("skip_allocate_sids", "true")

    def test_search_non_mapped(self):
        """Looking up by non-mapped attribute"""
        msg = self.ldb.search(expression="(cn=Administrator)")
        self.assertEqual(len(msg), 1)
        self.assertEqual(str(msg[0]["cn"]), "Administrator")

    def test_search_mapped(self):
        """Looking up by mapped attribute"""
        msg = self.ldb.search(expression="(name=Backup Operators)")
        self.assertEqual(len(msg), 1)
        self.assertEqual(str(msg[0]["name"]), "Backup Operators")

    def test_old_name_of_renamed(self):
        """Looking up by old name of renamed attribute"""
        msg = self.ldb.search(expression="(displayName=Backup Operators)")
        self.assertEqual(len(msg), 0)

    def test_mapped_containing_sid(self):
        """Looking up mapped entry containing SID"""
        msg = self.ldb.search(expression="(cn=Replicator)")
        self.assertEqual(len(msg), 1)
        self.assertEqual(str(msg[0].dn),
                         "cn=Replicator,ou=Groups,dc=vernstok,dc=nl")
        self.assertTrue("objectSid" in msg[0])
        self.assertSidEquals("S-1-5-21-4231626423-2410014848-2360679739-1052",
                             msg[0]["objectSid"])
        oc = set(msg[0]["objectClass"])
        self.assertEqual(oc, set([b"group"]))

    def test_search_by_objclass(self):
        """Looking up by objectClass"""
        msg = self.ldb.search(expression="(|(objectClass=user)(cn=Administrator))")
        self.assertEqual(set([str(m.dn) for m in msg]),
                         set(["unixName=Administrator,ou=Users,dc=vernstok,dc=nl",
                              "unixName=nobody,ou=Users,dc=vernstok,dc=nl"]))

    def test_s3sam_modify(self):
        # Adding a record that will be fallbacked
        self.ldb.add({
            "dn": "cn=Foo",
            "foo": "bar",
            "blah": "Blie",
            "cn": "Foo",
            "showInAdvancedViewOnly": "TRUE"})

        # Checking for existence of record (local)
        # TODO: This record must be searched in the local database, which is
        # currently only supported for base searches
        # msg = ldb.search(expression="(cn=Foo)", ['foo','blah','cn','showInAdvancedViewOnly')]
        # TODO: Actually, this version should work as well but doesn't...
        #
        #
        msg = self.ldb.search(expression="(cn=Foo)", base="cn=Foo",
                              scope=SCOPE_BASE,
                              attrs=['foo', 'blah', 'cn', 'showInAdvancedViewOnly'])
        self.assertEqual(len(msg), 1)
        self.assertEqual(str(msg[0]["showInAdvancedViewOnly"]), "TRUE")
        self.assertEqual(str(msg[0]["foo"]), "bar")
        self.assertEqual(str(msg[0]["blah"]), "Blie")

        # Adding record that will be mapped
        self.ldb.add({"dn": "cn=Niemand,cn=Users,dc=vernstok,dc=nl",
                      "objectClass": "user",
                      "unixName": "bin",
                      "sambaUnicodePwd": "geheim",
                      "cn": "Niemand"})

        # Checking for existence of record (remote)
        msg = self.ldb.search(expression="(unixName=bin)",
                              attrs=['unixName', 'cn', 'dn', 'sambaUnicodePwd'])
        self.assertEqual(len(msg), 1)
        self.assertEqual(str(msg[0]["cn"]), "Niemand")
        self.assertEqual(str(msg[0]["sambaUnicodePwd"]), "geheim")

        # Checking for existence of record (local && remote)
        msg = self.ldb.search(expression="(&(unixName=bin)(sambaUnicodePwd=geheim))",
                              attrs=['unixName', 'cn', 'dn', 'sambaUnicodePwd'])
        self.assertEqual(len(msg), 1)            # TODO: should check with more records
        self.assertEqual(str(msg[0]["cn"]), "Niemand")
        self.assertEqual(str(msg[0]["unixName"]), "bin")
        self.assertEqual(str(msg[0]["sambaUnicodePwd"]), "geheim")

        # Checking for existence of record (local || remote)
        msg = self.ldb.search(expression="(|(unixName=bin)(sambaUnicodePwd=geheim))",
                              attrs=['unixName', 'cn', 'dn', 'sambaUnicodePwd'])
        # print "got %d replies" % len(msg)
        self.assertEqual(len(msg), 1)        # TODO: should check with more records
        self.assertEqual(str(msg[0]["cn"]), "Niemand")
        self.assertEqual(str(msg[0]["unixName"]), "bin")
        self.assertEqual(str(msg[0]["sambaUnicodePwd"]), "geheim")

        # Checking for data in destination database
        msg = self.samba3.db.search(expression="(cn=Niemand)")
        self.assertTrue(len(msg) >= 1)
        self.assertEqual(str(msg[0]["sambaSID"]),
                         "S-1-5-21-4231626423-2410014848-2360679739-2001")
        self.assertEqual(str(msg[0]["displayName"]), "Niemand")

        # Adding attribute...
        self.ldb.modify_ldif("""
dn: cn=Niemand,cn=Users,dc=vernstok,dc=nl
changetype: modify
add: description
description: Blah
""")

        # Checking whether changes are still there...
        msg = self.ldb.search(expression="(cn=Niemand)")
        self.assertTrue(len(msg) >= 1)
        self.assertEqual(str(msg[0]["cn"]), "Niemand")
        self.assertEqual(str(msg[0]["description"]), "Blah")

        # Modifying attribute...
        self.ldb.modify_ldif("""
dn: cn=Niemand,cn=Users,dc=vernstok,dc=nl
changetype: modify
replace: description
description: Blie
""")

        # Checking whether changes are still there...
        msg = self.ldb.search(expression="(cn=Niemand)")
        self.assertTrue(len(msg) >= 1)
        self.assertEqual(str(msg[0]["description"]), "Blie")

        # Deleting attribute...
        self.ldb.modify_ldif("""
dn: cn=Niemand,cn=Users,dc=vernstok,dc=nl
changetype: modify
delete: description
""")

        # Checking whether changes are no longer there...
        msg = self.ldb.search(expression="(cn=Niemand)")
        self.assertTrue(len(msg) >= 1)
        self.assertTrue("description" not in msg[0])

        # Renaming record...
        self.ldb.rename("cn=Niemand,cn=Users,dc=vernstok,dc=nl",
                        "cn=Niemand2,cn=Users,dc=vernstok,dc=nl")

        # Checking whether DN has changed...
        msg = self.ldb.search(expression="(cn=Niemand2)")
        self.assertEqual(len(msg), 1)
        self.assertEqual(str(msg[0].dn),
                         "cn=Niemand2,cn=Users,dc=vernstok,dc=nl")

        # Deleting record...
        self.ldb.delete("cn=Niemand2,cn=Users,dc=vernstok,dc=nl")

        # Checking whether record is gone...
        msg = self.ldb.search(expression="(cn=Niemand2)")
        self.assertEqual(len(msg), 0)
+
+
+class MapTestCase(MapBaseTestCase):
+
    def setUp(self):
        """Provision the mapped sam.ldb (no samba3 sample data loaded)."""
        super().setUp()
        # Provision through a throw-away connection; NOTE(review): the
        # reconnect below presumably makes the @MODULES configuration take
        # effect -- confirm against ldb module semantics.
        ldb = Ldb(self.ldburl, lp=self.lp, session_info=system_session())
        ldb.set_opaque("skip_allocate_sids", "true")
        ldif = read_datafile("provision_samba3sam.ldif")
        ldb.add_ldif(self.samba4.subst(ldif))
        self.setup_modules(ldb, self.samba3, self.samba4)
        del ldb
        self.ldb = Ldb(self.ldburl, lp=self.lp, session_info=system_session())
        self.ldb.set_opaque("skip_allocate_sids", "true")
+
+ def test_map_search(self):
+ """Running search tests on mapped data."""
+ self.samba3.db.add({
+ "dn": "sambaDomainName=TESTS," + self.samba3.basedn,
+ "objectclass": ["sambaDomain", "top"],
+ "sambaSID": "S-1-5-21-4231626423-2410014848-2360679739",
+ "sambaNextRid": "2000",
+ "sambaDomainName": "TESTS"
+ })
+
+ # Add a set of split records
+ self.ldb.add_ldif("""
+dn: """ + self.samba4.dn("cn=Domain Users") + """
+objectClass: group
+cn: Domain Users
+objectSid: S-1-5-21-4231626423-2410014848-2360679739-513
+""")
+
+ # Add a set of split records
+ self.ldb.add_ldif("""
+dn: """ + self.samba4.dn("cn=X") + """
+objectClass: user
+cn: X
+codePage: x
+revision: x
+dnsHostName: x
+nextRid: y
+lastLogon: x
+description: x
+objectSid: S-1-5-21-4231626423-2410014848-2360679739-1052
+""")
+
+ self.ldb.add({
+ "dn": self.samba4.dn("cn=Y"),
+ "objectClass": "top",
+ "cn": "Y",
+ "codePage": "x",
+ "revision": "x",
+ "dnsHostName": "y",
+ "nextRid": "y",
+ "lastLogon": "y",
+ "description": "x"})
+
+ self.ldb.add({
+ "dn": self.samba4.dn("cn=Z"),
+ "objectClass": "top",
+ "cn": "Z",
+ "codePage": "x",
+ "revision": "y",
+ "dnsHostName": "z",
+ "nextRid": "y",
+ "lastLogon": "z",
+ "description": "y"})
+
+ # Add a set of remote records
+
+ self.samba3.db.add({
+ "dn": self.samba3.dn("cn=A"),
+ "objectClass": "posixAccount",
+ "cn": "A",
+ "sambaNextRid": "x",
+ "sambaBadPasswordCount": "x",
+ "sambaLogonTime": "x",
+ "description": "x",
+ "sambaSID": "S-1-5-21-4231626423-2410014848-2360679739-1052",
+ "sambaPrimaryGroupSID": "S-1-5-21-4231626423-2410014848-2360679739-512"})
+
+ self.samba3.db.add({
+ "dn": self.samba3.dn("cn=B"),
+ "objectClass": "top",
+ "cn": "B",
+ "sambaNextRid": "x",
+ "sambaBadPasswordCount": "x",
+ "sambaLogonTime": "y",
+ "description": "x"})
+
+ self.samba3.db.add({
+ "dn": self.samba3.dn("cn=C"),
+ "objectClass": "top",
+ "cn": "C",
+ "sambaNextRid": "x",
+ "sambaBadPasswordCount": "y",
+ "sambaLogonTime": "z",
+ "description": "y"})
+
+ # Testing search by DN
+
+ # Search remote record by local DN
+ dn = self.samba4.dn("cn=A")
+ res = self.ldb.search(dn, scope=SCOPE_BASE,
+ attrs=["dnsHostName", "lastLogon"])
+ self.assertEqual(len(res), 1)
+ self.assertEqual(str(res[0].dn), dn)
+ self.assertTrue("dnsHostName" not in res[0])
+ self.assertEqual(str(res[0]["lastLogon"]), "x")
+
+ # Search remote record by remote DN
+ dn = self.samba3.dn("cn=A")
+ res = self.samba3.db.search(dn, scope=SCOPE_BASE,
+ attrs=["dnsHostName", "lastLogon", "sambaLogonTime"])
+ self.assertEqual(len(res), 1)
+ self.assertEqual(str(res[0].dn), dn)
+ self.assertTrue("dnsHostName" not in res[0])
+ self.assertTrue("lastLogon" not in res[0])
+ self.assertEqual(str(res[0]["sambaLogonTime"]), "x")
+
+ # Search split record by local DN
+ dn = self.samba4.dn("cn=X")
+ res = self.ldb.search(dn, scope=SCOPE_BASE,
+ attrs=["dnsHostName", "lastLogon"])
+ self.assertEqual(len(res), 1)
+ self.assertEqual(str(res[0].dn), dn)
+ self.assertEqual(str(res[0]["dnsHostName"]), "x")
+ self.assertEqual(str(res[0]["lastLogon"]), "x")
+
+ # Search split record by remote DN
+ dn = self.samba3.dn("cn=X")
+ res = self.samba3.db.search(dn, scope=SCOPE_BASE,
+ attrs=["dnsHostName", "lastLogon", "sambaLogonTime"])
+ self.assertEqual(len(res), 1)
+ self.assertEqual(str(res[0].dn), dn)
+ self.assertTrue("dnsHostName" not in res[0])
+ self.assertTrue("lastLogon" not in res[0])
+ self.assertEqual(str(res[0]["sambaLogonTime"]), "x")
+
+ # Testing search by attribute
+
+ # Search by ignored attribute
+ res = self.ldb.search(expression="(revision=x)", scope=SCOPE_DEFAULT,
+ attrs=["dnsHostName", "lastLogon"])
+ self.assertEqual(len(res), 2)
+ res = sorted(res, key=attrgetter('dn'))
+ self.assertEqual(str(res[0].dn), self.samba4.dn("cn=X"))
+ self.assertEqual(str(res[0]["dnsHostName"]), "x")
+ self.assertEqual(str(res[0]["lastLogon"]), "x")
+ self.assertEqual(str(res[1].dn), self.samba4.dn("cn=Y"))
+ self.assertEqual(str(res[1]["dnsHostName"]), "y")
+ self.assertEqual(str(res[1]["lastLogon"]), "y")
+
+ # Search by kept attribute
+ res = self.ldb.search(expression="(description=y)",
+ scope=SCOPE_DEFAULT, attrs=["dnsHostName", "lastLogon"])
+ self.assertEqual(len(res), 2)
+ res = sorted(res, key=attrgetter('dn'))
+ self.assertEqual(str(res[0].dn), self.samba4.dn("cn=C"))
+ self.assertTrue("dnsHostName" not in res[0])
+ self.assertEqual(str(res[0]["lastLogon"]), "z")
+ self.assertEqual(str(res[1].dn), self.samba4.dn("cn=Z"))
+ self.assertEqual(str(res[1]["dnsHostName"]), "z")
+ self.assertEqual(str(res[1]["lastLogon"]), "z")
+
+ # Search by renamed attribute
+ res = self.ldb.search(expression="(badPwdCount=x)", scope=SCOPE_DEFAULT,
+ attrs=["dnsHostName", "lastLogon"])
+ self.assertEqual(len(res), 2)
+ res = sorted(res, key=attrgetter('dn'))
+ self.assertEqual(str(res[0].dn), self.samba4.dn("cn=A"))
+ self.assertTrue("dnsHostName" not in res[0])
+ self.assertEqual(str(res[0]["lastLogon"]), "x")
+ self.assertEqual(str(res[1].dn), self.samba4.dn("cn=B"))
+ self.assertTrue("dnsHostName" not in res[1])
+ self.assertEqual(str(res[1]["lastLogon"]), "y")
+
+ # Search by converted attribute
+ # TODO:
+ # Using the SID directly in the parse tree leads to conversion
+ # errors, letting the search fail with no results.
+ # res = self.ldb.search("(objectSid=S-1-5-21-4231626423-2410014848-2360679739-1052)", scope=SCOPE_DEFAULT, attrs)
+ res = self.ldb.search(expression="(objectSid=*)", base=None, scope=SCOPE_DEFAULT, attrs=["dnsHostName", "lastLogon", "objectSid"])
+ self.assertEqual(len(res), 4)
+ res = sorted(res, key=attrgetter('dn'))
+ self.assertEqual(str(res[1].dn), self.samba4.dn("cn=X"))
+ self.assertEqual(str(res[1]["dnsHostName"]), "x")
+ self.assertEqual(str(res[1]["lastLogon"]), "x")
+ self.assertSidEquals("S-1-5-21-4231626423-2410014848-2360679739-1052",
+ res[1]["objectSid"])
+ self.assertTrue("objectSid" in res[1])
+ self.assertEqual(str(res[0].dn), self.samba4.dn("cn=A"))
+ self.assertTrue("dnsHostName" not in res[0])
+ self.assertEqual(str(res[0]["lastLogon"]), "x")
+ self.assertSidEquals("S-1-5-21-4231626423-2410014848-2360679739-1052",
+ res[0]["objectSid"])
+ self.assertTrue("objectSid" in res[0])
+
+ # Search by generated attribute
+ # In most cases, this even works when the mapping is missing
+ # a `convert_operator' by enumerating the remote db.
+ res = self.ldb.search(expression="(primaryGroupID=512)",
+ attrs=["dnsHostName", "lastLogon", "primaryGroupID"])
+ self.assertEqual(len(res), 1)
+ self.assertEqual(str(res[0].dn), self.samba4.dn("cn=A"))
+ self.assertTrue("dnsHostName" not in res[0])
+ self.assertEqual(str(res[0]["lastLogon"]), "x")
+ self.assertEqual(str(res[0]["primaryGroupID"]), "512")
+
+ # Note that Xs "objectSid" seems to be fine in the previous search for
+ # "objectSid"...
+ # res = ldb.search(expression="(primaryGroupID=*)", NULL, ldb. SCOPE_DEFAULT, attrs)
+ # print len(res) + " results found"
+ # for i in range(len(res)):
+ # for (obj in res[i]) {
+ # print obj + ": " + res[i][obj]
+ # }
+ # print "---"
+ #
+
+ # Search by remote name of renamed attribute */
+ res = self.ldb.search(expression="(sambaBadPasswordCount=*)",
+ attrs=["dnsHostName", "lastLogon"])
+ self.assertEqual(len(res), 0)
+
+ # Search by objectClass
+ attrs = ["dnsHostName", "lastLogon", "objectClass"]
+ res = self.ldb.search(expression="(objectClass=user)", attrs=attrs)
+ self.assertEqual(len(res), 2)
+ res = sorted(res, key=attrgetter('dn'))
+ self.assertEqual(str(res[0].dn), self.samba4.dn("cn=A"))
+ self.assertTrue("dnsHostName" not in res[0])
+ self.assertEqual(str(res[0]["lastLogon"]), "x")
+ self.assertEqual(str(res[0]["objectClass"][0]), "user")
+ self.assertEqual(str(res[1].dn), self.samba4.dn("cn=X"))
+ self.assertEqual(str(res[1]["dnsHostName"]), "x")
+ self.assertEqual(str(res[1]["lastLogon"]), "x")
+ self.assertEqual(str(res[1]["objectClass"][0]), "user")
+
+ # Prove that the objectClass is actually used for the search
+ res = self.ldb.search(expression="(|(objectClass=user)(badPwdCount=x))",
+ attrs=attrs)
+ self.assertEqual(len(res), 3)
+ res = sorted(res, key=attrgetter('dn'))
+ self.assertEqual(str(res[0].dn), self.samba4.dn("cn=A"))
+ self.assertTrue("dnsHostName" not in res[0])
+ self.assertEqual(str(res[0]["lastLogon"]), "x")
+ self.assertEqual(str(res[0]["objectClass"][0]), "user")
+ self.assertEqual(str(res[1].dn), self.samba4.dn("cn=B"))
+ self.assertTrue("dnsHostName" not in res[1])
+ self.assertEqual(str(res[1]["lastLogon"]), "y")
+ self.assertEqual(set(res[1]["objectClass"]), set([b"top"]))
+ self.assertEqual(str(res[2].dn), self.samba4.dn("cn=X"))
+ self.assertEqual(str(res[2]["dnsHostName"]), "x")
+ self.assertEqual(str(res[2]["lastLogon"]), "x")
+ self.assertEqual(str(res[2]["objectClass"][0]), "user")
+
+ # Testing search by parse tree
+
+ # Search by conjunction of local attributes
+ res = self.ldb.search(expression="(&(codePage=x)(revision=x))",
+ attrs=["dnsHostName", "lastLogon"])
+ self.assertEqual(len(res), 2)
+ res = sorted(res, key=attrgetter('dn'))
+ self.assertEqual(str(res[0].dn), self.samba4.dn("cn=X"))
+ self.assertEqual(str(res[0]["dnsHostName"]), "x")
+ self.assertEqual(str(res[0]["lastLogon"]), "x")
+ self.assertEqual(str(res[1].dn), self.samba4.dn("cn=Y"))
+ self.assertEqual(str(res[1]["dnsHostName"]), "y")
+ self.assertEqual(str(res[1]["lastLogon"]), "y")
+
+ # Search by conjunction of remote attributes
+ res = self.ldb.search(expression="(&(lastLogon=x)(description=x))",
+ attrs=["dnsHostName", "lastLogon"])
+ self.assertEqual(len(res), 2)
+ res = sorted(res, key=attrgetter('dn'))
+ self.assertEqual(str(res[0].dn), self.samba4.dn("cn=A"))
+ self.assertTrue("dnsHostName" not in res[0])
+ self.assertEqual(str(res[0]["lastLogon"]), "x")
+ self.assertEqual(str(res[1].dn), self.samba4.dn("cn=X"))
+ self.assertEqual(str(res[1]["dnsHostName"]), "x")
+ self.assertEqual(str(res[1]["lastLogon"]), "x")
+
+ # Search by conjunction of local and remote attribute
+ res = self.ldb.search(expression="(&(codePage=x)(description=x))",
+ attrs=["dnsHostName", "lastLogon"])
+ self.assertEqual(len(res), 2)
+ res = sorted(res, key=attrgetter('dn'))
+ self.assertEqual(str(res[0].dn), self.samba4.dn("cn=X"))
+ self.assertEqual(str(res[0]["dnsHostName"]), "x")
+ self.assertEqual(str(res[0]["lastLogon"]), "x")
+ self.assertEqual(str(res[1].dn), self.samba4.dn("cn=Y"))
+ self.assertEqual(str(res[1]["dnsHostName"]), "y")
+ self.assertEqual(str(res[1]["lastLogon"]), "y")
+
+ # Search by conjunction of local and remote attribute w/o match
+ attrs = ["dnsHostName", "lastLogon"]
+ res = self.ldb.search(expression="(&(codePage=x)(nextRid=x))",
+ attrs=attrs)
+ self.assertEqual(len(res), 0)
+ res = self.ldb.search(expression="(&(revision=x)(lastLogon=z))",
+ attrs=attrs)
+ self.assertEqual(len(res), 0)
+
+ # Search by disjunction of local attributes
+ res = self.ldb.search(expression="(|(revision=x)(dnsHostName=x))",
+ attrs=["dnsHostName", "lastLogon"])
+ self.assertEqual(len(res), 2)
+ res = sorted(res, key=attrgetter('dn'))
+ self.assertEqual(str(res[0].dn), self.samba4.dn("cn=X"))
+ self.assertEqual(str(res[0]["dnsHostName"]), "x")
+ self.assertEqual(str(res[0]["lastLogon"]), "x")
+ self.assertEqual(str(res[1].dn), self.samba4.dn("cn=Y"))
+ self.assertEqual(str(res[1]["dnsHostName"]), "y")
+ self.assertEqual(str(res[1]["lastLogon"]), "y")
+
+ # Search by disjunction of remote attributes
+ res = self.ldb.search(expression="(|(badPwdCount=x)(lastLogon=x))",
+ attrs=["dnsHostName", "lastLogon"])
+ self.assertEqual(len(res), 3)
+ res = sorted(res, key=attrgetter('dn'))
+ self.assertEqual(str(res[0].dn), self.samba4.dn("cn=A"))
+ self.assertFalse("dnsHostName" in res[0])
+ self.assertEqual(str(res[0]["lastLogon"]), "x")
+ self.assertEqual(str(res[1].dn), self.samba4.dn("cn=B"))
+ self.assertFalse("dnsHostName" in res[1])
+ self.assertEqual(str(res[1]["lastLogon"]), "y")
+ self.assertEqual(str(res[2].dn), self.samba4.dn("cn=X"))
+ self.assertEqual(str(res[2]["dnsHostName"]), "x")
+ self.assertEqual(str(res[2]["lastLogon"]), "x")
+
+ # Search by disjunction of local and remote attribute
+ res = self.ldb.search(expression="(|(revision=x)(lastLogon=y))",
+ attrs=["dnsHostName", "lastLogon"])
+ self.assertEqual(len(res), 3)
+ res = sorted(res, key=attrgetter('dn'))
+ self.assertEqual(str(res[0].dn), self.samba4.dn("cn=B"))
+ self.assertFalse("dnsHostName" in res[0])
+ self.assertEqual(str(res[0]["lastLogon"]), "y")
+ self.assertEqual(str(res[1].dn), self.samba4.dn("cn=X"))
+ self.assertEqual(str(res[1]["dnsHostName"]), "x")
+ self.assertEqual(str(res[1]["lastLogon"]), "x")
+ self.assertEqual(str(res[2].dn), self.samba4.dn("cn=Y"))
+ self.assertEqual(str(res[2]["dnsHostName"]), "y")
+ self.assertEqual(str(res[2]["lastLogon"]), "y")
+
+ # Search by disjunction of local and remote attribute w/o match
+ res = self.ldb.search(expression="(|(codePage=y)(nextRid=z))",
+ attrs=["dnsHostName", "lastLogon"])
+ self.assertEqual(len(res), 0)
+
+ # Search by negated local attribute
+ res = self.ldb.search(expression="(!(revision=x))",
+ attrs=["dnsHostName", "lastLogon"])
+ self.assertEqual(len(res), 6)
+ res = sorted(res, key=attrgetter('dn'))
+ self.assertEqual(str(res[0].dn), self.samba4.dn("cn=A"))
+ self.assertTrue("dnsHostName" not in res[0])
+ self.assertEqual(str(res[0]["lastLogon"]), "x")
+ self.assertEqual(str(res[1].dn), self.samba4.dn("cn=B"))
+ self.assertTrue("dnsHostName" not in res[1])
+ self.assertEqual(str(res[1]["lastLogon"]), "y")
+ self.assertEqual(str(res[2].dn), self.samba4.dn("cn=C"))
+ self.assertTrue("dnsHostName" not in res[2])
+ self.assertEqual(str(res[2]["lastLogon"]), "z")
+ self.assertEqual(str(res[3].dn), self.samba4.dn("cn=Z"))
+ self.assertEqual(str(res[3]["dnsHostName"]), "z")
+ self.assertEqual(str(res[3]["lastLogon"]), "z")
+
+ # Search by negated remote attribute
+ res = self.ldb.search(expression="(!(description=x))",
+ attrs=["dnsHostName", "lastLogon"])
+ self.assertEqual(len(res), 4)
+ res = sorted(res, key=attrgetter('dn'))
+ self.assertEqual(str(res[0].dn), self.samba4.dn("cn=C"))
+ self.assertTrue("dnsHostName" not in res[0])
+ self.assertEqual(str(res[0]["lastLogon"]), "z")
+ self.assertEqual(str(res[1].dn), self.samba4.dn("cn=Z"))
+ self.assertEqual(str(res[1]["dnsHostName"]), "z")
+ self.assertEqual(str(res[1]["lastLogon"]), "z")
+
+ # Search by negated conjunction of local attributes
+ res = self.ldb.search(expression="(!(&(codePage=x)(revision=x)))",
+ attrs=["dnsHostName", "lastLogon"])
+ self.assertEqual(len(res), 6)
+ res = sorted(res, key=attrgetter('dn'))
+ self.assertEqual(str(res[0].dn), self.samba4.dn("cn=A"))
+ self.assertTrue("dnsHostName" not in res[0])
+ self.assertEqual(str(res[0]["lastLogon"]), "x")
+ self.assertEqual(str(res[1].dn), self.samba4.dn("cn=B"))
+ self.assertTrue("dnsHostName" not in res[1])
+ self.assertEqual(str(res[1]["lastLogon"]), "y")
+ self.assertEqual(str(res[2].dn), self.samba4.dn("cn=C"))
+ self.assertTrue("dnsHostName" not in res[2])
+ self.assertEqual(str(res[2]["lastLogon"]), "z")
+ self.assertEqual(str(res[3].dn), self.samba4.dn("cn=Z"))
+ self.assertEqual(str(res[3]["dnsHostName"]), "z")
+ self.assertEqual(str(res[3]["lastLogon"]), "z")
+
+ # Search by negated conjunction of remote attributes
+ res = self.ldb.search(expression="(!(&(lastLogon=x)(description=x)))",
+ attrs=["dnsHostName", "lastLogon"])
+ self.assertEqual(len(res), 6)
+ res = sorted(res, key=attrgetter('dn'))
+ self.assertEqual(str(res[0].dn), self.samba4.dn("cn=B"))
+ self.assertTrue("dnsHostName" not in res[0])
+ self.assertEqual(str(res[0]["lastLogon"]), "y")
+ self.assertEqual(str(res[1].dn), self.samba4.dn("cn=C"))
+ self.assertTrue("dnsHostName" not in res[1])
+ self.assertEqual(str(res[1]["lastLogon"]), "z")
+ self.assertEqual(str(res[2].dn), self.samba4.dn("cn=Y"))
+ self.assertEqual(str(res[2]["dnsHostName"]), "y")
+ self.assertEqual(str(res[2]["lastLogon"]), "y")
+ self.assertEqual(str(res[3].dn), self.samba4.dn("cn=Z"))
+ self.assertEqual(str(res[3]["dnsHostName"]), "z")
+ self.assertEqual(str(res[3]["lastLogon"]), "z")
+
+ # Search by negated conjunction of local and remote attribute
+ res = self.ldb.search(expression="(!(&(codePage=x)(description=x)))",
+ attrs=["dnsHostName", "lastLogon"])
+ self.assertEqual(len(res), 6)
+ res = sorted(res, key=attrgetter('dn'))
+ self.assertEqual(str(res[0].dn), self.samba4.dn("cn=A"))
+ self.assertTrue("dnsHostName" not in res[0])
+ self.assertEqual(str(res[0]["lastLogon"]), "x")
+ self.assertEqual(str(res[1].dn), self.samba4.dn("cn=B"))
+ self.assertTrue("dnsHostName" not in res[1])
+ self.assertEqual(str(res[1]["lastLogon"]), "y")
+ self.assertEqual(str(res[2].dn), self.samba4.dn("cn=C"))
+ self.assertTrue("dnsHostName" not in res[2])
+ self.assertEqual(str(res[2]["lastLogon"]), "z")
+ self.assertEqual(str(res[3].dn), self.samba4.dn("cn=Z"))
+ self.assertEqual(str(res[3]["dnsHostName"]), "z")
+ self.assertEqual(str(res[3]["lastLogon"]), "z")
+
+ # Search by negated disjunction of local attributes
+ res = self.ldb.search(expression="(!(|(revision=x)(dnsHostName=x)))",
+ attrs=["dnsHostName", "lastLogon"])
+ res = sorted(res, key=attrgetter('dn'))
+ self.assertEqual(str(res[0].dn), self.samba4.dn("cn=A"))
+ self.assertTrue("dnsHostName" not in res[0])
+ self.assertEqual(str(res[0]["lastLogon"]), "x")
+ self.assertEqual(str(res[1].dn), self.samba4.dn("cn=B"))
+ self.assertTrue("dnsHostName" not in res[1])
+ self.assertEqual(str(res[1]["lastLogon"]), "y")
+ self.assertEqual(str(res[2].dn), self.samba4.dn("cn=C"))
+ self.assertTrue("dnsHostName" not in res[2])
+ self.assertEqual(str(res[2]["lastLogon"]), "z")
+ self.assertEqual(str(res[3].dn), self.samba4.dn("cn=Z"))
+ self.assertEqual(str(res[3]["dnsHostName"]), "z")
+ self.assertEqual(str(res[3]["lastLogon"]), "z")
+
+ # Search by negated disjunction of remote attributes
+ res = self.ldb.search(expression="(!(|(badPwdCount=x)(lastLogon=x)))",
+ attrs=["dnsHostName", "lastLogon"])
+ self.assertEqual(len(res), 5)
+ res = sorted(res, key=attrgetter('dn'))
+ self.assertEqual(str(res[0].dn), self.samba4.dn("cn=C"))
+ self.assertTrue("dnsHostName" not in res[0])
+ self.assertEqual(str(res[0]["lastLogon"]), "z")
+ self.assertEqual(str(res[1].dn), self.samba4.dn("cn=Y"))
+ self.assertEqual(str(res[1]["dnsHostName"]), "y")
+ self.assertEqual(str(res[1]["lastLogon"]), "y")
+ self.assertEqual(str(res[2].dn), self.samba4.dn("cn=Z"))
+ self.assertEqual(str(res[2]["dnsHostName"]), "z")
+ self.assertEqual(str(res[2]["lastLogon"]), "z")
+
+ # Search by negated disjunction of local and remote attribute
+ res = self.ldb.search(expression="(!(|(revision=x)(lastLogon=y)))",
+ attrs=["dnsHostName", "lastLogon"])
+ self.assertEqual(len(res), 5)
+ res = sorted(res, key=attrgetter('dn'))
+ self.assertEqual(str(res[0].dn), self.samba4.dn("cn=A"))
+ self.assertTrue("dnsHostName" not in res[0])
+ self.assertEqual(str(res[0]["lastLogon"]), "x")
+ self.assertEqual(str(res[1].dn), self.samba4.dn("cn=C"))
+ self.assertTrue("dnsHostName" not in res[1])
+ self.assertEqual(str(res[1]["lastLogon"]), "z")
+ self.assertEqual(str(res[2].dn), self.samba4.dn("cn=Z"))
+ self.assertEqual(str(res[2]["dnsHostName"]), "z")
+ self.assertEqual(str(res[2]["lastLogon"]), "z")
+
+ # Search by complex parse tree
+ res = self.ldb.search(expression="(|(&(revision=x)(dnsHostName=x))(!(&(description=x)(nextRid=y)))(badPwdCount=y))", attrs=["dnsHostName", "lastLogon"])
+ self.assertEqual(len(res), 7)
+ res = sorted(res, key=attrgetter('dn'))
+ self.assertEqual(str(res[0].dn), self.samba4.dn("cn=A"))
+ self.assertTrue("dnsHostName" not in res[0])
+ self.assertEqual(str(res[0]["lastLogon"]), "x")
+ self.assertEqual(str(res[1].dn), self.samba4.dn("cn=B"))
+ self.assertTrue("dnsHostName" not in res[1])
+ self.assertEqual(str(res[1]["lastLogon"]), "y")
+ self.assertEqual(str(res[2].dn), self.samba4.dn("cn=C"))
+ self.assertTrue("dnsHostName" not in res[2])
+ self.assertEqual(str(res[2]["lastLogon"]), "z")
+ self.assertEqual(str(res[3].dn), self.samba4.dn("cn=X"))
+ self.assertEqual(str(res[3]["dnsHostName"]), "x")
+ self.assertEqual(str(res[3]["lastLogon"]), "x")
+ self.assertEqual(str(res[4].dn), self.samba4.dn("cn=Z"))
+ self.assertEqual(str(res[4]["dnsHostName"]), "z")
+ self.assertEqual(str(res[4]["lastLogon"]), "z")
+
+ # Clean up
+ dns = [self.samba4.dn("cn=%s" % n) for n in ["A", "B", "C", "X", "Y", "Z"]]
+ for dn in dns:
+ self.ldb.delete(dn)
+
+ def test_map_modify_local(self):
+ """Modification of local records."""
+ # Add local record
+ dn = "cn=test,dc=idealx,dc=org"
+ self.ldb.add({"dn": dn,
+ "cn": "test",
+ "foo": "bar",
+ "revision": "1",
+ "description": "test"})
+ # Check it's there
+ attrs = ["foo", "revision", "description"]
+ res = self.ldb.search(dn, scope=SCOPE_BASE, attrs=attrs)
+ self.assertEqual(len(res), 1)
+ self.assertEqual(str(res[0].dn), dn)
+ self.assertEqual(str(res[0]["foo"]), "bar")
+ self.assertEqual(str(res[0]["revision"]), "1")
+ self.assertEqual(str(res[0]["description"]), "test")
+ # Check it's not in the local db
+ res = self.samba4.db.search(expression="(cn=test)",
+ scope=SCOPE_DEFAULT, attrs=attrs)
+ self.assertEqual(len(res), 0)
+ # Check it's not in the remote db
+ res = self.samba3.db.search(expression="(cn=test)",
+ scope=SCOPE_DEFAULT, attrs=attrs)
+ self.assertEqual(len(res), 0)
+
+ # Modify local record
+ ldif = """
+dn: """ + dn + """
+replace: foo
+foo: baz
+replace: description
+description: foo
+"""
+ self.ldb.modify_ldif(ldif)
+ # Check in local db
+ res = self.ldb.search(dn, scope=SCOPE_BASE, attrs=attrs)
+ self.assertEqual(len(res), 1)
+ self.assertEqual(str(res[0].dn), dn)
+ self.assertEqual(str(res[0]["foo"]), "baz")
+ self.assertEqual(str(res[0]["revision"]), "1")
+ self.assertEqual(str(res[0]["description"]), "foo")
+
+ # Rename local record
+ dn2 = "cn=toast,dc=idealx,dc=org"
+ self.ldb.rename(dn, dn2)
+ # Check in local db
+ res = self.ldb.search(dn2, scope=SCOPE_BASE, attrs=attrs)
+ self.assertEqual(len(res), 1)
+ self.assertEqual(str(res[0].dn), dn2)
+ self.assertEqual(str(res[0]["foo"]), "baz")
+ self.assertEqual(str(res[0]["revision"]), "1")
+ self.assertEqual(str(res[0]["description"]), "foo")
+
+ # Delete local record
+ self.ldb.delete(dn2)
+ # Check it's gone
+ res = self.ldb.search(dn2, scope=SCOPE_BASE)
+ self.assertEqual(len(res), 0)
+
+ def test_map_modify_remote_remote(self):
+ """Modification of remote data of remote records"""
+ # Add remote record
+ dn = self.samba4.dn("cn=test")
+ dn2 = self.samba3.dn("cn=test")
+ self.samba3.db.add({"dn": dn2,
+ "cn": "test",
+ "description": "foo",
+ "sambaBadPasswordCount": "3",
+ "sambaNextRid": "1001"})
+ # Check it's there
+ res = self.samba3.db.search(dn2, scope=SCOPE_BASE,
+ attrs=["description", "sambaBadPasswordCount", "sambaNextRid"])
+ self.assertEqual(len(res), 1)
+ self.assertEqual(str(res[0].dn), dn2)
+ self.assertEqual(str(res[0]["description"]), "foo")
+ self.assertEqual(str(res[0]["sambaBadPasswordCount"]), "3")
+ self.assertEqual(str(res[0]["sambaNextRid"]), "1001")
+ # Check in mapped db
+ attrs = ["description", "badPwdCount", "nextRid"]
+ res = self.ldb.search(dn, scope=SCOPE_BASE, attrs=attrs, expression="")
+ self.assertEqual(len(res), 1)
+ self.assertEqual(str(res[0].dn), dn)
+ self.assertEqual(str(res[0]["description"]), "foo")
+ self.assertEqual(str(res[0]["badPwdCount"]), "3")
+ self.assertEqual(str(res[0]["nextRid"]), "1001")
+ # Check in local db
+ res = self.samba4.db.search(dn, scope=SCOPE_BASE, attrs=attrs)
+ self.assertEqual(len(res), 0)
+
+ # Modify remote data of remote record
+ ldif = """
+dn: """ + dn + """
+replace: description
+description: test
+replace: badPwdCount
+badPwdCount: 4
+"""
+ self.ldb.modify_ldif(ldif)
+ # Check in mapped db
+ res = self.ldb.search(dn, scope=SCOPE_BASE,
+ attrs=["description", "badPwdCount", "nextRid"])
+ self.assertEqual(len(res), 1)
+ self.assertEqual(str(res[0].dn), dn)
+ self.assertEqual(str(res[0]["description"]), "test")
+ self.assertEqual(str(res[0]["badPwdCount"]), "4")
+ self.assertEqual(str(res[0]["nextRid"]), "1001")
+ # Check in remote db
+ res = self.samba3.db.search(dn2, scope=SCOPE_BASE,
+ attrs=["description", "sambaBadPasswordCount", "sambaNextRid"])
+ self.assertEqual(len(res), 1)
+ self.assertEqual(str(res[0].dn), dn2)
+ self.assertEqual(str(res[0]["description"]), "test")
+ self.assertEqual(str(res[0]["sambaBadPasswordCount"]), "4")
+ self.assertEqual(str(res[0]["sambaNextRid"]), "1001")
+
+ # Rename remote record
+ dn2 = self.samba4.dn("cn=toast")
+ self.ldb.rename(dn, dn2)
+ # Check in mapped db
+ dn = dn2
+ res = self.ldb.search(dn, scope=SCOPE_BASE,
+ attrs=["description", "badPwdCount", "nextRid"])
+ self.assertEqual(len(res), 1)
+ self.assertEqual(str(res[0].dn), dn)
+ self.assertEqual(str(res[0]["description"]), "test")
+ self.assertEqual(str(res[0]["badPwdCount"]), "4")
+ self.assertEqual(str(res[0]["nextRid"]), "1001")
+ # Check in remote db
+ dn2 = self.samba3.dn("cn=toast")
+ res = self.samba3.db.search(dn2, scope=SCOPE_BASE,
+ attrs=["description", "sambaBadPasswordCount", "sambaNextRid"])
+ self.assertEqual(len(res), 1)
+ self.assertEqual(str(res[0].dn), dn2)
+ self.assertEqual(str(res[0]["description"]), "test")
+ self.assertEqual(str(res[0]["sambaBadPasswordCount"]), "4")
+ self.assertEqual(str(res[0]["sambaNextRid"]), "1001")
+
+ # Delete remote record
+ self.ldb.delete(dn)
+ # Check in mapped db that it's removed
+ res = self.ldb.search(dn, scope=SCOPE_BASE)
+ self.assertEqual(len(res), 0)
+ # Check in remote db
+ res = self.samba3.db.search(dn2, scope=SCOPE_BASE)
+ self.assertEqual(len(res), 0)
+
+ def test_map_modify_remote_local(self):
+ """Modification of local data of remote records"""
+ # Add remote record (same as before)
+ dn = self.samba4.dn("cn=test")
+ dn2 = self.samba3.dn("cn=test")
+ self.samba3.db.add({"dn": dn2,
+ "cn": "test",
+ "description": "foo",
+ "sambaBadPasswordCount": "3",
+ "sambaNextRid": "1001"})
+
+ # Modify local data of remote record
+ ldif = """
+dn: """ + dn + """
+add: revision
+revision: 1
+replace: description
+description: test
+
+"""
+ self.ldb.modify_ldif(ldif)
+ # Check in mapped db
+ attrs = ["revision", "description"]
+ res = self.ldb.search(dn, scope=SCOPE_BASE, attrs=attrs)
+ self.assertEqual(len(res), 1)
+ self.assertEqual(str(res[0].dn), dn)
+ self.assertEqual(str(res[0]["description"]), "test")
+ self.assertEqual(str(res[0]["revision"]), "1")
+ # Check in remote db
+ res = self.samba3.db.search(dn2, scope=SCOPE_BASE, attrs=attrs)
+ self.assertEqual(len(res), 1)
+ self.assertEqual(str(res[0].dn), dn2)
+ self.assertEqual(str(res[0]["description"]), "test")
+ self.assertTrue("revision" not in res[0])
+ # Check in local db
+ res = self.samba4.db.search(dn, scope=SCOPE_BASE, attrs=attrs)
+ self.assertEqual(len(res), 1)
+ self.assertEqual(str(res[0].dn), dn)
+ self.assertTrue("description" not in res[0])
+ self.assertEqual(str(res[0]["revision"]), "1")
+
+ # Delete (newly) split record
+ self.ldb.delete(dn)
+
+ def test_map_modify_split(self):
+ """Testing modification of split records"""
+ # Add split record
+ dn = self.samba4.dn("cn=test")
+ dn2 = self.samba3.dn("cn=test")
+ self.ldb.add({
+ "dn": dn,
+ "cn": "test",
+ "description": "foo",
+ "badPwdCount": "3",
+ "nextRid": "1001",
+ "revision": "1"})
+ # Check it's there
+ attrs = ["description", "badPwdCount", "nextRid", "revision"]
+ res = self.ldb.search(dn, scope=SCOPE_BASE, attrs=attrs)
+ self.assertEqual(len(res), 1)
+ self.assertEqual(str(res[0].dn), dn)
+ self.assertEqual(str(res[0]["description"]), "foo")
+ self.assertEqual(str(res[0]["badPwdCount"]), "3")
+ self.assertEqual(str(res[0]["nextRid"]), "1001")
+ self.assertEqual(str(res[0]["revision"]), "1")
+ # Check in local db
+ res = self.samba4.db.search(dn, scope=SCOPE_BASE, attrs=attrs)
+ self.assertEqual(len(res), 1)
+ self.assertEqual(str(res[0].dn), dn)
+ self.assertTrue("description" not in res[0])
+ self.assertTrue("badPwdCount" not in res[0])
+ self.assertTrue("nextRid" not in res[0])
+ self.assertEqual(str(res[0]["revision"]), "1")
+ # Check in remote db
+ attrs = ["description", "sambaBadPasswordCount", "sambaNextRid",
+ "revision"]
+ res = self.samba3.db.search(dn2, scope=SCOPE_BASE, attrs=attrs)
+ self.assertEqual(len(res), 1)
+ self.assertEqual(str(res[0].dn), dn2)
+ self.assertEqual(str(res[0]["description"]), "foo")
+ self.assertEqual(str(res[0]["sambaBadPasswordCount"]), "3")
+ self.assertEqual(str(res[0]["sambaNextRid"]), "1001")
+ self.assertTrue("revision" not in res[0])
+
+ # Modify of split record
+ ldif = """
+dn: """ + dn + """
+replace: description
+description: test
+replace: badPwdCount
+badPwdCount: 4
+replace: revision
+revision: 2
+"""
+ self.ldb.modify_ldif(ldif)
+ # Check in mapped db
+ attrs = ["description", "badPwdCount", "nextRid", "revision"]
+ res = self.ldb.search(dn, scope=SCOPE_BASE, attrs=attrs)
+ self.assertEqual(len(res), 1)
+ self.assertEqual(str(res[0].dn), dn)
+ self.assertEqual(str(res[0]["description"]), "test")
+ self.assertEqual(str(res[0]["badPwdCount"]), "4")
+ self.assertEqual(str(res[0]["nextRid"]), "1001")
+ self.assertEqual(str(res[0]["revision"]), "2")
+ # Check in local db
+ res = self.samba4.db.search(dn, scope=SCOPE_BASE, attrs=attrs)
+ self.assertEqual(len(res), 1)
+ self.assertEqual(str(res[0].dn), dn)
+ self.assertTrue("description" not in res[0])
+ self.assertTrue("badPwdCount" not in res[0])
+ self.assertTrue("nextRid" not in res[0])
+ self.assertEqual(str(res[0]["revision"]), "2")
+ # Check in remote db
+ attrs = ["description", "sambaBadPasswordCount", "sambaNextRid",
+ "revision"]
+ res = self.samba3.db.search(dn2, scope=SCOPE_BASE, attrs=attrs)
+ self.assertEqual(len(res), 1)
+ self.assertEqual(str(res[0].dn), dn2)
+ self.assertEqual(str(res[0]["description"]), "test")
+ self.assertEqual(str(res[0]["sambaBadPasswordCount"]), "4")
+ self.assertEqual(str(res[0]["sambaNextRid"]), "1001")
+ self.assertTrue("revision" not in res[0])
+
+ # Rename split record
+ dn2 = self.samba4.dn("cn=toast")
+ self.ldb.rename(dn, dn2)
+ # Check in mapped db
+ dn = dn2
+ attrs = ["description", "badPwdCount", "nextRid", "revision"]
+ res = self.ldb.search(dn, scope=SCOPE_BASE, attrs=attrs)
+ self.assertEqual(len(res), 1)
+ self.assertEqual(str(res[0].dn), dn)
+ self.assertEqual(str(res[0]["description"]), "test")
+ self.assertEqual(str(res[0]["badPwdCount"]), "4")
+ self.assertEqual(str(res[0]["nextRid"]), "1001")
+ self.assertEqual(str(res[0]["revision"]), "2")
+ # Check in local db
+ res = self.samba4.db.search(dn, scope=SCOPE_BASE, attrs=attrs)
+ self.assertEqual(len(res), 1)
+ self.assertEqual(str(res[0].dn), dn)
+ self.assertTrue("description" not in res[0])
+ self.assertTrue("badPwdCount" not in res[0])
+ self.assertTrue("nextRid" not in res[0])
+ self.assertEqual(str(res[0]["revision"]), "2")
+ # Check in remote db
+ dn2 = self.samba3.dn("cn=toast")
+ res = self.samba3.db.search(dn2, scope=SCOPE_BASE,
+ attrs=["description", "sambaBadPasswordCount", "sambaNextRid",
+ "revision"])
+ self.assertEqual(len(res), 1)
+ self.assertEqual(str(res[0].dn), dn2)
+ self.assertEqual(str(res[0]["description"]), "test")
+ self.assertEqual(str(res[0]["sambaBadPasswordCount"]), "4")
+ self.assertEqual(str(res[0]["sambaNextRid"]), "1001")
+ self.assertTrue("revision" not in res[0])
+
+ # Delete split record
+ self.ldb.delete(dn)
+ # Check in mapped db
+ res = self.ldb.search(dn, scope=SCOPE_BASE)
+ self.assertEqual(len(res), 0)
+ # Check in local db
+ res = self.samba4.db.search(dn, scope=SCOPE_BASE)
+ self.assertEqual(len(res), 0)
+ # Check in remote db
+ res = self.samba3.db.search(dn2, scope=SCOPE_BASE)
+ self.assertEqual(len(res), 0)
diff --git a/python/samba/tests/samba_startup_fl_change.py b/python/samba/tests/samba_startup_fl_change.py
new file mode 100644
index 0000000..54fa9f8
--- /dev/null
+++ b/python/samba/tests/samba_startup_fl_change.py
@@ -0,0 +1,180 @@
+# Unix SMB/CIFS implementation. Tests for dsdb
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2023
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for samba.dsdb."""
+
+from samba.credentials import Credentials
+from samba.samdb import SamDB
+from samba.auth import system_session
+from samba.param import LoadParm
+from samba import dsdb, functional_level
+import ldb
+
+
+from samba.tests.samba_tool.base import SambaToolCmdTest
+import os
+import shutil
+import tempfile
+
+class SambaFLStartUpTests(SambaToolCmdTest):
+ """Test the samba binary sets the DC FL on startup for RW DCs"""
+
+ @classmethod
+ def setUpClass(cls):
+ super().setUpClass()
+ cls.classtempdir = tempfile.mkdtemp()
+ cls.tempsambadir = os.path.join(cls.classtempdir, "samba")
+
+ command = (
+ "samba-tool " +
+ "domain provision " +
+ "--realm=foo.example.com " +
+ "--domain=FOO " +
+ ("--targetdir=%s " % cls.tempsambadir) +
+ "--use-ntvfs"
+ )
+
+ (result, out, err) = cls.run_command(command)
+ if (result != 0):
+ raise AssertionError
+
+ @classmethod
+ def tearDownClass(cls):
+ super().tearDownClass()
+ shutil.rmtree(cls.tempsambadir)
+
+ def setUp(self):
+ super().setUp()
+ path = os.path.join(self.tempsambadir, "etc/smb.conf")
+ self.lp = LoadParm(filename_for_non_global_lp=path)
+ self.creds = Credentials()
+ self.creds.guess(self.lp)
+ self.session = system_session()
+ self.samdb = SamDB(session_info=self.session,
+ credentials=self.creds,
+ lp=self.lp)
+
+
+ def test_initial_db_fl_state(self):
+ server_dn = self.samdb.get_dsServiceName()
+ res = self.samdb.search(base=server_dn,
+ scope=ldb.SCOPE_BASE,
+ attrs=["msDS-Behavior-Version"])
+        # This confirms the DC (server object) functional level is
+        # 2008 R2 by default; important to verify the original state
+ self.assertEqual(int(res[0]["msDS-Behavior-Version"][0]),
+ dsdb.DS_DOMAIN_FUNCTION_2008_R2)
+
+ def test_initial_rootdse_domain_fl_state(self):
+ res = self.samdb.search(base="",
+ scope=ldb.SCOPE_BASE,
+ attrs=["domainControllerFunctionality"])
+ self.assertEqual(int(res[0]["domainControllerFunctionality"][0]),
+ dsdb.DS_DOMAIN_FUNCTION_2008_R2)
+
+ def test_initial_rootdse_dc_fl_state(self):
+ res = self.samdb.search(base="",
+ scope=ldb.SCOPE_BASE,
+ attrs=["domainFunctionality"])
+ self.assertEqual(int(res[0]["domainFunctionality"][0]),
+ dsdb.DS_DOMAIN_FUNCTION_2008_R2)
+
+ def test_initial_lp_fl_state(self):
+ lp_fl = self.lp.get("ad dc functional level")
+ # This confirms the domain is in FL 2008 R2 by default, this is
+ # important to verify the original state
+ self.assertEqual(lp_fl, "2008_R2")
+
+ def test_initial_lp_fl_state_mapped(self):
+ # Confirm the same via the dc_level_from_lp wrapper
+ self.assertEqual(functional_level.dc_level_from_lp(self.lp),
+ dsdb.DS_DOMAIN_FUNCTION_2008_R2)
+
+ def fixup_fl(self, dn, fl):
+ msg = ldb.Message()
+ msg.dn = dn
+ msg["msDS-Behavior-Version"] = (
+ ldb.MessageElement(str(fl),
+ ldb.FLAG_MOD_REPLACE,
+ "msDS-Behavior-Version"))
+ self.samdb.modify(msg)
+
+ def test_change_db_dc_fl(self):
+ server_dn = ldb.Dn(self.samdb, self.samdb.get_dsServiceName())
+ msg = ldb.Message()
+ msg.dn = server_dn
+ msg["msDS-Behavior-Version"] = (
+ ldb.MessageElement(str(dsdb.DS_DOMAIN_FUNCTION_2012_R2),
+ ldb.FLAG_MOD_REPLACE,
+ "msDS-Behavior-Version"))
+ self.samdb.modify(msg)
+ self.addCleanup(self.fixup_fl, msg.dn, dsdb.DS_DOMAIN_FUNCTION_2008_R2)
+
+ samdb2 = SamDB(session_info=self.session,
+ credentials=self.creds,
+ lp=self.lp)
+
+ # Check that the DB set to 2012_R2 has got as far as the rootDSE handler on a new connection
+ res = samdb2.search(base="",
+ scope=ldb.SCOPE_BASE,
+ attrs=["domainControllerFunctionality"])
+ self.assertEqual(int(res[0]["domainControllerFunctionality"][0]),
+ dsdb.DS_DOMAIN_FUNCTION_2012_R2)
+
+ def test_incorrect_db_dc_fl(self):
+ server_dn = ldb.Dn(self.samdb, self.samdb.get_dsServiceName())
+ self.addCleanup(self.fixup_fl, server_dn, dsdb.DS_DOMAIN_FUNCTION_2008_R2)
+
+ old_lp_fl = self.lp.get("ad dc functional level")
+ self.lp.set("ad dc functional level",
+ "2016")
+ self.addCleanup(self.lp.set, "ad dc functional level", old_lp_fl)
+
+ dsdb.check_and_update_fl(self.samdb, self.lp)
+
+ # Check this has been set to 2016 per the smb.conf setting
+ res = self.samdb.search(base="",
+ scope=ldb.SCOPE_BASE,
+ attrs=["domainControllerFunctionality"])
+ self.assertEqual(int(res[0]["domainControllerFunctionality"][0]),
+ dsdb.DS_DOMAIN_FUNCTION_2016)
+
+ samdb3 = SamDB(session_info=self.session,
+ credentials=self.creds,
+ lp=self.lp)
+
+ # Check this is still set on re-read (not just the opaque)
+ res = samdb3.search(base="",
+ scope=ldb.SCOPE_BASE,
+ attrs=["domainControllerFunctionality"])
+ self.assertEqual(int(res[0]["domainControllerFunctionality"][0]),
+ dsdb.DS_DOMAIN_FUNCTION_2016)
+
+ res = self.samdb.search(base=server_dn,
+ scope=ldb.SCOPE_BASE,
+ attrs=["msDS-Behavior-Version"])
+ self.assertEqual(int(res[0]["msDS-Behavior-Version"][0]),
+ dsdb.DS_DOMAIN_FUNCTION_2016)
+
+ self.assertEqual(functional_level.dc_level_from_lp(self.lp),
+ dsdb.DS_DOMAIN_FUNCTION_2016)
+ self.assertEqual(self.lp.get("ad dc functional level"),
+ "2016")
+
+if __name__ == "__main__":
+ import unittest
+ unittest.main()
diff --git a/python/samba/tests/samba_tool/__init__.py b/python/samba/tests/samba_tool/__init__.py
new file mode 100644
index 0000000..3d7f059
--- /dev/null
+++ b/python/samba/tests/samba_tool/__init__.py
@@ -0,0 +1,15 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Sean Dague <sdague@linux.vnet.ibm.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
diff --git a/python/samba/tests/samba_tool/base.py b/python/samba/tests/samba_tool/base.py
new file mode 100644
index 0000000..a4f4578
--- /dev/null
+++ b/python/samba/tests/samba_tool/base.py
@@ -0,0 +1,137 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Sean Dague <sdague@linux.vnet.ibm.com> 2011
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+# This provides a wrapper around the cmd interface so that tests can
+# easily be built on top of it and have minimal code to run basic tests
+# of the commands. A list of the environmental variables can be found in
+# ~/selftest/selftest.pl
+#
+# These can all be accessed via os.environ["VARIABLENAME"] when needed
+
+import os
+import random
+import string
+from io import StringIO
+
+import samba.getopt as options
+import samba.tests
+from samba.auth import system_session
+from samba.getopt import OptionParser
+from samba.netcmd.main import cmd_sambatool
+from samba.samdb import SamDB
+
+
+def truncate_string(s, cutoff=100):
+ if len(s) < cutoff + 15:
+ return s
+ return s[:cutoff] + '[%d more characters]' % (len(s) - cutoff)
+
+
+class SambaToolCmdTest(samba.tests.BlackboxTestCase):
+ # Use a class level reference to StringIO, which subclasses can
+ # override if they need to (to e.g. add a lying isatty() method).
+ stringIO = StringIO
+
+ @staticmethod
+ def getSamDB(*argv):
+ """a convenience function to get a samdb instance so that we can query it"""
+
+ parser = OptionParser()
+ sambaopts = options.SambaOptions(parser)
+ credopts = options.CredentialsOptions(parser)
+ hostopts = options.HostOptions(parser)
+ parser.parse_args(list(argv))
+
+ lp = sambaopts.get_loadparm()
+ creds = credopts.get_credentials(lp, fallback_machine=True)
+
+ return SamDB(url=hostopts.H, session_info=system_session(),
+ credentials=creds, lp=lp)
+
+ @classmethod
+ def _run(cls, *argv):
+ """run a samba-tool command"""
+ cmd, args = cmd_sambatool()._resolve('samba-tool', *argv,
+ outf=cls.stringIO(),
+ errf=cls.stringIO())
+ result = cmd._run(*args)
+ return (result, cmd.outf.getvalue(), cmd.errf.getvalue())
+
+ runcmd = _run
+ runsubcmd = _run
+
+ def runsublevelcmd(self, name, sublevels, *args):
+ """run a command with any number of sub command levels"""
+ # This is a weird and clunky interface for running a
+ # subcommand. Use self.runcmd() instead.
+ return self._run(name, *sublevels, *args)
+
+ def assertCmdSuccess(self, exit, out, err, msg=""):
+ # Make sure we allow '\n]\n' in stdout and stderr
+ # without causing problems with the subunit protocol.
+ # We just inject a space...
+ msg = "exit[%s] stdout[%s] stderr[%s]: %s" % (exit, out, err, msg)
+ self.assertIsNone(exit, msg=msg.replace("\n]\n", "\n] \n"))
+
+ def assertCmdFail(self, val, msg=""):
+ self.assertIsNotNone(val, msg)
+
+ def assertMatch(self, base, string, msg=None):
+ # Note: we should stop doing this and just use self.assertIn()
+ if msg is None:
+ msg = "%r is not in %r" % (truncate_string(string),
+ truncate_string(base))
+ self.assertIn(string, base, msg)
+
+ def randomName(self, count=8):
+ """Create a random name, cap letters and numbers, and always starting with a letter"""
+ name = random.choice(string.ascii_uppercase)
+ name += ''.join(random.choice(string.ascii_uppercase + string.ascii_lowercase + string.digits) for x in range(count - 1))
+ return name
+
+ def randomXid(self):
+ # pick some unused, high UID/GID range to avoid interference
+ # from the system the test runs on
+
+ # initialize a list to store used IDs
+ try:
+ self.used_xids
+ except AttributeError:
+ self.used_xids = []
+
+ # try to get an unused ID
+ failed = 0
+ while failed < 50:
+ xid = random.randint(4711000, 4799000)
+ if xid not in self.used_xids:
+ self.used_xids += [xid]
+ return xid
+ failed += 1
+ assert False, "No Xid are available"
+
+ def assertWithin(self, val1, val2, delta, msg=""):
+ """Assert that val1 is within delta of val2, useful for time computations"""
+ self.assertTrue(((val1 + delta) > val2) and ((val1 - delta) < val2), msg)
+
+ def cleanup_join(self, netbios_name):
+ (result, out, err) \
+ = self.runsubcmd("domain",
+ "demote",
+ ("--remove-other-dead-server=%s " % netbios_name),
+ ("-U%s%%%s" % (os.environ["USERNAME"], os.environ["PASSWORD"])),
+ ("--server=%s" % os.environ["SERVER"]))
+
+ self.assertCmdSuccess(result, out, err)
diff --git a/python/samba/tests/samba_tool/computer.py b/python/samba/tests/samba_tool/computer.py
new file mode 100644
index 0000000..b60e756
--- /dev/null
+++ b/python/samba/tests/samba_tool/computer.py
@@ -0,0 +1,378 @@
+# Unix SMB/CIFS implementation.
+#
+# Copyright (C) Bjoern Baumbach <bb@sernet.de> 2018
+#
+# based on group.py:
+# Copyright (C) Michael Adam 2012
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import os
+import ldb
+from samba.tests.samba_tool.base import SambaToolCmdTest
+from samba import dsdb
+from samba.ndr import ndr_unpack, ndr_pack
+from samba.dcerpc import dnsp
+
+
+class ComputerCmdTestCase(SambaToolCmdTest):
+ """Tests for samba-tool computer subcommands"""
+ computers = []
+ samdb = None
+
+    def setUp(self):
+        """Create four test computers on the DC and verify them.
+
+        The computers vary in their IP addresses (IPv4/IPv6/both/none),
+        their servicePrincipalName lists, and whether the supplied name
+        already carries the trailing '$'.  Each is checked for name,
+        sAMAccountName and description after creation.
+        """
+        super().setUp()
+        self.creds = "-U%s%%%s" % (os.environ["DC_USERNAME"], os.environ["DC_PASSWORD"])
+        self.samdb = self.getSamDB("-H", "ldap://%s" % os.environ["DC_SERVER"], self.creds)
+        # ips used to test --ip-address option
+        self.ipv4 = '10.10.10.10'
+        self.ipv6 = '2001:0db8:0a0b:12f0:0000:0000:0000:0001'
+        computer_basename = self.randomName().lower()
+        data = [
+            {
+                'name': computer_basename + 'cmp1',
+                'ip_address_list': [self.ipv4]
+            },
+            {
+                'name': computer_basename + 'cmp2',
+                'ip_address_list': [self.ipv6],
+                'service_principal_name_list': [
+                    'host/' + computer_basename + 'SPN20',
+                ],
+            },
+            {
+                # name given with trailing '$' on purpose
+                'name': computer_basename + 'cmp3$',
+                'ip_address_list': [self.ipv4, self.ipv6],
+                'service_principal_name_list': [
+                    'host/' + computer_basename + 'SPN30',
+                    'host/' + computer_basename + 'SPN31',
+                ],
+            },
+            {
+                'name': computer_basename + 'cmp4$',
+            },
+        ]
+        self.computers = [self._randomComputer(base=item) for item in data]
+
+        # setup the 4 computers and ensure they are correct
+        for computer in self.computers:
+            (result, out, err) = self._create_computer(computer)
+
+            self.assertCmdSuccess(result, out, err)
+            self.assertNotIn(
+                "ERROR", err, "There shouldn't be any error message")
+            self.assertIn("Computer '%s' added successfully" %
+                          computer["name"], out)
+
+            found = self._find_computer(computer["name"])
+
+            self.assertIsNotNone(found)
+
+            # "name" never carries the '$'; sAMAccountName always does
+            expectedname = computer["name"].rstrip('$')
+            expectedsamaccountname = computer["name"]
+            if not computer["name"].endswith('$'):
+                expectedsamaccountname = "%s$" % computer["name"]
+            self.assertEqual("%s" % found.get("name"), expectedname)
+            self.assertEqual("%s" % found.get("sAMAccountName"),
+                             expectedsamaccountname)
+            self.assertEqual("%s" % found.get("description"),
+                             computer["description"])
+
+    def tearDown(self):
+        """Delete any test computer a test left behind.
+
+        Each computer is only deleted if it still exists, so tests that
+        already deleted their computers pass through cleanly.  Note the
+        base-class tearDown runs *before* this cleanup.
+        """
+        super().tearDown()
+        # clean up all the left over computers, just in case
+        for computer in self.computers:
+            if self._find_computer(computer["name"]):
+                (result, out, err) = self.runsubcmd("computer", "delete",
+                                                    "%s" % computer["name"])
+                self.assertCmdSuccess(result, out, err,
+                                      "Failed to delete computer '%s'" %
+                                      computer["name"])
+
+    def test_newcomputer_with_service_principal_name(self):
+        """Verify servicePrincipalName values created by setUp.
+
+        Computers created without the option must have an empty SPN set.
+        """
+        # Each computer should have correct servicePrincipalName as provided.
+        for computer in self.computers:
+            expected_names = computer.get('service_principal_name_list', [])
+            found = self._find_service_principal_name(computer['name'], expected_names)
+            self.assertTrue(found)
+
+    def test_newcomputer_with_dns_records(self):
+        """Verify DNS records created with the computers, then verify
+        they are removed again when the computers are deleted.
+
+        Side effect: this test deletes all computers created by setUp
+        (the next test's setUp recreates them).
+        """
+
+        # Each computer should have correct DNS record and ip address.
+        for computer in self.computers:
+            for ip_address in computer.get('ip_address_list', []):
+                found = self._find_dns_record(computer['name'], ip_address)
+                self.assertTrue(found)
+
+        # try to delete all the computers we just created
+        for computer in self.computers:
+            (result, out, err) = self.runsubcmd("computer", "delete",
+                                                "%s" % computer["name"])
+            self.assertCmdSuccess(result, out, err,
+                                  "Failed to delete computer '%s'" %
+                                  computer["name"])
+            found = self._find_computer(computer["name"])
+            self.assertIsNone(found,
+                              "Deleted computer '%s' still exists" %
+                              computer["name"])
+
+        # all DNS records should be gone
+        for computer in self.computers:
+            for ip_address in computer.get('ip_address_list', []):
+                found = self._find_dns_record(computer['name'], ip_address)
+                self.assertFalse(found)
+
+    def test_newcomputer(self):
+        """This tests the "computer add" and "computer delete" commands"""
+        # try to create all the computers again, this should fail
+        for computer in self.computers:
+            (result, out, err) = self._create_computer(computer)
+            self.assertCmdFail(result, "Succeeded to add existing computer")
+            self.assertIn("already exists", err)
+
+        # try to delete all the computers we just added
+        for computer in self.computers:
+            (result, out, err) = self.runsubcmd("computer", "delete", "%s" %
+                                                computer["name"])
+            self.assertCmdSuccess(result, out, err,
+                                  "Failed to delete computer '%s'" %
+                                  computer["name"])
+            found = self._find_computer(computer["name"])
+            self.assertIsNone(found,
+                              "Deleted computer '%s' still exists" %
+                              computer["name"])
+
+        # test creating computers
+        # (unlike setUp, only the description option is passed here,
+        # not --ip-address/--service-principal-name)
+        for computer in self.computers:
+            (result, out, err) = self.runsubcmd(
+                "computer", "add", "%s" % computer["name"],
+                "--description=%s" % computer["description"])
+
+            self.assertCmdSuccess(result, out, err)
+            self.assertEqual(err, "", "There shouldn't be any error message")
+            self.assertIn("Computer '%s' added successfully" %
+                          computer["name"], out)
+
+            found = self._find_computer(computer["name"])
+
+            expectedname = computer["name"].rstrip('$')
+            expectedsamaccountname = computer["name"]
+            if not computer["name"].endswith('$'):
+                expectedsamaccountname = "%s$" % computer["name"]
+            self.assertEqual("%s" % found.get("name"), expectedname)
+            self.assertEqual("%s" % found.get("sAMAccountName"),
+                             expectedsamaccountname)
+            self.assertEqual("%s" % found.get("description"),
+                             computer["description"])
+
+    def test_list(self):
+        """'computer list' output must mention every workstation trust
+        account found by a direct LDAP search."""
+        (result, out, err) = self.runsubcmd("computer", "list")
+        self.assertCmdSuccess(result, out, err, "Error running list")
+
+        search_filter = ("(sAMAccountType=%u)" %
+                         dsdb.ATYPE_WORKSTATION_TRUST)
+
+        computerlist = self.samdb.search(base=self.samdb.domain_dn(),
+                                         scope=ldb.SCOPE_SUBTREE,
+                                         expression=search_filter,
+                                         attrs=["samaccountname"])
+
+        self.assertTrue(len(computerlist) > 0, "no computers found in samdb")
+
+        for computerobj in computerlist:
+            name = computerobj.get("samaccountname", idx=0)
+            # assertMatch returns None; the assignment is vestigial
+            found = self.assertMatch(out, str(name),
+                                     "computer '%s' not found" % name)
+
+    def test_list_full_dn(self):
+        """'computer list --full-dn' output must mention the DN of every
+        workstation trust account found by a direct LDAP search."""
+        (result, out, err) = self.runsubcmd("computer", "list", "--full-dn")
+        self.assertCmdSuccess(result, out, err, "Error running list")
+
+        search_filter = ("(sAMAccountType=%u)" %
+                         dsdb.ATYPE_WORKSTATION_TRUST)
+
+        computerlist = self.samdb.search(base=self.samdb.domain_dn(),
+                                         scope=ldb.SCOPE_SUBTREE,
+                                         expression=search_filter,
+                                         attrs=[])
+
+        self.assertTrue(len(computerlist) > 0, "no computers found in samdb")
+
+        for computerobj in computerlist:
+            name = computerobj.get("dn", idx=0)
+            # assertMatch returns None; the assignment is vestigial
+            found = self.assertMatch(out, str(name),
+                                     "computer '%s' not found" % name)
+
+    def test_list_base_dn(self):
+        """'computer list -b BASE_DN' must list the computers found by a
+        direct LDAP search under the same base DN."""
+        base_dn = str(self.samdb.domain_dn())
+        (result, out, err) = self.runsubcmd("computer", "list", "-b", base_dn)
+        self.assertCmdSuccess(result, out, err, "Error running list")
+
+        search_filter = ("(sAMAccountType=%u)" %
+                         dsdb.ATYPE_WORKSTATION_TRUST)
+
+        computerlist = self.samdb.search(base=base_dn,
+                                         scope=ldb.SCOPE_SUBTREE,
+                                         expression=search_filter,
+                                         attrs=["name"])
+
+        self.assertTrue(len(computerlist) > 0, "no computers found in samdb")
+
+        for computerobj in computerlist:
+            name = computerobj.get("name", idx=0)
+            # assertMatch returns None; the assignment is vestigial
+            found = self.assertMatch(out, str(name),
+                                     "computer '%s' not found" % name)
+
+    def test_move(self):
+        """Move each computer into a fresh OU, verify its new DN, move it
+        back, then delete the OU."""
+        parentou = self._randomOU({"name": "parentOU"})
+        (result, out, err) = self._create_ou(parentou)
+        self.assertCmdSuccess(result, out, err)
+
+        for computer in self.computers:
+            olddn = self._find_computer(computer["name"]).get("dn")
+
+            (result, out, err) = self.runsubcmd("computer", "move",
+                                                "%s" % computer["name"],
+                                                "OU=%s" % parentou["name"])
+            self.assertCmdSuccess(result, out, err,
+                                  "Failed to move computer '%s'" %
+                                  computer["name"])
+            self.assertEqual(err, "", "There shouldn't be any error message")
+            self.assertIn('Moved computer "%s"' % computer["name"], out)
+
+            found = self._find_computer(computer["name"])
+            self.assertNotEqual(found.get("dn"), olddn,
+                                ("Moved computer '%s' still exists with the "
+                                 "same dn" % computer["name"]))
+            # the RDN uses the computer name without the trailing '$'
+            computername = computer["name"].rstrip('$')
+            newexpecteddn = ldb.Dn(self.samdb,
+                                   "CN=%s,OU=%s,%s" %
+                                   (computername, parentou["name"],
+                                    self.samdb.domain_dn()))
+            self.assertEqual(found.get("dn"), newexpecteddn,
+                             "Moved computer '%s' does not exist" %
+                             computer["name"])
+
+            # move it back so later tests see the original location
+            (result, out, err) = self.runsubcmd("computer", "move",
+                                                "%s" % computer["name"],
+                                                "%s" % olddn.parent())
+            self.assertCmdSuccess(result, out, err,
+                                  "Failed to move computer '%s'" %
+                                  computer["name"])
+
+        (result, out, err) = self.runsubcmd("ou", "delete",
+                                            "OU=%s" % parentou["name"])
+        self.assertCmdSuccess(result, out, err,
+                              "Failed to delete ou '%s'" % parentou["name"])
+
+    def _randomComputer(self, base=None):
+        """create a computer with random attribute values, you can specify base
+        attributes
+
+        Keys in *base* override the generated 'name' and 'description'.
+        """
+        if base is None:
+            base = {}
+
+        computer = {
+            "name": self.randomName(),
+            "description": self.randomName(count=100),
+        }
+        computer.update(base)
+        return computer
+
+    def _randomOU(self, base=None):
+        """create an ou with random attribute values, you can specify base
+        attributes
+
+        Keys in *base* override the generated 'name' and 'description'.
+        """
+        if base is None:
+            base = {}
+
+        ou = {
+            "name": self.randomName(),
+            "description": self.randomName(count=100),
+        }
+        ou.update(base)
+        return ou
+
+    def _create_computer(self, computer):
+        """Run 'samba-tool computer add' for *computer*, passing any
+        ip_address_list / service_principal_name_list entries as options.
+
+        Returns the (result, out, err) tuple from runsubcmd().
+        """
+        args = '{0} {1} --description={2}'.format(
+            computer['name'], self.creds, computer["description"])
+
+        for ip_address in computer.get('ip_address_list', []):
+            args += ' --ip-address={0}'.format(ip_address)
+
+        for service_principal_name in computer.get('service_principal_name_list', []):
+            args += ' --service-principal-name={0}'.format(service_principal_name)
+
+        # args is split on whitespace, so no value may contain spaces
+        # (randomName() never produces any)
+        args = args.split()
+
+        return self.runsubcmd('computer', 'add', *args)
+
+    def _create_ou(self, ou):
+        """Run 'samba-tool ou add' for *ou* and return (result, out, err)."""
+        return self.runsubcmd("ou", "add", "OU=%s" % ou["name"],
+                              "--description=%s" % ou["description"])
+
+    def _find_computer(self, name):
+        """Look up a computer account by name in the sam database.
+
+        The trailing '$' is appended when missing, since the search goes
+        by sAMAccountName.  Returns the first matching ldb message, or
+        None if the computer does not exist.
+        """
+        samaccountname = name
+        if not name.endswith('$'):
+            samaccountname = "%s$" % name
+        search_filter = ("(&(sAMAccountName=%s)(objectCategory=%s,%s))" %
+                         (ldb.binary_encode(samaccountname),
+                          "CN=Computer,CN=Schema,CN=Configuration",
+                          self.samdb.domain_dn()))
+        computerlist = self.samdb.search(base=self.samdb.domain_dn(),
+                                         scope=ldb.SCOPE_SUBTREE,
+                                         expression=search_filter)
+        if computerlist:
+            return computerlist[0]
+        else:
+            return None
+
+    def _find_dns_record(self, name, ip_address):
+        """Return True if a live (non-tombstoned) dnsNode for the
+        computer name carries a record whose data matches *ip_address*.
+        """
+        name = name.rstrip('$')  # computername
+        records = self.samdb.search(
+            base="DC=DomainDnsZones,{0}".format(self.samdb.get_default_basedn()),
+            scope=ldb.SCOPE_SUBTREE,
+            expression="(&(objectClass=dnsNode)(name={0}))".format(name),
+            attrs=['dnsRecord', 'dNSTombstoned'])
+
+        # unpack data and compare
+        for record in records:
+            if 'dNSTombstoned' in record and str(record['dNSTombstoned']) == 'TRUE':
+                # if a record is dNSTombstoned, ignore it.
+                continue
+            for dns_record_bin in record['dnsRecord']:
+                dns_record_obj = ndr_unpack(dnsp.DnssrvRpcRecord, dns_record_bin)
+                # str() of the record data yields the address for A/AAAA
+                # records; other record types simply won't match
+                ip = str(dns_record_obj.data)
+
+                if str(ip) == str(ip_address):
+                    return True
+
+        return False
+
+    def _find_service_principal_name(self, name, expected_service_principal_names):
+        """Find all servicePrincipalName values and compare with expected_service_principal_names
+
+        The comparison is set-based, i.e. order-insensitive and exact:
+        extra or missing SPNs both yield False.
+        """
+        samaccountname = name.strip('$') + '$'
+        search_filter = ("(&(sAMAccountName=%s)(objectCategory=%s,%s))" %
+                         (ldb.binary_encode(samaccountname),
+                          "CN=Computer,CN=Schema,CN=Configuration",
+                          self.samdb.domain_dn()))
+        computer_list = self.samdb.search(
+            base=self.samdb.domain_dn(),
+            scope=ldb.SCOPE_SUBTREE,
+            expression=search_filter,
+            attrs=['servicePrincipalName'])
+        names = set()
+        for computer in computer_list:
+            # NOTE(review): the loop variable shadows the 'name' parameter;
+            # harmless here since the parameter is no longer used
+            for name in computer.get('servicePrincipalName', []):
+                names.add(str(name))
+        return names == set(expected_service_principal_names)
diff --git a/python/samba/tests/samba_tool/computer_edit.sh b/python/samba/tests/samba_tool/computer_edit.sh
new file mode 100755
index 0000000..da32760
--- /dev/null
+++ b/python/samba/tests/samba_tool/computer_edit.sh
@@ -0,0 +1,197 @@
+#!/bin/sh
+#
+# Test for 'samba-tool computer edit'
+
+if [ $# -lt 3 ]; then
+ cat <<EOF
+Usage: computer_edit.sh SERVER USERNAME PASSWORD
+EOF
+ exit 1
+fi
+
+SERVER="$1"
+USERNAME="$2"
+PASSWORD="$3"
+
+STpath=$(pwd)
+. $STpath/testprogs/blackbox/subunit.sh
+. "${STpath}/testprogs/blackbox/common_test_fns.inc"
+
+ldbsearch=$(system_or_builddir_binary ldbsearch "${BINDIR}")
+
+display_name="Björns laptop"
+display_name_b64="QmrDtnJucyBsYXB0b3A="
+display_name_new="Bjoerns new laptop"
+# attribute value including control character
+# echo -e "test \a string" | base64
+display_name_con_b64="dGVzdCAHIHN0cmluZwo="
+
+tmpeditor=$(mktemp --suffix .sh -p $SELFTEST_TMPDIR samba-tool-editor-XXXXXXXX)
+chmod +x $tmpeditor
+
+TEST_MACHINE="$(mktemp -u testmachineXXXXXX)"
+
+create_test_computer()
+{
+ $PYTHON ${STpath}/source4/scripting/bin/samba-tool \
+ computer create ${TEST_MACHINE} \
+ -H "ldap://$SERVER" "-U$USERNAME" "--password=$PASSWORD"
+}
+
+edit_computer()
+{
+ # create editor.sh
+ # enable computer account
+ cat >$tmpeditor <<-'EOF'
+#!/usr/bin/env bash
+computer_ldif="$1"
+SED=$(which sed)
+$SED -i -e 's/userAccountControl: 4098/userAccountControl: 4096/' $computer_ldif
+EOF
+
+ $PYTHON ${STpath}/source4/scripting/bin/samba-tool \
+ computer edit ${TEST_MACHINE} --editor=$tmpeditor \
+ -H "ldap://$SERVER" "-U$USERNAME" "--password=$PASSWORD"
+}
+
+# Test edit computer - add base64 attributes
+add_attribute_base64()
+{
+ # create editor.sh
+ cat >$tmpeditor <<EOF
+#!/usr/bin/env bash
+computer_ldif="\$1"
+
+grep -v '^\$' \$computer_ldif > \${computer_ldif}.tmp
+echo "displayName:: $display_name_b64" >> \${computer_ldif}.tmp
+
+mv \${computer_ldif}.tmp \$computer_ldif
+EOF
+
+ $PYTHON ${STpath}/source4/scripting/bin/samba-tool computer edit \
+ ${TEST_MACHINE} --editor=$tmpeditor \
+ -H "ldap://$SERVER" "-U$USERNAME" "--password=$PASSWORD"
+}
+
+get_attribute_base64()
+{
+ ${ldbsearch} "(sAMAccountName=${TEST_MACHINE}\$)" displayName \
+ -H "ldap://$SERVER" "-U$USERNAME" "--password=$PASSWORD"
+}
+
+delete_attribute()
+{
+ # create editor.sh
+ cat >$tmpeditor <<EOF
+#!/usr/bin/env bash
+computer_ldif="\$1"
+
+grep -v '^displayName' \$computer_ldif >> \${computer_ldif}.tmp
+mv \${computer_ldif}.tmp \$computer_ldif
+EOF
+ $PYTHON ${STpath}/source4/scripting/bin/samba-tool computer edit \
+ ${TEST_MACHINE} --editor=$tmpeditor \
+ -H "ldap://$SERVER" "-U$USERNAME" "--password=$PASSWORD"
+}
+
+# Test edit computer - add base64 attribute value including control character
+add_attribute_base64_control()
+{
+ # create editor.sh
+ cat >$tmpeditor <<EOF
+#!/usr/bin/env bash
+computer_ldif="\$1"
+
+grep -v '^\$' \$computer_ldif > \${computer_ldif}.tmp
+echo "displayName:: $display_name_con_b64" >> \${computer_ldif}.tmp
+
+mv \${computer_ldif}.tmp \$computer_ldif
+EOF
+ $PYTHON ${STpath}/source4/scripting/bin/samba-tool computer edit \
+ ${TEST_MACHINE} --editor=$tmpeditor \
+ -H "ldap://$SERVER" "-U$USERNAME" "--password=$PASSWORD"
+}
+
+get_attribute_base64_control()
+{
+ $PYTHON ${STpath}/source4/scripting/bin/samba-tool computer show \
+ ${TEST_MACHINE} --attributes=displayName \
+ -H "ldap://$SERVER" "-U$USERNAME" "--password=$PASSWORD"
+}
+
+get_attribute_force_no_base64()
+{
+ # LDB_FLAG_FORCE_NO_BASE64_LDIF should be used here.
+ $PYTHON ${STpath}/source4/scripting/bin/samba-tool computer show \
+ ${TEST_MACHINE} --attributes=displayName \
+ -H "ldap://$SERVER" "-U$USERNAME" "--password=$PASSWORD"
+}
+
+# Test edit computer - change base64 attribute value including control character
+change_attribute_base64_control()
+{
+ # create editor.sh
+ cat >$tmpeditor <<EOF
+#!/usr/bin/env bash
+computer_ldif="\$1"
+
+sed -i -e 's/displayName:: $display_name_con_b64/displayName: $display_name/' \
+ \$computer_ldif
+EOF
+ $PYTHON ${STpath}/source4/scripting/bin/samba-tool computer edit \
+ ${TEST_MACHINE} --editor=$tmpeditor \
+ -H "ldap://$SERVER" "-U$USERNAME" "--password=$PASSWORD"
+}
+
+# Test edit computer - change attributes with LDB_FLAG_FORCE_NO_BASE64_LDIF
+change_attribute_force_no_base64()
+{
+ # create editor.sh
+ # Expects that the original attribute is available as clear text,
+ # because the LDB_FLAG_FORCE_NO_BASE64_LDIF should be used here.
+ cat >$tmpeditor <<EOF
+#!/usr/bin/env bash
+computer_ldif="\$1"
+
+sed -i -e 's/displayName: $display_name/displayName: $display_name_new/' \
+ \$computer_ldif
+EOF
+
+ $PYTHON ${STpath}/source4/scripting/bin/samba-tool computer edit \
+ ${TEST_MACHINE} --editor=$tmpeditor \
+ -H "ldap://$SERVER" "-U$USERNAME" "--password=$PASSWORD"
+}
+
+get_changed_attribute_force_no_base64()
+{
+ $PYTHON ${STpath}/source4/scripting/bin/samba-tool computer show \
+ ${TEST_MACHINE} --attributes=displayName \
+ -H "ldap://$SERVER" "-U$USERNAME" "--password=$PASSWORD"
+}
+
+delete_computer()
+{
+ $PYTHON ${STpath}/source4/scripting/bin/samba-tool \
+ computer delete ${TEST_MACHINE} \
+ -H "ldap://$SERVER" "-U$USERNAME" "--password=$PASSWORD"
+}
+
+failed=0
+
+testit "create_test_computer" create_test_computer || failed=$(expr $failed + 1)
+testit "edit_computer" edit_computer || failed=$(expr $failed + 1)
+testit "add_attribute_base64" add_attribute_base64 || failed=$(expr $failed + 1)
+testit_grep "get_attribute_base64" "^displayName:: $display_name_b64" get_attribute_base64 || failed=$(expr $failed + 1)
+testit "delete_attribute" delete_attribute || failed=$(expr $failed + 1)
+testit "add_attribute_base64_control" add_attribute_base64_control || failed=$(expr $failed + 1)
+testit_grep "get_attribute_base64_control" "^displayName:: $display_name_con_b64" get_attribute_base64_control || failed=$(expr $failed + 1)
+testit "change_attribute_base64_control" change_attribute_base64_control || failed=$(expr $failed + 1)
+testit_grep "get_attribute_base64" "^displayName:: $display_name_b64" get_attribute_base64 || failed=$(expr $failed + 1)
+testit_grep "get_attribute_force_no_base64" "^displayName: $display_name" get_attribute_force_no_base64 || failed=$(expr $failed + 1)
+testit "change_attribute_force_no_base64" change_attribute_force_no_base64 || failed=$(expr $failed + 1)
+testit_grep "get_changed_attribute_force_no_base64" "^displayName: $display_name_new" get_changed_attribute_force_no_base64 || failed=$(expr $failed + 1)
+testit "delete_computer" delete_computer || failed=$(expr $failed + 1)
+
+rm -f $tmpeditor
+
+exit $failed
diff --git a/python/samba/tests/samba_tool/contact.py b/python/samba/tests/samba_tool/contact.py
new file mode 100644
index 0000000..2bec813
--- /dev/null
+++ b/python/samba/tests/samba_tool/contact.py
@@ -0,0 +1,468 @@
+# Unix SMB/CIFS implementation.
+#
+# Tests for samba-tool contact management commands
+#
+# Copyright (C) Bjoern Baumbach <bbaumbach@samba.org> 2019
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import os
+import ldb
+from samba.tests.samba_tool.base import SambaToolCmdTest
+
+class ContactCmdTestCase(SambaToolCmdTest):
+ """Tests for samba-tool contact subcommands"""
+ contacts = []
+ samdb = None
+
+    def setUp(self):
+        """Create four test contacts on the DC and verify them.
+
+        The contacts vary in which naming attributes are supplied, to
+        exercise the CN-generation from sn/initials/givenName when no
+        explicit 'name' is given; "expectedname" records the CN each
+        contact should end up with.
+        """
+        super().setUp()
+        self.creds = "-U%s%%%s" % (os.environ["DC_USERNAME"],
+                                   os.environ["DC_PASSWORD"])
+        self.samdb = self.getSamDB("-H",
+                                   "ldap://%s" % os.environ["DC_SERVER"],
+                                   self.creds)
+        contact = None
+        self.contacts = []
+
+        contact = self._randomContact({"expectedname": "contact1",
+                                       "name": "contact1"})
+        self.contacts.append(contact)
+
+        # No 'name' is given here, so the name will be made from givenname.
+        contact = self._randomContact({"expectedname": "contact2",
+                                       "givenName": "contact2"})
+        self.contacts.append(contact)
+
+        contact = self._randomContact({"expectedname": "contact3",
+                                       "name": "contact3",
+                                       "displayName": "contact3displayname",
+                                       "givenName": "not_contact3",
+                                       "initials": "I",
+                                       "sn": "not_contact3",
+                                       "mobile": "12345"})
+        self.contacts.append(contact)
+
+        # No 'name' is given here, so the name will be made from the
+        # sn, initials and givenName attributes.
+        contact = self._randomContact({"expectedname": "James T. Kirk",
+                                       "sn": "Kirk",
+                                       "initials": "T",
+                                       "givenName": "James"})
+        self.contacts.append(contact)
+
+        # setup the 4 contacts and ensure they are correct
+        for contact in self.contacts:
+            (result, out, err) = self._create_contact(contact)
+
+            self.assertCmdSuccess(result, out, err)
+            self.assertNotIn(
+                "ERROR", err, "There shouldn't be any error message")
+            self.assertIn("Contact '%s' added successfully" %
+                          contact["expectedname"], out)
+
+            found = self._find_contact(contact["expectedname"])
+
+            self.assertIsNotNone(found)
+
+            contactname = contact["expectedname"]
+            self.assertEqual("%s" % found.get("name"), contactname)
+            self.assertEqual("%s" % found.get("description"),
+                             contact["description"])
+
+    def tearDown(self):
+        """Delete any test contact a test left behind.
+
+        Only contacts that still exist are deleted, so tests that already
+        cleaned up after themselves pass through.  Note the base-class
+        tearDown runs *before* this cleanup.
+        """
+        super().tearDown()
+        # clean up all the left over contacts, just in case
+        for contact in self.contacts:
+            if self._find_contact(contact["expectedname"]):
+                (result, out, err) = self.runsubcmd(
+                    "contact", "delete", "%s" % contact["expectedname"])
+                self.assertCmdSuccess(result, out, err,
+                                      "Failed to delete contact '%s'" %
+                                      contact["expectedname"])
+
+ def test_newcontact(self):
+ """This tests the "contact create" and "contact delete" commands"""
+ # try to create all the contacts again, this should fail
+ for contact in self.contacts:
+ (result, out, err) = self._create_contact(contact)
+ self.assertCmdFail(result, "Succeeded to create existing contact")
+ self.assertIn("already exists", err)
+
+ # try to delete all the contacts we just added
+ for contact in self.contacts:
+ (result, out, err) = self.runsubcmd("contact", "delete", "%s" %
+ contact["expectedname"])
+ self.assertCmdSuccess(result, out, err,
+ "Failed to delete contact '%s'" %
+ contact["expectedname"])
+ found = self._find_contact(contact["expectedname"])
+ self.assertIsNone(found,
+ "Deleted contact '%s' still exists" %
+ contact["expectedname"])
+
+        # test creating contacts in a specified OU
+ parentou = self._randomOU({"name": "testOU"})
+ (result, out, err) = self._create_ou(parentou)
+ self.assertCmdSuccess(result, out, err)
+
+ for contact in self.contacts:
+ (result, out, err) = self._create_contact(contact, ou="OU=testOU")
+
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "There shouldn't be any error message")
+ self.assertIn("Contact '%s' added successfully" %
+ contact["expectedname"], out)
+
+ found = self._find_contact(contact["expectedname"])
+
+ contactname = contact["expectedname"]
+ self.assertEqual("%s" % found.get("name"), contactname)
+ self.assertEqual("%s" % found.get("description"),
+ contact["description"])
+
+ # try to delete all the contacts we just added, by DN
+ for contact in self.contacts:
+ expecteddn = ldb.Dn(self.samdb,
+ "CN=%s,OU=%s,%s" %
+ (contact["expectedname"],
+ parentou["name"],
+ self.samdb.domain_dn()))
+ (result, out, err) = self.runsubcmd("contact", "delete", "%s" %
+ expecteddn)
+ self.assertCmdSuccess(result, out, err,
+ "Failed to delete contact '%s'" %
+ contact["expectedname"])
+ found = self._find_contact(contact["expectedname"])
+ self.assertIsNone(found,
+ "Deleted contact '%s' still exists" %
+ contact["expectedname"])
+
+ (result, out, err) = self.runsubcmd("ou", "delete",
+ "OU=%s" % parentou["name"])
+ self.assertCmdSuccess(result, out, err,
+ "Failed to delete ou '%s'" % parentou["name"])
+
+ # creating contacts, again for further tests
+ for contact in self.contacts:
+ (result, out, err) = self._create_contact(contact)
+
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "There shouldn't be any error message")
+ self.assertIn("Contact '%s' added successfully" %
+ contact["expectedname"], out)
+
+ found = self._find_contact(contact["expectedname"])
+
+ contactname = contact["expectedname"]
+ self.assertEqual("%s" % found.get("name"), contactname)
+ self.assertEqual("%s" % found.get("description"),
+ contact["description"])
+
+ def test_list(self):
+ (result, out, err) = self.runsubcmd("contact", "list")
+ self.assertCmdSuccess(result, out, err, "Error running list")
+
+ search_filter = "(objectClass=contact)"
+ contactlist = self.samdb.search(base=self.samdb.domain_dn(),
+ scope=ldb.SCOPE_SUBTREE,
+ expression=search_filter,
+ attrs=["name"])
+
+ self.assertTrue(len(contactlist) > 0, "no contacts found in samdb")
+
+ for contactobj in contactlist:
+ name = contactobj.get("name", idx=0)
+ self.assertMatch(out, str(name),
+ "contact '%s' not found" % name)
+
+ def test_list_full_dn(self):
+ (result, out, err) = self.runsubcmd("contact", "list", "--full-dn")
+ self.assertCmdSuccess(result, out, err, "Error running list")
+
+ search_filter = "(objectClass=contact)"
+ contactlist = self.samdb.search(base=self.samdb.domain_dn(),
+ scope=ldb.SCOPE_SUBTREE,
+ expression=search_filter,
+ attrs=["dn"])
+
+ self.assertTrue(len(contactlist) > 0, "no contacts found in samdb")
+
+ for contactobj in contactlist:
+ self.assertMatch(out, str(contactobj.dn),
+ "contact '%s' not found" % str(contactobj.dn))
+
+ def test_list_base_dn(self):
+ base_dn = str(self.samdb.domain_dn())
+ (result, out, err) = self.runsubcmd("contact", "list",
+ "-b", base_dn)
+ self.assertCmdSuccess(result, out, err, "Error running list")
+
+ search_filter = "(objectClass=contact)"
+ contactlist = self.samdb.search(base=base_dn,
+ scope=ldb.SCOPE_SUBTREE,
+ expression=search_filter,
+ attrs=["name"])
+
+ self.assertTrue(len(contactlist) > 0, "no contacts found in samdb")
+
+ for contactobj in contactlist:
+ name = contactobj.get("name", idx=0)
+ self.assertMatch(out, str(name),
+ "contact '%s' not found" % name)
+
+ def test_move(self):
+ parentou = self._randomOU({"name": "parentOU"})
+ (result, out, err) = self._create_ou(parentou)
+ self.assertCmdSuccess(result, out, err)
+
+ for contact in self.contacts:
+ olddn = self._find_contact(contact["expectedname"]).get("dn")
+
+ (result, out, err) = self.runsubcmd("contact", "move",
+ "%s" % contact["expectedname"],
+ "OU=%s" % parentou["name"])
+ self.assertCmdSuccess(result, out, err,
+ "Failed to move contact '%s'" %
+ contact["expectedname"])
+ self.assertEqual(err, "", "There shouldn't be any error message")
+ self.assertIn('Moved contact "%s"' % contact["expectedname"], out)
+
+ found = self._find_contact(contact["expectedname"])
+ self.assertNotEqual(found.get("dn"), olddn,
+ ("Moved contact '%s' still exists with the "
+ "same dn" % contact["expectedname"]))
+ contactname = contact["expectedname"]
+ newexpecteddn = ldb.Dn(self.samdb,
+ "CN=%s,OU=%s,%s" %
+ (contactname,
+ parentou["name"],
+ self.samdb.domain_dn()))
+ self.assertEqual(found.get("dn"), newexpecteddn,
+ "Moved contact '%s' does not exist" %
+ contact["expectedname"])
+
+ (result, out, err) = self.runsubcmd("contact", "move",
+ "%s" % contact["expectedname"],
+ "%s" % olddn.parent())
+ self.assertCmdSuccess(result, out, err,
+ "Failed to move contact '%s'" %
+ contact["expectedname"])
+
+ (result, out, err) = self.runsubcmd("ou", "delete",
+ "OU=%s" % parentou["name"])
+ self.assertCmdSuccess(result, out, err,
+ "Failed to delete ou '%s'" % parentou["name"])
+
+ def test_rename_givenname_initials_surname(self):
+ """rename and remove given name, initials and surname for all contacts"""
+ for contact in self.contacts:
+ name = contact["name"] if "name" in contact else contact["expectedname"]
+
+ new_givenname = "new_given_name_of_" + name
+ new_initials = "A"
+ new_surname = "new_surname_of_" + name
+ new_cn = "new_cn_of_" + name
+ expected_cn = "%s %s. %s" % (new_givenname, new_initials, new_surname)
+
+ # rename given name, initials and surname
+ (result, out, err) = self.runsubcmd("contact", "rename", name,
+ "--reset-cn",
+ "--surname=%s" % new_surname,
+ "--initials=%s" % new_initials,
+ "--given-name=%s" % new_givenname)
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+ self.assertIn('successfully', out)
+
+ found = self._find_contact(expected_cn)
+ self.assertEqual("%s" % found.get("givenName"), new_givenname)
+ self.assertEqual("%s" % found.get("initials"), new_initials)
+ self.assertEqual("%s" % found.get("sn"), new_surname)
+ self.assertEqual("%s" % found.get("name"), expected_cn)
+ self.assertEqual("%s" % found.get("cn"), expected_cn)
+
+ # remove given name, initials and surname
+            # (must force new cn, because an empty new CN throws an error)
+ (result, out, err) = self.runsubcmd("contact", "rename", expected_cn,
+ "--force-new-cn=%s" % expected_cn,
+ "--surname=",
+ "--initials=",
+ "--given-name=")
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+ self.assertIn('successfully', out)
+
+ found = self._find_contact(expected_cn)
+ self.assertEqual(found.get("givenName"), None)
+ self.assertEqual(found.get("initials"), None)
+ self.assertEqual(found.get("sn"), None)
+
+ # reset changes (initials are already removed)
+ old_surname = contact["sn"] if "sn" in contact else ""
+ old_initials = contact["initials"] if "initials" in contact else ""
+ old_givenname = contact["givenName"] if "givenName" in contact else ""
+ old_cn = contact["cn"] if "cn" in contact else name
+ (result, out, err) = self.runsubcmd("contact", "rename", expected_cn,
+ "--force-new-cn=%s" % old_cn,
+ "--surname=%s" % old_surname,
+ "--initials=%s" % old_initials,
+ "--given-name=%s" % old_givenname)
+ self.assertCmdSuccess(result, out, err)
+
+ def test_rename_cn(self):
+ """rename and try to remove the cn of all contacts"""
+ for contact in self.contacts:
+ name = contact["name"] if "name" in contact else contact["expectedname"]
+ new_cn = "new_cn_of_" + name
+
+ # rename cn
+ (result, out, err) = self.runsubcmd("contact", "rename", name,
+ "--force-new-cn=%s" % new_cn)
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+ self.assertIn('successfully', out)
+
+ found = self._find_contact(new_cn)
+ self.assertEqual("%s" % found.get("cn"), new_cn)
+
+ # trying to remove cn (throws an error)
+ (result, out, err) = self.runsubcmd("contact", "rename", new_cn,
+ "--force-new-cn=")
+ self.assertCmdFail(result)
+ self.assertIn('Failed to rename contact', err)
+ self.assertIn("delete protected attribute", err)
+
+ # reset changes (cn must be the name)
+ (result, out, err) = self.runsubcmd("contact", "rename", new_cn,
+ "--force-new-cn=%s" % name)
+ self.assertCmdSuccess(result, out, err)
+
+
+ def test_rename_mailaddress_displayname(self):
+ """rename and remove the mail and the displayname attribute of all contacts"""
+ for contact in self.contacts:
+ name = contact["name"] if "name" in contact else contact["expectedname"]
+ new_mail = "new_mailaddress_of_" + name
+ new_displayname = "new displayname of " + name
+
+ # change mail and displayname
+ (result, out, err) = self.runsubcmd("contact", "rename", name,
+ "--mail-address=%s"
+ % new_mail,
+ "--display-name=%s"
+ % new_displayname)
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+ self.assertIn('successfully', out)
+
+ found = self._find_contact(name)
+ self.assertEqual("%s" % found.get("mail"), new_mail)
+ self.assertEqual("%s" % found.get("displayName"), new_displayname)
+
+ # remove mail and displayname
+ (result, out, err) = self.runsubcmd("contact", "rename", name,
+ "--mail-address=",
+ "--display-name=")
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+ self.assertIn('successfully', out)
+
+ found = self._find_contact(name)
+ self.assertEqual(found.get("mail"), None)
+ self.assertEqual(found.get("displayName"), None)
+
+ # reset changes
+ old_mail = contact["givenName"] if "givenName" in contact else ""
+ old_displayname = contact["cn"] if "cn" in contact else ""
+ (result, out, err) = self.runsubcmd("contact", "rename", name,
+ "--mail-address=%s" % old_mail,
+ "--display-name=%s" % old_displayname)
+ self.assertCmdSuccess(result, out, err)
+
+    def _randomContact(self, base=None):
+        """Create a contact with random attribute values, you can specify base
+        attributes
+
+        Only 'description' is generated; keys in *base* are merged in.
+        """
+        if base is None:
+            base = {}
+
+        # No name attributes are given here, because the object name will
+        # be made from the sn, givenName and initials attributes, if no name
+        # is given.
+        contact = {
+            "description": self.randomName(count=100),
+        }
+        contact.update(base)
+        return contact
+
+ def _randomOU(self, base=None):
+ """Create an ou with random attribute values, you can specify base
+ attributes."""
+ if base is None:
+ base = {}
+
+ ou = {
+ "name": self.randomName(),
+ "description": self.randomName(count=100),
+ }
+ ou.update(base)
+ return ou
+
+    def _create_contact(self, contact, ou=None):
+        """Run 'samba-tool contact create' for *contact*.
+
+        Each recognised key in *contact* is translated to the matching
+        command-line option; *ou* (e.g. "OU=testOU") selects the target
+        container.  Returns (result, out, err) from runsubcmd().
+        """
+        args = ""
+
+        if "name" in contact:
+            args += '{0}'.format(contact['name'])
+
+        args += ' {0}'.format(self.creds)
+
+        if ou is not None:
+            args += ' --ou={0}'.format(ou)
+
+        if "description" in contact:
+            args += ' --description={0}'.format(contact["description"])
+        if "sn" in contact:
+            args += ' --surname={0}'.format(contact["sn"])
+        if "initials" in contact:
+            args += ' --initials={0}'.format(contact["initials"])
+        if "givenName" in contact:
+            args += ' --given-name={0}'.format(contact["givenName"])
+        if "displayName" in contact:
+            args += ' --display-name={0}'.format(contact["displayName"])
+        if "mobile" in contact:
+            args += ' --mobile-number={0}'.format(contact["mobile"])
+
+        # args is split on whitespace, so no value may contain spaces
+        args = args.split()
+
+        return self.runsubcmd('contact', 'create', *args)
+
+ def _create_ou(self, ou):
+ return self.runsubcmd("ou",
+ "create",
+ "OU=%s" % ou["name"],
+ "--description=%s" % ou["description"])
+
+    def _find_contact(self, name):
+        """Look up a contact object by name in the sam database.
+
+        Returns the first matching ldb message, or None if the contact
+        does not exist.
+        """
+        contactname = name
+        search_filter = ("(&(objectClass=contact)(name=%s))" %
+                         ldb.binary_encode(contactname))
+        contactlist = self.samdb.search(base=self.samdb.domain_dn(),
+                                        scope=ldb.SCOPE_SUBTREE,
+                                        expression=search_filter,
+                                        attrs=[])
+        if contactlist:
+            return contactlist[0]
+        else:
+            return None
diff --git a/python/samba/tests/samba_tool/contact_edit.sh b/python/samba/tests/samba_tool/contact_edit.sh
new file mode 100755
index 0000000..d31413d
--- /dev/null
+++ b/python/samba/tests/samba_tool/contact_edit.sh
@@ -0,0 +1,183 @@
#!/bin/sh
#
# Test for 'samba-tool contact edit'

if [ $# -lt 3 ]; then
	cat <<EOF
Usage: contact_edit.sh SERVER USERNAME PASSWORD
EOF
	exit 1
fi

SERVER="$1"
USERNAME="$2"
PASSWORD="$3"

# Prefer the in-tree ldbsearch binary when available, otherwise fall
# back to whatever ldbsearch is on PATH.
samba_ldbsearch=ldbsearch
if test -x $BINDIR/ldbsearch; then
	samba_ldbsearch=$BINDIR/ldbsearch
fi

STpath=$(pwd)
. $STpath/testprogs/blackbox/subunit.sh

# Fixture values: a non-ASCII display name and its base64 encoding.
display_name="Björn"
display_name_b64="QmrDtnJu"
display_name_new="Renamed Bjoern"
# attribute value including control character
# echo -e "test \a string" | base64
display_name_con_b64="dGVzdCAHIHN0cmluZwo="

# Unique (not yet created) name for the test contact.
TEST_USER="$(mktemp -u testcontactXXXXXX)"

# Temporary editor script passed to 'samba-tool contact edit' via
# --editor; each test case rewrites its contents before running.
tmpeditor=$(mktemp --suffix .sh -p $SELFTEST_TMPDIR samba-tool-editor-XXXXXXXX)
chmod +x $tmpeditor
+
# Create the contact that all the following test cases edit.
create_test_contact()
{
	$PYTHON ${STpath}/source4/scripting/bin/samba-tool \
		contact create ${TEST_USER} \
		-H "ldap://$SERVER" "-U$USERNAME" "--password=$PASSWORD"
}
+
# Test edit contact - add base64 attributes
add_attribute_base64()
{
	# create editor.sh: strips blank lines from the LDIF handed to the
	# editor and appends a base64-encoded ('::') displayName attribute.
	cat >$tmpeditor <<EOF
#!/usr/bin/env bash
contact_ldif="\$1"

grep -v '^\$' \$contact_ldif > \${contact_ldif}.tmp
echo "displayName:: $display_name_b64" >> \${contact_ldif}.tmp

mv \${contact_ldif}.tmp \$contact_ldif
EOF

	$PYTHON ${STpath}/source4/scripting/bin/samba-tool contact edit \
		${TEST_USER} --editor=$tmpeditor \
		-H "ldap://$SERVER" "-U$USERNAME" "--password=$PASSWORD"
}
+
# Read back displayName with ldbsearch; non-ASCII values are printed
# base64-encoded as 'displayName:: <b64>'.
get_attribute_base64()
{
	$samba_ldbsearch "(&(objectClass=contact)(name=${TEST_USER}))" \
		displayName \
		-H "ldap://$SERVER" "-U$USERNAME" "--password=$PASSWORD"
}
+
# Test edit contact - delete the displayName attribute via the editor.
delete_attribute()
{
	# create editor.sh: drop every displayName line from the LDIF.
	# Use '>' (truncate) rather than '>>' so a stale .tmp file left
	# behind by an aborted previous run cannot leak into the result;
	# this also matches the other editor scripts in this file.
	cat >$tmpeditor <<EOF
#!/usr/bin/env bash
contact_ldif="\$1"

grep -v '^displayName' \$contact_ldif > \${contact_ldif}.tmp
mv \${contact_ldif}.tmp \$contact_ldif
EOF
	$PYTHON ${STpath}/source4/scripting/bin/samba-tool contact edit \
		${TEST_USER} --editor=$tmpeditor \
		-H "ldap://$SERVER" "-U$USERNAME" "--password=$PASSWORD"
}
+
# Test edit contact - add base64 attribute value including control character
add_attribute_base64_control()
{
	# create editor.sh: appends a displayName whose decoded value
	# contains a control character (BEL), so it must stay base64.
	cat >$tmpeditor <<EOF
#!/usr/bin/env bash
contact_ldif="\$1"

grep -v '^\$' \$contact_ldif > \${contact_ldif}.tmp
echo "displayName:: $display_name_con_b64" >> \${contact_ldif}.tmp

mv \${contact_ldif}.tmp \$contact_ldif
EOF
	$PYTHON ${STpath}/source4/scripting/bin/samba-tool contact edit \
		${TEST_USER} --editor=$tmpeditor \
		-H "ldap://$SERVER" "-U$USERNAME" "--password=$PASSWORD"
}
+
# Read back displayName via 'samba-tool contact show'; values containing
# control characters are expected to be printed base64-encoded.
get_attribute_base64_control()
{
	$PYTHON ${STpath}/source4/scripting/bin/samba-tool contact show \
		${TEST_USER} --attributes=displayName \
		-H "ldap://$SERVER" "-U$USERNAME" "--password=$PASSWORD"
}
+
# Read back displayName expecting clear text output even for non-ASCII
# values.
get_attribute_force_no_base64()
{
	# LDB_FLAG_FORCE_NO_BASE64_LDIF should be used here.
	$PYTHON ${STpath}/source4/scripting/bin/samba-tool contact show \
		${TEST_USER} --attributes=displayName \
		-H "ldap://$SERVER" "-U$USERNAME" "--password=$PASSWORD"
}
+
# Test edit contact - change base64 attribute value including control character
change_attribute_base64_control()
{
	# create editor.sh: replace the base64 control-character value with
	# a plain-text display name.
	cat >$tmpeditor <<EOF
#!/usr/bin/env bash
contact_ldif="\$1"

sed -i -e 's/displayName:: $display_name_con_b64/displayName: $display_name/' \
	\$contact_ldif
EOF
	$PYTHON ${STpath}/source4/scripting/bin/samba-tool contact edit \
		${TEST_USER} --editor=$tmpeditor \
		-H "ldap://$SERVER" "-U$USERNAME" "--password=$PASSWORD"
}
+
# Test edit contact - change attributes with LDB_FLAG_FORCE_NO_BASE64_LDIF
change_attribute_force_no_base64()
{
	# create editor.sh
	# Expects that the original attribute is available as clear text,
	# because the LDB_FLAG_FORCE_NO_BASE64_LDIF should be used here.
	cat >$tmpeditor <<EOF
#!/usr/bin/env bash
contact_ldif="\$1"

sed -i -e 's/displayName: $display_name/displayName: $display_name_new/' \
	\$contact_ldif
EOF

	$PYTHON ${STpath}/source4/scripting/bin/samba-tool contact edit \
		${TEST_USER} --editor=$tmpeditor \
		-H "ldap://$SERVER" "-U$USERNAME" "--password=$PASSWORD"
}
+
# Verify the renamed display name is shown as clear text.
get_changed_attribute_force_no_base64()
{
	$PYTHON ${STpath}/source4/scripting/bin/samba-tool contact show \
		${TEST_USER} --attributes=displayName \
		-H "ldap://$SERVER" "-U$USERNAME" "--password=$PASSWORD"
}
+
# Remove the test contact again.
delete_contact()
{
	$PYTHON ${STpath}/source4/scripting/bin/samba-tool \
		contact delete ${TEST_USER} \
		-H "ldap://$SERVER" "-U$USERNAME" "--password=$PASSWORD"
}
+
+failed=0
+
+testit "create_test_contact" create_test_contact || failed=$(expr $failed + 1)
+testit "add_attribute_base64" add_attribute_base64 || failed=$(expr $failed + 1)
+testit_grep "get_attribute_base64" "^displayName:: $display_name_b64" get_attribute_base64 || failed=$(expr $failed + 1)
+testit "delete_attribute" delete_attribute || failed=$(expr $failed + 1)
+testit "add_attribute_base64_control" add_attribute_base64_control || failed=$(expr $failed + 1)
+testit_grep "get_attribute_base64_control" "^displayName:: $display_name_con_b64" get_attribute_base64_control || failed=$(expr $failed + 1)
+testit "change_attribute_base64_control" change_attribute_base64_control || failed=$(expr $failed + 1)
+testit_grep "get_attribute_base64" "^displayName:: $display_name_b64" get_attribute_base64 || failed=$(expr $failed + 1)
+testit_grep "get_attribute_force_no_base64" "^displayName: $display_name" get_attribute_force_no_base64 || failed=$(expr $failed + 1)
+testit "change_attribute_force_no_base64" change_attribute_force_no_base64 || failed=$(expr $failed + 1)
+testit_grep "get_changed_attribute_force_no_base64" "^displayName: $display_name_new" get_changed_attribute_force_no_base64 || failed=$(expr $failed + 1)
+testit "delete_contact" delete_contact || failed=$(expr $failed + 1)
+
+rm -f $tmpeditor
+
+exit $failed
diff --git a/python/samba/tests/samba_tool/demote.py b/python/samba/tests/samba_tool/demote.py
new file mode 100644
index 0000000..2c63cca
--- /dev/null
+++ b/python/samba/tests/samba_tool/demote.py
@@ -0,0 +1,106 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2018
+# Written by Joe Guo <joeg@catalyst.net.nz>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import os
+from samba.tests.samba_tool.base import SambaToolCmdTest
+
+
class DemoteCmdTestCase(SambaToolCmdTest):
    """Test for samba-tool domain demote subcommand"""

    def setUp(self):
        super().setUp()
        # Credentials for the DC that stays up (DC_SERVER).
        self.creds_string = "-U{0}%{1}".format(
            os.environ["DC_USERNAME"], os.environ["DC_PASSWORD"])

        self.dc_server = os.environ['DC_SERVER']
        self.dburl = "ldap://%s" % os.environ["DC_SERVER"]
        self.samdb = self.getSamDB("-H", self.dburl, self.creds_string)

    def test_demote_and_remove_dns(self):
        """
        Test domain demote command will also remove dns references
        """

        server = os.environ['SERVER']  # the server to demote
        zone = os.environ['REALM'].lower()

        # make sure the zone exists
        result, out, err = self.runsubcmd(
            "dns", "zoneinfo", server, zone, self.creds_string)
        self.assertCmdSuccess(result, out, err)

        # add an A record for the server to demote
        result, out, err = self.runsubcmd(
            "dns", "add", self.dc_server, zone,
            server, "A", "192.168.0.193", self.creds_string)
        self.assertCmdSuccess(result, out, err)

        # make sure the above A record exists
        result, out, err = self.runsubcmd(
            "dns", "query", self.dc_server, zone,
            server, 'A', self.creds_string)
        self.assertCmdSuccess(result, out, err)

        # the above A record points to this host
        dnshostname = '{0}.{1}'.format(server, zone)

        # add a SRV record pointing to the above host
        # (bug fix: capture and check the result of the add itself)
        srv_record = "{0} 65530 65530 65530".format(dnshostname)
        result, out, err = self.runsubcmd(
            "dns", "add", self.dc_server, zone, 'testrecord', "SRV",
            srv_record, self.creds_string)
        self.assertCmdSuccess(result, out, err)

        # make sure the above SRV record exists
        result, out, err = self.runsubcmd(
            "dns", "query", self.dc_server, zone,
            "testrecord", 'SRV', self.creds_string)
        self.assertCmdSuccess(result, out, err)

        for type_ in ['CNAME', 'NS', 'PTR']:
            # create record
            # (bug fix: the add result was previously not captured, so
            # assertCmdSuccess re-checked the stale result of the
            # earlier query)
            result, out, err = self.runsubcmd(
                "dns", "add", self.dc_server, zone,
                'testrecord', type_, dnshostname,
                self.creds_string)
            self.assertCmdSuccess(result, out, err)

            # check the newly added record type exists
            # (bug fix: this queried 'SRV' again instead of type_, so
            # the CNAME/NS/PTR additions were never actually verified)
            result, out, err = self.runsubcmd(
                "dns", "query", self.dc_server, zone,
                "testrecord", type_, self.creds_string)
            self.assertCmdSuccess(result, out, err)

        # now demote
        result, out, err = self.runsubcmd(
            "domain", "demote",
            "--server", self.dc_server,
            "--configfile", os.environ["CONFIGFILE"],
            "--workgroup", os.environ["DOMAIN"],
            self.creds_string)
        self.assertCmdSuccess(result, out, err)

        # after the demotion both the demoted DC's own records and the
        # test records pointing at it must be gone
        result, out, err = self.runsubcmd(
            "dns", "query", self.dc_server, zone,
            server, 'ALL', self.creds_string)
        self.assertCmdFail(result)

        result, out, err = self.runsubcmd(
            "dns", "query", self.dc_server, zone,
            "testrecord", 'ALL', self.creds_string)
        self.assertCmdFail(result)
diff --git a/python/samba/tests/samba_tool/dnscmd.py b/python/samba/tests/samba_tool/dnscmd.py
new file mode 100644
index 0000000..d372bc5
--- /dev/null
+++ b/python/samba/tests/samba_tool/dnscmd.py
@@ -0,0 +1,1506 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Andrew Bartlett <abartlet@catalyst.net.nz>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import os
+import ldb
+import re
+
+from samba.ndr import ndr_unpack, ndr_pack
+from samba.dcerpc import dnsp
+from samba.tests.samba_tool.base import SambaToolCmdTest
+import time
+from samba import dsdb_dns
+
+
+class DnsCmdTestCase(SambaToolCmdTest):
    def setUp(self):
        """Connect to the test DC, create the test zone, and build tables
        of valid and invalid record data for each supported record type."""
        super().setUp()

        self.dburl = "ldap://%s" % os.environ["SERVER"]
        self.creds_string = "-U%s%%%s" % (os.environ["DC_USERNAME"],
                                          os.environ["DC_PASSWORD"])

        self.samdb = self.getSamDB("-H", self.dburl, self.creds_string)
        self.config_dn = str(self.samdb.get_config_basedn())

        # Two spare test addresses used by individual test cases.
        self.testip = "192.168.0.193"
        self.testip2 = "192.168.0.194"

        # Register the cleanup before creating the zone, so the zone is
        # removed even if a later setup step fails.
        self.addCleanup(self.deleteZone)
        self.addZone()

        # Note: SOA types don't work (and shouldn't), as we only have one zone per DNS record.

        good_dns = ["SAMDOM.EXAMPLE.COM",
                    "1.EXAMPLE.COM",
                    "%sEXAMPLE.COM" % ("1." * 100),
                    "EXAMPLE",
                    "!@#$%^&*()_",
                    "HIGH\xFFBYTE",
                    "@.EXAMPLE.COM",
                    "."]
        bad_dns = ["...",
                   ".EXAMPLE.COM",
                   ".EXAMPLE.",
                   "",
                   "SAMDOM..EXAMPLE.COM"]

        # MX values are "<host> <priority>".
        good_mx = ["SAMDOM.EXAMPLE.COM 65530",
                   "SAMDOM.EXAMPLE.COM 0"]
        bad_mx = ["SAMDOM.EXAMPLE.COM -1",
                  "SAMDOM.EXAMPLE.COM",
                  " ",
                  "SAMDOM.EXAMPLE.COM 1 1",
                  "SAMDOM.EXAMPLE.COM SAMDOM.EXAMPLE.COM"]

        # SRV values are "<host> <port> <priority> <weight>", each
        # numeric field being a 16-bit value.
        good_srv = ["SAMDOM.EXAMPLE.COM 65530 65530 65530",
                    "SAMDOM.EXAMPLE.COM 1 1 1"]
        bad_srv = ["SAMDOM.EXAMPLE.COM 0 65536 0",
                   "SAMDOM.EXAMPLE.COM 0 0 65536",
                   "SAMDOM.EXAMPLE.COM 65536 0 0"]

        # A bad DNS name makes any MX/SRV value bad, and vice versa.
        for bad_dn in bad_dns:
            bad_mx.append("%s 1" % bad_dn)
            bad_srv.append("%s 0 0 0" % bad_dn)
        for good_dn in good_dns:
            good_mx.append("%s 1" % good_dn)
            good_srv.append("%s 0 0 0" % good_dn)

        self.good_records = {
            "A": ["192.168.0.1", "255.255.255.255"],
            "AAAA": ["1234:5678:9ABC:DEF0:0000:0000:0000:0000",
                     "0000:0000:0000:0000:0000:0000:0000:0000",
                     "1234:5678:9ABC:DEF0:1234:5678:9ABC:DEF0",
                     "1234:1234:1234::",
                     "1234:5678:9ABC:DEF0::",
                     "0000:0000::0000",
                     "1234::5678:9ABC:0000:0000:0000:0000",
                     "::1",
                     "::",
                     "1:1:1:1:1:1:1:1"],
            "PTR": good_dns,
            "CNAME": good_dns,
            "NS": good_dns,
            "MX": good_mx,
            "SRV": good_srv,
            "TXT": ["text", "", "@#!", "\n"]
        }

        self.bad_records = {
            "A": ["192.168.0.500",
                  "255.255.255.255/32"],
            "AAAA": ["GGGG:1234:5678:9ABC:0000:0000:0000:0000",
                     "0000:0000:0000:0000:0000:0000:0000:0000/1",
                     "AAAA:AAAA:AAAA:AAAA:G000:0000:0000:1234",
                     "1234:5678:9ABC:DEF0:1234:5678:9ABC:DEF0:1234",
                     "1234:5678:9ABC:DEF0:1234:5678:9ABC",
                     "1111::1111::1111"],
            "PTR": bad_dns,
            "CNAME": bad_dns,
            "NS": bad_dns,
            "MX": bad_mx,
            "SRV": bad_srv
        }
+
    def resetZone(self):
        """Return the test zone to a pristine state by recreating it."""
        self.deleteZone()
        self.addZone()
+
    def addZone(self):
        """Create the test zone and remember its name in self.zone."""
        self.zone = "zone"
        result, out, err = self.runsubcmd("dns",
                                          "zonecreate",
                                          os.environ["SERVER"],
                                          self.zone,
                                          self.creds_string)
        self.assertCmdSuccess(result, out, err)
+
    def deleteZone(self):
        """Delete the test zone created by addZone."""
        result, out, err = self.runsubcmd("dns",
                                          "zonedelete",
                                          os.environ["SERVER"],
                                          self.zone,
                                          self.creds_string)
        self.assertCmdSuccess(result, out, err)
+
+ def get_all_records(self, zone_name):
+ zone_dn = (f"DC={zone_name},CN=MicrosoftDNS,DC=DomainDNSZones,"
+ f"{self.samdb.get_default_basedn()}")
+
+ expression = "(&(objectClass=dnsNode)(!(dNSTombstoned=TRUE)))"
+
+ nodes = self.samdb.search(base=zone_dn, scope=ldb.SCOPE_SUBTREE,
+ expression=expression,
+ attrs=["dnsRecord", "name"])
+
+ record_map = {}
+ for node in nodes:
+ name = node["name"][0].decode()
+ record_map[name] = list(node["dnsRecord"])
+
+ return record_map
+
    def get_record_from_db(self, zone_name, record_name):
        """Fetch one record for a node directly from the database.

        :param zone_name: substring matched against each zone DN.
        :param record_name: substring matched against each node DN.
        :return: tuple (node dn, first dnsRecord unpacked as
            dnsp.DnssrvRpcRecord), or None (implicit) if no node DN
            contains record_name.

        NOTE(review): if no zone DN contains zone_name, zone_dn is never
        assigned and the node search raises UnboundLocalError — callers
        are expected to pass an existing zone.
        """
        zones = self.samdb.search(base="DC=DomainDnsZones,%s"
                                  % self.samdb.get_default_basedn(),
                                  scope=ldb.SCOPE_SUBTREE,
                                  expression="(objectClass=dnsZone)",
                                  attrs=["cn"])

        for zone in zones:
            if zone_name in str(zone.dn):
                zone_dn = zone.dn
                break

        records = self.samdb.search(base=zone_dn, scope=ldb.SCOPE_SUBTREE,
                                    expression="(objectClass=dnsNode)",
                                    attrs=["dnsRecord"])

        for old_packed_record in records:
            if record_name in str(old_packed_record.dn):
                return (old_packed_record.dn,
                        ndr_unpack(dnsp.DnssrvRpcRecord,
                                   old_packed_record["dnsRecord"][0]))
+
    def test_rank_none(self):
        """A record forced to DNS_RANK_NONE can still be queried and
        deleted, but adding a duplicate of it must fail."""
        record_str = "192.168.50.50"
        record_type_str = "A"

        result, out, err = self.runsubcmd("dns", "add", os.environ["SERVER"],
                                          self.zone, "testrecord", record_type_str,
                                          record_str, self.creds_string)
        self.assertCmdSuccess(result, out, err,
                              "Failed to add record '%s' with type %s."
                              % (record_str, record_type_str))

        # Rewrite the record in the database with rank 0 (DNS_RANK_NONE),
        # something the command line cannot do directly.
        dn, record = self.get_record_from_db(self.zone, "testrecord")
        record.rank = 0  # DNS_RANK_NONE
        res = self.samdb.dns_replace_by_dn(dn, [record])
        if res is not None:
            self.fail("Unable to update dns record to have DNS_RANK_NONE.")

        # Collect assertion failures so all checks run before reporting.
        errors = []

        # The record should still exist
        result, out, err = self.runsubcmd("dns", "query", os.environ["SERVER"],
                                          self.zone, "testrecord", record_type_str,
                                          self.creds_string)
        try:
            self.assertCmdSuccess(result, out, err,
                                  "Failed to query for a record"
                                  "which had DNS_RANK_NONE.")
            self.assertTrue("testrecord" in out and record_str in out,
                            "Query for a record which had DNS_RANK_NONE"
                            "succeeded but produced no resulting records.")
        except AssertionError:
            # Windows produces no resulting records
            pass

        # We should not be able to add a duplicate
        result, out, err = self.runsubcmd("dns", "add", os.environ["SERVER"],
                                          self.zone, "testrecord", record_type_str,
                                          record_str, self.creds_string)
        try:
            self.assertCmdFail(result, "Successfully added duplicate record"
                               "of one which had DNS_RANK_NONE.")
        except AssertionError as e:
            errors.append(e)

        # We should be able to delete it
        result, out, err = self.runsubcmd("dns", "delete", os.environ["SERVER"],
                                          self.zone, "testrecord", record_type_str,
                                          record_str, self.creds_string)
        try:
            self.assertCmdSuccess(result, out, err, "Failed to delete record"
                                  "which had DNS_RANK_NONE.")
        except AssertionError as e:
            errors.append(e)

        # Now the record should not exist
        result, out, err = self.runsubcmd("dns", "query", os.environ["SERVER"],
                                          self.zone, "testrecord",
                                          record_type_str, self.creds_string)
        try:
            self.assertCmdFail(result, "Successfully queried for deleted record"
                               "which had DNS_RANK_NONE.")
        except AssertionError as e:
            errors.append(e)

        if len(errors) > 0:
            err_str = "Failed appropriate behaviour with DNS_RANK_NONE:"
            for error in errors:
                err_str = err_str + "\n" + str(error)
            raise AssertionError(err_str)
+
    def test_accept_valid_commands(self):
        """
        For all good records, attempt to add, query and delete them.
        """
        # Collect failures rather than aborting, so one bad record type
        # does not mask problems with the others.
        num_failures = 0
        failure_msgs = []
        for dnstype in self.good_records:
            for record in self.good_records[dnstype]:
                try:
                    result, out, err = self.runsubcmd("dns", "add",
                                                      os.environ["SERVER"],
                                                      self.zone, "testrecord",
                                                      dnstype, record,
                                                      self.creds_string)
                    self.assertCmdSuccess(result, out, err, "Failed to add"
                                          "record %s with type %s."
                                          % (record, dnstype))

                    result, out, err = self.runsubcmd("dns", "query",
                                                      os.environ["SERVER"],
                                                      self.zone, "testrecord",
                                                      dnstype,
                                                      self.creds_string)
                    self.assertCmdSuccess(result, out, err, "Failed to query"
                                          "record %s with qualifier %s."
                                          % (record, dnstype))

                    result, out, err = self.runsubcmd("dns", "delete",
                                                      os.environ["SERVER"],
                                                      self.zone, "testrecord",
                                                      dnstype, record,
                                                      self.creds_string)
                    self.assertCmdSuccess(result, out, err, "Failed to remove"
                                          "record %s with type %s."
                                          % (record, dnstype))
                except AssertionError as e:
                    num_failures = num_failures + 1
                    failure_msgs.append(e)

        if num_failures > 0:
            for msg in failure_msgs:
                print(msg)
            self.fail("Failed to accept valid commands. %d total failures."
                      "Errors above." % num_failures)
+
+ def test_reject_invalid_commands(self):
+ """
+ For all bad records, attempt to add them and update to them,
+ making sure that both operations fail.
+ """
+ num_failures = 0
+ failure_msgs = []
+
+ # Add invalid records and make sure they fail to be added
+ for dnstype in self.bad_records:
+ for record in self.bad_records[dnstype]:
+ try:
+ result, out, err = self.runsubcmd("dns", "add",
+ os.environ["SERVER"],
+ self.zone, "testrecord",
+ dnstype, record,
+ self.creds_string)
+ self.assertCmdFail(result, "Successfully added invalid"
+ "record '%s' of type '%s'."
+ % (record, dnstype))
+ except AssertionError as e:
+ num_failures = num_failures + 1
+ failure_msgs.append(e)
+ self.resetZone()
+ try:
+ result, out, err = self.runsubcmd("dns", "delete",
+ os.environ["SERVER"],
+ self.zone, "testrecord",
+ dnstype, record,
+ self.creds_string)
+ self.assertCmdFail(result, "Successfully deleted invalid"
+ "record '%s' of type '%s' which"
+ "shouldn't exist." % (record, dnstype))
+ except AssertionError as e:
+ num_failures = num_failures + 1
+ failure_msgs.append(e)
+ self.resetZone()
+
+ # Update valid records to invalid ones and make sure they
+ # fail to be updated
+ for dnstype in self.bad_records:
+ for bad_record in self.bad_records[dnstype]:
+ good_record = self.good_records[dnstype][0]
+
+ try:
+ result, out, err = self.runsubcmd("dns", "add",
+ os.environ["SERVER"],
+ self.zone, "testrecord",
+ dnstype, good_record,
+ self.creds_string)
+ self.assertCmdSuccess(result, out, err, "Failed to add "
+ "record '%s' with type %s."
+ % (record, dnstype))
+
+ result, out, err = self.runsubcmd("dns", "update",
+ os.environ["SERVER"],
+ self.zone, "testrecord",
+ dnstype, good_record,
+ bad_record,
+ self.creds_string)
+ self.assertCmdFail(result, "Successfully updated valid "
+ "record '%s' of type '%s' to invalid "
+ "record '%s' of the same type."
+ % (good_record, dnstype, bad_record))
+
+ result, out, err = self.runsubcmd("dns", "delete",
+ os.environ["SERVER"],
+ self.zone, "testrecord",
+ dnstype, good_record,
+ self.creds_string)
+ self.assertCmdSuccess(result, out, err, "Could not delete "
+ "valid record '%s' of type '%s'."
+ % (good_record, dnstype))
+ except AssertionError as e:
+ num_failures = num_failures + 1
+ failure_msgs.append(e)
+ self.resetZone()
+
+ if num_failures > 0:
+ for msg in failure_msgs:
+ print(msg)
+ self.fail("Failed to reject invalid commands. %d total failures. "
+ "Errors above." % num_failures)
+
    def test_update_invalid_type(self):
        """Make sure that a record can't be updated to another type leaving
        the data the same, where that data would be incompatible with
        the new type. This is not always enforced at the C level.

        We don't try with all types, because many types are compatible
        in their representations (e.g. A records could be TXT or CNAME
        records; PTR record values are exactly the same as CNAME
        record values, etc).
        """
        dnstypes = ('A', 'AAAA', 'SRV')
        for dnstype1 in dnstypes:
            record1 = self.good_records[dnstype1][0]
            result, out, err = self.runsubcmd("dns", "add",
                                              os.environ["SERVER"],
                                              self.zone, "testrecord",
                                              dnstype1, record1,
                                              self.creds_string)
            self.assertCmdSuccess(result, out, err, "Failed to add "
                                  "record %s with type %s."
                                  % (record1, dnstype1))

            for dnstype2 in dnstypes:
                if dnstype1 == dnstype2:
                    continue

                record2 = self.good_records[dnstype2][0]

                # Check both ways: Give the current type and try to update,
                # and give the new type and try to update.
                result, out, err = self.runsubcmd("dns", "update",
                                                  os.environ["SERVER"],
                                                  self.zone, "testrecord",
                                                  dnstype1, record1,
                                                  record2, self.creds_string)
                self.assertCmdFail(result, "Successfully updated record '%s' "
                                   "to '%s', even though the latter is of "
                                   "type '%s' where '%s' was expected."
                                   % (record1, record2, dnstype2, dnstype1))

                result, out, err = self.runsubcmd("dns", "update",
                                                  os.environ["SERVER"],
                                                  self.zone, "testrecord",
                                                  dnstype2, record1, record2,
                                                  self.creds_string)
                self.assertCmdFail(result, "Successfully updated record "
                                   "'%s' to '%s', even though the former "
                                   "is of type '%s' where '%s' was expected."
                                   % (record1, record2, dnstype1, dnstype2))
+
    def test_update_valid_type(self):
        """Updates between valid values of the same type must succeed, and
        an update must require the correct current value as its source."""
        # Every good record can be "updated" to itself.
        for dnstype in self.good_records:
            for record in self.good_records[dnstype]:
                result, out, err = self.runsubcmd("dns", "add",
                                                  os.environ["SERVER"],
                                                  self.zone, "testrecord",
                                                  dnstype, record,
                                                  self.creds_string)
                self.assertCmdSuccess(result, out, err, "Failed to add "
                                      "record %s with type %s."
                                      % (record, dnstype))

                if record == '.' and dnstype != 'TXT':
                    # This will fail because the update finds a match
                    # for "." that is actually "" (in
                    # dns_record_match()), then uses the "" record in
                    # a call to dns_to_dnsp_convert() which calls
                    # dns_name_check() which rejects "" as a bad DNS
                    # name. Maybe FIXME, maybe not.
                    continue

                # Update the record to be the same.
                result, out, err = self.runsubcmd("dns", "update",
                                                  os.environ["SERVER"],
                                                  self.zone, "testrecord",
                                                  dnstype, record, record,
                                                  self.creds_string)
                self.assertCmdSuccess(result, out, err,
                                      "Could not update record "
                                      "'%s' to be exactly the same." % record)

                result, out, err = self.runsubcmd("dns", "delete",
                                                  os.environ["SERVER"],
                                                  self.zone, "testrecord",
                                                  dnstype, record,
                                                  self.creds_string)
                self.assertCmdSuccess(result, out, err, "Could not delete "
                                      "valid record '%s' of type '%s'."
                                      % (record, dnstype))

        # SRV records can be updated to a genuinely different value
        # (weight field bumped by one).
        for record in self.good_records["SRV"]:
            result, out, err = self.runsubcmd("dns", "add",
                                              os.environ["SERVER"],
                                              self.zone, "testrecord",
                                              "SRV", record,
                                              self.creds_string)
            self.assertCmdSuccess(result, out, err, "Failed to add "
                                  "record %s with type 'SRV'." % record)

            split = record.split()
            new_bit = str(int(split[3]) + 1)
            new_record = '%s %s %s %s' % (split[0], split[1], split[2], new_bit)

            result, out, err = self.runsubcmd("dns", "update",
                                              os.environ["SERVER"],
                                              self.zone, "testrecord",
                                              "SRV", record,
                                              new_record, self.creds_string)
            self.assertCmdSuccess(result, out, err, "Failed to update record "
                                  "'%s' of type '%s' to '%s'."
                                  % (record, "SRV", new_record))

            result, out, err = self.runsubcmd("dns", "query",
                                              os.environ["SERVER"],
                                              self.zone, "testrecord",
                                              "SRV", self.creds_string)
            self.assertCmdSuccess(result, out, err, "Failed to query for "
                                  "record '%s' of type '%s'."
                                  % (new_record, "SRV"))

            result, out, err = self.runsubcmd("dns", "delete",
                                              os.environ["SERVER"],
                                              self.zone, "testrecord",
                                              "SRV", new_record,
                                              self.creds_string)
            self.assertCmdSuccess(result, out, err, "Could not delete "
                                  "valid record '%s' of type '%s'."
                                  % (new_record, "SRV"))

        # Since 'dns update' takes the current value as a parameter, make sure
        # we can't enter the wrong current value for a given record.
        for dnstype in self.good_records:
            if len(self.good_records[dnstype]) < 3:
                continue  # Not enough records of this type to do this test

            used_record = self.good_records[dnstype][0]
            unused_record = self.good_records[dnstype][1]
            new_record = self.good_records[dnstype][2]

            result, out, err = self.runsubcmd("dns", "add",
                                              os.environ["SERVER"],
                                              self.zone, "testrecord",
                                              dnstype, used_record,
                                              self.creds_string)
            self.assertCmdSuccess(result, out, err, "Failed to add record %s "
                                  "with type %s." % (used_record, dnstype))

            result, out, err = self.runsubcmd("dns", "update",
                                              os.environ["SERVER"],
                                              self.zone, "testrecord",
                                              dnstype, unused_record,
                                              new_record,
                                              self.creds_string)
            self.assertCmdFail(result, "Successfully updated record '%s' "
                               "from '%s' to '%s', even though the given "
                               "source record is incorrect."
                               % (used_record, unused_record, new_record))
+
    def test_invalid_types(self):
        """Adding an unsupported record type (SOA) must fail with a clear
        error message."""
        result, out, err = self.runsubcmd("dns", "add",
                                          os.environ["SERVER"],
                                          self.zone, "testrecord",
                                          "SOA", "test",
                                          self.creds_string)
        self.assertCmdFail(result, "Successfully added record of type SOA, "
                           "when this type should not be available.")
        self.assertTrue("type SOA is not supported" in err,
                        "Invalid error message '%s' when attempting to "
                        "add record of type SOA." % err)
+
    def test_add_overlapping_different_type(self):
        """
        Make sure that we can add an entry with the same name as an existing one but a different type.
        """

        i = 0
        for dnstype1 in self.good_records:
            record1 = self.good_records[dnstype1][0]
            for dnstype2 in self.good_records:
                # Only do some subset of dns types, otherwise it takes a long time.
                i += 1
                if i % 4 != 0:
                    continue

                if dnstype1 == dnstype2:
                    continue

                record2 = self.good_records[dnstype2][0]

                result, out, err = self.runsubcmd("dns", "add",
                                                  os.environ["SERVER"],
                                                  self.zone, "testrecord",
                                                  dnstype1, record1,
                                                  self.creds_string)
                self.assertCmdSuccess(result, out, err, "Failed to add record "
                                      "'%s' of type '%s'." % (record1, dnstype1))

                result, out, err = self.runsubcmd("dns", "add",
                                                  os.environ["SERVER"],
                                                  self.zone, "testrecord",
                                                  dnstype2, record2,
                                                  self.creds_string)
                self.assertCmdSuccess(result, out, err, "Failed to add record "
                                      "'%s' of type '%s' when a record '%s' "
                                      "of type '%s' with the same name exists."
                                      % (record1, dnstype1, record2, dnstype2))

                # Both records must remain individually queryable.
                result, out, err = self.runsubcmd("dns", "query",
                                                  os.environ["SERVER"],
                                                  self.zone, "testrecord",
                                                  dnstype1, self.creds_string)
                self.assertCmdSuccess(result, out, err, "Failed to query for "
                                      "record '%s' of type '%s' when a new "
                                      "record '%s' of type '%s' with the same "
                                      "name was added."
                                      % (record1, dnstype1, record2, dnstype2))

                result, out, err = self.runsubcmd("dns", "query",
                                                  os.environ["SERVER"],
                                                  self.zone, "testrecord",
                                                  dnstype2, self.creds_string)
                self.assertCmdSuccess(result, out, err, "Failed to query "
                                      "record '%s' of type '%s' which should "
                                      "have been added with the same name as "
                                      "record '%s' of type '%s'."
                                      % (record2, dnstype2, record1, dnstype1))

                # Clean up both records for the next iteration.
                result, out, err = self.runsubcmd("dns", "delete",
                                                  os.environ["SERVER"],
                                                  self.zone, "testrecord",
                                                  dnstype1, record1,
                                                  self.creds_string)
                self.assertCmdSuccess(result, out, err, "Failed to delete "
                                      "record '%s' of type '%s'."
                                      % (record1, dnstype1))

                result, out, err = self.runsubcmd("dns", "delete",
                                                  os.environ["SERVER"],
                                                  self.zone, "testrecord",
                                                  dnstype2, record2,
                                                  self.creds_string)
                self.assertCmdSuccess(result, out, err, "Failed to delete "
                                      "record '%s' of type '%s'."
                                      % (record2, dnstype2))
+
    def test_query_deleted_record(self):
        """Querying a record that was added and then deleted must fail."""
        self.runsubcmd("dns", "add", os.environ["SERVER"], self.zone,
                       "testrecord", "A", self.testip, self.creds_string)
        self.runsubcmd("dns", "delete", os.environ["SERVER"], self.zone,
                       "testrecord", "A", self.testip, self.creds_string)

        result, out, err = self.runsubcmd("dns", "query",
                                          os.environ["SERVER"],
                                          self.zone, "testrecord",
                                          "A", self.creds_string)
        self.assertCmdFail(result)
+
    def test_add_duplicate_record(self):
        """Adding the exact same record twice must fail, and must leave the
        original record intact."""
        for record_type in self.good_records:
            result, out, err = self.runsubcmd("dns", "add",
                                              os.environ["SERVER"],
                                              self.zone, "testrecord",
                                              record_type,
                                              self.good_records[record_type][0],
                                              self.creds_string)
            self.assertCmdSuccess(result, out, err)
            # Second, identical add must be rejected.
            result, out, err = self.runsubcmd("dns", "add",
                                              os.environ["SERVER"],
                                              self.zone, "testrecord",
                                              record_type,
                                              self.good_records[record_type][0],
                                              self.creds_string)
            self.assertCmdFail(result)
            # The original record must still be queryable and deletable.
            result, out, err = self.runsubcmd("dns", "query",
                                              os.environ["SERVER"],
                                              self.zone, "testrecord",
                                              record_type, self.creds_string)
            self.assertCmdSuccess(result, out, err)
            result, out, err = self.runsubcmd("dns", "delete",
                                              os.environ["SERVER"],
                                              self.zone, "testrecord",
                                              record_type,
                                              self.good_records[record_type][0],
                                              self.creds_string)
            self.assertCmdSuccess(result, out, err)
+
    def test_remove_deleted_record(self):
        """Deleting an already-deleted or never-existing record must fail."""
        self.runsubcmd("dns", "add", os.environ["SERVER"], self.zone,
                       "testrecord", "A", self.testip, self.creds_string)
        self.runsubcmd("dns", "delete", os.environ["SERVER"], self.zone,
                       "testrecord", "A", self.testip, self.creds_string)

        # Attempting to delete a record that has already been deleted or has never existed should fail
        result, out, err = self.runsubcmd("dns", "delete",
                                          os.environ["SERVER"],
                                          self.zone, "testrecord",
                                          "A", self.testip, self.creds_string)
        self.assertCmdFail(result)
        result, out, err = self.runsubcmd("dns", "query",
                                          os.environ["SERVER"],
                                          self.zone, "testrecord",
                                          "A", self.creds_string)
        self.assertCmdFail(result)
        # A record name that never existed at all.
        result, out, err = self.runsubcmd("dns", "delete",
                                          os.environ["SERVER"],
                                          self.zone, "testrecord2",
                                          "A", self.testip, self.creds_string)
        self.assertCmdFail(result)
+
    def test_cleanup_record(self):
        """
        Test dns cleanup command is working fine.
        """

        # add a A record
        self.runsubcmd("dns", "add", os.environ["SERVER"], self.zone,
                       'testa', "A", self.testip, self.creds_string)

        # the above A record points to this host
        dnshostname = '{0}.{1}'.format('testa', self.zone.lower())

        # add a CNAME record points to above host
        self.runsubcmd("dns", "add", os.environ["SERVER"], self.zone,
                       'testcname', "CNAME", dnshostname, self.creds_string)

        # add a NS record
        self.runsubcmd("dns", "add", os.environ["SERVER"], self.zone,
                       'testns', "NS", dnshostname, self.creds_string)

        # add a PTR record points to above host
        self.runsubcmd("dns", "add", os.environ["SERVER"], self.zone,
                       'testptr', "PTR", dnshostname, self.creds_string)

        # add a SRV record points to above host
        srv_record = "{0} 65530 65530 65530".format(dnshostname)
        self.runsubcmd("dns", "add", os.environ["SERVER"], self.zone,
                       'testsrv', "SRV", srv_record, self.creds_string)

        # cleanup record for this dns host
        self.runsubcmd("dns", "cleanup", os.environ["SERVER"],
                       dnshostname, self.creds_string)

        # all records should be marked as dNSTombstoned
        for record_name in ['testa', 'testcname', 'testns', 'testptr', 'testsrv']:

            records = self.samdb.search(
                base="DC=DomainDnsZones,{0}".format(self.samdb.get_default_basedn()),
                scope=ldb.SCOPE_SUBTREE,
                expression="(&(objectClass=dnsNode)(name={0}))".format(record_name),
                attrs=["dNSTombstoned"])

            self.assertEqual(len(records), 1)
            for record in records:
                self.assertEqual(str(record['dNSTombstoned']), 'TRUE')
+
+    def test_cleanup_record_no_A_record(self):
+        """
+        Test dns cleanup command works with no A record.
+        """
+
+        # add a A record
+        self.runsubcmd("dns", "add", os.environ["SERVER"], self.zone,
+                       'notesta', "A", self.testip, self.creds_string)
+
+        # the above A record points to this host
+        # NOTE(review): dnshostname is built from 'testa' while the A record
+        # added above is named 'notesta', so the CNAME/NS/PTR/SRV records
+        # below reference a name that never has an A record -- presumably
+        # deliberate for this "no A record" scenario, but confirm whether
+        # the later delete of 'notesta' is actually needed.
+        dnshostname = '{0}.{1}'.format('testa', self.zone.lower())
+
+        # add a CNAME record points to above host
+        self.runsubcmd("dns", "add", os.environ["SERVER"], self.zone,
+                       'notestcname', "CNAME", dnshostname, self.creds_string)
+
+        # add a NS record
+        self.runsubcmd("dns", "add", os.environ["SERVER"], self.zone,
+                       'notestns', "NS", dnshostname, self.creds_string)
+
+        # add a PTR record points to above host
+        self.runsubcmd("dns", "add", os.environ["SERVER"], self.zone,
+                       'notestptr', "PTR", dnshostname, self.creds_string)
+
+        # add a SRV record points to above host
+        srv_record = "{0} 65530 65530 65530".format(dnshostname)
+        self.runsubcmd("dns", "add", os.environ["SERVER"], self.zone,
+                       'notestsrv', "SRV", srv_record, self.creds_string)
+
+        # Remove the initial A record (leading to hanging references)
+        self.runsubcmd("dns", "delete", os.environ["SERVER"], self.zone,
+                       'notesta', "A", self.testip, self.creds_string)
+
+        # cleanup record for this dns host
+        self.runsubcmd("dns", "cleanup", os.environ["SERVER"],
+                       dnshostname, self.creds_string)
+
+        # all records should be marked as dNSTombstoned
+        for record_name in ['notestcname', 'notestns', 'notestptr', 'notestsrv']:
+
+            records = self.samdb.search(
+                base="DC=DomainDnsZones,{0}".format(self.samdb.get_default_basedn()),
+                scope=ldb.SCOPE_SUBTREE,
+                expression="(&(objectClass=dnsNode)(name={0}))".format(record_name),
+                attrs=["dNSTombstoned"])
+
+            # exactly one dnsNode per name, and it must be tombstoned
+            self.assertEqual(len(records), 1)
+            for record in records:
+                self.assertEqual(str(record['dNSTombstoned']), 'TRUE')
+
+    def test_cleanup_multi_srv_record(self):
+        """
+        Test dns cleanup command for multi-valued SRV record.
+
+        Steps:
+        - Add 2 A records host1 and host2
+        - Add a SRV record srv1 and points to both host1 and host2
+        - Run cleanup command for host1
+        - Check records for srv1, data for host1 should be gone and host2 is kept.
+        """
+
+        hosts = ['host1', 'host2'] # A record names
+        srv_name = 'srv1'
+
+        # add A records
+        for host in hosts:
+            self.runsubcmd("dns", "add", os.environ["SERVER"], self.zone,
+                           host, "A", self.testip, self.creds_string)
+
+            # the above A record points to this host
+            dnshostname = '{0}.{1}'.format(host, self.zone.lower())
+
+            # add a SRV record points to above host
+            # (one SRV add per host -> srv1 ends up with two values)
+            srv_record = "{0} 65530 65530 65530".format(dnshostname)
+            self.runsubcmd("dns", "add", os.environ["SERVER"], self.zone,
+                           srv_name, "SRV", srv_record, self.creds_string)
+
+        records = self.samdb.search(
+            base="DC=DomainDnsZones,{0}".format(self.samdb.get_default_basedn()),
+            scope=ldb.SCOPE_SUBTREE,
+            expression="(&(objectClass=dnsNode)(name={0}))".format(srv_name),
+            attrs=['dnsRecord'])
+        # should have 2 records here
+        self.assertEqual(len(records[0]['dnsRecord']), 2)
+
+        # cleanup record for dns host1
+        dnshostname1 = 'host1.{0}'.format(self.zone.lower())
+        self.runsubcmd("dns", "cleanup", os.environ["SERVER"],
+                       dnshostname1, self.creds_string)
+
+        records = self.samdb.search(
+            base="DC=DomainDnsZones,{0}".format(self.samdb.get_default_basedn()),
+            scope=ldb.SCOPE_SUBTREE,
+            expression="(&(objectClass=dnsNode)(name={0}))".format(srv_name),
+            attrs=['dnsRecord', 'dNSTombstoned'])
+
+        # dnsRecord for host1 should be deleted
+        self.assertEqual(len(records[0]['dnsRecord']), 1)
+
+        # unpack data
+        dns_record_bin = records[0]['dnsRecord'][0]
+        dns_record_obj = ndr_unpack(dnsp.DnssrvRpcRecord, dns_record_bin)
+
+        # dnsRecord for host2 is still there and is the only one
+        dnshostname2 = 'host2.{0}'.format(self.zone.lower())
+        self.assertEqual(dns_record_obj.data.nameTarget, dnshostname2)
+
+        # assert that the record isn't spuriously tombstoned
+        self.assertTrue('dNSTombstoned' not in records[0] or
+                        str(records[0]['dNSTombstoned']) == 'FALSE')
+
+ def test_dns_wildcards(self):
+ """
+ Ensure that DNS wild card entries can be added deleted and queried
+ """
+ num_failures = 0
+ failure_msgs = []
+ records = [("*.", "MISS", "A", "1.1.1.1"),
+ ("*.SAMDOM", "MISS.SAMDOM", "A", "1.1.1.2")]
+ for (name, miss, dnstype, record) in records:
+ try:
+ result, out, err = self.runsubcmd("dns", "add",
+ os.environ["SERVER"],
+ self.zone, name,
+ dnstype, record,
+ self.creds_string)
+ self.assertCmdSuccess(
+ result,
+ out,
+ err,
+ ("Failed to add record %s (%s) with type %s."
+ % (name, record, dnstype)))
+
+ result, out, err = self.runsubcmd("dns", "query",
+ os.environ["SERVER"],
+ self.zone, name,
+ dnstype,
+ self.creds_string)
+ self.assertCmdSuccess(
+ result,
+ out,
+ err,
+ ("Failed to query record %s with qualifier %s."
+ % (record, dnstype)))
+
+ # dns tool does not perform dns wildcard search if the name
+ # does not match
+ result, out, err = self.runsubcmd("dns", "query",
+ os.environ["SERVER"],
+ self.zone, miss,
+ dnstype,
+ self.creds_string)
+ self.assertCmdFail(
+ result,
+ ("Failed to query record %s with qualifier %s."
+ % (record, dnstype)))
+
+ result, out, err = self.runsubcmd("dns", "delete",
+ os.environ["SERVER"],
+ self.zone, name,
+ dnstype, record,
+ self.creds_string)
+ self.assertCmdSuccess(
+ result,
+ out,
+ err,
+ ("Failed to remove record %s with type %s."
+ % (record, dnstype)))
+ except AssertionError as e:
+ num_failures = num_failures + 1
+ failure_msgs.append(e)
+
+ if num_failures > 0:
+ for msg in failure_msgs:
+ print(msg)
+ self.fail("Failed to accept valid commands. %d total failures."
+ "Errors above." % num_failures)
+
+ def test_serverinfo(self):
+ for v in ['w2k', 'dotnet', 'longhorn']:
+ result, out, err = self.runsubcmd("dns",
+ "serverinfo",
+ "--client-version", v,
+ os.environ["SERVER"],
+ self.creds_string)
+ self.assertCmdSuccess(result,
+ out,
+ err,
+ "Failed to print serverinfo with "
+ "client version %s" % v)
+ self.assertTrue(out != '')
+
+ def test_zoneinfo(self):
+ result, out, err = self.runsubcmd("dns",
+ "zoneinfo",
+ os.environ["SERVER"],
+ self.zone,
+ self.creds_string)
+ self.assertCmdSuccess(result,
+ out,
+ err,
+ "Failed to print zoneinfo")
+ self.assertTrue(out != '')
+
+    def test_zoneoptions_aging(self):
+        """Table-driven check of zoneoptions aging/refresh arguments.
+
+        Each case gives the options to pass, the zoneinfo values expected
+        afterwards, and whether the command is expected to fail.
+        """
+        for options, vals, error in (
+                (['--aging=1'], {'fAging': 'TRUE'}, False),
+                (['--aging=0'], {'fAging': 'FALSE'}, False),
+                (['--aging=-1'], {'fAging': 'FALSE'}, True),
+                (['--aging=2'], {}, True),
+                (['--aging=2', '--norefreshinterval=1'], {}, True),
+                (['--aging=1', '--norefreshinterval=1'],
+                 {'fAging': 'TRUE', 'dwNoRefreshInterval': '1'}, False),
+                (['--aging=1', '--norefreshinterval=0'],
+                 {'fAging': 'TRUE', 'dwNoRefreshInterval': '0'}, False),
+                (['--aging=0', '--norefreshinterval=99', '--refreshinterval=99'],
+                 {'fAging': 'FALSE',
+                  'dwNoRefreshInterval': '99',
+                  'dwRefreshInterval': '99'}, False),
+                (['--aging=0', '--norefreshinterval=-99', '--refreshinterval=99'],
+                 {}, True),
+                (['--refreshinterval=9999999'], {}, True),
+                (['--norefreshinterval=9999999'], {}, True),
+        ):
+            result, out, err = self.runsubcmd("dns",
+                                              "zoneoptions",
+                                              os.environ["SERVER"],
+                                              self.zone,
+                                              self.creds_string,
+                                              *options)
+            if error:
+                self.assertCmdFail(result, "zoneoptions should fail")
+            else:
+                self.assertCmdSuccess(result,
+                                      out,
+                                      err,
+                                      "zoneoptions shouldn't fail")
+
+
+            info_r, info_out, info_err = self.runsubcmd("dns",
+                                                        "zoneinfo",
+                                                        os.environ["SERVER"],
+                                                        self.zone,
+                                                        self.creds_string)
+
+            self.assertCmdSuccess(info_r,
+                                  info_out,
+                                  info_err,
+                                  "zoneinfo shouldn't fail after zoneoptions")
+
+            # parse "key : value" lines out of the zoneinfo output
+            info = {k: v for k, v in re.findall(r'^\s*(\w+)\s*:\s*(\w+)\s*$',
+                                                info_out,
+                                                re.MULTILINE)}
+            for k, v in vals.items():
+                self.assertIn(k, info)
+                self.assertEqual(v, info[k])
+
+
+    def ldap_add_node_with_records(self, name, records):
+        """Create a dnsNode under the zone directly over LDAP.
+
+        records: list of dicts; each may override 'wType' (default
+        DNS_TYPE_A), 'dwTimeStamp' (default 0, i.e. static) and 'data'
+        (default '10.10.10.10').  Writing the packed DnssrvRpcRecord blobs
+        straight into the directory lets tests choose timestamps
+        explicitly.
+        """
+        dn = (f"DC={name},DC={self.zone},CN=MicrosoftDNS,DC=DomainDNSZones,"
+              f"{self.samdb.get_default_basedn()}")
+
+        dns_records = []
+        for r in records:
+            rec = dnsp.DnssrvRpcRecord()
+            rec.wType = r.get('wType', dnsp.DNS_TYPE_A)
+            rec.rank = dnsp.DNS_RANK_ZONE
+            rec.dwTtlSeconds = 900
+            rec.dwTimeStamp = r.get('dwTimeStamp', 0)
+            rec.data = r.get('data', '10.10.10.10')
+            dns_records.append(ndr_pack(rec))
+
+        msg = ldb.Message.from_dict(self.samdb,
+                                    {'dn': dn,
+                                     "objectClass": ["top", "dnsNode"],
+                                     'dnsRecord': dns_records
+                                     })
+        self.samdb.add(msg)
+
+    def get_timestamp_map(self):
+        """Return {record name: [(flag, timestamp), ...]} for all records.
+
+        flag is 'R' for A, AAAA and TXT records and '-' for everything
+        else.  Timestamps within a few seconds of now are collapsed to the
+        string 'nowish' so freshly-touched dynamic records compare equal
+        across test steps.
+        """
+        re_wtypes = (dnsp.DNS_TYPE_A,
+                     dnsp.DNS_TYPE_AAAA,
+                     dnsp.DNS_TYPE_TXT)
+
+        t = time.time()
+        now = dsdb_dns.unix_to_dns_timestamp(int(t))
+
+        records = self.get_all_records(self.zone)
+        tsmap = {}
+        for k, recs in records.items():
+            m = []
+            tsmap[k] = m
+            for rec in recs:
+                r = ndr_unpack(dnsp.DnssrvRpcRecord, rec)
+                timestamp = r.dwTimeStamp
+                # dwTimeStamp is in DNS timestamp units; 'nowish' absorbs
+                # the small skew between the test and the server.
+                if abs(timestamp - now) < 3:
+                    timestamp = 'nowish'
+
+                if r.wType in re_wtypes:
+                    m.append(('R', timestamp))
+                else:
+                    m.append(('-', timestamp))
+
+        return tsmap
+
+
+    def test_zoneoptions_mark_records(self):
+        """Table-driven check of the zoneoptions --mark-* options.
+
+        Each case lists the options to pass, the expected change to the
+        timestamp map (name -> [(flag, timestamp), ...]) relative to the
+        previous step, expected output substrings, and whether an error is
+        expected.  State carries over between steps via tsmap.
+        """
+        self.maxDiff = 10000
+        # We need a number of records to work with, so we'll use part
+        # of our known good records list, using three different names
+        # to test the regex. All these records will be static.
+        for dnstype in self.good_records:
+            for record in self.good_records[dnstype][:2]:
+                self.runsubcmd("dns", "add",
+                               os.environ["SERVER"],
+                               self.zone, "frobitz",
+                               dnstype, record,
+                               self.creds_string)
+                self.runsubcmd("dns", "add",
+                               os.environ["SERVER"],
+                               self.zone, "weergly",
+                               dnstype, record,
+                               self.creds_string)
+                self.runsubcmd("dns", "add",
+                               os.environ["SERVER"],
+                               self.zone, "snizle",
+                               dnstype, record,
+                               self.creds_string)
+
+        # and we also want some that aren't static, and some mixed
+        # static/dynamic records.
+        # timestamps are in hours since 1601; now ~= 3.7 million
+        for ts in (0, 100, 10 ** 6, 10 ** 7):
+            name = f"ts-{ts}"
+            self.ldap_add_node_with_records(name, [{"dwTimeStamp": ts}])
+
+        recs = []
+        for ts in (0, 100, 10 ** 6, 10 ** 7):
+            addr = f'10.{(ts >> 16) & 255}.{(ts >> 8) & 255}.{ts & 255}'
+            recs.append({"dwTimeStamp": ts, "data": addr})
+
+        self.ldap_add_node_with_records("ts-multi", recs)
+
+        # get the state of ALL records.
+        # then we make assertions about the diffs, keeping track of
+        # the current state.
+
+        tsmap = self.get_timestamp_map()
+
+
+
+        for options, diff, output_substrings, error in (
+            # --mark-old-records-static
+            # --mark-records-static-regex
+            # --mark-records-dynamic-regex
+            (
+                ['--mark-old-records-static=1971-13-04'],
+                {},
+                [],
+                "bad date"
+            ),
+            (
+                # using --dry-run, should be no change, but output.
+                ['--mark-old-records-static=1971-03-04', '--dry-run'],
+                {},
+                [
+                    "would make 1/1 records static on ts-1000000.zone.",
+                    "would make 1/1 records static on ts-100.zone.",
+                    "would make 2/4 records static on ts-multi.zone.",
+                ],
+                False
+            ),
+            (
+                # timestamps < ~ 3.25 million are now static
+                ['--mark-old-records-static=1971-03-04'],
+                {
+                    'ts-100': [('R', 0)],
+                    'ts-1000000': [('R', 0)],
+                    'ts-multi': [('R', 0), ('R', 0), ('R', 0), ('R', 10000000)]
+                },
+                [
+                    "made 1/1 records static on ts-1000000.zone.",
+                    "made 1/1 records static on ts-100.zone.",
+                    "made 2/4 records static on ts-multi.zone.",
+                ],
+                False
+            ),
+            (
+                # no change, old records already static
+                ['--mark-old-records-static=1972-03-04'],
+                {},
+                [],
+                False
+            ),
+            (
+                # no change, samba-tool added records already static
+                ['--mark-records-static-regex=sniz'],
+                {},
+                [],
+                False
+            ),
+            (
+                # snizle has 2 A, 2 AAAA, 10 fancy, and 2 TXT records, in
+                # that order.
+                # the A, AAAA, and TXT records should be dynamic
+                ['--mark-records-dynamic-regex=sniz'],
+                {'snizle': [('R', 'nowish'),
+                            ('R', 'nowish'),
+                            ('R', 'nowish'),
+                            ('R', 'nowish'),
+                            ('-', 0),
+                            ('-', 0),
+                            ('-', 0),
+                            ('-', 0),
+                            ('-', 0),
+                            ('-', 0),
+                            ('-', 0),
+                            ('-', 0),
+                            ('-', 0),
+                            ('-', 0),
+                            ('R', 'nowish'),
+                            ('R', 'nowish')]
+                 },
+                ['made 6/16 records dynamic on snizle.zone.'],
+                False
+            ),
+            (
+                # This regex should catch snizle, weergly, and ts-*
+                # but we're doing dry-run so no change
+                ['--mark-records-dynamic-regex=[sw]', '-n'],
+                {},
+                ['would make 3/4 records dynamic on ts-multi.zone.',
+                 'would make 1/1 records dynamic on ts-0.zone.',
+                 'would make 1/1 records dynamic on ts-1000000.zone.',
+                 'would make 6/16 records dynamic on weergly.zone.',
+                 'would make 1/1 records dynamic on ts-100.zone.'
+                 ],
+                False
+            ),
+            (
+                # This regex should catch snizle and frobitz
+                # but snizle has already been changed.
+                ['--mark-records-dynamic-regex=z'],
+                {'frobitz': [('R', 'nowish'),
+                             ('R', 'nowish'),
+                             ('R', 'nowish'),
+                             ('R', 'nowish'),
+                             ('-', 0),
+                             ('-', 0),
+                             ('-', 0),
+                             ('-', 0),
+                             ('-', 0),
+                             ('-', 0),
+                             ('-', 0),
+                             ('-', 0),
+                             ('-', 0),
+                             ('-', 0),
+                             ('R', 'nowish'),
+                             ('R', 'nowish')]
+                 },
+                ['made 6/16 records dynamic on frobitz.zone.'],
+                False
+            ),
+            (
+                # This regex should catch snizle, frobitz, and
+                # ts-multi. Note that the 1e7 ts-multi record is
+                # already dynamic and doesn't change.
+                ['--mark-records-dynamic-regex=[i]'],
+                {'ts-multi': [('R', 'nowish'),
+                              ('R', 'nowish'),
+                              ('R', 'nowish'),
+                              ('R', 10000000)]
+                 },
+                ['made 3/4 records dynamic on ts-multi.zone.'],
+                False
+            ),
+            (
+                # matches no records
+                ['--mark-records-dynamic-regex=^aloooooo[qw]+'],
+                {},
+                [],
+                False
+            ),
+            (
+                # This should be an error, as only one --mark-*
+                # argument is allowed at a time
+                ['--mark-records-dynamic-regex=.',
+                 '--mark-records-static-regex=.',
+                 ],
+                {},
+                [],
+                True
+            ),
+            (
+                # This should also be an error
+                ['--mark-old-records-static=1997-07-07',
+                 '--mark-records-static-regex=.',
+                 ],
+                {},
+                [],
+                True
+            ),
+            (
+                # This should not be an error. --aging and refresh
+                # options can be mixed with --mark ones.
+                ['--mark-old-records-static=1997-07-07',
+                 '--aging=0',
+                 ],
+                {},
+                ['Set Aging to 0'],
+                False
+            ),
+            (
+                # This regex should catch weergly, but all the
+                # records are already static,
+                ['--mark-records-static-regex=wee'],
+                {},
+                [],
+                False
+            ),
+            (
+                # Make frobitz static again.
+                ['--mark-records-static-regex=obi'],
+                {'frobitz': [('R', 0),
+                             ('R', 0),
+                             ('R', 0),
+                             ('R', 0),
+                             ('-', 0),
+                             ('-', 0),
+                             ('-', 0),
+                             ('-', 0),
+                             ('-', 0),
+                             ('-', 0),
+                             ('-', 0),
+                             ('-', 0),
+                             ('-', 0),
+                             ('-', 0),
+                             ('R', 0),
+                             ('R', 0)]
+                 },
+                ['made 6/16 records static on frobitz.zone.'],
+                False
+            ),
+            (
+                # would make almost everything static, but --dry-run
+                ['--mark-old-records-static=2222-03-04', '--dry-run'],
+                {},
+                [
+                    'would make 6/16 records static on snizle.zone.',
+                    'would make 3/4 records static on ts-multi.zone.'
+                ],
+                False
+            ),
+            (
+                # make everything static
+                ['--mark-records-static-regex=.'],
+                {'snizle': [('R', 0),
+                            ('R', 0),
+                            ('R', 0),
+                            ('R', 0),
+                            ('-', 0),
+                            ('-', 0),
+                            ('-', 0),
+                            ('-', 0),
+                            ('-', 0),
+                            ('-', 0),
+                            ('-', 0),
+                            ('-', 0),
+                            ('-', 0),
+                            ('-', 0),
+                            ('R', 0),
+                            ('R', 0)],
+                 'ts-10000000': [('R', 0)],
+                 'ts-multi': [('R', 0), ('R', 0), ('R', 0), ('R', 0)]
+                 },
+                [
+                    'made 4/4 records static on ts-multi.zone.',
+                    'made 1/1 records static on ts-10000000.zone.',
+                    'made 6/16 records static on snizle.zone.',
+                ],
+                False
+            ),
+            (
+                # make everything dynamic that can be
+                ['--mark-records-dynamic-regex=.'],
+                {'frobitz': [('R', 'nowish'),
+                             ('R', 'nowish'),
+                             ('R', 'nowish'),
+                             ('R', 'nowish'),
+                             ('-', 0),
+                             ('-', 0),
+                             ('-', 0),
+                             ('-', 0),
+                             ('-', 0),
+                             ('-', 0),
+                             ('-', 0),
+                             ('-', 0),
+                             ('-', 0),
+                             ('-', 0),
+                             ('R', 'nowish'),
+                             ('R', 'nowish')],
+                 'snizle': [('R', 'nowish'),
+                            ('R', 'nowish'),
+                            ('R', 'nowish'),
+                            ('R', 'nowish'),
+                            ('-', 0),
+                            ('-', 0),
+                            ('-', 0),
+                            ('-', 0),
+                            ('-', 0),
+                            ('-', 0),
+                            ('-', 0),
+                            ('-', 0),
+                            ('-', 0),
+                            ('-', 0),
+                            ('R', 'nowish'),
+                            ('R', 'nowish')],
+                 'ts-0': [('R', 'nowish')],
+                 'ts-100': [('R', 'nowish')],
+                 'ts-1000000': [('R', 'nowish')],
+                 'ts-10000000': [('R', 'nowish')],
+                 'ts-multi': [('R', 'nowish'),
+                              ('R', 'nowish'),
+                              ('R', 'nowish'),
+                              ('R', 'nowish')],
+                 'weergly': [('R', 'nowish'),
+                             ('R', 'nowish'),
+                             ('R', 'nowish'),
+                             ('R', 'nowish'),
+                             ('-', 0),
+                             ('-', 0),
+                             ('-', 0),
+                             ('-', 0),
+                             ('-', 0),
+                             ('-', 0),
+                             ('-', 0),
+                             ('-', 0),
+                             ('-', 0),
+                             ('-', 0),
+                             ('R', 'nowish'),
+                             ('R', 'nowish')]
+                 },
+                [
+                    'made 4/4 records dynamic on ts-multi.zone.',
+                    'made 6/16 records dynamic on snizle.zone.',
+                    'made 1/1 records dynamic on ts-0.zone.',
+                    'made 1/1 records dynamic on ts-1000000.zone.',
+                    'made 1/1 records dynamic on ts-10000000.zone.',
+                    'made 1/1 records dynamic on ts-100.zone.',
+                    'made 6/16 records dynamic on frobitz.zone.',
+                    'made 6/16 records dynamic on weergly.zone.',
+                ],
+                False
+            ),
+        ):
+            result, out, err = self.runsubcmd("dns",
+                                              "zoneoptions",
+                                              os.environ["SERVER"],
+                                              self.zone,
+                                              self.creds_string,
+                                              *options)
+            if error:
+                self.assertCmdFail(result, f"zoneoptions should fail ({error})")
+            else:
+                self.assertCmdSuccess(result,
+                                      out,
+                                      err,
+                                      "zoneoptions shouldn't fail")
+
+            new_tsmap = self.get_timestamp_map()
+
+            # same keys, always
+            self.assertEqual(sorted(new_tsmap), sorted(tsmap))
+            changes = {}
+            for k in tsmap:
+                if tsmap[k] != new_tsmap[k]:
+                    changes[k] = new_tsmap[k]
+
+            # the observed delta must match exactly what this step expects
+            self.assertEqual(diff, changes)
+
+            for s in output_substrings:
+                self.assertIn(s, out)
+            tsmap = new_tsmap
+
+ def test_zonecreate_dns_domain_directory_partition(self):
+ zone = "test-dns-domain-dp-zone"
+ dns_dp_opt = "--dns-directory-partition=domain"
+
+ result, out, err = self.runsubcmd("dns",
+ "zonecreate",
+ os.environ["SERVER"],
+ zone,
+ self.creds_string,
+ dns_dp_opt)
+ self.assertCmdSuccess(result,
+ out,
+ err,
+ "Failed to create zone with "
+ "--dns-directory-partition option")
+ self.assertTrue('Zone %s created successfully' % zone in out,
+ "Unexpected output: %s")
+
+ result, out, err = self.runsubcmd("dns",
+ "zoneinfo",
+ os.environ["SERVER"],
+ zone,
+ self.creds_string)
+ self.assertCmdSuccess(result, out, err)
+ self.assertTrue("DNS_DP_DOMAIN_DEFAULT" in out,
+ "Missing DNS_DP_DOMAIN_DEFAULT flag")
+
+ result, out, err = self.runsubcmd("dns",
+ "zonedelete",
+ os.environ["SERVER"],
+ zone,
+ self.creds_string)
+ self.assertCmdSuccess(result, out, err,
+ "Failed to delete zone in domain DNS directory "
+ "partition")
+ result, out, err = self.runsubcmd("dns",
+ "zonelist",
+ os.environ["SERVER"],
+ self.creds_string)
+ self.assertCmdSuccess(result, out, err,
+ "Failed to delete zone in domain DNS directory "
+ "partition")
+ self.assertTrue(zone not in out,
+ "Deleted zone still exists")
+
+ def test_zonecreate_dns_forest_directory_partition(self):
+ zone = "test-dns-forest-dp-zone"
+ dns_dp_opt = "--dns-directory-partition=forest"
+
+ result, out, err = self.runsubcmd("dns",
+ "zonecreate",
+ os.environ["SERVER"],
+ zone,
+ self.creds_string,
+ dns_dp_opt)
+ self.assertCmdSuccess(result,
+ out,
+ err,
+ "Failed to create zone with "
+ "--dns-directory-partition option")
+ self.assertTrue('Zone %s created successfully' % zone in out,
+ "Unexpected output: %s")
+
+ result, out, err = self.runsubcmd("dns",
+ "zoneinfo",
+ os.environ["SERVER"],
+ zone,
+ self.creds_string)
+ self.assertCmdSuccess(result, out, err)
+ self.assertTrue("DNS_DP_FOREST_DEFAULT" in out,
+ "Missing DNS_DP_FOREST_DEFAULT flag")
+
+ result, out, err = self.runsubcmd("dns",
+ "zonedelete",
+ os.environ["SERVER"],
+ zone,
+ self.creds_string)
+ self.assertCmdSuccess(result, out, err,
+ "Failed to delete zone in forest DNS directory "
+ "partition")
+
+ result, out, err = self.runsubcmd("dns",
+ "zonelist",
+ os.environ["SERVER"],
+ self.creds_string)
+ self.assertCmdSuccess(result, out, err,
+ "Failed to delete zone in forest DNS directory "
+ "partition")
+ self.assertTrue(zone not in out,
+ "Deleted zone still exists")
diff --git a/python/samba/tests/samba_tool/domain_auth_policy.py b/python/samba/tests/samba_tool/domain_auth_policy.py
new file mode 100644
index 0000000..1854037
--- /dev/null
+++ b/python/samba/tests/samba_tool/domain_auth_policy.py
@@ -0,0 +1,1517 @@
+# Unix SMB/CIFS implementation.
+#
+# Tests for samba-tool domain auth policy command
+#
+# Copyright (C) Catalyst.Net Ltd. 2023
+#
+# Written by Rob van der Linde <rob@catalyst.net.nz>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import json
+from optparse import OptionValueError
+from unittest.mock import patch
+
+from samba.dcerpc import security
+from samba.ndr import ndr_pack, ndr_unpack
+from samba.netcmd.domain.models.exceptions import ModelError
+from samba.samdb import SamDB
+from samba.sd_utils import SDUtils
+
+from .silo_base import SiloTest
+
+
+class AuthPolicyCmdTestCase(SiloTest):
+
+ def test_list(self):
+ """Test listing authentication policies in list format."""
+ result, out, err = self.runcmd("domain", "auth", "policy", "list")
+ self.assertIsNone(result, msg=err)
+
+ expected_policies = ["User Policy", "Service Policy", "Computer Policy"]
+
+ for policy in expected_policies:
+ self.assertIn(policy, out)
+
+ def test_list__json(self):
+ """Test listing authentication policies in JSON format."""
+ result, out, err = self.runcmd("domain", "auth", "policy",
+ "list", "--json")
+ self.assertIsNone(result, msg=err)
+
+ # we should get valid json
+ policies = json.loads(out)
+
+ expected_policies = ["User Policy", "Service Policy", "Computer Policy"]
+
+ for name in expected_policies:
+ policy = policies[name]
+ self.assertIn("name", policy)
+ self.assertIn("msDS-AuthNPolicy", list(policy["objectClass"]))
+ self.assertIn("msDS-AuthNPolicyEnforced", policy)
+ self.assertIn("msDS-StrongNTLMPolicy", policy)
+ self.assertIn("objectGUID", policy)
+
+ def test_view(self):
+ """Test viewing a single authentication policy."""
+ result, out, err = self.runcmd("domain", "auth", "policy", "view",
+ "--name", "User Policy")
+ self.assertIsNone(result, msg=err)
+
+ # we should get valid json
+ policy = json.loads(out)
+
+ # check a few fields only
+ self.assertEqual(policy["cn"], "User Policy")
+ self.assertEqual(policy["msDS-AuthNPolicyEnforced"], True)
+
+ def test_view__notfound(self):
+ """Test viewing an authentication policy that doesn't exist."""
+ result, out, err = self.runcmd("domain", "auth", "policy", "view",
+ "--name", "doesNotExist")
+ self.assertEqual(result, -1)
+ self.assertIn("Authentication policy doesNotExist not found.", err)
+
+ def test_view__name_required(self):
+ """Test view authentication policy without --name argument."""
+ result, out, err = self.runcmd("domain", "auth", "policy", "view")
+ self.assertEqual(result, -1)
+ self.assertIn("Argument --name is required.", err)
+
+ def test_create__success(self):
+ """Test creating a new authentication policy."""
+ name = self.unique_name()
+
+ self.addCleanup(self.delete_authentication_policy, name=name, force=True)
+ result, out, err = self.runcmd("domain", "auth", "policy", "create",
+ "--name", name)
+ self.assertIsNone(result, msg=err)
+
+ # Check policy that was created
+ policy = self.get_authentication_policy(name)
+ self.assertEqual(str(policy["cn"]), name)
+ self.assertEqual(str(policy["msDS-AuthNPolicyEnforced"]), "TRUE")
+
+ def test_create__description(self):
+ """Test creating a new authentication policy with description set."""
+ name = self.unique_name()
+
+ self.addCleanup(self.delete_authentication_policy, name=name, force=True)
+ result, out, err = self.runcmd("domain", "auth", "policy", "create",
+ "--name", name,
+ "--description", "Custom Description")
+ self.assertIsNone(result, msg=err)
+
+ # Check policy description
+ policy = self.get_authentication_policy(name)
+ self.assertEqual(str(policy["cn"]), name)
+ self.assertEqual(str(policy["description"]), "Custom Description")
+
+ def test_create__user_tgt_lifetime_mins(self):
+ """Test create a new authentication policy with --user-tgt-lifetime-mins.
+
+ Also checks the upper and lower bounds are handled.
+ """
+ name = self.unique_name()
+
+ self.addCleanup(self.delete_authentication_policy, name=name, force=True)
+ result, out, err = self.runcmd("domain", "auth", "policy", "create",
+ "--name", name,
+ "--user-tgt-lifetime-mins", "60")
+ self.assertIsNone(result, msg=err)
+
+ # Check policy fields.
+ policy = self.get_authentication_policy(name)
+ self.assertEqual(str(policy["cn"]), name)
+ self.assertEqual(str(policy["msDS-UserTGTLifetime"]), "60")
+
+ # check lower bounds (45)
+ result, out, err = self.runcmd("domain", "auth", "policy", "create",
+ "--name", name + "Lower",
+ "--user-tgt-lifetime-mins", "44")
+ self.assertEqual(result, -1)
+ self.assertIn("--user-tgt-lifetime-mins must be between 45 and 2147483647",
+ err)
+
+ # check upper bounds (2147483647)
+ result, out, err = self.runcmd("domain", "auth", "policy", "create",
+ "--name", name + "Upper",
+ "--user-tgt-lifetime-mins", "2147483648")
+ self.assertEqual(result, -1)
+ self.assertIn("--user-tgt-lifetime-mins must be between 45 and 2147483647",
+ err)
+
+ def test_create__user_allowed_to_authenticate_from_device_group(self):
+ """Tests the --user-allowed-to-authenticate-from-device-group shortcut."""
+ name = self.unique_name()
+ expected = "O:SYG:SYD:(XA;OICI;CR;;;WD;(Member_of_any {SID(%s)}))" % (
+ self.device_group.object_sid)
+
+ self.addCleanup(self.delete_authentication_policy, name=name, force=True)
+ result, out, err = self.runcmd("domain", "auth", "policy", "create",
+ "--name", name,
+ "--user-allowed-to-authenticate-from-device-group",
+ self.device_group.name)
+ self.assertIsNone(result, msg=err)
+
+ # Check policy fields.
+ policy = self.get_authentication_policy(name)
+ self.assertEqual(str(policy["cn"]), name)
+
+ # Check generated SDDL.
+ desc = policy["msDS-UserAllowedToAuthenticateFrom"][0]
+ sddl = ndr_unpack(security.descriptor, desc).as_sddl()
+ self.assertEqual(sddl, expected)
+
+ def test_create__user_allowed_to_authenticate_from_device_silo(self):
+ """Tests the --user-allowed-to-authenticate-from-device-silo shortcut."""
+ name = self.unique_name()
+
+ self.addCleanup(self.delete_authentication_policy, name=name, force=True)
+ result, out, err = self.runcmd("domain", "auth", "policy", "create",
+ "--name", name,
+ "--user-allowed-to-authenticate-from-device-silo",
+ "Developers")
+ self.assertIsNone(result, msg=err)
+
+ # Check policy fields.
+ policy = self.get_authentication_policy(name)
+ self.assertEqual(str(policy["cn"]), name)
+
+ # Check generated SDDL.
+ desc = policy["msDS-UserAllowedToAuthenticateFrom"][0]
+ sddl = ndr_unpack(security.descriptor, desc).as_sddl()
+ self.assertEqual(
+ sddl,
+ 'O:SYG:SYD:(XA;OICI;CR;;;WD;(@USER.ad://ext/AuthenticationSilo == "Developers"))')
+
+ def test_create__user_allowed_to_authenticate_to_by_group(self):
+ """Tests the --user-allowed-to-authenticate-to-by-group shortcut."""
+ name = self.unique_name()
+ expected = "O:SYG:SYD:(XA;OICI;CR;;;WD;(Member_of_any {SID(%s)}))" % (
+ self.device_group.object_sid)
+
+ # Create a user with authenticate to by group attribute.
+ self.addCleanup(self.delete_authentication_policy, name=name, force=True)
+ result, out, err = self.runcmd(
+ "domain", "auth", "policy", "create", "--name", name,
+ "--user-allowed-to-authenticate-to-by-group",
+ self.device_group.name)
+ self.assertIsNone(result, msg=err)
+
+ # Check user allowed to authenticate to field was modified.
+ policy = self.get_authentication_policy(name)
+ self.assertEqual(str(policy["cn"]), name)
+ desc = policy["msDS-UserAllowedToAuthenticateTo"][0]
+ sddl = ndr_unpack(security.descriptor, desc).as_sddl()
+ self.assertEqual(sddl, expected)
+
+ def test_create__user_allowed_to_authenticate_to_by_silo(self):
+ """Tests the --user-allowed-to-authenticate-to-by-silo shortcut."""
+ name = self.unique_name()
+ expected = ('O:SYG:SYD:(XA;OICI;CR;;;WD;(@USER.ad://ext/'
+ 'AuthenticationSilo == "QA"))')
+
+ # Create a user with authenticate to by silo attribute.
+ self.addCleanup(self.delete_authentication_policy, name=name, force=True)
+ result, out, err = self.runcmd(
+ "domain", "auth", "policy", "create", "--name", name,
+ "--user-allowed-to-authenticate-to-by-silo", "QA")
+ self.assertIsNone(result, msg=err)
+
+ # Check user allowed to authenticate to field was modified.
+ policy = self.get_authentication_policy(name)
+ self.assertEqual(str(policy["cn"]), name)
+ desc = policy["msDS-UserAllowedToAuthenticateTo"][0]
+ sddl = ndr_unpack(security.descriptor, desc).as_sddl()
+ self.assertEqual(sddl, expected)
+
+ def test_create__service_tgt_lifetime_mins(self):
+ """Test create a new authentication policy with --service-tgt-lifetime-mins.
+
+ Also checks the upper and lower bounds are handled.
+ """
+ name = self.unique_name()
+
+ self.addCleanup(self.delete_authentication_policy, name=name, force=True)
+ result, out, err = self.runcmd("domain", "auth", "policy", "create",
+ "--name", name,
+ "--service-tgt-lifetime-mins", "60")
+ self.assertIsNone(result, msg=err)
+
+ # Check policy fields.
+ policy = self.get_authentication_policy(name)
+ self.assertEqual(str(policy["cn"]), name)
+ self.assertEqual(str(policy["msDS-ServiceTGTLifetime"]), "60")
+
+ # check lower bounds (45)
+ result, out, err = self.runcmd("domain", "auth", "policy", "create",
+ "--name", name,
+ "--service-tgt-lifetime-mins", "44")
+ self.assertEqual(result, -1)
+ self.assertIn("--service-tgt-lifetime-mins must be between 45 and 2147483647",
+ err)
+
+ # check upper bounds (2147483647)
+ result, out, err = self.runcmd("domain", "auth", "policy", "create",
+ "--name", name,
+ "--service-tgt-lifetime-mins", "2147483648")
+ self.assertEqual(result, -1)
+ self.assertIn("--service-tgt-lifetime-mins must be between 45 and 2147483647",
+ err)
+
+ def test_create__service_allowed_to_authenticate_from_device_group(self):
+ """Tests the --service-allowed-to-authenticate-from-device-group shortcut."""
+ name = self.unique_name()
+ expected = "O:SYG:SYD:(XA;OICI;CR;;;WD;(Member_of_any {SID(%s)}))" % (
+ self.device_group.object_sid)
+
+ self.addCleanup(self.delete_authentication_policy, name=name, force=True)
+ result, out, err = self.runcmd("domain", "auth", "policy", "create",
+ "--name", name,
+ "--service-allowed-to-authenticate-from-device-group",
+ self.device_group.name)
+ self.assertIsNone(result, msg=err)
+
+ # Check policy fields.
+ policy = self.get_authentication_policy(name)
+ self.assertEqual(str(policy["cn"]), name)
+
+ # Check generated SDDL.
+ desc = policy["msDS-ServiceAllowedToAuthenticateFrom"][0]
+ sddl = ndr_unpack(security.descriptor, desc).as_sddl()
+ self.assertEqual(sddl, expected)
+
+ def test_create__service_allowed_to_authenticate_from_device_silo(self):
+ """Tests the --service-allowed-to-authenticate-from-device-silo shortcut."""
+ name = self.unique_name()
+
+ self.addCleanup(self.delete_authentication_policy, name=name, force=True)
+ result, out, err = self.runcmd("domain", "auth", "policy", "create",
+ "--name", name,
+ "--service-allowed-to-authenticate-from-device-silo",
+ "Managers")
+ self.assertIsNone(result, msg=err)
+
+ # Check policy fields.
+ policy = self.get_authentication_policy(name)
+ self.assertEqual(str(policy["cn"]), name)
+ desc = policy["msDS-ServiceAllowedToAuthenticateFrom"][0]
+
+ # Check generated SDDL.
+ sddl = ndr_unpack(security.descriptor, desc).as_sddl()
+ self.assertEqual(
+ sddl,
+ 'O:SYG:SYD:(XA;OICI;CR;;;WD;(@USER.ad://ext/AuthenticationSilo == "Managers"))')
+
+ def test_create__service_allowed_to_authenticate_to_by_group(self):
+ """Tests the --service-allowed-to-authenticate-to-by-group shortcut."""
+ name = self.unique_name()
+ expected = "O:SYG:SYD:(XA;OICI;CR;;;WD;(Member_of_any {SID(%s)}))" % (
+ self.device_group.object_sid)
+
+        # Create a policy with service allowed to authenticate to by group.
+ self.addCleanup(self.delete_authentication_policy, name=name, force=True)
+ result, out, err = self.runcmd(
+ "domain", "auth", "policy", "create", "--name", name,
+ "--service-allowed-to-authenticate-to-by-group",
+ self.device_group.name)
+ self.assertIsNone(result, msg=err)
+
+        # Check service allowed to authenticate to field was modified.
+ policy = self.get_authentication_policy(name)
+ self.assertEqual(str(policy["cn"]), name)
+ desc = policy["msDS-ServiceAllowedToAuthenticateTo"][0]
+ sddl = ndr_unpack(security.descriptor, desc).as_sddl()
+ self.assertEqual(sddl, expected)
+
+ def test_create__service_allowed_to_authenticate_to_by_silo(self):
+ """Tests the --service-allowed-to-authenticate-to-by-silo shortcut."""
+ name = self.unique_name()
+ expected = ('O:SYG:SYD:(XA;OICI;CR;;;WD;(@USER.ad://ext/'
+ 'AuthenticationSilo == "Managers"))')
+
+        # Create a policy with service allowed to authenticate to by silo.
+ self.addCleanup(self.delete_authentication_policy, name=name, force=True)
+ result, out, err = self.runcmd(
+ "domain", "auth", "policy", "create", "--name", name,
+ "--service-allowed-to-authenticate-to-by-silo", "Managers")
+ self.assertIsNone(result, msg=err)
+
+        # Check service allowed to authenticate to field was modified.
+ policy = self.get_authentication_policy(name)
+ self.assertEqual(str(policy["cn"]), name)
+ desc = policy["msDS-ServiceAllowedToAuthenticateTo"][0]
+ sddl = ndr_unpack(security.descriptor, desc).as_sddl()
+ self.assertEqual(sddl, expected)
+
+ def test_create__computer_tgt_lifetime_mins(self):
+ """Test create a new authentication policy with --computer-tgt-lifetime-mins.
+
+ Also checks the upper and lower bounds are handled.
+ """
+ name = self.unique_name()
+
+ self.addCleanup(self.delete_authentication_policy, name=name, force=True)
+ result, out, err = self.runcmd("domain", "auth", "policy", "create",
+ "--name", name,
+ "--computer-tgt-lifetime-mins", "60")
+ self.assertIsNone(result, msg=err)
+
+ # Check policy fields.
+ policy = self.get_authentication_policy(name)
+ self.assertEqual(str(policy["cn"]), name)
+ self.assertEqual(str(policy["msDS-ComputerTGTLifetime"]), "60")
+
+ # check lower bounds (45)
+ result, out, err = self.runcmd("domain", "auth", "policy", "create",
+ "--name", name + "Lower",
+ "--computer-tgt-lifetime-mins", "44")
+ self.assertEqual(result, -1)
+ self.assertIn("--computer-tgt-lifetime-mins must be between 45 and 2147483647",
+ err)
+
+ # check upper bounds (2147483647)
+ result, out, err = self.runcmd("domain", "auth", "policy", "create",
+ "--name", name + "Upper",
+ "--computer-tgt-lifetime-mins", "2147483648")
+ self.assertEqual(result, -1)
+ self.assertIn("--computer-tgt-lifetime-mins must be between 45 and 2147483647",
+ err)
+
+ def test_create__computer_allowed_to_authenticate_to_by_group(self):
+ """Tests the --computer-allowed-to-authenticate-to-by-group shortcut."""
+ name = self.unique_name()
+ expected = "O:SYG:SYD:(XA;OICI;CR;;;WD;(Member_of_any {SID(%s)}))" % (
+ self.device_group.object_sid)
+
+        # Create a policy with computer allowed to authenticate to by group.
+ self.addCleanup(self.delete_authentication_policy, name=name, force=True)
+ result, out, err = self.runcmd(
+ "domain", "auth", "policy", "create", "--name", name,
+ "--computer-allowed-to-authenticate-to-by-group",
+ self.device_group.name)
+ self.assertIsNone(result, msg=err)
+
+        # Check computer allowed to authenticate to field was modified.
+ policy = self.get_authentication_policy(name)
+ self.assertEqual(str(policy["cn"]), name)
+ desc = policy["msDS-ComputerAllowedToAuthenticateTo"][0]
+ sddl = ndr_unpack(security.descriptor, desc).as_sddl()
+ self.assertEqual(sddl, expected)
+
+ def test_create__computer_allowed_to_authenticate_to_by_silo(self):
+ """Tests the --computer-allowed-to-authenticate-to-by-silo shortcut."""
+ name = self.unique_name()
+ expected = ('O:SYG:SYD:(XA;OICI;CR;;;WD;(@USER.ad://ext/'
+ 'AuthenticationSilo == "QA"))')
+
+        # Create a policy with computer allowed to authenticate to by silo.
+ self.addCleanup(self.delete_authentication_policy, name=name, force=True)
+ result, out, err = self.runcmd(
+ "domain", "auth", "policy", "create", "--name", name,
+ "--computer-allowed-to-authenticate-to-by-silo", "QA")
+ self.assertIsNone(result, msg=err)
+
+        # Check computer allowed to authenticate to field was modified.
+ policy = self.get_authentication_policy(name)
+ self.assertEqual(str(policy["cn"]), name)
+ desc = policy["msDS-ComputerAllowedToAuthenticateTo"][0]
+ sddl = ndr_unpack(security.descriptor, desc).as_sddl()
+ self.assertEqual(sddl, expected)
+
+ def test_create__valid_sddl(self):
+ """Test creating a new authentication policy with valid SDDL in a field."""
+ name = self.unique_name()
+ expected = "O:SYG:SYD:(XA;OICI;CR;;;WD;(Member_of {SID(AO)}))"
+
+ self.addCleanup(self.delete_authentication_policy, name=name, force=True)
+ result, out, err = self.runcmd("domain", "auth", "policy", "create",
+ "--name", name,
+ "--user-allowed-to-authenticate-from",
+ expected)
+ self.assertIsNone(result, msg=err)
+
+ # Check policy fields.
+ policy = self.get_authentication_policy(name)
+ self.assertEqual(str(policy["cn"]), name)
+ desc = policy["msDS-UserAllowedToAuthenticateFrom"][0]
+ sddl = ndr_unpack(security.descriptor, desc).as_sddl()
+ self.assertEqual(sddl, expected)
+
+ def test_create__invalid_sddl(self):
+ """Test creating a new authentication policy with invalid SDDL in a field."""
+ name = self.unique_name()
+
+ result, out, err = self.runcmd("domain", "auth", "policy", "create",
+ "--name", name,
+ "--user-allowed-to-authenticate-from",
+ "*INVALID SDDL*")
+
+ self.assertEqual(result, -1)
+ self.assertIn("Unable to parse SDDL", err)
+ self.assertIn(" *INVALID SDDL*\n ^\n expected '[OGDS]:' section start ", err)
+
+ def test_create__invalid_sddl_conditional_ace(self):
+ """Test creating a new authentication policy with invalid SDDL in a field."""
+ sddl = "O:SYG:SYD:(XA;OICI;CR;;;WD;(Member_of {secret club}))"
+ result, out, err = self.runcmd("domain", "auth", "policy", "create",
+ "--name", "invalidSDDLPolicy2",
+ "--user-allowed-to-authenticate-from",
+ sddl)
+ self.assertEqual(result, -1)
+ self.assertIn("Unable to parse SDDL", err)
+ self.assertIn(sddl, err)
+ self.assertIn(f"\n{'^':>41}", err)
+ self.assertIn("unexpected byte 0x73 's' parsing literal", err)
+ self.assertNotIn(" File ", err)
+
+ def test_create__invalid_sddl_conditional_ace_non_ascii(self):
+ """Test creating a new authentication policy with invalid SDDL in a field."""
+ sddl = 'O:SYG:SYD:(XA;OICI;CR;;;WD;(@User.āāēē == "łē¶ŧ¹⅓þōīŋ“đ¢ð»" && Member_of {secret club}))'
+ result, out, err = self.runcmd("domain", "auth", "policy", "create",
+ "--name", "invalidSDDLPolicy2",
+ "--user-allowed-to-authenticate-from",
+ sddl)
+ self.assertEqual(result, -1)
+ self.assertIn("Unable to parse SDDL", err)
+ self.assertIn(sddl, err)
+ self.assertIn(f"\n{'^':>76}\n", err)
+ self.assertIn(" unexpected byte 0x73 's' parsing literal", err)
+ self.assertNotIn(" File ", err)
+
+ def test_create__invalid_sddl_normal_ace(self):
+ """Test creating a new authentication policy with invalid SDDL in a field."""
+ sddl = "O:SYG:SYD:(A;;;;ZZ)(XA;OICI;CR;;;WD;(Member_of {WD}))"
+ result, out, err = self.runcmd("domain", "auth", "policy", "create",
+ "--name", "invalidSDDLPolicy3",
+ "--user-allowed-to-authenticate-from",
+ sddl)
+ self.assertEqual(result, -1)
+ self.assertIn("Unable to parse SDDL", err)
+ self.assertIn(sddl, err)
+ self.assertIn(f"\n{'^':>13}", err)
+ self.assertIn("\n malformed ACE with only 4 ';'\n", err)
+ self.assertNotIn(" File ", err) # traceback marker
+
+ def test_create__device_attribute_in_sddl_allowed_to(self):
+ """Test creating a new authentication policy that uses
+ user-allowed-to-authenticate-to with a device attribute."""
+
+ sddl = 'O:SYG:SYD:(XA;OICI;CR;;;WD;(@Device.claim == "foo"))'
+
+ name = self.unique_name()
+ self.addCleanup(self.delete_authentication_policy, name=name)
+ result, _, err = self.runcmd("domain", "auth", "policy", "create",
+ "--name", name,
+ "--user-allowed-to-authenticate-to",
+ sddl)
+ self.assertIsNone(result, msg=err)
+
+ def test_create__device_operator_in_sddl_allowed_to(self):
+ """Test creating a new authentication policy that uses
+ user-allowed-to-authenticate-to with a device operator."""
+
+ sddl = 'O:SYG:SYD:(XA;OICI;CR;;;WD;(Not_Device_Member_of {SID(WD)}))'
+
+ name = self.unique_name()
+ self.addCleanup(self.delete_authentication_policy, name=name)
+ result, _, err = self.runcmd("domain", "auth", "policy", "create",
+ "--name", name,
+ "--user-allowed-to-authenticate-to",
+ sddl)
+ self.assertIsNone(result, msg=err)
+
+ def test_create__device_attribute_in_sddl_allowed_from(self):
+ """Test creating a new authentication policy that uses
+ user-allowed-to-authenticate-from with a device attribute."""
+
+ sddl = 'O:SYG:SYD:(XA;OICI;CR;;;WD;(@Device.claim == "foo"))'
+
+ name = self.unique_name()
+ result, _, err = self.runcmd("domain", "auth", "policy", "create",
+ "--name", name,
+ "--user-allowed-to-authenticate-from",
+ sddl)
+ self.assertEqual(result, -1)
+ self.assertIn("Unable to parse SDDL", err)
+ self.assertIn(sddl, err)
+ self.assertIn(f"\n{'^':>31}\n", err)
+ self.assertIn(" a device attribute is not applicable in this context "
+ "(did you intend a user attribute?)",
+ err)
+ self.assertNotIn(" File ", err)
+
+ def test_create__device_operator_in_sddl_allowed_from(self):
+ """Test creating a new authentication policy that uses
+ user-allowed-to-authenticate-from with a device operator."""
+
+ sddl = 'O:SYG:SYD:(XA;OICI;CR;;;WD;(Not_Device_Member_of {SID(WD)}))'
+
+ name = self.unique_name()
+ result, _, err = self.runcmd("domain", "auth", "policy", "create",
+ "--name", name,
+ "--user-allowed-to-authenticate-from",
+ sddl)
+ self.assertEqual(result, -1)
+ self.assertIn("Unable to parse SDDL", err)
+ self.assertIn(sddl, err)
+ self.assertIn(f"\n{'^':>30}\n", err)
+ self.assertIn(" a device‐relative expression will never evaluate to "
+ "true in this context (did you intend a user‐relative "
+ "expression?)",
+ err)
+ self.assertNotIn(" File ", err)
+
+ def test_create__device_attribute_in_sddl_already_exists(self):
+ """Test modifying an existing authentication policy that uses
+ user-allowed-to-authenticate-from with a device attribute."""
+
+ # The SDDL refers to ‘Device.claim’.
+ sddl = 'O:SYG:SYD:(XA;OICI;CR;;;WD;(@Device.claim == "foo"))'
+ domain_sid = security.dom_sid(self.samdb.get_domain_sid())
+ descriptor = security.descriptor.from_sddl(sddl, domain_sid)
+
+ # Manually create an authentication policy that refers to a device
+ # attribute.
+
+ name = self.unique_name()
+ dn = self.get_authn_policies_dn()
+ dn.add_child(f"CN={name}")
+ message = {
+ 'dn': dn,
+ 'msDS-AuthNPolicyEnforced': b'TRUE',
+ 'objectClass': b'msDS-AuthNPolicy',
+ 'msDS-UserAllowedToAuthenticateFrom': ndr_pack(descriptor),
+ }
+
+ self.addCleanup(self.delete_authentication_policy, name=name)
+ self.samdb.add(message)
+
+ # Change the policy description. This should succeed, in spite of the
+ # policy’s referring to a device attribute when it shouldn’t.
+ result, _, err = self.runcmd("domain", "auth", "policy", "modify",
+ "--name", name,
+ "--description", "NewDescription")
+ self.assertIsNone(result, msg=err)
+
+ def test_create__already_exists(self):
+ """Test creating a new authentication policy that already exists."""
+ result, out, err = self.runcmd("domain", "auth", "policy", "create",
+ "--name", "User Policy")
+ self.assertEqual(result, -1)
+ self.assertIn("Authentication policy User Policy already exists", err)
+
+ def test_create__name_missing(self):
+ """Test create authentication policy without --name argument."""
+ result, out, err = self.runcmd("domain", "auth", "policy", "create")
+ self.assertEqual(result, -1)
+ self.assertIn("Argument --name is required.", err)
+
+ def test_create__audit(self):
+ """Test create authentication policy with --audit flag."""
+ name = self.unique_name()
+
+ self.addCleanup(self.delete_authentication_policy, name=name, force=True)
+ result, out, err = self.runcmd("domain", "auth", "policy", "create",
+ "--name", name,
+ "--audit")
+ self.assertIsNone(result, msg=err)
+
+ # fetch and check policy
+ policy = self.get_authentication_policy(name)
+ self.assertEqual(str(policy["msDS-AuthNPolicyEnforced"]), "FALSE")
+
+ def test_create__enforce(self):
+ """Test create authentication policy with --enforce flag."""
+ name = self.unique_name()
+
+ self.addCleanup(self.delete_authentication_policy, name=name, force=True)
+ result, out, err = self.runcmd("domain", "auth", "policy", "create",
+ "--name", name,
+ "--enforce")
+ self.assertIsNone(result, msg=err)
+
+ # fetch and check policy
+ policy = self.get_authentication_policy(name)
+ self.assertEqual(str(policy["msDS-AuthNPolicyEnforced"]), "TRUE")
+
+ def test_create__audit_enforce_together(self):
+ """Test create auth policy using both --audit and --enforce."""
+ name = self.unique_name()
+
+ result, out, err = self.runcmd("domain", "auth", "policy", "create",
+ "--name", name,
+ "--audit", "--enforce")
+
+ self.assertEqual(result, -1)
+ self.assertIn("--audit and --enforce cannot be used together.", err)
+
+ def test_create__protect_unprotect_together(self):
+ """Test create authentication policy using --protect and --unprotect."""
+ name = self.unique_name()
+
+ result, out, err = self.runcmd("domain", "auth", "policy", "create",
+ "--name", name,
+ "--protect", "--unprotect")
+
+ self.assertEqual(result, -1)
+ self.assertIn("--protect and --unprotect cannot be used together.", err)
+
+ def test_create__user_allowed_to_authenticate_from_repeated(self):
+ """Test repeating similar arguments doesn't make sense to use together.
+
+ --user-allowed-to-authenticate-from
+ --user-allowed-to-authenticate-from-device-silo
+ """
+ sddl = 'O:SYG:SYD:(XA;OICI;CR;;;WD;(@USER.ad://ext/AuthenticationSilo == "Developers"))'
+ name = self.unique_name()
+
+ result, out, err = self.runcmd("domain", "auth", "policy", "create",
+ "--name", name,
+ "--user-allowed-to-authenticate-from",
+ sddl,
+ "--user-allowed-to-authenticate-from-device-silo",
+ "Managers")
+
+ self.assertEqual(result, -1)
+ self.assertIn("--user-allowed-to-authenticate-from argument repeated 2 times.", err)
+
+ def test_create__user_allowed_to_authenticate_to_repeated(self):
+ """Test repeating similar arguments doesn't make sense to use together.
+
+ --user-allowed-to-authenticate-to
+ --user-allowed-to-authenticate-to-by-silo
+ """
+ sddl = 'O:SYG:SYD:(XA;OICI;CR;;;WD;(@USER.ad://ext/AuthenticationSilo == "Developers"))'
+ name = self.unique_name()
+
+ result, out, err = self.runcmd("domain", "auth", "policy", "create",
+ "--name", name,
+ "--user-allowed-to-authenticate-to",
+ sddl,
+ "--user-allowed-to-authenticate-to-by-silo",
+ "Managers")
+
+ self.assertEqual(result, -1)
+ self.assertIn("--user-allowed-to-authenticate-to argument repeated 2 times.", err)
+
+ def test_create__service_allowed_to_authenticate_from_repeated(self):
+ """Test repeating similar arguments doesn't make sense to use together.
+
+ --service-allowed-to-authenticate-from
+ --service-allowed-to-authenticate-from-device-silo
+ """
+ sddl = 'O:SYG:SYD:(XA;OICI;CR;;;WD;(@USER.ad://ext/AuthenticationSilo == "Managers"))'
+ name = self.unique_name()
+
+ result, out, err = self.runcmd("domain", "auth", "policy", "create",
+ "--name", name,
+ "--service-allowed-to-authenticate-from",
+ sddl,
+ "--service-allowed-to-authenticate-from-device-silo",
+ "QA")
+
+ self.assertEqual(result, -1)
+ self.assertIn("--service-allowed-to-authenticate-from argument repeated 2 times.", err)
+
+ def test_create__service_allowed_to_authenticate_to_repeated(self):
+ """Test repeating similar arguments doesn't make sense to use together.
+
+ --service-allowed-to-authenticate-to
+ --service-allowed-to-authenticate-to-by-silo
+ """
+ sddl = 'O:SYG:SYD:(XA;OICI;CR;;;WD;(@USER.ad://ext/AuthenticationSilo == "Managers"))'
+ name = self.unique_name()
+
+ result, out, err = self.runcmd("domain", "auth", "policy", "create",
+ "--name", name,
+ "--service-allowed-to-authenticate-to",
+ sddl,
+ "--service-allowed-to-authenticate-to-by-silo",
+ "QA")
+
+ self.assertEqual(result, -1)
+ self.assertIn("--service-allowed-to-authenticate-to argument repeated 2 times.", err)
+
+ def test_create__computer_allowed_to_authenticate_to_repeated(self):
+ """Test repeating similar arguments doesn't make sense to use together.
+
+ --computer-allowed-to-authenticate-to
+ --computer-allowed-to-authenticate-to-by-silo
+ """
+ sddl = 'O:SYG:SYD:(XA;OICI;CR;;;WD;(@USER.ad://ext/AuthenticationSilo == "Managers"))'
+ name = self.unique_name()
+
+ result, out, err = self.runcmd("domain", "auth", "policy", "create",
+ "--name", name,
+ "--computer-allowed-to-authenticate-to",
+ sddl,
+ "--computer-allowed-to-authenticate-to-by-silo",
+ "QA")
+
+ self.assertEqual(result, -1)
+ self.assertIn("--computer-allowed-to-authenticate-to argument repeated 2 times.", err)
+
+ def test_create__fails(self):
+ """Test creating an authentication policy, but it fails."""
+ name = self.unique_name()
+
+ # Raise ModelError when ldb.add() is called.
+ with patch.object(SamDB, "add") as add_mock:
+ add_mock.side_effect = ModelError("Custom error message")
+ result, out, err = self.runcmd("domain", "auth", "policy", "create",
+ "--name", name)
+ self.assertEqual(result, -1)
+ self.assertIn("Custom error message", err)
+
+ def test_modify__description(self):
+ """Test modifying an authentication policy description."""
+ name = self.unique_name()
+
+ # Create a policy to modify for this test.
+ self.addCleanup(self.delete_authentication_policy, name=name, force=True)
+ self.runcmd("domain", "auth", "policy", "create", "--name", name)
+
+ # Change the policy description.
+ result, out, err = self.runcmd("domain", "auth", "policy", "modify",
+ "--name", name,
+ "--description", "NewDescription")
+ self.assertIsNone(result, msg=err)
+
+ # Verify fields were changed.
+ policy = self.get_authentication_policy(name)
+ self.assertEqual(str(policy["description"]), "NewDescription")
+
+ def test_modify__strong_ntlm_policy(self):
+ """Test modify strong ntlm policy on the authentication policy."""
+ name = self.unique_name()
+
+ # Create a policy to modify for this test.
+ self.addCleanup(self.delete_authentication_policy, name=name, force=True)
+ self.runcmd("domain", "auth", "policy", "create", "--name", name)
+
+ result, out, err = self.runcmd("domain", "auth", "policy", "modify",
+ "--name", name,
+ "--strong-ntlm-policy", "Required")
+ self.assertIsNone(result, msg=err)
+
+ # Verify fields were changed.
+ policy = self.get_authentication_policy(name)
+ self.assertEqual(str(policy["msDS-StrongNTLMPolicy"]), "2")
+
+ # Check an invalid choice.
+ with self.assertRaises((OptionValueError, SystemExit)):
+ self.runcmd("domain", "auth", "policy", "modify",
+ "--name", name,
+ "--strong-ntlm-policy", "Invalid")
+
+ # It is difficult to test the error message text for invalid
+ # choices because inside optparse it will raise OptionValueError
+ # followed by raising SystemExit(2).
+
+ def test_modify__user_tgt_lifetime_mins(self):
+ """Test modifying an authentication policy --user-tgt-lifetime-mins.
+
+ This includes checking the upper and lower bounds.
+ """
+ name = self.unique_name()
+
+ # Create a policy to modify for this test.
+ self.addCleanup(self.delete_authentication_policy, name=name, force=True)
+ self.runcmd("domain", "auth", "policy", "create", "--name", name)
+
+ result, out, err = self.runcmd("domain", "auth", "policy", "modify",
+ "--name", name,
+ "--user-tgt-lifetime-mins", "120")
+ self.assertIsNone(result, msg=err)
+
+ # Verify field was changed.
+ policy = self.get_authentication_policy(name)
+ self.assertEqual(str(policy["msDS-UserTGTLifetime"]), "120")
+
+ # check lower bounds (45)
+ result, out, err = self.runcmd("domain", "auth", "policy", "modify",
+ "--name", name + "Lower",
+ "--user-tgt-lifetime-mins", "44")
+ self.assertEqual(result, -1)
+ self.assertIn("--user-tgt-lifetime-mins must be between 45 and 2147483647",
+ err)
+
+ # check upper bounds (2147483647)
+ result, out, err = self.runcmd("domain", "auth", "policy", "modify",
+ "--name", name + "Upper",
+ "--user-tgt-lifetime-mins", "2147483648")
+ self.assertEqual(result, -1)
+ self.assertIn("--user-tgt-lifetime-mins must be between 45 and 2147483647",
+ err)
+
+ def test_modify__service_tgt_lifetime_mins(self):
+ """Test modifying an authentication policy --service-tgt-lifetime-mins.
+
+ This includes checking the upper and lower bounds.
+ """
+ name = self.unique_name()
+
+ # Create a policy to modify for this test.
+ self.addCleanup(self.delete_authentication_policy, name=name, force=True)
+ self.runcmd("domain", "auth", "policy", "create", "--name", name)
+
+ result, out, err = self.runcmd("domain", "auth", "policy", "modify",
+ "--name", name,
+ "--service-tgt-lifetime-mins", "120")
+ self.assertIsNone(result, msg=err)
+
+ # Verify field was changed.
+ policy = self.get_authentication_policy(name)
+ self.assertEqual(str(policy["msDS-ServiceTGTLifetime"]), "120")
+
+ # check lower bounds (45)
+ result, out, err = self.runcmd("domain", "auth", "policy", "modify",
+ "--name", name + "Lower",
+ "--service-tgt-lifetime-mins", "44")
+ self.assertEqual(result, -1)
+ self.assertIn("--service-tgt-lifetime-mins must be between 45 and 2147483647",
+ err)
+
+ # check upper bounds (2147483647)
+ result, out, err = self.runcmd("domain", "auth", "policy", "modify",
+ "--name", name + "Upper",
+ "--service-tgt-lifetime-mins", "2147483648")
+ self.assertEqual(result, -1)
+ self.assertIn("--service-tgt-lifetime-mins must be between 45 and 2147483647",
+ err)
+
+ def test_modify__computer_tgt_lifetime_mins(self):
+ """Test modifying an authentication policy --computer-tgt-lifetime-mins.
+
+ This includes checking the upper and lower bounds.
+ """
+ name = self.unique_name()
+
+ # Create a policy to modify for this test.
+ self.addCleanup(self.delete_authentication_policy, name=name, force=True)
+ self.runcmd("domain", "auth", "policy", "create", "--name", name)
+
+ result, out, err = self.runcmd("domain", "auth", "policy", "modify",
+ "--name", name,
+ "--computer-tgt-lifetime-mins", "120")
+ self.assertIsNone(result, msg=err)
+
+ # Verify field was changed.
+ policy = self.get_authentication_policy(name)
+ self.assertEqual(str(policy["msDS-ComputerTGTLifetime"]), "120")
+
+ # check lower bounds (45)
+ result, out, err = self.runcmd("domain", "auth", "policy", "modify",
+ "--name", name + "Lower",
+ "--computer-tgt-lifetime-mins", "44")
+ self.assertEqual(result, -1)
+ self.assertIn("--computer-tgt-lifetime-mins must be between 45 and 2147483647",
+ err)
+
+ # check upper bounds (2147483647)
+ result, out, err = self.runcmd("domain", "auth", "policy", "modify",
+ "--name", name + "Upper",
+ "--computer-tgt-lifetime-mins", "2147483648")
+ self.assertEqual(result, -1)
+ self.assertIn("--computer-tgt-lifetime-mins must be between 45 and 2147483647",
+ err)
+
+ def test_modify__user_allowed_to_authenticate_from(self):
+ """Modify authentication policy user allowed to authenticate from."""
+ name = self.unique_name()
+ expected = "O:SYG:SYD:(XA;OICI;CR;;;WD;(Member_of {SID(AO)}))"
+
+ # Create a policy to modify for this test.
+ self.addCleanup(self.delete_authentication_policy, name=name, force=True)
+ self.runcmd("domain", "auth", "policy", "create", "--name", name)
+
+ # Modify user allowed to authenticate from field
+ result, out, err = self.runcmd("domain", "auth", "policy", "modify",
+ "--name", name,
+ "--user-allowed-to-authenticate-from",
+ expected)
+ self.assertIsNone(result, msg=err)
+
+ # Check user allowed to authenticate from field was modified.
+ policy = self.get_authentication_policy(name)
+ self.assertEqual(str(policy["cn"]), name)
+ desc = policy["msDS-UserAllowedToAuthenticateFrom"][0]
+ sddl = ndr_unpack(security.descriptor, desc).as_sddl()
+ self.assertEqual(sddl, expected)
+
+ def test_modify__user_allowed_to_authenticate_from_device_group(self):
+ """Test the --user-allowed-to-authenticate-from-device-group shortcut."""
+ name = self.unique_name()
+ expected = "O:SYG:SYD:(XA;OICI;CR;;;WD;(Member_of_any {SID(%s)}))" % (
+ self.device_group.object_sid)
+
+ # Create a policy to modify for this test.
+ self.addCleanup(self.delete_authentication_policy, name=name, force=True)
+ self.runcmd("domain", "auth", "policy", "create", "--name", name)
+
+        # Modify user allowed to authenticate from device group field
+ result, out, err = self.runcmd("domain", "auth", "policy", "modify",
+ "--name", name,
+ "--user-allowed-to-authenticate-from-device-group",
+ self.device_group.name)
+ self.assertIsNone(result, msg=err)
+
+ # Check generated SDDL.
+ policy = self.get_authentication_policy(name)
+ desc = policy["msDS-UserAllowedToAuthenticateFrom"][0]
+ sddl = ndr_unpack(security.descriptor, desc).as_sddl()
+ self.assertEqual(sddl, expected)
+
+ def test_modify__user_allowed_to_authenticate_from_device_silo(self):
+ """Test the --user-allowed-to-authenticate-from-device-silo shortcut."""
+ name = self.unique_name()
+
+ # Create a policy to modify for this test.
+ self.addCleanup(self.delete_authentication_policy, name=name, force=True)
+ self.runcmd("domain", "auth", "policy", "create", "--name", name)
+
+        # Modify user allowed to authenticate from device silo field
+ result, out, err = self.runcmd("domain", "auth", "policy", "modify",
+ "--name", name,
+ "--user-allowed-to-authenticate-from-device-silo",
+ "QA")
+ self.assertIsNone(result, msg=err)
+
+ # Check generated SDDL.
+ policy = self.get_authentication_policy(name)
+ desc = policy["msDS-UserAllowedToAuthenticateFrom"][0]
+ sddl = ndr_unpack(security.descriptor, desc).as_sddl()
+ self.assertEqual(
+ sddl,
+ 'O:SYG:SYD:(XA;OICI;CR;;;WD;(@USER.ad://ext/AuthenticationSilo == "QA"))')
+
+ def test_modify__user_allowed_to_authenticate_to(self):
+ """Modify authentication policy user allowed to authenticate to."""
+ name = self.unique_name()
+ expected = "O:SYG:SYD:(XA;OICI;CR;;;WD;(Member_of {SID(AO)}))"
+
+ # Create a policy to modify for this test.
+ self.addCleanup(self.delete_authentication_policy, name=name, force=True)
+ self.runcmd("domain", "auth", "policy", "create", "--name", name)
+
+ # Modify user allowed to authenticate to field
+ result, out, err = self.runcmd("domain", "auth", "policy", "modify",
+ "--name", name,
+ "--user-allowed-to-authenticate-to",
+ expected)
+ self.assertIsNone(result, msg=err)
+
+ # Check user allowed to authenticate to field was modified.
+ policy = self.get_authentication_policy(name)
+ self.assertEqual(str(policy["cn"]), name)
+ desc = policy["msDS-UserAllowedToAuthenticateTo"][0]
+ sddl = ndr_unpack(security.descriptor, desc).as_sddl()
+ self.assertEqual(sddl, expected)
+
+ def test_modify__user_allowed_to_authenticate_to_by_group(self):
+ """Tests the --user-allowed-to-authenticate-to-by-group shortcut."""
+ name = self.unique_name()
+ expected = "O:SYG:SYD:(XA;OICI;CR;;;WD;(Member_of_any {SID(%s)}))" % (
+ self.device_group.object_sid)
+
+ # Create a policy to modify for this test.
+ self.addCleanup(self.delete_authentication_policy, name=name, force=True)
+ self.runcmd("domain", "auth", "policy", "create", "--name", name)
+
+ # Modify user allowed to authenticate to field
+ result, out, err = self.runcmd("domain", "auth", "policy", "modify",
+ "--name", name,
+ "--user-allowed-to-authenticate-to-by-group",
+ self.device_group.name)
+ self.assertIsNone(result, msg=err)
+
+ # Check user allowed to authenticate to field was modified.
+ policy = self.get_authentication_policy(name)
+ self.assertEqual(str(policy["cn"]), name)
+ desc = policy["msDS-UserAllowedToAuthenticateTo"][0]
+ sddl = ndr_unpack(security.descriptor, desc).as_sddl()
+ self.assertEqual(sddl, expected)
+
+ def test_modify__user_allowed_to_authenticate_to_by_silo(self):
+ """Tests the --user-allowed-to-authenticate-to-by-silo shortcut."""
+ name = self.unique_name()
+ expected = ('O:SYG:SYD:(XA;OICI;CR;;;WD;(@USER.ad://ext/'
+ 'AuthenticationSilo == "Developers"))')
+
+ # Create a policy to modify for this test.
+ self.addCleanup(self.delete_authentication_policy, name=name, force=True)
+ self.runcmd("domain", "auth", "policy", "create", "--name", name)
+
+ # Modify user allowed to authenticate to field
+ result, out, err = self.runcmd("domain", "auth", "policy", "modify",
+ "--name", name,
+ "--user-allowed-to-authenticate-to-by-silo",
+ "Developers")
+ self.assertIsNone(result, msg=err)
+
+ # Check user allowed to authenticate to field was modified.
+ policy = self.get_authentication_policy(name)
+ self.assertEqual(str(policy["cn"]), name)
+ desc = policy["msDS-UserAllowedToAuthenticateTo"][0]
+ sddl = ndr_unpack(security.descriptor, desc).as_sddl()
+ self.assertEqual(sddl, expected)
+
+ def test_modify__service_allowed_to_authenticate_from(self):
+ """Modify authentication policy service allowed to authenticate from."""
+ name = self.unique_name()
+ expected = "O:SYG:SYD:(XA;OICI;CR;;;WD;(Member_of {SID(AO)}))"
+
+ # Create a policy to modify for this test.
+ self.addCleanup(self.delete_authentication_policy, name=name, force=True)
+ self.runcmd("domain", "auth", "policy", "create", "--name", name)
+
+ # Modify service allowed to authenticate from field
+ result, out, err = self.runcmd("domain", "auth", "policy", "modify",
+ "--name", name,
+ "--service-allowed-to-authenticate-from",
+ expected)
+ self.assertIsNone(result, msg=err)
+
+ # Check service allowed to authenticate from field was modified.
+ policy = self.get_authentication_policy(name)
+ self.assertEqual(str(policy["cn"]), name)
+ desc = policy["msDS-ServiceAllowedToAuthenticateFrom"][0]
+ sddl = ndr_unpack(security.descriptor, desc).as_sddl()
+ self.assertEqual(sddl, expected)
+
+ def test_modify__service_allowed_to_authenticate_from_device_group(self):
+ """Test the --service-allowed-to-authenticate-from-device-group shortcut."""
+ name = self.unique_name()
+ expected = "O:SYG:SYD:(XA;OICI;CR;;;WD;(Member_of_any {SID(%s)}))" % (
+ self.device_group.object_sid)
+
+ # Create a policy to modify for this test.
+ self.addCleanup(self.delete_authentication_policy, name=name, force=True)
+ self.runcmd("domain", "auth", "policy", "create", "--name", name)
+
+        # Modify service allowed to authenticate from device group field
+ result, out, err = self.runcmd("domain", "auth", "policy", "modify",
+ "--name", name,
+ "--service-allowed-to-authenticate-from-device-group",
+ self.device_group.name)
+ self.assertIsNone(result, msg=err)
+
+ # Check generated SDDL.
+ policy = self.get_authentication_policy(name)
+ desc = policy["msDS-ServiceAllowedToAuthenticateFrom"][0]
+ sddl = ndr_unpack(security.descriptor, desc).as_sddl()
+ self.assertEqual(sddl, expected)
+
+ def test_modify__service_allowed_to_authenticate_from_device_silo(self):
+ """Test the --service-allowed-to-authenticate-from-device-silo shortcut."""
+ name = self.unique_name()
+
+ # Create a policy to modify for this test.
+ self.addCleanup(self.delete_authentication_policy, name=name, force=True)
+ self.runcmd("domain", "auth", "policy", "create", "--name", name)
+
+        # Modify service allowed to authenticate from device silo field
+ result, out, err = self.runcmd("domain", "auth", "policy", "modify",
+ "--name", name,
+ "--service-allowed-to-authenticate-from-device-silo",
+ "Developers")
+ self.assertIsNone(result, msg=err)
+
+ # Check generated SDDL.
+ policy = self.get_authentication_policy(name)
+ desc = policy["msDS-ServiceAllowedToAuthenticateFrom"][0]
+ sddl = ndr_unpack(security.descriptor, desc).as_sddl()
+ self.assertEqual(
+ sddl,
+ 'O:SYG:SYD:(XA;OICI;CR;;;WD;(@USER.ad://ext/AuthenticationSilo == "Developers"))')
+
+ def test_modify__service_allowed_to_authenticate_to(self):
+ """Modify authentication policy service allowed to authenticate to."""
+ name = self.unique_name()
+ expected = "O:SYG:SYD:(XA;OICI;CR;;;WD;(Member_of {SID(AO)}))"
+
+ # Create a policy to modify for this test.
+ self.addCleanup(self.delete_authentication_policy, name=name, force=True)
+ self.runcmd("domain", "auth", "policy", "create", "--name", name)
+
+ # Modify service allowed to authenticate to field
+ result, out, err = self.runcmd("domain", "auth", "policy", "modify",
+ "--name", name,
+ "--service-allowed-to-authenticate-to",
+ expected)
+ self.assertIsNone(result, msg=err)
+
+ # Check service allowed to authenticate to field was modified.
+ policy = self.get_authentication_policy(name)
+ self.assertEqual(str(policy["cn"]), name)
+ desc = policy["msDS-ServiceAllowedToAuthenticateTo"][0]
+ sddl = ndr_unpack(security.descriptor, desc).as_sddl()
+ self.assertEqual(sddl, expected)
+
+    def test_modify__service_allowed_to_authenticate_to_by_group(self):
+        """Tests the --service-allowed-to-authenticate-to-by-group shortcut."""
+        name = self.unique_name()
+        expected = "O:SYG:SYD:(XA;OICI;CR;;;WD;(Member_of_any {SID(%s)}))" % (
+            self.device_group.object_sid)
+
+        # Create a policy to modify for this test.
+        self.addCleanup(self.delete_authentication_policy, name=name, force=True)
+        self.runcmd("domain", "auth", "policy", "create", "--name", name)
+
+        # Modify service allowed to authenticate to field via the
+        # by-group shortcut (generates the SDDL for us).
+        result, out, err = self.runcmd("domain", "auth", "policy", "modify",
+                                       "--name", name,
+                                       "--service-allowed-to-authenticate-to-by-group",
+                                       self.device_group.name)
+        self.assertIsNone(result, msg=err)
+
+        # Check service allowed to authenticate to field was modified.
+        policy = self.get_authentication_policy(name)
+        self.assertEqual(str(policy["cn"]), name)
+        desc = policy["msDS-ServiceAllowedToAuthenticateTo"][0]
+        sddl = ndr_unpack(security.descriptor, desc).as_sddl()
+        self.assertEqual(sddl, expected)
+
+    def test_modify__service_allowed_to_authenticate_to_by_silo(self):
+        """Tests the --service-allowed-to-authenticate-to-by-silo shortcut."""
+        name = self.unique_name()
+        expected = ('O:SYG:SYD:(XA;OICI;CR;;;WD;(@USER.ad://ext/'
+                    'AuthenticationSilo == "QA"))')
+
+        # Create a policy to modify for this test.
+        self.addCleanup(self.delete_authentication_policy, name=name, force=True)
+        self.runcmd("domain", "auth", "policy", "create", "--name", name)
+
+        # Modify service allowed to authenticate to field via the
+        # by-silo shortcut (generates the SDDL for us).
+        result, out, err = self.runcmd("domain", "auth", "policy", "modify",
+                                       "--name", name,
+                                       "--service-allowed-to-authenticate-to-by-silo",
+                                       "QA")
+        self.assertIsNone(result, msg=err)
+
+        # Check service allowed to authenticate to field was modified.
+        policy = self.get_authentication_policy(name)
+        self.assertEqual(str(policy["cn"]), name)
+        desc = policy["msDS-ServiceAllowedToAuthenticateTo"][0]
+        sddl = ndr_unpack(security.descriptor, desc).as_sddl()
+        self.assertEqual(sddl, expected)
+
+ def test_modify__computer_allowed_to_authenticate_to(self):
+ """Modify authentication policy computer allowed to authenticate to."""
+ name = self.unique_name()
+ expected = "O:SYG:SYD:(XA;OICI;CR;;;WD;(Member_of {SID(AO)}))"
+
+ # Create a policy to modify for this test.
+ self.addCleanup(self.delete_authentication_policy, name=name, force=True)
+ self.runcmd("domain", "auth", "policy", "create", "--name", name)
+
+ # Modify computer allowed to authenticate to field
+ result, out, err = self.runcmd("domain", "auth", "policy", "modify",
+ "--name", name,
+ "--computer-allowed-to-authenticate-to",
+ expected)
+ self.assertIsNone(result, msg=err)
+
+ # Check computer allowed to authenticate to field was modified.
+ policy = self.get_authentication_policy(name)
+ self.assertEqual(str(policy["cn"]), name)
+ desc = policy["msDS-ComputerAllowedToAuthenticateTo"][0]
+ sddl = ndr_unpack(security.descriptor, desc).as_sddl()
+ self.assertEqual(sddl, expected)
+
+    def test_modify__computer_allowed_to_authenticate_to_by_group(self):
+        """Tests the --computer-allowed-to-authenticate-to-by-group shortcut."""
+        name = self.unique_name()
+        expected = "O:SYG:SYD:(XA;OICI;CR;;;WD;(Member_of_any {SID(%s)}))" % (
+            self.device_group.object_sid)
+
+        # Create a policy to modify for this test.
+        self.addCleanup(self.delete_authentication_policy, name=name, force=True)
+        self.runcmd("domain", "auth", "policy", "create", "--name", name)
+
+        # Modify computer allowed to authenticate to field via the
+        # by-group shortcut (generates the SDDL for us).
+        result, out, err = self.runcmd("domain", "auth", "policy", "modify",
+                                       "--name", name,
+                                       "--computer-allowed-to-authenticate-to-by-group",
+                                       self.device_group.name)
+        self.assertIsNone(result, msg=err)
+
+        # Check computer allowed to authenticate to field was modified.
+        policy = self.get_authentication_policy(name)
+        self.assertEqual(str(policy["cn"]), name)
+        desc = policy["msDS-ComputerAllowedToAuthenticateTo"][0]
+        sddl = ndr_unpack(security.descriptor, desc).as_sddl()
+        self.assertEqual(sddl, expected)
+
+    def test_modify__computer_allowed_to_authenticate_to_by_silo(self):
+        """Tests the --computer-allowed-to-authenticate-to-by-silo shortcut."""
+        name = self.unique_name()
+        expected = ('O:SYG:SYD:(XA;OICI;CR;;;WD;(@USER.ad://ext/'
+                    'AuthenticationSilo == "QA"))')
+
+        # Create a policy to modify for this test.
+        self.addCleanup(self.delete_authentication_policy, name=name, force=True)
+        self.runcmd("domain", "auth", "policy", "create", "--name", name)
+
+        # Modify computer allowed to authenticate to field via the
+        # by-silo shortcut (generates the SDDL for us).
+        result, out, err = self.runcmd("domain", "auth", "policy", "modify",
+                                       "--name", name,
+                                       "--computer-allowed-to-authenticate-to-by-silo",
+                                       "QA")
+        self.assertIsNone(result, msg=err)
+
+        # Check computer allowed to authenticate to field was modified.
+        policy = self.get_authentication_policy(name)
+        self.assertEqual(str(policy["cn"]), name)
+        desc = policy["msDS-ComputerAllowedToAuthenticateTo"][0]
+        sddl = ndr_unpack(security.descriptor, desc).as_sddl()
+        self.assertEqual(sddl, expected)
+
+ def test_modify__name_missing(self):
+ """Test modify authentication but the --name argument is missing."""
+ result, out, err = self.runcmd("domain", "auth", "policy", "modify",
+ "--description", "NewDescription")
+ self.assertEqual(result, -1)
+ self.assertIn("Argument --name is required.", err)
+
+ def test_modify__notfound(self):
+ """Test modify an authentication silo that doesn't exist."""
+ result, out, err = self.runcmd("domain", "auth", "policy", "modify",
+ "--name", "doesNotExist",
+ "--description", "NewDescription")
+ self.assertEqual(result, -1)
+ self.assertIn("Authentication policy doesNotExist not found.", err)
+
+ def test_modify__audit_enforce(self):
+ """Test modify authentication policy using --audit and --enforce."""
+ name = self.unique_name()
+
+ # Create a policy to modify for this test.
+ self.addCleanup(self.delete_authentication_policy,
+ name=name, force=True)
+ self.runcmd("domain", "auth", "policy", "create", "--name", name)
+
+ # Change to audit, the default is --enforce.
+ result, out, err = self.runcmd("domain", "auth", "policy", "modify",
+ "--name", name,
+ "--audit")
+ self.assertIsNone(result, msg=err)
+
+ # Check that the policy was changed to --audit.
+ policy = self.get_authentication_policy(name)
+ self.assertEqual(str(policy["msDS-AuthNPolicyEnforced"]), "FALSE")
+
+ result, out, err = self.runcmd("domain", "auth", "policy", "modify",
+ "--name", name,
+ "--enforce")
+ self.assertIsNone(result, msg=err)
+
+ # Check if the policy was changed back to --enforce.
+ policy = self.get_authentication_policy(name)
+ self.assertEqual(str(policy["msDS-AuthNPolicyEnforced"]), "TRUE")
+
+ def test_modify__protect_unprotect(self):
+ """Test modify authentication policy using --protect and --unprotect."""
+ name = self.unique_name()
+
+ # Create a policy to modify for this test.
+ self.addCleanup(self.delete_authentication_policy, name=name, force=True)
+ self.runcmd("domain", "auth", "policy", "create", "--name", name)
+
+ utils = SDUtils(self.samdb)
+ result, out, err = self.runcmd("domain", "auth", "policy", "modify",
+ "--name", name,
+ "--protect")
+ self.assertIsNone(result, msg=err)
+
+ # Check that claim type was protected.
+ policy = self.get_authentication_policy(name)
+ desc = utils.get_sd_as_sddl(policy["dn"])
+ self.assertIn("(D;;DTSD;;;WD)", desc)
+
+ result, out, err = self.runcmd("domain", "auth", "policy", "modify",
+ "--name", name,
+ "--unprotect")
+ self.assertIsNone(result, msg=err)
+
+ # Check that claim type was unprotected.
+ policy = self.get_authentication_policy(name)
+ desc = utils.get_sd_as_sddl(policy["dn"])
+ self.assertNotIn("(D;;DTSD;;;WD)", desc)
+
+ def test_modify__audit_enforce_together(self):
+ """Test modify auth policy using both --audit and --enforce."""
+ result, out, err = self.runcmd("domain", "auth", "policy", "modify",
+ "--name", "User Policy",
+ "--audit", "--enforce")
+ self.assertEqual(result, -1)
+ self.assertIn("--audit and --enforce cannot be used together.", err)
+
+ def test_modify__protect_unprotect_together(self):
+ """Test modify authentication policy using --protect and --unprotect."""
+ result, out, err = self.runcmd("domain", "auth", "policy", "modify",
+ "--name", "User Policy",
+ "--protect", "--unprotect")
+ self.assertEqual(result, -1)
+ self.assertIn("--protect and --unprotect cannot be used together.", err)
+
+    def test_modify__fails(self):
+        """Test modifying an authentication policy, but it fails."""
+        # Raise ModelError when ldb.modify() is called.
+        with patch.object(SamDB, "modify") as modify_mock:
+            modify_mock.side_effect = ModelError("Custom error message")
+            result, out, err = self.runcmd("domain", "auth", "policy", "modify",
+                                           "--name", "User Policy",
+                                           "--description", "New description")
+            self.assertEqual(result, -1)
+            self.assertIn("Custom error message", err)
+
+ def test_delete__success(self):
+ """Test deleting an authentication policy that is not protected."""
+ # Create non-protected authentication policy.
+ result, out, err = self.runcmd("domain", "auth", "policy", "create",
+ "--name=deleteTest")
+ self.assertIsNone(result, msg=err)
+ policy = self.get_authentication_policy("deleteTest")
+ self.assertIsNotNone(policy)
+
+ # Do the deletion.
+ result, out, err = self.runcmd("domain", "auth", "policy", "delete",
+ "--name", "deleteTest")
+ self.assertIsNone(result, msg=err)
+
+ # Authentication policy shouldn't exist anymore.
+ policy = self.get_authentication_policy("deleteTest")
+ self.assertIsNone(policy)
+
+    def test_delete__protected(self):
+        """Test deleting a protected auth policy, with and without --force."""
+        # Create protected authentication policy.
+        result, out, err = self.runcmd("domain", "auth", "policy", "create",
+                                       "--name=deleteProtected",
+                                       "--protect")
+        self.assertIsNone(result, msg=err)
+        policy = self.get_authentication_policy("deleteProtected")
+        self.assertIsNotNone(policy)
+
+        # Do the deletion.
+        result, out, err = self.runcmd("domain", "auth", "policy", "delete",
+                                       "--name=deleteProtected")
+        self.assertEqual(result, -1)
+
+        # Authentication policy should still exist.
+        policy = self.get_authentication_policy("deleteProtected")
+        self.assertIsNotNone(policy)
+
+        # Try a force delete instead.
+        result, out, err = self.runcmd("domain", "auth", "policy", "delete",
+                                       "--name=deleteProtected", "--force")
+        self.assertIsNone(result, msg=err)
+
+        # Authentication policy shouldn't exist anymore.
+        policy = self.get_authentication_policy("deleteProtected")
+        self.assertIsNone(policy)
+
+ def test_delete__notfound(self):
+ """Test deleting an authentication policy that doesn't exist."""
+ result, out, err = self.runcmd("domain", "auth", "policy", "delete",
+ "--name", "doesNotExist")
+ self.assertEqual(result, -1)
+ self.assertIn("Authentication policy doesNotExist not found.", err)
+
+ def test_delete__name_required(self):
+ """Test deleting an authentication policy without --name argument."""
+ result, out, err = self.runcmd("domain", "auth", "policy", "delete")
+ self.assertEqual(result, -1)
+ self.assertIn("Argument --name is required.", err)
+
+ def test_delete__force_fails(self):
+ """Test deleting an authentication policy with --force, but it fails."""
+ name = self.unique_name()
+
+ # Create protected authentication policy.
+ self.addCleanup(self.delete_authentication_policy, name=name, force=True)
+ result, out, err = self.runcmd("domain", "auth", "policy", "create",
+ "--name", name,
+ "--protect")
+ self.assertIsNone(result, msg=err)
+
+ # Policy exists
+ policy = self.get_authentication_policy(name)
+ self.assertIsNotNone(policy)
+
+ # Try doing delete with --force.
+ # Patch SDUtils.dacl_delete_aces with a Mock that raises ModelError.
+ with patch.object(SDUtils, "dacl_delete_aces") as delete_mock:
+ delete_mock.side_effect = ModelError("Custom error message")
+ result, out, err = self.runcmd("domain", "auth", "policy", "delete",
+ "--name", name,
+ "--force")
+ self.assertEqual(result, -1)
+ self.assertIn("Custom error message", err)
+
+ def test_delete__fails(self):
+ """Test deleting an authentication policy, but it fails."""
+ name = self.unique_name()
+
+ # Create regular authentication policy.
+ self.addCleanup(self.delete_authentication_policy, name=name, force=True)
+ result, out, err = self.runcmd("domain", "auth", "policy", "create",
+ "--name", name)
+ self.assertIsNone(result, msg=err)
+
+ # Policy exists
+ policy = self.get_authentication_policy(name)
+ self.assertIsNotNone(policy)
+
+ # Raise ModelError when ldb.delete() is called.
+ with patch.object(SamDB, "delete") as delete_mock:
+ delete_mock.side_effect = ModelError("Custom error message")
+ result, out, err = self.runcmd("domain", "auth", "policy", "delete",
+ "--name", name)
+ self.assertEqual(result, -1)
+ self.assertIn("Custom error message", err)
+
+ # When not using --force we get a hint.
+ self.assertIn("Try --force", err)
+
+    def test_delete__protected_fails(self):
+        """Test force-deleting a protected authentication policy, but it fails."""
+        name = self.unique_name()
+
+        # Create protected authentication policy.
+        self.addCleanup(self.delete_authentication_policy, name=name, force=True)
+        result, out, err = self.runcmd("domain", "auth", "policy", "create",
+                                       "--name", name,
+                                       "--protect")
+        self.assertIsNone(result, msg=err)
+
+        # Policy exists
+        policy = self.get_authentication_policy(name)
+        self.assertIsNotNone(policy)
+
+        # Raise ModelError when ldb.delete() is called.
+        with patch.object(SamDB, "delete") as delete_mock:
+            delete_mock.side_effect = ModelError("Custom error message")
+            result, out, err = self.runcmd("domain", "auth", "policy", "delete",
+                                           "--name", name,
+                                           "--force")
+            self.assertEqual(result, -1)
+            self.assertIn("Custom error message", err)
+
+            # When using --force we don't get the hint.
+            self.assertNotIn("Try --force", err)
diff --git a/python/samba/tests/samba_tool/domain_auth_silo.py b/python/samba/tests/samba_tool/domain_auth_silo.py
new file mode 100644
index 0000000..a1cd85c
--- /dev/null
+++ b/python/samba/tests/samba_tool/domain_auth_silo.py
@@ -0,0 +1,618 @@
+# Unix SMB/CIFS implementation.
+#
+# Tests for samba-tool domain auth silo command
+#
+# Copyright (C) Catalyst.Net Ltd. 2023
+#
+# Written by Rob van der Linde <rob@catalyst.net.nz>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import json
+from unittest.mock import patch
+
+from samba.netcmd.domain.models.exceptions import ModelError
+from samba.samdb import SamDB
+from samba.sd_utils import SDUtils
+
+from .silo_base import SiloTest
+
+
+class AuthSiloCmdTestCase(SiloTest):
+
+ def test_list(self):
+ """Test listing authentication silos in list format."""
+ result, out, err = self.runcmd("domain", "auth", "silo", "list")
+ self.assertIsNone(result, msg=err)
+
+ expected_silos = ["Developers", "Managers", "QA"]
+
+ for silo in expected_silos:
+ self.assertIn(silo, out)
+
+ def test_list___json(self):
+ """Test listing authentication silos in JSON format."""
+ result, out, err = self.runcmd("domain", "auth", "silo",
+ "list", "--json")
+ self.assertIsNone(result, msg=err)
+
+ # we should get valid json
+ silos = json.loads(out)
+
+ expected_silos = ["Developers", "Managers", "QA"]
+
+ for name in expected_silos:
+ silo = silos[name]
+ self.assertIn("msDS-AuthNPolicySilo", list(silo["objectClass"]))
+ self.assertIn("description", silo)
+ self.assertIn("msDS-UserAuthNPolicy", silo)
+ self.assertIn("objectGUID", silo)
+
+ def test_view(self):
+ """Test viewing a single authentication silo."""
+ result, out, err = self.runcmd("domain", "auth", "silo", "view",
+ "--name", "Developers")
+ self.assertIsNone(result, msg=err)
+
+ # we should get valid json
+ silo = json.loads(out)
+
+ # check a few fields only
+ self.assertEqual(silo["cn"], "Developers")
+ self.assertEqual(silo["description"],
+ "Developers, Developers, Developers!")
+
+ def test_view__notfound(self):
+ """Test viewing an authentication silo that doesn't exist."""
+ result, out, err = self.runcmd("domain", "auth", "silo", "view",
+ "--name", "doesNotExist")
+ self.assertEqual(result, -1)
+ self.assertIn("Authentication silo doesNotExist not found.", err)
+
+ def test_view__name_required(self):
+ """Test view authentication silo without --name argument."""
+ result, out, err = self.runcmd("domain", "auth", "silo", "view")
+ self.assertEqual(result, -1)
+ self.assertIn("Argument --name is required.", err)
+
+ def test_create__single_policy(self):
+ """Test creating a new authentication silo with a single policy."""
+ name = self.unique_name()
+
+ self.addCleanup(self.delete_authentication_silo, name=name, force=True)
+ result, out, err = self.runcmd("domain", "auth", "silo", "create",
+ "--name", name,
+ "--user-authentication-policy", "User Policy")
+ self.assertIsNone(result, msg=err)
+
+ # Check silo that was created
+ silo = self.get_authentication_silo(name)
+ self.assertEqual(str(silo["cn"]), name)
+ self.assertIn("User Policy", str(silo["msDS-UserAuthNPolicy"]))
+ self.assertEqual(str(silo["msDS-AuthNPolicySiloEnforced"]), "TRUE")
+
+ def test_create__multiple_policies(self):
+ """Test creating a new authentication silo with multiple policies."""
+ name = self.unique_name()
+
+ self.addCleanup(self.delete_authentication_silo, name=name, force=True)
+ result, out, err = self.runcmd("domain", "auth", "silo", "create",
+ "--name", name,
+ "--user-authentication-policy",
+ "User Policy",
+ "--service-authentication-policy",
+ "Service Policy",
+ "--computer-authentication-policy",
+ "Computer Policy")
+ self.assertIsNone(result, msg=err)
+
+ # Check silo that was created.
+ silo = self.get_authentication_silo(name)
+ self.assertEqual(str(silo["cn"]), name)
+ self.assertIn("User Policy", str(silo["msDS-UserAuthNPolicy"]))
+ self.assertIn("Service Policy", str(silo["msDS-ServiceAuthNPolicy"]))
+ self.assertIn("Computer Policy", str(silo["msDS-ComputerAuthNPolicy"]))
+ self.assertEqual(str(silo["msDS-AuthNPolicySiloEnforced"]), "TRUE")
+
+ def test_create__policy_dn(self):
+ """Test creating a new authentication silo when policy is a dn."""
+ name = self.unique_name()
+ policy = self.get_authentication_policy("User Policy")
+
+ self.addCleanup(self.delete_authentication_silo, name=name, force=True)
+ result, out, err = self.runcmd("domain", "auth", "silo", "create",
+ "--name", name,
+ "--user-authentication-policy", policy["dn"])
+ self.assertIsNone(result, msg=err)
+
+ # Check silo that was created
+ silo = self.get_authentication_silo(name)
+ self.assertEqual(str(silo["cn"]), name)
+ self.assertIn(str(policy["name"]), str(silo["msDS-UserAuthNPolicy"]))
+ self.assertEqual(str(silo["msDS-AuthNPolicySiloEnforced"]), "TRUE")
+
+ def test_create__already_exists(self):
+ """Test creating a new authentication silo that already exists."""
+ result, out, err = self.runcmd("domain", "auth", "silo", "create",
+ "--name", "Developers",
+ "--user-authentication-policy", "User Policy")
+ self.assertEqual(result, -1)
+ self.assertIn("Authentication silo Developers already exists.", err)
+
+ def test_create__name_missing(self):
+ """Test create authentication silo without --name argument."""
+ result, out, err = self.runcmd("domain", "auth", "silo", "create",
+ "--user-authentication-policy", "User Policy")
+ self.assertEqual(result, -1)
+ self.assertIn("Argument --name is required.", err)
+
+ def test_create__audit(self):
+ """Test create authentication silo with --audit flag."""
+ name = self.unique_name()
+
+ self.addCleanup(self.delete_authentication_silo, name=name, force=True)
+ result, out, err = self.runcmd("domain", "auth", "silo", "create",
+ "--name", "auditPolicies",
+ "--user-authentication-policy", "User Policy",
+ "--name", name,
+ "--user-authentication-policy", "User Policy",
+ "--audit")
+ self.assertIsNone(result, msg=err)
+
+ # fetch and check silo
+ silo = self.get_authentication_silo(name)
+ self.assertEqual(str(silo["msDS-AuthNPolicySiloEnforced"]), "FALSE")
+
+ def test_create__enforce(self):
+ """Test create authentication silo with --enforce flag."""
+ name = self.unique_name()
+
+ self.addCleanup(self.delete_authentication_silo, name=name, force=True)
+ result, out, err = self.runcmd("domain", "auth", "silo", "create",
+ "--name", name,
+ "--user-authentication-policy", "User Policy",
+ "--enforce")
+ self.assertIsNone(result, msg=err)
+
+ # fetch and check silo
+ silo = self.get_authentication_silo(name)
+ self.assertEqual(str(silo["msDS-AuthNPolicySiloEnforced"]), "TRUE")
+
+ def test_create__audit_enforce_together(self):
+ """Test create authentication silo using both --audit and --enforce."""
+ name = self.unique_name()
+
+ result, out, err = self.runcmd("domain", "auth", "silo", "create",
+ "--name", name,
+ "--user-authentication-policy", "User Policy",
+ "--audit", "--enforce")
+
+ self.assertEqual(result, -1)
+ self.assertIn("--audit and --enforce cannot be used together.", err)
+
+ def test_create__protect_unprotect_together(self):
+ """Test create authentication silo using --protect and --unprotect."""
+ name = self.unique_name()
+
+ result, out, err = self.runcmd("domain", "auth", "silo", "create",
+ "--name", name,
+ "--user-authentication-policy", "User Policy",
+ "--protect", "--unprotect")
+
+ self.assertEqual(result, -1)
+ self.assertIn("--protect and --unprotect cannot be used together.", err)
+
+ def test_create__policy_notfound(self):
+ """Test create authentication silo with a policy that doesn't exist."""
+ name = self.unique_name()
+
+ result, out, err = self.runcmd("domain", "auth", "silo", "create",
+ "--name", name,
+ "--user-authentication-policy", "Invalid Policy")
+
+ self.assertEqual(result, -1)
+ self.assertIn("Authentication policy Invalid Policy not found.", err)
+
+ def test_create__fails(self):
+ """Test creating an authentication silo, but it fails."""
+ name = self.unique_name()
+
+ # Raise ModelError when ldb.add() is called.
+ with patch.object(SamDB, "add") as add_mock:
+ add_mock.side_effect = ModelError("Custom error message")
+ result, out, err = self.runcmd("domain", "auth", "silo", "create",
+ "--name", name,
+ "--user-authentication-policy", "User Policy")
+ self.assertEqual(result, -1)
+ self.assertIn("Custom error message", err)
+
+ def test_modify__description(self):
+ """Test modify authentication silo changing the description field."""
+ name = self.unique_name()
+
+ # Create a silo to modify for this test.
+ self.addCleanup(self.delete_authentication_silo, name=name, force=True)
+ self.runcmd("domain", "auth", "silo", "create", "--name", name)
+
+ result, out, err = self.runcmd("domain", "auth", "silo", "modify",
+ "--name", name,
+ "--description", "New Description")
+ self.assertIsNone(result, msg=err)
+
+ # check new value
+ silo = self.get_authentication_silo(name)
+ self.assertEqual(str(silo["description"]), "New Description")
+
+ def test_modify__audit_enforce(self):
+ """Test modify authentication silo setting --audit and --enforce."""
+ name = self.unique_name()
+
+ # Create a silo to modify for this test.
+ self.addCleanup(self.delete_authentication_silo, name=name, force=True)
+ self.runcmd("domain", "auth", "silo", "create", "--name", name)
+
+ result, out, err = self.runcmd("domain", "auth", "silo", "modify",
+ "--name", name,
+ "--audit")
+ self.assertIsNone(result, msg=err)
+
+ # Check silo is set to audit.
+ silo = self.get_authentication_silo(name)
+ self.assertEqual(str(silo["msDS-AuthNPolicySiloEnforced"]), "FALSE")
+
+ result, out, err = self.runcmd("domain", "auth", "silo", "modify",
+ "--name", name,
+ "--enforce")
+ self.assertIsNone(result, msg=err)
+
+ # Check is set to enforce.
+ silo = self.get_authentication_silo(name)
+ self.assertEqual(str(silo["msDS-AuthNPolicySiloEnforced"]), "TRUE")
+
+ def test_modify__protect_unprotect(self):
+ """Test modify un-protecting and protecting an authentication silo."""
+ name = self.unique_name()
+
+ # Create a silo to modify for this test.
+ self.addCleanup(self.delete_authentication_silo, name=name, force=True)
+ self.runcmd("domain", "auth", "silo", "create", "--name", name)
+
+ utils = SDUtils(self.samdb)
+ result, out, err = self.runcmd("domain", "auth", "silo", "modify",
+ "--name", name,
+ "--protect")
+ self.assertIsNone(result, msg=err)
+
+ # Check that silo was protected.
+ silo = self.get_authentication_silo(name)
+ desc = utils.get_sd_as_sddl(silo["dn"])
+ self.assertIn("(D;;DTSD;;;WD)", desc)
+
+ result, out, err = self.runcmd("domain", "auth", "silo", "modify",
+ "--name", name,
+ "--unprotect")
+ self.assertIsNone(result, msg=err)
+
+ # Check that silo was unprotected.
+ silo = self.get_authentication_silo(name)
+ desc = utils.get_sd_as_sddl(silo["dn"])
+ self.assertNotIn("(D;;DTSD;;;WD)", desc)
+
+ def test_modify__audit_enforce_together(self):
+ """Test modify silo doesn't allow both --audit and --enforce."""
+ result, out, err = self.runcmd("domain", "auth", "silo", "modify",
+ "--name", "QA",
+ "--audit", "--enforce")
+
+ self.assertEqual(result, -1)
+ self.assertIn("--audit and --enforce cannot be used together.", err)
+
+ def test_modify__protect_unprotect_together(self):
+ """Test modify silo using both --protect and --unprotect."""
+ result, out, err = self.runcmd("domain", "auth", "silo", "modify",
+ "--name", "Developers",
+ "--protect", "--unprotect")
+ self.assertEqual(result, -1)
+ self.assertIn("--protect and --unprotect cannot be used together.", err)
+
+ def test_modify__notfound(self):
+ """Test modify an authentication silo that doesn't exist."""
+ result, out, err = self.runcmd("domain", "auth", "silo", "modify",
+ "--name", "doesNotExist",
+ "--description=NewDescription")
+ self.assertEqual(result, -1)
+ self.assertIn("Authentication silo doesNotExist not found.", err)
+
+ def test_modify__name_missing(self):
+ """Test modify authentication silo without --name argument."""
+ result, out, err = self.runcmd("domain", "auth", "silo", "modify")
+ self.assertEqual(result, -1)
+ self.assertIn("Argument --name is required.", err)
+
+ def test_modify__fails(self):
+ """Test modify authentication silo, but it fails."""
+ # Raise ModelError when ldb.modify() is called.
+ with patch.object(SamDB, "modify") as add_mock:
+ add_mock.side_effect = ModelError("Custom error message")
+ result, out, err = self.runcmd("domain", "auth", "silo", "modify",
+ "--name", "Developers",
+ "--description", "Devs")
+ self.assertEqual(result, -1)
+ self.assertIn("Custom error message", err)
+
+    # NOTE(review): every other deletion test in this class follows the
+    # test_delete__* naming convention; consider renaming this method to
+    # test_delete__success for consistency (renaming changes the test id,
+    # so it is flagged here rather than done in-place).
+    def test_authentication_silo_delete(self):
+        """Test deleting an authentication silo that is not protected."""
+        name = self.unique_name()
+
+        # Create non-protected authentication silo.
+        result, out, err = self.runcmd("domain", "auth", "silo", "create",
+                                       "--name", name,
+                                       "--user-authentication-policy", "User Policy")
+        self.assertIsNone(result, msg=err)
+        silo = self.get_authentication_silo(name)
+        self.assertIsNotNone(silo)
+
+        # Do the deletion.
+        result, out, err = self.runcmd("domain", "auth", "silo", "delete",
+                                       "--name", name)
+        self.assertIsNone(result, msg=err)
+
+        # Authentication silo shouldn't exist anymore.
+        silo = self.get_authentication_silo(name)
+        self.assertIsNone(silo)
+
+ def test_delete__protected(self):
+ """Test deleting a protected auth silo, with and without --force."""
+ name = self.unique_name()
+
+ # Create protected authentication silo.
+ result, out, err = self.runcmd("domain", "auth", "silo", "create",
+ "--name", name,
+ "--user-authentication-policy", "User Policy",
+ "--protect")
+ self.assertIsNone(result, msg=err)
+ silo = self.get_authentication_silo(name)
+ self.assertIsNotNone(silo)
+
+ # Do the deletion.
+ result, out, err = self.runcmd("domain", "auth", "silo", "delete",
+ "--name", name)
+ self.assertEqual(result, -1)
+
+ # Authentication silo should still exist.
+ silo = self.get_authentication_silo(name)
+ self.assertIsNotNone(silo)
+
+ # Try a force delete instead.
+ result, out, err = self.runcmd("domain", "auth", "silo", "delete",
+ "--name", name, "--force")
+ self.assertIsNone(result, msg=err)
+
+ # Authentication silo shouldn't exist anymore.
+ silo = self.get_authentication_silo(name)
+ self.assertIsNone(silo)
+
+ def test_delete__notfound(self):
+ """Test deleting an authentication silo that doesn't exist."""
+ result, out, err = self.runcmd("domain", "auth", "silo", "delete",
+ "--name", "doesNotExist")
+ self.assertEqual(result, -1)
+ self.assertIn("Authentication silo doesNotExist not found.", err)
+
+ def test_delete__name_required(self):
+ """Test deleting an authentication silo without --name argument."""
+ result, out, err = self.runcmd("domain", "auth", "silo", "delete")
+ self.assertEqual(result, -1)
+ self.assertIn("Argument --name is required.", err)
+
+ def test_delete__force_fails(self):
+ """Test deleting an authentication silo with --force, but it fails."""
+ name = self.unique_name()
+
+ # Create protected authentication silo.
+ self.addCleanup(self.delete_authentication_silo, name=name, force=True)
+ result, out, err = self.runcmd("domain", "auth", "silo", "create",
+ "--name", name,
+ "--user-authentication-policy", "User Policy",
+ "--protect")
+ self.assertIsNone(result, msg=err)
+
+ # Silo exists
+ silo = self.get_authentication_silo(name)
+ self.assertIsNotNone(silo)
+
+ # Try doing delete with --force.
+ # Patch SDUtils.dacl_delete_aces with a Mock that raises ModelError.
+ with patch.object(SDUtils, "dacl_delete_aces") as delete_mock:
+ delete_mock.side_effect = ModelError("Custom error message")
+ result, out, err = self.runcmd("domain", "auth", "silo", "delete",
+ "--name", name,
+ "--force")
+ self.assertEqual(result, -1)
+ self.assertIn("Custom error message", err)
+
+    def test_delete__fails(self):
+        """Test deleting an authentication silo, but it fails."""
+        name = self.unique_name()
+
+        # Create regular (unprotected) authentication silo.
+        self.addCleanup(self.delete_authentication_silo, name=name, force=True)
+        result, out, err = self.runcmd("domain", "auth", "silo", "create",
+                                       "--name", name,
+                                       "--user-authentication-policy", "User Policy")
+        self.assertIsNone(result, msg=err)
+
+        # Silo exists.
+        silo = self.get_authentication_silo(name)
+        self.assertIsNotNone(silo)
+
+        # Raise ModelError when SamDB.delete() is called.
+        with patch.object(SamDB, "delete") as delete_mock:
+            delete_mock.side_effect = ModelError("Custom error message")
+            result, out, err = self.runcmd("domain", "auth", "silo", "delete",
+                                           "--name", name)
+            self.assertEqual(result, -1)
+            self.assertIn("Custom error message", err)
+
+            # When not using --force we get a hint.
+            self.assertIn("Try --force", err)
+
+    def test_delete__protected_fails(self):
+        """Test force-deleting a protected authentication silo, but it fails."""
+        name = self.unique_name()
+
+        # Create protected authentication silo.
+        self.addCleanup(self.delete_authentication_silo, name=name, force=True)
+        result, out, err = self.runcmd("domain", "auth", "silo", "create",
+                                       "--name", name,
+                                       "--user-authentication-policy", "User Policy",
+                                       "--protect")
+        self.assertIsNone(result, msg=err)
+
+        # Silo exists.
+        silo = self.get_authentication_silo(name)
+        self.assertIsNotNone(silo)
+
+        # Raise ModelError when SamDB.delete() is called.
+        with patch.object(SamDB, "delete") as delete_mock:
+            delete_mock.side_effect = ModelError("Custom error message")
+            result, out, err = self.runcmd("domain", "auth", "silo", "delete",
+                                           "--name", name,
+                                           "--force")
+            self.assertEqual(result, -1)
+            self.assertIn("Custom error message", err)
+
+            # When using --force we don't get the hint.
+            self.assertNotIn("Try --force", err)
+
+
+class AuthSiloMemberCmdTestCase(SiloTest):
+    """Tests for the silo member grant/list/revoke sub-commands."""
+
+    def setUp(self):
+        super().setUp()
+
+        # Create an organisational unit to test in.
+        self.ou = self.samdb.get_default_basedn()
+        self.ou.add_child("OU=Domain Auth Tests")
+        self.samdb.create_ou(self.ou)
+        # tree_delete removes the OU together with anything created inside it.
+        self.addCleanup(self.samdb.delete, self.ou, ["tree_delete:1"])
+
+        # Grant member access to silos.
+        self.grant_silo_access("Developers", "bob")
+        self.grant_silo_access("Developers", "jane")
+        self.grant_silo_access("Managers", "alice")
+
+    def create_computer(self, name):
+        """Create a Computer in the test OU and return the dn."""
+        dn = f"CN={name},{self.ou}"
+        self.samdb.newcomputer(name, self.ou)
+        return dn
+
+    def grant_silo_access(self, silo, member):
+        """Grant a member access to an authentication silo.
+
+        Registers a cleanup that revokes the access again afterwards.
+        """
+        result, out, err = self.runcmd("domain", "auth", "silo",
+                                       "member", "grant",
+                                       "--name", silo, "--member", member)
+
+        self.assertIsNone(result, msg=err)
+        self.assertIn(
+            f"User {member} granted access to the authentication silo {silo}",
+            out)
+        self.addCleanup(self.revoke_silo_access, silo, member)
+
+    def revoke_silo_access(self, silo, member):
+        """Revoke a member from an authentication silo."""
+        result, out, err = self.runcmd("domain", "auth", "silo",
+                                       "member", "revoke",
+                                       "--name", silo, "--member", member)
+
+        self.assertIsNone(result, msg=err)
+
+    def test_member_list(self):
+        """Test listing authentication silo members in list format."""
+        alice = self.get_user("alice")
+        jane = self.get_user("jane")
+        bob = self.get_user("bob")
+
+        result, out, err = self.runcmd("domain", "auth", "silo",
+                                       "member", "list",
+                                       "--name", "Developers")
+
+        # bob and jane are in Developers; alice only in Managers.
+        self.assertIsNone(result, msg=err)
+        self.assertIn(str(bob.dn), out)
+        self.assertIn(str(jane.dn), out)
+        self.assertNotIn(str(alice.dn), out)
+
+    def test_member_list___json(self):
+        """Test listing authentication silo members in json format."""
+        alice = self.get_user("alice")
+        jane = self.get_user("jane")
+        bob = self.get_user("bob")
+
+        result, out, err = self.runcmd("domain", "auth", "silo",
+                                       "member", "list",
+                                       "--name", "Developers", "--json")
+
+        self.assertIsNone(result, msg=err)
+        members = json.loads(out)
+        members_dn = [member["dn"] for member in members]
+        self.assertIn(str(bob.dn), members_dn)
+        self.assertIn(str(jane.dn), members_dn)
+        self.assertNotIn(str(alice.dn), members_dn)
+
+    def test_member_list__name_missing(self):
+        """Test listing authentication silo members without the name argument."""
+        result, out, err = self.runcmd("domain", "auth", "silo",
+                                       "member", "list")
+
+        self.assertIsNotNone(result)
+        self.assertIn("Argument --name is required.", err)
+
+    def test_member_grant__user(self):
+        """Test adding a user to an authentication silo."""
+        self.grant_silo_access("Developers", "joe")
+
+        # Check if member is in silo.
+        user = self.get_user("joe")
+        silo = self.get_authentication_silo("Developers")
+        members = [str(member) for member in silo["msDS-AuthNPolicySiloMembers"]]
+        self.assertIn(str(user.dn), members)
+
+    def test_member_grant__computer(self):
+        """Test adding a computer to an authentication silo"""
+        name = self.unique_name()
+        computer = self.create_computer(name)
+        silo = "Developers"
+
+        # Don't use self.grant_silo_access as it will try to clean up the
+        # member; the computer is removed with the OU tree-delete anyway.
+        result, out, err = self.runcmd("domain", "auth", "silo",
+                                       "member", "grant",
+                                       "--name", silo,
+                                       "--member", computer)
+
+        self.assertIsNone(result, msg=err)
+        self.assertIn(
+            f"User {name}$ granted access to the authentication silo {silo} (unassigned).",
+            out)
+
+    def test_member_grant__unknown_user(self):
+        """Test adding an unknown user to an authentication silo."""
+        result, out, err = self.runcmd("domain", "auth", "silo",
+                                       "member", "grant",
+                                       "--name", "Developers",
+                                       "--member", "does_not_exist")
+
+        self.assertIsNotNone(result)
+        self.assertIn("User does_not_exist not found.", err)
diff --git a/python/samba/tests/samba_tool/domain_claim.py b/python/samba/tests/samba_tool/domain_claim.py
new file mode 100644
index 0000000..96caacd
--- /dev/null
+++ b/python/samba/tests/samba_tool/domain_claim.py
@@ -0,0 +1,608 @@
+# Unix SMB/CIFS implementation.
+#
+# Tests for samba-tool domain claim management
+#
+# Copyright (C) Catalyst.Net Ltd. 2023
+#
+# Written by Rob van der Linde <rob@catalyst.net.nz>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import json
+import os
+
+from ldb import SCOPE_ONELEVEL
+from samba.sd_utils import SDUtils
+
+from .base import SambaToolCmdTest
+
+# List of claim value types we should expect to see.
+VALUE_TYPES = [
+ "Date Time",
+ "Multi-valued Choice",
+ "Multi-valued Text",
+ "Number",
+ "Ordered List",
+ "Single-valued Choice",
+ "Text",
+ "Yes/No"
+]
+
+HOST = "ldap://{DC_SERVER}".format(**os.environ)
+CREDS = "-U{DC_USERNAME}%{DC_PASSWORD}".format(**os.environ)
+
+
+class BaseClaimCmdTest(SambaToolCmdTest):
+    """Base class for claim types and claim value types tests."""
+
+    @classmethod
+    def setUpClass(cls):
+        # One shared SamDB connection for every test in the class.
+        cls.samdb = cls.getSamDB("-H", HOST, CREDS)
+        super().setUpClass()
+
+    @classmethod
+    def setUpTestData(cls):
+        # Claim types shared by the tests; each create_claim_type call
+        # registers an addClassCleanup to force-delete its claim type.
+        cls.create_claim_type("accountExpires", name="expires",
+                              classes=["user"])
+        cls.create_claim_type("department", name="dept", classes=["user"],
+                              protect=True)
+        cls.create_claim_type("carLicense", name="plate", classes=["user"],
+                              disable=True)
+
+    def get_services_dn(self):
+        """Returns Services DN."""
+        services_dn = self.samdb.get_config_basedn()
+        services_dn.add_child("CN=Services")
+        return services_dn
+
+    def get_claim_types_dn(self):
+        """Returns the Claim Types DN."""
+        claim_types_dn = self.get_services_dn()
+        claim_types_dn.add_child("CN=Claim Types,CN=Claims Configuration")
+        return claim_types_dn
+
+    @classmethod
+    def _run(cls, *argv):
+        """Override _run, so we don't always have to pass host and creds."""
+        args = list(argv)
+        args.extend(["-H", HOST, CREDS])
+        return super()._run(*args)
+
+    # Both command helpers run with host and credentials appended.
+    runcmd = _run
+    runsubcmd = _run
+
+    @classmethod
+    def create_claim_type(cls, attribute, name=None, description=None,
+                          classes=None, disable=False, protect=False):
+        """Create a claim type using the samba-tool command.
+
+        :param attribute: schema attribute the claim type is based on
+        :param name: optional display name (defaults to the attribute name)
+        :param description: optional description string
+        :param classes: list of class names passed via --class
+        :param disable: create the claim type disabled
+        :param protect: protect the claim type from accidental deletion
+        :return: the display name used
+        """
+
+        # if name is specified it will override the attribute name
+        display_name = name or attribute
+
+        # base command for create claim-type
+        cmd = ["domain", "claim", "claim-type",
+               "create", "--attribute", attribute]
+
+        # list of classes (applies_to)
+        if classes is not None:
+            cmd.extend([f"--class={name}" for name in classes])
+
+        # optional attributes
+        if name is not None:
+            cmd.append(f"--name={name}")
+        if description is not None:
+            cmd.append(f"--description={description}")
+        if disable:
+            cmd.append("--disable")
+        if protect:
+            cmd.append("--protect")
+
+        result, out, err = cls.runcmd(*cmd)
+        assert result is None
+        assert out.startswith("Created claim type")
+        cls.addClassCleanup(cls.delete_claim_type, name=display_name, force=True)
+        return display_name
+
+    @classmethod
+    def delete_claim_type(cls, name, force=False):
+        """Delete claim type by display name."""
+        cmd = ["domain", "claim", "claim-type", "delete", "--name", name]
+
+        # Force-delete protected claim type.
+        if force:
+            cmd.append("--force")
+
+        result, out, err = cls.runcmd(*cmd)
+        assert result is None
+        assert "Deleted claim type" in out
+
+    def get_claim_type(self, name):
+        """Get claim type by display name, or None if not exactly one match."""
+        claim_types_dn = self.get_claim_types_dn()
+
+        result = self.samdb.search(base=claim_types_dn,
+                                   scope=SCOPE_ONELEVEL,
+                                   expression=f"(displayName={name})")
+
+        # Implicitly returns None unless exactly one entry matched.
+        if len(result) == 1:
+            return result[0]
+
+
+class ClaimTypeCmdTestCase(BaseClaimCmdTest):
+ """Tests for the claim-type command."""
+
+ def test_list(self):
+ """Test listing claim types in list format."""
+ result, out, err = self.runcmd("domain", "claim", "claim-type", "list")
+ self.assertIsNone(result, msg=err)
+
+ expected_claim_types = ["expires", "dept", "plate"]
+
+ for claim_type in expected_claim_types:
+ self.assertIn(claim_type, out)
+
+ def test_list__json(self):
+ """Test listing claim types in JSON format."""
+ result, out, err = self.runcmd("domain", "claim", "claim-type",
+ "list", "--json")
+ self.assertIsNone(result, msg=err)
+
+ # we should get valid json
+ json_result = json.loads(out)
+ claim_types = list(json_result.keys())
+
+ expected_claim_types = ["expires", "dept", "plate"]
+
+ for claim_type in expected_claim_types:
+ self.assertIn(claim_type, claim_types)
+
+ def test_view(self):
+ """Test viewing a single claim type."""
+ result, out, err = self.runcmd("domain", "claim", "claim-type",
+ "view", "--name", "expires")
+ self.assertIsNone(result, msg=err)
+
+ # we should get valid json
+ claim_type = json.loads(out)
+
+ # check a few fields only
+ self.assertEqual(claim_type["displayName"], "expires")
+ self.assertEqual(claim_type["description"], "Account-Expires")
+
+ def test_view__name_missing(self):
+ """Test view claim type without --name is handled."""
+ result, out, err = self.runcmd("domain", "claim", "claim-type", "view")
+ self.assertEqual(result, -1)
+ self.assertIn("Argument --name is required.", err)
+
+ def test_view__notfound(self):
+ """Test viewing claim type that doesn't exist is handled."""
+ result, out, err = self.runcmd("domain", "claim", "claim-type",
+ "view", "--name", "doesNotExist")
+ self.assertEqual(result, -1)
+ self.assertIn("Claim type doesNotExist not found.", err)
+
+ def test_create(self):
+ """Test creating several known attributes as claim types.
+
+ The point is to test it against the various datatypes that could
+ be found, but not include every known attribute.
+ """
+ # We just need to test a few different data types for attributes,
+ # there is no need to test every known attribute.
+ claim_types = [
+ "adminCount",
+ "accountExpires",
+ "department",
+ "carLicense",
+ "msDS-PrimaryComputer",
+ "isDeleted",
+ ]
+
+ # Each known attribute must be in the schema.
+ for attribute in claim_types:
+ # Use a different name, so we don't clash with existing attributes.
+ name = "test_create_" + attribute
+
+ self.addCleanup(self.delete_claim_type, name=name, force=True)
+
+ result, out, err = self.runcmd("domain", "claim", "claim-type",
+ "create",
+ "--attribute", attribute,
+ "--name", name,
+ "--class=user")
+ self.assertIsNone(result, msg=err)
+
+ # It should have used the attribute name as displayName.
+ claim_type = self.get_claim_type(name)
+ self.assertEqual(str(claim_type["displayName"]), name)
+ self.assertEqual(str(claim_type["Enabled"]), "TRUE")
+ self.assertEqual(str(claim_type["objectClass"][-1]), "msDS-ClaimType")
+ self.assertEqual(str(claim_type["msDS-ClaimSourceType"]), "AD")
+
+ def test_create__boolean(self):
+ """Test adding a known boolean attribute and check its type."""
+ self.addCleanup(self.delete_claim_type, name="boolAttr", force=True)
+
+ result, out, err = self.runcmd("domain", "claim", "claim-type",
+ "create", "--attribute=msNPAllowDialin",
+ "--name=boolAttr", "--class=user")
+
+ self.assertIsNone(result, msg=err)
+ claim_type = self.get_claim_type("boolAttr")
+ self.assertEqual(str(claim_type["displayName"]), "boolAttr")
+ self.assertEqual(str(claim_type["msDS-ClaimValueType"]), "6")
+
+ def test_create__number(self):
+ """Test adding a known numeric attribute and check its type."""
+ self.addCleanup(self.delete_claim_type, name="intAttr", force=True)
+
+ result, out, err = self.runcmd("domain", "claim", "claim-type",
+ "create", "--attribute=adminCount",
+ "--name=intAttr", "--class=user")
+
+ self.assertIsNone(result, msg=err)
+ claim_type = self.get_claim_type("intAttr")
+ self.assertEqual(str(claim_type["displayName"]), "intAttr")
+ self.assertEqual(str(claim_type["msDS-ClaimValueType"]), "1")
+
+ def test_create__text(self):
+ """Test adding a known text attribute and check its type."""
+ self.addCleanup(self.delete_claim_type, name="textAttr", force=True)
+
+ result, out, err = self.runcmd("domain", "claim", "claim-type",
+ "create", "--attribute=givenName",
+ "--name=textAttr", "--class=user")
+
+ self.assertIsNone(result, msg=err)
+ claim_type = self.get_claim_type("textAttr")
+ self.assertEqual(str(claim_type["displayName"]), "textAttr")
+ self.assertEqual(str(claim_type["msDS-ClaimValueType"]), "3")
+
+ def test_create__disabled(self):
+ """Test adding a disabled attribute."""
+ self.addCleanup(self.delete_claim_type, name="disabledAttr", force=True)
+
+ result, out, err = self.runcmd("domain", "claim", "claim-type",
+ "create", "--attribute=msTSHomeDrive",
+ "--name=disabledAttr", "--class=user",
+ "--disable")
+
+ self.assertIsNone(result, msg=err)
+ claim_type = self.get_claim_type("disabledAttr")
+ self.assertEqual(str(claim_type["displayName"]), "disabledAttr")
+ self.assertEqual(str(claim_type["Enabled"]), "FALSE")
+
+ def test_create__protected(self):
+ """Test adding a protected attribute."""
+ self.addCleanup(self.delete_claim_type, name="protectedAttr", force=True)
+
+ result, out, err = self.runcmd("domain", "claim", "claim-type",
+ "create", "--attribute=mobile",
+ "--name=protectedAttr", "--class=user",
+ "--protect")
+
+ self.assertIsNone(result, msg=err)
+ claim_type = self.get_claim_type("protectedAttr")
+ self.assertEqual(str(claim_type["displayName"]), "protectedAttr")
+
+ # Check if the claim type is protected from accidental deletion.
+ utils = SDUtils(self.samdb)
+ desc = utils.get_sd_as_sddl(claim_type["dn"])
+ self.assertIn("(D;;DTSD;;;WD)", desc)
+
+ def test_create__classes(self):
+ """Test adding an attribute applied to different classes."""
+ schema_dn = self.samdb.get_schema_basedn()
+ user_dn = f"CN=User,{schema_dn}"
+ computer_dn = f"CN=Computer,{schema_dn}"
+
+ # --class=user
+ self.addCleanup(self.delete_claim_type, name="streetName", force=True)
+ result, out, err = self.runcmd("domain", "claim", "claim-type",
+ "create", "--attribute=street",
+ "--name=streetName", "--class=user")
+ self.assertIsNone(result, msg=err)
+ claim_type = self.get_claim_type("streetName")
+ applies_to = [str(dn) for dn in claim_type["msDS-ClaimTypeAppliesToClass"]]
+ self.assertEqual(str(claim_type["displayName"]), "streetName")
+ self.assertEqual(len(applies_to), 1)
+ self.assertIn(user_dn, applies_to)
+ self.assertNotIn(computer_dn, applies_to)
+
+ # --class=computer
+ self.addCleanup(self.delete_claim_type, name="ext", force=True)
+ result, out, err = self.runcmd("domain", "claim", "claim-type",
+ "create", "--attribute=extensionName",
+ "--name=ext", "--class=computer")
+ self.assertIsNone(result, msg=err)
+ claim_type = self.get_claim_type("ext")
+ applies_to = [str(dn) for dn in claim_type["msDS-ClaimTypeAppliesToClass"]]
+ self.assertEqual(str(claim_type["displayName"]), "ext")
+ self.assertEqual(len(applies_to), 1)
+ self.assertNotIn(user_dn, applies_to)
+ self.assertIn(computer_dn, applies_to)
+
+ # --class=user --class=computer
+ self.addCleanup(self.delete_claim_type,
+ name="primaryComputer", force=True)
+ result, out, err = self.runcmd("domain", "claim", "claim-type",
+ "create", "--attribute=msDS-PrimaryComputer",
+ "--name=primaryComputer", "--class=user",
+ "--class=computer")
+ self.assertIsNone(result, msg=err)
+ claim_type = self.get_claim_type("primaryComputer")
+ applies_to = [str(dn) for dn in claim_type["msDS-ClaimTypeAppliesToClass"]]
+ self.assertEqual(str(claim_type["displayName"]), "primaryComputer")
+ self.assertEqual(len(applies_to), 2)
+ self.assertIn(user_dn, applies_to)
+ self.assertIn(computer_dn, applies_to)
+
+ # No classes should raise CommandError.
+ result, out, err = self.runcmd("domain", "claim", "claim-type",
+ "create", "--attribute=wWWHomePage",
+ "--name=homepage")
+ self.assertEqual(result, -1)
+ self.assertIn("Argument --class is required.", err)
+
+ def test__delete(self):
+ """Test deleting a claim type that is not protected."""
+ # Create non-protected claim type.
+ result, out, err = self.runcmd("domain", "claim", "claim-type",
+ "create", "--attribute=msDS-SiteName",
+ "--name=siteName", "--class=computer")
+ self.assertIsNone(result, msg=err)
+ claim_type = self.get_claim_type("siteName")
+ self.assertIsNotNone(claim_type)
+
+ # Do the deletion.
+ result, out, err = self.runcmd("domain", "claim", "claim-type",
+ "delete", "--name=siteName")
+ self.assertIsNone(result, msg=err)
+
+ # Claim type shouldn't exist anymore.
+ claim_type = self.get_claim_type("siteName")
+ self.assertIsNone(claim_type)
+
+ def test_delete__protected(self):
+ """Test deleting a protected claim type, with and without --force."""
+ # Create protected claim type.
+ result, out, err = self.runcmd("domain", "claim", "claim-type",
+ "create", "--attribute=postalCode",
+ "--name=postcode", "--class=user",
+ "--protect")
+ self.assertIsNone(result, msg=err)
+ claim_type = self.get_claim_type("postcode")
+ self.assertIsNotNone(claim_type)
+
+ # Do the deletion.
+ result, out, err = self.runcmd("domain", "claim", "claim-type",
+ "delete", "--name=postcode")
+ self.assertEqual(result, -1)
+
+ # Claim type should still exist.
+ claim_type = self.get_claim_type("postcode")
+ self.assertIsNotNone(claim_type)
+
+ # Try a force delete instead.
+ result, out, err = self.runcmd("domain", "claim", "claim-type",
+ "delete", "--name=postcode", "--force")
+ self.assertIsNone(result, msg=err)
+
+ # Claim type shouldn't exist anymore.
+ claim_type = self.get_claim_type("siteName")
+ self.assertIsNone(claim_type)
+
+ def test_delete__notfound(self):
+ """Test deleting a claim type that doesn't exist."""
+ result, out, err = self.runcmd("domain", "claim", "claim-type",
+ "delete", "--name", "doesNotExist")
+ self.assertEqual(result, -1)
+ self.assertIn("Claim type doesNotExist not found.", err)
+
+ def test_modify__description(self):
+ """Test modifying a claim type description."""
+ self.addCleanup(self.delete_claim_type, name="company", force=True)
+ self.create_claim_type("company", classes=["user"])
+
+ result, out, err = self.runcmd("domain", "claim", "claim-type",
+ "modify", "--name", "company",
+ "--description=NewDescription")
+ self.assertIsNone(result, msg=err)
+
+ # Verify fields were changed.
+ claim_type = self.get_claim_type("company")
+ self.assertEqual(str(claim_type["description"]), "NewDescription")
+
+ def test_modify__classes(self):
+ """Test modify claim type classes."""
+ schema_dn = self.samdb.get_schema_basedn()
+ user_dn = f"CN=User,{schema_dn}"
+ computer_dn = f"CN=Computer,{schema_dn}"
+
+ self.addCleanup(self.delete_claim_type, name="seeAlso", force=True)
+ self.create_claim_type("seeAlso", classes=["user"])
+
+ # First try removing all classes which shouldn't be allowed.
+ result, out, err = self.runcmd("domain", "claim", "claim-type",
+ "modify", "--name", "seeAlso",
+ "--class=")
+ self.assertEqual(result, -1)
+ self.assertIn("Class name is required.", err)
+
+ # Try changing it to just --class=computer first.
+ result, out, err = self.runcmd("domain", "claim", "claim-type",
+ "modify", "--name", "seeAlso",
+ "--class=computer")
+ self.assertIsNone(result, msg=err)
+ claim_type = self.get_claim_type("seeAlso")
+ applies_to = [str(dn) for dn in claim_type["msDS-ClaimTypeAppliesToClass"]]
+ self.assertNotIn(user_dn, applies_to)
+ self.assertIn(computer_dn, applies_to)
+
+ # Now try changing it to --class=user again.
+ result, out, err = self.runcmd("domain", "claim", "claim-type",
+ "modify", "--name", "seeAlso",
+ "--class=user")
+ self.assertIsNone(result, msg=err)
+ claim_type = self.get_claim_type("seeAlso")
+ applies_to = [str(dn) for dn in claim_type["msDS-ClaimTypeAppliesToClass"]]
+ self.assertIn(user_dn, applies_to)
+ self.assertNotIn(computer_dn, applies_to)
+
+ # Why not both?
+ result, out, err = self.runcmd("domain", "claim", "claim-type",
+ "modify", "--name", "seeAlso",
+ "--class=user", "--class=computer")
+ self.assertIsNone(result, msg=err)
+ claim_type = self.get_claim_type("seeAlso")
+ applies_to = [str(dn) for dn in claim_type["msDS-ClaimTypeAppliesToClass"]]
+ self.assertIn(user_dn, applies_to)
+ self.assertIn(computer_dn, applies_to)
+
+ def test_modify__enable_disable(self):
+ """Test modify disabling and enabling a claim type."""
+ self.addCleanup(self.delete_claim_type, name="catalogs", force=True)
+ self.create_claim_type("catalogs", classes=["user"])
+
+ result, out, err = self.runcmd("domain", "claim", "claim-type",
+ "modify", "--name", "catalogs",
+ "--disable")
+ self.assertIsNone(result, msg=err)
+
+ # Check that claim type was disabled.
+ claim_type = self.get_claim_type("catalogs")
+ self.assertEqual(str(claim_type["Enabled"]), "FALSE")
+
+ result, out, err = self.runcmd("domain", "claim", "claim-type",
+ "modify", "--name", "catalogs",
+ "--enable")
+ self.assertIsNone(result, msg=err)
+
+ # Check that claim type was enabled.
+ claim_type = self.get_claim_type("catalogs")
+ self.assertEqual(str(claim_type["Enabled"]), "TRUE")
+
+ def test_modify__protect_unprotect(self):
+ """Test modify un-protecting and protecting a claim type."""
+ self.addCleanup(self.delete_claim_type, name="pager", force=True)
+ self.create_claim_type("pager", classes=["user"])
+
+ utils = SDUtils(self.samdb)
+ result, out, err = self.runcmd("domain", "claim", "claim-type",
+ "modify", "--name", "pager",
+ "--protect")
+ self.assertIsNone(result, msg=err)
+
+ # Check that claim type was protected.
+ claim_type = self.get_claim_type("pager")
+ desc = utils.get_sd_as_sddl(claim_type["dn"])
+ self.assertIn("(D;;DTSD;;;WD)", desc)
+
+ result, out, err = self.runcmd("domain", "claim", "claim-type",
+ "modify", "--name", "pager",
+ "--unprotect")
+ self.assertIsNone(result, msg=err)
+
+ # Check that claim type was unprotected.
+ claim_type = self.get_claim_type("pager")
+ desc = utils.get_sd_as_sddl(claim_type["dn"])
+ self.assertNotIn("(D;;DTSD;;;WD)", desc)
+
+ def test_modify__enable_disable_together(self):
+ """Test modify claim type doesn't allow both --enable and --disable."""
+ self.addCleanup(self.delete_claim_type,
+ name="businessCategory", force=True)
+ self.create_claim_type("businessCategory", classes=["user"])
+
+ result, out, err = self.runcmd("domain", "claim", "claim-type",
+ "modify", "--name", "businessCategory",
+ "--enable", "--disable")
+ self.assertEqual(result, -1)
+ self.assertIn("--enable and --disable cannot be used together.", err)
+
+ def test_modify__protect_unprotect_together(self):
+ """Test modify claim type using both --protect and --unprotect."""
+ self.addCleanup(self.delete_claim_type,
+ name="businessCategory", force=True)
+ self.create_claim_type("businessCategory", classes=["user"])
+
+ result, out, err = self.runcmd("domain", "claim", "claim-type",
+ "modify", "--name", "businessCategory",
+ "--protect", "--unprotect")
+ self.assertEqual(result, -1)
+ self.assertIn("--protect and --unprotect cannot be used together.", err)
+
+ def test_modify__notfound(self):
+ """Test modify a claim type that doesn't exist."""
+ result, out, err = self.runcmd("domain", "claim", "claim-type",
+ "modify", "--name", "doesNotExist",
+ "--description=NewDescription")
+ self.assertEqual(result, -1)
+ self.assertIn("Claim type doesNotExist not found.", err)
+
+
+class ValueTypeCmdTestCase(BaseClaimCmdTest):
+    """Tests for the value-type command.
+
+    These tests only list and view value types; nothing is created.
+    """
+
+    def test_list(self):
+        """Test listing claim value types in list format."""
+        result, out, err = self.runcmd("domain", "claim", "value-type", "list")
+        self.assertIsNone(result, msg=err)
+
+        # base list of value types is there
+        for value_type in VALUE_TYPES:
+            self.assertIn(value_type, out)
+
+    def test_list__json(self):
+        """Test listing claim value types in JSON format."""
+        result, out, err = self.runcmd("domain", "claim", "value-type",
+                                       "list", "--json")
+        self.assertIsNone(result, msg=err)
+
+        # we should get valid json
+        json_result = json.loads(out)
+        value_types = list(json_result.keys())
+
+        # base list of value types is there
+        for value_type in VALUE_TYPES:
+            self.assertIn(value_type, value_types)
+
+    def test_view(self):
+        """Test viewing a single claim value type."""
+        result, out, err = self.runcmd("domain", "claim", "value-type",
+                                       "view", "--name", "Text")
+        self.assertIsNone(result, msg=err)
+
+        # we should get valid json
+        value_type = json.loads(out)
+
+        # check a few fields only
+        self.assertEqual(value_type["name"], "MS-DS-Text")
+        self.assertEqual(value_type["displayName"], "Text")
+        self.assertEqual(value_type["msDS-ClaimValueType"], 3)
+
+    def test_view__name_missing(self):
+        """Test viewing a claim value type with missing --name is handled."""
+        result, out, err = self.runcmd("domain", "claim", "value-type", "view")
+        self.assertEqual(result, -1)
+        self.assertIn("Argument --name is required.", err)
+
+    def test_view__notfound(self):
+        """Test viewing a claim value type that doesn't exist is handled."""
+        result, out, err = self.runcmd("domain", "claim", "value-type",
+                                       "view", "--name", "doesNotExist")
+        self.assertEqual(result, -1)
+        self.assertIn("Value type doesNotExist not found.", err)
diff --git a/python/samba/tests/samba_tool/domain_models.py b/python/samba/tests/samba_tool/domain_models.py
new file mode 100644
index 0000000..e0f21fe
--- /dev/null
+++ b/python/samba/tests/samba_tool/domain_models.py
@@ -0,0 +1,416 @@
+# Unix SMB/CIFS implementation.
+#
+# Tests for domain models and fields
+#
+# Copyright (C) Catalyst.Net Ltd. 2023
+#
+# Written by Rob van der Linde <rob@catalyst.net.nz>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import os
+from datetime import datetime
+from xml.etree import ElementTree
+
+from ldb import FLAG_MOD_ADD, MessageElement, SCOPE_ONELEVEL
+from samba.dcerpc import security
+from samba.dcerpc.misc import GUID
+from samba.netcmd.domain.models import Group, User, fields
+from samba.netcmd.domain.models.auth_policy import StrongNTLMPolicy
+from samba.ndr import ndr_pack, ndr_unpack
+
+from .base import SambaToolCmdTest
+
+HOST = "ldap://{DC_SERVER}".format(**os.environ)
+CREDS = "-U{DC_USERNAME}%{DC_PASSWORD}".format(**os.environ)
+
+
+class FieldTestMixin:
+ """Tests a model field to ensure it behaves correctly in both directions.
+
+ Use a mixin since TestCase can't be marked as abstract.
+ """
+
+ @classmethod
+ def setUpClass(cls):
+ cls.samdb = cls.getSamDB("-H", HOST, CREDS)
+ super().setUpClass()
+
+ def get_users_dn(self):
+ """Returns Users DN."""
+ users_dn = self.samdb.get_root_basedn()
+ users_dn.add_child("CN=Users")
+ return users_dn
+
+ def test_to_db_value(self):
+ # Loop through each value and expected value combination.
+ # If the expected value is callable, treat it as a validation callback.
+ # NOTE: perhaps we should be using subtests for this.
+ for (value, expected) in self.to_db_value:
+ db_value = self.field.to_db_value(self.samdb, value, FLAG_MOD_ADD)
+ if callable(expected):
+ self.assertTrue(expected(db_value))
+ else:
+ self.assertEqual(db_value, expected)
+
+ def test_from_db_value(self):
+ # Loop through each value and expected value combination.
+ # NOTE: perhaps we should be using subtests for this.
+ for (db_value, expected) in self.from_db_value:
+ value = self.field.from_db_value(self.samdb, db_value)
+ self.assertEqual(value, expected)
+
+
+class IntegerFieldTest(FieldTestMixin, SambaToolCmdTest):
+ field = fields.IntegerField("FieldName")
+
+ to_db_value = [
+ (10, MessageElement(b"10")),
+ ([1, 5, 10], MessageElement([b"1", b"5", b"10"])),
+ (None, None),
+ ]
+
+ from_db_value = [
+ (MessageElement(b"10"), 10),
+ (MessageElement([b"1", b"5", b"10"]), [1, 5, 10]),
+ (None, None),
+ ]
+
+
+class BinaryFieldTest(FieldTestMixin, SambaToolCmdTest):
+ field = fields.BinaryField("FieldName")
+
+ to_db_value = [
+ (b"SAMBA", MessageElement(b"SAMBA")),
+ ([b"SAMBA", b"Developer"], MessageElement([b"SAMBA", b"Developer"])),
+ (None, None),
+ ]
+
+ from_db_value = [
+ (MessageElement(b"SAMBA"), b"SAMBA"),
+ (MessageElement([b"SAMBA", b"Developer"]), [b"SAMBA", b"Developer"]),
+ (None, None),
+ ]
+
+
+class StringFieldTest(FieldTestMixin, SambaToolCmdTest):
+ field = fields.StringField("FieldName")
+
+ to_db_value = [
+ ("SAMBA", MessageElement(b"SAMBA")),
+ (["SAMBA", "Developer"], MessageElement([b"SAMBA", b"Developer"])),
+ (None, None),
+ ]
+
+ from_db_value = [
+ (MessageElement(b"SAMBA"), "SAMBA"),
+ (MessageElement([b"SAMBA", b"Developer"]), ["SAMBA", "Developer"]),
+ (None, None),
+ ]
+
+
+class BooleanFieldTest(FieldTestMixin, SambaToolCmdTest):
+ field = fields.BooleanField("FieldName")
+
+ to_db_value = [
+ (True, MessageElement(b"TRUE")),
+ ([False, True], MessageElement([b"FALSE", b"TRUE"])),
+ (None, None),
+ ]
+
+ from_db_value = [
+ (MessageElement(b"TRUE"), True),
+ (MessageElement([b"FALSE", b"TRUE"]), [False, True]),
+ (None, None),
+ ]
+
+
+class EnumFieldTest(FieldTestMixin, SambaToolCmdTest):
+ field = fields.EnumField("FieldName", StrongNTLMPolicy)
+
+ to_db_value = [
+ (StrongNTLMPolicy.OPTIONAL, MessageElement("1")),
+ ([StrongNTLMPolicy.REQUIRED, StrongNTLMPolicy.OPTIONAL],
+ MessageElement(["2", "1"])),
+ (None, None),
+ ]
+
+ from_db_value = [
+ (MessageElement("1"), StrongNTLMPolicy.OPTIONAL),
+ (MessageElement(["2", "1"]),
+ [StrongNTLMPolicy.REQUIRED, StrongNTLMPolicy.OPTIONAL]),
+ (None, None),
+ ]
+
+
+class DateTimeFieldTest(FieldTestMixin, SambaToolCmdTest):
+ field = fields.DateTimeField("FieldName")
+
+ to_db_value = [
+ (datetime(2023, 1, 27, 22, 36, 41), MessageElement("20230127223641.0Z")),
+ ([datetime(2023, 1, 27, 22, 36, 41), datetime(2023, 1, 27, 22, 47, 50)],
+ MessageElement(["20230127223641.0Z", "20230127224750.0Z"])),
+ (None, None),
+ ]
+
+ from_db_value = [
+ (MessageElement("20230127223641.0Z"), datetime(2023, 1, 27, 22, 36, 41)),
+ (MessageElement(["20230127223641.0Z", "20230127224750.0Z"]),
+ [datetime(2023, 1, 27, 22, 36, 41), datetime(2023, 1, 27, 22, 47, 50)]),
+ (None, None),
+ ]
+
+
+class RelatedFieldTest(FieldTestMixin, SambaToolCmdTest):
+ field = fields.RelatedField("FieldName", User)
+
+ @property
+ def to_db_value(self):
+ alice = User.get(self.samdb, username="alice")
+ joe = User.get(self.samdb, username="joe")
+ return [
+ (alice, MessageElement(str(alice.dn))),
+ ([joe, alice], MessageElement([str(joe.dn), str(alice.dn)])),
+ (None, None),
+ ]
+
+ @property
+ def from_db_value(self):
+ alice = User.get(self.samdb, username="alice")
+ joe = User.get(self.samdb, username="joe")
+ return [
+ (MessageElement(str(alice.dn)), alice),
+ (MessageElement([str(joe.dn), str(alice.dn)]), [joe, alice]),
+ (None, None),
+ ]
+
+
+class DnFieldTest(FieldTestMixin, SambaToolCmdTest):
+ field = fields.DnField("FieldName")
+
+ @property
+ def to_db_value(self):
+ alice = User.get(self.samdb, username="alice")
+ joe = User.get(self.samdb, username="joe")
+ return [
+ (alice.dn, MessageElement(str(alice.dn))),
+ ([joe.dn, alice.dn], MessageElement([str(joe.dn), str(alice.dn)])),
+ (None, None),
+ ]
+
+ @property
+ def from_db_value(self):
+ alice = User.get(self.samdb, username="alice")
+ joe = User.get(self.samdb, username="joe")
+ return [
+ (MessageElement(str(alice.dn)), alice.dn),
+ (MessageElement([str(joe.dn), str(alice.dn)]), [joe.dn, alice.dn]),
+ (None, None),
+ ]
+
+
+class SIDFieldTest(FieldTestMixin, SambaToolCmdTest):
+ field = fields.SIDField("FieldName")
+
+ @property
+ def to_db_value(self):
+ # Create a group for testing
+ group = Group(name="group1")
+ group.save(self.samdb)
+ self.addCleanup(group.delete, self.samdb)
+
+ # Get raw value to compare against
+ group_rec = self.samdb.search(Group.get_base_dn(self.samdb),
+ scope=SCOPE_ONELEVEL,
+ expression="(name=group1)",
+ attrs=["objectSid"])[0]
+ raw_sid = group_rec["objectSid"]
+
+ return [
+ (group.object_sid, raw_sid),
+ (None, None),
+ ]
+
+ @property
+ def from_db_value(self):
+ # Create a group for testing
+ group = Group(name="group1")
+ group.save(self.samdb)
+ self.addCleanup(group.delete, self.samdb)
+
+ # Get raw value to compare against
+ group_rec = self.samdb.search(Group.get_base_dn(self.samdb),
+ scope=SCOPE_ONELEVEL,
+ expression="(name=group1)",
+ attrs=["objectSid"])[0]
+ raw_sid = group_rec["objectSid"]
+
+ return [
+ (raw_sid, group.object_sid),
+ (None, None),
+ ]
+
+
+class GUIDFieldTest(FieldTestMixin, SambaToolCmdTest):
+ field = fields.GUIDField("FieldName")
+
+ @property
+ def to_db_value(self):
+ users_dn = self.get_users_dn()
+
+ alice = self.samdb.search(users_dn,
+ scope=SCOPE_ONELEVEL,
+ expression="(sAMAccountName=alice)",
+ attrs=["objectGUID"])[0]
+
+ joe = self.samdb.search(users_dn,
+ scope=SCOPE_ONELEVEL,
+ expression="(sAMAccountName=joe)",
+ attrs=["objectGUID"])[0]
+
+ alice_guid = str(ndr_unpack(GUID, alice["objectGUID"][0]))
+ joe_guid = str(ndr_unpack(GUID, joe["objectGUID"][0]))
+
+ return [
+ (alice_guid, alice["objectGUID"]),
+ (
+ [joe_guid, alice_guid],
+ MessageElement([joe["objectGUID"][0], alice["objectGUID"][0]]),
+ ),
+ (None, None),
+ ]
+
+ @property
+ def from_db_value(self):
+ users_dn = self.get_users_dn()
+
+ alice = self.samdb.search(users_dn,
+ scope=SCOPE_ONELEVEL,
+ expression="(sAMAccountName=alice)",
+ attrs=["objectGUID"])[0]
+
+ joe = self.samdb.search(users_dn,
+ scope=SCOPE_ONELEVEL,
+ expression="(sAMAccountName=joe)",
+ attrs=["objectGUID"])[0]
+
+ alice_guid = str(ndr_unpack(GUID, alice["objectGUID"][0]))
+ joe_guid = str(ndr_unpack(GUID, joe["objectGUID"][0]))
+
+ return [
+ (alice["objectGUID"], alice_guid),
+ (
+ MessageElement([joe["objectGUID"][0], alice["objectGUID"][0]]),
+ [joe_guid, alice_guid],
+ ),
+ (None, None),
+ ]
+
+
+class SDDLFieldTest(FieldTestMixin, SambaToolCmdTest):
+ field = fields.SDDLField("FieldName")
+
+ def setUp(self):
+ super().setUp()
+ self.domain_sid = security.dom_sid(self.samdb.get_domain_sid())
+
+ def encode(self, value):
+ return ndr_pack(security.descriptor.from_sddl(value, self.domain_sid))
+
+ @property
+ def to_db_value(self):
+ values = [
+ "O:SYG:SYD:(XA;OICI;CR;;;WD;(Member_of {SID(AU)}))",
+ "O:SYG:SYD:(XA;OICI;CR;;;WD;(Member_of {SID(AO)}))",
+ "O:SYG:SYD:(XA;OICI;CR;;;WD;((Member_of {SID(AO)}) || (Member_of {SID(BO)})))",
+ "O:SYG:SYD:(XA;OICI;CR;;;WD;(Member_of {SID(%s)}))" % self.domain_sid,
+ ]
+ expected = [
+ (value, MessageElement(self.encode(value))) for value in values
+ ]
+ expected.append((None, None))
+ return expected
+
+ @property
+ def from_db_value(self):
+ values = [
+ "O:SYG:SYD:(XA;OICI;CR;;;WD;(Member_of {SID(AU)}))",
+ "O:SYG:SYD:(XA;OICI;CR;;;WD;(Member_of {SID(AO)}))",
+ "O:SYG:SYD:(XA;OICI;CR;;;WD;((Member_of {SID(AO)}) || (Member_of {SID(BO)})))",
+ "O:SYG:SYD:(XA;OICI;CR;;;WD;(Member_of {SID(%s)}))" % self.domain_sid,
+ ]
+ expected = [
+ (MessageElement(self.encode(value)), value) for value in values
+ ]
+ expected.append((None, None))
+ return expected
+
+
+class PossibleClaimValuesFieldTest(FieldTestMixin, SambaToolCmdTest):
+ field = fields.PossibleClaimValuesField("FieldName")
+
+ json_data = [{
+ "ValueGUID": "1c39ed4f-0b26-4536-b963-5959c8b1b676",
+ "ValueDisplayName": "Alice",
+ "ValueDescription": "Alice Description",
+ "Value": "alice",
+ }]
+
+ xml_data = "<?xml version='1.0' encoding='utf-16'?>" \
+ "<PossibleClaimValues xmlns:xsd='http://www.w3.org/2001/XMLSchema'" \
+ " xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'" \
+ " xmlns='http://schemas.microsoft.com/2010/08/ActiveDirectory/PossibleValues'>" \
+ "<StringList>" \
+ "<Item>" \
+ "<ValueGUID>1c39ed4f-0b26-4536-b963-5959c8b1b676</ValueGUID>" \
+ "<ValueDisplayName>Alice</ValueDisplayName>" \
+ "<ValueDescription>Alice Description</ValueDescription>" \
+ "<Value>alice</Value>" \
+ "</Item>" \
+ "</StringList>" \
+ "</PossibleClaimValues>"
+
+ def validate_xml(self, db_field):
+ """Callback that compares XML strings.
+
+        Tidying the XML output and adding consistent indentation was only
+ added to ETree in Python 3.9+ so generate a single line XML string.
+
+ This is just based on comparing the parsed XML, converted back
+ to a string, then comparing those strings.
+
+ So the expected xml_data string must have no spacing or indentation.
+
+ :param db_field: MessageElement value returned by field.to_db_field()
+ """
+ expected = ElementTree.fromstring(self.xml_data)
+ parsed = ElementTree.fromstring(str(db_field))
+ return ElementTree.tostring(parsed) == ElementTree.tostring(expected)
+
+ @property
+ def to_db_value(self):
+ return [
+ (self.json_data, self.validate_xml), # callback to validate XML
+ (self.json_data[0], self.validate_xml), # one item wrapped as list
+ ([], None), # empty list clears field
+ (None, None),
+ ]
+
+ @property
+ def from_db_value(self):
+ return [
+ (MessageElement(self.xml_data), self.json_data),
+ (None, None),
+ ]
diff --git a/python/samba/tests/samba_tool/drs_clone_dc_data_lmdb_size.py b/python/samba/tests/samba_tool/drs_clone_dc_data_lmdb_size.py
new file mode 100644
index 0000000..1cb88d3
--- /dev/null
+++ b/python/samba/tests/samba_tool/drs_clone_dc_data_lmdb_size.py
@@ -0,0 +1,119 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Catalyst IT Ltd. 2019
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from samba.tests.samba_tool.base import SambaToolCmdTest
+import os
+import shutil
+
+
+class DrsCloneDcDataLmdbSizeTestCase(SambaToolCmdTest):
+ """Test setting of the lmdb map size during drs clone-dc-data"""
+
+ def setUp(self):
+ super().setUp()
+ self.tempsambadir = os.path.join(self.tempdir, "samba")
+ os.mkdir(self.tempsambadir)
+
+ # clone a domain and set the lmdb map size to size
+ #
+ # returns the tuple (ret, stdout, stderr)
+ def clone(self, size=None):
+ command = (
+ "samba-tool " +
+ "drs clone-dc-database " +
+ os.environ["REALM"] + " " +
+ ("-U%s%%%s " % (os.environ["USERNAME"], os.environ["PASSWORD"])) +
+ ("--targetdir=%s " % self.tempsambadir) +
+ "--backend-store=mdb "
+ )
+ if size:
+ command += ("--backend-store-size=%s" % size)
+
+ return self.run_command(command)
+
+ #
+ # Get the lmdb map size for the specified command
+ #
+ # While there is a python lmdb package available we use the lmdb command
+ # line utilities to avoid introducing a dependency.
+ #
+ def get_lmdb_environment_size(self, path):
+ (result, out, err) = self.run_command("mdb_stat -ne %s" % path)
+ if result:
+ self.fail("Unable to run mdb_stat\n")
+ for line in out.split("\n"):
+ line = line.strip()
+ if line.startswith("Map size:"):
+ line = line.replace(" ", "")
+ (label, size) = line.split(":")
+ return int(size)
+
+ #
+ # Check the lmdb files created by provision and ensure that the map size
+ # has been set to size.
+ #
+ # Currently this is all the *.ldb files in private/sam.ldb.d
+ #
+ def check_lmdb_environment_sizes(self, size):
+ directory = os.path.join(self.tempsambadir, "private", "sam.ldb.d")
+ for name in os.listdir(directory):
+ if name.endswith(".ldb"):
+ path = os.path.join(directory, name)
+ s = self.get_lmdb_environment_size(path)
+ if s != size:
+ self.fail("File %s, size=%d larger than %d" %
+ (name, s, size))
+
+ #
+ # Ensure that if --backend-store-size is not specified the default of
+    # 8GiB is used
+ def test_default(self):
+ (result, out, err) = self.clone()
+ self.assertEqual(0, result)
+ self.check_lmdb_environment_sizes(8 * 1024 * 1024 * 1024)
+
+ def test_64Mb(self):
+ (result, out, err) = self.clone("64Mb")
+ self.assertEqual(0, result)
+ self.check_lmdb_environment_sizes(64 * 1024 * 1024)
+
+ def test_no_unit_suffix(self):
+ (result, out, err) = self.run_command(
+ 'samba-tool drs clone-dc-database --backend-store-size "2"')
+ self.assertGreater(result, 0)
+ self.assertRegex(err,
+ r"--backend-store-size invalid suffix ''")
+
+ def test_invalid_unit_suffix(self):
+ (result, out, err) = self.run_command(
+ 'samba-tool drs clone-dc-database --backend-store-size "2 cd"')
+ self.assertGreater(result, 0)
+ self.assertRegex(err,
+ r"--backend-store-size invalid suffix 'cd'")
+
+ def test_non_numeric(self):
+ (result, out, err) = self.run_command(
+ 'samba-tool drs clone-dc-database --backend-store-size "two Gb"')
+ self.assertGreater(result, 0)
+ self.assertRegex(
+ err,
+ r"backend-store-size option requires a numeric value, with an"
+ " optional unit suffix")
+
+ def tearDown(self):
+ super().tearDown()
+ shutil.rmtree(self.tempsambadir)
diff --git a/python/samba/tests/samba_tool/dsacl.py b/python/samba/tests/samba_tool/dsacl.py
new file mode 100644
index 0000000..8ddf37e
--- /dev/null
+++ b/python/samba/tests/samba_tool/dsacl.py
@@ -0,0 +1,211 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Martin Kraemer 2019 <mk.maddin@gmail.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import os
+from samba.tests.samba_tool.base import SambaToolCmdTest
+import re
+
+class DSaclSetSddlTestCase(SambaToolCmdTest):
+ """Tests for samba-tool dsacl set --sddl subcommand"""
+ sddl = "(OA;CIIO;RPWP;aaaaaaaa-1111-bbbb-2222-dddddddddddd;33333333-eeee-4444-ffff-555555555555;PS)"
+ sddl_lc = "(OA;CIIO;RPWP;aaaaaaaa-1111-bbbb-2222-dddddddddddd;33333333-eeee-4444-ffff-555555555555;PS)"
+ sddl_uc = "(OA;CIIO;RPWP;AAAAAAAA-1111-BBBB-2222-DDDDDDDDDDDD;33333333-EEEE-4444-FFFF-555555555555;PS)"
+ sddl_sid = "(OA;CIIO;RPWP;aaaaaaaa-1111-bbbb-2222-dddddddddddd;33333333-eeee-4444-ffff-555555555555;S-1-5-10)"
+ sddl_multi = "(OA;CIIO;RPWP;aaaaaaaa-1111-bbbb-2222-dddddddddddd;33333333-eeee-4444-ffff-555555555555;PS)(OA;CIIO;RPWP;cccccccc-9999-ffff-8888-eeeeeeeeeeee;77777777-dddd-6666-bbbb-555555555555;PS)"
+
+ def setUp(self):
+ super().setUp()
+ self.samdb = self.getSamDB("-H", "ldap://%s" % os.environ["DC_SERVER"],"-U%s%%%s" % (os.environ["DC_USERNAME"], os.environ["DC_PASSWORD"]))
+ self.dn="OU=DSaclSetSddlTestCase,%s" % self.samdb.domain_dn()
+ self.samdb.create_ou(self.dn)
+
+ def tearDown(self):
+ super().tearDown()
+ # clean-up the created test ou
+ self.samdb.delete(self.dn)
+
+ def test_sddl(self):
+ """Tests if a sddl string can be added 'the normal way'"""
+ (result, out, err) = self.runsubcmd("dsacl", "set","--objectdn=%s" % self.dn, "--sddl=%s" % self.sddl)
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+ #extract only the two sddl strings from samba-tool output
+ acl_list=re.findall('.*descriptor for.*:\n(.*?)\n',out)
+ self.assertNotEqual(acl_list[0], acl_list[1], "new and old SDDL string differ")
+ self.assertMatch(acl_list[1], self.sddl, "new SDDL string should be contained within second sddl output")
+
+ def test_sddl_set_get(self):
+ """Tests if a sddl string can be added 'the normal way' and the output of 'get' is the same"""
+ (result, out, err) = self.runsubcmd("dsacl", "get",
+ "--objectdn=%s" % self.dn)
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+ #extract only the two sddl strings from samba-tool output
+ acl_list_get=re.findall('^descriptor for.*:\n(.*?)\n', out)
+
+ (result, out, err) = self.runsubcmd("dsacl", "set",
+ "--objectdn=%s" % self.dn,
+ "--sddl=%s" % self.sddl)
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+ #extract only the two sddl strings from samba-tool output
+ acl_list_old=re.findall('old descriptor for.*:\n(.*?)\n', out)
+ self.assertEqual(acl_list_old, acl_list_get,
+ "output of dsacl get should be the same as before set")
+
+ acl_list=re.findall('new descriptor for.*:\n(.*?)\n', out)
+
+ (result, out, err) = self.runsubcmd("dsacl", "get",
+ "--objectdn=%s" % self.dn)
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+ #extract only the two sddl strings from samba-tool output
+ acl_list_get2=re.findall('^descriptor for.*:\n(.*?)\n', out)
+ self.assertEqual(acl_list, acl_list_get2,
+ "output of dsacl get should be the same as after set")
+
+ def test_multisddl(self):
+ """Tests if we can add multiple, different sddl strings at the same time"""
+ (result, out, err) = self.runsubcmd("dsacl", "set","--objectdn=%s" % self.dn, "--sddl=%s" % self.sddl_multi)
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+ #extract only the two sddl strings from samba-tool output
+ acl_list=re.findall('.*descriptor for.*:\n(.*?)\n',out)
+ for ace in re.findall(r'\(.*?\)',self.sddl_multi):
+ self.assertMatch(acl_list[1], ace, "new SDDL string should be contained within second sddl output")
+
+ def test_duplicatesddl(self):
+ """Tests if an already existing sddl string can be added causing duplicate entry"""
+ acl_list = self._double_sddl_check(self.sddl,self.sddl)
+ self.assertEqual(acl_list[0],acl_list[1])
+
+ def test_casesensitivesddl(self):
+ """Tests if an already existing sddl string can be added in different cases causing duplicate entry"""
+ acl_list = self._double_sddl_check(self.sddl_lc,self.sddl_uc)
+ self.assertEqual(acl_list[0],acl_list[1])
+
+ def test_sidsddl(self):
+ """Tests if an already existing sddl string can be added with SID instead of SDDL SIDString causing duplicate entry"""
+ acl_list = self._double_sddl_check(self.sddl,self.sddl_sid)
+ self.assertEqual(acl_list[0],acl_list[1])
+
+ def test_twosddl(self):
+ """Tests if an already existing sddl string can be added by using it twice/in combination with non existing sddl string causing duplicate entry"""
+ acl_list = self._double_sddl_check(self.sddl,self.sddl + self.sddl)
+ self.assertEqual(acl_list[0],acl_list[1])
+
+ def _double_sddl_check(self,sddl1,sddl2):
+ """Adds two sddl strings and checks if there was an ace change after the second adding"""
+ (result, out, err) = self.runsubcmd("dsacl", "set","--objectdn=%s" % self.dn, "--sddl=%s" % sddl1)
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+ acl_list = re.findall('.*descriptor for.*:\n(.*?)\n',out)
+ self.assertMatch(acl_list[1], sddl1, "new SDDL string should be contained within second sddl output - is not")
+ #add sddl2
+ (result, out, err) = self.runsubcmd("dsacl", "set","--objectdn=%s" % self.dn, "--sddl=%s" % sddl2)
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+ acl_list = re.findall('.*descriptor for.*:\n(.*?)\n',out)
+ return acl_list
+
+ def test_add_delete_sddl(self):
+ """Tests if a sddl string can be added 'the normal way', deleted and
+ final state is the same as initial.
+ """
+ (result, out, err) = self.runsubcmd("dsacl", "get",
+ "--objectdn=%s" % self.dn)
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+ # extract only the two sddl strings from samba-tool output
+ acl_list_orig = re.findall('^descriptor for.*:\n(.*?)\n', out)[0]
+
+ (result, out, err) = self.runsubcmd("dsacl", "set",
+ "--objectdn=%s" % self.dn,
+ "--sddl=%s" % self.sddl)
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+ acl_list_added = re.findall('new descriptor for.*:\n(.*?)\n', out)[0]
+ self.assertNotEqual(acl_list_added, acl_list_orig, "After adding the SD should be different.")
+ self.assertMatch(acl_list_added, self.sddl, "The added ACE should be part of the new SD.")
+
+ (result, out, err) = self.runsubcmd("dsacl", "delete",
+ "--objectdn=%s" % self.dn,
+ "--sddl=%s" % self.sddl)
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+ acl_list_final = re.findall('new descriptor for.*:\n(.*?)\n', out)[0]
+ self.assertEqual(acl_list_orig, acl_list_final,
+ "output of dsacl delete should be the same as before adding")
+
+ (result, out, err) = self.runsubcmd("dsacl", "get",
+ "--objectdn=%s" % self.dn)
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+ # extract only the two sddl strings from samba-tool output
+ acl_list_final_get = re.findall('^descriptor for.*:\n(.*?)\n', out)[0]
+ self.assertEqual(acl_list_orig, acl_list_final_get,
+ "output of dsacl get should be the same as after adding and deleting again")
+
+ def test_delete(self):
+ # add sddl_multi first
+ (result, out, err) = self.runsubcmd("dsacl", "set",
+ "--objectdn=%s" % self.dn,
+ "--sddl=%s" % self.sddl_multi)
+
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+ # delete sddl
+ (result, out, err) = self.runsubcmd("dsacl", "delete",
+ "--objectdn=%s" % self.dn,
+ "--sddl=%s" % self.sddl)
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+ acl_list_deleted = re.findall('new descriptor for.*:\n(.*?)\n', out)[0]
+
+ self.assertNotRegex(acl_list_deleted, re.escape(self.sddl))
+ left_sddl = self.sddl_multi.replace(self.sddl, "")
+ self.assertRegex(acl_list_deleted, re.escape(left_sddl))
+
+ def test_delete_twice(self):
+ """Tests if deleting twice the same ACEs returns the expected warning."""
+ # add sddl_multi first
+ (result, out, err) = self.runsubcmd("dsacl", "set",
+ "--objectdn=%s" % self.dn,
+ "--sddl=%s" % self.sddl_multi)
+
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+
+ # delete sddl
+ (result, out, err) = self.runsubcmd("dsacl", "delete",
+ "--objectdn=%s" % self.dn,
+ "--sddl=%s" % self.sddl)
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+
+ # delete sddl_multi
+ (result, out, err) = self.runsubcmd("dsacl", "delete",
+ "--objectdn=%s" % self.dn,
+ "--sddl=%s" % self.sddl_multi)
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+ self.assertRegex(out, "WARNING", "Should throw a warning about deleting non existent ace.")
+ warn = re.findall("WARNING: (.*?)\n", out)[0]
+ left_sddl = self.sddl_multi.replace(self.sddl, "")
+ self.assertRegex(warn, re.escape(self.sddl), "Should point out the non existent ace.")
+ self.assertNotRegex(warn, re.escape(left_sddl),
+ "Should not complain about all aces, since one of them is not deleted twice.")
diff --git a/python/samba/tests/samba_tool/forest.py b/python/samba/tests/samba_tool/forest.py
new file mode 100644
index 0000000..23291ca
--- /dev/null
+++ b/python/samba/tests/samba_tool/forest.py
@@ -0,0 +1,70 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) William Brown <william@blackhats.net.au> 2018
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import os
+import ldb
+from samba.tests.samba_tool.base import SambaToolCmdTest
+
+
+class ForestCmdTestCase(SambaToolCmdTest):
+ """Tests for samba-tool dsacl subcommands"""
+ samdb = None
+
+ def setUp(self):
+ super().setUp()
+ self.samdb = self.getSamDB("-H", "ldap://%s" % os.environ["DC_SERVER"],
+ "-U%s%%%s" % (os.environ["DC_USERNAME"], os.environ["DC_PASSWORD"]))
+ self.domain_dn = self.samdb.domain_dn()
+
+ def tearDown(self):
+ super().tearDown()
+ # Reset the values we might have changed.
+ ds_dn = "CN=Directory Service,CN=Windows NT,CN=Services,CN=Configuration"
+ m = ldb.Message()
+ m.dn = ldb.Dn(self.samdb, "%s,%s" % (ds_dn, self.domain_dn))
+ m['dsheuristics'] = ldb.MessageElement(
+ '0000000', ldb.FLAG_MOD_REPLACE, 'dsheuristics')
+
+ self.samdb.modify(m)
+
+ def test_display(self):
+ """Tests that we can display forest settings"""
+ (result, out, err) = self.runcmd("forest",
+ "directory_service",
+ "show",
+ "-H", "ldap://%s" % os.environ["DC_SERVER"],
+ "-U%s%%%s" % (os.environ["DC_USERNAME"],
+ os.environ["DC_PASSWORD"]))
+
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+ self.assertIn("dsheuristics: <NO VALUE>", out)
+
+ def test_modify_dsheuristics(self):
+ """Test that we can modify the dsheuristics setting"""
+
+ (result, out, err) = self.runcmd("forest",
+ "directory_service",
+ "dsheuristics",
+ "0000002",
+ "-H", "ldap://%s" % os.environ["DC_SERVER"],
+ "-U%s%%%s" % (os.environ["DC_USERNAME"],
+ os.environ["DC_PASSWORD"]))
+
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+ self.assertIn("set dsheuristics: 0000002", out)
diff --git a/python/samba/tests/samba_tool/fsmo.py b/python/samba/tests/samba_tool/fsmo.py
new file mode 100644
index 0000000..29fe7bf
--- /dev/null
+++ b/python/samba/tests/samba_tool/fsmo.py
@@ -0,0 +1,52 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Rowland Penny <rpenny@samba.org> 2016
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import os
+import ldb
+from samba.tests.samba_tool.base import SambaToolCmdTest
+
+
+class FsmoCmdTestCase(SambaToolCmdTest):
+ """Test for samba-tool fsmo show subcommand"""
+
+ def test_fsmoget(self):
+ """Run fsmo show to see if it errors"""
+ (result, out, err) = self.runsubcmd("fsmo", "show")
+
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+
+ # Check that the output is sensible
+ samdb = self.getSamDB("-H", "ldap://%s" % os.environ["SERVER"],
+ "-U%s%%%s" % (os.environ["USERNAME"], os.environ["PASSWORD"]))
+
+ try:
+ res = samdb.search(base=ldb.Dn(samdb, "CN=Infrastructure,DC=DomainDnsZones") + samdb.get_default_basedn(),
+ scope=ldb.SCOPE_BASE, attrs=["fsmoRoleOwner"])
+
+ self.assertTrue("DomainDnsZonesMasterRole owner: " + str(res[0]["fsmoRoleOwner"][0]) in out)
+ except ldb.LdbError as e:
+ (enum, string) = e.args
+ if enum == ldb.ERR_NO_SUCH_OBJECT:
+ self.assertTrue("The 'domaindns' role is not present in this domain" in out)
+ else:
+ raise
+
+ res = samdb.search(base=samdb.get_default_basedn(),
+ scope=ldb.SCOPE_BASE, attrs=["fsmoRoleOwner"])
+
+ self.assertTrue("DomainNamingMasterRole owner: " + str(res[0]["fsmoRoleOwner"][0]) in out)
diff --git a/python/samba/tests/samba_tool/gpo.py b/python/samba/tests/samba_tool/gpo.py
new file mode 100644
index 0000000..851c70e
--- /dev/null
+++ b/python/samba/tests/samba_tool/gpo.py
@@ -0,0 +1,1847 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Andrew Bartlett 2012
+#
+# based on time.py:
+# Copyright (C) Sean Dague <sdague@linux.vnet.ibm.com> 2011
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import os, pwd, grp
+import ldb
+import samba
+from samba.tests.samba_tool.base import SambaToolCmdTest
+import shutil
+from samba.netcmd.gpo import get_gpo_dn, get_gpo_info
+from samba.param import LoadParm
+from samba.tests.gpo import stage_file, unstage_file
+from samba.dcerpc import preg
+from samba.ndr import ndr_pack, ndr_unpack
+from samba.common import get_string
+from configparser import ConfigParser
+import xml.etree.ElementTree as etree
+from tempfile import NamedTemporaryFile
+import re
+from samba.gp.gpclass import check_guid
+from samba.gp_parse.gp_ini import GPTIniParser
+
# JSON fixture fed to `samba-tool gpo load`: a list of registry policy
# entries.  "type" may be spelled either as a REG_* name or as the numeric
# registry type code (the "URL" entry uses 1, i.e. REG_SZ); "class" selects
# the USER or MACHINE half of the policy.  Backslashes are doubled twice:
# once for the Python bytes literal, once for JSON escaping.
gpo_load_json = \
b"""
[
    {
        "keyname": "Software\\\\Policies\\\\Mozilla\\\\Firefox\\\\Homepage",
        "valuename": "StartPage",
        "class": "USER",
        "type": "REG_SZ",
        "data": "homepage"
    },
    {
        "keyname": "Software\\\\Policies\\\\Mozilla\\\\Firefox\\\\Homepage",
        "valuename": "URL",
        "class": "USER",
        "type": 1,
        "data": "samba.org"
    },
    {
        "keyname": "Software\\\\Microsoft\\\\Internet Explorer\\\\Toolbar",
        "valuename": "IEToolbar",
        "class": "USER",
        "type": "REG_BINARY",
        "data": [0]
    },
    {
        "keyname": "Software\\\\Policies\\\\Microsoft\\\\InputPersonalization",
        "valuename": "RestrictImplicitTextCollection",
        "class": "USER",
        "type": "REG_DWORD",
        "data": 1
    },
    {
        "keyname": "Software\\\\Policies\\\\Mozilla\\\\Firefox",
        "valuename": "ExtensionSettings",
        "class": "MACHINE",
        "type": "REG_MULTI_SZ",
        "data": [
            "{",
            "    \\"key\\": \\"value\\"",
            "}"
        ]
    }
]
"""
+
# Companion fixture for `samba-tool gpo remove`: the same entries as
# gpo_load_json identified by keyname/valuename/class only (no type or data
# is needed to delete a value).
gpo_remove_json = \
b"""
[
    {
        "keyname": "Software\\\\Policies\\\\Mozilla\\\\Firefox\\\\Homepage",
        "valuename": "StartPage",
        "class": "USER"
    },
    {
        "keyname": "Software\\\\Policies\\\\Mozilla\\\\Firefox\\\\Homepage",
        "valuename": "URL",
        "class": "USER"
    },
    {
        "keyname": "Software\\\\Microsoft\\\\Internet Explorer\\\\Toolbar",
        "valuename": "IEToolbar",
        "class": "USER"
    },
    {
        "keyname": "Software\\\\Policies\\\\Microsoft\\\\InputPersonalization",
        "valuename": "RestrictImplicitTextCollection",
        "class": "USER"
    },
    {
        "keyname": "Software\\\\Policies\\\\Mozilla\\\\Firefox",
        "valuename": "ExtensionSettings",
        "class": "MACHINE"
    }
]
"""
+
def gpt_ini_version(gpo_guid):
    """Return the version number recorded in a GPO's GPT.INI.

    Locates the GPT.INI under
    <sysvol>/<realm>/Policies/<gpo_guid>/GPT.INI using the server
    configuration named by the SERVERCONFFILE environment variable.

    :param gpo_guid: GPO GUID string, including the surrounding braces.
    :return: the [General] Version value as an int, or 0 when the file
             or the Version key does not exist.
    """
    lp = LoadParm()
    lp.load(os.environ['SERVERCONFFILE'])
    local_path = lp.get('path', 'sysvol')
    gpt_ini = os.path.join(local_path, lp.get('realm').lower(), 'Policies',
                           gpo_guid, 'GPT.INI')
    if not os.path.exists(gpt_ini):
        return 0
    with open(gpt_ini, 'rb') as f:
        data = f.read()
    parser = GPTIniParser()
    parser.parse(data)
    if not parser.ini_conf.has_option('General', 'Version'):
        return 0
    # int() parses the string directly; the original detour through
    # .encode('utf-8') added nothing.
    return int(parser.ini_conf.get('General', 'Version'))
+
# These are new GUIDs, not used elsewhere, made up for the use of testing the
# adding of extension GUIDs in `samba-tool gpo load`.
ext_guids = ['{123d2b56-7b14-4516-bbc4-763d29d57654}',
             '{d000e91b-e70f-481b-9549-58de7929bcee}']

# Root of the Samba source tree (five directories up from this test file)
# and the static provision data used by the backup/restore tests; the
# latter is absent from release tarballs, so those tests skip without it.
source_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../../../.."))
provision_path = os.path.join(source_path, "source4/selftest/provisions/")
+
def has_difference(path1, path2, binary=True, xml=True, sortlines=False):
    """Use this function to determine if the GPO backup differs from another.

    path1/path2 may each be a single file or a directory tree.

    xml=True checks whether any xml files are equal
    binary=True checks whether any .SAMBABACKUP files are equal
    sortlines=True compares single files line-by-line after sorting, i.e.
    ignoring line order.

    :return: the first differing path (or, for mismatched directory
             listings, the listing itself), or None when no difference
             is found.
    """
    if os.path.isfile(path1):
        if sortlines:
            with open(path1) as f:
                file1 = sorted(f.readlines())
            # Bug fix: read the second file from path2.  The old code
            # opened path1 twice, so the sorted comparison always compared
            # a file with itself and could never report a difference.
            with open(path2) as f:
                file2 = sorted(f.readlines())
            if file1 != file2:
                return path1
        else:
            with open(path1) as f1, open(path2) as f2:
                if f1.read() != f2.read():
                    return path1
        return None

    # Walk both trees in lockstep.
    l_dirs = [path1]
    r_dirs = [path2]
    while l_dirs:
        l_dir = l_dirs.pop()
        r_dir = r_dirs.pop()

        dirlist = sorted(os.listdir(l_dir))
        dirlist_other = sorted(os.listdir(r_dir))
        if dirlist != dirlist_other:
            # Trees disagree about their contents: report the listing.
            return dirlist

        for entry in dirlist:
            l_name = os.path.join(l_dir, entry)
            r_name = os.path.join(r_dir, entry)

            if os.path.isdir(l_name):
                l_dirs.append(l_name)
                r_dirs.append(r_name)
            elif (l_name.endswith('.xml') and xml or
                  l_name.endswith('.SAMBABACKUP') and binary):
                with open(l_name, "rb") as lf, open(r_name, "rb") as rf:
                    if lf.read() != rf.read():
                        return l_name

    return None
+
+
class GpoCmdTestCase(SambaToolCmdTest):
    """Tests for samba-tool gpo subcommands"""

    # Display name used for GPOs created during the tests.
    gpo_name = "testgpo"

    # This exists in the source tree to be restored
    backup_gpo_guid = "{1E1DC8EA-390C-4800-B327-98B56A0AEA5D}"
+
+ def test_gpo_list(self):
+ """Run gpo list against the server and make sure it looks accurate"""
+ (result, out, err) = self.runsubcmd("gpo", "listall", "-H", "ldap://%s" % os.environ["SERVER"])
+ self.assertCmdSuccess(result, out, err, "Ensuring gpo listall ran successfully")
+
+ def test_fetchfail(self):
+ """Run against a non-existent GPO, and make sure it fails (this hard-coded UUID is very unlikely to exist"""
+ (result, out, err) = self.runsubcmd("gpo", "fetch", "c25cac17-a02a-4151-835d-fae17446ee43", "-H", "ldap://%s" % os.environ["SERVER"])
+ self.assertCmdFail(result, "check for result code")
+
+ def test_fetch(self):
+ """Run against a real GPO, and make sure it passes"""
+ (result, out, err) = self.runsubcmd("gpo", "fetch", self.gpo_guid, "-H", "ldap://%s" % os.environ["SERVER"], "--tmpdir", self.tempdir)
+ self.assertCmdSuccess(result, out, err, "Ensuring gpo fetched successfully")
+ shutil.rmtree(os.path.join(self.tempdir, "policy"))
+
+ def test_show(self):
+ """Show a real GPO, and make sure it passes"""
+ (result, out, err) = self.runsubcmd("gpo", "show", self.gpo_guid, "-H", "ldap://%s" % os.environ["SERVER"])
+ self.assertCmdSuccess(result, out, err, "Ensuring gpo fetched successfully")
+
+ def test_show_as_admin(self):
+ """Show a real GPO, and make sure it passes"""
+ (result, out, err) = self.runsubcmd("gpo", "show", self.gpo_guid, "-H", "ldap://%s" % os.environ["SERVER"], "-U%s%%%s" % (os.environ["USERNAME"], os.environ["PASSWORD"]))
+ self.assertCmdSuccess(result, out, err, "Ensuring gpo fetched successfully")
+
+ def test_aclcheck(self):
+ """Check all the GPOs on the remote server have correct ACLs"""
+ (result, out, err) = self.runsubcmd("gpo", "aclcheck", "-H", "ldap://%s" % os.environ["SERVER"], "-U%s%%%s" % (os.environ["USERNAME"], os.environ["PASSWORD"]))
+ self.assertCmdSuccess(result, out, err, "Ensuring gpo checked successfully")
+
+ def test_getlink_empty(self):
+ self.samdb = self.getSamDB("-H", "ldap://%s" % os.environ["DC_SERVER"],
+ "-U%s%%%s" % (os.environ["DC_USERNAME"],
+ os.environ["DC_PASSWORD"]))
+
+ container_dn = 'OU=gpo_test_link,%s' % self.samdb.get_default_basedn()
+
+ self.samdb.add({
+ 'dn': container_dn,
+ 'objectClass': 'organizationalUnit'
+ })
+
+ (result, out, err) = self.runsubcmd("gpo", "getlink", container_dn,
+ "-H", "ldap://%s" % os.environ["SERVER"],
+ "-U%s%%%s" % (os.environ["USERNAME"],
+ os.environ["PASSWORD"]))
+ self.assertCmdSuccess(result, out, err, "Ensuring gpo link fetched successfully")
+
+ # Microsoft appears to allow an empty space character after deletion of
+ # a GPO. We should be able to handle this.
+ m = ldb.Message()
+ m.dn = ldb.Dn(self.samdb, container_dn)
+ m['gPLink'] = ldb.MessageElement(' ', ldb.FLAG_MOD_REPLACE, 'gPLink')
+ self.samdb.modify(m)
+
+ (result, out, err) = self.runsubcmd("gpo", "getlink", container_dn,
+ "-H", "ldap://%s" % os.environ["SERVER"],
+ "-U%s%%%s" % (os.environ["USERNAME"],
+ os.environ["PASSWORD"]))
+ self.assertCmdSuccess(result, out, err, "Ensuring gpo link fetched successfully")
+
+ self.samdb.delete(container_dn)
+
    def test_backup_restore_compare_binary(self):
        """Restore from a static backup and compare the binary contents"""

        if not os.path.exists(provision_path):
            self.skipTest('Test requires provision data not available in '
                          + 'release tarball')

        # The pristine backup shipped with the source tree.
        static_path = os.path.join(self.backup_path, 'policy',
                                   self.backup_gpo_guid)

        # Scratch directories: one for the restore, one for the re-backup.
        temp_path = os.path.join(self.tempdir, 'temp')
        os.mkdir(temp_path)

        new_path = os.path.join(self.tempdir, 'new')
        os.mkdir(new_path)

        gpo_guid = None
        try:
            # Restore the static backup into the live domain.
            (result, out, err) = self.runsubcmd("gpo", "restore", "BACKUP_RESTORE1",
                                                static_path,
                                                "-H", "ldap://%s" %
                                                os.environ["SERVER"], "--tmpdir",
                                                temp_path, "--entities",
                                                self.entity_file, "-U%s%%%s" %
                                                (os.environ["USERNAME"],
                                                 os.environ["PASSWORD"]),
                                                "--restore-metadata")

            self.assertCmdSuccess(result, out, err,
                                  "Ensure gpo restore successful")

            # The freshly assigned GUID is only reported on stdout.
            gpo_guid = "{%s}" % out.split("{")[1].split("}")[0]

            # Back the restored GPO up again ...
            (result, out, err) = self.runsubcmd("gpo", "backup", gpo_guid,
                                                "-H", "ldap://%s" %
                                                os.environ["SERVER"],
                                                "--tmpdir", new_path)

            self.assertCmdSuccess(result, out, err, "Ensuring gpo fetched successfully")

            # ... and check it is binary-identical (.SAMBABACKUP files) to
            # the static backup; XML metadata is deliberately ignored here.
            self.assertIsNone(has_difference(os.path.join(new_path, 'policy',
                                                          gpo_guid),
                                             static_path, binary=True,
                                             xml=False))
        finally:
            # Remove the restored GPO from the domain even on failure.
            if gpo_guid:
                (result, out, err) = self.runsubcmd("gpo", "del", gpo_guid,
                                                    "-H", "ldap://%s" %
                                                    os.environ["SERVER"],
                                                    "-U%s%%%s" %
                                                    (os.environ["USERNAME"],
                                                     os.environ["PASSWORD"]))
                self.assertCmdSuccess(result, out, err, "Ensuring gpo deleted successfully")

            shutil.rmtree(temp_path)
            shutil.rmtree(new_path)
+
    def test_backup_restore_no_entities_compare_binary(self):
        """Restore from a static backup (and use no entity file, resulting in
        copy-restore fallback), and compare the binary contents"""

        if not os.path.exists(provision_path):
            self.skipTest('Test requires provision data not available in '
                          + 'release tarball')

        # The pristine backup shipped with the source tree.
        static_path = os.path.join(self.backup_path, 'policy',
                                   self.backup_gpo_guid)

        temp_path = os.path.join(self.tempdir, 'temp')
        os.mkdir(temp_path)

        new_path = os.path.join(self.tempdir, 'new')
        os.mkdir(new_path)

        gpo_guid = None
        gpo_guid1 = None
        gpo_guid2 = None
        try:
            # First restore uses the regular entity file.
            (result, out, err) = self.runsubcmd("gpo", "restore", "BACKUP_RESTORE1",
                                                static_path,
                                                "-H", "ldap://%s" %
                                                os.environ["SERVER"], "--tmpdir",
                                                temp_path, "--entities",
                                                self.entity_file, "-U%s%%%s" %
                                                (os.environ["USERNAME"],
                                                 os.environ["PASSWORD"]),
                                                "--restore-metadata")

            self.assertCmdSuccess(result, out, err,
                                  "Ensure gpo restore successful")

            gpo_guid = "{%s}" % out.split("{")[1].split("}")[0]
            gpo_guid1 = gpo_guid

            # Do not output entities file
            (result, out, err) = self.runsubcmd("gpo", "backup", gpo_guid,
                                                "-H", "ldap://%s" %
                                                os.environ["SERVER"],
                                                "--tmpdir", new_path,
                                                "--generalize")

            self.assertCmdSuccess(result, out, err, "Ensuring gpo fetched successfully")

            # Do not use an entities file
            (result, out, err) = self.runsubcmd("gpo", "restore", "BACKUP_RESTORE2",
                                                os.path.join(new_path, 'policy', gpo_guid1),
                                                "-H", "ldap://%s" %
                                                os.environ["SERVER"], "--tmpdir",
                                                temp_path, "-U%s%%%s" %
                                                (os.environ["USERNAME"],
                                                 os.environ["PASSWORD"]),
                                                "--restore-metadata")

            self.assertCmdSuccess(result, out, err,
                                  "Ensure gpo restore successful")

            gpo_guid = "{%s}" % out.split("{")[1].split("}")[0]
            gpo_guid2 = gpo_guid

            self.assertCmdSuccess(result, out, err, "Ensuring gpo restored successfully")

            # Back up the second restore next to the first one.
            (result, out, err) = self.runsubcmd("gpo", "backup", gpo_guid,
                                                "-H", "ldap://%s" %
                                                os.environ["SERVER"],
                                                "--tmpdir", new_path)

            # Compare the two backups: .SAMBABACKUP payloads must match even
            # though the second round-trip had no entity file.
            self.assertIsNone(has_difference(os.path.join(new_path, 'policy',
                                                          gpo_guid1),
                                             os.path.join(new_path, 'policy',
                                                          gpo_guid2),
                                             binary=True, xml=False))
        finally:
            # Delete both restored GPOs from the domain even on failure.
            if gpo_guid1:
                (result, out, err) = self.runsubcmd("gpo", "del", gpo_guid1,
                                                    "-H", "ldap://%s" %
                                                    os.environ["SERVER"],
                                                    "-U%s%%%s" %
                                                    (os.environ["USERNAME"],
                                                     os.environ["PASSWORD"]))
                self.assertCmdSuccess(result, out, err, "Ensuring gpo deleted successfully")

            if gpo_guid2:
                (result, out, err) = self.runsubcmd("gpo", "del", gpo_guid2,
                                                    "-H", "ldap://%s" %
                                                    os.environ["SERVER"],
                                                    "-U%s%%%s" %
                                                    (os.environ["USERNAME"],
                                                     os.environ["PASSWORD"]))
                self.assertCmdSuccess(result, out, err, "Ensuring gpo deleted successfully")

            shutil.rmtree(temp_path)
            shutil.rmtree(new_path)
+
    def test_backup_restore_backup_compare_XML(self):
        """Restore from a static backup and backup to compare XML"""

        if not os.path.exists(provision_path):
            self.skipTest('Test requires provision data not available in '
                          + 'release tarball')

        # The pristine backup shipped with the source tree.
        static_path = os.path.join(self.backup_path, 'policy',
                                   self.backup_gpo_guid)

        temp_path = os.path.join(self.tempdir, 'temp')
        os.mkdir(temp_path)

        new_path = os.path.join(self.tempdir, 'new')
        os.mkdir(new_path)

        gpo_guid = None
        gpo_guid1 = None
        gpo_guid2 = None
        try:
            # First restore/backup round-trip.
            (result, out, err) = self.runsubcmd("gpo", "restore", "BACKUP_RESTORE1",
                                                static_path,
                                                "-H", "ldap://%s" %
                                                os.environ["SERVER"], "--tmpdir",
                                                temp_path, "--entities",
                                                self.entity_file, "-U%s%%%s" %
                                                (os.environ["USERNAME"],
                                                 os.environ["PASSWORD"]),
                                                "--restore-metadata")

            self.assertCmdSuccess(result, out, err,
                                  "Ensure gpo restore successful")

            gpo_guid = "{%s}" % out.split("{")[1].split("}")[0]
            gpo_guid1 = gpo_guid

            (result, out, err) = self.runsubcmd("gpo", "backup", gpo_guid,
                                                "-H", "ldap://%s" %
                                                os.environ["SERVER"],
                                                "--tmpdir", new_path)

            self.assertCmdSuccess(result, out, err, "Ensuring gpo fetched successfully")

            # Second round-trip: restore the first backup and back it up again.
            (result, out, err) = self.runsubcmd("gpo", "restore", "BACKUP_RESTORE2",
                                                os.path.join(new_path, 'policy', gpo_guid1),
                                                "-H", "ldap://%s" %
                                                os.environ["SERVER"], "--tmpdir",
                                                temp_path, "--entities",
                                                self.entity_file, "-U%s%%%s" %
                                                (os.environ["USERNAME"],
                                                 os.environ["PASSWORD"]),
                                                "--restore-metadata")

            self.assertCmdSuccess(result, out, err,
                                  "Ensure gpo restore successful")

            gpo_guid = "{%s}" % out.split("{")[1].split("}")[0]
            gpo_guid2 = gpo_guid

            self.assertCmdSuccess(result, out, err, "Ensuring gpo restored successfully")

            (result, out, err) = self.runsubcmd("gpo", "backup", gpo_guid,
                                                "-H", "ldap://%s" %
                                                os.environ["SERVER"],
                                                "--tmpdir", new_path)

            # Both backups must agree on binary payloads AND XML metadata.
            self.assertIsNone(has_difference(os.path.join(new_path, 'policy',
                                                          gpo_guid1),
                                             os.path.join(new_path, 'policy',
                                                          gpo_guid2),
                                             binary=True, xml=True))
        finally:
            # Delete both restored GPOs from the domain even on failure.
            if gpo_guid1:
                (result, out, err) = self.runsubcmd("gpo", "del", gpo_guid1,
                                                    "-H", "ldap://%s" %
                                                    os.environ["SERVER"],
                                                    "-U%s%%%s" %
                                                    (os.environ["USERNAME"],
                                                     os.environ["PASSWORD"]))
                self.assertCmdSuccess(result, out, err, "Ensuring gpo deleted successfully")

            if gpo_guid2:
                (result, out, err) = self.runsubcmd("gpo", "del", gpo_guid2,
                                                    "-H", "ldap://%s" %
                                                    os.environ["SERVER"],
                                                    "-U%s%%%s" %
                                                    (os.environ["USERNAME"],
                                                     os.environ["PASSWORD"]))
                self.assertCmdSuccess(result, out, err, "Ensuring gpo deleted successfully")

            shutil.rmtree(temp_path)
            shutil.rmtree(new_path)
+
    def test_backup_restore_generalize(self):
        """Restore from a static backup with different entities, generalize it
        again, and compare the XML"""

        if not os.path.exists(provision_path):
            self.skipTest('Test requires provision data not available in '
                          + 'release tarball')

        # The pristine backup shipped with the source tree.
        static_path = os.path.join(self.backup_path, 'policy',
                                   self.backup_gpo_guid)

        temp_path = os.path.join(self.tempdir, 'temp')
        os.mkdir(temp_path)

        new_path = os.path.join(self.tempdir, 'new')
        os.mkdir(new_path)

        # An alternative entity mapping: restores with these names, then a
        # generalized backup must produce the same mapping back.
        alt_entity_file = os.path.join(new_path, 'entities')
        with open(alt_entity_file, 'wb') as f:
            f.write(b'''<!ENTITY SAMBA__NETWORK_PATH__82419dafed126a07d6b96c66fc943735__ "\\\\samdom.example.com">
<!ENTITY SAMBA__NETWORK_PATH__0484cd41ded45a0728333a9c5e5ef619__ "\\\\samdom">
<!ENTITY SAMBA____SDDL_ACL____4ce8277be3f630300cbcf80a80e21cf4__ "D:PAR(A;CI;KA;;;BA)(A;CIIO;KA;;;CO)(A;CI;KA;;;SY)(A;CI;KR;;;S-1-16-0)">
<!ENTITY SAMBA____USER_ID_____d0970f5a1e19cb803f916c203d5c39c4__ "*S-1-5-113">
<!ENTITY SAMBA____USER_ID_____7b7bc2512ee1fedcd76bdc68926d4f7b__ "Administrator">
<!ENTITY SAMBA____USER_ID_____a3069f5a7a6530293ad8df6abd32af3d__ "Foobaz">
<!ENTITY SAMBA____USER_ID_____fdf60b2473b319c8c341de5f62479a7d__ "*S-1-5-32-545">
<!ENTITY SAMBA____USER_ID_____adb831a7fdd83dd1e2a309ce7591dff8__ "Guest">
<!ENTITY SAMBA____USER_ID_____9fa835214b4fc8b6102c991f7d97c2f8__ "*S-1-5-32-547">
<!ENTITY SAMBA____USER_ID_____bf8caafa94a19a6262bad2e8b6d4bce6__ "*S-1-5-32-546">
<!ENTITY SAMBA____USER_ID_____a45da96d0bf6575970f2d27af22be28a__ "System">
<!ENTITY SAMBA____USER_ID_____171d33a63ebd67f856552940ed491ad3__ "s-1-5-32-545">
<!ENTITY SAMBA____USER_ID_____7140932fff16ce85cc64d3caab588d0d__ "s-1-1-0">
''')

        # Entity file that the generalized backup will write out.
        gen_entity_file = os.path.join(temp_path, 'entities')

        gpo_guid = None
        try:
            (result, out, err) = self.runsubcmd("gpo", "restore", "BACKUP_RESTORE1",
                                                static_path,
                                                "-H", "ldap://%s" %
                                                os.environ["SERVER"], "--tmpdir",
                                                temp_path, "--entities",
                                                alt_entity_file, "-U%s%%%s" %
                                                (os.environ["USERNAME"],
                                                 os.environ["PASSWORD"]),
                                                "--restore-metadata")

            self.assertCmdSuccess(result, out, err, "Ensuring gpo restored successfully")

            gpo_guid = "{%s}" % out.split("{")[1].split("}")[0]

            (result, out, err) = self.runsubcmd("gpo", "backup", gpo_guid,
                                                "-H", "ldap://%s" %
                                                os.environ["SERVER"],
                                                "--tmpdir", new_path,
                                                "--generalize", "--entities",
                                                gen_entity_file)

            self.assertCmdSuccess(result, out, err, "Ensuring gpo fetched successfully")

            # Assert entity files are identical (except for line order)
            self.assertIsNone(has_difference(alt_entity_file,
                                             gen_entity_file,
                                             sortlines=True))

            # Compare the directories (XML)
            self.assertIsNone(has_difference(os.path.join(new_path, 'policy',
                                                          gpo_guid),
                                             static_path, binary=False,
                                             xml=True))
        finally:
            # Remove the restored GPO from the domain even on failure.
            if gpo_guid:
                (result, out, err) = self.runsubcmd("gpo", "del", gpo_guid,
                                                    "-H", "ldap://%s" %
                                                    os.environ["SERVER"],
                                                    "-U%s%%%s" %
                                                    (os.environ["USERNAME"],
                                                     os.environ["PASSWORD"]))
                self.assertCmdSuccess(result, out, err, "Ensuring gpo deleted successfully")

            shutil.rmtree(temp_path)
            shutil.rmtree(new_path)
+
+ def test_backup_with_extension_attributes(self):
+ self.samdb = self.getSamDB("-H", "ldap://%s" % os.environ["DC_SERVER"],
+ "-U%s%%%s" % (os.environ["DC_USERNAME"],
+ os.environ["DC_PASSWORD"]))
+
+ temp_path = os.path.join(self.tempdir, 'temp')
+ os.mkdir(temp_path)
+
+ extensions = {
+ # Taken from "source4/setup/provision_group_policy.ldif" on domain
+ 'gPCMachineExtensionNames': '[{35378EAC-683F-11D2-A89A-00C04FBBCFA2}{53D6AB1B-2488-11D1-A28C-00C04FB94F17}][{827D319E-6EAC-11D2-A4EA-00C04F79F83A}{803E14A0-B4FB-11D0-A0D0-00A0C90F574B}][{B1BE8D72-6EAC-11D2-A4EA-00C04F79F83A}{53D6AB1B-2488-11D1-A28C-00C04FB94F17}]',
+ 'gPCUserExtensionNames': '[{3060E8D0-7020-11D2-842D-00C04FA372D4}{3060E8CE-7020-11D2-842D-00C04FA372D4}][{35378EAC-683F-11D2-A89A-00C04FBBCFA2}{0F6B957E-509E-11D1-A7CC-0000F87571E3}]'
+ }
+
+ gpo_dn = get_gpo_dn(self.samdb, self.gpo_guid)
+ for ext in extensions:
+ data = extensions[ext]
+
+ m = ldb.Message()
+ m.dn = gpo_dn
+ m[ext] = ldb.MessageElement(data, ldb.FLAG_MOD_REPLACE, ext)
+
+ self.samdb.modify(m)
+
+ try:
+ (result, out, err) = self.runsubcmd("gpo", "backup", self.gpo_guid,
+ "-H", "ldap://%s" %
+ os.environ["SERVER"],
+ "--tmpdir", temp_path)
+
+ self.assertCmdSuccess(result, out, err, "Ensuring gpo fetched successfully")
+
+ guid = "{%s}" % out.split("{")[1].split("}")[0]
+
+ temp_path = os.path.join(temp_path, 'policy', guid)
+
+ (result, out, err) = self.runsubcmd("gpo", "restore", "RESTORE_EXT",
+ temp_path,
+ "-H", "ldap://%s" %
+ os.environ["SERVER"], "--tmpdir",
+ self.tempdir, "-U%s%%%s" %
+ (os.environ["USERNAME"],
+ os.environ["PASSWORD"]),
+ "--restore-metadata")
+
+ self.assertCmdSuccess(result, out, err, "Ensuring gpo restored successfully")
+
+ gpo_guid = "{%s}" % out.split("{")[1].split("}")[0]
+
+ msg = get_gpo_info(self.samdb, gpo_guid)
+ self.assertEqual(len(msg), 1)
+
+ for ext in extensions:
+ self.assertTrue(ext in msg[0])
+ self.assertEqual(extensions[ext], str(msg[0][ext][0]))
+
+ finally:
+ if gpo_guid:
+ (result, out, err) = self.runsubcmd("gpo", "del", gpo_guid,
+ "-H", "ldap://%s" %
+ os.environ["SERVER"],
+ "-U%s%%%s" %
+ (os.environ["USERNAME"],
+ os.environ["PASSWORD"]))
+ self.assertCmdSuccess(result, out, err, "Ensuring gpo deleted successfully")
+
+ shutil.rmtree(os.path.join(self.tempdir, "policy"))
+ shutil.rmtree(os.path.join(self.tempdir, 'temp'))
+
    def test_admx_load(self):
        """Load the bundled Samba ADMX templates into sysvol
        PolicyDefinitions and check they arrive."""
        lp = LoadParm()
        lp.load(os.environ['SERVERCONFFILE'])
        local_path = lp.get('path', 'sysvol')
        admx_path = os.path.join(local_path, os.environ['REALM'].lower(),
                                 'Policies', 'PolicyDefinitions')
        (result, out, err) = self.runsubcmd("gpo", "admxload",
                                            "-H", "ldap://%s" %
                                            os.environ["SERVER"],
                                            "--admx-dir=%s" %
                                            os.path.join(source_path,
                                                         'libgpo/admx'),
                                            "-U%s%%%s" %
                                            (os.environ["USERNAME"],
                                             os.environ["PASSWORD"]))
        self.assertCmdSuccess(result, out, err,
                              'Filling PolicyDefinitions failed')
        self.assertTrue(os.path.exists(admx_path),
                        'PolicyDefinitions was not created')
        self.assertTrue(os.path.exists(os.path.join(admx_path, 'samba.admx')),
                        'Filling PolicyDefinitions failed')
        # Leave sysvol as we found it for subsequent tests.
        shutil.rmtree(admx_path)
+
    def test_smb_conf_set(self):
        """Set an smb.conf policy via `gpo manage smb_conf set`, verify it
        lands in Registry.pol and bumps GPT.INI, then unset it again."""
        lp = LoadParm()
        lp.load(os.environ['SERVERCONFFILE'])
        local_path = lp.get('path', 'sysvol')
        reg_pol = os.path.join(local_path, lp.get('realm').lower(), 'Policies',
                               self.gpo_guid, 'Machine/Registry.pol')

        policy = 'apply group policies'
        before_vers = gpt_ini_version(self.gpo_guid)
        (result, out, err) = self.runsublevelcmd("gpo", ("manage", "smb_conf",
                                                 "set"), self.gpo_guid,
                                                 policy, "yes",
                                                 "-H", "ldap://%s" %
                                                 os.environ["SERVER"],
                                                 "-U%s%%%s" %
                                                 (os.environ["USERNAME"],
                                                  os.environ["PASSWORD"]))
        self.assertCmdSuccess(result, out, err,
                              'Failed to set apply group policies')
        # Any change must bump the GPT.INI version so clients re-apply.
        after_vers = gpt_ini_version(self.gpo_guid)
        self.assertGreater(after_vers, before_vers, 'GPT.INI was not updated')

        self.assertTrue(os.path.exists(reg_pol),
                        'The Registry.pol does not exist')
        reg_data = ndr_unpack(preg.file, open(reg_pol, 'rb').read())
        # "yes" is stored as REG data 1.
        # NOTE(review): the assertion messages below say "sudoers" — they
        # look copy/pasted from the sudoers tests; the entry checked here is
        # the smb.conf policy.
        ret = any([get_string(e.valuename) == policy and e.data == 1
                   for e in reg_data.entries])
        self.assertTrue(ret, 'The sudoers entry was not added')

        before_vers = after_vers
        # Ensure an empty set command deletes the entry
        (result, out, err) = self.runsublevelcmd("gpo", ("manage", "smb_conf",
                                                 "set"), self.gpo_guid,
                                                 policy, "-H", "ldap://%s" %
                                                 os.environ["SERVER"],
                                                 "-U%s%%%s" %
                                                 (os.environ["USERNAME"],
                                                  os.environ["PASSWORD"]))
        self.assertCmdSuccess(result, out, err,
                              'Failed to unset apply group policies')
        after_vers = gpt_ini_version(self.gpo_guid)
        self.assertGreater(after_vers, before_vers, 'GPT.INI was not updated')

        reg_data = ndr_unpack(preg.file, open(reg_pol, 'rb').read())
        ret = not any([get_string(e.valuename) == policy and e.data == 1
                       for e in reg_data.entries])
        self.assertTrue(ret, 'The sudoers entry was not removed')
+
    def test_smb_conf_list(self):
        """Stage a Registry.pol with one smb.conf entry and check that
        `gpo manage smb_conf list` reports it."""
        lp = LoadParm()
        lp.load(os.environ['SERVERCONFFILE'])
        local_path = lp.get('path', 'sysvol')
        reg_pol = os.path.join(local_path, lp.get('realm').lower(), 'Policies',
                               self.gpo_guid, 'Machine/Registry.pol')

        # Stage the Registry.pol file with test data
        stage = preg.file()
        e = preg.entry()
        e.keyname = b'Software\\Policies\\Samba\\smb_conf'
        e.valuename = b'apply group policies'
        e.type = 4  # REG_DWORD
        e.data = 1
        stage.num_entries = 1
        stage.entries = [e]
        ret = stage_file(reg_pol, ndr_pack(stage))
        self.assertTrue(ret, 'Could not create the target %s' % reg_pol)

        (result, out, err) = self.runsublevelcmd("gpo", ("manage", "smb_conf",
                                                 "list"), self.gpo_guid,
                                                 "-H", "ldap://%s" %
                                                 os.environ["SERVER"],
                                                 "-U%s%%%s" %
                                                 (os.environ["USERNAME"],
                                                  os.environ["PASSWORD"]))
        # NOTE(review): e.valuename was assigned as bytes; this assertIn
        # assumes the ndr layer yields it back formatted as the plain
        # policy name — confirm against the command's actual output.
        self.assertIn('%s = True' % e.valuename, out, 'The test entry was not found!')

        # Unstage the Registry.pol file
        unstage_file(reg_pol)
+
    def test_security_set(self):
        """Set a security policy via `gpo manage security set`, verify
        GptTmpl.inf and the GPT.INI version, then unset it."""
        lp = LoadParm()
        lp.load(os.environ['SERVERCONFFILE'])
        local_path = lp.get('path', 'sysvol')
        inf_pol = os.path.join(local_path, lp.get('realm').lower(), 'Policies',
                               self.gpo_guid, 'Machine/Microsoft/Windows NT/SecEdit/GptTmpl.inf')

        before_vers = gpt_ini_version(self.gpo_guid)
        (result, out, err) = self.runsublevelcmd("gpo", ("manage", "security",
                                                 "set"), self.gpo_guid,
                                                 'MaxTicketAge', '10',
                                                 "-H", "ldap://%s" %
                                                 os.environ["SERVER"],
                                                 "-U%s%%%s" %
                                                 (os.environ["USERNAME"],
                                                  os.environ["PASSWORD"]))
        self.assertCmdSuccess(result, out, err,
                              'Failed to set MaxTicketAge')
        self.assertTrue(os.path.exists(inf_pol),
                        '%s was not created' % inf_pol)
        inf_pol_contents = open(inf_pol, 'r').read()
        self.assertIn('MaxTicketAge = 10', inf_pol_contents,
                      'The test entry was not found!')
        # Any change must bump the GPT.INI version so clients re-apply.
        after_vers = gpt_ini_version(self.gpo_guid)
        self.assertGreater(after_vers, before_vers, 'GPT.INI was not updated')

        before_vers = after_vers
        # Ensure an empty set command deletes the entry
        (result, out, err) = self.runsublevelcmd("gpo", ("manage", "security",
                                                 "set"), self.gpo_guid,
                                                 'MaxTicketAge',
                                                 "-H", "ldap://%s" %
                                                 os.environ["SERVER"],
                                                 "-U%s%%%s" %
                                                 (os.environ["USERNAME"],
                                                  os.environ["PASSWORD"]))
        self.assertCmdSuccess(result, out, err,
                              'Failed to unset MaxTicketAge')
        inf_pol_contents = open(inf_pol, 'r').read()
        self.assertNotIn('MaxTicketAge = 10', inf_pol_contents,
                         'The test entry was still found!')
        after_vers = gpt_ini_version(self.gpo_guid)
        self.assertGreater(after_vers, before_vers, 'GPT.INI was not updated')
+
    def test_security_list(self):
        """Set a security policy, confirm `gpo manage security list` shows
        it, then unset it to restore the GPO."""
        (result, out, err) = self.runsublevelcmd("gpo", ("manage", "security",
                                                 "set"), self.gpo_guid,
                                                 'MaxTicketAge', '10',
                                                 "-H", "ldap://%s" %
                                                 os.environ["SERVER"],
                                                 "-U%s%%%s" %
                                                 (os.environ["USERNAME"],
                                                  os.environ["PASSWORD"]))
        self.assertCmdSuccess(result, out, err,
                              'Failed to set MaxTicketAge')

        (result, out, err) = self.runsublevelcmd("gpo", ("manage", "security",
                                                 "list"), self.gpo_guid,
                                                 "-H", "ldap://%s" %
                                                 os.environ["SERVER"],
                                                 "-U%s%%%s" %
                                                 (os.environ["USERNAME"],
                                                  os.environ["PASSWORD"]))
        self.assertIn('MaxTicketAge = 10', out, 'The test entry was not found!')

        # Unset (empty set) to leave the GPO as we found it.
        (result, out, err) = self.runsublevelcmd("gpo", ("manage", "security",
                                                 "set"), self.gpo_guid,
                                                 'MaxTicketAge',
                                                 "-H", "ldap://%s" %
                                                 os.environ["SERVER"],
                                                 "-U%s%%%s" %
                                                 (os.environ["USERNAME"],
                                                  os.environ["PASSWORD"]))
        self.assertCmdSuccess(result, out, err,
                              'Failed to unset MaxTicketAge')
+
    def test_security_nonempty_sections(self):
        """After setting and unsetting a policy, the now-empty section must
        be removed from GptTmpl.inf entirely."""
        lp = LoadParm()
        lp.load(os.environ['SERVERCONFFILE'])
        local_path = lp.get('path', 'sysvol')
        gpt_inf = os.path.join(local_path, lp.get('realm').lower(), 'Policies',
                               self.gpo_guid, 'Machine/Microsoft/Windows NT',
                               'SecEdit/GptTmpl.inf')

        before_vers = gpt_ini_version(self.gpo_guid)
        (result, out, err) = self.runsublevelcmd("gpo", ("manage", "security",
                                                 "set"), self.gpo_guid,
                                                 'MaxTicketAge', '10',
                                                 "-H", "ldap://%s" %
                                                 os.environ["SERVER"],
                                                 "-U%s%%%s" %
                                                 (os.environ["USERNAME"],
                                                  os.environ["PASSWORD"]))
        self.assertCmdSuccess(result, out, err,
                              'Failed to set MaxTicketAge')
        after_vers = gpt_ini_version(self.gpo_guid)
        self.assertGreater(after_vers, before_vers, 'GPT.INI was not updated')

        before_vers = after_vers
        (result, out, err) = self.runsublevelcmd("gpo", ("manage", "security",
                                                 "set"), self.gpo_guid,
                                                 'MaxTicketAge',
                                                 "-H", "ldap://%s" %
                                                 os.environ["SERVER"],
                                                 "-U%s%%%s" %
                                                 (os.environ["USERNAME"],
                                                  os.environ["PASSWORD"]))
        self.assertCmdSuccess(result, out, err,
                              'Failed to unset MaxTicketAge')
        after_vers = gpt_ini_version(self.gpo_guid)
        self.assertGreater(after_vers, before_vers, 'GPT.INI was not updated')

        # interpolation=None: GptTmpl.inf values may contain '%' characters.
        inf_data = ConfigParser(interpolation=None)
        inf_data.read(gpt_inf)

        # MaxTicketAge lived in [Kerberos Policy]; once its last entry is
        # removed the whole section must disappear.
        self.assertFalse(inf_data.has_section('Kerberos Policy'))
+
    def test_sudoers_add(self):
        """Add a sudoers rule via `gpo manage sudoers add` alongside a
        pre-staged rule, list both, then remove both again."""
        lp = LoadParm()
        lp.load(os.environ['SERVERCONFFILE'])
        local_path = lp.get('path', 'sysvol')
        reg_pol = os.path.join(local_path, lp.get('realm').lower(), 'Policies',
                               self.gpo_guid, 'Machine/Registry.pol')

        # Stage the Registry.pol file with test data
        stage = preg.file()
        e = preg.entry()
        e.keyname = b'Software\\Policies\\Samba\\Unix Settings\\Sudo Rights'
        e.valuename = b'Software\\Policies\\Samba\\Unix Settings'
        e.type = 1  # REG_SZ
        e.data = b'fakeu ALL=(ALL) NOPASSWD: ALL'
        stage.num_entries = 1
        stage.entries = [e]
        ret = stage_file(reg_pol, ndr_pack(stage))
        self.assertTrue(ret, 'Could not create the target %s' % reg_pol)

        before_vers = gpt_ini_version(self.gpo_guid)
        # Add a second rule for user 'fakeu' and group 'fakeg'.
        (result, out, err) = self.runsublevelcmd("gpo", ("manage",
                                                 "sudoers", "add"),
                                                 self.gpo_guid, 'ALL', 'ALL',
                                                 'fakeu', 'fakeg', "-H",
                                                 "ldap://%s" %
                                                 os.environ["SERVER"],
                                                 "-U%s%%%s" %
                                                 (os.environ["USERNAME"],
                                                  os.environ["PASSWORD"]))
        self.assertCmdSuccess(result, out, err, 'Sudoers add failed')
        # Any change must bump the GPT.INI version so clients re-apply.
        after_vers = gpt_ini_version(self.gpo_guid)
        self.assertGreater(after_vers, before_vers, 'GPT.INI was not updated')

        # The rule the add command is expected to have synthesised.
        sudoer = 'fakeu,fakeg% ALL=(ALL) NOPASSWD: ALL'
        (result, out, err) = self.runsublevelcmd("gpo", ("manage",
                                                 "sudoers", "list"),
                                                 self.gpo_guid, "-H",
                                                 "ldap://%s" %
                                                 os.environ["SERVER"],
                                                 "-U%s%%%s" %
                                                 (os.environ["USERNAME"],
                                                  os.environ["PASSWORD"]))
        self.assertIn(sudoer, out, 'The test entry was not found!')
        self.assertIn(get_string(e.data), out, 'The test entry was not found!')

        before_vers = after_vers
        # Remove the rule added by the command ...
        (result, out, err) = self.runsublevelcmd("gpo", ("manage",
                                                 "sudoers", "remove"),
                                                 self.gpo_guid, sudoer,
                                                 "-H", "ldap://%s" %
                                                 os.environ["SERVER"],
                                                 "-U%s%%%s" %
                                                 (os.environ["USERNAME"],
                                                  os.environ["PASSWORD"]))
        self.assertCmdSuccess(result, out, err, 'Sudoers remove failed')
        after_vers = gpt_ini_version(self.gpo_guid)
        self.assertGreater(after_vers, before_vers, 'GPT.INI was not updated')

        before_vers = after_vers
        # ... and the pre-staged rule.
        (result, out, err) = self.runsublevelcmd("gpo", ("manage",
                                                 "sudoers", "remove"),
                                                 self.gpo_guid,
                                                 get_string(e.data),
                                                 "-H", "ldap://%s" %
                                                 os.environ["SERVER"],
                                                 "-U%s%%%s" %
                                                 (os.environ["USERNAME"],
                                                  os.environ["PASSWORD"]))
        self.assertCmdSuccess(result, out, err, 'Sudoers remove failed')
        after_vers = gpt_ini_version(self.gpo_guid)
        self.assertGreater(after_vers, before_vers, 'GPT.INI was not updated')

        (result, out, err) = self.runsublevelcmd("gpo", ("manage",
                                                 "sudoers", "list"),
                                                 self.gpo_guid, "-H",
                                                 "ldap://%s" %
                                                 os.environ["SERVER"],
                                                 "-U%s%%%s" %
                                                 (os.environ["USERNAME"],
                                                  os.environ["PASSWORD"]))
        self.assertNotIn(sudoer, out, 'The test entry was still found!')
        self.assertNotIn(get_string(e.data), out,
                         'The test entry was still found!')

        # Unstage the Registry.pol file
        unstage_file(reg_pol)
+
    def test_sudoers_list(self):
        """List/remove sudoers entries sourced from both VGP and Registry.pol.

        Stages a VGP SudoersConfiguration manifest with three entries (one
        with a user principal, one with no principal at all, one with
        dispersed user/group principals) plus one Registry.pol sudo entry,
        then checks that `samba-tool gpo manage sudoers list` reports all of
        them and that `remove` deletes the requested ones.
        """
        lp = LoadParm()
        lp.load(os.environ['SERVERCONFFILE'])
        local_path = lp.get('path', 'sysvol')
        vgp_xml = os.path.join(local_path, lp.get('realm').lower(), 'Policies',
                               self.gpo_guid, 'Machine/VGP/VTLA/Sudo',
                               'SudoersConfiguration/manifest.xml')

        # Build the VGP sudoers manifest.
        stage = etree.Element('vgppolicy')
        policysetting = etree.SubElement(stage, 'policysetting')
        pv = etree.SubElement(policysetting, 'version')
        pv.text = '1'
        name = etree.SubElement(policysetting, 'name')
        name.text = 'Sudo Policy'
        description = etree.SubElement(policysetting, 'description')
        description.text = 'Sudoers File Configuration Policy'
        apply_mode = etree.SubElement(policysetting, 'apply_mode')
        apply_mode.text = 'merge'
        data = etree.SubElement(policysetting, 'data')
        load_plugin = etree.SubElement(data, 'load_plugin')
        load_plugin.text = 'true'
        # Entry with a single user principal.
        sudoers_entry = etree.SubElement(data, 'sudoers_entry')
        command = etree.SubElement(sudoers_entry, 'command')
        command.text = 'ALL'
        user = etree.SubElement(sudoers_entry, 'user')
        user.text = 'ALL'
        listelement = etree.SubElement(sudoers_entry, 'listelement')
        principal = etree.SubElement(listelement, 'principal')
        principal.text = 'fakeu'
        principal.attrib['type'] = 'user'
        # Ensure an empty principal doesn't cause a crash
        sudoers_entry = etree.SubElement(data, 'sudoers_entry')
        command = etree.SubElement(sudoers_entry, 'command')
        command.text = 'ALL'
        user = etree.SubElement(sudoers_entry, 'user')
        user.text = 'ALL'
        # Ensure having dispersed principals still works
        sudoers_entry = etree.SubElement(data, 'sudoers_entry')
        command = etree.SubElement(sudoers_entry, 'command')
        command.text = 'ALL'
        user = etree.SubElement(sudoers_entry, 'user')
        user.text = 'ALL'
        listelement = etree.SubElement(sudoers_entry, 'listelement')
        principal = etree.SubElement(listelement, 'principal')
        principal.text = 'fakeu2'
        principal.attrib['type'] = 'user'
        listelement = etree.SubElement(sudoers_entry, 'listelement')
        group = etree.SubElement(listelement, 'principal')
        group.text = 'fakeg2'
        group.attrib['type'] = 'group'
        ret = stage_file(vgp_xml, etree.tostring(stage, 'utf-8'))
        self.assertTrue(ret, 'Could not create the target %s' % vgp_xml)

        reg_pol = os.path.join(local_path, lp.get('realm').lower(), 'Policies',
                               self.gpo_guid, 'Machine/Registry.pol')

        # Stage the Registry.pol file with test data
        stage = preg.file()
        e = preg.entry()
        e.keyname = b'Software\\Policies\\Samba\\Unix Settings\\Sudo Rights'
        e.valuename = b'Software\\Policies\\Samba\\Unix Settings'
        e.type = 1
        e.data = b'fakeu3 ALL=(ALL) NOPASSWD: ALL'
        stage.num_entries = 1
        stage.entries = [e]
        ret = stage_file(reg_pol, ndr_pack(stage))
        self.assertTrue(ret, 'Could not create the target %s' % reg_pol)

        # Expected rendering of the three VGP manifest entries above.
        sudoer = 'fakeu ALL=(ALL) NOPASSWD: ALL'
        sudoer2 = 'fakeu2,fakeg2% ALL=(ALL) NOPASSWD: ALL'
        sudoer_no_principal = 'ALL ALL=(ALL) NOPASSWD: ALL'
        (result, out, err) = self.runsublevelcmd(
            "gpo", ("manage", "sudoers", "list"), self.gpo_guid,
            "-H", "ldap://%s" % os.environ["SERVER"],
            "-U%s%%%s" % (os.environ["USERNAME"], os.environ["PASSWORD"]))
        self.assertCmdSuccess(result, out, err, 'Sudoers list failed')
        self.assertIn(sudoer, out, 'The test entry was not found!')
        self.assertIn(sudoer2, out, 'The test entry was not found!')
        # e.data is the Registry.pol-backed entry.
        self.assertIn(get_string(e.data), out, 'The test entry was not found!')
        self.assertIn(sudoer_no_principal, out,
                      'The test entry was not found!')

        (result, out, err) = self.runsublevelcmd(
            "gpo", ("manage", "sudoers", "remove"), self.gpo_guid, sudoer2,
            "-H", "ldap://%s" % os.environ["SERVER"],
            "-U%s%%%s" % (os.environ["USERNAME"], os.environ["PASSWORD"]))
        self.assertCmdSuccess(result, out, err, 'Sudoers remove failed')

        (result, out, err) = self.runsublevelcmd(
            "gpo", ("manage", "sudoers", "remove"), self.gpo_guid,
            sudoer_no_principal,
            "-H", "ldap://%s" % os.environ["SERVER"],
            "-U%s%%%s" % (os.environ["USERNAME"], os.environ["PASSWORD"]))
        self.assertCmdSuccess(result, out, err, 'Sudoers remove failed')

        (result, out, err) = self.runsublevelcmd(
            "gpo", ("manage", "sudoers", "list"), self.gpo_guid,
            "-H", "ldap://%s" % os.environ["SERVER"],
            "-U%s%%%s" % (os.environ["USERNAME"], os.environ["PASSWORD"]))
        self.assertNotIn(sudoer2, out, 'The test entry was still found!')
        self.assertNotIn(sudoer_no_principal, out,
                         'The test entry was still found!')

        # Unstage the manifest.xml file
        unstage_file(vgp_xml)
        # Unstage the Registry.pol file
        unstage_file(reg_pol)
+
    def test_symlink_list(self):
        """List symlink policies from a staged VGP Symlink manifest.

        Stages a manifest mapping test.source -> test.target under the GPO's
        sysvol path and checks the corresponding 'ln -s' line appears in the
        `samba-tool gpo manage symlink list` output.
        """
        lp = LoadParm()
        lp.load(os.environ['SERVERCONFFILE'])
        local_path = lp.get('path', 'sysvol')
        vgp_xml = os.path.join(local_path, lp.get('realm').lower(), 'Policies',
                               self.gpo_guid, 'Machine/VGP/VTLA/Unix',
                               'Symlink/manifest.xml')
        stage = etree.Element('vgppolicy')
        policysetting = etree.SubElement(stage, 'policysetting')
        pv = etree.SubElement(policysetting, 'version')
        pv.text = '1'
        name = etree.SubElement(policysetting, 'name')
        name.text = 'Symlink Policy'
        description = etree.SubElement(policysetting, 'description')
        description.text = 'Specifies symbolic link data'
        apply_mode = etree.SubElement(policysetting, 'apply_mode')
        apply_mode.text = 'merge'
        data = etree.SubElement(policysetting, 'data')
        file_properties = etree.SubElement(data, 'file_properties')
        source = etree.SubElement(file_properties, 'source')
        source.text = os.path.join(self.tempdir, 'test.source')
        target = etree.SubElement(file_properties, 'target')
        target.text = os.path.join(self.tempdir, 'test.target')
        ret = stage_file(vgp_xml, etree.tostring(stage, 'utf-8'))
        self.assertTrue(ret, 'Could not create the target %s' % vgp_xml)

        # The list output renders each link as an 'ln -s' command line.
        symlink = 'ln -s %s %s' % (source.text, target.text)
        (result, out, err) = self.runsublevelcmd(
            "gpo", ("manage", "symlink", "list"), self.gpo_guid,
            "-H", "ldap://%s" % os.environ["SERVER"],
            "-U%s%%%s" % (os.environ["USERNAME"], os.environ["PASSWORD"]))
        self.assertIn(symlink, out, 'The test entry was not found!')

        # Unstage the manifest.xml file
        unstage_file(vgp_xml)
+
    def test_symlink_add(self):
        """Add, list and remove a symlink policy via samba-tool.

        Also verifies that GPT.INI's version counter is bumped by both the
        add and the remove (clients only re-apply policy when it changes).
        """
        source_text = os.path.join(self.tempdir, 'test.source')
        target_text = os.path.join(self.tempdir, 'test.target')
        # The list output renders each link as an 'ln -s' command line.
        symlink = 'ln -s %s %s' % (source_text, target_text)
        before_vers = gpt_ini_version(self.gpo_guid)
        (result, out, err) = self.runsublevelcmd(
            "gpo", ("manage", "symlink", "add"), self.gpo_guid,
            source_text, target_text,
            "-H", "ldap://%s" % os.environ["SERVER"],
            "-U%s%%%s" % (os.environ["USERNAME"], os.environ["PASSWORD"]))
        self.assertCmdSuccess(result, out, err, 'Symlink add failed')
        after_vers = gpt_ini_version(self.gpo_guid)
        self.assertGreater(after_vers, before_vers, 'GPT.INI was not updated')

        (result, out, err) = self.runsublevelcmd(
            "gpo", ("manage", "symlink", "list"), self.gpo_guid,
            "-H", "ldap://%s" % os.environ["SERVER"],
            "-U%s%%%s" % (os.environ["USERNAME"], os.environ["PASSWORD"]))
        self.assertIn(symlink, out, 'The test entry was not found!')

        before_vers = after_vers
        (result, out, err) = self.runsublevelcmd(
            "gpo", ("manage", "symlink", "remove"), self.gpo_guid,
            source_text, target_text,
            "-H", "ldap://%s" % os.environ["SERVER"],
            "-U%s%%%s" % (os.environ["USERNAME"], os.environ["PASSWORD"]))
        self.assertCmdSuccess(result, out, err, 'Symlink remove failed')
        after_vers = gpt_ini_version(self.gpo_guid)
        self.assertGreater(after_vers, before_vers, 'GPT.INI was not updated')

        (result, out, err) = self.runsublevelcmd(
            "gpo", ("manage", "symlink", "list"), self.gpo_guid,
            "-H", "ldap://%s" % os.environ["SERVER"],
            "-U%s%%%s" % (os.environ["USERNAME"], os.environ["PASSWORD"]))
        self.assertNotIn(symlink, out, 'The test entry was not removed!')
+
    def test_files_list(self):
        """List file-deployment policies from a staged VGP Files manifest.

        Stages a manifest describing a file copy (source on the sysvol,
        target in tempdir, current user/group owner, 755 permissions) and
        checks the target path and rendered '-rwxr-xr-x' mode string appear
        in the `samba-tool gpo manage files list` output.
        """
        lp = LoadParm()
        lp.load(os.environ['SERVERCONFFILE'])
        local_path = lp.get('path', 'sysvol')
        vgp_xml = os.path.join(local_path, lp.get('realm').lower(), 'Policies',
                               self.gpo_guid, 'Machine/VGP/VTLA/Unix',
                               'Files/manifest.xml')
        source_file = os.path.join(local_path, lp.get('realm').lower(),
                                   'Policies', self.gpo_guid, 'Machine/VGP',
                                   'VTLA/Unix/Files/test.source')
        stage = etree.Element('vgppolicy')
        policysetting = etree.SubElement(stage, 'policysetting')
        pv = etree.SubElement(policysetting, 'version')
        pv.text = '1'
        name = etree.SubElement(policysetting, 'name')
        name.text = 'Files'
        description = etree.SubElement(policysetting, 'description')
        description.text = 'Represents file data to set/copy on clients'
        data = etree.SubElement(policysetting, 'data')
        file_properties = etree.SubElement(data, 'file_properties')
        source = etree.SubElement(file_properties, 'source')
        source.text = source_file
        target = etree.SubElement(file_properties, 'target')
        target.text = os.path.join(self.tempdir, 'test.target')
        # Owner/group are the user running the test so the assertions
        # hold regardless of environment.
        user = etree.SubElement(file_properties, 'user')
        user.text = pwd.getpwuid(os.getuid()).pw_name
        group = etree.SubElement(file_properties, 'group')
        group.text = grp.getgrgid(os.getgid()).gr_name

        # Request permissions of 755
        permissions = etree.SubElement(file_properties, 'permissions')
        permissions.set('type', 'user')
        etree.SubElement(permissions, 'read')
        etree.SubElement(permissions, 'write')
        etree.SubElement(permissions, 'execute')
        permissions = etree.SubElement(file_properties, 'permissions')
        permissions.set('type', 'group')
        etree.SubElement(permissions, 'read')
        etree.SubElement(permissions, 'execute')
        permissions = etree.SubElement(file_properties, 'permissions')
        permissions.set('type', 'other')
        etree.SubElement(permissions, 'read')
        etree.SubElement(permissions, 'execute')

        ret = stage_file(vgp_xml, etree.tostring(stage, 'utf-8'))
        self.assertTrue(ret, 'Could not create the target %s' % vgp_xml)

        (result, out, err) = self.runsublevelcmd(
            "gpo", ("manage", "files", "list"), self.gpo_guid,
            "-H", "ldap://%s" % os.environ["SERVER"],
            "-U%s%%%s" % (os.environ["USERNAME"], os.environ["PASSWORD"]))
        self.assertIn(target.text, out, 'The test entry was not found!')
        self.assertIn('-rwxr-xr-x', out,
                      'The test entry permissions were not found')

        # Unstage the manifest.xml file
        unstage_file(vgp_xml)
+
+ def test_files_add(self):
+ lp = LoadParm()
+ lp.load(os.environ['SERVERCONFFILE'])
+ local_path = lp.get('path', 'sysvol')
+ sysvol_source = os.path.join(local_path, lp.get('realm').lower(),
+ 'Policies', self.gpo_guid, 'Machine/VGP',
+ 'VTLA/Unix/Files/test.source')
+ source_file = os.path.join(self.tempdir, 'test.source')
+ source_data = '#!/bin/sh\necho hello world'
+ with open(source_file, 'w') as w:
+ w.write(source_data)
+ target_file = os.path.join(self.tempdir, 'test.target')
+ user = pwd.getpwuid(os.getuid()).pw_name
+ group = grp.getgrgid(os.getgid()).gr_name
+ before_vers = gpt_ini_version(self.gpo_guid)
+ (result, out, err) = self.runsublevelcmd("gpo", ("manage",
+ "files", "add"),
+ self.gpo_guid,
+ source_file,
+ target_file,
+ user, group,
+ '755', "-H",
+ "ldap://%s" %
+ os.environ["SERVER"],
+ "-U%s%%%s" %
+ (os.environ["USERNAME"],
+ os.environ["PASSWORD"]))
+ self.assertCmdSuccess(result, out, err, 'File add failed')
+ self.assertIn(source_data, open(sysvol_source, 'r').read(),
+ 'Failed to find the source file on the sysvol')
+ after_vers = gpt_ini_version(self.gpo_guid)
+ self.assertGreater(after_vers, before_vers, 'GPT.INI was not updated')
+
+ (result, out, err) = self.runsublevelcmd("gpo", ("manage",
+ "files", "list"),
+ self.gpo_guid, "-H",
+ "ldap://%s" %
+ os.environ["SERVER"],
+ "-U%s%%%s" %
+ (os.environ["USERNAME"],
+ os.environ["PASSWORD"]))
+ self.assertIn(target_file, out, 'The test entry was not found!')
+ self.assertIn('-rwxr-xr-x', out,
+ 'The test entry permissions were not found')
+
+ os.unlink(source_file)
+
+ before_vers = after_vers
+ (result, out, err) = self.runsublevelcmd("gpo", ("manage",
+ "files", "remove"),
+ self.gpo_guid,
+ target_file, "-H",
+ "ldap://%s" %
+ os.environ["SERVER"],
+ "-U%s%%%s" %
+ (os.environ["USERNAME"],
+ os.environ["PASSWORD"]))
+ self.assertCmdSuccess(result, out, err, 'File remove failed')
+ after_vers = gpt_ini_version(self.gpo_guid)
+ self.assertGreater(after_vers, before_vers, 'GPT.INI was not updated')
+
+ (result, out, err) = self.runsublevelcmd("gpo", ("manage",
+ "files", "list"),
+ self.gpo_guid, "-H",
+ "ldap://%s" %
+ os.environ["SERVER"],
+ "-U%s%%%s" %
+ (os.environ["USERNAME"],
+ os.environ["PASSWORD"]))
+ self.assertNotIn(target_file, out, 'The test entry was still found!')
+
    def test_vgp_openssh_list(self):
        """List sshd configuration policies from a staged VGP SshD manifest.

        Stages a manifest carrying a single KerberosAuthentication=Yes
        key/value pair and checks the rendered 'KerberosAuthentication Yes'
        line appears in the `samba-tool gpo manage openssh list` output.
        """
        lp = LoadParm()
        lp.load(os.environ['SERVERCONFFILE'])
        local_path = lp.get('path', 'sysvol')
        vgp_xml = os.path.join(local_path, lp.get('realm').lower(), 'Policies',
                               self.gpo_guid, 'Machine/VGP/VTLA/SshCfg',
                               'SshD/manifest.xml')

        stage = etree.Element('vgppolicy')
        policysetting = etree.SubElement(stage, 'policysetting')
        pv = etree.SubElement(policysetting, 'version')
        pv.text = '1'
        name = etree.SubElement(policysetting, 'name')
        name.text = 'Configuration File'
        description = etree.SubElement(policysetting, 'description')
        description.text = 'Represents Unix configuration file settings'
        apply_mode = etree.SubElement(policysetting, 'apply_mode')
        apply_mode.text = 'merge'
        data = etree.SubElement(policysetting, 'data')
        configfile = etree.SubElement(data, 'configfile')
        # filename/sectionname are intentionally left empty.
        etree.SubElement(configfile, 'filename')
        configsection = etree.SubElement(configfile, 'configsection')
        etree.SubElement(configsection, 'sectionname')
        opt = etree.SubElement(configsection, 'keyvaluepair')
        key = etree.SubElement(opt, 'key')
        key.text = 'KerberosAuthentication'
        value = etree.SubElement(opt, 'value')
        value.text = 'Yes'
        ret = stage_file(vgp_xml, etree.tostring(stage, 'utf-8'))
        self.assertTrue(ret, 'Could not create the target %s' % vgp_xml)

        openssh = 'KerberosAuthentication Yes'
        (result, out, err) = self.runsublevelcmd(
            "gpo", ("manage", "openssh", "list"), self.gpo_guid,
            "-H", "ldap://%s" % os.environ["SERVER"],
            "-U%s%%%s" % (os.environ["USERNAME"], os.environ["PASSWORD"]))
        self.assertIn(openssh, out, 'The test entry was not found!')

        # Unstage the manifest.xml file
        unstage_file(vgp_xml)
+
    def test_vgp_openssh_set(self):
        """Set and unset an sshd configuration option via samba-tool.

        `openssh set` with a value adds the option; calling it again
        without a value unsets it. Both operations must bump the GPT.INI
        version counter.
        """
        before_vers = gpt_ini_version(self.gpo_guid)
        (result, out, err) = self.runsublevelcmd(
            "gpo", ("manage", "openssh", "set"), self.gpo_guid,
            "KerberosAuthentication", "Yes",
            "-H", "ldap://%s" % os.environ["SERVER"],
            "-U%s%%%s" % (os.environ["USERNAME"], os.environ["PASSWORD"]))
        self.assertCmdSuccess(result, out, err, 'OpenSSH set failed')
        after_vers = gpt_ini_version(self.gpo_guid)
        self.assertGreater(after_vers, before_vers, 'GPT.INI was not updated')

        openssh = 'KerberosAuthentication Yes'
        (result, out, err) = self.runsublevelcmd(
            "gpo", ("manage", "openssh", "list"), self.gpo_guid,
            "-H", "ldap://%s" % os.environ["SERVER"],
            "-U%s%%%s" % (os.environ["USERNAME"], os.environ["PASSWORD"]))
        self.assertIn(openssh, out, 'The test entry was not found!')

        before_vers = after_vers
        # No value argument: unsets the option.
        (result, out, err) = self.runsublevelcmd(
            "gpo", ("manage", "openssh", "set"), self.gpo_guid,
            "KerberosAuthentication",
            "-H", "ldap://%s" % os.environ["SERVER"],
            "-U%s%%%s" % (os.environ["USERNAME"], os.environ["PASSWORD"]))
        self.assertCmdSuccess(result, out, err, 'OpenSSH unset failed')
        after_vers = gpt_ini_version(self.gpo_guid)
        self.assertGreater(after_vers, before_vers, 'GPT.INI was not updated')

        (result, out, err) = self.runsublevelcmd(
            "gpo", ("manage", "openssh", "list"), self.gpo_guid,
            "-H", "ldap://%s" % os.environ["SERVER"],
            "-U%s%%%s" % (os.environ["USERNAME"], os.environ["PASSWORD"]))
        self.assertNotIn(openssh, out, 'The test entry was still found!')
+
    def test_startup_script_add(self):
        """Add, list and remove a startup script policy via samba-tool.

        Uploads a temporary shell script with a "'-n'" argument, checks the
        generated '@reboot root <script> -n' entry is listed, that the script
        landed on the sysvol, and that removal delists it. GPT.INI must be
        bumped by both the add and the remove.
        """
        # NOTE(review): lp is never load()ed here, unlike the sibling tests
        # which load SERVERCONFFILE before calling lp.get() — verify that
        # the default-loaded realm/path values are really intended.
        lp = LoadParm()
        fname = None
        before_vers = gpt_ini_version(self.gpo_guid)
        with NamedTemporaryFile() as f:
            fname = os.path.basename(f.name)
            f.write(b'#!/bin/sh\necho $@ hello world')
            f.flush()
            (result, out, err) = self.runsublevelcmd(
                "gpo", ("manage", "scripts", "startup", "add"), self.gpo_guid,
                f.name, "'-n'",
                "-H", "ldap://%s" % os.environ["SERVER"],
                "-U%s%%%s" % (os.environ["USERNAME"], os.environ["PASSWORD"]))
            self.assertCmdSuccess(result, out, err, 'Script add failed')
            after_vers = gpt_ini_version(self.gpo_guid)
            self.assertGreater(after_vers, before_vers,
                               'GPT.INI was not updated')

            # UNC-style path under which the script is published to clients.
            script_path = '\\'.join(['\\', lp.get('realm').lower(), 'Policies',
                                     self.gpo_guid, 'MACHINE\\VGP\\VTLA\\Unix',
                                     'Scripts\\Startup', fname])
            entry = '@reboot root %s -n' % script_path
            (result, out, err) = self.runsublevelcmd(
                "gpo", ("manage", "scripts", "startup", "list"), self.gpo_guid,
                "-H", "ldap://%s" % os.environ["SERVER"],
                "-U%s%%%s" % (os.environ["USERNAME"], os.environ["PASSWORD"]))
            self.assertIn(entry, out, 'The test entry was not found!')
            local_path = lp.get('path', 'sysvol')
            local_script_path = os.path.join(local_path,
                                             lp.get('realm').lower(),
                                             'Policies', self.gpo_guid,
                                             'Machine/VGP/VTLA/Unix',
                                             'Scripts/Startup', fname)
            self.assertTrue(os.path.exists(local_script_path),
                            'The test script was not uploaded to the sysvol')

            before_vers = after_vers
            (result, out, err) = self.runsublevelcmd(
                "gpo", ("manage", "scripts", "startup", "remove"),
                self.gpo_guid, f.name,
                "-H", "ldap://%s" % os.environ["SERVER"],
                "-U%s%%%s" % (os.environ["USERNAME"], os.environ["PASSWORD"]))
            self.assertCmdSuccess(result, out, err, 'Script remove failed')
            after_vers = gpt_ini_version(self.gpo_guid)
            self.assertGreater(after_vers, before_vers,
                               'GPT.INI was not updated')

            (result, out, err) = self.runsublevelcmd(
                "gpo", ("manage", "scripts", "startup", "list"), self.gpo_guid,
                "-H", "ldap://%s" % os.environ["SERVER"],
                "-U%s%%%s" % (os.environ["USERNAME"], os.environ["PASSWORD"]))
            self.assertNotIn(entry, out, 'The test entry was still found!')
+
    def test_startup_script_list(self):
        """List startup script policies from a staged VGP Scripts manifest.

        Stages a manifest naming 'test.sh' with parameter '-e' and checks
        the rendered '@reboot root <script> -e' entry is listed.
        """
        lp = LoadParm()
        lp.load(os.environ['SERVERCONFFILE'])
        local_path = lp.get('path', 'sysvol')
        vgp_xml = os.path.join(local_path, lp.get('realm').lower(), 'Policies',
                               self.gpo_guid, 'Machine/VGP/VTLA/Unix',
                               'Scripts/Startup/manifest.xml')
        stage = etree.Element('vgppolicy')
        policysetting = etree.SubElement(stage, 'policysetting')
        pv = etree.SubElement(policysetting, 'version')
        pv.text = '1'
        name = etree.SubElement(policysetting, 'name')
        name.text = 'Unix Scripts'
        description = etree.SubElement(policysetting, 'description')
        description.text = 'Represents Unix scripts to run on Group Policy clients'
        data = etree.SubElement(policysetting, 'data')
        listelement = etree.SubElement(data, 'listelement')
        script = etree.SubElement(listelement, 'script')
        script.text = 'test.sh'
        parameters = etree.SubElement(listelement, 'parameters')
        parameters.text = '-e'
        ret = stage_file(vgp_xml, etree.tostring(stage, 'utf-8'))
        self.assertTrue(ret, 'Could not create the target %s' % vgp_xml)

        # UNC-style path under which the script is published to clients.
        script_path = '\\'.join(['\\', lp.get('realm').lower(), 'Policies',
                                 self.gpo_guid, 'MACHINE\\VGP\\VTLA\\Unix',
                                 'Scripts\\Startup', script.text])
        entry = '@reboot root %s %s' % (script_path, parameters.text)
        (result, out, err) = self.runsublevelcmd(
            "gpo", ("manage", "scripts", "startup", "list"), self.gpo_guid,
            "-H", "ldap://%s" % os.environ["SERVER"],
            "-U%s%%%s" % (os.environ["USERNAME"], os.environ["PASSWORD"]))
        self.assertIn(entry, out, 'The test entry was not found!')

        # Unstage the manifest.xml file
        unstage_file(vgp_xml)
+
    def test_vgp_motd_set(self):
        """Set and unset the message of the day via samba-tool.

        `motd set` with a message stores it; calling it again without a
        message unsets it. Both operations must bump the GPT.INI version.
        """
        text = 'This is the message of the day'
        # Quoted with a trailing newline, as it would be passed on a shell
        # command line.
        msg = '"%s\n"' % text
        before_vers = gpt_ini_version(self.gpo_guid)
        (result, out, err) = self.runsublevelcmd(
            "gpo", ("manage", "motd", "set"), self.gpo_guid, msg,
            "-H", "ldap://%s" % os.environ["SERVER"],
            "-U%s%%%s" % (os.environ["USERNAME"], os.environ["PASSWORD"]))
        self.assertCmdSuccess(result, out, err, 'MOTD set failed')
        after_vers = gpt_ini_version(self.gpo_guid)
        self.assertGreater(after_vers, before_vers, 'GPT.INI was not updated')

        (result, out, err) = self.runsublevelcmd(
            "gpo", ("manage", "motd", "list"), self.gpo_guid,
            "-H", "ldap://%s" % os.environ["SERVER"],
            "-U%s%%%s" % (os.environ["USERNAME"], os.environ["PASSWORD"]))
        self.assertIn(text, out, 'The test entry was not found!')

        before_vers = after_vers
        # No message argument: unsets the MOTD.
        (result, out, err) = self.runsublevelcmd(
            "gpo", ("manage", "motd", "set"), self.gpo_guid,
            "-H", "ldap://%s" % os.environ["SERVER"],
            "-U%s%%%s" % (os.environ["USERNAME"], os.environ["PASSWORD"]))
        self.assertCmdSuccess(result, out, err, 'MOTD unset failed')
        after_vers = gpt_ini_version(self.gpo_guid)
        self.assertGreater(after_vers, before_vers, 'GPT.INI was not updated')

        (result, out, err) = self.runsublevelcmd(
            "gpo", ("manage", "motd", "list"), self.gpo_guid,
            "-H", "ldap://%s" % os.environ["SERVER"],
            "-U%s%%%s" % (os.environ["USERNAME"], os.environ["PASSWORD"]))
        self.assertNotIn(text, out, 'The test entry was still found!')
+
    def test_vgp_motd(self):
        """List the MOTD from a staged VGP MOTD manifest.

        Stages a 'Text File' manifest with a message-of-the-day body and
        checks it appears in `samba-tool gpo manage motd list` output.
        """
        lp = LoadParm()
        lp.load(os.environ['SERVERCONFFILE'])
        local_path = lp.get('path', 'sysvol')
        vgp_xml = os.path.join(local_path, lp.get('realm').lower(), 'Policies',
                               self.gpo_guid, 'Machine/VGP/VTLA/Unix',
                               'MOTD/manifest.xml')

        stage = etree.Element('vgppolicy')
        policysetting = etree.SubElement(stage, 'policysetting')
        pv = etree.SubElement(policysetting, 'version')
        pv.text = '1'
        name = etree.SubElement(policysetting, 'name')
        name.text = 'Text File'
        description = etree.SubElement(policysetting, 'description')
        description.text = 'Represents a Generic Text File'
        apply_mode = etree.SubElement(policysetting, 'apply_mode')
        apply_mode.text = 'replace'
        data = etree.SubElement(policysetting, 'data')
        filename = etree.SubElement(data, 'filename')
        filename.text = 'motd'
        text = etree.SubElement(data, 'text')
        text.text = 'This is a message of the day'
        ret = stage_file(vgp_xml, etree.tostring(stage, 'utf-8'))
        self.assertTrue(ret, 'Could not create the target %s' % vgp_xml)

        (result, out, err) = self.runsublevelcmd(
            "gpo", ("manage", "motd", "list"), self.gpo_guid,
            "-H", "ldap://%s" % os.environ["SERVER"],
            "-U%s%%%s" % (os.environ["USERNAME"], os.environ["PASSWORD"]))
        self.assertIn(text.text, out, 'The test entry was not found!')

        # Unstage the manifest.xml file
        unstage_file(vgp_xml)
+
    def test_vgp_issue_list(self):
        """List the login banner from a staged VGP Issue manifest.

        Stages a 'Text File' manifest for /etc/issue content and checks it
        appears in `samba-tool gpo manage issue list` output.
        """
        lp = LoadParm()
        lp.load(os.environ['SERVERCONFFILE'])
        local_path = lp.get('path', 'sysvol')
        vgp_xml = os.path.join(local_path, lp.get('realm').lower(), 'Policies',
                               self.gpo_guid, 'Machine/VGP/VTLA/Unix',
                               'Issue/manifest.xml')

        stage = etree.Element('vgppolicy')
        policysetting = etree.SubElement(stage, 'policysetting')
        pv = etree.SubElement(policysetting, 'version')
        pv.text = '1'
        name = etree.SubElement(policysetting, 'name')
        name.text = 'Text File'
        description = etree.SubElement(policysetting, 'description')
        description.text = 'Represents a Generic Text File'
        apply_mode = etree.SubElement(policysetting, 'apply_mode')
        apply_mode.text = 'replace'
        data = etree.SubElement(policysetting, 'data')
        filename = etree.SubElement(data, 'filename')
        filename.text = 'issue'
        text = etree.SubElement(data, 'text')
        text.text = 'Welcome to Samba!'
        ret = stage_file(vgp_xml, etree.tostring(stage, 'utf-8'))
        self.assertTrue(ret, 'Could not create the target %s' % vgp_xml)

        (result, out, err) = self.runsublevelcmd(
            "gpo", ("manage", "issue", "list"), self.gpo_guid,
            "-H", "ldap://%s" % os.environ["SERVER"],
            "-U%s%%%s" % (os.environ["USERNAME"], os.environ["PASSWORD"]))
        self.assertIn(text.text, out, 'The test entry was not found!')

        # Unstage the manifest.xml file
        unstage_file(vgp_xml)
+
    def test_vgp_issue_set(self):
        """Set and unset the login banner (issue) via samba-tool.

        `issue set` with a message stores it; calling it again without a
        message unsets it. Both operations must bump the GPT.INI version.
        """
        text = 'Welcome to Samba!'
        # Quoted with a trailing newline, as it would be passed on a shell
        # command line.
        msg = '"%s\n"' % text
        before_vers = gpt_ini_version(self.gpo_guid)
        (result, out, err) = self.runsublevelcmd(
            "gpo", ("manage", "issue", "set"), self.gpo_guid, msg,
            "-H", "ldap://%s" % os.environ["SERVER"],
            "-U%s%%%s" % (os.environ["USERNAME"], os.environ["PASSWORD"]))
        self.assertCmdSuccess(result, out, err, 'Issue set failed')
        after_vers = gpt_ini_version(self.gpo_guid)
        self.assertGreater(after_vers, before_vers, 'GPT.INI was not updated')

        (result, out, err) = self.runsublevelcmd(
            "gpo", ("manage", "issue", "list"), self.gpo_guid,
            "-H", "ldap://%s" % os.environ["SERVER"],
            "-U%s%%%s" % (os.environ["USERNAME"], os.environ["PASSWORD"]))
        self.assertIn(text, out, 'The test entry was not found!')

        before_vers = after_vers
        # No message argument: unsets the banner.
        (result, out, err) = self.runsublevelcmd(
            "gpo", ("manage", "issue", "set"), self.gpo_guid,
            "-H", "ldap://%s" % os.environ["SERVER"],
            "-U%s%%%s" % (os.environ["USERNAME"], os.environ["PASSWORD"]))
        self.assertCmdSuccess(result, out, err, 'Issue unset failed')
        after_vers = gpt_ini_version(self.gpo_guid)
        self.assertGreater(after_vers, before_vers, 'GPT.INI was not updated')

        (result, out, err) = self.runsublevelcmd(
            "gpo", ("manage", "issue", "list"), self.gpo_guid,
            "-H", "ldap://%s" % os.environ["SERVER"],
            "-U%s%%%s" % (os.environ["USERNAME"], os.environ["PASSWORD"]))
        self.assertNotIn(text, out, 'The test entry was still found!')
+
    def test_load_show_remove(self):
        """Round-trip `gpo load`, `gpo show` and `gpo remove`.

        Loads a JSON policy (gpo_load_json, module-level fixture) with
        explicit machine/user extension GUIDs, then an empty policy to
        exercise the default-extension path, checks `show` renders all
        loaded values, and finally removes them again. GPT.INI must be
        bumped by real changes and left alone by the empty merge.
        """
        before_vers = gpt_ini_version(self.gpo_guid)
        with NamedTemporaryFile() as f:
            f.write(gpo_load_json)
            f.flush()
            (result, out, err) = self.runsubcmd(
                "gpo", "load", self.gpo_guid,
                "--content=%s" % f.name,
                "--machine-ext-name=%s" % ext_guids[0],
                "--user-ext-name=%s" % ext_guids[1],
                "-H", "ldap://%s" % os.environ["SERVER"],
                "-U%s%%%s" % (os.environ["USERNAME"], os.environ["PASSWORD"]))
            self.assertCmdSuccess(result, out, err, 'Loading policy failed')
            after_vers = gpt_ini_version(self.gpo_guid)
            self.assertGreater(after_vers, before_vers,
                               'GPT.INI was not updated')

        before_vers = after_vers
        # Write the default registry extension
        with NamedTemporaryFile() as f:
            f.write(b'[]')  # Intentionally empty policy
            f.flush()
            # Load an empty policy, taking the default client extension
            (result, out, err) = self.runsubcmd(
                "gpo", "load", self.gpo_guid,
                "--content=%s" % f.name,
                "-H", "ldap://%s" % os.environ["SERVER"],
                "-U%s%%%s" % (os.environ["USERNAME"], os.environ["PASSWORD"]))
            self.assertCmdSuccess(result, out, err, 'Loading policy failed')
            after_vers = gpt_ini_version(self.gpo_guid)
            # An empty merge must not touch the version counter.
            self.assertEqual(after_vers, before_vers,
                             'GPT.INI changed on empty merge')

        (result, out, err) = self.runsubcmd("gpo", "show", self.gpo_guid, "-H",
                                            "ldap://%s" % os.environ["SERVER"])
        self.assertCmdSuccess(result, out, err, 'Failed to fetch gpos')
        self.assertIn('homepage', out, 'Homepage policy not loaded')
        self.assertIn('samba.org', out, 'Homepage policy not loaded')
        self.assertIn(ext_guids[0], out, 'Machine extension not loaded')
        self.assertIn(ext_guids[1], out, 'User extension not loaded')
        # Default registry client-side extension GUID.
        self.assertIn('{35378eac-683f-11d2-a89a-00c04fbbcfa2}', out,
                      'Default extension not loaded')
        # Expected fragments of the JSON rendering produced by `show`.
        toolbar_data = '"valuename": "IEToolbar",\n "class": "USER",' + \
                       '\n "type": "REG_BINARY",' + \
                       '\n "data": [\n 0\n ]'
        self.assertIn(toolbar_data, out, 'Toolbar policy not loaded')
        restrict_data = '"valuename": "RestrictImplicitTextCollection",' + \
                        '\n "class": "USER",' + \
                        '\n "type": "REG_DWORD",\n "data": 1\n'
        self.assertIn(restrict_data, out, 'Restrict policy not loaded')
        ext_data = '" \\"key\\": \\"value\\"",'
        self.assertIn(ext_data, out, 'Extension policy not loaded')

        before_vers = after_vers
        with NamedTemporaryFile() as f:
            f.write(gpo_remove_json)
            f.flush()
            (result, out, err) = self.runsubcmd(
                "gpo", "remove", self.gpo_guid,
                "--content=%s" % f.name,
                "--machine-ext-name=%s" % ext_guids[0],
                "--user-ext-name=%s" % ext_guids[1],
                "-H", "ldap://%s" % os.environ["SERVER"],
                "-U%s%%%s" % (os.environ["USERNAME"], os.environ["PASSWORD"]))
            self.assertCmdSuccess(result, out, err, 'Removing policy failed')
            after_vers = gpt_ini_version(self.gpo_guid)
            self.assertGreater(after_vers, before_vers,
                               'GPT.INI was not updated')

        (result, out, err) = self.runsubcmd("gpo", "show", self.gpo_guid, "-H",
                                            "ldap://%s" % os.environ["SERVER"])
        self.assertCmdSuccess(result, out, err, 'Failed to fetch gpos')
        self.assertNotIn('samba.org', out, 'Homepage policy not removed')
        self.assertNotIn(ext_guids[0], out, 'Machine extension not unloaded')
        self.assertNotIn(ext_guids[1], out, 'User extension not unloaded')
+
    def test_cse_register_unregister_list(self):
        """Register, list and unregister a client-side extension (CSE).

        Registers a machine-policy CSE backed by a temp file, verifies its
        listing (name, entry point, machine/user flags, well-formed GUID),
        then unregisters it by GUID and verifies it is gone.
        """
        with NamedTemporaryFile() as f:
            (result, out, err) = self.runsublevelcmd(
                "gpo", ("cse", "register"), f.name, 'gp_test_ext',
                '--machine')
            self.assertCmdSuccess(result, out, err, 'CSE register failed')

            (result, out, err) = self.runsublevelcmd("gpo", ("cse", "list"))
            self.assertIn(f.name, out, 'The test cse was not found')
            self.assertIn('ProcessGroupPolicy : gp_test_ext', out,
                          'The test cse was not found')
            self.assertIn('MachinePolicy : True', out,
                          'The test cse was not enabled')
            self.assertIn('UserPolicy : False', out,
                          'The test cse should not have User policy enabled')
            # NOTE(review): '^' without re.MULTILINE only matches at the very
            # start of `out` — presumably the listing begins with the
            # UniqueGUID line; confirm against the list command's output.
            cse_ext = re.findall(r'^UniqueGUID\s+:\s+(.*)', out)
            self.assertEqual(len(cse_ext), 1,
                             'The test cse GUID was not found')
            cse_ext = cse_ext[0]
            self.assertTrue(check_guid(cse_ext),
                            'The test cse GUID was not formatted correctly')

            (result, out, err) = self.runsublevelcmd(
                "gpo", ("cse", "unregister"), cse_ext)
            self.assertCmdSuccess(result, out, err, 'CSE unregister failed')

            (result, out, err) = self.runsublevelcmd("gpo", ("cse", "list"))
            self.assertNotIn(f.name, out, 'The test cse was still found')
+
+ def setUp(self):
+ """set up a temporary GPO to work with"""
+ super().setUp()
+ (result, out, err) = self.runsubcmd("gpo", "create", self.gpo_name,
+ "-H", "ldap://%s" % os.environ["SERVER"],
+ "-U%s%%%s" % (os.environ["USERNAME"], os.environ["PASSWORD"]),
+ "--tmpdir", self.tempdir)
+ self.assertCmdSuccess(result, out, err, "Ensuring gpo created successfully")
+ shutil.rmtree(os.path.join(self.tempdir, "policy"))
+ try:
+ self.gpo_guid = "{%s}" % out.split("{")[1].split("}")[0]
+ except IndexError:
+ self.fail("Failed to find GUID in output: %s" % out)
+
+ self.backup_path = os.path.join(samba.source_tree_topdir(), 'source4',
+ 'selftest', 'provisions',
+ 'generalized-gpo-backup')
+
+ self.entity_file = os.path.join(self.backup_path, 'entities')
+
+ def tearDown(self):
+ """remove the temporary GPO to work with"""
+ (result, out, err) = self.runsubcmd("gpo", "del", self.gpo_guid, "-H", "ldap://%s" % os.environ["SERVER"], "-U%s%%%s" % (os.environ["USERNAME"], os.environ["PASSWORD"]))
+ self.assertCmdSuccess(result, out, err, "Ensuring gpo deleted successfully")
+ super().tearDown()
diff --git a/python/samba/tests/samba_tool/gpo_exts.py b/python/samba/tests/samba_tool/gpo_exts.py
new file mode 100644
index 0000000..e7a24b0
--- /dev/null
+++ b/python/samba/tests/samba_tool/gpo_exts.py
@@ -0,0 +1,202 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) David Mulder 2021
+#
+# based on gpo.py:
+# Copyright (C) Andrew Bartlett 2012
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import os
+from samba.tests.samba_tool.base import SambaToolCmdTest
+import shutil
+from samba.param import LoadParm
+from samba.tests.gpo import stage_file, unstage_file
+import xml.etree.ElementTree as etree
+
+class GpoCmdTestCase(SambaToolCmdTest):
+    """Tests for samba-tool gpo subcommands"""
+
+ gpo_name = "testgpo"
+
+ def test_vgp_access_list(self):
+ lp = LoadParm()
+ lp.load(os.environ['SERVERCONFFILE'])
+ local_path = lp.get('path', 'sysvol')
+ vgp_xml = os.path.join(local_path, lp.get('realm').lower(), 'Policies',
+ self.gpo_guid, 'Machine/VGP/VTLA/VAS',
+ 'HostAccessControl/Allow/manifest.xml')
+
+ stage = etree.Element('vgppolicy')
+ policysetting = etree.SubElement(stage, 'policysetting')
+ pv = etree.SubElement(policysetting, 'version')
+ pv.text = '1'
+ name = etree.SubElement(policysetting, 'name')
+ name.text = 'Host Access Control'
+ description = etree.SubElement(policysetting, 'description')
+ description.text = 'Represents host access control data (pam_access)'
+ apply_mode = etree.SubElement(policysetting, 'apply_mode')
+ apply_mode.text = 'merge'
+ data = etree.SubElement(policysetting, 'data')
+ listelement = etree.SubElement(data, 'listelement')
+ etype = etree.SubElement(listelement, 'type')
+ etype.text = 'USER'
+ entry = etree.SubElement(listelement, 'entry')
+ entry.text = 'goodguy@%s' % lp.get('realm').lower()
+ adobject = etree.SubElement(listelement, 'adobject')
+ name = etree.SubElement(adobject, 'name')
+ name.text = 'goodguy'
+ domain = etree.SubElement(adobject, 'domain')
+ domain.text = lp.get('realm').lower()
+ etype = etree.SubElement(adobject, 'type')
+ etype.text = 'user'
+ groupattr = etree.SubElement(data, 'groupattr')
+ groupattr.text = 'samAccountName'
+ listelement = etree.SubElement(data, 'listelement')
+ etype = etree.SubElement(listelement, 'type')
+ etype.text = 'GROUP'
+ entry = etree.SubElement(listelement, 'entry')
+ entry.text = '%s\\goodguys' % lp.get('realm').lower()
+ adobject = etree.SubElement(listelement, 'adobject')
+ name = etree.SubElement(adobject, 'name')
+ name.text = 'goodguys'
+ domain = etree.SubElement(adobject, 'domain')
+ domain.text = lp.get('realm').lower()
+ etype = etree.SubElement(adobject, 'type')
+ etype.text = 'group'
+ ret = stage_file(vgp_xml, etree.tostring(stage, 'utf-8'))
+ self.assertTrue(ret, 'Could not create the target %s' % vgp_xml)
+
+ uentry = '+:%s\\goodguy:ALL' % domain.text
+ gentry = '+:%s\\goodguys:ALL' % domain.text
+ (result, out, err) = self.runsublevelcmd("gpo", ("manage",
+ "access", "list"),
+ self.gpo_guid, "-H",
+ "ldap://%s" %
+ os.environ["SERVER"],
+ "-U%s%%%s" %
+ (os.environ["USERNAME"],
+ os.environ["PASSWORD"]))
+ self.assertIn(uentry, out, 'The test entry was not found!')
+ self.assertIn(gentry, out, 'The test entry was not found!')
+
+ # Unstage the manifest.xml file
+ unstage_file(vgp_xml)
+
+ def test_vgp_access_add(self):
+ lp = LoadParm()
+ lp.load(os.environ['SERVERCONFFILE'])
+
+ (result, out, err) = self.runsublevelcmd("gpo", ("manage",
+ "access", "add"),
+ self.gpo_guid,
+ "allow", self.test_user,
+ lp.get('realm').lower(),
+ "-H", "ldap://%s" %
+ os.environ["SERVER"],
+ "-U%s%%%s" %
+ (os.environ["USERNAME"],
+ os.environ["PASSWORD"]))
+ self.assertCmdSuccess(result, out, err, 'Access add failed')
+
+ (result, out, err) = self.runsublevelcmd("gpo", ("manage",
+ "access", "add"),
+ self.gpo_guid,
+ "deny", self.test_group,
+ lp.get('realm').lower(),
+ "-H", "ldap://%s" %
+ os.environ["SERVER"],
+ "-U%s%%%s" %
+ (os.environ["USERNAME"],
+ os.environ["PASSWORD"]))
+ self.assertCmdSuccess(result, out, err, 'Access add failed')
+
+ allow_entry = '+:%s\\%s:ALL' % (lp.get('realm').lower(), self.test_user)
+ deny_entry = '-:%s\\%s:ALL' % (lp.get('realm').lower(), self.test_group)
+ (result, out, err) = self.runsublevelcmd("gpo", ("manage",
+ "access", "list"),
+ self.gpo_guid, "-H",
+ "ldap://%s" %
+ os.environ["SERVER"],
+ "-U%s%%%s" %
+ (os.environ["USERNAME"],
+ os.environ["PASSWORD"]))
+ self.assertIn(allow_entry, out, 'The test entry was not found!')
+ self.assertIn(deny_entry, out, 'The test entry was not found!')
+
+ (result, out, err) = self.runsublevelcmd("gpo", ("manage",
+ "access", "remove"),
+ self.gpo_guid,
+ "allow", self.test_user,
+ lp.get('realm').lower(),
+ "-H", "ldap://%s" %
+ os.environ["SERVER"],
+ "-U%s%%%s" %
+ (os.environ["USERNAME"],
+ os.environ["PASSWORD"]))
+ self.assertCmdSuccess(result, out, err, 'Access remove failed')
+ (result, out, err) = self.runsublevelcmd("gpo", ("manage",
+ "access", "remove"),
+ self.gpo_guid,
+ "deny", self.test_group,
+ lp.get('realm').lower(),
+ "-H", "ldap://%s" %
+ os.environ["SERVER"],
+ "-U%s%%%s" %
+ (os.environ["USERNAME"],
+ os.environ["PASSWORD"]))
+ self.assertCmdSuccess(result, out, err, 'Access remove failed')
+
+ (result, out, err) = self.runsublevelcmd("gpo", ("manage",
+ "access", "list"),
+ self.gpo_guid, "-H",
+ "ldap://%s" %
+ os.environ["SERVER"],
+ "-U%s%%%s" %
+ (os.environ["USERNAME"],
+ os.environ["PASSWORD"]))
+ self.assertNotIn(allow_entry, out, 'The test entry was still found!')
+ self.assertNotIn(deny_entry, out, 'The test entry was still found!')
+
+ def setUp(self):
+ """set up a temporary GPO to work with"""
+ super().setUp()
+ (result, out, err) = self.runsubcmd("gpo", "create", self.gpo_name,
+ "-H", "ldap://%s" % os.environ["SERVER"],
+ "-U%s%%%s" % (os.environ["USERNAME"], os.environ["PASSWORD"]),
+ "--tmpdir", self.tempdir)
+ self.assertCmdSuccess(result, out, err, "Ensuring gpo created successfully")
+ shutil.rmtree(os.path.join(self.tempdir, "policy"))
+ try:
+ self.gpo_guid = "{%s}" % out.split("{")[1].split("}")[0]
+ except IndexError:
+ self.fail("Failed to find GUID in output: %s" % out)
+
+ self.test_user = 'testuser'
+ (result, out, err) = self.runsubcmd("user", "add", self.test_user,
+ "--random-password")
+ self.assertCmdSuccess(result, out, err, 'User creation failed')
+ self.test_group = 'testgroup'
+ (result, out, err) = self.runsubcmd("group", "add", self.test_group)
+ self.assertCmdSuccess(result, out, err, 'Group creation failed')
+
+ def tearDown(self):
+ """remove the temporary GPO to work with"""
+ (result, out, err) = self.runsubcmd("gpo", "del", self.gpo_guid, "-H", "ldap://%s" % os.environ["SERVER"], "-U%s%%%s" % (os.environ["USERNAME"], os.environ["PASSWORD"]))
+ self.assertCmdSuccess(result, out, err, "Ensuring gpo deleted successfully")
+ (result, out, err) = self.runsubcmd("user", "delete", self.test_user)
+ self.assertCmdSuccess(result, out, err, 'User delete failed')
+ (result, out, err) = self.runsubcmd("group", "delete", self.test_group)
+ self.assertCmdSuccess(result, out, err, 'Group delete failed')
+ super().tearDown()
diff --git a/python/samba/tests/samba_tool/group.py b/python/samba/tests/samba_tool/group.py
new file mode 100644
index 0000000..e7a660c
--- /dev/null
+++ b/python/samba/tests/samba_tool/group.py
@@ -0,0 +1,613 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Michael Adam 2012
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import os
+import time
+import ldb
+from samba.tests.samba_tool.base import SambaToolCmdTest
+from samba import (
+ nttime2unix,
+ dsdb
+ )
+
+
+class GroupCmdTestCase(SambaToolCmdTest):
+ """Tests for samba-tool group subcommands"""
+ groups = []
+ samdb = None
+
+ def setUp(self):
+ super().setUp()
+ self.samdb = self.getSamDB("-H", "ldap://%s" % os.environ["DC_SERVER"],
+ "-U%s%%%s" % (os.environ["DC_USERNAME"], os.environ["DC_PASSWORD"]))
+ self.groups = []
+ self.groups.append(self._randomGroup({"name": "testgroup1"}))
+ self.groups.append(self._randomGroup({"name": "testgroup2"}))
+ self.groups.append(self._randomGroup({"name": "testgroup3"}))
+ self.groups.append(self._randomGroup({"name": "testgroup4"}))
+ self.groups.append(self._randomGroup({"name": "testgroup5 (with brackets)"}))
+ self.groups.append(self._randomPosixGroup({"name": "posixgroup1"}))
+ self.groups.append(self._randomPosixGroup({"name": "posixgroup2"}))
+ self.groups.append(self._randomPosixGroup({"name": "posixgroup3"}))
+ self.groups.append(self._randomPosixGroup({"name": "posixgroup4"}))
+ self.groups.append(self._randomPosixGroup({"name": "posixgroup5 (with brackets)"}))
+ self.groups.append(self._randomUnixGroup({"name": "unixgroup1"}))
+ self.groups.append(self._randomUnixGroup({"name": "unixgroup2"}))
+ self.groups.append(self._randomUnixGroup({"name": "unixgroup3"}))
+ self.groups.append(self._randomUnixGroup({"name": "unixgroup4"}))
+ self.groups.append(self._randomUnixGroup({"name": "unixgroup5 (with brackets)"}))
+
+        # setup the 15 groups and ensure they are correct
+ for group in self.groups:
+ (result, out, err) = group["createGroupFn"](group)
+
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "There shouldn't be any error message")
+
+ if 'unix' in group["name"]:
+ self.assertIn("Modified Group '%s' successfully"
+ % group["name"], out)
+ else:
+ self.assertIn("Added group %s" % group["name"], out)
+
+ group["checkGroupFn"](group)
+
+ found = self._find_group(group["name"])
+
+ self.assertIsNotNone(found)
+
+ self.assertEqual("%s" % found.get("name"), group["name"])
+ self.assertEqual("%s" % found.get("description"), group["description"])
+
+ def tearDown(self):
+ super().tearDown()
+ # clean up all the left over groups, just in case
+ for group in self.groups:
+ if self._find_group(group["name"]):
+ self.runsubcmd("group", "delete", group["name"])
+
+ def test_newgroup(self):
+ """This tests the "group add" and "group delete" commands"""
+ # try to add all the groups again, this should fail
+ for group in self.groups:
+ (result, out, err) = self._create_group(group)
+ self.assertCmdFail(result, "Succeeded to add existing group")
+ self.assertIn("LDAP error 68 LDAP_ENTRY_ALREADY_EXISTS", err)
+
+ # try to delete all the groups we just added
+ for group in self.groups:
+ (result, out, err) = self.runsubcmd("group", "delete", group["name"])
+ self.assertCmdSuccess(result, out, err,
+ "Failed to delete group '%s'" % group["name"])
+ found = self._find_group(group["name"])
+ self.assertIsNone(found,
+ "Deleted group '%s' still exists" % group["name"])
+
+ # test adding groups
+ for group in self.groups:
+ (result, out, err) = self.runsubcmd("group", "add", group["name"],
+ "--description=%s" % group["description"],
+ "-H", "ldap://%s" % os.environ["DC_SERVER"],
+ "-U%s%%%s" % (os.environ["DC_USERNAME"],
+ os.environ["DC_PASSWORD"]))
+
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "There shouldn't be any error message")
+ self.assertIn("Added group %s" % group["name"], out)
+
+ found = self._find_group(group["name"])
+
+ self.assertEqual("%s" % found.get("samaccountname"),
+ "%s" % group["name"])
+
+ def test_list(self):
+ (result, out, err) = self.runsubcmd("group", "list",
+ "-H", "ldap://%s" % os.environ["DC_SERVER"],
+ "-U%s%%%s" % (os.environ["DC_USERNAME"],
+ os.environ["DC_PASSWORD"]))
+ self.assertCmdSuccess(result, out, err, "Error running list")
+
+ search_filter = "(objectClass=group)"
+
+ grouplist = self.samdb.search(base=self.samdb.domain_dn(),
+ scope=ldb.SCOPE_SUBTREE,
+ expression=search_filter,
+ attrs=["samaccountname"])
+
+ self.assertTrue(len(grouplist) > 0, "no groups found in samdb")
+
+ for groupobj in grouplist:
+ name = str(groupobj.get("samaccountname", idx=0))
+ found = self.assertMatch(out, name,
+ "group '%s' not found" % name)
+
+ def test_list_verbose(self):
+ (result, out, err) = self.runsubcmd("group", "list", "--verbose",
+ "-H", "ldap://%s" % os.environ["DC_SERVER"],
+ "-U%s%%%s" % (os.environ["DC_USERNAME"],
+ os.environ["DC_PASSWORD"]))
+ self.assertCmdSuccess(result, out, err, "Error running list --verbose")
+
+ # use the output to build a dictionary, where key=group-name,
+ # value=num-members
+ output_memberships = {}
+
+ # split the output by line, skipping the first 2 header lines
+ group_lines = out.split('\n')[2:-1]
+ for line in group_lines:
+ # split line by column whitespace (but keep the group name together
+ # if it contains spaces)
+ values = line.split(" ")
+ name = values[0]
+ num_members = int(values[-1])
+ output_memberships[name] = num_members
+
+ # build up a similar dict using an LDAP search
+ search_filter = "(objectClass=group)"
+ grouplist = self.samdb.search(base=self.samdb.domain_dn(),
+ scope=ldb.SCOPE_SUBTREE,
+ expression=search_filter,
+ attrs=["samaccountname", "member"])
+ self.assertTrue(len(grouplist) > 0, "no groups found in samdb")
+
+ ldap_memberships = {}
+ for groupobj in grouplist:
+ name = str(groupobj.get("samaccountname", idx=0))
+ num_members = len(groupobj.get("member", default=[]))
+ ldap_memberships[name] = num_members
+
+ # check the command output matches LDAP
+ self.assertTrue(output_memberships == ldap_memberships,
+ "Command output doesn't match LDAP results.\n" +
+ "Command='%s'\nLDAP='%s'" %(output_memberships,
+ ldap_memberships))
+
+ def test_list_full_dn(self):
+ (result, out, err) = self.runsubcmd("group", "list", "--full-dn",
+ "-H", "ldap://%s" % os.environ["DC_SERVER"],
+ "-U%s%%%s" % (os.environ["DC_USERNAME"],
+ os.environ["DC_PASSWORD"]))
+ self.assertCmdSuccess(result, out, err, "Error running list")
+
+ search_filter = "(objectClass=group)"
+
+ grouplist = self.samdb.search(base=self.samdb.domain_dn(),
+ scope=ldb.SCOPE_SUBTREE,
+ expression=search_filter,
+ attrs=[])
+
+ self.assertTrue(len(grouplist) > 0, "no groups found in samdb")
+
+ for groupobj in grouplist:
+ name = str(groupobj.get("dn", idx=0))
+ found = self.assertMatch(out, name,
+ "group '%s' not found" % name)
+
+ def test_list_base_dn(self):
+ base_dn = "CN=Users"
+ (result, out, err) = self.runsubcmd("group", "list", "--base-dn", base_dn,
+ "-H", "ldap://%s" % os.environ["DC_SERVER"],
+ "-U%s%%%s" % (os.environ["DC_USERNAME"],
+ os.environ["DC_PASSWORD"]))
+ self.assertCmdSuccess(result, out, err, "Error running list")
+
+ search_filter = "(objectClass=group)"
+
+ grouplist = self.samdb.search(base=self.samdb.normalize_dn_in_domain(base_dn),
+ scope=ldb.SCOPE_SUBTREE,
+ expression=search_filter,
+ attrs=["name"])
+
+ self.assertTrue(len(grouplist) > 0, "no groups found in samdb")
+
+ for groupobj in grouplist:
+ name = str(groupobj.get("name", idx=0))
+ found = self.assertMatch(out, name,
+ "group '%s' not found" % name)
+
+ def test_listmembers(self):
+ (result, out, err) = self.runsubcmd("group", "listmembers", "Domain Users",
+ "-H", "ldap://%s" % os.environ["DC_SERVER"],
+ "-U%s%%%s" % (os.environ["DC_USERNAME"],
+ os.environ["DC_PASSWORD"]))
+ self.assertCmdSuccess(result, out, err, "Error running listmembers")
+
+ search_filter = "(|(primaryGroupID=513)(memberOf=CN=Domain Users,CN=Users,%s))" % self.samdb.domain_dn()
+
+ grouplist = self.samdb.search(base=self.samdb.domain_dn(),
+ scope=ldb.SCOPE_SUBTREE,
+ expression=search_filter,
+ attrs=["samAccountName"])
+
+ self.assertTrue(len(grouplist) > 0, "no groups found in samdb")
+
+ for groupobj in grouplist:
+ name = str(groupobj.get("samAccountName", idx=0))
+ found = self.assertMatch(out, name, "group '%s' not found" % name)
+
+ def test_listmembers_hide_expired(self):
+ expire_username = "expireUser"
+ expire_user = self._random_user({"name": expire_username})
+ self._create_user(expire_user)
+
+ (result, out, err) = self.runsubcmd(
+ "group",
+ "listmembers",
+ "Domain Users",
+ "--hide-expired",
+ "-H",
+ "ldap://%s" % os.environ["DC_SERVER"],
+ "-U%s%%%s" % (os.environ["DC_USERNAME"],
+ os.environ["DC_PASSWORD"]))
+ self.assertCmdSuccess(result, out, err, "Error running listmembers")
+ self.assertTrue(expire_username in out,
+ "user '%s' not found" % expire_username)
+
+ # user will be expired one second ago
+ self.samdb.setexpiry(
+ "(sAMAccountname=%s)" % expire_username,
+ -1,
+ False)
+
+ (result, out, err) = self.runsubcmd(
+ "group",
+ "listmembers",
+ "Domain Users",
+ "--hide-expired",
+ "-H",
+ "ldap://%s" % os.environ["DC_SERVER"],
+ "-U%s%%%s" % (os.environ["DC_USERNAME"],
+ os.environ["DC_PASSWORD"]))
+ self.assertCmdSuccess(result, out, err, "Error running listmembers")
+ self.assertFalse(expire_username in out,
+ "user '%s' not found" % expire_username)
+
+ self.samdb.deleteuser(expire_username)
+
+ def test_listmembers_hide_disabled(self):
+ disable_username = "disableUser"
+ disable_user = self._random_user({"name": disable_username})
+ self._create_user(disable_user)
+
+ (result, out, err) = self.runsubcmd(
+ "group",
+ "listmembers",
+ "Domain Users",
+ "--hide-disabled",
+ "-H",
+ "ldap://%s" % os.environ["DC_SERVER"],
+ "-U%s%%%s" % (os.environ["DC_USERNAME"],
+ os.environ["DC_PASSWORD"]))
+ self.assertCmdSuccess(result, out, err, "Error running listmembers")
+ self.assertTrue(disable_username in out,
+ "user '%s' not found" % disable_username)
+
+ self.samdb.disable_account("(sAMAccountname=%s)" % disable_username)
+
+ (result, out, err) = self.runsubcmd(
+ "group",
+ "listmembers",
+ "Domain Users",
+ "--hide-disabled",
+ "-H",
+ "ldap://%s" % os.environ["DC_SERVER"],
+ "-U%s%%%s" % (os.environ["DC_USERNAME"],
+ os.environ["DC_PASSWORD"]))
+ self.assertCmdSuccess(result, out, err, "Error running listmembers")
+ self.assertFalse(disable_username in out,
+ "user '%s' not found" % disable_username)
+
+ self.samdb.deleteuser(disable_username)
+
+ def test_listmembers_full_dn(self):
+ (result, out, err) = self.runsubcmd("group", "listmembers", "Domain Users",
+ "--full-dn",
+ "-H", "ldap://%s" % os.environ["DC_SERVER"],
+ "-U%s%%%s" % (os.environ["DC_USERNAME"],
+ os.environ["DC_PASSWORD"]))
+ self.assertCmdSuccess(result, out, err, "Error running listmembers")
+
+ search_filter = "(|(primaryGroupID=513)(memberOf=CN=Domain Users,CN=Users,%s))" % self.samdb.domain_dn()
+
+ grouplist = self.samdb.search(base=self.samdb.domain_dn(),
+ scope=ldb.SCOPE_SUBTREE,
+ expression=search_filter,
+ attrs=["dn"])
+
+ self.assertTrue(len(grouplist) > 0, "no groups found in samdb")
+
+ for groupobj in grouplist:
+ name = str(groupobj.get("dn", idx=0))
+ found = self.assertMatch(out, name, "group '%s' not found" % name)
+
+
+ def test_move(self):
+ full_ou_dn = str(self.samdb.normalize_dn_in_domain("OU=movetest_grp"))
+ self.addCleanup(self.samdb.delete, full_ou_dn, ["tree_delete:1"])
+
+ (result, out, err) = self.runsubcmd("ou", "add", full_ou_dn)
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "There shouldn't be any error message")
+ self.assertIn('Added ou "%s"' % full_ou_dn, out)
+
+ for group in self.groups:
+ (result, out, err) = self.runsubcmd(
+ "group", "move", group["name"], full_ou_dn)
+ self.assertCmdSuccess(result, out, err, "Error running move")
+ self.assertIn('Moved group "%s" into "%s"' %
+ (group["name"], full_ou_dn), out)
+
+ # Should fail as groups objects are in OU
+ (result, out, err) = self.runsubcmd("ou", "delete", full_ou_dn)
+ self.assertCmdFail(result)
+ self.assertIn(("subtree_delete: Unable to delete a non-leaf node "
+ "(it has %d children)!") % len(self.groups), err)
+
+ for group in self.groups:
+ new_dn = "CN=Users,%s" % self.samdb.domain_dn()
+ (result, out, err) = self.runsubcmd(
+ "group", "move", group["name"], new_dn)
+ self.assertCmdSuccess(result, out, err, "Error running move")
+ self.assertIn('Moved group "%s" into "%s"' %
+ (group["name"], new_dn), out)
+
+ def test_show(self):
+ """Assert that we can show a group correctly."""
+ (result, out, err) = self.runsubcmd("group", "show", "Domain Users",
+ "-H", "ldap://%s" % os.environ["DC_SERVER"],
+ "-U%s%%%s" % (os.environ["DC_USERNAME"],
+ os.environ["DC_PASSWORD"]))
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+ self.assertIn("dn: CN=Domain Users,CN=Users,DC=addom,DC=samba,DC=example,DC=com", out)
+
+ def test_rename_samaccountname(self):
+ """rename the samaccountname of all groups"""
+ for group in self.groups:
+ new_name = "new_samaccountname_of_" + group["name"]
+
+ # change samaccountname
+ (result, out, err) = self.runsubcmd("group", "rename", group["name"],
+ "--samaccountname=" + new_name)
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+ self.assertIn('successfully', out)
+
+ found = self._find_group(new_name)
+ self.assertEqual("%s" % found.get("description"), group["description"])
+ if not "cn" in group or str(group["cn"]) == str(group["name"]):
+ self.assertEqual("%s" % found.get("cn"), new_name)
+ else:
+ self.assertEqual("%s" % found.get("cn"), group["cn"])
+
+ # trying to remove the samaccountname throws an error
+ (result, out, err) = self.runsubcmd("group", "rename", new_name,
+ "--samaccountname=")
+ self.assertCmdFail(result)
+ self.assertIn('Failed to rename group', err)
+ self.assertIn('delete protected attribute', err)
+
+ # reset changes
+ (result, out, err) = self.runsubcmd("group", "rename", new_name,
+ "--samaccountname=" + group["name"])
+ self.assertCmdSuccess(result, out, err)
+ if "cn" in group:
+ (result, out, err) = self.runsubcmd("group", "rename", group["name"],
+ "--force-new-cn=%s" % group["cn"])
+ self.assertCmdSuccess(result, out, err)
+
+ def test_rename_cn_mail(self):
+ """change and remove the cn and mail attributes of all groups"""
+ for group in self.groups:
+ new_mail = "new mail of " + group["name"]
+ new_cn = "new cn of " + group["name"]
+
+ # change attributes
+ (result, out, err) = self.runsubcmd("group", "rename", group["name"],
+ "--mail-address=" + new_mail,
+ "--force-new-cn=" + new_cn)
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+ self.assertIn('successfully', out)
+
+ found = self._find_group(group["name"])
+ self.assertEqual("%s" % found.get("mail"), new_mail)
+ self.assertEqual("%s" % found.get("cn"), new_cn)
+
+ # remove mail
+ (result, out, err) = self.runsubcmd("group", "rename", group["name"],
+ "--mail-address=")
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+ self.assertIn('successfully', out)
+
+ found = self._find_group(group["name"])
+ self.assertEqual(found.get("mail"), None)
+
+ # trying to remove cn (throws an error)
+ (result, out, err) = self.runsubcmd("group", "rename", group["name"],
+ "--force-new-cn=")
+ self.assertCmdFail(result)
+ self.assertIn("Failed to rename group", err)
+ self.assertIn("delete protected attribute", err)
+
+ # reset CN (mail is already empty)
+ (result, out, err) = self.runsubcmd("group", "rename", group["name"],
+ "--reset-cn")
+ self.assertCmdSuccess(result, out, err)
+
+ def _randomGroup(self, base={}):
+ """create a group with random attribute values, you can specify base
+ attributes"""
+ group = {
+ "name": self.randomName(),
+ "description": self.randomName(count=100),
+ "createGroupFn": self._create_group,
+ "checkGroupFn": self._check_group,
+ }
+ group.update(base)
+ return group
+
+ def _randomPosixGroup(self, base={}):
+ """create a group with random attribute values and additional RFC2307
+ attributes, you can specify base attributes"""
+ group = self._randomGroup({})
+ group.update(base)
+ posixAttributes = {
+ "unixdomain": self.randomName(),
+ "gidNumber": self.randomXid(),
+ "createGroupFn": self._create_posix_group,
+ "checkGroupFn": self._check_posix_group,
+ }
+ group.update(posixAttributes)
+ group.update(base)
+ return group
+
+ def _randomUnixGroup(self, base={}):
+ """create a group with random attribute values and additional RFC2307
+ attributes, you can specify base attributes"""
+ group = self._randomGroup({})
+ group.update(base)
+ posixAttributes = {
+ "gidNumber": self.randomXid(),
+ "createGroupFn": self._create_unix_group,
+ "checkGroupFn": self._check_unix_group,
+ }
+ group.update(posixAttributes)
+ group.update(base)
+ return group
+
+ def _check_group(self, group):
+ """ check if a group from SamDB has the same attributes as
+ its template """
+ found = self._find_group(group["name"])
+
+ self.assertEqual("%s" % found.get("name"), group["name"])
+ self.assertEqual("%s" % found.get("description"), group["description"])
+
+ def _check_posix_group(self, group):
+ """ check if a posix_group from SamDB has the same attributes as
+ its template """
+ found = self._find_group(group["name"])
+
+ self.assertEqual("%s" % found.get("gidNumber"), "%s" %
+ group["gidNumber"])
+ self._check_group(group)
+
+ def _check_unix_group(self, group):
+ """ check if a unix_group from SamDB has the same attributes as its
+template """
+ found = self._find_group(group["name"])
+
+ self.assertEqual("%s" % found.get("gidNumber"), "%s" %
+ group["gidNumber"])
+ self._check_group(group)
+
+ def _create_group(self, group):
+ return self.runsubcmd("group", "add", group["name"],
+ "--description=%s" % group["description"],
+ "-H", "ldap://%s" % os.environ["DC_SERVER"],
+ "-U%s%%%s" % (os.environ["DC_USERNAME"],
+ os.environ["DC_PASSWORD"]))
+
+ def _create_posix_group(self, group):
+ """ create a new group with RFC2307 attributes """
+ return self.runsubcmd("group", "add", group["name"],
+ "--description=%s" % group["description"],
+ "--nis-domain=%s" % group["unixdomain"],
+ "--gid-number=%s" % group["gidNumber"],
+ "-H", "ldap://%s" % os.environ["DC_SERVER"],
+ "-U%s%%%s" % (os.environ["DC_USERNAME"],
+ os.environ["DC_PASSWORD"]))
+
+ def _create_unix_group(self, group):
+ """ Add RFC2307 attributes to a group"""
+ self._create_group(group)
+ return self.runsubcmd("group", "addunixattrs", group["name"],
+ "%s" % group["gidNumber"],
+ "-H", "ldap://%s" % os.environ["DC_SERVER"],
+ "-U%s%%%s" % (os.environ["DC_USERNAME"],
+ os.environ["DC_PASSWORD"]))
+
+ def _find_group(self, name):
+ search_filter = ("(&(sAMAccountName=%s)(objectCategory=%s,%s))" %
+ (ldb.binary_encode(name),
+ "CN=Group,CN=Schema,CN=Configuration",
+ self.samdb.domain_dn()))
+ grouplist = self.samdb.search(base=self.samdb.domain_dn(),
+ scope=ldb.SCOPE_SUBTREE,
+ expression=search_filter)
+ if grouplist:
+ return grouplist[0]
+ else:
+ return None
+
+ def test_stats(self):
+ (result, out, err) = self.runsubcmd("group", "stats",
+ "-H", "ldap://%s" % os.environ["DC_SERVER"],
+ "-U%s%%%s" % (os.environ["DC_USERNAME"],
+ os.environ["DC_PASSWORD"]))
+ self.assertCmdSuccess(result, out, err, "Error running stats")
+
+ # sanity-check the command reports 'total groups' correctly
+ search_filter = "(objectClass=group)"
+ grouplist = self.samdb.search(base=self.samdb.domain_dn(),
+ scope=ldb.SCOPE_SUBTREE,
+ expression=search_filter,
+ attrs=[])
+
+ total_groups = len(grouplist)
+ self.assertTrue("Total groups: {0}".format(total_groups) in out,
+ "Total groups not reported correctly")
+
+ def _random_user(self, base=None):
+ """
+ create a user with random attribute values, you can specify
+ base attributes
+ """
+ if base is None:
+ base = {}
+ user = {
+ "name": self.randomName(),
+ "password": self.random_password(16),
+ "surname": self.randomName(),
+ "given-name": self.randomName(),
+ "job-title": self.randomName(),
+ "department": self.randomName(),
+ "company": self.randomName(),
+ "description": self.randomName(count=100),
+ "createUserFn": self._create_user,
+ }
+ user.update(base)
+ return user
+
+ def _create_user(self, user):
+ return self.runsubcmd(
+ "user",
+ "add",
+ user["name"],
+ user["password"],
+ "--surname=%s" % user["surname"],
+ "--given-name=%s" % user["given-name"],
+ "--job-title=%s" % user["job-title"],
+ "--department=%s" % user["department"],
+ "--description=%s" % user["description"],
+ "--company=%s" % user["company"],
+ "-H",
+ "ldap://%s" % os.environ["DC_SERVER"],
+ "-U%s%%%s" % (os.environ["DC_USERNAME"],
+ os.environ["DC_PASSWORD"]))
diff --git a/python/samba/tests/samba_tool/group_edit.sh b/python/samba/tests/samba_tool/group_edit.sh
new file mode 100755
index 0000000..3db2c66
--- /dev/null
+++ b/python/samba/tests/samba_tool/group_edit.sh
@@ -0,0 +1,228 @@
+#!/bin/sh
+#
+# Test for 'samba-tool group edit'
+
+if [ $# -lt 3 ]; then
+ cat <<EOF
+Usage: group_edit.sh SERVER USERNAME PASSWORD
+EOF
+ exit 1
+fi
+
+SERVER="$1"
+USERNAME="$2"
+PASSWORD="$3"
+
+samba_ldbsearch=ldbsearch
+if test -x $BINDIR/ldbsearch; then
+ samba_ldbsearch=$BINDIR/ldbsearch
+fi
+
+STpath=$(pwd)
+. $STpath/testprogs/blackbox/subunit.sh
+
+display_name="Users in Göttingen"
+display_name_b64="VXNlcnMgaW4gR8O2dHRpbmdlbg=="
+display_name_new="Users in Goettingen"
+# attribute value including control character
+# echo -e "test \a string" | base64
+display_name_con_b64="dGVzdCAHIHN0cmluZwo="
+
+tmpeditor=$(mktemp --suffix .sh -p $SELFTEST_TMPDIR samba-tool-editor-XXXXXXXX)
+chmod +x $tmpeditor
+
+create_test_group()
+{
+ $PYTHON ${STpath}/source4/scripting/bin/samba-tool \
+ group add testgroup1 \
+ -H "ldap://$SERVER" "-U$USERNAME" "--password=$PASSWORD"
+}
+
+delete_test_group()
+{
+ $PYTHON ${STpath}/source4/scripting/bin/samba-tool \
+ group delete testgroup1 \
+ -H "ldap://$SERVER" "-U$USERNAME" "--password=$PASSWORD"
+}
+
+create_test_user()
+{
+ $PYTHON ${STpath}/source4/scripting/bin/samba-tool \
+ user create testuser1 --random-password \
+ -H "ldap://$SERVER" "-U$USERNAME" "--password=$PASSWORD"
+}
+
+delete_test_user()
+{
+ $PYTHON ${STpath}/source4/scripting/bin/samba-tool \
+ user delete testuser1 \
+ -H "ldap://$SERVER" "-U$USERNAME" "--password=$PASSWORD"
+}
+
+add_member()
+{
+ user_dn=$($PYTHON ${STpath}/source4/scripting/bin/samba-tool \
+ user show testuser1 --attributes=dn \
+ -H "ldap://$SERVER" "-U$USERNAME" "--password=$PASSWORD" |
+ grep ^dn: | cut -d' ' -f2)
+
+ # create editor.sh
+ cat >$tmpeditor <<EOF
+#!/usr/bin/env bash
+group_ldif="\$1"
+
+grep -v '^$' \$group_ldif > \${group_ldif}.tmp
+echo "member: $user_dn" >> \${group_ldif}.tmp
+
+mv \${group_ldif}.tmp \$group_ldif
+EOF
+
+ $PYTHON ${STpath}/source4/scripting/bin/samba-tool \
+ group edit testgroup1 --editor=$tmpeditor \
+ -H "ldap://$SERVER" "-U$USERNAME" "--password=$PASSWORD"
+}
+
+get_member()
+{
+ $PYTHON ${STpath}/source4/scripting/bin/samba-tool \
+ group listmembers testgroup1 \
+ -H "ldap://$SERVER" "-U$USERNAME" "--password=$PASSWORD"
+}
+
+# Test edit group - add base64 attributes
+add_attribute_base64()
+{
+ # create editor.sh
+ cat >$tmpeditor <<EOF
+#!/usr/bin/env bash
+group_ldif="\$1"
+
+grep -v '^$' \$group_ldif > \${group_ldif}.tmp
+echo "displayName:: $display_name_b64" >> \${group_ldif}.tmp
+
+mv \${group_ldif}.tmp \$group_ldif
+EOF
+
+ $PYTHON ${STpath}/source4/scripting/bin/samba-tool group edit \
+ testgroup1 --editor=$tmpeditor \
+ -H "ldap://$SERVER" "-U$USERNAME" "--password=$PASSWORD"
+}
+
+get_attribute_base64()
+{
+ $samba_ldbsearch '(sAMAccountName=testgroup1)' displayName \
+ -H "ldap://$SERVER" "-U$USERNAME" "--password=$PASSWORD"
+}
+
# Remove the displayName attribute from testgroup1 by filtering the
# "displayName" lines out of the group LDIF inside the editor script.
delete_attribute()
{
	# create editor.sh
	# Use ">" (truncate) rather than ">>" (append) so a stale .tmp
	# file from an earlier run can never leak old lines into the
	# LDIF; this also matches the other generated editor scripts.
	cat >$tmpeditor <<EOF
#!/usr/bin/env bash
group_ldif="\$1"

grep -v '^displayName' \$group_ldif > \${group_ldif}.tmp
mv \${group_ldif}.tmp \$group_ldif
EOF
	$PYTHON ${STpath}/source4/scripting/bin/samba-tool group edit \
		testgroup1 --editor=$tmpeditor \
		-H "ldap://$SERVER" "-U$USERNAME" "--password=$PASSWORD"
}
+
# Test edit group - add base64 attribute value including control character
add_attribute_base64_control()
{
	# create editor.sh
	cat >$tmpeditor <<EOF
#!/usr/bin/env bash
group_ldif="\$1"

grep -v '^$' \$group_ldif > \${group_ldif}.tmp
echo "displayName:: $display_name_con_b64" >> \${group_ldif}.tmp

mv \${group_ldif}.tmp \$group_ldif
EOF
	$PYTHON ${STpath}/source4/scripting/bin/samba-tool group edit \
		testgroup1 --editor=$tmpeditor \
		-H "ldap://$SERVER" "-U$USERNAME" "--password=$PASSWORD"
}

# Show displayName via "group show"; the embedded control character
# keeps the value in base64 ("displayName::") form on output.
get_attribute_base64_control()
{
	$PYTHON ${STpath}/source4/scripting/bin/samba-tool group show \
		testgroup1 --attributes=displayName \
		-H "ldap://$SERVER" "-U$USERNAME" "--password=$PASSWORD"
}

# Show displayName via "group show"; the caller expects clear-text
# ("displayName:") output here.
get_attribute_force_no_base64()
{
	# LDB_FLAG_FORCE_NO_BASE64_LDIF should be used here.
	$PYTHON ${STpath}/source4/scripting/bin/samba-tool group show \
		testgroup1 --attributes=displayName \
		-H "ldap://$SERVER" "-U$USERNAME" "--password=$PASSWORD"
}
+
# Test edit group - change base64 attribute value including control character
change_attribute_base64_control()
{
	# create editor.sh
	# Replaces the base64 ("::") value with a clear-text one in place.
	cat >$tmpeditor <<EOF
#!/usr/bin/env bash
group_ldif="\$1"

sed -i -e 's/displayName:: $display_name_con_b64/displayName: $display_name/' \
	\$group_ldif
EOF
	$PYTHON ${STpath}/source4/scripting/bin/samba-tool group edit \
		testgroup1 --editor=$tmpeditor \
		-H "ldap://$SERVER" "-U$USERNAME" "--password=$PASSWORD"
}

# Test edit group - change attributes with LDB_FLAG_FORCE_NO_BASE64_LDIF
change_attribute_force_no_base64()
{
	# create editor.sh
	# Expects that the original attribute is available as clear text,
	# because the LDB_FLAG_FORCE_NO_BASE64_LDIF should be used here.
	cat >$tmpeditor <<EOF
#!/usr/bin/env bash
group_ldif="\$1"

sed -i -e 's/displayName: $display_name/displayName: $display_name_new/' \
	\$group_ldif
EOF

	$PYTHON ${STpath}/source4/scripting/bin/samba-tool group edit \
		testgroup1 --editor=$tmpeditor \
		-H "ldap://$SERVER" "-U$USERNAME" "--password=$PASSWORD"
}

# Show displayName after the force-no-base64 edit above.
get_changed_attribute_force_no_base64()
{
	$PYTHON ${STpath}/source4/scripting/bin/samba-tool group show \
		testgroup1 --attributes=displayName \
		-H "ldap://$SERVER" "-U$USERNAME" "--password=$PASSWORD"
}
+
failed=0

# Exercise "samba-tool group edit" end-to-end: membership editing,
# base64 attributes, control characters and the forced clear-text
# (no-base64) behaviour. Each testit* increments $failed on error so
# every test always runs; the count is the script's exit status.
testit "create_test_group" create_test_group || failed=$(expr $failed + 1)
testit "create_test_user" create_test_user || failed=$(expr $failed + 1)
testit "add_member" add_member || failed=$(expr $failed + 1)
testit_grep "get_member" "^testuser1" get_member || failed=$(expr $failed + 1)
testit "add_attribute_base64" add_attribute_base64 || failed=$(expr $failed + 1)
testit_grep "get_attribute_base64" "^displayName:: $display_name_b64" get_attribute_base64 || failed=$(expr $failed + 1)
testit "delete_attribute" delete_attribute || failed=$(expr $failed + 1)
testit "add_attribute_base64_control" add_attribute_base64_control || failed=$(expr $failed + 1)
testit_grep "get_attribute_base64_control" "^displayName:: $display_name_con_b64" get_attribute_base64_control || failed=$(expr $failed + 1)
testit "change_attribute_base64_control" change_attribute_base64_control || failed=$(expr $failed + 1)
testit_grep "get_attribute_base64" "^displayName:: $display_name_b64" get_attribute_base64 || failed=$(expr $failed + 1)
testit_grep "get_attribute_force_no_base64" "^displayName: $display_name" get_attribute_force_no_base64 || failed=$(expr $failed + 1)
testit "change_attribute_force_no_base64" change_attribute_force_no_base64 || failed=$(expr $failed + 1)
testit_grep "get_changed_attribute_force_no_base64" "^displayName: $display_name_new" get_changed_attribute_force_no_base64 || failed=$(expr $failed + 1)
testit "delete_test_group" delete_test_group || failed=$(expr $failed + 1)
testit "delete_test_user" delete_test_user || failed=$(expr $failed + 1)

# Remove the generated editor script.
rm -f $tmpeditor

exit $failed
diff --git a/python/samba/tests/samba_tool/help.py b/python/samba/tests/samba_tool/help.py
new file mode 100644
index 0000000..fa7836d
--- /dev/null
+++ b/python/samba/tests/samba_tool/help.py
@@ -0,0 +1,81 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Catalyst IT Ltd 2017.
+#
+# Originally written by Douglas Bagnall <douglas.bagnall@catalyst.net.nz>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import re
+from samba.tests.samba_tool.base import SambaToolCmdTest
+from samba.tests import BlackboxProcessError
+from samba.tests import check_help_consistency
+from samba.common import get_string
+
+
class HelpTestCase(SambaToolCmdTest):
    """Tests for samba-tool help and --help

    We test for consistency and lack of crashes."""

    def _find_sub_commands(self, args):
        self.runcmd(*args)

    def test_help_tree(self):
        # we call actual subprocesses, because we are probing the
        # actual help output where there is no sub-command. Don't copy
        # this if you have an actual command: for that use
        # self.runcmd() or self.runsubcmd().
        known_commands = [[]]
        failed_commands = []

        # Breadth-first walk of the command tree, starting from the
        # bare "samba-tool". The depth cap of 4 bounds the walk;
        # deeper nesting would be silently ignored — TODO confirm the
        # tool never nests further.
        for i in range(4):
            new_commands = []
            for c in known_commands:
                line = ' '.join(['samba-tool'] + c + ['--help'])
                try:
                    output = self.check_output(line)
                except BlackboxProcessError as e:
                    # A failing command still yields stdout worth
                    # parsing; record the failure and carry on.
                    output = e.stdout
                    failed_commands.append(c)
                output = get_string(output)
                # Sub-commands are listed after the
                # "Available subcommands:" banner as "name  - desc".
                tail = output.partition('Available subcommands:')[2]
                subcommands = re.findall(r'^\s*([\w-]+)\s+-', tail,
                                         re.MULTILINE)
                for s in subcommands:
                    new_commands.append(c + [s])

                # check that `samba-tool help X Y` == `samba-tool X Y --help`
                line = ' '.join(['samba-tool', 'help'] + c)
                try:
                    output2 = self.check_output(line)
                except BlackboxProcessError as e:
                    output2 = e.stdout
                    failed_commands.append(c)

                output2 = get_string(output2)
                self.assertEqual(output, output2)

                err = check_help_consistency(output,
                                             options_start='Options:',
                                             options_end='Available subcommands:')
                if err is not None:
                    self.fail("consistency error with %s:\n%s" % (line, err))

            # No new sub-commands found at this depth: the tree is
            # fully explored.
            if not new_commands:
                break

            known_commands = new_commands

        self.assertEqual(failed_commands, [])
diff --git a/python/samba/tests/samba_tool/join.py b/python/samba/tests/samba_tool/join.py
new file mode 100644
index 0000000..0cbd319
--- /dev/null
+++ b/python/samba/tests/samba_tool/join.py
@@ -0,0 +1,31 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2016
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import os
+import ldb
+from samba.tests.samba_tool.base import SambaToolCmdTest
+
+
class JoinCmdTestCase(SambaToolCmdTest):
    """Test for samba-tool domain join subcommand"""

    def test_rejoin(self):
        """Run domain join to confirm it errors because we are already joined"""
        realm = os.environ["REALM"]
        credentials = "-U%s%%%s" % (os.environ["USERNAME"],
                                    os.environ["PASSWORD"])
        (result, out, err) = self.runsubcmd("domain", "join", realm, "dc",
                                            credentials)

        # The environment is already joined as a DC, so the command
        # must fail and must refuse to remove the existing account.
        self.assertCmdFail(result)
        self.assertTrue("Not removing account" in err, "Should fail with exception")
diff --git a/python/samba/tests/samba_tool/join_lmdb_size.py b/python/samba/tests/samba_tool/join_lmdb_size.py
new file mode 100644
index 0000000..7b43c45
--- /dev/null
+++ b/python/samba/tests/samba_tool/join_lmdb_size.py
@@ -0,0 +1,152 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Catalyst IT Ltd. 2019
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from samba.tests.samba_tool.base import SambaToolCmdTest
+import os
+import shutil
+
+
class JoinLmdbSizeTestCase(SambaToolCmdTest):
    """Test setting of the lmdb map size during join"""

    def setUp(self):
        super().setUp()
        self.tempsambadir = os.path.join(self.tempdir, "samba")
        os.mkdir(self.tempsambadir)
        # Use the (unique) temp directory name as the netbios name so
        # parallel test runs cannot collide on the same account.
        (_, name) = os.path.split(self.tempdir)
        self.netbios_name = name

    # join a domain and set the lmdb map size to size
    #
    # returns the tuple (ret, stdout, stderr)
    def join(self, size=None, role=None):
        """Join os.environ['REALM'] as *role*, optionally passing
        --backend-store-size=*size*, into self.tempsambadir."""
        command = (
            "samba-tool " +
            "domain join " +
            os.environ["REALM"] + " " +
            role + " " +
            ("-U%s%%%s " % (os.environ["USERNAME"], os.environ["PASSWORD"])) +
            ("--targetdir=%s " % self.tempsambadir) +
            ("--option=netbiosname=%s " % self.netbios_name) +
            "--backend-store=mdb "
        )
        if size:
            command += ("--backend-store-size=%s" % size)

        (ret, stdout, stderr) = self.run_command(command)
        # Only clean up the joined account when the join succeeded.
        if ret == 0:
            self.cleanup_join(self.netbios_name)

        return (ret, stdout, stderr)

    def is_rodc(self):
        """Return True if the freshly joined sam.ldb says we are an RODC."""
        url = "ldb://%s/private/sam.ldb" % self.tempsambadir
        samdb = self.getSamDB("-H", url)
        return samdb.am_rodc()

    #
    # Get the lmdb map size for the specified database
    #
    # While there is a python lmdb package available we use the lmdb command
    # line utilities to avoid introducing a dependency.
    #
    def get_lmdb_environment_size(self, path):
        (result, out, err) = self.run_command("mdb_stat -ne %s" % path)
        if result:
            self.fail("Unable to run mdb_stat\n")
        for line in out.split("\n"):
            line = line.strip()
            if line.startswith("Map size:"):
                line = line.replace(" ", "")
                (label, size) = line.split(":")
                return int(size)
        # Fail explicitly rather than falling through and returning
        # None, which would break the "%d" formatting in
        # check_lmdb_environment_sizes().
        self.fail("Unable to parse mdb_stat output for %s" % path)

    #
    # Check the lmdb files created by join and ensure that the map size
    # has been set to size.
    #
    # Currently this is all the *.ldb files in private/sam.ldb.d
    #
    def check_lmdb_environment_sizes(self, size):
        directory = os.path.join(self.tempsambadir, "private", "sam.ldb.d")
        for name in os.listdir(directory):
            if name.endswith(".ldb"):
                path = os.path.join(directory, name)
                s = self.get_lmdb_environment_size(path)
                if s != size:
                    self.fail("File %s, size=%d larger than %d" %
                              (name, s, size))

    #
    # Ensure that if --backend-store-size is not specified the default of
    # 8Gb is used
    def test_join_as_dc_default(self):
        (result, out, err) = self.join(role="DC")
        self.assertEqual(0, result)
        self.check_lmdb_environment_sizes(8 * 1024 * 1024 * 1024)
        self.assertFalse(self.is_rodc())

    #
    # Join as an DC with the lmdb backend size set to 1Gb
    def test_join_as_dc(self):
        (result, out, err) = self.join("1Gb", "DC")
        self.assertEqual(0, result)
        self.check_lmdb_environment_sizes(1 * 1024 * 1024 * 1024)
        self.assertFalse(self.is_rodc())

    #
    # Join as an RODC with the lmdb backend size set to 128Mb
    def test_join_as_rodc(self):
        (result, out, err) = self.join("128Mb", "RODC")
        self.assertEqual(0, result)
        self.check_lmdb_environment_sizes(128 * 1024 * 1024)
        self.assertTrue(self.is_rodc())

    #
    # Join as an RODC without --backend-store-size, expecting the
    # 8Gb default
    def test_join_as_rodc_default(self):
        (result, out, err) = self.join(role="RODC")
        self.assertEqual(0, result)
        self.check_lmdb_environment_sizes(8 * 1024 * 1024 * 1024)
        self.assertTrue(self.is_rodc())

    def test_no_unit_suffix(self):
        """A bare number must be rejected with a suffix error."""
        (result, out, err) = self.run_command(
            'samba-tool domain join --backend-store-size "2"')
        self.assertGreater(result, 0)
        self.assertRegex(err,
                         r"--backend-store-size invalid suffix ''")

    def test_invalid_unit_suffix(self):
        """An unknown unit suffix must be rejected."""
        (result, out, err) = self.run_command(
            'samba-tool domain join --backend-store-size "2 cd"')
        self.assertGreater(result, 0)
        self.assertRegex(err,
                         r"--backend-store-size invalid suffix 'cd'")

    def test_non_numeric(self):
        """A non-numeric size must be rejected."""
        (result, out, err) = self.run_command(
            'samba-tool domain join --backend-store-size "two Gb"')
        self.assertGreater(result, 0)
        self.assertRegex(
            err,
            r"backend-store-size option requires a numeric value, with an"
            " optional unit suffix")

    def tearDown(self):
        super().tearDown()
        shutil.rmtree(self.tempsambadir)
diff --git a/python/samba/tests/samba_tool/join_member.py b/python/samba/tests/samba_tool/join_member.py
new file mode 100644
index 0000000..c2ab02f
--- /dev/null
+++ b/python/samba/tests/samba_tool/join_member.py
@@ -0,0 +1,71 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) David Mulder <dmulder@samba.org> 2021
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import os
+import re
+from samba.tests.samba_tool.base import SambaToolCmdTest
+from samba.param import LoadParm
+from samba.netcmd.common import netcmd_dnsname
+
class JoinMemberCmdTestCase(SambaToolCmdTest):
    """Test for samba-tool domain join subcommand"""

    def test_join_member(self):
        """Run a domain member join, and check that dns is updated"""
        smb_conf = os.environ["SERVERCONFFILE"]
        zone = os.environ["REALM"].lower()
        lp = LoadParm()
        lp.load(smb_conf)
        dnsname = netcmd_dnsname(lp)
        # Fetch the existing dns A records
        (result, out, err) = self.runsubcmd("dns", "query",
                                            os.environ["DC_SERVER"],
                                            zone, dnsname, 'A',
                                            "-s", smb_conf,
                                            "-U%s%%%s" % (os.environ["DC_USERNAME"],
                                                          os.environ["DC_PASSWORD"]))
        self.assertCmdSuccess(result, out, err, 'Failed to find the record')

        # Raw string: '\s' and '\d' are invalid escape sequences in a
        # plain string literal and warn on modern Python.
        existing_records = re.findall(r'A:\s+(\d+\.\d+\.\d+\.\d+)\s', out)

        # Remove the existing records
        for record in existing_records:
            (result, out, err) = self.runsubcmd("dns", "delete",
                                                os.environ["DC_SERVER"],
                                                zone, dnsname, 'A', record,
                                                "-s", smb_conf,
                                                "-U%s%%%s" % (os.environ["DC_USERNAME"],
                                                              os.environ["DC_PASSWORD"]))
            self.assertCmdSuccess(result, out, err, 'Failed to remove record')

        # Perform the s3 member join (net ads join)
        (result, out, err) = self.runsubcmd("domain", "join",
                                            os.environ["REALM"], "member",
                                            "-s", smb_conf,
                                            "-U%s%%%s" % (os.environ["DC_USERNAME"],
                                                          os.environ["DC_PASSWORD"]))
        self.assertCmdSuccess(result, out, err, 'Failed to join member')

        # Ensure the dns A record was created
        (result, out, err) = self.runsubcmd("dns", "query",
                                            os.environ["DC_SERVER"],
                                            zone, dnsname, 'A',
                                            "-s", smb_conf,
                                            "-U%s%%%s" % (os.environ["DC_USERNAME"],
                                                          os.environ["DC_PASSWORD"]))
        self.assertCmdSuccess(result, out, err,
                              'Failed to find dns host records for %s' % dnsname)
diff --git a/python/samba/tests/samba_tool/ntacl.py b/python/samba/tests/samba_tool/ntacl.py
new file mode 100644
index 0000000..1173101
--- /dev/null
+++ b/python/samba/tests/samba_tool/ntacl.py
@@ -0,0 +1,247 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Andrew Bartlett 2012
+#
+# Based on user.py:
+# Copyright (C) Sean Dague <sdague@linux.vnet.ibm.com> 2011
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import os
+from samba.tests.samba_tool.base import SambaToolCmdTest
+from samba.tests import env_loadparm
+import random
+
+
class NtACLCmdSysvolTestCase(SambaToolCmdTest):
    """Tests for samba-tool ntacl sysvol* subcommands"""

    def test_ntvfs(self):
        # With --use-ntvfs only the stored NT ACL is reset, and the
        # tool warns about that on stderr.
        (result, out, err) = self.runsubcmd("ntacl", "sysvolreset",
                                            "--use-ntvfs")
        self.assertCmdSuccess(result, out, err)
        self.assertEqual(out, "", "Shouldn't be any output messages")
        self.assertIn("Please note that POSIX permissions have NOT been changed, only the stored NT ACL", err)

    def test_s3fs(self):
        # The s3fs reset is expected to be completely silent.
        (result, out, err) = self.runsubcmd("ntacl", "sysvolreset",
                                            "--use-s3fs")

        self.assertCmdSuccess(result, out, err)
        self.assertEqual(err, "", "Shouldn't be any error messages")
        self.assertEqual(out, "", "Shouldn't be any output messages")

    def test_ntvfs_check(self):
        (result, out, err) = self.runsubcmd("ntacl", "sysvolreset",
                                            "--use-ntvfs")
        self.assertCmdSuccess(result, out, err)
        self.assertEqual(out, "", "Shouldn't be any output messages")
        self.assertIn("Please note that POSIX permissions have NOT been changed, only the stored NT ACL", err)
        # Now check they were set correctly
        (result, out, err) = self.runsubcmd("ntacl", "sysvolcheck")
        self.assertCmdSuccess(result, out, err)
        self.assertEqual(err, "", "Shouldn't be any error messages")
        self.assertEqual(out, "", "Shouldn't be any output messages")

    def test_s3fs_check(self):
        (result, out, err) = self.runsubcmd("ntacl", "sysvolreset",
                                            "--use-s3fs")

        self.assertCmdSuccess(result, out, err)
        self.assertEqual(err, "", "Shouldn't be any error messages")
        self.assertEqual(out, "", "Shouldn't be any output messages")

        # Now check they were set correctly
        (result, out, err) = self.runsubcmd("ntacl", "sysvolcheck")
        self.assertCmdSuccess(result, out, err)
        self.assertEqual(err, "", "Shouldn't be any error messages")
        self.assertEqual(out, "", "Shouldn't be any output messages")

    def test_with_missing_files(self):
        # Move the Policies directory aside so reset/check must fail,
        # and verify they fail cleanly (no traceback leaking through).
        lp = env_loadparm()
        sysvol = lp.get('path', 'sysvol')
        realm = lp.get('realm').lower()

        src = os.path.join(sysvol, realm, 'Policies')
        dest = os.path.join(sysvol, realm, 'Policies-NOT-IN-THE-EXPECTED-PLACE')
        try:
            os.rename(src, dest)

            for args in (["sysvolreset", "--use-s3fs"],
                         ["sysvolreset", "--use-ntvfs"],
                         ["sysvolreset"],
                         ["sysvolcheck"]
                         ):

                (result, out, err) = self.runsubcmd("ntacl", *args)
                self.assertCmdFail(result, f"succeeded with {args} with missing dir")
                self.assertNotIn("uncaught exception", err,
                                 "Shouldn't be uncaught exception")
                self.assertNotRegex(err, r'^\s*File [^,]+, line \d+, in',
                                    "Shouldn't be lines of traceback")
                self.assertEqual(out, "", "Shouldn't be any output messages")
        finally:
            # Always restore the directory for subsequent tests.
            os.rename(dest, src)
+
+
class NtACLCmdGetSetTestCase(SambaToolCmdTest):
    """Tests for samba-tool ntacl get/set subcommands"""

    acl = "O:DAG:DUD:P(A;OICI;FA;;;DA)(A;OICI;FA;;;EA)(A;OICIIO;FA;;;CO)(A;OICI;FA;;;DA)(A;OICI;FA;;;SY)(A;OICI;0x1200a9;;;AU)(A;OICI;0x1200a9;;;ED)S:AI(OU;CIIDSA;WP;f30e3bbe-9ff0-11d1-b603-0000f80367c1;bf967aa5-0de6-11d0-a285-00aa003049e2;WD)(OU;CIIDSA;WP;f30e3bbf-9ff0-11d1-b603-0000f80367c1;bf967aa5-0de6-11d0-a285-00aa003049e2;WD)"

    def _make_temp_file(self):
        """Create a scratch file under SELFTEST_PREFIX and return its path.

        Uses a context manager so the file handle is closed promptly
        instead of relying on garbage collection.
        """
        path = os.environ['SELFTEST_PREFIX']
        tempf = os.path.join(path,
                             "pytests" + str(int(100000 * random.random())))
        with open(tempf, 'w') as f:
            f.write("empty")
        return tempf

    def test_ntvfs(self):
        tempf = self._make_temp_file()

        (result, out, err) = self.runsubcmd("ntacl", "set", self.acl, tempf,
                                            "--use-ntvfs")
        self.assertCmdSuccess(result, out, err)
        self.assertEqual(out, "", "Shouldn't be any output messages")
        self.assertIn("Please note that POSIX permissions have NOT been changed, only the stored NT ACL", err)

    def test_s3fs(self):
        tempf = self._make_temp_file()

        (result, out, err) = self.runsubcmd("ntacl", "set", self.acl, tempf,
                                            "--use-s3fs")

        self.assertCmdSuccess(result, out, err)
        self.assertEqual(err, "", "Shouldn't be any error messages")
        self.assertEqual(out, "", "Shouldn't be any output messages")

    def test_ntvfs_check(self):
        tempf = self._make_temp_file()

        (result, out, err) = self.runsubcmd("ntacl", "set", self.acl, tempf,
                                            "--use-ntvfs")
        self.assertCmdSuccess(result, out, err)
        self.assertEqual(out, "", "Shouldn't be any output messages")
        self.assertIn("Please note that POSIX permissions have NOT been changed, only the stored NT ACL", err)

        # Now check they were set correctly
        (result, out, err) = self.runsubcmd("ntacl", "get", tempf,
                                            "--use-ntvfs", "--as-sddl")
        self.assertCmdSuccess(result, out, err)
        self.assertEqual(err, "", "Shouldn't be any error messages")
        self.assertEqual(self.acl + "\n", out, "Output should be the ACL")

    def test_s3fs_check(self):
        tempf = self._make_temp_file()

        (result, out, err) = self.runsubcmd("ntacl", "set", self.acl, tempf,
                                            "--use-s3fs")
        self.assertCmdSuccess(result, out, err)
        self.assertEqual(out, "", "Shouldn't be any output messages")
        self.assertEqual(err, "", "Shouldn't be any error messages")

        # Now check they were set correctly
        (result, out, err) = self.runsubcmd("ntacl", "get", tempf,
                                            "--use-s3fs", "--as-sddl")
        self.assertCmdSuccess(result, out, err)
        self.assertEqual(err, "", "Shouldn't be any error messages")
        self.assertEqual(self.acl + "\n", out, "Output should be the ACL")
+
class NtACLCmdChangedomsidTestCase(SambaToolCmdTest):
    """Tests for samba-tool ntacl changedomsid subcommand"""
    maxDiff = 10000
    acl = "O:DAG:DUD:P(A;OICI;0x001f01ff;;;DA)(A;OICI;0x001f01ff;;;EA)(A;OICIIO;0x001f01ff;;;CO)(A;OICI;0x001f01ff;;;DA)(A;OICI;0x001f01ff;;;SY)(A;OICI;0x001200a9;;;AU)(A;OICI;0x001200a9;;;ED)S:AI(OU;CIIDSA;WP;f30e3bbe-9ff0-11d1-b603-0000f80367c1;bf967aa5-0de6-11d0-a285-00aa003049e2;WD)(OU;CIIDSA;WP;f30e3bbf-9ff0-11d1-b603-0000f80367c1;bf967aa5-0de6-11d0-a285-00aa003049e2;WD)"
    new_acl = "O:S-1-5-21-2212615479-2695158682-2101375468-512G:S-1-5-21-2212615479-2695158682-2101375468-513D:P(A;OICI;FA;;;S-1-5-21-2212615479-2695158682-2101375468-512)(A;OICI;FA;;;S-1-5-21-2212615479-2695158682-2101375468-519)(A;OICIIO;FA;;;CO)(A;OICI;FA;;;S-1-5-21-2212615479-2695158682-2101375468-512)(A;OICI;FA;;;SY)(A;OICI;0x1200a9;;;AU)(A;OICI;0x1200a9;;;ED)S:AI(OU;CIIDSA;WP;f30e3bbe-9ff0-11d1-b603-0000f80367c1;bf967aa5-0de6-11d0-a285-00aa003049e2;WD)(OU;CIIDSA;WP;f30e3bbf-9ff0-11d1-b603-0000f80367c1;bf967aa5-0de6-11d0-a285-00aa003049e2;WD)"
    domain_sid = os.environ['DOMSID']
    new_domain_sid = "S-1-5-21-2212615479-2695158682-2101375468"

    def _make_temp_file(self):
        """Create a scratch file under SELFTEST_PREFIX and return its path."""
        path = os.environ['SELFTEST_PREFIX']
        tempf = os.path.join(
            path, "pytests" + str(int(100000 * random.random())))
        # Context manager closes the handle promptly instead of
        # relying on garbage collection.
        with open(tempf, 'w') as f:
            f.write("empty")
        return tempf

    def test_ntvfs_check(self):
        tempf = self._make_temp_file()

        # Fixed: use %-formatting; the original passed the SID as a
        # second argument to print(), echoing a literal "%s".
        print("DOMSID: %s" % self.domain_sid)

        (result, out, err) = self.runsubcmd("ntacl",
                                            "set",
                                            self.acl,
                                            tempf,
                                            "--use-ntvfs")
        self.assertCmdSuccess(result, out, err)
        self.assertEqual(out, "", "Shouldn't be any output messages")
        self.assertIn("Please note that POSIX permissions have NOT been "
                      "changed, only the stored NT ACL", err)

        (result, out, err) = self.runsubcmd("ntacl",
                                            "changedomsid",
                                            self.domain_sid,
                                            self.new_domain_sid,
                                            tempf,
                                            "--use-ntvfs")
        self.assertCmdSuccess(result, out, err)
        self.assertEqual(out, "", "Shouldn't be any output messages")
        self.assertIn("Please note that POSIX permissions have NOT been "
                      "changed, only the stored NT ACL.", err)

        # Now check they were set correctly
        (result, out, err) = self.runsubcmd("ntacl",
                                            "get",
                                            tempf,
                                            "--use-ntvfs",
                                            "--as-sddl")
        self.assertCmdSuccess(result, out, err)
        self.assertEqual(err, "", "Shouldn't be any error messages")
        self.assertEqual(self.new_acl + "\n", out, "Output should be the ACL")

    def test_s3fs_check(self):
        tempf = self._make_temp_file()

        print("DOMSID: %s" % self.domain_sid)

        (result, out, err) = self.runsubcmd("ntacl",
                                            "set",
                                            self.acl,
                                            tempf,
                                            "--use-s3fs",
                                            "--service=sysvol")
        self.assertCmdSuccess(result, out, err)
        self.assertEqual(out, "", "Shouldn't be any output messages")
        self.assertEqual(err, "", "Shouldn't be any error messages")

        (result, out, err) = self.runsubcmd("ntacl",
                                            "changedomsid",
                                            self.domain_sid,
                                            self.new_domain_sid,
                                            tempf,
                                            "--use-s3fs",
                                            "--service=sysvol")
        self.assertCmdSuccess(result, out, err)
        self.assertEqual(out, "", "Shouldn't be any output messages")
        self.assertEqual(err, "", "Shouldn't be any error messages")

        # Now check they were set correctly
        (result, out, err) = self.runsubcmd("ntacl",
                                            "get",
                                            tempf,
                                            "--use-s3fs",
                                            "--as-sddl",
                                            "--service=sysvol")
        self.assertCmdSuccess(result, out, err)
        self.assertEqual(err, "", "Shouldn't be any error messages")
        self.assertEqual(self.new_acl + "\n", out, "Output should be the ACL")
diff --git a/python/samba/tests/samba_tool/ou.py b/python/samba/tests/samba_tool/ou.py
new file mode 100644
index 0000000..7a84876
--- /dev/null
+++ b/python/samba/tests/samba_tool/ou.py
@@ -0,0 +1,291 @@
+# Unix SMB/CIFS implementation.
+#
+# Copyright (C) Bjoern Baumbach <bb@sernet.de> 2018
+#
+# based on group.py:
+# Copyright (C) Michael Adam 2012
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import os
+import ldb
+from samba.tests.samba_tool.base import SambaToolCmdTest
+
+
+class OUCmdTestCase(SambaToolCmdTest):
+ """Tests for samba-tool ou subcommands"""
+ ous = []
+ samdb = None
+
    def setUp(self):
        """Connect to the DC and create four OUs used by every test."""
        super().setUp()
        self.samdb = self.getSamDB("-H", "ldap://%s" % os.environ["DC_SERVER"],
                                   "-U%s%%%s" % (os.environ["DC_USERNAME"], os.environ["DC_PASSWORD"]))
        self.ous = []
        self.ous.append(self._randomOU({"name": "testou1"}))
        self.ous.append(self._randomOU({"name": "testou2"}))
        self.ous.append(self._randomOU({"name": "testou3"}))
        self.ous.append(self._randomOU({"name": "testou4"}))

        # setup the 4 ous and ensure they are correct
        for ou in self.ous:
            (result, out, err) = self._create_ou(ou)

            self.assertCmdSuccess(result, out, err)
            self.assertEqual(err, "", "There shouldn't be any error message")
            full_ou_dn = self.samdb.normalize_dn_in_domain("OU=%s" % ou["name"])
            self.assertIn('Added ou "%s"' % full_ou_dn, out)

            # Verify the OU really exists and carries the expected
            # name and description.
            found = self._find_ou(ou["name"])

            self.assertIsNotNone(found)

            self.assertEqual("%s" % found.get("name"), ou["name"])
            self.assertEqual("%s" % found.get("description"),
                             ou["description"])
+
    def tearDown(self):
        """Delete any OUs that a test left behind."""
        super().tearDown()
        # clean up all the left over ous, just in case
        for ou in self.ous:
            if self._find_ou(ou["name"]):
                (result, out, err) = self.runsubcmd("ou", "delete",
                                                    "OU=%s" % ou["name"])
                self.assertCmdSuccess(result, out, err,
                                      "Failed to delete ou '%s'" % ou["name"])
+
    def test_newou(self):
        """This tests the "ou create" and "ou delete" commands"""
        # try to create all the ous again, this should fail
        for ou in self.ous:
            (result, out, err) = self._create_ou(ou)
            self.assertCmdFail(result, "Succeeded to add existing ou")
            self.assertIn("already exists", err)

        # try to delete all the ous we just added
        for ou in self.ous:
            (result, out, err) = self.runsubcmd("ou", "delete", "OU=%s" %
                                                ou["name"])
            self.assertCmdSuccess(result, out, err,
                                  "Failed to delete ou '%s'" % ou["name"])
            found = self._find_ou(ou["name"])
            self.assertIsNone(found,
                              "Deleted ou '%s' still exists" % ou["name"])

        # test creating ous (relative DN; the domain part is implied)
        for ou in self.ous:
            (result, out, err) = self.runsubcmd(
                "ou", "add", "OU=%s" % ou["name"],
                "--description=%s" % ou["description"])

            self.assertCmdSuccess(result, out, err)
            self.assertEqual(err, "", "There shouldn't be any error message")
            full_ou_dn = self.samdb.normalize_dn_in_domain("OU=%s" % ou["name"])
            self.assertIn('Added ou "%s"' % full_ou_dn, out)

            found = self._find_ou(ou["name"])

            self.assertEqual("%s" % found.get("ou"),
                             "%s" % ou["name"])

        # try to delete all the ous we just added (with full dn)
        for ou in self.ous:
            full_ou_dn = self.samdb.normalize_dn_in_domain("OU=%s" % ou["name"])
            (result, out, err) = self.runsubcmd("ou", "delete", str(full_ou_dn))
            self.assertCmdSuccess(result, out, err,
                                  "Failed to delete ou '%s'" % ou["name"])
            found = self._find_ou(ou["name"])
            self.assertIsNone(found,
                              "Deleted ou '%s' still exists" % ou["name"])

        # test creating ous (with full dn)
        for ou in self.ous:
            full_ou_dn = self.samdb.normalize_dn_in_domain("OU=%s" % ou["name"])
            (result, out, err) = self.runsubcmd(
                "ou", "add", str(full_ou_dn),
                "--description=%s" % ou["description"])

            self.assertCmdSuccess(result, out, err)
            self.assertEqual(err, "", "There shouldn't be any error message")
            full_ou_dn = self.samdb.normalize_dn_in_domain("OU=%s" % ou["name"])
            self.assertIn('Added ou "%s"' % full_ou_dn, out)

            found = self._find_ou(ou["name"])

            self.assertEqual("%s" % found.get("ou"),
                             "%s" % ou["name"])
+
+ def test_list(self):
+ (result, out, err) = self.runsubcmd("ou", "list")
+ self.assertCmdSuccess(result, out, err, "Error running list")
+
+ search_filter = "(objectClass=organizationalUnit)"
+
+ oulist = self.samdb.search(base=self.samdb.domain_dn(),
+ scope=ldb.SCOPE_SUBTREE,
+ expression=search_filter,
+ attrs=["name"])
+
+ self.assertTrue(len(oulist) > 0, "no ous found in samdb")
+
+ for ouobj in oulist:
+ name = ouobj.get("name", idx=0)
+ found = self.assertMatch(out, str(name),
+ "ou '%s' not found" % name)
+
+ def test_list_base_dn(self):
+ base_dn = str(self.samdb.domain_dn())
+ (result, out, err) = self.runsubcmd("ou", "list", "-b", base_dn)
+ self.assertCmdSuccess(result, out, err, "Error running list")
+
+ search_filter = "(objectClass=organizationalUnit)"
+
+ oulist = self.samdb.search(base=base_dn,
+ scope=ldb.SCOPE_SUBTREE,
+ expression=search_filter,
+ attrs=["name"])
+
+ self.assertTrue(len(oulist) > 0, "no ous found in samdb")
+
+ for ouobj in oulist:
+ name = ouobj.get("name", idx=0)
+ found = self.assertMatch(out, str(name),
+ "ou '%s' not found" % name)
+
+ def test_rename(self):
+ for ou in self.ous:
+ ousuffix = "RenameTest"
+ newouname = ou["name"] + ousuffix
+ (result, out, err) = self.runsubcmd("ou", "rename",
+ "OU=%s" % ou["name"],
+ "OU=%s" % newouname)
+ self.assertCmdSuccess(result, out, err,
+ "Failed to rename ou '%s'" % ou["name"])
+ found = self._find_ou(ou["name"])
+ self.assertIsNone(found,
+ "Renamed ou '%s' still exists" % ou["name"])
+ found = self._find_ou(newouname)
+ self.assertIsNotNone(found,
+ "Renamed ou '%s' does not exist" % newouname)
+
+ (result, out, err) = self.runsubcmd("ou", "rename",
+ "OU=%s" % newouname,
+ "OU=%s" % ou["name"])
+ self.assertCmdSuccess(result, out, err,
+ "Failed to rename ou '%s'" % newouname)
+
+ def test_move(self):
+ parentou = self._randomOU({"name": "parentOU"})
+ (result, out, err) = self._create_ou(parentou)
+ self.assertCmdSuccess(result, out, err)
+
+ for ou in self.ous:
+ olddn = self._find_ou(ou["name"]).get("dn")
+
+ (result, out, err) = self.runsubcmd("ou", "move",
+ "OU=%s" % ou["name"],
+ "OU=%s" % parentou["name"])
+ self.assertCmdSuccess(result, out, err,
+ "Failed to move ou '%s'" % ou["name"])
+ self.assertEqual(err, "", "There shouldn't be any error message")
+ full_ou_dn = self.samdb.normalize_dn_in_domain("OU=%s" % ou["name"])
+ self.assertIn('Moved ou "%s"' % full_ou_dn, out)
+
+ found = self._find_ou(ou["name"])
+ self.assertNotEqual(found.get("dn"), olddn,
+ "Moved ou '%s' still exists with the same dn" %
+ ou["name"])
+ newexpecteddn = ldb.Dn(self.samdb,
+ "OU=%s,OU=%s,%s" %
+ (ou["name"], parentou["name"],
+ self.samdb.domain_dn()))
+ self.assertEqual(found.get("dn"), newexpecteddn,
+ "Moved ou '%s' does not exist" %
+ ou["name"])
+
+ (result, out, err) = self.runsubcmd("ou", "move",
+ "%s" % newexpecteddn,
+ "%s" % olddn.parent())
+ self.assertCmdSuccess(result, out, err,
+ "Failed to move ou '%s'" % ou["name"])
+
+ (result, out, err) = self.runsubcmd("ou", "delete",
+ "OU=%s" % parentou["name"])
+ self.assertCmdSuccess(result, out, err,
+ "Failed to delete ou '%s'" % parentou["name"])
+
+ def test_listobjects(self):
+ (result, out, err) = self.runsubcmd("ou", "listobjects",
+ "%s" % self.samdb.domain_dn(),
+ "--full-dn")
+ self.assertCmdSuccess(result, out, err,
+ "Failed to list ou's objects")
+ self.assertEqual(err, "", "There shouldn't be any error message")
+
+ objlist = self.samdb.search(base=self.samdb.domain_dn(),
+ scope=ldb.SCOPE_ONELEVEL,
+ attrs=[])
+ self.assertTrue(len(objlist) > 0, "no objects found")
+
+ for obj in objlist:
+ found = self.assertMatch(out, str(obj.dn),
+ "object '%s' not found" % obj.dn)
+
+ def test_list_full_dn(self):
+ (result, out, err) = self.runsubcmd("ou", "list",
+ "--full-dn")
+ self.assertCmdSuccess(result, out, err,
+ "Failed to list ous")
+ self.assertEqual(err, "", "There shouldn't be any error message")
+
+ filter = "(objectClass=organizationalUnit)"
+ objlist = self.samdb.search(base=self.samdb.domain_dn(),
+ scope=ldb.SCOPE_SUBTREE,
+ expression=filter,
+ attrs=[])
+ self.assertTrue(len(objlist) > 0, "no ou objects found")
+
+ for obj in objlist:
+ found = self.assertMatch(out, str(obj.dn),
+ "object '%s' not found" % obj.dn)
+
+ def _randomOU(self, base=None):
+        """Create an OU with random attribute values; entries in *base*
+        override the generated attributes"""
+ if base is None:
+ base = {}
+ ou = {
+ "name": self.randomName(),
+ "description": self.randomName(count=100),
+ }
+ ou.update(base)
+ return ou
+
+ def _create_ou(self, ou):
+ return self.runsubcmd("ou", "add", "OU=%s" % ou["name"],
+ "--description=%s" % ou["description"])
+
+ def _find_ou(self, name):
+ search_filter = ("(&(name=%s)(objectCategory=%s,%s))" %
+ (ldb.binary_encode(name),
+ "CN=Organizational-Unit,CN=Schema,CN=Configuration",
+ self.samdb.domain_dn()))
+ oulist = self.samdb.search(base=self.samdb.domain_dn(),
+ scope=ldb.SCOPE_SUBTREE,
+ expression=search_filter)
+ if oulist:
+ return oulist[0]
+ else:
+ return None
diff --git a/python/samba/tests/samba_tool/passwordsettings.py b/python/samba/tests/samba_tool/passwordsettings.py
new file mode 100644
index 0000000..6db7a58
--- /dev/null
+++ b/python/samba/tests/samba_tool/passwordsettings.py
@@ -0,0 +1,484 @@
+# Test 'samba-tool domain passwordsettings' sub-commands
+#
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2018
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import os
+import ldb
+from samba.tests.samba_tool.base import SambaToolCmdTest
+from samba.tests.pso import PasswordSettings, TestUser
+
+
+class PwdSettingsCmdTestCase(SambaToolCmdTest):
+ """Tests for 'samba-tool domain passwordsettings' subcommands"""
+
+ def setUp(self):
+ super().setUp()
+ self.server = "ldap://%s" % os.environ["DC_SERVER"]
+ self.user_auth = "-U%s%%%s" % (os.environ["DC_USERNAME"],
+ os.environ["DC_PASSWORD"])
+ self.ldb = self.getSamDB("-H", self.server, self.user_auth)
+ system_dn = "CN=System,%s" % self.ldb.domain_dn()
+ self.pso_container = "CN=Password Settings Container,%s" % system_dn
+ self.obj_cleanup = []
+
+ def tearDown(self):
+ super().tearDown()
+ # clean-up any objects the test has created
+ for dn in self.obj_cleanup:
+ self.ldb.delete(dn)
+
+ def check_pso(self, pso_name, pso):
+ """Checks the PSO info in the DB matches what's expected"""
+
+ # lookup the PSO in the DB
+ dn = "CN=%s,%s" % (pso_name, self.pso_container)
+ pso_attrs = ['name', 'msDS-PasswordSettingsPrecedence',
+ 'msDS-PasswordReversibleEncryptionEnabled',
+ 'msDS-PasswordHistoryLength',
+ 'msDS-MinimumPasswordLength',
+ 'msDS-PasswordComplexityEnabled',
+ 'msDS-MinimumPasswordAge',
+ 'msDS-MaximumPasswordAge',
+ 'msDS-LockoutObservationWindow',
+ 'msDS-LockoutThreshold', 'msDS-LockoutDuration']
+ res = self.ldb.search(dn, scope=ldb.SCOPE_BASE, attrs=pso_attrs)
+ self.assertEqual(len(res), 1, "PSO lookup failed")
+
+ # convert types in the PSO-settings to what the search returns, i.e.
+ # boolean --> string, seconds --> timestamps in -100 nanosecond units
+ complexity_str = "TRUE" if pso.complexity else "FALSE"
+ plaintext_str = "TRUE" if pso.store_plaintext else "FALSE"
+ lockout_duration = -int(pso.lockout_duration * (1e7))
+ lockout_window = -int(pso.lockout_window * (1e7))
+ min_age = -int(pso.password_age_min * (1e7))
+ max_age = -int(pso.password_age_max * (1e7))
+
+ # check the PSO's settings match the search results
+ self.assertEqual(str(res[0]['msDS-PasswordComplexityEnabled'][0]),
+ complexity_str)
+ plaintext_res = res[0]['msDS-PasswordReversibleEncryptionEnabled'][0]
+ self.assertEqual(str(plaintext_res), plaintext_str)
+ self.assertEqual(int(res[0]['msDS-PasswordHistoryLength'][0]),
+ pso.history_len)
+ self.assertEqual(int(res[0]['msDS-MinimumPasswordLength'][0]),
+ pso.password_len)
+ self.assertEqual(int(res[0]['msDS-MinimumPasswordAge'][0]), min_age)
+ self.assertEqual(int(res[0]['msDS-MaximumPasswordAge'][0]), max_age)
+ self.assertEqual(int(res[0]['msDS-LockoutObservationWindow'][0]),
+ lockout_window)
+ self.assertEqual(int(res[0]['msDS-LockoutDuration'][0]),
+ lockout_duration)
+ self.assertEqual(int(res[0]['msDS-LockoutThreshold'][0]),
+ pso.lockout_attempts)
+ self.assertEqual(int(res[0]['msDS-PasswordSettingsPrecedence'][0]),
+ pso.precedence)
+
+ # check we can also display the PSO via the show command
+ (result, out, err) = self.runsublevelcmd("domain", ("passwordsettings",
+ "pso", "show"), pso_name,
+ "-H", self.server,
+ self.user_auth)
+ self.assertTrue(len(out.split(":")) >= 10,
+                        "Expect at least 10 fields displayed")
+
+ # for a few settings, sanity-check the display is what we expect
+ self.assertIn("Minimum password length: %u" % pso.password_len, out)
+ self.assertIn("Password history length: %u" % pso.history_len, out)
+ lockout_str = "lockout threshold (attempts): %u" % pso.lockout_attempts
+ self.assertIn(lockout_str, out)
+
+ def test_pso_create(self):
+ """Tests basic PSO creation using the samba-tool"""
+
+ # we expect the PSO to take the current domain settings by default
+ # (we'll set precedence/complexity, the rest should be the defaults)
+ expected_pso = PasswordSettings(None, self.ldb)
+ expected_pso.complexity = False
+ expected_pso.precedence = 100
+
+ # check basic PSO creation works
+ pso_name = "test-create-PSO"
+ (result, out, err) = self.runsublevelcmd("domain", ("passwordsettings",
+ "pso", "create"), pso_name,
+ "100", "--complexity=off",
+ "-H", self.server,
+ self.user_auth)
+ # make sure we clean-up after the test completes
+ self.obj_cleanup.append("CN=%s,%s" % (pso_name, self.pso_container))
+
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+ self.assertIn("successfully created", out)
+ self.check_pso(pso_name, expected_pso)
+
+ # check creating a PSO with the same name fails
+ (result, out, err) = self.runsublevelcmd("domain", ("passwordsettings",
+ "pso", "create"), pso_name,
+ "100", "--complexity=off",
+ "-H", self.server,
+ self.user_auth)
+ self.assertCmdFail(result, "Ensure that create for existing PSO fails")
+ self.assertIn("already exists", err)
+
+ # check we need to specify at least one password policy argument
+ pso_name = "test-create-PSO2"
+ (result, out, err) = self.runsublevelcmd("domain", ("passwordsettings",
+ "pso", "create"), pso_name,
+ "100", "-H", self.server,
+ self.user_auth)
+        self.assertCmdFail(result, "Create with no password settings should fail")
+ self.assertIn("specify at least one password policy setting", err)
+
+ # create a PSO with different settings and check they match
+ expected_pso.complexity = True
+ expected_pso.store_plaintext = True
+ expected_pso.precedence = 50
+ expected_pso.password_len = 12
+ day_in_secs = 60 * 60 * 24
+ expected_pso.password_age_min = 11 * day_in_secs
+ expected_pso.password_age_max = 50 * day_in_secs
+
+ (result, out, err) = self.runsublevelcmd("domain", ("passwordsettings",
+ "pso", "create"), pso_name,
+ "50", "--complexity=on",
+ "--store-plaintext=on",
+ "--min-pwd-length=12",
+ "--min-pwd-age=11",
+ "--max-pwd-age=50",
+ "-H", self.server,
+ self.user_auth)
+ self.obj_cleanup.append("CN=%s,%s" % (pso_name, self.pso_container))
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+ self.assertIn("successfully created", out)
+ self.check_pso(pso_name, expected_pso)
+
+ # check the PSOs we created are present in the 'list' command
+ (result, out, err) = self.runsublevelcmd("domain", ("passwordsettings",
+ "pso", "list"),
+ "-H", self.server,
+ self.user_auth)
+ self.assertCmdSuccess(result, out, err)
+ self.assertIn("test-create-PSO", out)
+ self.assertIn("test-create-PSO2", out)
+
+ def _create_pso(self, pso_name):
+ """Creates a PSO for use in other tests"""
+ # the new PSO will take the current domain settings by default
+ pso_settings = PasswordSettings(None, self.ldb)
+ pso_settings.name = pso_name
+ pso_settings.password_len = 10
+ pso_settings.precedence = 200
+
+ (result, out, err) = self.runsublevelcmd("domain", ("passwordsettings",
+ "pso", "create"), pso_name,
+ "200", "--min-pwd-length=10",
+ "-H", self.server,
+ self.user_auth)
+ # make sure we clean-up after the test completes
+ pso_settings.dn = "CN=%s,%s" % (pso_name, self.pso_container)
+ self.obj_cleanup.append(pso_settings.dn)
+
+ # sanity-check the cmd was successful
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+ self.assertIn("successfully created", out)
+ self.check_pso(pso_name, pso_settings)
+
+ return pso_settings
+
+ def test_pso_set(self):
+ """Tests we can modify a PSO using the samba-tool"""
+
+ pso_name = "test-set-PSO"
+ pso_settings = self._create_pso(pso_name)
+
+ # check we can update a PSO's settings
+ pso_settings.precedence = 99
+ pso_settings.lockout_attempts = 10
+ pso_settings.lockout_duration = 60 * 17
+ (res, out, err) = self.runsublevelcmd("domain", ("passwordsettings",
+ "pso", "set"), pso_name,
+ "--precedence=99",
+ "--account-lockout-threshold=10",
+ "--account-lockout-duration=17",
+ "-H", self.server,
+ self.user_auth)
+ self.assertCmdSuccess(res, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+ self.assertIn("Successfully updated", out)
+
+ # check the PSO's settings now reflect the new values
+ self.check_pso(pso_name, pso_settings)
+
+ def test_pso_delete(self):
+ """Tests we can delete a PSO using the samba-tool"""
+
+ pso_name = "test-delete-PSO"
+ self._create_pso(pso_name)
+
+ # check we can successfully delete the PSO
+ (result, out, err) = self.runsublevelcmd("domain", ("passwordsettings",
+ "pso", "delete"), pso_name,
+ "-H", self.server,
+ self.user_auth)
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+ self.assertIn("Deleted PSO", out)
+ dn = "CN=%s,%s" % (pso_name, self.pso_container)
+ self.obj_cleanup.remove(dn)
+
+ # check the object no longer exists in the DB
+ try:
+ self.ldb.search(dn, scope=ldb.SCOPE_BASE, attrs=['name'])
+ self.fail("PSO shouldn't exist")
+ except ldb.LdbError as e:
+ (enum, estr) = e.args
+ self.assertEqual(enum, ldb.ERR_NO_SUCH_OBJECT)
+
+ # run the same cmd again - it should fail because PSO no longer exists
+ (result, out, err) = self.runsublevelcmd("domain", ("passwordsettings",
+ "pso", "delete"), pso_name,
+ "-H", self.server,
+ self.user_auth)
+ self.assertCmdFail(result, "Deleting a non-existent PSO should fail")
+ self.assertIn("Unable to find PSO", err)
+
+ def check_pso_applied(self, user, pso):
+ """Checks that the correct PSO is applied to a given user"""
+
+ # first check the samba-tool output tells us the correct PSO is applied
+ (result, out, err) = self.runsublevelcmd("domain", ("passwordsettings",
+ "pso", "show-user"),
+ user.name, "-H", self.server,
+ self.user_auth)
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+ if pso is None:
+ self.assertIn("No PSO applies to user", out)
+ else:
+ self.assertIn(pso.name, out)
+
+ # then check the DB tells us the same thing
+ if pso is None:
+ self.assertEqual(user.get_resultant_PSO(), None)
+ else:
+ self.assertEqual(user.get_resultant_PSO(), pso.dn)
+
+ def test_pso_apply_to_user(self):
+ """Checks we can apply/unapply a PSO to a user"""
+
+ pso_name = "test-apply-PSO"
+ test_pso = self._create_pso(pso_name)
+
+ # check that a new user has no PSO applied by default
+ user = TestUser("test-PSO-user", self.ldb)
+ self.obj_cleanup.append(user.dn)
+ self.check_pso_applied(user, pso=None)
+
+ # add the user to a new group
+ group_name = "test-PSO-group"
+ dn = "CN=%s,%s" % (group_name, self.ldb.domain_dn())
+ self.ldb.add({"dn": dn, "objectclass": "group",
+ "sAMAccountName": group_name})
+ self.obj_cleanup.append(dn)
+ m = ldb.Message()
+ m.dn = ldb.Dn(self.ldb, dn)
+ m["member"] = ldb.MessageElement(user.dn, ldb.FLAG_MOD_ADD, "member")
+ self.ldb.modify(m)
+
+ # check samba-tool can successfully link a PSO to a group
+ (result, out, err) = self.runsublevelcmd("domain", ("passwordsettings",
+ "pso", "apply"), pso_name,
+ group_name, "-H", self.server,
+ self.user_auth)
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+ self.check_pso_applied(user, pso=test_pso)
+
+ # we should fail if we try to apply the same PSO/group twice though
+ (result, out, err) = self.runsublevelcmd("domain", ("passwordsettings",
+ "pso", "apply"), pso_name,
+ group_name, "-H", self.server,
+ self.user_auth)
+ self.assertCmdFail(result, "Shouldn't be able to apply PSO twice")
+ self.assertIn("already applies", err)
+
+ # check samba-tool can successfully link a PSO to a user
+ (result, out, err) = self.runsublevelcmd("domain", ("passwordsettings",
+ "pso", "apply"), pso_name,
+ user.name, "-H", self.server,
+ self.user_auth)
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+ self.check_pso_applied(user, pso=test_pso)
+
+ # check samba-tool can successfully unlink a group from a PSO
+ (result, out, err) = self.runsublevelcmd("domain", ("passwordsettings",
+ "pso", "unapply"), pso_name,
+ group_name, "-H", self.server,
+ self.user_auth)
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+ # PSO still applies directly to the user, even though group was removed
+ self.check_pso_applied(user, pso=test_pso)
+
+ # check samba-tool can successfully unlink a user from a PSO
+ (result, out, err) = self.runsublevelcmd("domain", ("passwordsettings",
+ "pso", "unapply"), pso_name,
+ user.name, "-H", self.server,
+ self.user_auth)
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+ self.check_pso_applied(user, pso=None)
+
+ def test_pso_unpriv(self):
+ """Checks unprivileged users can't modify PSOs via samba-tool"""
+
+ # create a dummy PSO and a non-admin user
+ pso_name = "test-unpriv-PSO"
+ self._create_pso(pso_name)
+ user = TestUser("test-unpriv-user", self.ldb)
+ self.obj_cleanup.append(user.dn)
+ unpriv_auth = "-U%s%%%s" % (user.name, user.get_password())
+
+ # check we need admin privileges to be able to do anything to PSOs
+ (result, out, err) = self.runsublevelcmd("domain", ("passwordsettings",
+ "pso", "set"), pso_name,
+ "--complexity=off", "-H",
+ self.server, unpriv_auth)
+ self.assertCmdFail(result, "Need admin privileges to modify PSO")
+ self.assertIn("You may not have permission", err)
+
+ (result, out, err) = self.runsublevelcmd("domain", ("passwordsettings",
+ "pso", "create"), "bad-perm",
+ "250", "--complexity=off",
+ "-H", self.server,
+ unpriv_auth)
+ self.assertCmdFail(result, "Need admin privileges to modify PSO")
+ self.assertIn("Administrator permissions are needed", err)
+
+ (result, out, err) = self.runsublevelcmd("domain", ("passwordsettings",
+ "pso", "delete"), pso_name,
+ "-H", self.server,
+ unpriv_auth)
+ self.assertCmdFail(result, "Need admin privileges to delete PSO")
+ self.assertIn("You may not have permission", err)
+
+ (result, out, err) = self.runsublevelcmd("domain", ("passwordsettings",
+ "pso", "show"), pso_name,
+ "-H", self.server,
+ unpriv_auth)
+ self.assertCmdFail(result, "Need admin privileges to view PSO")
+ self.assertIn("You may not have permission", err)
+
+ (result, out, err) = self.runsublevelcmd("domain", ("passwordsettings",
+ "pso", "apply"), pso_name,
+ user.name, "-H", self.server,
+ unpriv_auth)
+ self.assertCmdFail(result, "Need admin privileges to modify PSO")
+ self.assertIn("You may not have permission", err)
+
+ (result, out, err) = self.runsublevelcmd("domain", ("passwordsettings",
+ "pso", "unapply"), pso_name,
+ user.name, "-H", self.server,
+ unpriv_auth)
+ self.assertCmdFail(result, "Need admin privileges to modify PSO")
+ self.assertIn("You may not have permission", err)
+
+ # The 'list' command actually succeeds because it's not easy to tell
+ # whether we got no results due to lack of permissions, or because
+ # there were no PSOs to display
+ (result, out, err) = self.runsublevelcmd("domain", ("passwordsettings",
+ "pso", "list"), "-H",
+ self.server, unpriv_auth)
+ self.assertCmdSuccess(result, out, err)
+ self.assertIn("No PSOs", out)
+ self.assertIn("permission", out)
+
+ def test_domain_passwordsettings(self):
+ """Checks the 'set/show' commands for the domain settings (non-PSO)"""
+
+ # check the 'show' cmd for the domain settings
+ (result, out, err) = self.runsublevelcmd("domain", ("passwordsettings",
+ "show"), "-H", self.server,
+ self.user_auth)
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+
+ # check an arbitrary setting is displayed correctly
+ min_pwd_len = self.ldb.get_minPwdLength()
+ self.assertIn("Minimum password length: %s" % min_pwd_len, out)
+
+ # check we can change the domain setting
+ self.addCleanup(self.ldb.set_minPwdLength, min_pwd_len)
+ new_len = int(min_pwd_len) + 3
+ min_pwd_args = "--min-pwd-length=%u" % new_len
+ (result, out, err) = self.runsublevelcmd("domain", ("passwordsettings",
+ "set"), min_pwd_args,
+ "-H", self.server,
+ self.user_auth)
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+ self.assertIn("successful", out)
+ self.assertEqual(new_len, self.ldb.get_minPwdLength())
+
+ # check the updated value is now displayed
+ (result, out, err) = self.runsublevelcmd("domain", ("passwordsettings",
+ "show"), "-H", self.server,
+ self.user_auth)
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+ self.assertIn("Minimum password length: %u" % new_len, out)
+
+ def test_domain_passwordsettings_pwdage(self):
+ """Checks the 'set' command for the domain password age (non-PSO)"""
+
+ # check we can set the domain max password age
+ max_pwd_age = self.ldb.get_maxPwdAge()
+ self.addCleanup(self.ldb.set_maxPwdAge, max_pwd_age)
+ max_pwd_args = "--max-pwd-age=270"
+ (result, out, err) = self.runsublevelcmd("domain", ("passwordsettings",
+ "set"), max_pwd_args,
+ "-H", self.server,
+ self.user_auth)
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+ self.assertIn("successful", out)
+ self.assertNotEqual(max_pwd_age, self.ldb.get_maxPwdAge())
+
+ # check we can't set the domain min password age to more than the max
+ min_pwd_age = self.ldb.get_minPwdAge()
+ self.addCleanup(self.ldb.set_minPwdAge, min_pwd_age)
+ min_pwd_args = "--min-pwd-age=271"
+ (result, out, err) = self.runsublevelcmd("domain", ("passwordsettings",
+ "set"), min_pwd_args,
+ "-H", self.server,
+ self.user_auth)
+ self.assertCmdFail(result, "minPwdAge > maxPwdAge should be rejected")
+ self.assertIn("Maximum password age", err)
+
+ # check we can set the domain min password age to less than the max
+ min_pwd_args = "--min-pwd-age=269"
+ (result, out, err) = self.runsublevelcmd("domain", ("passwordsettings",
+ "set"), min_pwd_args,
+ "-H", self.server,
+ self.user_auth)
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+ self.assertIn("successful", out)
+ self.assertNotEqual(min_pwd_age, self.ldb.get_minPwdAge())
diff --git a/python/samba/tests/samba_tool/processes.py b/python/samba/tests/samba_tool/processes.py
new file mode 100644
index 0000000..4407797
--- /dev/null
+++ b/python/samba/tests/samba_tool/processes.py
@@ -0,0 +1,42 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Andrew Bartlett 2012
+#
+# based on time.py:
+# Copyright (C) Sean Dague <sdague@linux.vnet.ibm.com> 2011
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import os
+from samba.tests.samba_tool.base import SambaToolCmdTest
+
+
+class ProcessCmdTestCase(SambaToolCmdTest):
+ """Tests for samba-tool process subcommands"""
+
+ def test_name(self):
+ """Run processes command"""
+ (result, out, err) = self.runcmd("processes", "--name", "samba")
+ self.assertCmdSuccess(result, out, err, "Ensuring processes ran successfully")
+
+ def test_unknown_name(self):
+        """Run processes command with a non-existent --name"""
+ (result, out, err) = self.runcmd("processes", "--name", "not-existing-samba")
+ self.assertCmdSuccess(result, out, err, "Ensuring processes ran successfully")
+ self.assertEqual(out, "")
+
+ def test_all(self):
+ """Run processes command"""
+ (result, out, err) = self.runcmd("processes")
+ self.assertCmdSuccess(result, out, err, "Ensuring processes ran successfully")
diff --git a/python/samba/tests/samba_tool/promote_dc_lmdb_size.py b/python/samba/tests/samba_tool/promote_dc_lmdb_size.py
new file mode 100644
index 0000000..88e9d7c
--- /dev/null
+++ b/python/samba/tests/samba_tool/promote_dc_lmdb_size.py
@@ -0,0 +1,174 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Catalyst IT Ltd. 2019
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from samba.tests.samba_tool.base import SambaToolCmdTest
+import os
+import shutil
+
+
+class PromoteDcLmdbSizeTestCase(SambaToolCmdTest):
+ """Test setting of the lmdb map size during a promote dc"""
+
+ def setUp(self):
+ super().setUp()
+ self.tempsambadir = os.path.join(self.tempdir, "samba")
+ os.mkdir(self.tempsambadir)
+ (_, name) = os.path.split(self.tempdir)
+ self.netbios_name = name
+
+ # join a domain as a member server
+ #
+ # returns the tuple (ret, stdout, stderr)
+ def join_member(self):
+ command = (
+ "samba-tool " +
+ "domain join " +
+ os.environ["REALM"] + " " +
+ "member " +
+ ("-U%s%%%s " % (os.environ["USERNAME"], os.environ["PASSWORD"])) +
+ ("--option=netbiosname=%s " % self.netbios_name) +
+ ("--targetdir=%s " % self.tempsambadir))
+ return self.run_command(command)
+
+ #
+ # Promote a member server to a domain controller
+ def promote(self, size=None, role=None):
+ command = (
+ "samba-tool " +
+ "domain dcpromo " +
+ os.environ["REALM"] + " " +
+ role + " " +
+ ("-U%s%%%s " % (os.environ["USERNAME"], os.environ["PASSWORD"])) +
+ ("--option=netbiosname=%s " % self.netbios_name) +
+ ("--targetdir=%s " % self.tempsambadir) +
+ "--backend-store=mdb "
+ )
+ if size:
+ command += ("--backend-store-size=%s" % size)
+
+ (ret, stdout, stderr) = self.run_command(command)
+ if ret == 0:
+ self.cleanup_join(self.netbios_name)
+
+ return (ret, stdout, stderr)
+
+ def is_rodc(self):
+ url = "ldb://%s/private/sam.ldb" % self.tempsambadir
+ samdb = self.getSamDB("-H", url)
+ return samdb.am_rodc()
+
+ #
+ # Get the lmdb map size for the specified command
+ #
+ # While there is a python lmdb package available we use the lmdb command
+ # line utilities to avoid introducing a dependency.
+ #
+ def get_lmdb_environment_size(self, path):
+ (result, out, err) = self.run_command("mdb_stat -ne %s" % path)
+ if result:
+ self.fail("Unable to run mdb_stat\n")
+ for line in out.split("\n"):
+ line = line.strip()
+ if line.startswith("Map size:"):
+ line = line.replace(" ", "")
+ (label, size) = line.split(":")
+ return int(size)
+
+ #
+ # Check the lmdb files created by join and ensure that the map size
+ # has been set to size.
+ #
+ # Currently this is all the *.ldb files in private/sam.ldb.d
+ #
+ def check_lmdb_environment_sizes(self, size):
+ directory = os.path.join(self.tempsambadir, "private", "sam.ldb.d")
+ for name in os.listdir(directory):
+ if name.endswith(".ldb"):
+ path = os.path.join(directory, name)
+ s = self.get_lmdb_environment_size(path)
+ if s != size:
+                    self.fail("File %s, size=%d does not match expected %d" %
+                              (name, s, size))
+
+ #
+ # Ensure that if --backend-store-size is not specified the default of
+ # 8Gb is used
+ def test_promote_dc_default(self):
+ (result, out, err) = self.join_member()
+ self.assertEqual(0, result)
+ (result, out, err) = self.promote(role="DC")
+ self.assertEqual(0, result)
+ self.check_lmdb_environment_sizes(8 * 1024 * 1024 * 1024)
+ self.assertFalse(self.is_rodc())
+
+ #
+ # Ensure that if --backend-store-size is not specified the default of
+ # 8Gb is used
+ def test_promote_rodc_default(self):
+ (result, out, err) = self.join_member()
+ self.assertEqual(0, result)
+ (result, out, err) = self.promote(role="RODC")
+ self.assertEqual(0, result)
+ self.check_lmdb_environment_sizes(8 * 1024 * 1024 * 1024)
+ self.assertTrue(self.is_rodc())
+
+ #
+ # Promote to a DC with a backend size of 96Mb
+ def test_promote_dc_96Mb(self):
+ (result, out, err) = self.join_member()
+ self.assertEqual(0, result)
+ (result, out, err) = self.promote(role="DC", size="96Mb")
+ self.assertEqual(0, result)
+ self.check_lmdb_environment_sizes(96 * 1024 * 1024)
+ self.assertFalse(self.is_rodc())
+
+ #
+ # Promote to an RODC with a backend size of 256Mb
+ def test_promote_rodc_256Mb(self):
+ (result, out, err) = self.join_member()
+ self.assertEqual(0, result)
+ (result, out, err) = self.promote(role="RODC", size="256Mb")
+ self.assertEqual(0, result)
+ self.check_lmdb_environment_sizes(256 * 1024 * 1024)
+ self.assertTrue(self.is_rodc())
+
+ def test_no_unit_suffix(self):
+ (result, out, err) = self.run_command(
+ 'samba-tool domain dcpromo --backend-store-size "2"')
+ self.assertGreater(result, 0)
+ self.assertRegex(err,
+ r"--backend-store-size invalid suffix ''")
+
+ def test_invalid_unit_suffix(self):
+ (result, out, err) = self.run_command(
+ 'samba-tool domain dcpromo --backend-store-size "2 cd"')
+ self.assertGreater(result, 0)
+ self.assertRegex(err,
+ r"--backend-store-size invalid suffix 'cd'")
+
+ def test_non_numeric(self):
+ (result, out, err) = self.run_command(
+ 'samba-tool domain dcpromo --backend-store-size "two Gb"')
+ self.assertGreater(result, 0)
+ self.assertRegex(
+ err,
+ r"backend-store-size option requires a numeric value, with an"
+ " optional unit suffix")
+
+ def tearDown(self):
+ super().tearDown()
+ shutil.rmtree(self.tempsambadir)
diff --git a/python/samba/tests/samba_tool/provision_lmdb_size.py b/python/samba/tests/samba_tool/provision_lmdb_size.py
new file mode 100644
index 0000000..3514edf
--- /dev/null
+++ b/python/samba/tests/samba_tool/provision_lmdb_size.py
@@ -0,0 +1,132 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Catalyst IT Ltd. 2019
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from samba.tests.samba_tool.base import SambaToolCmdTest
+import os
+import shutil
+
+
+class ProvisionLmdbSizeTestCase(SambaToolCmdTest):
+    """Test setting of the lmdb map size during provision"""
+
+    def setUp(self):
+        super().setUp()
+        # Each test provisions into its own temporary directory.
+        self.tempsambadir = os.path.join(self.tempdir, "samba")
+        os.mkdir(self.tempsambadir)
+
+    # provision a domain and set the lmdb map size to size
+    #
+    # returns the tuple (ret, stdout, stderr)
+    def provision(self, size=None):
+        command = (
+            "samba-tool " +
+            "domain provision " +
+            "--realm=foo.example.com " +
+            "--domain=FOO " +
+            ("--targetdir=%s " % self.tempsambadir) +
+            "--backend-store=mdb " +
+            "--use-ntvfs "
+        )
+        if size:
+            command += ("--backend-store-size=%s" % size)
+
+        return self.run_command(command)
+
+    #
+    # Get the lmdb map size for the environment at path
+    #
+    # While there is a python lmdb package available we use the lmdb command
+    # line utilities to avoid introducing a dependency.
+    #
+    def get_lmdb_environment_size(self, path):
+        (result, out, err) = self.run_command("mdb_stat -ne %s" % path)
+        if result:
+            self.fail("Unable to run mdb_stat\n")
+        for line in out.split("\n"):
+            line = line.strip()
+            if line.startswith("Map size:"):
+                line = line.replace(" ", "")
+                (label, size) = line.split(":")
+                return int(size)
+        # Previously a missing "Map size:" line fell off the end of the loop
+        # and returned None, which crashed the caller with a confusing
+        # TypeError; fail with a clear message instead.
+        self.fail("Unable to determine map size for %s" % path)
+
+    #
+    # Check the lmdb files created by provision and ensure that the map size
+    # has been set to size.
+    #
+    # Currently this is all the *.ldb files in private/sam.ldb.d
+    #
+    def check_lmdb_environment_sizes(self, size):
+        directory = os.path.join(self.tempsambadir, "private", "sam.ldb.d")
+        for name in os.listdir(directory):
+            if name.endswith(".ldb"):
+                path = os.path.join(directory, name)
+                s = self.get_lmdb_environment_size(path)
+                if s != size:
+                    # The previous message claimed the size was "larger than"
+                    # the expected value even when it was smaller.
+                    self.fail("File %s, size=%d, expected size=%d" %
+                              (name, s, size))
+
+    #
+    # Ensure that if --backend-store-size is not specified the default of
+    # 8Gb is used
+    def test_default(self):
+        (result, out, err) = self.provision()
+        self.assertEqual(0, result)
+        self.check_lmdb_environment_sizes(8 * 1024 * 1024 * 1024)
+
+    def test_64Mb(self):
+        (result, out, err) = self.provision("64Mb")
+        self.assertEqual(0, result)
+        self.check_lmdb_environment_sizes(64 * 1024 * 1024)
+
+    def test_1Gb(self):
+        (result, out, err) = self.provision("1Gb")
+        self.assertEqual(0, result)
+        self.check_lmdb_environment_sizes(1 * 1024 * 1024 * 1024)
+
+    # 128Mb specified in bytes.
+    #
+    def test_134217728b(self):
+        (result, out, err) = self.provision("134217728b")
+        self.assertEqual(0, result)
+        self.check_lmdb_environment_sizes(134217728)
+
+    def test_no_unit_suffix(self):
+        (result, out, err) = self.run_command(
+            'samba-tool domain provision --backend-store-size "2"')
+        self.assertGreater(result, 0)
+        self.assertRegex(err,
+                         r"--backend-store-size invalid suffix ''")
+
+    def test_invalid_unit_suffix(self):
+        (result, out, err) = self.run_command(
+            'samba-tool domain provision --backend-store-size "2 cd"')
+        self.assertGreater(result, 0)
+        self.assertRegex(err,
+                         r"--backend-store-size invalid suffix 'cd'")
+
+    def test_non_numeric(self):
+        (result, out, err) = self.run_command(
+            'samba-tool domain provision --backend-store-size "two Gb"')
+        self.assertGreater(result, 0)
+        self.assertRegex(
+            err,
+            r"backend-store-size option requires a numeric value, with an"
+            " optional unit suffix")
+
+    def tearDown(self):
+        # Remove the temporary provision directory created by setUp().
+        super().tearDown()
+        shutil.rmtree(self.tempsambadir)
diff --git a/python/samba/tests/samba_tool/provision_password_check.py b/python/samba/tests/samba_tool/provision_password_check.py
new file mode 100644
index 0000000..51b4a4d
--- /dev/null
+++ b/python/samba/tests/samba_tool/provision_password_check.py
@@ -0,0 +1,57 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Catalyst IT Ltd. 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from samba.tests.samba_tool.base import SambaToolCmdTest
+import os
+import shutil
+
+
+class ProvisionPasswordTestCase(SambaToolCmdTest):
+    """Test for password validation in domain provision subcommand"""
+
+    def setUp(self):
+        super().setUp()
+        # Provision into a fresh temporary directory for each test.
+        self.tempsambadir = os.path.join(self.tempdir, "samba")
+        os.mkdir(self.tempsambadir)
+
+    def _provision_with_password(self, password):
+        # Provision with the supplied admin password and return the
+        # (ret, stdout, stderr) tuple from samba-tool.
+        return self.runsubcmd(
+            "domain", "provision", "--realm=foo.example.com", "--domain=FOO",
+            "--targetdir=%s" % self.tempsambadir,
+            "--adminpass=%s" % password,
+            "--use-ntvfs")
+
+    def test_short_and_low_quality(self):
+        # Too short and too simple: must be rejected.
+        rc, stdout, stderr = self._provision_with_password("foo")
+        self.assertCmdFail(rc)
+
+    def test_short(self):
+        # Complex enough, but below the minimum length.
+        rc, stdout, stderr = self._provision_with_password("Fo0!_9")
+        self.assertCmdFail(rc)
+        self.assertRegex(stderr, r"minimum password length")
+
+    def test_low_quality(self):
+        # Long enough, but fails the complexity check.
+        rc, stdout, stderr = self._provision_with_password("aaaaaaaaaaaaaaaaa")
+        self.assertCmdFail(rc)
+        self.assertRegex(stderr, r"quality standards")
+
+    def test_good(self):
+        # Long enough and complex enough: provision succeeds.
+        rc, stdout, stderr = self._provision_with_password("Fo0!_9.")
+        self.assertCmdSuccess(rc, stdout, stderr)
+
+    def tearDown(self):
+        super().tearDown()
+        shutil.rmtree(self.tempsambadir)
diff --git a/python/samba/tests/samba_tool/provision_userPassword_crypt.py b/python/samba/tests/samba_tool/provision_userPassword_crypt.py
new file mode 100644
index 0000000..2de8cdd
--- /dev/null
+++ b/python/samba/tests/samba_tool/provision_userPassword_crypt.py
@@ -0,0 +1,67 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Catalyst IT Ltd. 2021
+#
+# based on provision_lmdb_size.py:
+# Copyright (C) Catalyst IT Ltd. 2019
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from samba.tests.samba_tool.base import SambaToolCmdTest
+import os
+import shutil
+
+
+class ProvisionUserPasswordTestCase(SambaToolCmdTest):
+    """Test for crypt() hashed passwords"""
+
+    def setUp(self):
+        super().setUp()
+        self.tempsambadir = os.path.join(self.tempdir, "samba")
+        os.mkdir(self.tempsambadir)
+
+    # provision a domain
+    #
+    # returns the tuple (ret, stdout, stderr)
+    def provision(self, machinepass=None):
+        # "--use-rfc230" only worked via optparse prefix abbreviation;
+        # spell the option out in full.
+        command = (
+            "samba-tool " +
+            "domain provision " +
+            "--use-rfc2307 " +
+            "--realm=\"EXAMPLE.COM\" " +
+            "--domain=\"EXAMPLE\" " +
+            "--adminpass=\"FooBar123\" " +
+            "--server-role=dc " +
+            "--host-ip=10.166.183.55 " +
+            "--option=\"password hash userPassword " +
+            "schemes=CryptSHA256 CryptSHA512\" " +
+            ("--targetdir=\"%s\" " % self.tempsambadir) +
+            "--use-ntvfs"
+        )
+        if machinepass:
+            # NOTE: the leading space is required; without it the option was
+            # appended as "--use-ntvfs--machinepass=..." and never parsed.
+            command += (" --machinepass=\"%s\"" % machinepass)
+
+        return self.run_command(command)
+
+    def test_crypt(self):
+        # Provision must succeed with the CryptSHA256/CryptSHA512 schemes.
+        (result, out, err) = self.provision()
+        self.assertEqual(0, result)
+
+    def test_length(self):
+        # An over-long machine password must be rejected.
+        (result, out, err) = self.provision(machinepass="FooBar123" + ("a"*1024))
+        self.assertNotEqual(0, result)
+
+    def tearDown(self):
+        super().tearDown()
+        shutil.rmtree(self.tempsambadir)
diff --git a/python/samba/tests/samba_tool/rodc.py b/python/samba/tests/samba_tool/rodc.py
new file mode 100644
index 0000000..94e84d6
--- /dev/null
+++ b/python/samba/tests/samba_tool/rodc.py
@@ -0,0 +1,131 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Catalyst IT Ltd. 2015
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import os
+import ldb
+import samba
+from samba.samdb import SamDB
+from samba.tests import delete_force
+from samba.tests.samba_tool.base import SambaToolCmdTest
+from samba.credentials import Credentials
+from samba.auth import system_session
+
+
+class RodcCmdTestCase(SambaToolCmdTest):
+    """Tests for the samba-tool rodc preload subcommand."""
+
+    def setUp(self):
+        super().setUp()
+        self.lp = samba.param.LoadParm()
+        self.lp.load(os.environ["SMB_CONF_PATH"])
+        self.creds = Credentials()
+        self.creds.set_username(os.environ["DC_USERNAME"])
+        self.creds.set_password(os.environ["DC_PASSWORD"])
+        self.creds.guess(self.lp)
+        self.session = system_session()
+        self.ldb = SamDB("ldap://" + os.environ["DC_SERVER"],
+                         session_info=self.session, credentials=self.creds, lp=self.lp)
+
+        self.base_dn = self.ldb.domain_dn()
+
+        self.ldb.newuser("sambatool1", "1qazXSW@")
+        self.ldb.newuser("sambatool2", "2wsxCDE#")
+        self.ldb.newuser("sambatool3", "3edcVFR$")
+        self.ldb.newuser("sambatool4", "4rfvBGT%")
+        self.ldb.newuser("sambatool5", "5tjbNHY*")
+        self.ldb.newuser("sambatool6", "6yknMJU*")
+
+        # sambatool6 is deliberately left out of the replication group so
+        # tests can exercise the non-replicated-user error path.
+        self.ldb.add_remove_group_members("Allowed RODC Password Replication Group",
+                                          ["sambatool1", "sambatool2", "sambatool3",
+                                           "sambatool4", "sambatool5"],
+                                          add_members_operation=True)
+
+    def tearDown(self):
+        super().tearDown()
+        self.ldb.deleteuser("sambatool1")
+        self.ldb.deleteuser("sambatool2")
+        self.ldb.deleteuser("sambatool3")
+        self.ldb.deleteuser("sambatool4")
+        self.ldb.deleteuser("sambatool5")
+        self.ldb.deleteuser("sambatool6")
+        (result, out, err) = self.runsubcmd("drs", "replicate", "--local", "unused",
+                                            os.environ["DC_SERVER"], self.base_dn)
+
+    def test_single_by_account_name(self):
+        (result, out, err) = self.runsubcmd("rodc", "preload", "sambatool1",
+                                            "--server", os.environ["DC_SERVER"])
+        self.assertCmdSuccess(result, out, err, "ensuring rodc prefetch ran successfully")
+        self.assertEqual(out, "Replicating DN CN=sambatool1,CN=Users,%s\n" % self.base_dn)
+        self.assertEqual(err, "")
+
+    def test_single_by_dn(self):
+        (result, out, err) = self.runsubcmd("rodc", "preload", "cn=sambatool2,cn=users,%s" % self.base_dn,
+                                            "--server", os.environ["DC_SERVER"])
+        self.assertCmdSuccess(result, out, err, "ensuring rodc prefetch ran successfully")
+        self.assertEqual(out, "Replicating DN CN=sambatool2,CN=Users,%s\n" % self.base_dn)
+
+    def test_multi_by_account_name(self):
+        (result, out, err) = self.runsubcmd("rodc", "preload", "sambatool1", "sambatool2",
+                                            "--server", os.environ["DC_SERVER"])
+        self.assertCmdSuccess(result, out, err, "ensuring rodc prefetch ran successfully")
+        self.assertEqual(out, "Replicating DN CN=sambatool1,CN=Users,%s\nReplicating DN CN=sambatool2,CN=Users,%s\n" % (self.base_dn, self.base_dn))
+
+    def test_multi_by_dn(self):
+        (result, out, err) = self.runsubcmd("rodc", "preload", "cn=sambatool3,cn=users,%s" % self.base_dn, "cn=sambatool4,cn=users,%s" % self.base_dn,
+                                            "--server", os.environ["DC_SERVER"])
+        self.assertCmdSuccess(result, out, err, "ensuring rodc prefetch ran successfully")
+        self.assertEqual(out, "Replicating DN CN=sambatool3,CN=Users,%s\nReplicating DN CN=sambatool4,CN=Users,%s\n" % (self.base_dn, self.base_dn))
+
+    def test_multi_in_file(self):
+        tempf = os.path.join(self.tempdir, "accountlist")
+        # Use a context manager so the account list is flushed and the file
+        # handle closed before samba-tool reads it (the bare
+        # open().write() leaked the handle).
+        with open(tempf, 'w') as f:
+            f.write("sambatool1\nsambatool2")
+        (result, out, err) = self.runsubcmd("rodc", "preload", "--file", tempf,
+                                            "--server", os.environ["DC_SERVER"])
+        self.assertCmdSuccess(result, out, err, "ensuring rodc prefetch ran successfully")
+        self.assertEqual(out, "Replicating DN CN=sambatool1,CN=Users,%s\nReplicating DN CN=sambatool2,CN=Users,%s\n" % (self.base_dn, self.base_dn))
+        os.unlink(tempf)
+
+    def test_multi_with_missing_name_success(self):
+        (result, out, err) = self.runsubcmd("rodc", "preload",
+                                            "nonexistentuser1", "sambatool5",
+                                            "nonexistentuser2",
+                                            "--server", os.environ["DC_SERVER"],
+                                            "--ignore-errors")
+        self.assertCmdSuccess(result, out, err, "ensuring rodc prefetch ran successfully")
+        self.assertTrue(out.startswith("Replicating DN CN=sambatool5,CN=Users,%s\n"
+                                       % self.base_dn))
+
+    def test_multi_with_missing_name_failure(self):
+        (result, out, err) = self.runsubcmd("rodc", "preload",
+                                            "nonexistentuser1", "sambatool5",
+                                            "nonexistentuser2",
+                                            "--server", os.environ["DC_SERVER"])
+        self.assertCmdFail(result, "ensuring rodc prefetch quit on missing user")
+
+    def test_multi_without_group_success(self):
+        (result, out, err) = self.runsubcmd("rodc", "preload",
+                                            "sambatool6", "sambatool5",
+                                            "--server", os.environ["DC_SERVER"],
+                                            "--ignore-errors")
+        self.assertCmdSuccess(result, out, err, "ensuring rodc prefetch ran successfully")
+        self.assertTrue(out.startswith("Replicating DN CN=sambatool6,CN=Users,%s\n"
+                                       "Replicating DN CN=sambatool5,CN=Users,%s\n"
+                                       % (self.base_dn, self.base_dn)))
+
+    def test_multi_without_group_failure(self):
+        (result, out, err) = self.runsubcmd("rodc", "preload",
+                                            "sambatool6", "sambatool5",
+                                            "--server", os.environ["DC_SERVER"])
+        self.assertCmdFail(result, "ensuring rodc prefetch quit on non-replicated user")
diff --git a/python/samba/tests/samba_tool/schema.py b/python/samba/tests/samba_tool/schema.py
new file mode 100644
index 0000000..5c4ac78
--- /dev/null
+++ b/python/samba/tests/samba_tool/schema.py
@@ -0,0 +1,109 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) William Brown <william@blackhats.net.au> 2018
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import os
+import ldb
+from samba.tests.samba_tool.base import SambaToolCmdTest
+
+
+class SchemaCmdTestCase(SambaToolCmdTest):
+    """Tests for samba-tool schema subcommands"""
+    # SamDB connection, established in setUp().
+    samdb = None
+
+    def setUp(self):
+        super().setUp()
+        self.samdb = self.getSamDB("-H", "ldap://%s" % os.environ["DC_SERVER"],
+                                   "-U%s%%%s" % (os.environ["DC_USERNAME"], os.environ["DC_PASSWORD"]))
+
+    def test_display_attribute(self):
+        """Tests that we can display schema attributes"""
+        (result, out, err) = self.runsublevelcmd("schema", ("attribute",
+                                                 "show"), "uid",
+                                                 "-H", "ldap://%s" % os.environ["DC_SERVER"],
+                                                 "-U%s%%%s" % (os.environ["DC_USERNAME"],
+                                                               os.environ["DC_PASSWORD"]))
+
+        self.assertCmdSuccess(result, out, err)
+        self.assertEqual(err, "", "Shouldn't be any error messages")
+        self.assertIn("dn: CN=uid,CN=Schema,CN=Configuration,", out)
+
+    def test_modify_attribute_searchflags(self):
+        """Tests that we can modify searchFlags of an attribute"""
+        # A bare numeric flag value is rejected; symbolic names are required.
+        (result, out, err) = self.runsublevelcmd("schema", ("attribute",
+                                                 "modify"), "uid", "--searchflags=9",
+                                                 "-H", "ldap://%s" % os.environ["DC_SERVER"],
+                                                 "-U%s%%%s" % (os.environ["DC_USERNAME"],
+                                                               os.environ["DC_PASSWORD"]))
+
+        self.assertCmdFail(result, 'Unknown flag 9, please see --help')
+
+        (result, out, err) = self.runsublevelcmd("schema", ("attribute",
+                                                 "modify"), "uid", "--searchflags=fATTINDEX",
+                                                 "-H", "ldap://%s" % os.environ["DC_SERVER"],
+                                                 "-U%s%%%s" % (os.environ["DC_USERNAME"],
+                                                               os.environ["DC_PASSWORD"]))
+
+        self.assertCmdSuccess(result, out, err)
+        self.assertEqual(err, "", "Shouldn't be any error messages")
+        self.assertIn("modified cn=uid,CN=Schema,CN=Configuration,", out)
+
+        # Multiple comma-separated flags are accepted.
+        (result, out, err) = self.runsublevelcmd("schema", ("attribute",
+                                                 "modify"), "uid",
+                                                 "--searchflags=fATTINDEX,fSUBTREEATTINDEX",
+                                                 "-H", "ldap://%s" % os.environ["DC_SERVER"],
+                                                 "-U%s%%%s" % (os.environ["DC_USERNAME"],
+                                                               os.environ["DC_PASSWORD"]))
+
+        self.assertCmdSuccess(result, out, err)
+        self.assertEqual(err, "", "Shouldn't be any error messages")
+        self.assertIn("modified cn=uid,CN=Schema,CN=Configuration,", out)
+
+        # Flag names are matched case-insensitively (fAtTiNdEx).
+        (result, out, err) = self.runsublevelcmd("schema", ("attribute",
+                                                 "modify"), "uid",
+                                                 "--searchflags=fAtTiNdEx,fPRESERVEONDELETE",
+                                                 "-H", "ldap://%s" % os.environ["DC_SERVER"],
+                                                 "-U%s%%%s" % (os.environ["DC_USERNAME"],
+                                                               os.environ["DC_PASSWORD"]))
+
+        self.assertCmdSuccess(result, out, err)
+        self.assertEqual(err, "", "Shouldn't be any error messages")
+        self.assertIn("modified cn=uid,CN=Schema,CN=Configuration,", out)
+
+    def test_show_oc_attribute(self):
+        """Tests that we can show the object classes that MAY/MUST contain an attribute"""
+        (result, out, err) = self.runsublevelcmd("schema", ("attribute",
+                                                 "show_oc"), "cn",
+                                                 "-H", "ldap://%s" % os.environ["DC_SERVER"],
+                                                 "-U%s%%%s" % (os.environ["DC_USERNAME"],
+                                                               os.environ["DC_PASSWORD"]))
+
+        self.assertCmdSuccess(result, out, err)
+        self.assertEqual(err, "", "Shouldn't be any error messages")
+        self.assertIn("--- MAY contain ---", out)
+        self.assertIn("--- MUST contain ---", out)
+
+    def test_display_objectclass(self):
+        """Tests that we can display schema objectclasses"""
+        (result, out, err) = self.runsublevelcmd("schema", ("objectclass",
+                                                 "show"), "person",
+                                                 "-H", "ldap://%s" % os.environ["DC_SERVER"],
+                                                 "-U%s%%%s" % (os.environ["DC_USERNAME"],
+                                                               os.environ["DC_PASSWORD"]))
+
+        self.assertCmdSuccess(result, out, err)
+        self.assertEqual(err, "", "Shouldn't be any error messages")
+        self.assertIn("dn: CN=Person,CN=Schema,CN=Configuration,", out)
diff --git a/python/samba/tests/samba_tool/silo_base.py b/python/samba/tests/samba_tool/silo_base.py
new file mode 100644
index 0000000..451d330
--- /dev/null
+++ b/python/samba/tests/samba_tool/silo_base.py
@@ -0,0 +1,229 @@
+# Unix SMB/CIFS implementation.
+#
+# Base test class for samba-tool domain auth policy and silo commands.
+#
+# Copyright (C) Catalyst.Net Ltd. 2023
+#
+# Written by Rob van der Linde <rob@catalyst.net.nz>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import os
+
+from ldb import SCOPE_ONELEVEL
+
+from samba.netcmd.domain.models import Group
+
+from .base import SambaToolCmdTest
+
+HOST = "ldap://{DC_SERVER}".format(**os.environ)
+CREDS = "-U{DC_USERNAME}%{DC_PASSWORD}".format(**os.environ)
+
+
+class SiloTest(SambaToolCmdTest):
+    """Base test class for silo and policy related commands."""
+
+    @classmethod
+    def setUpClass(cls):
+        # NOTE(review): the SamDB handle is created before super().setUpClass()
+        # so it is available to setUpTestData below.
+        cls.samdb = cls.getSamDB("-H", HOST, CREDS)
+        super().setUpClass()
+
+    @classmethod
+    def setUpTestData(cls):
+        # Shared fixtures for all subclasses: three policies, three silos
+        # referencing them, and one device group.  Each create_* call
+        # registers its own class-level cleanup.
+        cls.create_authentication_policy(name="User Policy")
+        cls.create_authentication_policy(name="Service Policy")
+        cls.create_authentication_policy(name="Computer Policy")
+
+        cls.create_authentication_silo(
+            name="Developers",
+            description="Developers, Developers, Developers!",
+            user_authentication_policy="User Policy")
+        cls.create_authentication_silo(
+            name="Managers",
+            description="Managers",
+            user_authentication_policy="User Policy")
+        cls.create_authentication_silo(
+            name="QA",
+            description="Quality Assurance",
+            user_authentication_policy="User Policy",
+            service_authentication_policy="Service Policy",
+            computer_authentication_policy="Computer Policy")
+
+        cls.device_group = Group(name="device-group")
+        cls.device_group.save(cls.samdb)
+        cls.addClassCleanup(cls.device_group.delete, cls.samdb)
+
+    def get_services_dn(self):
+        """Returns Services DN."""
+        services_dn = self.samdb.get_config_basedn()
+        services_dn.add_child("CN=Services")
+        return services_dn
+
+    def get_authn_configuration_dn(self):
+        """Returns AuthN Configuration DN."""
+        authn_policy_configuration = self.get_services_dn()
+        authn_policy_configuration.add_child("CN=AuthN Policy Configuration")
+        return authn_policy_configuration
+
+    def get_authn_silos_dn(self):
+        """Returns AuthN Silos DN."""
+        authn_silos_dn = self.get_authn_configuration_dn()
+        authn_silos_dn.add_child("CN=AuthN Silos")
+        return authn_silos_dn
+
+    def get_authn_policies_dn(self):
+        """Returns AuthN Policies DN."""
+        authn_policies_dn = self.get_authn_configuration_dn()
+        authn_policies_dn.add_child("CN=AuthN Policies")
+        return authn_policies_dn
+
+    def get_users_dn(self):
+        """Returns Users DN."""
+        users_dn = self.samdb.get_root_basedn()
+        users_dn.add_child("CN=Users")
+        return users_dn
+
+    def get_user(self, username):
+        """Get a user by username.
+
+        Returns None unless exactly one match is found.
+        """
+        users_dn = self.get_users_dn()
+
+        result = self.samdb.search(base=users_dn,
+                                   scope=SCOPE_ONELEVEL,
+                                   expression=f"(sAMAccountName={username})")
+
+        if len(result) == 1:
+            return result[0]
+
+    @classmethod
+    def _run(cls, *argv):
+        """Override _run, so we don't always have to pass host and creds."""
+        args = list(argv)
+        args.extend(["-H", HOST, CREDS])
+        return super()._run(*args)
+
+    # Aliases so tests use the usual entry points while still getting the
+    # host/creds appended by _run above.
+    runcmd = _run
+    runsubcmd = _run
+
+    @classmethod
+    def create_authentication_policy(cls, name, description=None, audit=False,
+                                     protect=False):
+        """Create an authentication policy."""
+
+        # base command for create authentication policy
+        cmd = ["domain", "auth", "policy", "create", "--name", name]
+
+        # optional attributes
+        if description is not None:
+            cmd.append(f"--description={description}")
+        if audit:
+            cmd.append("--audit")
+        if protect:
+            cmd.append("--protect")
+
+        # Run command and store name in self.silos for tearDownClass to clean
+        # up.
+        # A result of None indicates the samba-tool command succeeded.
+        result, out, err = cls.runcmd(*cmd)
+        assert result is None
+        assert out.startswith("Created authentication policy")
+        cls.addClassCleanup(cls.delete_authentication_policy,
+                            name=name, force=True)
+        return name
+
+    @classmethod
+    def delete_authentication_policy(cls, name, force=False):
+        """Delete authentication policy by name."""
+        cmd = ["domain", "auth", "policy", "delete", "--name", name]
+
+        # Force-delete protected authentication policy.
+        if force:
+            cmd.append("--force")
+
+        result, out, err = cls.runcmd(*cmd)
+        assert result is None
+        assert "Deleted authentication policy" in out
+
+    @classmethod
+    def create_authentication_silo(cls, name, description=None,
+                                   user_authentication_policy=None,
+                                   service_authentication_policy=None,
+                                   computer_authentication_policy=None,
+                                   audit=False, protect=False):
+        """Create an authentication silo using the samba-tool command."""
+
+        # Base command for create authentication policy.
+        cmd = ["domain", "auth", "silo", "create", "--name", name]
+
+        # Authentication policies.
+        if user_authentication_policy:
+            cmd += ["--user-authentication-policy",
+                    user_authentication_policy]
+        if service_authentication_policy:
+            cmd += ["--service-authentication-policy",
+                    service_authentication_policy]
+        if computer_authentication_policy:
+            cmd += ["--computer-authentication-policy",
+                    computer_authentication_policy]
+
+        # Other optional attributes.
+        if description is not None:
+            cmd.append(f"--description={description}")
+        if protect:
+            cmd.append("--protect")
+        if audit:
+            cmd.append("--audit")
+
+        # Run command and store name in self.silos for tearDownClass to clean
+        # up.
+        result, out, err = cls.runcmd(*cmd)
+        assert result is None
+        assert out.startswith("Created authentication silo")
+        cls.addClassCleanup(cls.delete_authentication_silo,
+                            name=name, force=True)
+        return name
+
+    @classmethod
+    def delete_authentication_silo(cls, name, force=False):
+        """Delete authentication silo by name."""
+        cmd = ["domain", "auth", "silo", "delete", "--name", name]
+
+        # Force-delete protected authentication silo.
+        if force:
+            cmd.append("--force")
+
+        result, out, err = cls.runcmd(*cmd)
+        assert result is None
+        assert "Deleted authentication silo" in out
+
+    def get_authentication_silo(self, name):
+        """Get authentication silo by name.
+
+        Returns None unless exactly one match is found.
+        """
+        authn_silos_dn = self.get_authn_silos_dn()
+
+        result = self.samdb.search(base=authn_silos_dn,
+                                   scope=SCOPE_ONELEVEL,
+                                   expression=f"(CN={name})")
+
+        if len(result) == 1:
+            return result[0]
+
+    def get_authentication_policy(self, name):
+        """Get authentication policy by name.
+
+        Returns None unless exactly one match is found.
+        """
+        authn_policies_dn = self.get_authn_policies_dn()
+
+        result = self.samdb.search(base=authn_policies_dn,
+                                   scope=SCOPE_ONELEVEL,
+                                   expression=f"(CN={name})")
+
+        if len(result) == 1:
+            return result[0]
diff --git a/python/samba/tests/samba_tool/sites.py b/python/samba/tests/samba_tool/sites.py
new file mode 100644
index 0000000..4288f35
--- /dev/null
+++ b/python/samba/tests/samba_tool/sites.py
@@ -0,0 +1,205 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Catalyst.Net LTD 2015
+# Copyright (C) Sean Dague <sdague@linux.vnet.ibm.com> 2011
+#
+# Catalyst.Net's contribution was written by Douglas Bagnall
+# <douglas.bagnall@catalyst.net.nz>.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import json
+import os
+import ldb
+from samba.tests.samba_tool.base import SambaToolCmdTest
+from samba import sites, subnets
+
+
+class BaseSitesCmdTestCase(SambaToolCmdTest):
+    """Tests for samba-tool sites subnets"""
+    def setUp(self):
+        super().setUp()
+        # Connection details shared by all the sites/subnets tests.
+        server = os.environ["DC_SERVER"]
+        username = os.environ["DC_USERNAME"]
+        password = os.environ["DC_PASSWORD"]
+        self.dburl = "ldap://%s" % server
+        self.creds_string = "-U%s%%%s" % (username, password)
+
+        self.samdb = self.getSamDB("-H", self.dburl, self.creds_string)
+        self.config_dn = str(self.samdb.get_config_basedn())
+
+
+class SitesCmdTestCase(BaseSitesCmdTestCase):
+
+    def test_site_create(self):
+        """Create a site, check it exists, then remove it again."""
+        sitename = 'new_site'
+
+        result, out, err = self.runsubcmd("sites", "create", sitename,
+                                          "-H", self.dburl, self.creds_string)
+        self.assertCmdSuccess(result, out, err)
+
+        dnsites = ldb.Dn(self.samdb, "CN=Sites,%s" % self.config_dn)
+        dnsite = ldb.Dn(self.samdb, "CN=%s,%s" % (sitename, dnsites))
+
+        found = self.samdb.search(base=dnsites, scope=ldb.SCOPE_ONELEVEL,
+                                  expression='(cn=%s)' % sitename)
+        self.assertEqual(len(found), 1)
+
+        # now delete it
+        self.samdb.delete(dnsite, ["tree_delete:0"])
+
+    def test_site_list(self):
+        """List sites as plain text and again as JSON."""
+        result, out, err = self.runsubcmd("sites", "list",
+                                          "-H", self.dburl, self.creds_string)
+        self.assertCmdSuccess(result, out, err)
+        self.assertIn("Default-First-Site-Name", out)
+
+        # The same but with --json
+        result, out, err = self.runsubcmd("sites", "list", "--json",
+                                          "-H", self.dburl, self.creds_string)
+        self.assertCmdSuccess(result, out, err)
+        self.assertIn("Default-First-Site-Name", json.loads(out))
+
+    def test_site_view(self):
+        """View an existing site, then one that does not exist."""
+        result, out, err = self.runsubcmd("sites", "view",
+                                          "Default-First-Site-Name",
+                                          "-H", self.dburl, self.creds_string)
+        self.assertCmdSuccess(result, out, err)
+        parsed = json.loads(out)
+        self.assertEqual(parsed["cn"], "Default-First-Site-Name")
+
+        # Now try one that doesn't exist
+        result, out, err = self.runsubcmd("sites", "view",
+                                          "Does-Not-Exist",
+                                          "-H", self.dburl, self.creds_string)
+        self.assertCmdFail(result, err)
+
+
+class SitesSubnetCmdTestCase(BaseSitesCmdTestCase):
+    """Tests for the samba-tool sites subnet subcommands."""
+
+    def setUp(self):
+        super().setUp()
+        # Two sites to attach subnets to; removed again in tearDown.
+        self.sitename = "testsite"
+        self.sitename2 = "testsite2"
+        self.samdb.transaction_start()
+        sites.create_site(self.samdb, self.config_dn, self.sitename)
+        sites.create_site(self.samdb, self.config_dn, self.sitename2)
+        self.samdb.transaction_commit()
+
+    def tearDown(self):
+        self.samdb.transaction_start()
+        sites.delete_site(self.samdb, self.config_dn, self.sitename)
+        sites.delete_site(self.samdb, self.config_dn, self.sitename2)
+        self.samdb.transaction_commit()
+        super().tearDown()
+
+    def test_site_subnet_create(self):
+        # Valid CIDRs: IPv4 and IPv6, overlapping ranges and a /32 host.
+        cidrs = (("10.9.8.0/24", self.sitename),
+                 ("50.60.0.0/16", self.sitename2),
+                 ("50.61.0.0/16", self.sitename2),  # second subnet on the site
+                 ("50.0.0.0/8", self.sitename),  # overlapping subnet, other site
+                 ("50.62.1.2/32", self.sitename),  # single IP
+                 ("aaaa:bbbb:cccc:dddd:eeee:ffff:2222:1100/120",
+                  self.sitename2),
+                 )
+
+        for cidr, sitename in cidrs:
+            result, out, err = self.runsubcmd("sites", "subnet", "create",
+                                              cidr, sitename,
+                                              "-H", self.dburl,
+                                              self.creds_string)
+            self.assertCmdSuccess(result, out, err)
+
+            ret = self.samdb.search(base=self.config_dn,
+                                    scope=ldb.SCOPE_SUBTREE,
+                                    expression=('(&(objectclass=subnet)(cn=%s))'
+                                                % cidr))
+            self.assertIsNotNone(ret)
+            self.assertEqual(len(ret), 1)
+
+        dnsubnets = ldb.Dn(self.samdb,
+                           "CN=Subnets,CN=Sites,%s" % self.config_dn)
+
+        # Clean up: confirm each subnet exists, then delete it.
+        for cidr, sitename in cidrs:
+            dnsubnet = ldb.Dn(self.samdb, ("Cn=%s,CN=Subnets,CN=Sites,%s" %
+                                           (cidr, self.config_dn)))
+
+            ret = self.samdb.search(base=dnsubnets, scope=ldb.SCOPE_ONELEVEL,
+                                    expression='(CN=%s)' % cidr)
+            self.assertIsNotNone(ret)
+            self.assertEqual(len(ret), 1)
+            self.samdb.delete(dnsubnet, ["tree_delete:0"])
+
+    def test_site_subnet_create_should_fail(self):
+        # Malformed CIDRs: each must be rejected and leave no object behind.
+        cidrs = (("10.9.8.0/33", self.sitename),  # mask too big
+                 ("50.60.0.0/8", self.sitename2),  # insufficient zeros
+                 ("50.261.0.0/16", self.sitename2),  # bad octet
+                 ("7.0.0.0.0/0", self.sitename),  # insufficient zeros
+                 ("aaaa:bbbb:cccc:dddd:eeee:ffff:2222:1100/119",
+                  self.sitename),  # insufficient zeros
+                 )
+
+        for cidr, sitename in cidrs:
+            result, out, err = self.runsubcmd("sites", "subnet", "create",
+                                              cidr, sitename,
+                                              "-H", self.dburl,
+                                              self.creds_string)
+            self.assertCmdFail(result)
+
+            ret = self.samdb.search(base=self.config_dn,
+                                    scope=ldb.SCOPE_SUBTREE,
+                                    expression=('(&(objectclass=subnet)(cn=%s))'
+                                                % cidr))
+
+            self.assertIsNotNone(ret)
+            self.assertEqual(len(ret), 0)
+
+    def test_site_subnet_list(self):
+        subnet = "10.9.8.0/24"
+        subnets.create_subnet(self.samdb, self.samdb.get_config_basedn(),
+                              subnet, self.sitename)
+
+        # cleanup after test
+        dnsubnet = ldb.Dn(self.samdb, ("CN=%s,CN=Subnets,CN=Sites,%s" %
+                                       (subnet, self.config_dn)))
+        self.addCleanup(self.samdb.delete, dnsubnet, ["tree_delete:1"])
+
+        result, out, err = self.runsubcmd("sites", "subnet", "list",
+                                          self.sitename,
+                                          "-H", self.dburl, self.creds_string)
+
+        self.assertCmdSuccess(result, out, err)
+        self.assertIn(subnet, out)
+
+    def test_site_subnet_view(self):
+        subnet = "50.60.0.0/16"
+        subnets.create_subnet(self.samdb, self.samdb.get_config_basedn(),
+                              subnet, self.sitename2)
+
+        # cleanup after test
+        dnsubnet = ldb.Dn(self.samdb, ("CN=%s,CN=Subnets,CN=Sites,%s" %
+                                       (subnet, self.config_dn)))
+        self.addCleanup(self.samdb.delete, dnsubnet, ["tree_delete:1"])
+
+        result, out, err = self.runsubcmd("sites", "subnet",
+                                          "view", subnet,
+                                          "-H", self.dburl, self.creds_string)
+
+        self.assertCmdSuccess(result, out, err)
+        json_data = json.loads(out)
+        self.assertEqual(json_data["cn"], subnet)
+
+        # Now try one that doesn't exist
+        result, out, err = self.runsubcmd("sites", "subnet",
+                                          "view", "50.0.0.0/8",
+                                          "-H", self.dburl, self.creds_string)
+        self.assertCmdFail(result, err)
diff --git a/python/samba/tests/samba_tool/timecmd.py b/python/samba/tests/samba_tool/timecmd.py
new file mode 100644
index 0000000..8e286f6
--- /dev/null
+++ b/python/samba/tests/samba_tool/timecmd.py
@@ -0,0 +1,44 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Sean Dague <sdague@linux.vnet.ibm.com> 2011
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import os
+from time import localtime, strptime, mktime
+from samba.tests.samba_tool.base import SambaToolCmdTest
+
+
+class TimeCmdTestCase(SambaToolCmdTest):
+ """Tests for samba-tool time subcommands"""
+
+    def test_timeget(self):
+        """Run time against the server and make sure it looks accurate"""
+        (result, out, err) = self.runcmd("time", os.environ["SERVER"])
+        self.assertCmdSuccess(result, out, err, "Ensuring time ran successfully")
+
+        timefmt = strptime(out, "%a %b %d %H:%M:%S %Y %Z\n")
+        servertime = int(mktime(timefmt))
+        now = int(mktime(localtime()))
+
+        # because there is a race here, allow up to 5 seconds difference in times
+        delta = 5
+        self.assertTrue((now - delta) < servertime < (now + delta), "Time is now")
+
+ def test_timefail(self):
+ """Run time against a non-existent server, and make sure it fails"""
+ (result, out, err) = self.runcmd("time", "notaserver")
+ self.assertEqual(result, -1, "check for result code")
+ self.assertNotEqual(err.strip().find("NT_STATUS_OBJECT_NAME_NOT_FOUND"), -1, "ensure right error string")
+ self.assertEqual(out, "", "ensure no output returned")
diff --git a/python/samba/tests/samba_tool/user.py b/python/samba/tests/samba_tool/user.py
new file mode 100644
index 0000000..26c9748
--- /dev/null
+++ b/python/samba/tests/samba_tool/user.py
@@ -0,0 +1,1246 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Sean Dague <sdague@linux.vnet.ibm.com> 2011
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import os
+import time
+import base64
+import ldb
+from samba.tests.samba_tool.base import SambaToolCmdTest
+from samba import (
+ credentials,
+ nttime2unix,
+ dsdb,
+ werror,
+ )
+from samba.ndr import ndr_unpack
+from samba.dcerpc import drsblobs
+from samba.common import get_bytes
+from samba.common import get_string
+from samba.tests import env_loadparm
+
+
+class UserCmdTestCase(SambaToolCmdTest):
+ """Tests for samba-tool user subcommands"""
+ users = []
+ samdb = None
+
+ def setUp(self):
+ super().setUp()
+ self.samdb = self.getSamDB("-H", "ldap://%s" % os.environ["DC_SERVER"],
+ "-U%s%%%s" % (os.environ["DC_USERNAME"], os.environ["DC_PASSWORD"]))
+
+ # Modify the default template homedir
+ lp = self.get_loadparm()
+ self.template_homedir = lp.get('template homedir')
+ lp.set('template homedir', '/home/test/%D/%U')
+
+ self.users = []
+ self.users.append(self._randomUser({"name": "sambatool1", "company": "comp1"}))
+ self.users.append(self._randomUser({"name": "sambatool2", "company": "comp1"}))
+ self.users.append(self._randomUser({"name": "sambatool3", "company": "comp2"}))
+ self.users.append(self._randomUser({"name": "sambatool4", "company": "comp2"}))
+ self.users.append(self._randomPosixUser({"name": "posixuser1"}))
+ self.users.append(self._randomPosixUser({"name": "posixuser2"}))
+ self.users.append(self._randomPosixUser({"name": "posixuser3"}))
+ self.users.append(self._randomPosixUser({"name": "posixuser4"}))
+ self.users.append(self._randomUnixUser({"name": "unixuser1"}))
+ self.users.append(self._randomUnixUser({"name": "unixuser2"}))
+ self.users.append(self._randomUnixUser({"name": "unixuser3"}))
+ self.users.append(self._randomUnixUser({"name": "unixuser4"}))
+
+ # Make sure users don't exist
+ for user in self.users:
+ if self._find_user(user["name"]):
+ self.runsubcmd("user", "delete", user["name"])
+
+ # setup the 12 users and ensure they are correct
+ for user in self.users:
+ (result, out, err) = user["createUserFn"](user)
+
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+ if 'unix' in user["name"]:
+ self.assertIn("Modified User '%s' successfully" % user["name"],
+ out)
+ else:
+ self.assertIn("User '%s' added successfully" % user["name"],
+ out)
+
+ user["checkUserFn"](user)
+
+ def tearDown(self):
+ super().tearDown()
+ # clean up all the left over users, just in case
+ for user in self.users:
+ if self._find_user(user["name"]):
+ self.runsubcmd("user", "delete", user["name"])
+ lp = env_loadparm()
+ # second run of this test
+ # the cache is still there and '--cache-ldb-initialize'
+ # will fail
+ cachedb = lp.private_path("user-syncpasswords-cache.ldb")
+ if os.path.exists(cachedb):
+ os.remove(cachedb)
+ lp.set('template homedir', self.template_homedir)
+
+ def test_newuser(self):
+ # try to add all the users again, this should fail
+ for user in self.users:
+ (result, out, err) = self._create_user(user)
+ self.assertCmdFail(result, "Ensure that create user fails")
+ self.assertIn("LDAP error 68 LDAP_ENTRY_ALREADY_EXISTS", err)
+
+ # try to delete all the 4 users we just added
+ for user in self.users:
+ (result, out, err) = self.runsubcmd("user", "delete", user["name"])
+ self.assertCmdSuccess(result, out, err, "Can we delete users")
+ found = self._find_user(user["name"])
+ self.assertIsNone(found)
+
+ # test adding users with --use-username-as-cn
+ for user in self.users:
+ (result, out, err) = self.runsubcmd("user", "create", user["name"], user["password"],
+ "--use-username-as-cn",
+ "--surname=%s" % user["surname"],
+ "--given-name=%s" % user["given-name"],
+ "--job-title=%s" % user["job-title"],
+ "--department=%s" % user["department"],
+ "--description=%s" % user["description"],
+ "--company=%s" % user["company"],
+ "-H", "ldap://%s" % os.environ["DC_SERVER"],
+ "-U%s%%%s" % (os.environ["DC_USERNAME"], os.environ["DC_PASSWORD"]))
+
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+ self.assertIn("User '%s' added successfully" % user["name"], out)
+
+ found = self._find_user(user["name"])
+
+ self.assertEqual("%s" % found.get("cn"), "%(name)s" % user)
+ self.assertEqual("%s" % found.get("name"), "%(name)s" % user)
+
+ def test_newuser_weak_password(self):
+ # Ensure that when we try to create a user over LDAP (thus no
+ # transactions) and the password is too weak, we do not get a
+ # half-created account.
+
+ def cleanup_user(username):
+ try:
+ self.samdb.deleteuser(username)
+ except Exception as err:
+ estr = err.args[0]
+ if 'Unable to find user' not in estr:
+ raise
+
+ server = os.environ['DC_SERVER']
+ dc_username = os.environ['DC_USERNAME']
+ dc_password = os.environ['DC_PASSWORD']
+
+ username = self.randomName()
+ password = 'a'
+
+ self.addCleanup(cleanup_user, username)
+
+ # Try to add the user and ensure it fails.
+ result, out, err = self.runsubcmd('user', 'add',
+ username, password,
+ '-H', f'ldap://{server}',
+ f'-U{dc_username}%{dc_password}')
+ self.assertCmdFail(result)
+ self.assertIn('Failed to add user', err)
+ self.assertIn('LDAP_CONSTRAINT_VIOLATION', err)
+ self.assertIn(f'{werror.WERR_PASSWORD_RESTRICTION:08X}', err)
+
+ # Now search for the user, and make sure we don't find anything.
+ res = self.samdb.search(self.samdb.domain_dn(),
+ expression=f'(sAMAccountName={username})',
+ scope=ldb.SCOPE_SUBTREE)
+ self.assertEqual(0, len(res), 'expected not to find the user')
+
+    def _verify_supplementalCredentials(self, ldif,
+                                        min_packages=3,
+                                        max_packages=6):
+        msgs = self.samdb.parse_ldif(ldif)
+        (changetype, obj) = next(msgs)
+
+        self.assertIn("supplementalCredentials", obj, "supplementalCredentials attribute required")
+        sc_blob = obj["supplementalCredentials"][0]
+        sc = ndr_unpack(drsblobs.supplementalCredentialsBlob, sc_blob)
+
+        self.assertGreaterEqual(sc.sub.num_packages,
+                                min_packages, "min_packages check")
+        self.assertLessEqual(sc.sub.num_packages,
+                             max_packages, "max_packages check")
+
+        if max_packages == 0:
+            return
+
+        def find_package(packages, name, start_idx=0):
+            for i in range(start_idx, len(packages)):
+                if packages[i].name == name:
+                    return (i, packages[i])
+            return (None, None)
+
+        # The ordering is this
+        #
+        # Primary:Kerberos-Newer-Keys (optional)
+        # Primary:Kerberos
+        # Primary:WDigest
+        # Primary:CLEARTEXT (optional)
+        # Primary:SambaGPG (optional)
+        #
+        # And the 'Packages' package is inserted before the last
+        # other package.
+
+        nidx = 0
+        (pidx, pp) = find_package(sc.sub.packages, "Packages", start_idx=nidx)
+        self.assertIsNotNone(pp, "Packages required")
+        self.assertEqual(pidx + 1, sc.sub.num_packages - 1,
+                         "Packages needs to be at num_packages - 1")
+
+        (knidx, knp) = find_package(sc.sub.packages, "Primary:Kerberos-Newer-Keys",
+                                    start_idx=nidx)
+        if knidx is not None:
+            self.assertEqual(knidx, nidx, "Primary:Kerberos-Newer-Keys at wrong position")
+            nidx = nidx + 1
+            if nidx == pidx:
+                nidx = nidx + 1
+
+        (kidx, kp) = find_package(sc.sub.packages, "Primary:Kerberos",
+                                  start_idx=nidx)
+        self.assertIsNotNone(kp, "Primary:Kerberos required")
+        self.assertEqual(kidx, nidx, "Primary:Kerberos at wrong position")
+        nidx = nidx + 1
+        if nidx == pidx:
+            nidx = nidx + 1
+
+        (widx, wp) = find_package(sc.sub.packages, "Primary:WDigest",
+                                  start_idx=nidx)
+        self.assertIsNotNone(wp, "Primary:WDigest required")
+        self.assertEqual(widx, nidx, "Primary:WDigest at wrong position")
+        nidx = nidx + 1
+        if nidx == pidx:
+            nidx = nidx + 1
+
+        (cidx, cp) = find_package(sc.sub.packages, "Primary:CLEARTEXT",
+                                  start_idx=nidx)
+        if cidx is not None:
+            self.assertEqual(cidx, nidx, "Primary:CLEARTEXT at wrong position")
+            nidx = nidx + 1
+            if nidx == pidx:
+                nidx = nidx + 1
+
+        (gidx, gp) = find_package(sc.sub.packages, "Primary:SambaGPG",
+                                  start_idx=nidx)
+        if gidx is not None:
+            self.assertEqual(gidx, nidx, "Primary:SambaGPG at wrong position")
+            nidx = nidx + 1
+            if nidx == pidx:
+                nidx = nidx + 1
+
+        self.assertEqual(nidx, sc.sub.num_packages, "Unknown packages found")
+
+ def test_setpassword(self):
+ expect_nt_hash = bool(int(os.environ.get("EXPECT_NT_HASH", "1")))
+
+ for user in self.users:
+ newpasswd = self.random_password(16)
+ (result, out, err) = self.runsubcmd("user", "setpassword",
+ user["name"],
+ "--newpassword=%s" % newpasswd,
+ "-H", "ldap://%s" % os.environ["DC_SERVER"],
+ "-U%s%%%s" % (os.environ["DC_USERNAME"], os.environ["DC_PASSWORD"]))
+ self.assertCmdSuccess(result, out, err, "Ensure setpassword runs")
+ self.assertEqual(err, "", "setpassword with url")
+ self.assertMatch(out, "Changed password OK", "setpassword with url")
+
+ attributes = "sAMAccountName,unicodePwd,supplementalCredentials,virtualClearTextUTF8,virtualClearTextUTF16,virtualSSHA,virtualSambaGPG"
+ (result, out, err) = self.runsubcmd("user", "syncpasswords",
+ "--cache-ldb-initialize",
+ "--attributes=%s" % attributes,
+ "--decrypt-samba-gpg")
+ self.assertCmdSuccess(result, out, err, "Ensure syncpasswords --cache-ldb-initialize runs")
+ self.assertEqual(err, "", "getpassword without url")
+ cache_attrs = {
+ "objectClass": {"value": "userSyncPasswords"},
+ "samdbUrl": {},
+ "dirsyncFilter": {},
+ "dirsyncAttribute": {},
+ "dirsyncControl": {"value": "dirsync:1:0:0"},
+ "passwordAttribute": {},
+ "decryptSambaGPG": {},
+ "currentTime": {},
+ }
+ for a in cache_attrs.keys():
+ v = cache_attrs[a].get("value", "")
+ self.assertMatch(out, "%s: %s" % (a, v),
+ "syncpasswords --cache-ldb-initialize: %s: %s out[%s]" % (a, v, out))
+
+ (result, out, err) = self.runsubcmd("user", "syncpasswords", "--no-wait")
+ self.assertCmdSuccess(result, out, err, "Ensure syncpasswords --no-wait runs")
+ self.assertEqual(err, "", "syncpasswords --no-wait")
+ self.assertMatch(out, "dirsync_loop(): results 0",
+ "syncpasswords --no-wait: 'dirsync_loop(): results 0': out[%s]" % (out))
+ for user in self.users:
+ self.assertMatch(out, "sAMAccountName: %s" % (user["name"]),
+ "syncpasswords --no-wait: 'sAMAccountName': %s out[%s]" % (user["name"], out))
+
+ for user in self.users:
+ newpasswd = self.random_password(16)
+ creds = credentials.Credentials()
+ creds.set_anonymous()
+ creds.set_password(newpasswd)
+ unicodePwd = base64.b64encode(creds.get_nt_hash()).decode('utf8')
+ virtualClearTextUTF8 = base64.b64encode(get_bytes(newpasswd)).decode('utf8')
+ virtualClearTextUTF16 = base64.b64encode(get_string(newpasswd).encode('utf-16-le')).decode('utf8')
+
+ (result, out, err) = self.runsubcmd("user", "setpassword",
+ user["name"],
+ "--newpassword=%s" % newpasswd)
+ self.assertCmdSuccess(result, out, err, "Ensure setpassword runs")
+ self.assertEqual(err, "", "setpassword without url")
+ self.assertMatch(out, "Changed password OK", "setpassword without url")
+
+ (result, out, err) = self.runsubcmd("user", "syncpasswords", "--no-wait")
+ self.assertCmdSuccess(result, out, err, "Ensure syncpasswords --no-wait runs")
+ self.assertEqual(err, "", "syncpasswords --no-wait")
+ self.assertMatch(out, "dirsync_loop(): results 0",
+ "syncpasswords --no-wait: 'dirsync_loop(): results 0': out[%s]" % (out))
+ self.assertMatch(out, "sAMAccountName: %s" % (user["name"]),
+ "syncpasswords --no-wait: 'sAMAccountName': %s out[%s]" % (user["name"], out))
+ self.assertMatch(out, "# unicodePwd::: REDACTED SECRET ATTRIBUTE",
+ "getpassword '# unicodePwd::: REDACTED SECRET ATTRIBUTE': out[%s]" % out)
+ if expect_nt_hash or "virtualSambaGPG:: " in out:
+ self.assertMatch(out, "unicodePwd:: %s" % unicodePwd,
+ "getpassword unicodePwd: out[%s]" % out)
+ else:
+ self.assertNotIn("unicodePwd:: %s" % unicodePwd, out)
+ self.assertMatch(out, "# supplementalCredentials::: REDACTED SECRET ATTRIBUTE",
+ "getpassword '# supplementalCredentials::: REDACTED SECRET ATTRIBUTE': out[%s]" % out)
+ self.assertMatch(out, "supplementalCredentials:: ",
+ "getpassword supplementalCredentials: out[%s]" % out)
+ if "virtualSambaGPG:: " in out:
+ self.assertMatch(out, "virtualClearTextUTF8:: %s" % virtualClearTextUTF8,
+ "getpassword virtualClearTextUTF8: out[%s]" % out)
+ self.assertMatch(out, "virtualClearTextUTF16:: %s" % virtualClearTextUTF16,
+ "getpassword virtualClearTextUTF16: out[%s]" % out)
+ self.assertMatch(out, "virtualSSHA: ",
+ "getpassword virtualSSHA: out[%s]" % out)
+
+ (result, out, err) = self.runsubcmd("user", "getpassword",
+ user["name"],
+ "--attributes=%s" % attributes,
+ "--decrypt-samba-gpg")
+ self.assertCmdSuccess(result, out, err, "Ensure getpassword runs")
+ self.assertEqual(err, "Got password OK\n", "getpassword without url")
+ self.assertMatch(out, "sAMAccountName: %s" % (user["name"]),
+ "getpassword: 'sAMAccountName': %s out[%s]" % (user["name"], out))
+ if expect_nt_hash or "virtualSambaGPG:: " in out:
+ self.assertMatch(out, "unicodePwd:: %s" % unicodePwd,
+ "getpassword unicodePwd: out[%s]" % out)
+ else:
+ self.assertNotIn("unicodePwd:: %s" % unicodePwd, out)
+ self.assertMatch(out, "supplementalCredentials:: ",
+ "getpassword supplementalCredentials: out[%s]" % out)
+ self._verify_supplementalCredentials(out)
+ if "virtualSambaGPG:: " in out:
+ self.assertMatch(out, "virtualClearTextUTF8:: %s" % virtualClearTextUTF8,
+ "getpassword virtualClearTextUTF8: out[%s]" % out)
+ self.assertMatch(out, "virtualClearTextUTF16:: %s" % virtualClearTextUTF16,
+ "getpassword virtualClearTextUTF16: out[%s]" % out)
+ self.assertMatch(out, "virtualSSHA: ",
+ "getpassword virtualSSHA: out[%s]" % out)
+
+ for user in self.users:
+ newpasswd = self.random_password(16)
+ (result, out, err) = self.runsubcmd("user", "setpassword",
+ user["name"],
+ "--newpassword=%s" % newpasswd,
+ "--must-change-at-next-login",
+ "-H", "ldap://%s" % os.environ["DC_SERVER"],
+ "-U%s%%%s" % (os.environ["DC_USERNAME"], os.environ["DC_PASSWORD"]))
+ self.assertCmdSuccess(result, out, err, "Ensure setpassword runs")
+ self.assertEqual(err, "", "setpassword with forced change")
+ self.assertMatch(out, "Changed password OK", "setpassword with forced change")
+
+ def test_setexpiry(self):
+ for user in self.users:
+ twodays = time.time() + (2 * 24 * 60 * 60)
+
+ (result, out, err) = self.runsubcmd("user", "setexpiry", user["name"],
+ "--days=2",
+ "-H", "ldap://%s" % os.environ["DC_SERVER"],
+ "-U%s%%%s" % (os.environ["DC_USERNAME"], os.environ["DC_PASSWORD"]))
+ self.assertCmdSuccess(result, out, err, "Can we run setexpiry with names")
+ self.assertIn("Expiry for user '%s' set to 2 days." % user["name"], out)
+
+ found = self._find_user(user["name"])
+
+ expires = nttime2unix(int("%s" % found.get("accountExpires")))
+ self.assertWithin(expires, twodays, 5, "Ensure account expires is within 5 seconds of the expected time")
+
+ # TODO: re-enable this after the filter case is sorted out
+ if "filters are broken, bail now":
+ return
+
+ # now run the expiration based on a filter
+ fourdays = time.time() + (4 * 24 * 60 * 60)
+ (result, out, err) = self.runsubcmd("user", "setexpiry",
+ "--filter", "(&(objectClass=user)(company=comp2))",
+ "--days=4",
+ "-H", "ldap://%s" % os.environ["DC_SERVER"],
+ "-U%s%%%s" % (os.environ["DC_USERNAME"], os.environ["DC_PASSWORD"]))
+ self.assertCmdSuccess(result, out, err, "Can we run setexpiry with a filter")
+
+ for user in self.users:
+ found = self._find_user(user["name"])
+ if ("%s" % found.get("company")) == "comp2":
+ expires = nttime2unix(int("%s" % found.get("accountExpires")))
+ self.assertWithin(expires, fourdays, 5, "Ensure account expires is within 5 seconds of the expected time")
+ else:
+ expires = nttime2unix(int("%s" % found.get("accountExpires")))
+ self.assertWithin(expires, twodays, 5, "Ensure account expires is within 5 seconds of the expected time")
+
+ def test_list(self):
+ (result, out, err) = self.runsubcmd("user", "list",
+ "-H", "ldap://%s" % os.environ["DC_SERVER"],
+ "-U%s%%%s" % (os.environ["DC_USERNAME"],
+ os.environ["DC_PASSWORD"]))
+ self.assertCmdSuccess(result, out, err, "Error running list")
+
+ search_filter = ("(&(objectClass=user)(userAccountControl:%s:=%u))" %
+ (ldb.OID_COMPARATOR_AND, dsdb.UF_NORMAL_ACCOUNT))
+
+ userlist = self.samdb.search(base=self.samdb.domain_dn(),
+ scope=ldb.SCOPE_SUBTREE,
+ expression=search_filter,
+ attrs=["samaccountname"])
+
+ self.assertTrue(len(userlist) > 0, "no users found in samdb")
+
+ for userobj in userlist:
+ name = str(userobj.get("samaccountname", idx=0))
+ self.assertMatch(out, name,
+ "user '%s' not found" % name)
+
+
+ def test_list_base_dn(self):
+ base_dn = "CN=Users"
+ (result, out, err) = self.runsubcmd("user", "list", "-b", base_dn,
+ "-H", "ldap://%s" % os.environ["DC_SERVER"],
+ "-U%s%%%s" % (os.environ["DC_USERNAME"],
+ os.environ["DC_PASSWORD"]))
+ self.assertCmdSuccess(result, out, err, "Error running list")
+
+ search_filter = ("(&(objectClass=user)(userAccountControl:%s:=%u))" %
+ (ldb.OID_COMPARATOR_AND, dsdb.UF_NORMAL_ACCOUNT))
+
+ userlist = self.samdb.search(base=self.samdb.normalize_dn_in_domain(base_dn),
+ scope=ldb.SCOPE_SUBTREE,
+ expression=search_filter,
+ attrs=["samaccountname"])
+
+ self.assertTrue(len(userlist) > 0, "no users found in samdb")
+
+ for userobj in userlist:
+ name = str(userobj.get("samaccountname", idx=0))
+ self.assertMatch(out, name,
+ "user '%s' not found" % name)
+
+ def test_list_full_dn(self):
+ (result, out, err) = self.runsubcmd("user", "list", "--full-dn",
+ "-H", "ldap://%s" % os.environ["DC_SERVER"],
+ "-U%s%%%s" % (os.environ["DC_USERNAME"],
+ os.environ["DC_PASSWORD"]))
+ self.assertCmdSuccess(result, out, err, "Error running list")
+
+ search_filter = ("(&(objectClass=user)(userAccountControl:%s:=%u))" %
+ (ldb.OID_COMPARATOR_AND, dsdb.UF_NORMAL_ACCOUNT))
+
+ userlist = self.samdb.search(base=self.samdb.domain_dn(),
+ scope=ldb.SCOPE_SUBTREE,
+ expression=search_filter,
+ attrs=["dn"])
+
+ self.assertTrue(len(userlist) > 0, "no users found in samdb")
+
+ for userobj in userlist:
+ name = str(userobj.get("dn", idx=0))
+ self.assertMatch(out, name,
+ "user '%s' not found" % name)
+
+ def test_list_hide_expired(self):
+ expire_username = "expireUser"
+ expire_user = self._randomUser({"name": expire_username})
+ self._create_user(expire_user)
+
+ (result, out, err) = self.runsubcmd(
+ "user",
+ "list",
+ "--hide-expired",
+ "-H",
+ "ldap://%s" % os.environ["DC_SERVER"],
+ "-U%s%%%s" % (os.environ["DC_USERNAME"],
+ os.environ["DC_PASSWORD"]))
+ self.assertCmdSuccess(result, out, err, "Error running list")
+ self.assertTrue(expire_username in out,
+ "user '%s' not found" % expire_username)
+
+ # user will be expired one second ago
+ self.samdb.setexpiry(
+ "(sAMAccountname=%s)" % expire_username,
+ -1,
+ False)
+
+ (result, out, err) = self.runsubcmd(
+ "user",
+ "list",
+ "--hide-expired",
+ "-H",
+ "ldap://%s" % os.environ["DC_SERVER"],
+ "-U%s%%%s" % (os.environ["DC_USERNAME"],
+ os.environ["DC_PASSWORD"]))
+ self.assertCmdSuccess(result, out, err, "Error running list")
+ self.assertFalse(expire_username in out,
+ "user '%s' found" % expire_username)
+
+ self.samdb.deleteuser(expire_username)
+
+ def test_list_hide_disabled(self):
+ disable_username = "disableUser"
+ disable_user = self._randomUser({"name": disable_username})
+ self._create_user(disable_user)
+
+ (result, out, err) = self.runsubcmd(
+ "user",
+ "list",
+ "--hide-disabled",
+ "-H",
+ "ldap://%s" % os.environ["DC_SERVER"],
+ "-U%s%%%s" % (os.environ["DC_USERNAME"],
+ os.environ["DC_PASSWORD"]))
+ self.assertCmdSuccess(result, out, err, "Error running list")
+ self.assertTrue(disable_username in out,
+ "user '%s' not found" % disable_username)
+
+ self.samdb.disable_account("(sAMAccountname=%s)" % disable_username)
+
+ (result, out, err) = self.runsubcmd(
+ "user",
+ "list",
+ "--hide-disabled",
+ "-H",
+ "ldap://%s" % os.environ["DC_SERVER"],
+ "-U%s%%%s" % (os.environ["DC_USERNAME"],
+ os.environ["DC_PASSWORD"]))
+ self.assertCmdSuccess(result, out, err, "Error running list")
+ self.assertFalse(disable_username in out,
+ "user '%s' found" % disable_username)
+
+ self.samdb.deleteuser(disable_username)
+
+ def test_show(self):
+ for user in self.users:
+ (result, out, err) = self.runsubcmd(
+ "user", "show", user["name"],
+ "--attributes=sAMAccountName,company",
+ "-H", "ldap://%s" % os.environ["DC_SERVER"],
+ "-U%s%%%s" % (os.environ["DC_USERNAME"],
+ os.environ["DC_PASSWORD"]))
+ self.assertCmdSuccess(result, out, err, "Error running show")
+
+ expected_out = """dn: CN=%s %s,CN=Users,%s
+company: %s
+sAMAccountName: %s
+
+""" % (user["given-name"], user["surname"], self.samdb.domain_dn(),
+ user["company"], user["name"])
+
+ self.assertEqual(out, expected_out,
+ "Unexpected show output for user '%s'" %
+ user["name"])
+
+ time_attrs = [
+ "name", # test that invalid values are just ignored
+ "whenCreated",
+ "whenChanged",
+ "accountExpires",
+ "badPasswordTime",
+ "lastLogoff",
+ "lastLogon",
+ "lastLogonTimestamp",
+ "lockoutTime",
+ "msDS-UserPasswordExpiryTimeComputed",
+ "pwdLastSet",
+ ]
+
+ attrs = []
+ for ta in time_attrs:
+ attrs.append(ta)
+ for fm in ["GeneralizedTime", "UnixTime", "TimeSpec"]:
+ attrs.append("%s;format=%s" % (ta, fm))
+
+ (result, out, err) = self.runsubcmd(
+ "user", "show", user["name"],
+ "--attributes=%s" % ",".join(attrs),
+ "-H", "ldap://%s" % os.environ["DC_SERVER"],
+ "-U%s%%%s" % (os.environ["DC_USERNAME"],
+ os.environ["DC_PASSWORD"]))
+ self.assertCmdSuccess(result, out, err, "Error running show")
+
+ self.assertIn(";format=GeneralizedTime", out)
+ self.assertIn(";format=UnixTime", out)
+ self.assertIn(";format=TimeSpec", out)
+
+ self.assertIn("name: ", out)
+ self.assertNotIn("name;format=GeneralizedTime: ", out)
+ self.assertNotIn("name;format=UnixTime: ", out)
+ self.assertNotIn("name;format=TimeSpec: ", out)
+
+ self.assertIn("whenCreated: 20", out)
+ self.assertIn("whenCreated;format=GeneralizedTime: 20", out)
+ self.assertIn("whenCreated;format=UnixTime: 1", out)
+ self.assertIn("whenCreated;format=TimeSpec: 1", out)
+
+ self.assertIn("whenChanged: 20", out)
+ self.assertIn("whenChanged;format=GeneralizedTime: 20", out)
+ self.assertIn("whenChanged;format=UnixTime: 1", out)
+ self.assertIn("whenChanged;format=TimeSpec: 1", out)
+
+ self.assertIn("accountExpires: 9223372036854775807", out)
+ self.assertNotIn("accountExpires;format=GeneralizedTime: ", out)
+ self.assertNotIn("accountExpires;format=UnixTime: ", out)
+ self.assertNotIn("accountExpires;format=TimeSpec: ", out)
+
+ self.assertIn("badPasswordTime: 0", out)
+ self.assertNotIn("badPasswordTime;format=GeneralizedTime: ", out)
+ self.assertNotIn("badPasswordTime;format=UnixTime: ", out)
+ self.assertNotIn("badPasswordTime;format=TimeSpec: ", out)
+
+ self.assertIn("lastLogoff: 0", out)
+ self.assertNotIn("lastLogoff;format=GeneralizedTime: ", out)
+ self.assertNotIn("lastLogoff;format=UnixTime: ", out)
+ self.assertNotIn("lastLogoff;format=TimeSpec: ", out)
+
+ self.assertIn("lastLogon: 0", out)
+ self.assertNotIn("lastLogon;format=GeneralizedTime: ", out)
+ self.assertNotIn("lastLogon;format=UnixTime: ", out)
+ self.assertNotIn("lastLogon;format=TimeSpec: ", out)
+
+ # If a specified attribute is not available on a user object
+ # it's silently omitted.
+ self.assertNotIn("lastLogonTimestamp:", out)
+ self.assertNotIn("lockoutTime:", out)
+
+ self.assertIn("msDS-UserPasswordExpiryTimeComputed: 1", out)
+ self.assertIn("msDS-UserPasswordExpiryTimeComputed;format=GeneralizedTime: 20", out)
+ self.assertIn("msDS-UserPasswordExpiryTimeComputed;format=UnixTime: 1", out)
+ self.assertIn("msDS-UserPasswordExpiryTimeComputed;format=TimeSpec: 1", out)
+
+ self.assertIn("pwdLastSet: 1", out)
+ self.assertIn("pwdLastSet;format=GeneralizedTime: 20", out)
+ self.assertIn("pwdLastSet;format=UnixTime: 1", out)
+ self.assertIn("pwdLastSet;format=TimeSpec: 1", out)
+
+ out_msgs = self.samdb.parse_ldif(out)
+ out_msg = next(out_msgs)[1]
+
+ self.assertIn("whenCreated", out_msg)
+ when_created_str = str(out_msg["whenCreated"][0])
+ self.assertIn("whenCreated;format=GeneralizedTime", out_msg)
+ self.assertEqual(str(out_msg["whenCreated;format=GeneralizedTime"][0]), when_created_str)
+ when_created_time = ldb.string_to_time(when_created_str)
+ self.assertIn("whenCreated;format=UnixTime", out_msg)
+ self.assertEqual(str(out_msg["whenCreated;format=UnixTime"][0]), str(when_created_time))
+ self.assertIn("whenCreated;format=TimeSpec", out_msg)
+ self.assertEqual(str(out_msg["whenCreated;format=TimeSpec"][0]),
+ "%d.000000000" % (when_created_time))
+
+ self.assertIn("whenChanged", out_msg)
+ when_changed_str = str(out_msg["whenChanged"][0])
+ self.assertIn("whenChanged;format=GeneralizedTime", out_msg)
+ self.assertEqual(str(out_msg["whenChanged;format=GeneralizedTime"][0]), when_changed_str)
+ when_changed_time = ldb.string_to_time(when_changed_str)
+ self.assertIn("whenChanged;format=UnixTime", out_msg)
+ self.assertEqual(str(out_msg["whenChanged;format=UnixTime"][0]), str(when_changed_time))
+ self.assertIn("whenChanged;format=TimeSpec", out_msg)
+ self.assertEqual(str(out_msg["whenChanged;format=TimeSpec"][0]),
+ "%d.000000000" % (when_changed_time))
+
+ self.assertIn("pwdLastSet;format=GeneralizedTime", out_msg)
+ pwd_last_set_str = str(out_msg["pwdLastSet;format=GeneralizedTime"][0])
+ pwd_last_set_time = ldb.string_to_time(pwd_last_set_str)
+ self.assertIn("pwdLastSet;format=UnixTime", out_msg)
+ self.assertEqual(str(out_msg["pwdLastSet;format=UnixTime"][0]), str(pwd_last_set_time))
+ self.assertIn("pwdLastSet;format=TimeSpec", out_msg)
+ self.assertIn("%d." % pwd_last_set_time, str(out_msg["pwdLastSet;format=TimeSpec"][0]))
+ self.assertNotIn(".000000000", str(out_msg["pwdLastSet;format=TimeSpec"][0]))
+
+ # assert that the pwd has been set in the minute after user creation
+ self.assertGreaterEqual(pwd_last_set_time, when_created_time)
+ self.assertLess(pwd_last_set_time, when_created_time + 60)
+
+ self.assertIn("msDS-UserPasswordExpiryTimeComputed;format=GeneralizedTime", out_msg)
+ pwd_expires_str = str(out_msg["msDS-UserPasswordExpiryTimeComputed;format=GeneralizedTime"][0])
+ pwd_expires_time = ldb.string_to_time(pwd_expires_str)
+ self.assertIn("msDS-UserPasswordExpiryTimeComputed;format=UnixTime", out_msg)
+ self.assertEqual(str(out_msg["msDS-UserPasswordExpiryTimeComputed;format=UnixTime"][0]), str(pwd_expires_time))
+ self.assertIn("msDS-UserPasswordExpiryTimeComputed;format=TimeSpec", out_msg)
+ self.assertIn("%d." % pwd_expires_time, str(out_msg["msDS-UserPasswordExpiryTimeComputed;format=TimeSpec"][0]))
+ self.assertNotIn(".000000000", str(out_msg["msDS-UserPasswordExpiryTimeComputed;format=TimeSpec"][0]))
+
+ # assert that the pwd expires after it was set
+ self.assertGreater(pwd_expires_time, pwd_last_set_time)
+
+ def test_move(self):
+ full_ou_dn = str(self.samdb.normalize_dn_in_domain("OU=movetest_usr"))
+ self.addCleanup(self.samdb.delete, full_ou_dn, ["tree_delete:1"])
+
+ (result, out, err) = self.runsubcmd("ou", "add", full_ou_dn)
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "There shouldn't be any error message")
+ self.assertIn('Added ou "%s"' % full_ou_dn, out)
+
+ for user in self.users:
+ (result, out, err) = self.runsubcmd(
+ "user", "move", user["name"], full_ou_dn)
+ self.assertCmdSuccess(result, out, err, "Error running move")
+ self.assertIn('Moved user "%s" into "%s"' %
+ (user["name"], full_ou_dn), out)
+
+ # Should fail as users objects are in OU
+ (result, out, err) = self.runsubcmd("ou", "delete", full_ou_dn)
+ self.assertCmdFail(result)
+ self.assertIn(("subtree_delete: Unable to delete a non-leaf node "
+ "(it has %d children)!") % len(self.users), err)
+
+ for user in self.users:
+ new_dn = "CN=Users,%s" % self.samdb.domain_dn()
+ (result, out, err) = self.runsubcmd(
+ "user", "move", user["name"], new_dn)
+ self.assertCmdSuccess(result, out, err, "Error running move")
+ self.assertIn('Moved user "%s" into "%s"' %
+ (user["name"], new_dn), out)
+
+ def test_rename_surname_initials_givenname(self):
+ """rename the existing surname and given name and add missing
+ initials, then remove them, for all users"""
+ for user in self.users:
+ new_givenname = "new_given_name_of_" + user["name"]
+ new_initials = "A"
+ new_surname = "new_surname_of_" + user["name"]
+ found = self._find_user(user["name"])
+ old_cn = str(found.get("cn"))
+
+ # rename given name, initials and surname
+ (result, out, err) = self.runsubcmd("user", "rename", user["name"],
+ "--surname=%s" % new_surname,
+ "--initials=%s" % new_initials,
+ "--given-name=%s" % new_givenname)
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+ self.assertIn('successfully', out)
+
+ found = self._find_user(user["name"])
+ self.assertEqual("%s" % found.get("givenName"), new_givenname)
+ self.assertEqual("%s" % found.get("initials"), new_initials)
+ self.assertEqual("%s" % found.get("sn"), new_surname)
+ self.assertEqual("%s" % found.get("name"),
+ "%s %s. %s" % (new_givenname, new_initials, new_surname))
+ self.assertEqual("%s" % found.get("cn"),
+ "%s %s. %s" % (new_givenname, new_initials, new_surname))
+
+ # remove given name, initials and surname
+ (result, out, err) = self.runsubcmd("user", "rename", user["name"],
+ "--surname=",
+ "--initials=",
+ "--given-name=")
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+ self.assertIn('successfully', out)
+
+ found = self._find_user(user["name"])
+ self.assertEqual(found.get("givenName"), None)
+ self.assertEqual(found.get("initials"), None)
+ self.assertEqual(found.get("sn"), None)
+ self.assertEqual("%s" % found.get("cn"), user["name"])
+
+ # reset changes (initials are removed)
+ (result, out, err) = self.runsubcmd("user", "rename", user["name"],
+ "--surname=%(surname)s" % user,
+ "--given-name=%(given-name)s" % user)
+ self.assertCmdSuccess(result, out, err)
+
+ if old_cn:
+ (result, out, err) = self.runsubcmd("user", "rename", user["name"],
+ "--force-new-cn=%s" % old_cn)
+
+ def test_rename_cn_samaccountname(self):
+ """rename and try to remove the cn and the samaccount of all users"""
+ for user in self.users:
+ new_cn = "new_cn_of_" + user["name"]
+ new_samaccountname = "new_samaccount_of_" + user["name"]
+ new_surname = "new_surname_of_" + user["name"]
+
+ # rename cn
+ (result, out, err) = self.runsubcmd("user", "rename", user["name"],
+ "--samaccountname=%s"
+ % new_samaccountname,
+ "--force-new-cn=%s" % new_cn)
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+ self.assertIn('successfully', out)
+
+ found = self._find_user(new_samaccountname)
+ self.assertEqual("%s" % found.get("cn"), new_cn)
+ self.assertEqual("%s" % found.get("sAMAccountName"),
+ new_samaccountname)
+
+ # changing the surname has no effect to the cn
+ (result, out, err) = self.runsubcmd("user", "rename", new_samaccountname,
+ "--surname=%s" % new_surname)
+ self.assertCmdSuccess(result, out, err)
+
+ found = self._find_user(new_samaccountname)
+ self.assertEqual("%s" % found.get("cn"), new_cn)
+
+ # trying to remove cn (throws an error)
+ (result, out, err) = self.runsubcmd("user", "rename",
+ new_samaccountname,
+ "--force-new-cn=")
+ self.assertCmdFail(result)
+ self.assertIn('Failed to rename user', err)
+ self.assertIn("delete protected attribute", err)
+
+            # trying to remove the samaccountname (throws an error)
+ (result, out, err) = self.runsubcmd("user", "rename",
+ new_samaccountname,
+ "--samaccountname=")
+ self.assertCmdFail(result)
+ self.assertIn('Failed to rename user', err)
+ self.assertIn('delete protected attribute', err)
+
+ # reset changes (cn must be the name)
+ (result, out, err) = self.runsubcmd("user", "rename", new_samaccountname,
+ "--samaccountname=%(name)s"
+ % user,
+ "--force-new-cn=%(name)s" % user)
+ self.assertCmdSuccess(result, out, err)
+
+ def test_rename_standard_cn(self):
+ """reset the cn of all users to the standard"""
+ for user in self.users:
+ new_cn = "new_cn_of_" + user["name"]
+ new_givenname = "new_given_name_of_" + user["name"]
+ new_initials = "A"
+ new_surname = "new_surname_of_" + user["name"]
+
+ # set different cn
+ (result, out, err) = self.runsubcmd("user", "rename", user["name"],
+ "--force-new-cn=%s" % new_cn)
+ self.assertCmdSuccess(result, out, err)
+
+ # remove given name, initials and surname
+ (result, out, err) = self.runsubcmd("user", "rename", user["name"],
+ "--surname=",
+ "--initials=",
+ "--given-name=")
+ self.assertCmdSuccess(result, out, err)
+
+ # reset the CN (no given name, initials or surname --> samaccountname)
+ (result, out, err) = self.runsubcmd("user", "rename", user["name"],
+ "--reset-cn")
+
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+ self.assertIn('successfully', out)
+
+ found = self._find_user(user["name"])
+ self.assertEqual("%s" % found.get("cn"), user["name"])
+
+ # set given name, initials and surname and set different cn
+ (result, out, err) = self.runsubcmd("user", "rename", user["name"],
+ "--force-new-cn=%s" % new_cn,
+ "--surname=%s" % new_surname,
+ "--initials=%s" % new_initials,
+ "--given-name=%s" % new_givenname)
+ self.assertCmdSuccess(result, out, err)
+
+ # reset the CN (given name, initials or surname are given --> given name)
+ (result, out, err) = self.runsubcmd("user", "rename", user["name"],
+ "--reset-cn")
+
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+ self.assertIn('successfully', out)
+
+ found = self._find_user(user["name"])
+ self.assertEqual("%s" % found.get("cn"),
+ "%s %s. %s" % (new_givenname, new_initials, new_surname))
+
+ # reset changes
+ (result, out, err) = self.runsubcmd("user", "rename", user["name"],
+ "--reset-cn",
+ "--initials=",
+ "--surname=%(surname)s" % user,
+ "--given-name=%(given-name)s" % user)
+ self.assertCmdSuccess(result, out, err)
+
+ def test_rename_mailaddress_displayname(self):
+ for user in self.users:
+ new_mail = "new_mailaddress_of_" + user["name"]
+ new_displayname = "new displayname of " + user["name"]
+
+ # change mail and displayname
+ (result, out, err) = self.runsubcmd("user", "rename", user["name"],
+ "--mail-address=%s"
+ % new_mail,
+ "--display-name=%s"
+ % new_displayname)
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+ self.assertIn('successfully', out)
+
+ found = self._find_user(user["name"])
+ self.assertEqual("%s" % found.get("mail"), new_mail)
+ self.assertEqual("%s" % found.get("displayName"), new_displayname)
+
+ # remove mail and displayname
+ (result, out, err) = self.runsubcmd("user", "rename", user["name"],
+ "--mail-address=",
+ "--display-name=")
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+ self.assertIn('successfully', out)
+
+ found = self._find_user(user["name"])
+ self.assertEqual(found.get("mail"), None)
+ self.assertEqual(found.get("displayName"), None)
+
+ def test_rename_upn(self):
+ """rename upn of all users"""
+ for user in self.users:
+ found = self._find_user(user["name"])
+ old_upn = "%s" % found.get("userPrincipalName")
+ valid_suffix = old_upn.split('@')[1] # samba.example.com
+
+ valid_new_upn = "new_%s@%s" % (user["name"], valid_suffix)
+ invalid_new_upn = "%s@invalid.suffix" + user["name"]
+
+ # trying to set invalid upn
+ (result, out, err) = self.runsubcmd("user", "rename", user["name"],
+ "--upn=%s"
+ % invalid_new_upn)
+ self.assertCmdFail(result)
+ self.assertIn('is not a valid upn', err)
+
+ # set valid upn
+ (result, out, err) = self.runsubcmd("user", "rename", user["name"],
+ "--upn=%s"
+ % valid_new_upn)
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+ self.assertIn('successfully', out)
+
+ found = self._find_user(user["name"])
+ self.assertEqual("%s" % found.get("userPrincipalName"), valid_new_upn)
+
+ # trying to remove upn
+ (result, out, err) = self.runsubcmd("user", "rename", user["name"],
+ "--upn=%s")
+ self.assertCmdFail(result)
+ self.assertIn('is not a valid upn', err)
+
+ # reset upn
+ (result, out, err) = self.runsubcmd("user", "rename", user["name"],
+ "--upn=%s" % old_upn)
+ self.assertCmdSuccess(result, out, err)
+
+ def test_getpwent(self):
+ try:
+ import pwd
+ except ImportError:
+ self.skipTest("Skipping getpwent test, no 'pwd' module available")
+ return
+
+ # get the current user's data for the test
+ uid = os.geteuid()
+ try:
+ u = pwd.getpwuid(uid)
+ except KeyError:
+ self.skipTest("Skipping getpwent test, current EUID not found in NSS")
+ return
+
+
+        # 'samba-tool user create' fails for a user whose GECOS is empty when
+        # no --gecos is given on the command line and the passwd entry has
+        # none either, so make sure we pass some contents.
+
+ gecos = u[4]
+ if (gecos is None or len(gecos) == 0):
+ gecos = "Foo GECOS"
+ user = self._randomPosixUser({
+ "name": u[0],
+ "uid": u[0],
+ "uidNumber": u[2],
+ "gidNumber": u[3],
+ "gecos": gecos,
+ "loginShell": u[6],
+ })
+
+ # Remove user if it already exists
+ if self._find_user(u[0]):
+ self.runsubcmd("user", "delete", u[0])
+ # check if --rfc2307-from-nss sets the same values as we got from pwd.getpwuid()
+ (result, out, err) = self.runsubcmd("user", "create", user["name"], user["password"],
+ "--surname=%s" % user["surname"],
+ "--given-name=%s" % user["given-name"],
+ "--job-title=%s" % user["job-title"],
+ "--department=%s" % user["department"],
+ "--description=%s" % user["description"],
+ "--company=%s" % user["company"],
+ "--gecos=%s" % user["gecos"],
+ "--rfc2307-from-nss",
+ "-H", "ldap://%s" % os.environ["DC_SERVER"],
+ "-U%s%%%s" % (os.environ["DC_USERNAME"], os.environ["DC_PASSWORD"]))
+
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+ self.assertIn("User '%s' added successfully" % user["name"], out)
+
+ self._check_posix_user(user)
+ self.runsubcmd("user", "delete", user["name"])
+
+ # Check if overriding the attributes from NSS with explicit values works
+ #
+ # get a user with all random posix attributes
+ user = self._randomPosixUser({"name": u[0]})
+
+ # Remove user if it already exists
+ if self._find_user(u[0]):
+ self.runsubcmd("user", "delete", u[0])
+ # create a user with posix attributes from nss but override all of them with the
+ # random ones just obtained
+ (result, out, err) = self.runsubcmd("user", "create", user["name"], user["password"],
+ "--surname=%s" % user["surname"],
+ "--given-name=%s" % user["given-name"],
+ "--job-title=%s" % user["job-title"],
+ "--department=%s" % user["department"],
+ "--description=%s" % user["description"],
+ "--company=%s" % user["company"],
+ "--rfc2307-from-nss",
+ "--gecos=%s" % user["gecos"],
+ "--login-shell=%s" % user["loginShell"],
+ "--uid=%s" % user["uid"],
+ "--uid-number=%s" % user["uidNumber"],
+ "--gid-number=%s" % user["gidNumber"],
+ "-H", "ldap://%s" % os.environ["DC_SERVER"],
+ "-U%s%%%s" % (os.environ["DC_USERNAME"], os.environ["DC_PASSWORD"]))
+
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+ self.assertIn("User '%s' added successfully" % user["name"], out)
+
+ self._check_posix_user(user)
+ self.runsubcmd("user", "delete", user["name"])
+
+ # Test: samba-tool user unlock
+ # This test does not verify that the command unlocks the user, it just
+ # tests the command itself. The unlock test, which unlocks locked users,
+ # is located in the 'samba4.ldap.password_lockout' test in
+ # source4/dsdb/tests/python/password_lockout.py
+ def test_unlock(self):
+
+ # try to unlock a nonexistent user, this should fail
+ nonexistentusername = "userdoesnotexist"
+ (result, out, err) = self.runsubcmd(
+ "user", "unlock", nonexistentusername)
+ self.assertCmdFail(result, "Ensure that unlock nonexistent user fails")
+ self.assertIn("Failed to unlock user '%s'" % nonexistentusername, err)
+ self.assertIn("Unable to find user", err)
+
+ # try to unlock with insufficient permissions, this should fail
+ unprivileged_username = "unprivilegedunlockuser"
+ unlocktest_username = "usertounlock"
+
+ self.runsubcmd("user", "add", unprivileged_username, "Passw0rd")
+ self.runsubcmd("user", "add", unlocktest_username, "Passw0rd")
+
+ (result, out, err) = self.runsubcmd(
+ "user", "unlock", unlocktest_username,
+ "-H", "ldap://%s" % os.environ["DC_SERVER"],
+ "-U%s%%%s" % (unprivileged_username,
+ "Passw0rd"))
+ self.assertCmdFail(result, "Fail with LDAP_INSUFFICIENT_ACCESS_RIGHTS")
+ self.assertIn("Failed to unlock user '%s'" % unlocktest_username, err)
+ self.assertIn("LDAP error 50 LDAP_INSUFFICIENT_ACCESS_RIGHTS", err)
+
+ self.runsubcmd("user", "delete", unprivileged_username)
+ self.runsubcmd("user", "delete", unlocktest_username)
+
+ # run unlock against test users
+ for user in self.users:
+ (result, out, err) = self.runsubcmd(
+ "user", "unlock", user["name"])
+ self.assertCmdSuccess(result, out, err, "Error running user unlock")
+ self.assertEqual(err, "", "Shouldn't be any error messages")
+
+ def _randomUser(self, base=None):
+ """create a user with random attribute values, you can specify base attributes"""
+ if base is None:
+ base = {}
+ user = {
+ "name": self.randomName(),
+ "password": self.random_password(16),
+ "surname": self.randomName(),
+ "given-name": self.randomName(),
+ "job-title": self.randomName(),
+ "department": self.randomName(),
+ "company": self.randomName(),
+ "description": self.randomName(count=100),
+ "createUserFn": self._create_user,
+ "checkUserFn": self._check_user,
+ }
+ user.update(base)
+ return user
+
+ def _randomPosixUser(self, base=None):
+ """create a user with random attribute values and additional RFC2307
+ attributes, you can specify base attributes"""
+ if base is None:
+ base = {}
+ user = self._randomUser({})
+ user.update(base)
+ posixAttributes = {
+ "uid": self.randomName(),
+ "loginShell": self.randomName(),
+ "gecos": self.randomName(),
+ "uidNumber": self.randomXid(),
+ "gidNumber": self.randomXid(),
+ "createUserFn": self._create_posix_user,
+ "checkUserFn": self._check_posix_user,
+ }
+ user.update(posixAttributes)
+ user.update(base)
+ return user
+
+ def _randomUnixUser(self, base=None):
+ """create a user with random attribute values and additional RFC2307
+ attributes, you can specify base attributes"""
+ if base is None:
+ base = {}
+ user = self._randomUser({})
+ user.update(base)
+ posixAttributes = {
+ "uidNumber": self.randomXid(),
+ "gidNumber": self.randomXid(),
+ "uid": self.randomName(),
+ "loginShell": self.randomName(),
+ "gecos": self.randomName(),
+ "createUserFn": self._create_unix_user,
+ "checkUserFn": self._check_unix_user,
+ }
+ user.update(posixAttributes)
+ user.update(base)
+ return user
+
+ def _check_user(self, user):
+ """ check if a user from SamDB has the same attributes as its template """
+ found = self._find_user(user["name"])
+
+ self.assertEqual("%s" % found.get("name"), "%(given-name)s %(surname)s" % user)
+ self.assertEqual("%s" % found.get("title"), user["job-title"])
+ self.assertEqual("%s" % found.get("company"), user["company"])
+ self.assertEqual("%s" % found.get("description"), user["description"])
+ self.assertEqual("%s" % found.get("department"), user["department"])
+
+ def _check_posix_user(self, user):
+ """ check if a posix_user from SamDB has the same attributes as its template """
+ found = self._find_user(user["name"])
+
+ self.assertEqual("%s" % found.get("loginShell"), user["loginShell"])
+ self.assertEqual("%s" % found.get("gecos"), user["gecos"])
+ self.assertEqual("%s" % found.get("uidNumber"), "%s" % user["uidNumber"])
+ self.assertEqual("%s" % found.get("gidNumber"), "%s" % user["gidNumber"])
+ self.assertEqual("%s" % found.get("uid"), user["uid"])
+ self._check_user(user)
+
+ def _check_unix_user(self, user):
+ """ check if a unix_user from SamDB has the same attributes as its
+template """
+ found = self._find_user(user["name"])
+
+ self.assertEqual("%s" % found.get("loginShell"), user["loginShell"])
+ self.assertEqual("%s" % found.get("gecos"), user["gecos"])
+ self.assertEqual("%s" % found.get("uidNumber"), "%s" %
+ user["uidNumber"])
+ self.assertEqual("%s" % found.get("gidNumber"), "%s" %
+ user["gidNumber"])
+ self.assertEqual("%s" % found.get("uid"), user["uid"])
+ self.assertIn('/home/test/', "%s" % found.get("unixHomeDirectory"))
+ self._check_user(user)
+
+ def _create_user(self, user):
+ return self.runsubcmd("user", "add", user["name"], user["password"],
+ "--surname=%s" % user["surname"],
+ "--given-name=%s" % user["given-name"],
+ "--job-title=%s" % user["job-title"],
+ "--department=%s" % user["department"],
+ "--description=%s" % user["description"],
+ "--company=%s" % user["company"],
+ "-H", "ldap://%s" % os.environ["DC_SERVER"],
+ "-U%s%%%s" % (os.environ["DC_USERNAME"], os.environ["DC_PASSWORD"]))
+
+ def _create_posix_user(self, user):
+ """ create a new user with RFC2307 attributes """
+ return self.runsubcmd("user", "create", user["name"], user["password"],
+ "--surname=%s" % user["surname"],
+ "--given-name=%s" % user["given-name"],
+ "--job-title=%s" % user["job-title"],
+ "--department=%s" % user["department"],
+ "--description=%s" % user["description"],
+ "--company=%s" % user["company"],
+ "--gecos=%s" % user["gecos"],
+ "--login-shell=%s" % user["loginShell"],
+ "--uid=%s" % user["uid"],
+ "--uid-number=%s" % user["uidNumber"],
+ "--gid-number=%s" % user["gidNumber"],
+ "-H", "ldap://%s" % os.environ["DC_SERVER"],
+ "-U%s%%%s" % (os.environ["DC_USERNAME"], os.environ["DC_PASSWORD"]))
+
+ def _create_unix_user(self, user):
+ """ Add RFC2307 attributes to a user"""
+ self._create_user(user)
+ return self.runsubcmd("user", "addunixattrs", user["name"],
+ "%s" % user["uidNumber"],
+ "--gid-number=%s" % user["gidNumber"],
+ "--gecos=%s" % user["gecos"],
+ "--login-shell=%s" % user["loginShell"],
+ "--uid=%s" % user["uid"],
+ "-H", "ldap://%s" % os.environ["DC_SERVER"],
+ "-U%s%%%s" % (os.environ["DC_USERNAME"],
+ os.environ["DC_PASSWORD"]))
+
+ def _find_user(self, name):
+ search_filter = "(&(sAMAccountName=%s)(objectCategory=%s,%s))" % (ldb.binary_encode(name), "CN=Person,CN=Schema,CN=Configuration", self.samdb.domain_dn())
+ userlist = self.samdb.search(base=self.samdb.domain_dn(),
+ scope=ldb.SCOPE_SUBTREE,
+ expression=search_filter)
+ if userlist:
+ return userlist[0]
+ else:
+ return None
diff --git a/python/samba/tests/samba_tool/user_auth_policy.py b/python/samba/tests/samba_tool/user_auth_policy.py
new file mode 100644
index 0000000..c5bdd06
--- /dev/null
+++ b/python/samba/tests/samba_tool/user_auth_policy.py
@@ -0,0 +1,86 @@
+# Unix SMB/CIFS implementation.
+#
+# Tests for samba-tool user auth policy command
+#
+# Copyright (C) Catalyst.Net Ltd. 2023
+#
+# Written by Rob van der Linde <rob@catalyst.net.nz>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from samba.netcmd.domain.models import AuthenticationPolicy, User
+
+from .silo_base import SiloTest
+
+
+class AuthPolicyCmdTestCase(SiloTest):
+ def test_assign(self):
+ """Test assigning an authentication policy to a user."""
+ self.addCleanup(self.runcmd, "user", "auth", "policy", "remove", "alice")
+ result, out, err = self.runcmd("user", "auth", "policy", "assign",
+ "alice", "--policy", "User Policy")
+ self.assertIsNone(result, msg=err)
+
+        # Assigned policy should be 'User Policy'
+ user = User.get(self.samdb, username="alice")
+ policy = AuthenticationPolicy.get(self.samdb, dn=user.assigned_policy)
+ self.assertEqual(policy.name, "User Policy")
+
+ def test_assign__invalid_policy(self):
+ """Test assigning a non-existing authentication policy to a user."""
+ result, out, err = self.runcmd("user", "auth", "policy", "assign",
+ "alice", "--policy", "doesNotExist")
+ self.assertEqual(result, -1)
+ self.assertIn("Authentication policy doesNotExist not found.", err)
+
+ def test_remove(self):
+ """Test removing the assigned authentication policy from a user."""
+ # First assign a policy, so we can test removing it.
+ self.runcmd("user", "auth", "policy", "assign", "bob", "--policy",
+ "User Policy")
+
+ # Assigned policy should be set
+ user = User.get(self.samdb, username="bob")
+ self.assertIsNotNone(user.assigned_policy)
+
+ # Now try removing it
+ result, out, err = self.runcmd("user", "auth", "policy", "remove",
+ "bob")
+ self.assertIsNone(result, msg=err)
+
+ # Assigned policy should be None
+ user = User.get(self.samdb, username="bob")
+ self.assertIsNone(user.assigned_policy)
+
+ def test_view(self):
+ """Test viewing the current assigned authentication policy on a user."""
+ # Assign a policy on one of the users.
+ self.addCleanup(self.runcmd, "user", "auth", "policy", "remove", "bob")
+ self.runcmd("user", "auth", "policy", "assign", "bob", "--policy",
+ "User Policy")
+
+ # Test user with a policy assigned.
+ result, out, err = self.runcmd("user", "auth", "policy", "view",
+ "bob")
+ self.assertIsNone(result, msg=err)
+ self.assertEqual(
+ out, "User bob assigned to authentication policy User Policy\n")
+
+ # Test user without a policy assigned.
+ result, out, err = self.runcmd("user", "auth", "policy", "view",
+ "joe")
+ self.assertIsNone(result, msg=err)
+ self.assertEqual(
+ out, "User joe has no assigned authentication policy.\n")
diff --git a/python/samba/tests/samba_tool/user_auth_silo.py b/python/samba/tests/samba_tool/user_auth_silo.py
new file mode 100644
index 0000000..19cce26
--- /dev/null
+++ b/python/samba/tests/samba_tool/user_auth_silo.py
@@ -0,0 +1,84 @@
+# Unix SMB/CIFS implementation.
+#
+# Tests for samba-tool user auth silo command
+#
+# Copyright (C) Catalyst.Net Ltd. 2023
+#
+# Written by Rob van der Linde <rob@catalyst.net.nz>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from samba.netcmd.domain.models import AuthenticationSilo, User
+
+from .silo_base import SiloTest
+
+
+class AuthPolicyCmdTestCase(SiloTest):
+ def test_assign(self):
+ """Test assigning an authentication silo to a user."""
+ self.addCleanup(self.runcmd, "user", "auth", "silo", "remove", "alice")
+ result, out, err = self.runcmd("user", "auth", "silo", "assign",
+ "alice", "--silo", "Developers")
+ self.assertIsNone(result, msg=err)
+
+ # Assigned silo should be 'Developers'
+ user = User.get(self.samdb, username="alice")
+ silo = AuthenticationSilo.get(self.samdb, dn=user.assigned_silo)
+ self.assertEqual(silo.name, "Developers")
+
+ def test_assign__invalid_silo(self):
+ """Test assigning a non-existing authentication silo to a user."""
+ result, out, err = self.runcmd("user", "auth", "silo", "assign",
+ "alice", "--silo", "doesNotExist")
+ self.assertEqual(result, -1)
+ self.assertIn("Authentication silo doesNotExist not found.", err)
+
+ def test_remove(self):
+ """Test removing the assigned authentication silo from a user."""
+ # First assign a silo, so we can test removing it.
+ self.runcmd("user", "auth", "silo", "assign", "bob", "--silo", "QA")
+
+ # Assigned silo should be set
+ user = User.get(self.samdb, username="bob")
+ self.assertIsNotNone(user.assigned_silo)
+
+ # Now try removing it
+ result, out, err = self.runcmd("user", "auth", "silo", "remove",
+ "bob")
+ self.assertIsNone(result, msg=err)
+
+ # Assigned silo should be None
+ user = User.get(self.samdb, username="bob")
+ self.assertIsNone(user.assigned_silo)
+
+ def test_view(self):
+ """Test viewing the current assigned authentication silo on a user."""
+ # Assign a silo on one of the users.
+ self.addCleanup(self.runcmd, "user", "auth", "silo", "remove", "bob")
+ self.runcmd("user", "auth", "silo", "assign", "bob", "--silo", "QA")
+
+ # Test user with a silo assigned.
+ result, out, err = self.runcmd("user", "auth", "silo", "view",
+ "bob")
+ self.assertIsNone(result, msg=err)
+ self.assertEqual(
+ out, "User bob assigned to authentication silo QA (revoked)\n")
+
+ # Test user without a silo assigned.
+ result, out, err = self.runcmd("user", "auth", "silo", "view",
+ "joe")
+ self.assertIsNone(result, msg=err)
+ self.assertEqual(
+ out, "User joe has no assigned authentication silo.\n")
diff --git a/python/samba/tests/samba_tool/user_check_password_script.py b/python/samba/tests/samba_tool/user_check_password_script.py
new file mode 100644
index 0000000..183b77b
--- /dev/null
+++ b/python/samba/tests/samba_tool/user_check_password_script.py
@@ -0,0 +1,106 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Sean Dague <sdague@linux.vnet.ibm.com> 2011
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2016
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import os
+from samba.tests.samba_tool.base import SambaToolCmdTest
+
+
+class UserCheckPwdTestCase(SambaToolCmdTest):
+ """Tests for samba-tool user subcommands"""
+ users = []
+ samdb = None
+
+ def setUp(self):
+ super().setUp()
+ self.samdb = self.getSamDB("-H", "ldap://%s" % os.environ["DC_SERVER"],
+ "-U%s%%%s" % (os.environ["DC_USERNAME"], os.environ["DC_PASSWORD"]))
+ self.old_min_pwd_age = self.samdb.get_minPwdAge()
+ self.samdb.set_minPwdAge("0")
+
+ def tearDown(self):
+ super().tearDown()
+ self.samdb.set_minPwdAge(self.old_min_pwd_age)
+
+ def _test_checkpassword(self, user, bad_password, good_password, desc):
+
+ (result, out, err) = self.runsubcmd("user", "add", user["name"], bad_password,
+ "-H", "ldap://%s" % os.environ["DC_SERVER"],
+ "-U%s%%%s" % (os.environ["DC_USERNAME"], os.environ["DC_PASSWORD"]))
+ self.assertCmdFail(result, "Should fail adding a user with %s password." % desc)
+
+ (result, out, err) = self.runsubcmd("user", "add", user["name"], good_password,
+ "-H", "ldap://%s" % os.environ["DC_SERVER"],
+ "-U%s%%%s" % (os.environ["DC_USERNAME"], os.environ["DC_PASSWORD"]))
+ self.assertCmdSuccess(result, out, err, "Should succeed adding a user with good password.")
+
+ # Set password
+ (result, out, err) = self.runsubcmd("user", "setpassword", user["name"],
+ "--newpassword=%s" % bad_password,
+ "-H", "ldap://%s" % os.environ["DC_SERVER"],
+ "-U%s%%%s" % (os.environ["DC_USERNAME"], os.environ["DC_PASSWORD"]))
+ self.assertCmdFail(result, "Should fail setting a user's password to a %s password." % desc)
+
+ (result, out, err) = self.runsubcmd("user", "setpassword", user["name"],
+ "--newpassword=%s" % good_password,
+ "-H", "ldap://%s" % os.environ["DC_SERVER"],
+ "-U%s%%%s" % (os.environ["DC_USERNAME"], os.environ["DC_PASSWORD"]))
+ self.assertCmdSuccess(result, out, err, "Should succeed setting a user's password to a good one.")
+
+        # Change the password as the user themselves
+
+ (result, out, err) = self.runsubcmd("user", "password",
+ "--newpassword=%s" % bad_password,
+ "--ipaddress", os.environ["DC_SERVER_IP"],
+ "-U%s%%%s" % (user["name"], good_password))
+ self.assertCmdFail(result, "A user setting their own password to a %s password should fail." % desc)
+
+ (result, out, err) = self.runsubcmd("user", "password",
+ "--newpassword=%s" % good_password + 'XYZ',
+ "--ipaddress", os.environ["DC_SERVER_IP"],
+ "-U%s%%%s" % (user["name"], good_password))
+ self.assertCmdSuccess(result, out, err, "A user setting their own password to a good one should succeed.")
+
+ def test_checkpassword_unacceptable(self):
+ # Add
+ user = self._randomUser()
+ bad_password = os.environ["UNACCEPTABLE_PASSWORD"]
+ good_password = bad_password[:-1]
+ return self._test_checkpassword(user,
+ bad_password,
+ good_password,
+ "unacceptable")
+
+ def test_checkpassword_username(self):
+ # Add
+ user = self._randomUser()
+ bad_password = user["name"]
+ good_password = bad_password[:-1]
+ return self._test_checkpassword(user,
+ bad_password,
+ good_password,
+ "username")
+
+ def _randomUser(self, base=None):
+ """create a user with random attribute values, you can specify base attributes"""
+ if base is None:
+ base = {}
+ user = {
+ "name": self.randomName(),
+ }
+ user.update(base)
+ return user
diff --git a/python/samba/tests/samba_tool/user_edit.sh b/python/samba/tests/samba_tool/user_edit.sh
new file mode 100755
index 0000000..342899f
--- /dev/null
+++ b/python/samba/tests/samba_tool/user_edit.sh
@@ -0,0 +1,198 @@
+#!/bin/sh
+#
+# Test for 'samba-tool user edit'
+
+if [ $# -lt 3 ]; then
+ cat <<EOF
+Usage: user_edit.sh SERVER USERNAME PASSWORD
+EOF
+ exit 1
+fi
+
+SERVER="$1"
+USERNAME="$2"
+PASSWORD="$3"
+
+samba_ldbsearch=ldbsearch
+if test -x $BINDIR/ldbsearch; then
+ samba_ldbsearch=$BINDIR/ldbsearch
+fi
+
+STpath=$(pwd)
+. $STpath/testprogs/blackbox/subunit.sh
+
+display_name="Björn"
+display_name_b64="QmrDtnJu"
+display_name_new="Renamed Bjoern"
+# attribute value including control character
+# echo -e "test \a string" | base64
+display_name_con_b64="dGVzdCAHIHN0cmluZwo="
+
+tmpeditor=$(mktemp --suffix .sh -p ${SELFTEST_TMPDIR} samba-tool-editor-XXXXXXXX)
+chmod +x $tmpeditor
+
+TEST_USER="$(mktemp -u sambatoolXXXXXX)"
+
+create_test_user()
+{
+ $PYTHON ${STpath}/source4/scripting/bin/samba-tool \
+ user create ${TEST_USER} --random-password \
+ -H "ldap://$SERVER" "-U$USERNAME" "--password=$PASSWORD"
+}
+
+edit_user()
+{
+ # create editor.sh
+ cat >$tmpeditor <<-'EOF'
+#!/usr/bin/env bash
+user_ldif="$1"
+SED=$(which sed)
+$SED -i -e 's/userAccountControl: 512/userAccountControl: 514/' $user_ldif
+ EOF
+
+ $PYTHON ${STpath}/source4/scripting/bin/samba-tool \
+ user edit ${TEST_USER} --editor=$tmpeditor \
+ -H "ldap://$SERVER" "-U$USERNAME" "--password=$PASSWORD"
+}
+
+# Test edit user - add base64 attributes
+add_attribute_base64()
+{
+ # create editor.sh
+ cat >$tmpeditor <<EOF
+#!/usr/bin/env bash
+user_ldif="\$1"
+
+grep -v '^\$' \$user_ldif > \${user_ldif}.tmp
+echo "displayName:: $display_name_b64" >> \${user_ldif}.tmp
+
+mv \${user_ldif}.tmp \$user_ldif
+EOF
+
+ $PYTHON ${STpath}/source4/scripting/bin/samba-tool user edit \
+ ${TEST_USER} --editor=$tmpeditor \
+ -H "ldap://$SERVER" "-U$USERNAME" "--password=$PASSWORD"
+}
+
+get_attribute_base64()
+{
+ $samba_ldbsearch "(sAMAccountName=${TEST_USER})" displayName \
+ -H "ldap://$SERVER" "-U$USERNAME" "--password=$PASSWORD"
+}
+
+delete_attribute()
+{
+ # create editor.sh
+ cat >$tmpeditor <<EOF
+#!/usr/bin/env bash
+user_ldif="\$1"
+
+grep -v '^displayName' \$user_ldif >> \${user_ldif}.tmp
+mv \${user_ldif}.tmp \$user_ldif
+EOF
+ $PYTHON ${STpath}/source4/scripting/bin/samba-tool user edit \
+ ${TEST_USER} --editor=$tmpeditor \
+ -H "ldap://$SERVER" "-U$USERNAME" "--password=$PASSWORD"
+}
+
+# Test edit user - add base64 attribute value including control character
+add_attribute_base64_control()
+{
+ # create editor.sh
+ cat >$tmpeditor <<EOF
+#!/usr/bin/env bash
+user_ldif="\$1"
+
+grep -v '^\$' \$user_ldif > \${user_ldif}.tmp
+echo "displayName:: $display_name_con_b64" >> \${user_ldif}.tmp
+
+mv \${user_ldif}.tmp \$user_ldif
+EOF
+ $PYTHON ${STpath}/source4/scripting/bin/samba-tool user edit \
+ ${TEST_USER} --editor=$tmpeditor \
+ -H "ldap://$SERVER" "-U$USERNAME" "--password=$PASSWORD"
+}
+
+get_attribute_base64_control()
+{
+ $PYTHON ${STpath}/source4/scripting/bin/samba-tool user show \
+ ${TEST_USER} --attributes=displayName \
+ -H "ldap://$SERVER" "-U$USERNAME" "--password=$PASSWORD"
+}
+
+get_attribute_force_no_base64()
+{
+ # LDB_FLAG_FORCE_NO_BASE64_LDIF should be used here.
+ $PYTHON ${STpath}/source4/scripting/bin/samba-tool user show \
+ ${TEST_USER} --attributes=displayName \
+ -H "ldap://$SERVER" "-U$USERNAME" "--password=$PASSWORD"
+}
+
+# Test edit user - change base64 attribute value including control character
+change_attribute_base64_control()
+{
+ # create editor.sh
+ cat >$tmpeditor <<EOF
+#!/usr/bin/env bash
+user_ldif="\$1"
+
+sed -i -e 's/displayName:: $display_name_con_b64/displayName: $display_name/' \
+ \$user_ldif
+EOF
+ $PYTHON ${STpath}/source4/scripting/bin/samba-tool user edit \
+ ${TEST_USER} --editor=$tmpeditor \
+ -H "ldap://$SERVER" "-U$USERNAME" "--password=$PASSWORD"
+}
+
+# Test edit user - change attributes with LDB_FLAG_FORCE_NO_BASE64_LDIF
+change_attribute_force_no_base64()
+{
+ # create editor.sh
+ # Expects that the original attribute is available as clear text,
+ # because the LDB_FLAG_FORCE_NO_BASE64_LDIF should be used here.
+ cat >$tmpeditor <<EOF
+#!/usr/bin/env bash
+user_ldif="\$1"
+
+sed -i -e 's/displayName: $display_name/displayName: $display_name_new/' \
+ \$user_ldif
+EOF
+
+ $PYTHON ${STpath}/source4/scripting/bin/samba-tool user edit \
+ ${TEST_USER} --editor=$tmpeditor \
+ -H "ldap://$SERVER" "-U$USERNAME" "--password=$PASSWORD"
+}
+
+get_changed_attribute_force_no_base64()
+{
+ $PYTHON ${STpath}/source4/scripting/bin/samba-tool user show \
+ ${TEST_USER} --attributes=displayName \
+ -H "ldap://$SERVER" "-U$USERNAME" "--password=$PASSWORD"
+}
+
+delete_user()
+{
+ $PYTHON ${STpath}/source4/scripting/bin/samba-tool \
+ user delete ${TEST_USER} \
+ -H "ldap://$SERVER" "-U$USERNAME" "--password=$PASSWORD"
+}
+
+failed=0
+
+testit "create_test_user" create_test_user || failed=$(expr $failed + 1)
+testit "edit_user" edit_user || failed=$(expr $failed + 1)
+testit "add_attribute_base64" add_attribute_base64 || failed=$(expr $failed + 1)
+testit_grep "get_attribute_base64" "^displayName:: $display_name_b64" get_attribute_base64 || failed=$(expr $failed + 1)
+testit "delete_attribute" delete_attribute || failed=$(expr $failed + 1)
+testit "add_attribute_base64_control" add_attribute_base64_control || failed=$(expr $failed + 1)
+testit_grep "get_attribute_base64_control" "^displayName:: $display_name_con_b64" get_attribute_base64_control || failed=$(expr $failed + 1)
+testit "change_attribute_base64_control" change_attribute_base64_control || failed=$(expr $failed + 1)
+testit_grep "get_attribute_base64" "^displayName:: $display_name_b64" get_attribute_base64 || failed=$(expr $failed + 1)
+testit_grep "get_attribute_force_no_base64" "^displayName: $display_name" get_attribute_force_no_base64 || failed=$(expr $failed + 1)
+testit "change_attribute_force_no_base64" change_attribute_force_no_base64 || failed=$(expr $failed + 1)
+testit_grep "get_changed_attribute_force_no_base64" "^displayName: $display_name_new" get_changed_attribute_force_no_base64 || failed=$(expr $failed + 1)
+testit "delete_user" delete_user || failed=$(expr $failed + 1)
+
+rm -f $tmpeditor
+
+exit $failed
diff --git a/python/samba/tests/samba_tool/user_get_kerberos_ticket.py b/python/samba/tests/samba_tool/user_get_kerberos_ticket.py
new file mode 100644
index 0000000..4ac502e
--- /dev/null
+++ b/python/samba/tests/samba_tool/user_get_kerberos_ticket.py
@@ -0,0 +1,195 @@
+# Unix SMB/CIFS implementation.
+#
+# Blackbox tests for getting Kerberos tickets from Group Managed Service Account and other (local) passwords
+#
+# Copyright (C) Catalyst.Net Ltd. 2023
+#
+# Written by Rob van der Linde <rob@catalyst.net.nz>
+#
+# Copyright Andrew Bartlett <abartlet@samba.org> 2023
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import sys
+import os
+
+sys.path.insert(0, "bin/python")
+os.environ["PYTHONUNBUFFERED"] = "1"
+
+from ldb import SCOPE_BASE
+from samba import credentials
+from samba.credentials import Credentials, MUST_USE_KERBEROS
+from samba.dcerpc import security, samr
+from samba.dsdb import UF_WORKSTATION_TRUST_ACCOUNT, UF_NORMAL_ACCOUNT
+from samba.netcmd.domain.models import User
+from samba.ndr import ndr_pack, ndr_unpack
+from samba.tests import connect_samdb, connect_samdb_env, delete_force
+
+from samba.tests import BlackboxTestCase, BlackboxProcessError
+
+
+# If not specified, this is None, meaning local sam.ldb
+PW_READ_URL = os.environ.get("PW_READ_URL")
+
+# We still need to connect to a remote server to check we got the ticket
+SERVER = os.environ.get("SERVER")
+
+PW_CHECK_URL = f"ldap://{SERVER}"
+
+# For authentication to PW_READ_URL if required
+SERVER_USERNAME = os.environ["USERNAME"]
+SERVER_PASSWORD = os.environ["PASSWORD"]
+
+CREDS = f"-U{SERVER_USERNAME}%{SERVER_PASSWORD}"
+
+
+class GetKerberosTiketTest(BlackboxTestCase):
+    """Blackbox tests for getting a Kerberos ticket (GMSA and normal user) and connecting with it."""
+
+ @classmethod
+ def setUpClass(cls):
+ cls.lp = cls.get_loadparm()
+ cls.env_creds = cls.get_env_credentials(lp=cls.lp,
+ env_username="USERNAME",
+ env_password="PASSWORD",
+ env_domain="DOMAIN",
+ env_realm="REALM")
+ if PW_READ_URL is None:
+ url = cls.lp.private_path("sam.ldb")
+ else:
+ url = PW_CHECK_URL
+ cls.samdb = connect_samdb(url, lp=cls.lp, credentials=cls.env_creds)
+ super().setUpClass()
+
+ @classmethod
+ def setUpTestData(cls):
+ cls.gmsa_username = "GMSA_K5Test_User$"
+ cls.username = "get-kerberos-ticket-test"
+ cls.user_base_dn = f"CN=Users,{cls.samdb.domain_dn()}"
+ cls.user_dn = f"CN={cls.username},{cls.user_base_dn}"
+ cls.gmsa_base_dn = f"CN=Managed Service Accounts,{cls.samdb.domain_dn()}"
+ cls.gmsa_user_dn = f"CN={cls.gmsa_username},{cls.gmsa_base_dn}"
+
+ msg = cls.samdb.search(base="", scope=SCOPE_BASE, attrs=["tokenGroups"])[0]
+ connecting_user_sid = str(ndr_unpack(security.dom_sid, msg["tokenGroups"][0]))
+
+ domain_sid = security.dom_sid(cls.samdb.get_domain_sid())
+ allow_sddl = f"O:SYD:(A;;RP;;;{connecting_user_sid})"
+ allow_sd = ndr_pack(security.descriptor.from_sddl(allow_sddl, domain_sid))
+
+ details = {
+ "dn": str(cls.gmsa_user_dn),
+ "objectClass": "msDS-GroupManagedServiceAccount",
+ "msDS-ManagedPasswordInterval": "1",
+ "msDS-GroupMSAMembership": allow_sd,
+ "sAMAccountName": cls.gmsa_username,
+ "userAccountControl": str(UF_WORKSTATION_TRUST_ACCOUNT),
+ }
+
+ cls.samdb.add(details)
+ cls.addClassCleanup(delete_force, cls.samdb, cls.gmsa_user_dn)
+
+ user_password = "P@ssw0rd"
+ utf16pw = ('"' + user_password + '"').encode('utf-16-le')
+ user_details = {
+ "dn": str(cls.user_dn),
+ "objectClass": "user",
+ "sAMAccountName": cls.username,
+ "userAccountControl": str(UF_NORMAL_ACCOUNT),
+ "unicodePwd": utf16pw
+ }
+
+ cls.samdb.add(user_details)
+ cls.addClassCleanup(delete_force, cls.samdb, cls.user_dn)
+
+ cls.gmsa_user = User.get(cls.samdb, username=cls.gmsa_username)
+ cls.user = User.get(cls.samdb, username=cls.username)
+
+ def get_ticket(self, username, options=None):
+ if options is None:
+ options = ""
+ ccache_path = f"{self.tempdir}/ccache"
+ ccache_location = f"FILE:{ccache_path}"
+ cmd = f"user get-kerberos-ticket --output-krb5-ccache={ccache_location} {username} {options}"
+
+ try:
+ self.check_output(cmd)
+ except BlackboxProcessError as e:
+ self.fail(e)
+ self.addCleanup(os.unlink, ccache_path)
+ return ccache_location
+
+ def test_gmsa_ticket(self):
+ # Get a ticket with the tool
+ output_ccache = self.get_ticket(self.gmsa_username)
+ creds = self.insta_creds(template=self.env_creds)
+ creds.set_kerberos_state(MUST_USE_KERBEROS)
+        creds.set_named_ccache(output_ccache, credentials.SPECIFIED, self.lp)
+ db = connect_samdb(PW_CHECK_URL, credentials=creds, lp=self.lp)
+ msg = db.search(base="", scope=SCOPE_BASE, attrs=["tokenGroups"])[0]
+ connecting_user_sid = str(ndr_unpack(security.dom_sid, msg["tokenGroups"][0]))
+
+ self.assertEqual(self.gmsa_user.object_sid, connecting_user_sid)
+
+ def test_user_ticket(self):
+ output_ccache = self.get_ticket(self.username)
+ # Get a ticket with the tool
+ creds = self.insta_creds(template=self.env_creds)
+ creds.set_kerberos_state(MUST_USE_KERBEROS)
+
+ # Currently this is based on reading the unicodePwd, but this should be expanded
+ creds.set_named_ccache(output_ccache, credentials.SPECIFIED, self.lp)
+
+ db = connect_samdb(PW_CHECK_URL, credentials=creds, lp=self.lp)
+
+ msg = db.search(base="", scope=SCOPE_BASE, attrs=["tokenGroups"])[0]
+ connecting_user_sid = str(ndr_unpack(security.dom_sid, msg["tokenGroups"][0]))
+
+ self.assertEqual(self.user.object_sid, connecting_user_sid)
+
+ def test_user_ticket_gpg(self):
+ output_ccache = self.get_ticket(self.username, "--decrypt-samba-gpg")
+ # Get a ticket with the tool
+ creds = self.insta_creds(template=self.env_creds)
+ creds.set_kerberos_state(MUST_USE_KERBEROS)
+ creds.set_named_ccache(output_ccache, credentials.SPECIFIED, self.lp)
+ db = connect_samdb(PW_CHECK_URL, credentials=creds, lp=self.lp)
+
+ msg = db.search(base="", scope=SCOPE_BASE, attrs=["tokenGroups"])[0]
+ connecting_user_sid = str(ndr_unpack(security.dom_sid, msg["tokenGroups"][0]))
+
+ self.assertEqual(self.user.object_sid, connecting_user_sid)
+
+ @classmethod
+ def _make_cmdline(cls, line):
+ """Override to pass line as samba-tool subcommand instead.
+
+ Automatically fills in HOST and CREDS as well.
+ """
+ if isinstance(line, list):
+ cmd = ["samba-tool"] + line
+ if PW_READ_URL is not None:
+ cmd += ["-H", PW_READ_URL, CREDS]
+ else:
+ cmd = f"samba-tool {line}"
+ if PW_READ_URL is not None:
+                cmd += f" -H {PW_READ_URL} {CREDS}"
+
+ return super()._make_cmdline(cmd)
+
+
+if __name__ == "__main__":
+ import unittest
+ unittest.main()
diff --git a/python/samba/tests/samba_tool/user_getpassword_gmsa.py b/python/samba/tests/samba_tool/user_getpassword_gmsa.py
new file mode 100644
index 0000000..9844456
--- /dev/null
+++ b/python/samba/tests/samba_tool/user_getpassword_gmsa.py
@@ -0,0 +1,171 @@
+# Unix SMB/CIFS implementation.
+#
+# Blackbox tests for reading Group Managed Service Account passwords
+#
+# Copyright (C) Catalyst.Net Ltd. 2023
+#
+# Written by Rob van der Linde <rob@catalyst.net.nz>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import sys
+import os
+
+sys.path.insert(0, "bin/python")
+os.environ["PYTHONUNBUFFERED"] = "1"
+
+from ldb import SCOPE_BASE
+
+from samba.credentials import Credentials, MUST_USE_KERBEROS
+from samba.dcerpc import security, samr
+from samba.dsdb import UF_WORKSTATION_TRUST_ACCOUNT
+from samba.netcmd.domain.models import User
+from samba.ndr import ndr_pack, ndr_unpack
+from samba.tests import connect_samdb, connect_samdb_env, delete_force
+
+from samba.tests import BlackboxTestCase
+
+DC_SERVER = os.environ["SERVER"]
+SERVER = os.environ["SERVER"]
+SERVER_USERNAME = os.environ["USERNAME"]
+SERVER_PASSWORD = os.environ["PASSWORD"]
+
+HOST = f"ldap://{SERVER}"
+CREDS = f"-U{SERVER_USERNAME}%{SERVER_PASSWORD}"
+
+
+class GMSAPasswordTest(BlackboxTestCase):
+ """Blackbox tests for GMSA getpassword and connecting as that user."""
+
+ @classmethod
+ def setUpClass(cls):
+ cls.lp = cls.get_loadparm()
+ cls.env_creds = cls.get_env_credentials(lp=cls.lp,
+ env_username="USERNAME",
+ env_password="PASSWORD",
+ env_domain="DOMAIN",
+ env_realm="REALM")
+ cls.samdb = connect_samdb(HOST, lp=cls.lp, credentials=cls.env_creds)
+ super().setUpClass()
+
+ @classmethod
+ def setUpTestData(cls):
+ cls.username = "GMSA_Test_User$"
+ cls.base_dn = f"CN=Managed Service Accounts,{cls.samdb.domain_dn()}"
+ cls.user_dn = f"CN={cls.username},{cls.base_dn}"
+
+ msg = cls.samdb.search(base="", scope=SCOPE_BASE, attrs=["tokenGroups"])[0]
+ connecting_user_sid = str(ndr_unpack(security.dom_sid, msg["tokenGroups"][0]))
+
+ domain_sid = security.dom_sid(cls.samdb.get_domain_sid())
+ allow_sddl = f"O:SYD:(A;;RP;;;{connecting_user_sid})"
+ allow_sd = ndr_pack(security.descriptor.from_sddl(allow_sddl, domain_sid))
+
+ details = {
+ "dn": str(cls.user_dn),
+ "objectClass": "msDS-GroupManagedServiceAccount",
+ "msDS-ManagedPasswordInterval": "1",
+ "msDS-GroupMSAMembership": allow_sd,
+ "sAMAccountName": cls.username,
+ "userAccountControl": str(UF_WORKSTATION_TRUST_ACCOUNT),
+ }
+
+ cls.samdb.add(details)
+ cls.addClassCleanup(delete_force, cls.samdb, cls.user_dn)
+
+ cls.user = User.get(cls.samdb, username=cls.username)
+
+ def getpassword(self, attrs):
+ cmd = f"user getpassword --attributes={attrs} {self.username}"
+
+ ldif = self.check_output(cmd).decode()
+ res = self.samdb.parse_ldif(ldif)
+ _, user_message = next(res)
+
+ # check each attr is returned
+ for attr in attrs.split(","):
+ self.assertIn(attr, user_message)
+
+ return user_message
+
+ def test_getpassword(self):
+ self.getpassword("virtualClearTextUTF16,unicodePwd")
+ self.getpassword("virtualClearTextUTF16")
+ self.getpassword("unicodePwd")
+
+ def test_utf16_password(self):
+ user_msg = self.getpassword("virtualClearTextUTF16")
+ password = user_msg["virtualClearTextUTF16"][0]
+
+ creds = self.insta_creds(template=self.env_creds)
+ creds.set_username(self.username)
+ creds.set_utf16_password(password)
+ db = connect_samdb(HOST, credentials=creds, lp=self.lp)
+
+ msg = db.search(base="", scope=SCOPE_BASE, attrs=["tokenGroups"])[0]
+ connecting_user_sid = str(ndr_unpack(security.dom_sid, msg["tokenGroups"][0]))
+
+ self.assertEqual(self.user.object_sid, connecting_user_sid)
+
+ def test_utf8_password(self):
+ user_msg = self.getpassword("virtualClearTextUTF8")
+ password = str(user_msg["virtualClearTextUTF8"][0])
+
+ creds = self.insta_creds(template=self.env_creds)
+ # Because the password has been converted to utf-8 via UTF16_MUNGED
+ # the nthash is no longer valid. We need to use AES kerberos ciphers
+ # for this to work.
+ creds.set_kerberos_state(MUST_USE_KERBEROS)
+ creds.set_username(self.username)
+ creds.set_password(password)
+ db = connect_samdb(HOST, credentials=creds, lp=self.lp)
+
+ msg = db.search(base="", scope=SCOPE_BASE, attrs=["tokenGroups"])[0]
+ connecting_user_sid = str(ndr_unpack(security.dom_sid, msg["tokenGroups"][0]))
+
+ self.assertEqual(self.user.object_sid, connecting_user_sid)
+
+ def test_unicode_pwd(self):
+ user_msg = self.getpassword("unicodePwd")
+
+ creds = self.insta_creds(template=self.env_creds)
+ creds.set_username(self.username)
+ nt_pass = samr.Password()
+ nt_pass.hash = list(user_msg["unicodePwd"][0])
+ creds.set_nt_hash(nt_pass)
+ db = connect_samdb(HOST, credentials=creds, lp=self.lp)
+
+ msg = db.search(base="", scope=SCOPE_BASE, attrs=["tokenGroups"])[0]
+ connecting_user_sid = str(ndr_unpack(security.dom_sid, msg["tokenGroups"][0]))
+
+ self.assertEqual(self.user.object_sid, connecting_user_sid)
+
+ @classmethod
+ def _make_cmdline(cls, line):
+ """Override to pass line as samba-tool subcommand instead.
+
+ Automatically fills in HOST and CREDS as well.
+ """
+ if isinstance(line, list):
+            cmd = ["samba-tool"] + line + ["-H", HOST, CREDS]
+ else:
+ cmd = f"samba-tool {line} -H {HOST} {CREDS}"
+
+ return super()._make_cmdline(cmd)
+
+
+if __name__ == "__main__":
+ import unittest
+ unittest.main()
diff --git a/python/samba/tests/samba_tool/user_virtualCryptSHA.py b/python/samba/tests/samba_tool/user_virtualCryptSHA.py
new file mode 100644
index 0000000..e95a4be
--- /dev/null
+++ b/python/samba/tests/samba_tool/user_virtualCryptSHA.py
@@ -0,0 +1,516 @@
+# Tests for the samba-tool user sub command reading Primary:userPassword
+#
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import ldb
+import samba
+from samba.tests.samba_tool.base import SambaToolCmdTest
+from samba.credentials import Credentials
+from samba.samdb import SamDB
+from samba.auth import system_session
+from samba import dsdb
+
+USER_NAME = "CryptSHATestUser"
+HASH_OPTION = "password hash userPassword schemes"
+
+
+class UserCmdCryptShaTestCase(SambaToolCmdTest):
+ """
+ Tests for samba-tool user subcommands generation of the virtualCryptSHA256
+ and virtualCryptSHA512 attributes
+ """
+ users = []
+ samdb = None
+
+ def add_user(self, hashes=""):
+ self.lp = samba.tests.env_loadparm()
+
+ # set the extra hashes to be calculated
+ self.lp.set(HASH_OPTION, hashes)
+
+ self.creds = Credentials()
+ self.session = system_session()
+ self.ldb = SamDB(
+ session_info=self.session,
+ credentials=self.creds,
+ lp=self.lp)
+
+ password = self.random_password()
+ self.runsubcmd("user",
+ "create",
+ USER_NAME,
+ password)
+
+ def tearDown(self):
+ super().tearDown()
+ self.runsubcmd("user", "delete", USER_NAME)
+
+ def _get_password(self, attributes, decrypt=False):
+ command = ["user",
+ "getpassword",
+ USER_NAME,
+ "--attributes",
+ attributes]
+ if decrypt:
+ command.append("--decrypt-samba-gpg")
+
+ (result, out, err) = self.runsubcmd(*command)
+ self.assertCmdSuccess(result,
+ out,
+ err,
+ "Ensure getpassword runs")
+ self.assertEqual(err, "Got password OK\n", "getpassword")
+ return out
+
+ # Change the just the NT password hash, as would happen if the password
+ # was updated by Windows, the userPassword values are now obsolete.
+ #
+ def _change_nt_hash(self):
+ res = self.ldb.search(expression = "cn=%s" % USER_NAME,
+ scope = ldb.SCOPE_SUBTREE)
+ msg = ldb.Message()
+ msg.dn = res[0].dn
+ msg["unicodePwd"] = ldb.MessageElement(b"ABCDEF1234567890",
+ ldb.FLAG_MOD_REPLACE,
+ "unicodePwd")
+ self.ldb.modify(
+ msg,
+ controls=["local_oid:%s:0" %
+ dsdb.DSDB_CONTROL_BYPASS_PASSWORD_HASH_OID])
+
+ # gpg decryption not enabled.
+ # both virtual attributes specified, no rounds option
+ # no hashes stored in supplementalCredentials
+ # Should not get values
+ def test_no_gpg_both_hashes_no_rounds(self):
+ self.add_user()
+ out = self._get_password("virtualCryptSHA256,virtualCryptSHA512")
+
+ self.assertTrue("virtualCryptSHA256:" not in out)
+ self.assertTrue("virtualCryptSHA512:" not in out)
+ self.assertTrue("rounds=" not in out)
+
+ # gpg decryption not enabled.
+ # SHA256 specified
+ # no hashes stored in supplementalCredentials
+ # No rounds
+ #
+ # Should not get values
+ def test_no_gpg_sha256_no_rounds(self):
+ self.add_user()
+ out = self._get_password("virtualCryptSHA256")
+
+ self.assertTrue("virtualCryptSHA256:" not in out)
+ self.assertTrue("virtualCryptSHA512:" not in out)
+ self.assertTrue("rounds=" not in out)
+
+ # gpg decryption not enabled.
+ # SHA512 specified
+ # no hashes stored in supplementalCredentials
+ # No rounds
+ #
+ # Should not get values
+ def test_no_gpg_sha512_no_rounds(self):
+ self.add_user()
+ out = self._get_password("virtualCryptSHA512")
+
+ self.assertTrue("virtualCryptSHA256:" not in out)
+ self.assertTrue("virtualCryptSHA512:" not in out)
+ self.assertTrue("rounds=" not in out)
+
+ # gpg decryption not enabled.
+ # SHA128 specified, i.e. invalid/unknown algorithm
+ # no hashes stored in supplementalCredentials
+ # No rounds
+ #
+ # Should not get values
+ def test_no_gpg_invalid_alg_no_rounds(self):
+ self.add_user()
+ out = self._get_password("virtualCryptSHA128")
+
+ self.assertTrue("virtualCryptSHA256:" not in out)
+ self.assertTrue("virtualCryptSHA512:" not in out)
+ self.assertTrue("rounds=" not in out)
+
+ # gpg decryption enabled.
+ # both virtual attributes specified, no rounds option
+ # no hashes stored in supplementalCredentials
+ # Should get values
+ def test_gpg_both_hashes_no_rounds(self):
+ self.add_user()
+ out = self._get_password("virtualCryptSHA256,virtualCryptSHA512", True)
+
+ self.assertTrue("virtualCryptSHA256:" in out)
+ self.assertTrue("virtualCryptSHA512:" in out)
+ self.assertTrue("rounds=" not in out)
+
+ # gpg decryption enabled.
+ # SHA256 specified
+ # no hashes stored in supplementalCredentials
+ # No rounds
+ #
+ # Should get values
+ def test_gpg_sha256_no_rounds(self):
+ self.add_user()
+ out = self._get_password("virtualCryptSHA256", True)
+
+ self.assertTrue("virtualCryptSHA256:" in out)
+ self.assertTrue("virtualCryptSHA512:" not in out)
+ self.assertTrue("rounds=" not in out)
+
+ # gpg decryption enabled.
+ # SHA512 specified
+ # no hashes stored in supplementalCredentials
+ # No rounds
+ #
+ # Should get values
+ def test_gpg_sha512_no_rounds(self):
+ self.add_user()
+ out = self._get_password("virtualCryptSHA512", True)
+
+ self.assertTrue("virtualCryptSHA256:" not in out)
+ self.assertTrue("virtualCryptSHA512:" in out)
+ self.assertTrue("rounds=" not in out)
+
+ # gpg decryption enabled.
+ # SHA128 specified, i.e. invalid/unknown algorithm
+ # no hashes stored in supplementalCredentials
+ # No rounds
+ #
+ # Should not get values
+ def test_gpg_invalid_alg_no_rounds(self):
+ self.add_user()
+ out = self._get_password("virtualCryptSHA128", True)
+
+ self.assertTrue("virtualCryptSHA256:" not in out)
+ self.assertTrue("virtualCryptSHA512:" not in out)
+ self.assertTrue("rounds=" not in out)
+
+ # gpg decryption enabled.
+ # both virtual attributes specified, no rounds option
+ # no hashes stored in supplementalCredentials
+ # underlying windows password changed, so plain text password is
+ # invalid.
+ # Should not get values
+ def test_gpg_both_hashes_no_rounds_pwd_changed(self):
+ self.add_user()
+ self._change_nt_hash()
+ out = self._get_password("virtualCryptSHA256,virtualCryptSHA512", True)
+
+ self.assertTrue("virtualCryptSHA256:" not in out)
+ self.assertTrue("virtualCryptSHA512:" not in out)
+ self.assertTrue("rounds=" not in out)
+
+ # gpg decryption enabled.
+ # SHA256 specified, no rounds option
+ # no hashes stored in supplementalCredentials
+ # underlying windows password changed, so plain text password is
+ # invalid.
+ # Should not get values
+ def test_gpg_sha256_no_rounds_pwd_changed(self):
+ self.add_user()
+ self._change_nt_hash()
+ out = self._get_password("virtualCryptSHA256", True)
+
+ self.assertTrue("virtualCryptSHA256:" not in out)
+ self.assertTrue("virtualCryptSHA512:" not in out)
+ self.assertTrue("rounds=" not in out)
+
+ # gpg decryption enabled.
+ # SHA512 specified, no rounds option
+ # no hashes stored in supplementalCredentials
+ # underlying windows password changed, so plain text password is
+ # invalid.
+ # Should not get values
+ def test_gpg_sha512_no_rounds_pwd_changed(self):
+ self.add_user()
+ self._change_nt_hash()
+        out = self._get_password("virtualCryptSHA512", True)
+
+ self.assertTrue("virtualCryptSHA256:" not in out)
+ self.assertTrue("virtualCryptSHA512:" not in out)
+ self.assertTrue("rounds=" not in out)
+
+ # gpg decryption enabled.
+ # both virtual attributes specified, rounds specified
+ # no hashes stored in supplementalCredentials
+ # Should get values reflecting the requested rounds
+ def test_gpg_both_hashes_both_rounds(self):
+ self.add_user()
+ out = self._get_password(
+ "virtualCryptSHA256;rounds=10123,virtualCryptSHA512;rounds=10456",
+ True)
+
+ self.assertTrue("virtualCryptSHA256:" in out)
+ self.assertTrue("virtualCryptSHA512:" in out)
+
+ sha256 = self._get_attribute(out, "virtualCryptSHA256")
+ self.assertTrue(sha256.startswith("{CRYPT}$5$rounds=10123$"))
+
+ sha512 = self._get_attribute(out, "virtualCryptSHA512")
+ self.assertTrue(sha512.startswith("{CRYPT}$6$rounds=10456$"))
+
+ # gpg decryption enabled.
+ # both virtual attributes specified, rounds specified
+ # invalid rounds for sha256
+ # no hashes stored in supplementalCredentials
+ # Should get values, no rounds for sha256, rounds for sha 512
+ def test_gpg_both_hashes_sha256_rounds_invalid(self):
+ self.add_user()
+ out = self._get_password(
+ "virtualCryptSHA256;rounds=invalid,virtualCryptSHA512;rounds=3125",
+ True)
+
+ self.assertTrue("virtualCryptSHA256:" in out)
+ self.assertTrue("virtualCryptSHA512:" in out)
+
+ sha256 = self._get_attribute(out, "virtualCryptSHA256")
+ self.assertTrue(sha256.startswith("{CRYPT}$5$"))
+ self.assertTrue("rounds" not in sha256)
+
+ sha512 = self._get_attribute(out, "virtualCryptSHA512")
+ self.assertTrue(sha512.startswith("{CRYPT}$6$rounds=3125$"))
+
+ # gpg decryption not enabled.
+ # both virtual attributes specified, no rounds option
+ # both hashes stored in supplementalCredentials
+ # Should get values
+ def test_no_gpg_both_hashes_no_rounds_stored_hashes(self):
+ self.add_user("CryptSHA512 CryptSHA256")
+
+ out = self._get_password("virtualCryptSHA256,virtualCryptSHA512")
+
+ self.assertTrue("virtualCryptSHA256:" in out)
+ self.assertTrue("virtualCryptSHA512:" in out)
+ self.assertTrue("rounds=" not in out)
+
+ # Should be using the pre computed hash in supplementalCredentials
+ # so it should not change between calls.
+ sha256 = self._get_attribute(out, "virtualCryptSHA256")
+ sha512 = self._get_attribute(out, "virtualCryptSHA512")
+
+ out = self._get_password("virtualCryptSHA256,virtualCryptSHA512")
+ self.assertEqual(sha256, self._get_attribute(out, "virtualCryptSHA256"))
+ self.assertEqual(sha512, self._get_attribute(out, "virtualCryptSHA512"))
+
+ # gpg decryption not enabled.
+ # both virtual attributes specified, rounds specified
+ # both hashes stored in supplementalCredentials, with not rounds
+ # Should get hashes for the first matching scheme entry
+ def test_no_gpg_both_hashes_rounds_stored_hashes(self):
+ self.add_user("CryptSHA512 CryptSHA256")
+
+ out = self._get_password("virtualCryptSHA256;rounds=2561," +
+ "virtualCryptSHA512;rounds=5129")
+
+ self.assertTrue("virtualCryptSHA256:" in out)
+ self.assertTrue("virtualCryptSHA512:" in out)
+ self.assertTrue("rounds=" not in out)
+
+ # Should be using the pre computed hash in supplementalCredentials
+ # so it should not change between calls.
+ sha256 = self._get_attribute(out, "virtualCryptSHA256")
+ sha512 = self._get_attribute(out, "virtualCryptSHA512")
+
+ out = self._get_password("virtualCryptSHA256,virtualCryptSHA512")
+ self.assertEqual(sha256, self._get_attribute(out, "virtualCryptSHA256"))
+ self.assertEqual(sha512, self._get_attribute(out, "virtualCryptSHA512"))
+
+ # gpg decryption not enabled.
+ # both virtual attributes specified, rounds specified
+ # both hashes stored in supplementalCredentials, with rounds
+ # Should get values
+ def test_no_gpg_both_hashes_rounds_stored_hashes_with_rounds(self):
+ self.add_user("CryptSHA512 " +
+ "CryptSHA256 " +
+ "CryptSHA512:rounds=5129 " +
+ "CryptSHA256:rounds=2561")
+
+ out = self._get_password("virtualCryptSHA256;rounds=2561," +
+ "virtualCryptSHA512;rounds=5129")
+
+ self.assertTrue("virtualCryptSHA256:" in out)
+ self.assertTrue("virtualCryptSHA512:" in out)
+ self.assertTrue("rounds=" in out)
+
+ # Should be using the pre computed hash in supplementalCredentials
+ # so it should not change between calls.
+ sha256 = self._get_attribute(out, "virtualCryptSHA256")
+ sha512 = self._get_attribute(out, "virtualCryptSHA512")
+
+ out = self._get_password("virtualCryptSHA256;rounds=2561," +
+ "virtualCryptSHA512;rounds=5129")
+ self.assertEqual(sha256, self._get_attribute(out, "virtualCryptSHA256"))
+ self.assertEqual(sha512, self._get_attribute(out, "virtualCryptSHA512"))
+
+ # Number of rounds should match that specified
+ self.assertTrue(sha256.startswith("{CRYPT}$5$rounds=2561"))
+ self.assertTrue(sha512.startswith("{CRYPT}$6$rounds=5129"))
+
+ # gpg decryption not enabled.
+ # both virtual attributes specified, rounds specified
+ # both hashes stored in supplementalCredentials, with rounds
+ # number of rounds stored/requested do not match
+ # Should get the precomputed hashes for CryptSHA512 and CryptSHA256
+ def test_no_gpg_both_hashes_rounds_stored_hashes_with_rounds_no_match(self):
+ self.add_user("CryptSHA512 " +
+ "CryptSHA256 " +
+ "CryptSHA512:rounds=5129 " +
+ "CryptSHA256:rounds=2561")
+
+ out = self._get_password("virtualCryptSHA256;rounds=4000," +
+ "virtualCryptSHA512;rounds=5000")
+
+ self.assertTrue("virtualCryptSHA256:" in out)
+ self.assertTrue("virtualCryptSHA512:" in out)
+ self.assertTrue("rounds=" not in out)
+
+ # Should be using the pre computed hash in supplementalCredentials
+ # so it should not change between calls.
+ sha256 = self._get_attribute(out, "virtualCryptSHA256")
+ sha512 = self._get_attribute(out, "virtualCryptSHA512")
+
+ out = self._get_password("virtualCryptSHA256;rounds=4000," +
+ "virtualCryptSHA512;rounds=5000")
+ self.assertEqual(sha256, self._get_attribute(out, "virtualCryptSHA256"))
+ self.assertEqual(sha512, self._get_attribute(out, "virtualCryptSHA512"))
+
+ # As the number of rounds did not match, should have returned the
+ # first hash of the corresponding scheme
+ out = self._get_password("virtualCryptSHA256," +
+ "virtualCryptSHA512")
+ self.assertEqual(sha256, self._get_attribute(out, "virtualCryptSHA256"))
+ self.assertEqual(sha512, self._get_attribute(out, "virtualCryptSHA512"))
+
+ # gpg decryption enabled.
+ # both virtual attributes specified, no rounds option
+ # both hashes stored in supplementalCredentials
+ # Should get values
+ def test_gpg_both_hashes_no_rounds_stored_hashes(self):
+ self.add_user("CryptSHA512 CryptSHA256")
+
+ out = self._get_password("virtualCryptSHA256,virtualCryptSHA512", True)
+
+ self.assertTrue("virtualCryptSHA256:" in out)
+ self.assertTrue("virtualCryptSHA512:" in out)
+ self.assertTrue("rounds=" not in out)
+
+ # Should be using the pre computed hash in supplementalCredentials
+ # so it should not change between calls.
+ sha256 = self._get_attribute(out, "virtualCryptSHA256")
+ sha512 = self._get_attribute(out, "virtualCryptSHA512")
+
+ out = self._get_password("virtualCryptSHA256,virtualCryptSHA512", True)
+ self.assertEqual(sha256, self._get_attribute(out, "virtualCryptSHA256"))
+ self.assertEqual(sha512, self._get_attribute(out, "virtualCryptSHA512"))
+
+ # gpg decryption enabled.
+ # both virtual attributes specified, rounds specified
+ # both hashes stored in supplementalCredentials, with no rounds
+ # Should get calculated hashed with the correct number of rounds
+ def test_gpg_both_hashes_rounds_stored_hashes(self):
+ self.add_user("CryptSHA512 CryptSHA256")
+
+ out = self._get_password("virtualCryptSHA256;rounds=2561," +
+ "virtualCryptSHA512;rounds=5129",
+ True)
+
+ self.assertTrue("virtualCryptSHA256:" in out)
+ self.assertTrue("virtualCryptSHA512:" in out)
+ self.assertTrue("rounds=" in out)
+
+ # Should be calculating the hashes
+ # so they should change between calls.
+ sha256 = self._get_attribute(out, "virtualCryptSHA256")
+ sha512 = self._get_attribute(out, "virtualCryptSHA512")
+
+ out = self._get_password("virtualCryptSHA256;rounds=2561," +
+ "virtualCryptSHA512;rounds=5129",
+ True)
+ self.assertFalse(sha256 == self._get_attribute(out, "virtualCryptSHA256"))
+ self.assertFalse(sha512 == self._get_attribute(out, "virtualCryptSHA512"))
+
+ # The returned hashes should specify the correct number of rounds
+ self.assertTrue(sha256.startswith("{CRYPT}$5$rounds=2561"))
+ self.assertTrue(sha512.startswith("{CRYPT}$6$rounds=5129"))
+
+ # gpg decryption enabled.
+ # both virtual attributes specified, rounds specified
+ # both hashes stored in supplementalCredentials, with rounds
+ # Should get values
+ def test_gpg_both_hashes_rounds_stored_hashes_with_rounds(self):
+ self.add_user("CryptSHA512 " +
+ "CryptSHA256 " +
+ "CryptSHA512:rounds=5129 " +
+ "CryptSHA256:rounds=2561")
+
+ out = self._get_password("virtualCryptSHA256;rounds=2561," +
+ "virtualCryptSHA512;rounds=5129",
+ True)
+
+ self.assertTrue("virtualCryptSHA256:" in out)
+ self.assertTrue("virtualCryptSHA512:" in out)
+ self.assertTrue("rounds=" in out)
+
+ # Should be using the pre computed hash in supplementalCredentials
+ # so it should not change between calls.
+ sha256 = self._get_attribute(out, "virtualCryptSHA256")
+ sha512 = self._get_attribute(out, "virtualCryptSHA512")
+
+ out = self._get_password("virtualCryptSHA256;rounds=2561," +
+ "virtualCryptSHA512;rounds=5129",
+ True)
+ self.assertEqual(sha256, self._get_attribute(out, "virtualCryptSHA256"))
+ self.assertEqual(sha512, self._get_attribute(out, "virtualCryptSHA512"))
+
+ # The returned hashes should specify the correct number of rounds
+ self.assertTrue(sha256.startswith("{CRYPT}$5$rounds=2561"))
+ self.assertTrue(sha512.startswith("{CRYPT}$6$rounds=5129"))
+
+ # gpg decryption enabled.
+ # both virtual attributes specified, rounds specified
+ # both hashes stored in supplementalCredentials, with rounds
+ # number of rounds stored/requested do not match
+ # Should get calculated hashes with the correct number of rounds
+ def test_gpg_both_hashes_rounds_stored_hashes_with_rounds_no_match(self):
+ self.add_user("CryptSHA512 " +
+ "CryptSHA256 " +
+ "CryptSHA512:rounds=5129 " +
+ "CryptSHA256:rounds=2561")
+
+ out = self._get_password("virtualCryptSHA256;rounds=4000," +
+ "virtualCryptSHA512;rounds=5000",
+ True)
+
+ self.assertTrue("virtualCryptSHA256:" in out)
+ self.assertTrue("virtualCryptSHA512:" in out)
+ self.assertTrue("rounds=" in out)
+
+ # Should be calculating the hashes
+ # so they should change between calls.
+ sha256 = self._get_attribute(out, "virtualCryptSHA256")
+ sha512 = self._get_attribute(out, "virtualCryptSHA512")
+
+ out = self._get_password("virtualCryptSHA256;rounds=4000," +
+ "virtualCryptSHA512;rounds=5000",
+ True)
+ self.assertFalse(sha256 == self._get_attribute(out, "virtualCryptSHA256"))
+ self.assertFalse(sha512 == self._get_attribute(out, "virtualCryptSHA512"))
+
+ # The calculated hashes should specify the correct number of rounds
+ self.assertTrue(sha256.startswith("{CRYPT}$5$rounds=4000"))
+ self.assertTrue(sha512.startswith("{CRYPT}$6$rounds=5000"))
diff --git a/python/samba/tests/samba_tool/user_virtualCryptSHA_base.py b/python/samba/tests/samba_tool/user_virtualCryptSHA_base.py
new file mode 100644
index 0000000..14e3de9
--- /dev/null
+++ b/python/samba/tests/samba_tool/user_virtualCryptSHA_base.py
@@ -0,0 +1,99 @@
+# Tests for the samba-tool user sub command reading Primary:userPassword
+#
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import ldb
+import samba
+from samba.tests.samba_tool.base import SambaToolCmdTest
+from samba.credentials import Credentials
+from samba.samdb import SamDB
+from samba.auth import system_session
+from samba import dsdb
+
# Name of the account created by add_user() and removed again in tearDown().
USER_NAME = "CryptSHATestUser"
# smb.conf option selecting which extra crypt hash schemes are stored in
# supplementalCredentials when a password is set.
HASH_OPTION = "password hash userPassword schemes"
+
+
class UserCmdCryptShaTestCase(SambaToolCmdTest):
    """
    Tests for samba-tool user subcommands generation of the virtualCryptSHA256
    and virtualCryptSHA512 attributes

    Base class providing the shared fixture: create/delete the test user,
    fetch the virtual password attributes via "samba-tool user getpassword",
    and invalidate the stored plain text password.  The GPG and userPassword
    test variants subclass this.
    """
    users = []
    samdb = None

    def _get_attribute(self, out, name):
        """Return the value of attribute *name* from the single LDIF record
        in *out*, or "" when the attribute is absent."""
        parsed = list(self.ldb.parse_ldif(out))
        self.assertEqual(len(parsed), 1)
        changetype, msg = parsed[0]
        return str(msg.get(name, ""))

    def add_user(self, hashes=""):
        """Create the test user.

        :param hashes: value for the "password hash userPassword schemes"
            smb.conf option, i.e. which crypt hashes (and rounds) get stored
            in supplementalCredentials when the password is set.
        """
        self.lp = samba.tests.env_loadparm()

        # set the extra hashes to be calculated
        self.lp.set(HASH_OPTION, hashes)

        self.creds = Credentials()
        self.session = system_session()
        self.ldb = SamDB(
            session_info=self.session,
            credentials=self.creds,
            lp=self.lp)

        password = self.random_password()
        self.runsubcmd("user",
                       "create",
                       USER_NAME,
                       password)

    def tearDown(self):
        super().tearDown()
        # Remove the account created by add_user()
        self.runsubcmd("user", "delete", USER_NAME)

    def _get_password(self, attributes, decrypt=False):
        """Run "samba-tool user getpassword" for the test user.

        :param attributes: value passed to --attributes
        :param decrypt: when True also pass --decrypt-samba-gpg
        :return: the LDIF output of the command
        """
        command = ["user",
                   "getpassword",
                   USER_NAME,
                   "--attributes",
                   attributes]
        if decrypt:
            command.append("--decrypt-samba-gpg")

        (result, out, err) = self.runsubcmd(*command)
        self.assertCmdSuccess(result,
                              out,
                              err,
                              "Ensure getpassword runs")
        self.assertEqual(err, "Got password OK\n", "getpassword")
        return out

    # Change just the NT password hash, as would happen if the password
    # was updated by Windows; the userPassword values are then obsolete.
    #
    def _change_nt_hash(self):
        res = self.ldb.search(expression = "cn=%s" % USER_NAME,
                              scope = ldb.SCOPE_SUBTREE)
        msg = ldb.Message()
        msg.dn = res[0].dn
        msg["unicodePwd"] = ldb.MessageElement(b"ABCDEF1234567890",
                                               ldb.FLAG_MOD_REPLACE,
                                               "unicodePwd")
        # The control bypasses password-hash processing so only unicodePwd
        # is replaced and the stored userPassword blobs go stale.
        self.ldb.modify(
            msg,
            controls=["local_oid:%s:0" %
                      dsdb.DSDB_CONTROL_BYPASS_PASSWORD_HASH_OID])
diff --git a/python/samba/tests/samba_tool/user_virtualCryptSHA_gpg.py b/python/samba/tests/samba_tool/user_virtualCryptSHA_gpg.py
new file mode 100644
index 0000000..6517eee
--- /dev/null
+++ b/python/samba/tests/samba_tool/user_virtualCryptSHA_gpg.py
@@ -0,0 +1,262 @@
+# Tests for the samba-tool user sub command reading Primary:userPassword
+#
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from samba.tests.samba_tool.user_virtualCryptSHA_base import UserCmdCryptShaTestCase
+
+
class UserCmdCryptShaTestCaseGPG(UserCmdCryptShaTestCase):
    """
    Tests for samba-tool user subcommands generation of the virtualCryptSHA256
    and virtualCryptSHA512 attributes, with GPG decryption of the stored
    plain text password enabled (--decrypt-samba-gpg), so hashes can be
    calculated on demand when no suitable stored hash exists.
    """

    # gpg decryption enabled.
    # both virtual attributes specified, no rounds option
    # no hashes stored in supplementalCredentials
    # Should get values
    def test_gpg_both_hashes_no_rounds(self):
        self.add_user()
        out = self._get_password("virtualCryptSHA256,virtualCryptSHA512", True)

        self.assertIn("virtualCryptSHA256:", out)
        self.assertIn("virtualCryptSHA512:", out)
        self.assertNotIn("rounds=", out)

    # gpg decryption enabled.
    # SHA256 specified
    # no hashes stored in supplementalCredentials
    # No rounds
    #
    # Should get values
    def test_gpg_sha256_no_rounds(self):
        self.add_user()
        out = self._get_password("virtualCryptSHA256", True)

        self.assertIn("virtualCryptSHA256:", out)
        self.assertNotIn("virtualCryptSHA512:", out)
        self.assertNotIn("rounds=", out)

    # gpg decryption enabled.
    # SHA512 specified
    # no hashes stored in supplementalCredentials
    # No rounds
    #
    # Should get values
    def test_gpg_sha512_no_rounds(self):
        self.add_user()
        out = self._get_password("virtualCryptSHA512", True)

        self.assertNotIn("virtualCryptSHA256:", out)
        self.assertIn("virtualCryptSHA512:", out)
        self.assertNotIn("rounds=", out)

    # gpg decryption enabled.
    # SHA128 specified, i.e. invalid/unknown algorithm
    # no hashes stored in supplementalCredentials
    # No rounds
    #
    # Should not get values
    def test_gpg_invalid_alg_no_rounds(self):
        self.add_user()
        out = self._get_password("virtualCryptSHA128", True)

        self.assertNotIn("virtualCryptSHA256:", out)
        self.assertNotIn("virtualCryptSHA512:", out)
        self.assertNotIn("rounds=", out)

    # gpg decryption enabled.
    # both virtual attributes specified, no rounds option
    # no hashes stored in supplementalCredentials
    # underlying windows password changed, so plain text password is
    # invalid.
    # Should not get values
    def test_gpg_both_hashes_no_rounds_pwd_changed(self):
        self.add_user()
        self._change_nt_hash()
        out = self._get_password("virtualCryptSHA256,virtualCryptSHA512", True)

        self.assertNotIn("virtualCryptSHA256:", out)
        self.assertNotIn("virtualCryptSHA512:", out)
        self.assertNotIn("rounds=", out)

    # gpg decryption enabled.
    # SHA256 specified, no rounds option
    # no hashes stored in supplementalCredentials
    # underlying windows password changed, so plain text password is
    # invalid.
    # Should not get values
    def test_gpg_sha256_no_rounds_pwd_changed(self):
        self.add_user()
        self._change_nt_hash()
        out = self._get_password("virtualCryptSHA256", True)

        self.assertNotIn("virtualCryptSHA256:", out)
        self.assertNotIn("virtualCryptSHA512:", out)
        self.assertNotIn("rounds=", out)

    # gpg decryption enabled.
    # SHA512 specified, no rounds option
    # no hashes stored in supplementalCredentials
    # underlying windows password changed, so plain text password is
    # invalid.
    # Should not get values
    def test_gpg_sha512_no_rounds_pwd_changed(self):
        self.add_user()
        self._change_nt_hash()
        # Request SHA512 here.  The original code requested virtualCryptSHA256
        # (copy-paste from the SHA256 test), which left the SHA512 code path
        # completely untested by this method.
        out = self._get_password("virtualCryptSHA512", True)

        self.assertNotIn("virtualCryptSHA256:", out)
        self.assertNotIn("virtualCryptSHA512:", out)
        self.assertNotIn("rounds=", out)

    # gpg decryption enabled.
    # both virtual attributes specified, rounds specified
    # no hashes stored in supplementalCredentials
    # Should get values reflecting the requested rounds
    def test_gpg_both_hashes_both_rounds(self):
        self.add_user()
        out = self._get_password(
            "virtualCryptSHA256;rounds=10123,virtualCryptSHA512;rounds=10456",
            True)

        self.assertIn("virtualCryptSHA256;rounds=10123:", out)
        self.assertIn("virtualCryptSHA512;rounds=10456:", out)

        sha256 = self._get_attribute(out, "virtualCryptSHA256;rounds=10123")
        self.assertTrue(sha256.startswith("{CRYPT}$5$rounds=10123$"))

        sha512 = self._get_attribute(out, "virtualCryptSHA512;rounds=10456")
        self.assertTrue(sha512.startswith("{CRYPT}$6$rounds=10456$"))

    # gpg decryption enabled.
    # both virtual attributes specified, rounds specified
    # invalid rounds for sha256
    # no hashes stored in supplementalCredentials
    # Should get values, no rounds for sha256, rounds for sha 512
    def test_gpg_both_hashes_sha256_rounds_invalid(self):
        self.add_user()
        out = self._get_password(
            "virtualCryptSHA256;rounds=invalid,virtualCryptSHA512;rounds=3125",
            True)

        self.assertIn("virtualCryptSHA256;rounds=invalid:", out)
        self.assertIn("virtualCryptSHA512;rounds=3125:", out)

        sha256 = self._get_attribute(out, "virtualCryptSHA256;rounds=invalid")
        self.assertTrue(sha256.startswith("{CRYPT}$5$"))
        self.assertNotIn("rounds", sha256)

        sha512 = self._get_attribute(out, "virtualCryptSHA512;rounds=3125")
        self.assertTrue(sha512.startswith("{CRYPT}$6$rounds=3125$"))

    # gpg decryption enabled.
    # both virtual attributes specified, rounds specified
    # both hashes stored in supplementalCredentials, with no rounds
    # Should get calculated hashes with the correct number of rounds
    def test_gpg_both_hashes_rounds_stored_hashes(self):
        self.add_user("CryptSHA512 CryptSHA256")

        out = self._get_password("virtualCryptSHA256;rounds=2561," +
                                 "virtualCryptSHA512;rounds=5129",
                                 True)

        self.assertIn("virtualCryptSHA256;rounds=2561:", out)
        self.assertIn("virtualCryptSHA512;rounds=5129:", out)
        self.assertIn("$rounds=", out)

        # Should be calculating the hashes
        # so they should change between calls.
        sha256 = self._get_attribute(out, "virtualCryptSHA256;rounds=2561")
        sha512 = self._get_attribute(out, "virtualCryptSHA512;rounds=5129")

        out = self._get_password("virtualCryptSHA256;rounds=2561," +
                                 "virtualCryptSHA512;rounds=5129",
                                 True)
        # Compare against the rounds-qualified attribute names.  The output
        # only contains "virtualCryptSHA256;rounds=2561"-style attributes
        # (see the assertIn checks above), so the original unqualified
        # lookups returned "" and the inequality assertions were vacuous.
        self.assertNotEqual(sha256,
                            self._get_attribute(
                                out, "virtualCryptSHA256;rounds=2561"))
        self.assertNotEqual(sha512,
                            self._get_attribute(
                                out, "virtualCryptSHA512;rounds=5129"))

        # The returned hashes should specify the correct number of rounds
        self.assertTrue(sha256.startswith("{CRYPT}$5$rounds=2561"))
        self.assertTrue(sha512.startswith("{CRYPT}$6$rounds=5129"))

    # gpg decryption enabled.
    # both virtual attributes specified, rounds specified
    # both hashes stored in supplementalCredentials, with rounds
    # Should get values
    def test_gpg_both_hashes_rounds_stored_hashes_with_rounds(self):
        self.add_user("CryptSHA512 " +
                      "CryptSHA256 " +
                      "CryptSHA512:rounds=5129 " +
                      "CryptSHA256:rounds=2561")

        out = self._get_password("virtualCryptSHA256;rounds=2561," +
                                 "virtualCryptSHA512;rounds=5129",
                                 True)

        self.assertIn("virtualCryptSHA256;rounds=2561:", out)
        self.assertIn("virtualCryptSHA512;rounds=5129:", out)
        self.assertIn("$rounds=", out)

        # Should be using the pre computed hash in supplementalCredentials
        # so it should not change between calls.
        sha256 = self._get_attribute(out, "virtualCryptSHA256;rounds=2561")
        sha512 = self._get_attribute(out, "virtualCryptSHA512;rounds=5129")

        out = self._get_password("virtualCryptSHA256;rounds=2561," +
                                 "virtualCryptSHA512;rounds=5129",
                                 True)
        self.assertEqual(sha256, self._get_attribute(out, "virtualCryptSHA256;rounds=2561"))
        self.assertEqual(sha512, self._get_attribute(out, "virtualCryptSHA512;rounds=5129"))

        # The returned hashes should specify the correct number of rounds
        self.assertTrue(sha256.startswith("{CRYPT}$5$rounds=2561"))
        self.assertTrue(sha512.startswith("{CRYPT}$6$rounds=5129"))

    # gpg decryption enabled.
    # both virtual attributes specified, rounds specified
    # both hashes stored in supplementalCredentials, with rounds
    # number of rounds stored/requested do not match
    # Should get calculated hashes with the correct number of rounds
    def test_gpg_both_hashes_rounds_stored_hashes_with_rounds_no_match(self):
        self.add_user("CryptSHA512 " +
                      "CryptSHA256 " +
                      "CryptSHA512:rounds=5129 " +
                      "CryptSHA256:rounds=2561")

        out = self._get_password("virtualCryptSHA256;rounds=4000," +
                                 "virtualCryptSHA512;rounds=5000",
                                 True)

        self.assertIn("virtualCryptSHA256;rounds=4000:", out)
        self.assertIn("virtualCryptSHA512;rounds=5000:", out)
        self.assertIn("$rounds=", out)

        # Should be calculating the hashes
        # so they should change between calls.
        sha256 = self._get_attribute(out, "virtualCryptSHA256;rounds=4000")
        sha512 = self._get_attribute(out, "virtualCryptSHA512;rounds=5000")

        out = self._get_password("virtualCryptSHA256;rounds=4000," +
                                 "virtualCryptSHA512;rounds=5000",
                                 True)
        self.assertNotEqual(sha256, self._get_attribute(out, "virtualCryptSHA256;rounds=4000"))
        self.assertNotEqual(sha512, self._get_attribute(out, "virtualCryptSHA512;rounds=5000"))

        # The calculated hashes should specify the correct number of rounds
        self.assertTrue(sha256.startswith("{CRYPT}$5$rounds=4000"))
        self.assertTrue(sha512.startswith("{CRYPT}$6$rounds=5000"))
diff --git a/python/samba/tests/samba_tool/user_virtualCryptSHA_userPassword.py b/python/samba/tests/samba_tool/user_virtualCryptSHA_userPassword.py
new file mode 100644
index 0000000..1f84af0
--- /dev/null
+++ b/python/samba/tests/samba_tool/user_virtualCryptSHA_userPassword.py
@@ -0,0 +1,188 @@
+# Tests for the samba-tool user sub command reading Primary:userPassword
+#
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from samba.tests.samba_tool.user_virtualCryptSHA_base import UserCmdCryptShaTestCase
+
+
class UserCmdCryptShaTestCaseUserPassword(UserCmdCryptShaTestCase):
    """
    virtualCryptSHA256/virtualCryptSHA512 behaviour without GPG decryption:
    values can only come from hashes pre-computed and stored in
    supplementalCredentials, never from the plain text password.
    """

    def _assert_nothing_returned(self, out):
        # Neither virtual attribute nor any rounds marker may be present.
        self.assertNotIn("virtualCryptSHA256:", out)
        self.assertNotIn("virtualCryptSHA512:", out)
        self.assertNotIn("rounds=", out)

    # gpg decryption not enabled, nothing stored in
    # supplementalCredentials: requesting both attributes (no rounds)
    # yields no values.
    def test_no_gpg_both_hashes_no_rounds(self):
        self.add_user()
        out = self._get_password("virtualCryptSHA256,virtualCryptSHA512")
        self._assert_nothing_returned(out)

    # gpg decryption not enabled, nothing stored: requesting only SHA256
    # (no rounds) yields no values.
    def test_no_gpg_sha256_no_rounds(self):
        self.add_user()
        out = self._get_password("virtualCryptSHA256")
        self._assert_nothing_returned(out)

    # gpg decryption not enabled, nothing stored: requesting only SHA512
    # (no rounds) yields no values.
    def test_no_gpg_sha512_no_rounds(self):
        self.add_user()
        out = self._get_password("virtualCryptSHA512")
        self._assert_nothing_returned(out)

    # gpg decryption not enabled, nothing stored: an invalid/unknown
    # algorithm (SHA128) yields no values.
    def test_no_gpg_invalid_alg_no_rounds(self):
        self.add_user()
        out = self._get_password("virtualCryptSHA128")
        self._assert_nothing_returned(out)

    # gpg decryption not enabled, both hashes stored (without rounds):
    # values are returned, and as they come straight from
    # supplementalCredentials they are stable across calls.
    def test_no_gpg_both_hashes_no_rounds_stored_hashes(self):
        self.add_user("CryptSHA512 CryptSHA256")

        out = self._get_password("virtualCryptSHA256,virtualCryptSHA512")

        self.assertIn("virtualCryptSHA256:", out)
        self.assertIn("virtualCryptSHA512:", out)
        self.assertNotIn("rounds=", out)

        first_256 = self._get_attribute(out, "virtualCryptSHA256")
        first_512 = self._get_attribute(out, "virtualCryptSHA512")

        out = self._get_password("virtualCryptSHA256,virtualCryptSHA512")
        self.assertEqual(first_256, self._get_attribute(out, "virtualCryptSHA256"))
        self.assertEqual(first_512, self._get_attribute(out, "virtualCryptSHA512"))

    # gpg decryption not enabled, both hashes stored without rounds but
    # rounds requested: the first stored hash of the matching scheme is
    # returned under the rounds-qualified attribute name.
    def test_no_gpg_both_hashes_rounds_stored_hashes(self):
        self.add_user("CryptSHA512 CryptSHA256")

        out = self._get_password("virtualCryptSHA256;rounds=2561," +
                                 "virtualCryptSHA512;rounds=5129")

        self.assertIn("virtualCryptSHA256;rounds=2561:", out)
        self.assertIn("virtualCryptSHA512;rounds=5129:", out)
        self.assertNotIn("$rounds=", out)

        # Pre-computed values from supplementalCredentials: stable across
        # calls and identical to what the unqualified attributes return.
        first_256 = self._get_attribute(out, "virtualCryptSHA256;rounds=2561")
        first_512 = self._get_attribute(out, "virtualCryptSHA512;rounds=5129")

        out = self._get_password("virtualCryptSHA256,virtualCryptSHA512")
        self.assertEqual(first_256, self._get_attribute(out,
                                                        "virtualCryptSHA256"))
        self.assertEqual(first_512, self._get_attribute(out,
                                                        "virtualCryptSHA512"))

    # gpg decryption not enabled, hashes stored with rounds that match the
    # request: the stored values are returned and carry the requested
    # rounds count.
    def test_no_gpg_both_hashes_rounds_stored_hashes_with_rounds(self):
        self.add_user("CryptSHA512 " +
                      "CryptSHA256 " +
                      "CryptSHA512:rounds=5129 " +
                      "CryptSHA256:rounds=2561")

        out = self._get_password("virtualCryptSHA256;rounds=2561," +
                                 "virtualCryptSHA512;rounds=5129")

        self.assertIn("virtualCryptSHA256;rounds=2561:", out)
        self.assertIn("virtualCryptSHA512;rounds=5129:", out)
        self.assertIn("$rounds=", out)

        # Stored values, so they do not change between calls.
        first_256 = self._get_attribute(out, "virtualCryptSHA256;rounds=2561")
        first_512 = self._get_attribute(out, "virtualCryptSHA512;rounds=5129")

        out = self._get_password("virtualCryptSHA256;rounds=2561," +
                                 "virtualCryptSHA512;rounds=5129")
        self.assertEqual(first_256,
                         self._get_attribute(out,
                                             "virtualCryptSHA256;rounds=2561"))
        self.assertEqual(first_512,
                         self._get_attribute(out,
                                             "virtualCryptSHA512;rounds=5129"))

        # The number of rounds must match the request.
        self.assertTrue(first_256.startswith("{CRYPT}$5$rounds=2561"))
        self.assertTrue(first_512.startswith("{CRYPT}$6$rounds=5129"))

    # gpg decryption not enabled, hashes stored with rounds that do NOT
    # match the request: nothing can be recalculated, so the first stored
    # hash of the corresponding scheme is returned instead.
    def test_no_gpg_both_hashes_rounds_stored_hashes_with_rounds_no_match(self):
        self.add_user("CryptSHA512 " +
                      "CryptSHA256 " +
                      "CryptSHA512:rounds=5129 " +
                      "CryptSHA256:rounds=2561")

        out = self._get_password("virtualCryptSHA256;rounds=4000," +
                                 "virtualCryptSHA512;rounds=5000")

        self.assertIn("virtualCryptSHA256;rounds=4000:", out)
        self.assertIn("virtualCryptSHA512;rounds=5000:", out)
        self.assertNotIn("$rounds=", out)

        # Stored values, so they do not change between calls.
        first_256 = self._get_attribute(out, "virtualCryptSHA256;rounds=4000")
        first_512 = self._get_attribute(out, "virtualCryptSHA512;rounds=5000")

        out = self._get_password("virtualCryptSHA256;rounds=4000," +
                                 "virtualCryptSHA512;rounds=5000")
        self.assertEqual(first_256,
                         self._get_attribute(out,
                                             "virtualCryptSHA256;rounds=4000"))
        self.assertEqual(first_512,
                         self._get_attribute(out,
                                             "virtualCryptSHA512;rounds=5000"))

        # As the rounds did not match, the returned values equal the first
        # stored hash of each scheme.
        out = self._get_password("virtualCryptSHA256," +
                                 "virtualCryptSHA512")
        self.assertEqual(first_256, self._get_attribute(out, "virtualCryptSHA256"))
        self.assertEqual(first_512, self._get_attribute(out, "virtualCryptSHA512"))
diff --git a/python/samba/tests/samba_tool/user_wdigest.py b/python/samba/tests/samba_tool/user_wdigest.py
new file mode 100644
index 0000000..0d87762
--- /dev/null
+++ b/python/samba/tests/samba_tool/user_wdigest.py
@@ -0,0 +1,450 @@
+# Tests for the samba-tool user sub command reading Primary:WDigest
+#
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2017
+#
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import os
+import samba
+from samba.tests.samba_tool.base import SambaToolCmdTest
+from hashlib import md5
+
+
+USER_NAME = "WdigestTestUser"
+
+# Calculate the MD5 password digest from the supplied user, realm and password
+#
+
+
def calc_digest(user, realm, password):
    """Return the htdigest-style line "user:realm:MD5HEX" where the digest
    is MD5 over "user:realm:password" (UTF-8 encoded)."""
    text = "%s:%s:%s" % (user, realm, password)
    digest = md5(text.encode('utf8')).hexdigest()

    return "%s:%s:%s" % (user, realm, digest)
+
+
class UserCmdWdigestTestCase(SambaToolCmdTest):
    """Tests for samba-tool user subcommands extraction of the wdigest values.

    Each virtualWDigestNN attribute is an MD5 digest over a specific
    (user, realm, password) combination; each test below checks one
    combination against a locally calculated expected value.

    Test results validated against Windows Server 2012 R2.
    NOTE: That as at 22-05-2017 the values Documented at
    3.1.1.8.11.3.1 WDIGEST_CREDENTIALS Construction
    are incorrect.
    """
    users = []
    samdb = None

    def setUp(self):
        # Connect to the DC, look up the DNS and NetBIOS domain names
        # (needed to build the expected digests), and create the test user.
        super().setUp()
        self.lp = samba.tests.env_loadparm()
        self.samdb = self.getSamDB(
            "-H", "ldap://%s" % os.environ["DC_SERVER"],
            "-U%s%%%s" % (os.environ["DC_USERNAME"],
                          os.environ["DC_PASSWORD"]))
        self.dns_domain = self.samdb.domain_dns_name()
        # The NetBIOS domain name comes from the crossRef object for the
        # default naming context in the configuration partition.
        res = self.samdb.search(
            base=self.samdb.get_config_basedn(),
            expression="ncName=%s" % self.samdb.get_default_basedn(),
            attrs=["nETBIOSName"])
        self.netbios_domain = str(res[0]["nETBIOSName"][0])
        self.password = self.random_password()
        result, out, err = self.runsubcmd("user",
                                          "create",
                                          USER_NAME,
                                          self.password)
        self.assertCmdSuccess(result,
                              out,
                              err,
                              "Ensure user is created")

    def tearDown(self):
        super().tearDown()
        result, out, err = self.runsubcmd("user", "delete", USER_NAME)
        self.assertCmdSuccess(result,
                              out,
                              err,
                              "Ensure user is deleted")

    def _testWDigest(self, attribute, expected, missing=False):
        """Fetch *attribute* via "samba-tool user getpassword" and check it
        against *expected*; with missing=True assert it is absent instead."""

        (result, out, err) = self.runsubcmd("user",
                                            "getpassword",
                                            USER_NAME,
                                            "--attributes",
                                            attribute)
        self.assertCmdSuccess(result,
                              out,
                              err,
                              "Ensure getpassword runs")
        self.assertEqual(err, "Got password OK\n", "getpassword")

        if missing:
            self.assertTrue(attribute not in out)
        else:
            # Unfold LDIF line continuations before matching.
            self.assertMatch(out.replace('\n ', ''),
                             "%s: %s" % (attribute, expected))

    def test_Wdigest_no_suffix(self):
        attribute = "virtualWDigest"
        self._testWDigest(attribute, None, True)

    def test_Wdigest_non_numeric_suffix(self):
        attribute = "virtualWDigestss"
        self._testWDigest(attribute, None, True)

    def test_Wdigest00(self):
        attribute = "virtualWDigest00"
        self._testWDigest(attribute, None, True)

    # Hash01 MD5(sAMAccountName,
    #            NETBIOSDomainName,
    #            password)
    #
    def test_Wdigest01(self):
        attribute = "virtualWDigest01"
        expected = calc_digest(USER_NAME,
                               self.netbios_domain,
                               self.password)
        self._testWDigest(attribute, expected)

    # Hash02 MD5(LOWER(sAMAccountName),
    #            LOWER(NETBIOSDomainName),
    #            password)
    #
    def test_Wdigest02(self):
        attribute = "virtualWDigest02"
        expected = calc_digest(USER_NAME.lower(),
                               self.netbios_domain.lower(),
                               self.password)
        self._testWDigest(attribute, expected)

    # Hash03 MD5(UPPER(sAMAccountName),
    #            UPPER(NETBIOSDomainName),
    #            password)
    #
    def test_Wdigest03(self):
        attribute = "virtualWDigest03"
        expected = calc_digest(USER_NAME.upper(),
                               self.netbios_domain.upper(),
                               self.password)
        self._testWDigest(attribute, expected)

    # Hash04 MD5(sAMAccountName,
    #            UPPER(NETBIOSDomainName),
    #            password)
    #
    def test_Wdigest04(self):
        attribute = "virtualWDigest04"
        expected = calc_digest(USER_NAME,
                               self.netbios_domain.upper(),
                               self.password)
        self._testWDigest(attribute, expected)

    # Hash05 MD5(sAMAccountName,
    #            LOWER(NETBIOSDomainName),
    #            password)
    #
    def test_Wdigest05(self):
        attribute = "virtualWDigest05"
        expected = calc_digest(USER_NAME,
                               self.netbios_domain.lower(),
                               self.password)
        self._testWDigest(attribute, expected)

    # Hash06 MD5(UPPER(sAMAccountName),
    #            LOWER(NETBIOSDomainName),
    #            password)
    #
    def test_Wdigest06(self):
        attribute = "virtualWDigest06"
        expected = calc_digest(USER_NAME.upper(),
                               self.netbios_domain.lower(),
                               self.password)
        self._testWDigest(attribute, expected)

    # Hash07 MD5(LOWER(sAMAccountName),
    #            UPPER(NETBIOSDomainName),
    #            password)
    #
    def test_Wdigest07(self):
        attribute = "virtualWDigest07"
        expected = calc_digest(USER_NAME.lower(),
                               self.netbios_domain.upper(),
                               self.password)
        self._testWDigest(attribute, expected)

    # Hash08 MD5(sAMAccountName,
    #            DNSDomainName,
    #            password)
    #
    # Note: Samba lowercases the DNSDomainName at provision time,
    # Windows preserves the case. This means that the WDigest08 values
    # calculated by Samba and Windows differ.
    #
    def test_Wdigest08(self):
        attribute = "virtualWDigest08"
        expected = calc_digest(USER_NAME,
                               self.dns_domain,
                               self.password)
        self._testWDigest(attribute, expected)

    # Hash09 MD5(LOWER(sAMAccountName),
    #            LOWER(DNSDomainName),
    #            password)
    #
    def test_Wdigest09(self):
        attribute = "virtualWDigest09"
        expected = calc_digest(USER_NAME.lower(),
                               self.dns_domain.lower(),
                               self.password)
        self._testWDigest(attribute, expected)

    # Hash10 MD5(UPPER(sAMAccountName),
    #            UPPER(DNSDomainName),
    #            password)
    #
    def test_Wdigest10(self):
        attribute = "virtualWDigest10"
        expected = calc_digest(USER_NAME.upper(),
                               self.dns_domain.upper(),
                               self.password)
        self._testWDigest(attribute, expected)

    # Hash11 MD5(sAMAccountName,
    #            UPPER(DNSDomainName),
    #            password)
    #
    def test_Wdigest11(self):
        attribute = "virtualWDigest11"
        expected = calc_digest(USER_NAME,
                               self.dns_domain.upper(),
                               self.password)
        self._testWDigest(attribute, expected)

    # Hash12 MD5(sAMAccountName,
    #            LOWER(DNSDomainName),
    #            password)
    #
    def test_Wdigest12(self):
        attribute = "virtualWDigest12"
        expected = calc_digest(USER_NAME,
                               self.dns_domain.lower(),
                               self.password)
        self._testWDigest(attribute, expected)

    # Hash13 MD5(UPPER(sAMAccountName),
    #            LOWER(DNSDomainName),
    #            password)
    #
    def test_Wdigest13(self):
        attribute = "virtualWDigest13"
        expected = calc_digest(USER_NAME.upper(),
                               self.dns_domain.lower(),
                               self.password)
        self._testWDigest(attribute, expected)

    # Hash14 MD5(LOWER(sAMAccountName),
    #            UPPER(DNSDomainName),
    #            password)
    #

    def test_Wdigest14(self):
        attribute = "virtualWDigest14"
        expected = calc_digest(USER_NAME.lower(),
                               self.dns_domain.upper(),
                               self.password)
        self._testWDigest(attribute, expected)

    # Hash15 MD5(userPrincipalName,
    #            password)
    #
    def test_Wdigest15(self):
        attribute = "virtualWDigest15"
        name = "%s@%s" % (USER_NAME, self.dns_domain)
        expected = calc_digest(name,
                               "",
                               self.password)
        self._testWDigest(attribute, expected)

    # Hash16 MD5(LOWER(userPrincipalName),
    #            password)
    #
    def test_Wdigest16(self):
        attribute = "virtualWDigest16"
        name = "%s@%s" % (USER_NAME.lower(), self.dns_domain.lower())
        expected = calc_digest(name,
                               "",
                               self.password)
        self._testWDigest(attribute, expected)

    # Hash17 MD5(UPPER(userPrincipalName),
    #            password)
    #
    def test_Wdigest17(self):
        attribute = "virtualWDigest17"
        name = "%s@%s" % (USER_NAME.upper(), self.dns_domain.upper())
        expected = calc_digest(name,
                               "",
                               self.password)
        self._testWDigest(attribute, expected)

    # Hash18 MD5(NETBIOSDomainName\sAMAccountName,
    #            password)
    #
    def test_Wdigest18(self):
        attribute = "virtualWDigest18"
        name = "%s\\%s" % (self.netbios_domain, USER_NAME)
        expected = calc_digest(name,
                               "",
                               self.password)
        self._testWDigest(attribute, expected)

    # Hash19 MD5(LOWER(NETBIOSDomainName\sAMAccountName),
    #            password)
    #
    def test_Wdigest19(self):
        attribute = "virtualWDigest19"
        name = "%s\\%s" % (self.netbios_domain, USER_NAME)
        expected = calc_digest(name.lower(),
                               "",
                               self.password)
        self._testWDigest(attribute, expected)

    # Hash20 MD5(UPPER(NETBIOSDomainName\sAMAccountName),
    #            password)
    #
    def test_Wdigest20(self):
        attribute = "virtualWDigest20"
        name = "%s\\%s" % (self.netbios_domain, USER_NAME)
        expected = calc_digest(name.upper(),
                               "",
                               self.password)
        self._testWDigest(attribute, expected)

    # Hash21 MD5(sAMAccountName,
    #            "Digest",
    #            password)
    #
    def test_Wdigest21(self):
        attribute = "virtualWDigest21"
        expected = calc_digest(USER_NAME,
                               "Digest",
                               self.password)
        self._testWDigest(attribute, expected)

    # Hash22 MD5(LOWER(sAMAccountName),
    #            "Digest",
    #            password)
    #
    def test_Wdigest22(self):
        attribute = "virtualWDigest22"
        expected = calc_digest(USER_NAME.lower(),
                               "Digest",
                               self.password)
        self._testWDigest(attribute, expected)

    # Hash23 MD5(UPPER(sAMAccountName),
    #            "Digest",
    #            password)
    #
    def test_Wdigest23(self):
        attribute = "virtualWDigest23"
        expected = calc_digest(USER_NAME.upper(),
                               "Digest",
                               self.password)
        self._testWDigest(attribute, expected)

    # Hash24 MD5(userPrincipalName),
    #            "Digest",
    #            password)
    #
    def test_Wdigest24(self):
        attribute = "virtualWDigest24"
        name = "%s@%s" % (USER_NAME, self.dns_domain)
        expected = calc_digest(name,
                               "Digest",
                               self.password)
        self._testWDigest(attribute, expected)

    # Hash25 MD5(LOWER(userPrincipalName),
    #            "Digest",
    #            password)
    #
    def test_Wdigest25(self):
        attribute = "virtualWDigest25"
        name = "%s@%s" % (USER_NAME, self.dns_domain.lower())
        expected = calc_digest(name.lower(),
                               "Digest",
                               self.password)
        self._testWDigest(attribute, expected)

    # Hash26 MD5(UPPER(userPrincipalName),
    #            "Digest",
    #            password)
    #
    def test_Wdigest26(self):
        attribute = "virtualWDigest26"
        name = "%s@%s" % (USER_NAME, self.dns_domain.lower())
        expected = calc_digest(name.upper(),
                               "Digest",
                               self.password)
        self._testWDigest(attribute, expected)
    # Hash27 MD5(NETBIOSDomainName\sAMAccountName,
    #            "Digest",
    #            password)
    #

    def test_Wdigest27(self):
        attribute = "virtualWDigest27"
        name = "%s\\%s" % (self.netbios_domain, USER_NAME)
        expected = calc_digest(name,
                               "Digest",
                               self.password)
        self._testWDigest(attribute, expected)

    # Hash28 MD5(LOWER(NETBIOSDomainName\sAMAccountName),
    #            "Digest",
    #            password)
    #
    def test_Wdigest28(self):
        attribute = "virtualWDigest28"
        name = "%s\\%s" % (self.netbios_domain.lower(), USER_NAME.lower())
        expected = calc_digest(name,
                               "Digest",
                               self.password)
        self._testWDigest(attribute, expected)

    # Hash29 MD5(UPPER(NETBIOSDomainName\sAMAccountName),
    #            "Digest",
    #            password)
    #
    def test_Wdigest29(self):
        attribute = "virtualWDigest29"
        name = "%s\\%s" % (self.netbios_domain.upper(), USER_NAME.upper())
        expected = calc_digest(name,
                               "Digest",
                               self.password)
        self._testWDigest(attribute, expected)

    def test_Wdigest30(self):
        attribute = "virtualWDigest30"
        self._testWDigest(attribute, None, True)

    # Check digest calculation against a known htdigest value
    def test_calc_digest(self):
        htdigest = "gary:fred:2204fcc247cb47ded249ef2fe0013255"
        digest = calc_digest("gary", "fred", "password")
        self.assertEqual(htdigest, digest)
diff --git a/python/samba/tests/samba_tool/visualize.py b/python/samba/tests/samba_tool/visualize.py
new file mode 100644
index 0000000..f736129
--- /dev/null
+++ b/python/samba/tests/samba_tool/visualize.py
@@ -0,0 +1,618 @@
+# -*- coding: utf-8 -*-
+# Tests for samba-tool visualize
+# Copyright (C) Andrew Bartlett 2015, 2018
+#
+# by Douglas Bagnall <douglas.bagnall@catalyst.net.nz>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+"""Tests for samba-tool visualize ntdsconn using the test ldif
+topologies.
+
+We don't test samba-tool visualize reps here because repsTo and
+repsFrom are not replicated, and there are no actual remote servers to
+query.
+"""
+import os
+import tempfile
+import re
+from io import StringIO
+from samba.tests.samba_tool.base import SambaToolCmdTest
+from samba.kcc import ldif_import_export
+from samba.graph import COLOUR_SETS
+from samba.param import LoadParm
+
+MULTISITE_LDIF = os.path.join(os.environ['SRCDIR_ABS'],
+ "testdata/ldif-utils-test-multisite.ldif")
+
+# UNCONNECTED_LDIF is a single site, unconnected 5DC database that was
+# created using samba-tool domain join in testenv.
+UNCONNECTED_LDIF = os.path.join(os.environ['SRCDIR_ABS'],
+ "testdata/unconnected-intrasite.ldif")
+
+DOMAIN = "DC=ad,DC=samba,DC=example,DC=com"
+DN_TEMPLATE = "CN=%s,CN=Servers,CN=%s,CN=Sites,CN=Configuration," + DOMAIN
+
+MULTISITE_LDIF_DSAS = [
+ ("WIN01", "Default-First-Site-Name"),
+ ("WIN08", "Site-4"),
+ ("WIN07", "Site-4"),
+ ("WIN06", "Site-3"),
+ ("WIN09", "Site-5"),
+ ("WIN10", "Site-5"),
+ ("WIN02", "Site-2"),
+ ("WIN04", "Site-2"),
+ ("WIN03", "Site-2"),
+ ("WIN05", "Site-2"),
+]
+
+
+class StringIOThinksItIsATTY(StringIO):
+ """A StringIO that claims to be a TTY for testing --color=auto,
+ by switching the stringIO class attribute."""
+ def isatty(self):
+ return True
+
+
+def samdb_from_ldif(ldif, tempdir, lp, dsa=None, tag=''):
+ if dsa is None:
+ dsa_name = 'default-DSA'
+ else:
+ dsa_name = dsa[:5]
+ dburl = os.path.join(tempdir,
+ ("ldif-to-sambdb-%s-%s" %
+ (tag, dsa_name)))
+ samdb = ldif_import_export.ldif_to_samdb(dburl, lp, ldif,
+ forced_local_dsa=dsa)
+ return (samdb, dburl)
+
+
+def collapse_space(s, keep_empty_lines=False):
+ lines = []
+ for line in s.splitlines():
+ line = ' '.join(line.strip().split())
+ if line or keep_empty_lines:
+ lines.append(line)
+ return '\n'.join(lines)
+
+
+class SambaToolVisualizeLdif(SambaToolCmdTest):
+ def setUp(self):
+ super().setUp()
+ self.lp = LoadParm()
+ self.samdb, self.dbfile = samdb_from_ldif(MULTISITE_LDIF,
+ self.tempdir,
+ self.lp)
+ self.dburl = 'tdb://' + self.dbfile
+
+ def tearDown(self):
+ self.remove_files(self.dbfile)
+ super().tearDown()
+
+ def remove_files(self, *files):
+ for f in files:
+ self.assertTrue(f.startswith(self.tempdir))
+ os.unlink(f)
+
+ def test_colour(self):
+ """Ensure the colour output is the same as the monochrome output
+ EXCEPT for the colours, of which the monochrome one should
+ know nothing."""
+ colour_re = re.compile('\033' r'\[[\d;]+m')
+ result, monochrome, err = self.runsubcmd("visualize", "ntdsconn",
+ '-H', self.dburl,
+ '--color=no', '-S')
+ self.assertCmdSuccess(result, monochrome, err)
+ self.assertFalse(colour_re.findall(monochrome))
+
+ colour_args = [['--color=yes']]
+ colour_args += [['--color-scheme', x] for x in COLOUR_SETS
+ if x is not None]
+
+ for args in colour_args:
+ result, out, err = self.runsubcmd("visualize", "ntdsconn",
+ '-H', self.dburl,
+ '-S', *args)
+ self.assertCmdSuccess(result, out, err)
+ self.assertTrue(colour_re.search(out),
+ f"'{' '.join(args)}' should be colour")
+ uncoloured = colour_re.sub('', out)
+
+ self.assertStringsEqual(monochrome, uncoloured, strip=True)
+
+ def assert_colour(self, text, has_colour=True, monochrome=None):
+ colour_re = re.compile('\033' r'\[[\d;]+m')
+ found = colour_re.search(text)
+ if has_colour:
+ self.assertTrue(found, text)
+ else:
+ self.assertFalse(found, text)
+ if monochrome is not None:
+ uncoloured = colour_re.sub('', text)
+ self.assertStringsEqual(monochrome, uncoloured, strip=True)
+
+ def test_colour_auto_tty(self):
+ """Assert the behaviour of --colour=auto with and without
+ NO_COLOUR on a fake tty"""
+ result, monochrome, err = self.runsubcmd("visualize", "ntdsconn",
+ '-H', self.dburl,
+ '--color=no', '-S')
+ self.assertCmdSuccess(result, monochrome, err)
+ self.assert_colour(monochrome, False)
+ cls = self.__class__
+
+ try:
+ cls.stringIO = StringIOThinksItIsATTY
+ old_no_color = os.environ.pop('NO_COLOR', None)
+ # First with no NO_COLOR env var. There should be colour.
+ result, out, err = self.runsubcmd("visualize", "ntdsconn",
+ '-H', self.dburl,
+ '-S',
+ '--color=auto')
+ self.assertCmdSuccess(result, out, err)
+ self.assert_colour(out, True, monochrome)
+
+ for env, opt, is_colour in [
+ # NO_COLOR='' should be as if no NO_COLOR
+ ['', '--color=auto', True],
+ # NO_COLOR='1': we expect no colour
+ ['1', '--color=auto', False],
+ # NO_COLOR='no': we still expect no colour
+ ['no', '--color=auto', False],
+ # NO_COLOR=' ', alias for 'auto'
+ [' ', '--color=tty', False],
+ # NO_COLOR=' ', alias for 'auto'
+ [' ', '--color=if-tty', False],
+ # NO_COLOR='', alias for 'auto'
+ ['', '--color=tty', True],
+ # NO_COLOR='', alias for 'no'
+ ['', '--color=never', False],
+ # NO_COLOR='x', alias for 'yes' (--color=yes wins)
+ ['x', '--color=force', True],
+ ]:
+ os.environ['NO_COLOR'] = env
+
+ try:
+ result, out, err = self.runsubcmd("visualize", "ntdsconn",
+ '-H', self.dburl,
+ '-S',
+ opt)
+ except SystemExit:
+ # optparse makes us do this
+ self.fail(f"optparse rejects {env}, {opt}, {is_colour}")
+
+ self.assertCmdSuccess(result, out, err)
+ self.assert_colour(out, is_colour, monochrome)
+
+ # with "-o -" output filename alias for stdout.
+ result, out, err = self.runsubcmd("visualize", "ntdsconn",
+ '-H', self.dburl,
+ '-S',
+ opt,
+ '-o', '-')
+ self.assertCmdSuccess(result, out, err)
+ self.assert_colour(out, is_colour, monochrome)
+
+ finally:
+ cls.stringIO = StringIO
+ if old_no_color is None:
+ os.environ.pop('NO_COLOR', None)
+ else:
+ os.environ['NO_COLOR'] = old_no_color
+
+ def test_import_ldif_xdot(self):
+ """We can't test actual xdot, but using the environment we can
+ persuade samba-tool that a script we write is xdot and ensure
+ it gets the right text.
+ """
+ result, expected, err = self.runsubcmd("visualize", "ntdsconn",
+ '-H', self.dburl,
+ '--color=no', '-S',
+ '--dot')
+ self.assertCmdSuccess(result, expected, err)
+
+ # not that we're expecting anything here
+ old_xdot_path = os.environ.get('SAMBA_TOOL_XDOT_PATH')
+
+ tmpdir = tempfile.mkdtemp()
+ fake_xdot = os.path.join(tmpdir, 'fake_xdot')
+ content = os.path.join(tmpdir, 'content')
+ f = open(fake_xdot, 'w')
+ print('#!/bin/sh', file=f)
+ print('cp $1 %s' % content, file=f)
+ f.close()
+ os.chmod(fake_xdot, 0o700)
+
+ os.environ['SAMBA_TOOL_XDOT_PATH'] = fake_xdot
+ result, empty, err = self.runsubcmd("visualize", "ntdsconn",
+ '--importldif', MULTISITE_LDIF,
+ '--color=no', '-S',
+ '--xdot')
+
+ f = open(content)
+ xdot = f.read()
+ f.close()
+ os.remove(fake_xdot)
+ os.remove(content)
+ os.rmdir(tmpdir)
+
+ if old_xdot_path is not None:
+ os.environ['SAMBA_TOOL_XDOT_PATH'] = old_xdot_path
+ else:
+ del os.environ['SAMBA_TOOL_XDOT_PATH']
+
+ self.assertCmdSuccess(result, xdot, err)
+ self.assertStringsEqual(expected, xdot, strip=True)
+
+ def test_import_ldif(self):
+ """Make sure the samba-tool visualize --importldif option gives the
+ same output as using the externally generated db from the same
+ LDIF."""
+ result, s1, err = self.runsubcmd("visualize", "ntdsconn",
+ '-H', self.dburl,
+ '--color=no', '-S')
+ self.assertCmdSuccess(result, s1, err)
+
+ result, s2, err = self.runsubcmd("visualize", "ntdsconn",
+ '--importldif', MULTISITE_LDIF,
+ '--color=no', '-S')
+ self.assertCmdSuccess(result, s2, err)
+
+ self.assertStringsEqual(s1, s2)
+
+ def test_output_file(self):
+ """Check that writing to a file works, with and without
+ --color=auto."""
+ # NOTE, we can't really test --color=auto works with a TTY.
+ colour_re = re.compile('\033' r'\[[\d;]+m')
+ result, expected, err = self.runsubcmd("visualize", "ntdsconn",
+ '-H', self.dburl,
+ '--color=auto', '-S')
+ self.assertCmdSuccess(result, expected, err)
+ # Not a TTY, so stdout output should be colourless
+ self.assertFalse(colour_re.search(expected))
+ expected = expected.strip()
+
+ color_auto_file = os.path.join(self.tempdir, 'color-auto')
+
+ result, out, err = self.runsubcmd("visualize", "ntdsconn",
+ '-H', self.dburl,
+ '--color=auto', '-S',
+ '-o', color_auto_file)
+ self.assertCmdSuccess(result, out, err)
+ # We wrote to file, so stdout should be empty
+ self.assertEqual(out, '')
+ f = open(color_auto_file)
+ color_auto = f.read()
+ f.close()
+ self.assertStringsEqual(color_auto, expected, strip=True)
+ self.remove_files(color_auto_file)
+
+ color_no_file = os.path.join(self.tempdir, 'color-no')
+ result, out, err = self.runsubcmd("visualize", "ntdsconn",
+ '-H', self.dburl,
+ '--color=no', '-S',
+ '-o', color_no_file)
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(out, '')
+ f = open(color_no_file)
+ color_no = f.read()
+ f.close()
+ self.remove_files(color_no_file)
+
+ self.assertStringsEqual(color_no, expected, strip=True)
+
+ color_yes_file = os.path.join(self.tempdir, 'color-yes')
+ result, out, err = self.runsubcmd("visualize", "ntdsconn",
+ '-H', self.dburl,
+ '--color=yes', '-S',
+ '-o', color_yes_file)
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual(out, '')
+ f = open(color_yes_file)
+ colour_yes = f.read()
+ f.close()
+ self.assertNotEqual(colour_yes.strip(), expected)
+
+ self.remove_files(color_yes_file)
+
+ # Try the magic filename "-", meaning stdout.
+ # This doesn't exercise the case when stdout is a TTY
+ for c, equal in [('no', True), ('auto', True), ('yes', False)]:
+ result, out, err = self.runsubcmd("visualize", "ntdsconn",
+ '-H', self.dburl,
+ '--color', c,
+ '-S', '-o', '-')
+ self.assertCmdSuccess(result, out, err)
+ self.assertEqual((out.strip() == expected), equal)
+
+ def test_utf8(self):
+ """Ensure that --utf8 adds at least some expected utf-8, and that it
+ isn't there without --utf8."""
+ result, utf8, err = self.runsubcmd("visualize", "ntdsconn",
+ '-H', self.dburl,
+ '--color=no', '-S', '--utf8')
+ self.assertCmdSuccess(result, utf8, err)
+
+ result, ascii, err = self.runsubcmd("visualize", "ntdsconn",
+ '-H', self.dburl,
+ '--color=no', '-S')
+ self.assertCmdSuccess(result, ascii, err)
+ for c in ('│', '─', '╭'):
+ self.assertTrue(c in utf8, 'UTF8 should contain %s' % c)
+ self.assertTrue(c not in ascii, 'ASCII should not contain %s' % c)
+
+ def test_forced_local_dsa(self):
+ # the forced_local_dsa shouldn't make any difference, except
+ # for the title line.
+ result, target, err = self.runsubcmd("visualize", "ntdsconn",
+ '-H', self.dburl,
+ '--color=no', '-S')
+ self.assertCmdSuccess(result, target, err)
+ files = []
+ target = target.strip().split('\n', 1)[1]
+ for cn, site in MULTISITE_LDIF_DSAS:
+ dsa = DN_TEMPLATE % (cn, site)
+ samdb, dbfile = samdb_from_ldif(MULTISITE_LDIF,
+ self.tempdir,
+ self.lp, dsa,
+ tag=cn)
+
+ result, out, err = self.runsubcmd("visualize", "ntdsconn",
+ '-H', 'tdb://' + dbfile,
+ '--color=no', '-S')
+ self.assertCmdSuccess(result, out, err)
+ # Separate out the title line, which will differ in the DN.
+ title, body = out.strip().split('\n', 1)
+ self.assertStringsEqual(target, body)
+ self.assertIn(cn, title)
+ files.append(dbfile)
+ self.remove_files(*files)
+
+ def test_short_names(self):
+ """Ensure the colour ones are the same as the monochrome ones EXCEPT
+ for the colours, of which the monochrome one should know nothing"""
+ result, short, err = self.runsubcmd("visualize", "ntdsconn",
+ '-H', self.dburl,
+ '--color=no', '-S', '--no-key')
+ self.assertCmdSuccess(result, short, err)
+ result, long, err = self.runsubcmd("visualize", "ntdsconn",
+ '-H', self.dburl,
+ '--color=no', '--no-key')
+ self.assertCmdSuccess(result, long, err)
+
+ lines = short.split('\n')
+ replacements = []
+ key_lines = ['']
+ short_without_key = []
+ for line in lines:
+ m = re.match(r"'(.{1,2})' stands for '(.+)'", line)
+ if m:
+ a, b = m.groups()
+ replacements.append((len(a), a, b))
+ key_lines.append(line)
+ else:
+ short_without_key.append(line)
+
+ short = '\n'.join(short_without_key)
+ # we need to replace longest strings first
+ replacements.sort(reverse=True)
+ short2long = short
+ # we don't want to shorten the DC name in the header line.
+ long_header, long2short = long.strip().split('\n', 1)
+ for _, a, b in replacements:
+ short2long = short2long.replace(a, b)
+ long2short = long2short.replace(b, a)
+
+ long2short = '%s\n%s' % (long_header, long2short)
+
+        # The white space is going to be all wacky, so let's squish it down
+ short2long = collapse_space(short2long)
+ long2short = collapse_space(long2short)
+ short = collapse_space(short)
+ long = collapse_space(long)
+
+ self.assertStringsEqual(short2long, long, strip=True)
+ self.assertStringsEqual(short, long2short, strip=True)
+
+ def test_disconnected_ldif_with_key(self):
+ """Test that the 'unconnected' ldif shows up and exactly matches the
+ expected output."""
+ # This is not truly a disconnected graph because the
+ # vampre/local/promoted DCs are in there and they have
+ # relationships, and SERVER2 and SERVER3 for some reason refer
+ # to them.
+
+ samdb, dbfile = samdb_from_ldif(UNCONNECTED_LDIF,
+ self.tempdir,
+ self.lp, tag='disconnected')
+ dburl = 'tdb://' + dbfile
+ result, output, err = self.runsubcmd("visualize", "ntdsconn",
+ '-H', dburl,
+ '--color=no', '-S')
+ self.remove_files(dbfile)
+ self.assertCmdSuccess(result, output, err)
+ self.assertStringsEqual(output,
+ EXPECTED_DISTANCE_GRAPH_WITH_KEY)
+
+ def test_dot_ntdsconn(self):
+ """Graphviz NTDS Connection output"""
+ result, dot, err = self.runsubcmd("visualize", "ntdsconn",
+ '-H', self.dburl,
+ '--color=no', '-S', '--dot',
+ '--no-key')
+ self.assertCmdSuccess(result, dot, err)
+ self.assertStringsEqual(EXPECTED_DOT_MULTISITE_NO_KEY, dot)
+
+ def test_dot_ntdsconn_disconnected(self):
+ """Graphviz NTDS Connection output from disconnected graph"""
+ samdb, dbfile = samdb_from_ldif(UNCONNECTED_LDIF,
+ self.tempdir,
+ self.lp, tag='disconnected')
+
+ result, dot, err = self.runsubcmd("visualize", "ntdsconn",
+ '-H', 'tdb://' + dbfile,
+ '--color=no', '-S', '--dot',
+ '-o', '-')
+ self.assertCmdSuccess(result, dot, err)
+ self.remove_files(dbfile)
+ self.assertStringsEqual(EXPECTED_DOT_NTDSCONN_DISCONNECTED, dot,
+ strip=True)
+
+ def test_dot_ntdsconn_disconnected_to_file(self):
+ """Graphviz NTDS Connection output into a file"""
+ samdb, dbfile = samdb_from_ldif(UNCONNECTED_LDIF,
+ self.tempdir,
+ self.lp, tag='disconnected')
+
+ dot_file = os.path.join(self.tempdir, 'dotfile')
+
+ result, dot, err = self.runsubcmd("visualize", "ntdsconn",
+ '-H', 'tdb://' + dbfile,
+ '--color=no', '-S', '--dot',
+ '-o', dot_file)
+ self.assertCmdSuccess(result, dot, err)
+ f = open(dot_file)
+ dot = f.read()
+ f.close()
+ self.assertStringsEqual(EXPECTED_DOT_NTDSCONN_DISCONNECTED, dot)
+
+ self.remove_files(dbfile, dot_file)
+
+
+EXPECTED_DOT_MULTISITE_NO_KEY = r"""/* generated by samba */
+digraph A_samba_tool_production {
+label="NTDS Connections known to CN=WIN01,CN=Servers,CN=Default-First-Site-Name,CN=Sites,CN=Configuration,DC=ad,DC=samba,DC=example,DC=com";
+fontsize=10;
+
+node[fontname=Helvetica; fontsize=10];
+
+"CN=NTDS Settings,\nCN=WIN01,\nCN=Servers,\nCN=Default-\nFirst-Site-Name,\n...";
+"CN=NTDS Settings,\nCN=WIN02,\nCN=Servers,\nCN=Site-2,\n...";
+"CN=NTDS Settings,\nCN=WIN03,\nCN=Servers,\nCN=Site-2,\n...";
+"CN=NTDS Settings,\nCN=WIN04,\nCN=Servers,\nCN=Site-2,\n...";
+"CN=NTDS Settings,\nCN=WIN05,\nCN=Servers,\nCN=Site-2,\n...";
+"CN=NTDS Settings,\nCN=WIN06,\nCN=Servers,\nCN=Site-3,\n...";
+"CN=NTDS Settings,\nCN=WIN07,\nCN=Servers,\nCN=Site-4,\n...";
+"CN=NTDS Settings,\nCN=WIN08,\nCN=Servers,\nCN=Site-4,\n...";
+"CN=NTDS Settings,\nCN=WIN09,\nCN=Servers,\nCN=Site-5,\n...";
+"CN=NTDS Settings,\nCN=WIN10,\nCN=Servers,\nCN=Site-5,\n...";
+"CN=NTDS Settings,\nCN=WIN01,\nCN=Servers,\nCN=Default-\nFirst-Site-Name,\n..." -> "CN=NTDS Settings,\nCN=WIN03,\nCN=Servers,\nCN=Site-2,\n..." [color="#000000", ];
+"CN=NTDS Settings,\nCN=WIN01,\nCN=Servers,\nCN=Default-\nFirst-Site-Name,\n..." -> "CN=NTDS Settings,\nCN=WIN06,\nCN=Servers,\nCN=Site-3,\n..." [color="#000000", ];
+"CN=NTDS Settings,\nCN=WIN01,\nCN=Servers,\nCN=Default-\nFirst-Site-Name,\n..." -> "CN=NTDS Settings,\nCN=WIN07,\nCN=Servers,\nCN=Site-4,\n..." [color="#000000", ];
+"CN=NTDS Settings,\nCN=WIN01,\nCN=Servers,\nCN=Default-\nFirst-Site-Name,\n..." -> "CN=NTDS Settings,\nCN=WIN08,\nCN=Servers,\nCN=Site-4,\n..." [color="#000000", ];
+"CN=NTDS Settings,\nCN=WIN01,\nCN=Servers,\nCN=Default-\nFirst-Site-Name,\n..." -> "CN=NTDS Settings,\nCN=WIN10,\nCN=Servers,\nCN=Site-5,\n..." [color="#000000", ];
+"CN=NTDS Settings,\nCN=WIN02,\nCN=Servers,\nCN=Site-2,\n..." -> "CN=NTDS Settings,\nCN=WIN04,\nCN=Servers,\nCN=Site-2,\n..." [color="#000000", ];
+"CN=NTDS Settings,\nCN=WIN02,\nCN=Servers,\nCN=Site-2,\n..." -> "CN=NTDS Settings,\nCN=WIN05,\nCN=Servers,\nCN=Site-2,\n..." [color="#000000", ];
+"CN=NTDS Settings,\nCN=WIN03,\nCN=Servers,\nCN=Site-2,\n..." -> "CN=NTDS Settings,\nCN=WIN04,\nCN=Servers,\nCN=Site-2,\n..." [color="#000000", ];
+"CN=NTDS Settings,\nCN=WIN03,\nCN=Servers,\nCN=Site-2,\n..." -> "CN=NTDS Settings,\nCN=WIN05,\nCN=Servers,\nCN=Site-2,\n..." [color="#000000", ];
+"CN=NTDS Settings,\nCN=WIN04,\nCN=Servers,\nCN=Site-2,\n..." -> "CN=NTDS Settings,\nCN=WIN01,\nCN=Servers,\nCN=Default-\nFirst-Site-Name,\n..." [color="#000000", ];
+"CN=NTDS Settings,\nCN=WIN04,\nCN=Servers,\nCN=Site-2,\n..." -> "CN=NTDS Settings,\nCN=WIN02,\nCN=Servers,\nCN=Site-2,\n..." [color="#000000", ];
+"CN=NTDS Settings,\nCN=WIN04,\nCN=Servers,\nCN=Site-2,\n..." -> "CN=NTDS Settings,\nCN=WIN03,\nCN=Servers,\nCN=Site-2,\n..." [color="#000000", ];
+"CN=NTDS Settings,\nCN=WIN05,\nCN=Servers,\nCN=Site-2,\n..." -> "CN=NTDS Settings,\nCN=WIN02,\nCN=Servers,\nCN=Site-2,\n..." [color="#000000", ];
+"CN=NTDS Settings,\nCN=WIN05,\nCN=Servers,\nCN=Site-2,\n..." -> "CN=NTDS Settings,\nCN=WIN03,\nCN=Servers,\nCN=Site-2,\n..." [color="#000000", ];
+"CN=NTDS Settings,\nCN=WIN07,\nCN=Servers,\nCN=Site-4,\n..." -> "CN=NTDS Settings,\nCN=WIN01,\nCN=Servers,\nCN=Default-\nFirst-Site-Name,\n..." [color="#000000", ];
+"CN=NTDS Settings,\nCN=WIN09,\nCN=Servers,\nCN=Site-5,\n..." -> "CN=NTDS Settings,\nCN=WIN10,\nCN=Servers,\nCN=Site-5,\n..." [color="#000000", ];
+"CN=NTDS Settings,\nCN=WIN10,\nCN=Servers,\nCN=Site-5,\n..." -> "CN=NTDS Settings,\nCN=WIN01,\nCN=Servers,\nCN=Default-\nFirst-Site-Name,\n..." [color="#000000", ];
+"CN=NTDS Settings,\nCN=WIN10,\nCN=Servers,\nCN=Site-5,\n..." -> "CN=NTDS Settings,\nCN=WIN09,\nCN=Servers,\nCN=Site-5,\n..." [color="#000000", ];
+}
+
+"""
+
+
+EXPECTED_DOT_NTDSCONN_DISCONNECTED = r"""/* generated by samba */
+digraph A_samba_tool_production {
+label="NTDS Connections known to CN=LOCALDC,CN=Servers,CN=Default-First-Site-Name,CN=Sites,CN=Configuration,DC=samba,DC=example,DC=com";
+fontsize=10;
+
+node[fontname=Helvetica; fontsize=10];
+
+"CN=NTDS Settings,\nCN=CLIENT,\n...";
+"CN=NTDS Settings,\nCN=LOCALDC,\n...";
+"CN=NTDS Settings,\nCN=PROMOTEDVDC,\n...";
+"CN=NTDS Settings,\nCN=SERVER1,\n...";
+"CN=NTDS Settings,\nCN=SERVER2,\n...";
+"CN=NTDS Settings,\nCN=SERVER3,\n...";
+"CN=NTDS Settings,\nCN=SERVER4,\n...";
+"CN=NTDS Settings,\nCN=SERVER5,\n...";
+"CN=NTDS Settings,\nCN=LOCALDC,\n..." -> "CN=NTDS Settings,\nCN=PROMOTEDVDC,\n..." [color="#000000", ];
+"CN=NTDS Settings,\nCN=PROMOTEDVDC,\n..." -> "CN=NTDS Settings,\nCN=LOCALDC,\n..." [color="#000000", ];
+"CN=NTDS Settings,\nCN=SERVER2,\n..." -> "CN=NTDS Settings,\nCN=PROMOTEDVDC,\n..." [color="#000000", ];
+"CN=NTDS Settings,\nCN=SERVER3,\n..." -> "CN=NTDS Settings,\nCN=LOCALDC,\n..." [color="#000000", ];
+subgraph cluster_key {
+label="Key";
+subgraph cluster_key_nodes {
+label="";
+color = "invis";
+
+}
+subgraph cluster_key_edges {
+label="";
+color = "invis";
+subgraph cluster_key_0_ {
+key_0_e1[label=src; color="#000000"; group="key_0__g"]
+key_0_e2[label=dest; color="#000000"; group="key_0__g"]
+key_0_e1 -> key_0_e2 [constraint = false; color="#000000"]
+key_0__label[shape=plaintext; style=solid; width=2.000000; label="NTDS Connection\r"]
+}
+{key_0__label}
+}
+
+elision0[shape=plaintext; style=solid; label="\“...” means “CN=Servers,CN=Default-First-Site-Name,CN=Sites,CN=Configuration,DC=samba,DC=example,DC=com”\r"]
+
+}
+"CN=NTDS Settings,\nCN=CLIENT,\n..." -> key_0__label [style=invis];
+"CN=NTDS Settings,\nCN=LOCALDC,\n..." -> key_0__label [style=invis];
+"CN=NTDS Settings,\nCN=PROMOTEDVDC,\n..." -> key_0__label [style=invis];
+"CN=NTDS Settings,\nCN=SERVER1,\n..." -> key_0__label [style=invis];
+"CN=NTDS Settings,\nCN=SERVER2,\n..." -> key_0__label [style=invis];
+"CN=NTDS Settings,\nCN=SERVER3,\n..." -> key_0__label [style=invis];
+"CN=NTDS Settings,\nCN=SERVER4,\n..." -> key_0__label [style=invis];
+"CN=NTDS Settings,\nCN=SERVER5,\n..." -> key_0__label [style=invis]
+key_0__label -> elision0 [style=invis; weight=9]
+
+}
+"""
+
+EXPECTED_DISTANCE_GRAPH_WITH_KEY = """
+NTDS Connections known to CN=LOCALDC,CN=Servers,CN=Default-First-Site-Name,CN=Sites,CN=Configuration,DC=samba,DC=example,DC=com
+
+ destination
+ ,-------- *,CN=CLIENT+
+ |,------- *,CN=LOCALDC+
+ ||,------ *,CN=PROMOTEDVDC+
+ |||,----- *,CN=SERVER1+
+ ||||,---- *,CN=SERVER2+
+ |||||,--- *,CN=SERVER3+
+ ||||||,-- *,CN=SERVER4+
+ source |||||||,- *,CN=SERVER5+
+ *,CN=CLIENT+ 0-------
+ *,CN=LOCALDC+ -01-----
+*,CN=PROMOTEDVDC+ -10-----
+ *,CN=SERVER1+ ---0----
+ *,CN=SERVER2+ -21-0---
+ *,CN=SERVER3+ -12--0--
+ *,CN=SERVER4+ ------0-
+ *,CN=SERVER5+ -------0
+
+'*' stands for 'CN=NTDS Settings'
+'+' stands for ',CN=Servers,CN=Default-First-Site-Name,CN=Sites,CN=Configuration,DC=samba,DC=example,DC=com'
+
+Data can get from source to destination in the indicated number of steps.
+0 means zero steps (it is the same DC)
+1 means a direct link
+2 means a transitive link involving two steps (i.e. one intermediate DC)
+- means there is no connection, even through other DCs
+
+"""
diff --git a/python/samba/tests/samba_tool/visualize_drs.py b/python/samba/tests/samba_tool/visualize_drs.py
new file mode 100644
index 0000000..64b2cdb
--- /dev/null
+++ b/python/samba/tests/samba_tool/visualize_drs.py
@@ -0,0 +1,636 @@
+# -*- coding: utf-8 -*-
+# Originally based on tests for samba.kcc.ldif_import_export.
+# Copyright (C) Andrew Bartlett 2015, 2018
+#
+# by Douglas Bagnall <douglas.bagnall@catalyst.net.nz>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+"""Tests for samba-tool visualize using the vampire DC and promoted DC
+environments. For most tests we assume we can't assert much about what
+state they are in, so we mainly check for command failure, but for
+others we try to grasp control of replication and make more specific
+assertions.
+"""
+
+import os
+import re
+import json
+import random
+import subprocess
+from samba.tests.samba_tool.base import SambaToolCmdTest
+
+VERBOSE = False
+
+ENV_DSAS = {
+ 'promoted_dc': ['CN=PROMOTEDVDC,CN=Servers,CN=Default-First-Site-Name,CN=Sites,CN=Configuration,DC=samba,DC=example,DC=com',
+ 'CN=LOCALDC,CN=Servers,CN=Default-First-Site-Name,CN=Sites,CN=Configuration,DC=samba,DC=example,DC=com'],
+ 'vampire_dc': ['CN=LOCALDC,CN=Servers,CN=Default-First-Site-Name,CN=Sites,CN=Configuration,DC=samba,DC=example,DC=com',
+ 'CN=LOCALVAMPIREDC,CN=Servers,CN=Default-First-Site-Name,CN=Sites,CN=Configuration,DC=samba,DC=example,DC=com'],
+}
+
+PARTITION_NAMES = [
+ "DOMAIN",
+ "CONFIGURATION",
+ "SCHEMA",
+ "DNSDOMAIN",
+ "DNSFOREST",
+]
+
+def adjust_cmd_for_py_version(parts):
+ if os.getenv("PYTHON", None):
+ parts.insert(0, os.environ["PYTHON"])
+ return parts
+
+def set_auto_replication(dc, allow):
+ credstring = '-U%s%%%s' % (os.environ["USERNAME"], os.environ["PASSWORD"])
+ on_or_off = '-' if allow else '+'
+
+ for opt in ['DISABLE_INBOUND_REPL',
+ 'DISABLE_OUTBOUND_REPL']:
+ cmd = adjust_cmd_for_py_version(['bin/samba-tool',
+ 'drs', 'options',
+ credstring, dc,
+ "--dsa-option=%s%s" % (on_or_off, opt)])
+
+ subprocess.check_call(cmd)
+
+
+def force_replication(src, dest, base):
+ credstring = '-U%s%%%s' % (os.environ["USERNAME"], os.environ["PASSWORD"])
+ cmd = adjust_cmd_for_py_version(['bin/samba-tool',
+ 'drs', 'replicate',
+ dest, src, base,
+ credstring,
+ '--sync-forced'])
+
+ subprocess.check_call(cmd)
+
+
+def get_utf8_matrix(s):
+ # parse the graphical table *just* well enough for our tests
+ # decolourise first
+ s = re.sub("\033" r"\[[^m]+m", '', s)
+ lines = s.split('\n')
+ # matrix rows have '·' on the diagonal
+ rows = [x.strip().replace('·', '0') for x in lines if '·' in x]
+ names = []
+ values = []
+ for r in rows:
+ parts = r.rsplit(None, len(rows))
+ k, v = parts[0], parts[1:]
+ # we want the FOO in 'CN=FOO+' or 'CN=FOO,CN=x,DC=...'
+ k = re.match(r'cn=([^+,]+)', k.lower()).group(1)
+ names.append(k)
+ if len(v) == 1: # this is a single-digit matrix, no spaces
+ v = list(v[0])
+ values.append([int(x) if x.isdigit() else 1e999 for x in v])
+
+ d = {}
+ for n1, row in zip(names, values):
+ d[n1] = {}
+ for n2, v in zip(names, row):
+ d[n1][n2] = v
+
+ return d
+
+
+class SambaToolVisualizeDrsTest(SambaToolCmdTest):
+
+ def test_ntdsconn(self):
+ server = "ldap://%s" % os.environ["SERVER"]
+ creds = "%s%%%s" % (os.environ["USERNAME"], os.environ["PASSWORD"])
+ (result, out, err) = self.runsubcmd("visualize", "ntdsconn",
+ '-H', server,
+ '-U', creds,
+ '--color=no', '-S')
+ self.assertCmdSuccess(result, out, err)
+
+ def test_ntdsconn_remote(self):
+ server = "ldap://%s" % os.environ["SERVER"]
+ creds = "%s%%%s" % (os.environ["USERNAME"], os.environ["PASSWORD"])
+ (result, out, err) = self.runsubcmd("visualize", "ntdsconn",
+ '-H', server,
+ '-U', creds,
+ '--color=no', '-S', '-r')
+ self.assertCmdSuccess(result, out, err)
+
+ def test_reps(self):
+ server = "ldap://%s" % os.environ["SERVER"]
+ creds = "%s%%%s" % (os.environ["USERNAME"], os.environ["PASSWORD"])
+ (result, out, err) = self.runsubcmd("visualize", "reps",
+ '-H', server,
+ '-U', creds,
+ '--color=no', '-S')
+ self.assertCmdSuccess(result, out, err)
+
+ def test_uptodateness_all_partitions(self):
+ creds = "%s%%%s" % (os.environ["USERNAME"], os.environ["PASSWORD"])
+ dc1 = os.environ["SERVER"]
+ dc2 = os.environ["DC_SERVER"]
+ # We will check that the visualisation works for the two
+ # stopped DCs, but we can't make assertions that the output
+ # will be the same because there may be replication between
+ # the two calls. Stopping the replication on these ones is not
+ # enough because there are other DCs about.
+ (result, out, err) = self.runsubcmd("visualize", "uptodateness",
+ "-r",
+ '-H', "ldap://%s" % dc1,
+ '-U', creds,
+ '--color=no', '-S')
+ self.assertCmdSuccess(result, out, err)
+
+ (result, out, err) = self.runsubcmd("visualize", "uptodateness",
+ "-r",
+ '-H', "ldap://%s" % dc2,
+ '-U', creds,
+ '--color=no', '-S')
+ self.assertCmdSuccess(result, out, err)
+
+ def test_uptodateness_partitions(self):
+ creds = "%s%%%s" % (os.environ["USERNAME"], os.environ["PASSWORD"])
+ dc1 = os.environ["SERVER"]
+ for part in PARTITION_NAMES:
+ (result, out, err) = self.runsubcmd("visualize", "uptodateness",
+ "-r",
+ '-H', "ldap://%s" % dc1,
+ '-U', creds,
+ '--color=no', '-S',
+ '--partition', part)
+ self.assertCmdSuccess(result, out, err)
+
+ def test_drs_uptodateness(self):
+ """
+ Test cmd `drs uptodateness`
+
+ It should print info like this:
+
+ DNSDOMAIN failure: 4 median: 1.5 maximum: 2
+ SCHEMA failure: 4 median: 220.0 maximum: 439
+ DOMAIN failure: 1 median: 25 maximum: 25
+ CONFIGURATION failure: 1 median: 25 maximum: 25
+ DNSFOREST failure: 4 median: 1.5 maximum: 2
+
+ """
+ creds = "%s%%%s" % (os.environ["USERNAME"], os.environ["PASSWORD"])
+ dc1 = os.environ["SERVER"]
+ dc2 = os.environ["DC_SERVER"]
+ for dc in [dc1, dc2]:
+ (result, out, err) = self.runsubcmd("drs", "uptodateness",
+ '-H', "ldap://%s" % dc,
+ '-U', creds)
+ self.assertCmdSuccess(result, out, err)
+ # each partition name should be in output
+ for part_name in PARTITION_NAMES:
+ self.assertIn(part_name, out, msg=out)
+
+ for line in out.splitlines():
+ # check keyword in output
+ for attr in ['maximum', 'median', 'failure']:
+ self.assertIn(attr, line)
+
+ def test_drs_uptodateness_partition(self):
+ """
+ Test cmd `drs uptodateness --partition DOMAIN`
+
+ It should print info like this:
+
+ DOMAIN failure: 1 median: 25 maximum: 25
+
+ """
+ creds = "%s%%%s" % (os.environ["USERNAME"], os.environ["PASSWORD"])
+ dc1 = os.environ["SERVER"]
+ dc2 = os.environ["DC_SERVER"]
+ for dc in [dc1, dc2]:
+ (result, out, err) = self.runsubcmd("drs", "uptodateness",
+ '-H', "ldap://%s" % dc,
+ '-U', creds,
+ '--partition', 'DOMAIN')
+ self.assertCmdSuccess(result, out, err)
+
+ lines = out.splitlines()
+ self.assertEqual(len(lines), 1)
+
+ line = lines[0]
+ self.assertTrue(line.startswith('DOMAIN'))
+
+ def test_drs_uptodateness_json(self):
+ """
+ Test cmd `drs uptodateness --json`
+
+ Example output:
+
+ {
+ "DNSDOMAIN": {
+ "failure": 0,
+ "median": 0.0,
+ "maximum": 0
+ },
+ ...
+ "SCHEMA": {
+ "failure": 0,
+ "median": 0.0,
+ "maximum": 0
+ }
+ }
+ """
+ creds = "%s%%%s" % (os.environ["USERNAME"], os.environ["PASSWORD"])
+ dc1 = os.environ["SERVER"]
+ dc2 = os.environ["DC_SERVER"]
+ for dc in [dc1, dc2]:
+ (result, out, err) = self.runsubcmd("drs", "uptodateness",
+ '-H', "ldap://%s" % dc,
+ '-U', creds,
+ '--json')
+ self.assertCmdSuccess(result, out, err)
+ # should be json format
+ obj = json.loads(out)
+ # each partition name should be in json obj
+ for part_name in PARTITION_NAMES:
+ self.assertIn(part_name, obj)
+ summary_obj = obj[part_name]
+ for attr in ['maximum', 'median', 'failure']:
+ self.assertIn(attr, summary_obj)
+
+ def test_drs_uptodateness_json_median(self):
+ """
+ Test cmd `drs uptodateness --json --median`
+
+ drs uptodateness --json --median
+
+ {
+ "DNSDOMAIN": {
+ "median": 0.0
+ },
+ ...
+ "SCHEMA": {
+ "median": 0.0
+ }
+ }
+ """
+ creds = "%s%%%s" % (os.environ["USERNAME"], os.environ["PASSWORD"])
+ dc1 = os.environ["SERVER"]
+ dc2 = os.environ["DC_SERVER"]
+ for dc in [dc1, dc2]:
+ (result, out, err) = self.runsubcmd("drs", "uptodateness",
+ '-H', "ldap://%s" % dc,
+ '-U', creds,
+ '--json', '--median')
+ self.assertCmdSuccess(result, out, err)
+ # should be json format
+ obj = json.loads(out)
+ # each partition name should be in json obj
+ for part_name in PARTITION_NAMES:
+ self.assertIn(part_name, obj)
+ summary_obj = obj[part_name]
+ self.assertIn('median', summary_obj)
+ self.assertNotIn('maximum', summary_obj)
+ self.assertNotIn('failure', summary_obj)
+
+ def assert_matrix_validity(self, matrix, dcs=()):
+ for dc in dcs:
+ self.assertIn(dc, matrix)
+ for k, row in matrix.items():
+ self.assertEqual(row[k], 0)
+
+ def test_uptodateness_stop_replication_domain(self):
+ creds = "%s%%%s" % (os.environ["USERNAME"], os.environ["PASSWORD"])
+ dc1 = os.environ["SERVER"]
+ dc2 = os.environ["DC_SERVER"]
+ self.addCleanup(set_auto_replication, dc1, True)
+ self.addCleanup(set_auto_replication, dc2, True)
+
+ def display(heading, out):
+ if VERBOSE:
+ print("========", heading, "=========")
+ print(out)
+
+ samdb1 = self.getSamDB("-H", "ldap://%s" % dc1, "-U", creds)
+ samdb2 = self.getSamDB("-H", "ldap://%s" % dc2, "-U", creds)
+
+ domain_dn = samdb1.domain_dn()
+ self.assertTrue(domain_dn == samdb2.domain_dn(),
+ "We expected the same domain_dn across DCs")
+
+ ou1 = "OU=dc1.%x,%s" % (random.randrange(1 << 64), domain_dn)
+ ou2 = "OU=dc2.%x,%s" % (random.randrange(1 << 64), domain_dn)
+ samdb1.add({
+ "dn": ou1,
+ "objectclass": "organizationalUnit"
+ })
+ samdb2.add({
+ "dn": ou2,
+ "objectclass": "organizationalUnit"
+ })
+
+ set_auto_replication(dc1, False)
+ (result, out, err) = self.runsubcmd("visualize", "uptodateness",
+ "-r",
+ '-H', "ldap://%s" % dc1,
+ '-U', creds,
+ '--color=yes',
+ '--utf8', '-S',
+ '--partition', 'DOMAIN')
+ display("dc1 replication is now off", out)
+ self.assertCmdSuccess(result, out, err)
+ matrix = get_utf8_matrix(out)
+ self.assert_matrix_validity(matrix, [dc1, dc2])
+
+ force_replication(dc2, dc1, domain_dn)
+ (result, out, err) = self.runsubcmd("visualize", "uptodateness",
+ "-r",
+ '-H', "ldap://%s" % dc1,
+ '-U', creds,
+ '--color=yes',
+ '--utf8', '-S',
+ '--partition', 'DOMAIN')
+ display("forced replication %s -> %s" % (dc2, dc1), out)
+ self.assertCmdSuccess(result, out, err)
+ matrix = get_utf8_matrix(out)
+ self.assert_matrix_validity(matrix, [dc1, dc2])
+ self.assertEqual(matrix[dc1][dc2], 0)
+
+ force_replication(dc1, dc2, domain_dn)
+ (result, out, err) = self.runsubcmd("visualize", "uptodateness",
+ "-r",
+ '-H', "ldap://%s" % dc1,
+ '-U', creds,
+ '--color=yes',
+ '--utf8', '-S',
+ '--partition', 'DOMAIN')
+ display("forced replication %s -> %s" % (dc2, dc1), out)
+ self.assertCmdSuccess(result, out, err)
+ matrix = get_utf8_matrix(out)
+ self.assert_matrix_validity(matrix, [dc1, dc2])
+ self.assertEqual(matrix[dc2][dc1], 0)
+
+ dn1 = 'cn=u1.%%d,%s' % (ou1)
+ dn2 = 'cn=u2.%%d,%s' % (ou2)
+
+ for i in range(10):
+ samdb1.add({
+ "dn": dn1 % i,
+ "objectclass": "user"
+ })
+
+ (result, out, err) = self.runsubcmd("visualize", "uptodateness",
+ "-r",
+ '-H', "ldap://%s" % dc1,
+ '-U', creds,
+ '--color=yes',
+ '--utf8', '-S',
+ '--partition', 'DOMAIN')
+ display("added 10 users on %s" % dc1, out)
+ self.assertCmdSuccess(result, out, err)
+ matrix = get_utf8_matrix(out)
+ self.assert_matrix_validity(matrix, [dc1, dc2])
+ # dc2's view of dc1 should now be 10 changes out of date
+ self.assertEqual(matrix[dc2][dc1], 10)
+
+ for i in range(10):
+ samdb2.add({
+ "dn": dn2 % i,
+ "objectclass": "user"
+ })
+
+ (result, out, err) = self.runsubcmd("visualize", "uptodateness",
+ "-r",
+ '-H', "ldap://%s" % dc1,
+ '-U', creds,
+ '--color=yes',
+ '--utf8', '-S',
+ '--partition', 'DOMAIN')
+ display("added 10 users on %s" % dc2, out)
+ self.assertCmdSuccess(result, out, err)
+ matrix = get_utf8_matrix(out)
+ self.assert_matrix_validity(matrix, [dc1, dc2])
+ # dc1's view of dc2 is probably 11 changes out of date
+ self.assertGreaterEqual(matrix[dc1][dc2], 10)
+
+ for i in range(10, 101):
+ samdb1.add({
+ "dn": dn1 % i,
+ "objectclass": "user"
+ })
+ samdb2.add({
+ "dn": dn2 % i,
+ "objectclass": "user"
+ })
+
+ (result, out, err) = self.runsubcmd("visualize", "uptodateness",
+ "-r",
+ '-H', "ldap://%s" % dc1,
+ '-U', creds,
+ '--color=yes',
+ '--utf8', '-S',
+ '--partition', 'DOMAIN')
+ display("added 91 users on both", out)
+ self.assertCmdSuccess(result, out, err)
+ matrix = get_utf8_matrix(out)
+ self.assert_matrix_validity(matrix, [dc1, dc2])
+ # the difference here should be ~101.
+ self.assertGreaterEqual(matrix[dc1][dc2], 100)
+ self.assertGreaterEqual(matrix[dc2][dc1], 100)
+
+ (result, out, err) = self.runsubcmd("visualize", "uptodateness",
+ "-r",
+ '-H', "ldap://%s" % dc1,
+ '-U', creds,
+ '--color=yes',
+ '--utf8', '-S',
+ '--partition', 'DOMAIN',
+ '--max-digits', '2')
+ display("with --max-digits 2", out)
+ self.assertCmdSuccess(result, out, err)
+ matrix = get_utf8_matrix(out)
+ self.assert_matrix_validity(matrix, [dc1, dc2])
+        # visualising with 2 digits means these overflow into infinity
+ self.assertGreaterEqual(matrix[dc1][dc2], 1e99)
+ self.assertGreaterEqual(matrix[dc2][dc1], 1e99)
+
+ (result, out, err) = self.runsubcmd("visualize", "uptodateness",
+ "-r",
+ '-H', "ldap://%s" % dc1,
+ '-U', creds,
+ '--color=yes',
+ '--utf8', '-S',
+ '--partition', 'DOMAIN',
+ '--max-digits', '1')
+ display("with --max-digits 1", out)
+ self.assertCmdSuccess(result, out, err)
+ matrix = get_utf8_matrix(out)
+ self.assert_matrix_validity(matrix, [dc1, dc2])
+ # visualising with 1 digit means these overflow into infinity
+ self.assertGreaterEqual(matrix[dc1][dc2], 1e99)
+ self.assertGreaterEqual(matrix[dc2][dc1], 1e99)
+
+ force_replication(dc2, dc1, samdb1.domain_dn())
+ (result, out, err) = self.runsubcmd("visualize", "uptodateness",
+ "-r",
+ '-H', "ldap://%s" % dc1,
+ '-U', creds,
+ '--color=yes',
+ '--utf8', '-S',
+ '--partition', 'DOMAIN')
+
+ display("forced replication %s -> %s" % (dc2, dc1), out)
+ self.assertCmdSuccess(result, out, err)
+ matrix = get_utf8_matrix(out)
+ self.assert_matrix_validity(matrix, [dc1, dc2])
+ self.assertEqual(matrix[dc1][dc2], 0)
+
+ force_replication(dc1, dc2, samdb2.domain_dn())
+ (result, out, err) = self.runsubcmd("visualize", "uptodateness",
+ "-r",
+ '-H', "ldap://%s" % dc1,
+ '-U', creds,
+ '--color=yes',
+ '--utf8', '-S',
+ '--partition', 'DOMAIN')
+
+ display("forced replication %s -> %s" % (dc1, dc2), out)
+ self.assertCmdSuccess(result, out, err)
+ matrix = get_utf8_matrix(out)
+ self.assert_matrix_validity(matrix, [dc1, dc2])
+ self.assertEqual(matrix[dc2][dc1], 0)
+
+ samdb1.delete(ou1, ['tree_delete:1'])
+ samdb2.delete(ou2, ['tree_delete:1'])
+
+ (result, out, err) = self.runsubcmd("visualize", "uptodateness",
+ "-r",
+ '-H', "ldap://%s" % dc1,
+ '-U', creds,
+ '--color=yes',
+ '--utf8', '-S',
+ '--partition', 'DOMAIN')
+ display("tree delete both ous on %s" % (dc1,), out)
+ self.assertCmdSuccess(result, out, err)
+ matrix = get_utf8_matrix(out)
+ self.assert_matrix_validity(matrix, [dc1, dc2])
+ self.assertGreaterEqual(matrix[dc1][dc2], 100)
+ self.assertGreaterEqual(matrix[dc2][dc1], 100)
+
+ set_auto_replication(dc1, True)
+ (result, out, err) = self.runsubcmd("visualize", "uptodateness",
+ "-r",
+ '-H', "ldap://%s" % dc1,
+ '-U', creds,
+ '--color=yes',
+ '--utf8', '-S',
+ '--partition', 'DOMAIN')
+ display("replication is now on", out)
+ self.assertCmdSuccess(result, out, err)
+ matrix = get_utf8_matrix(out)
+ self.assert_matrix_validity(matrix, [dc1, dc2])
+ # We can't assert actual values after this because
+ # auto-replication is on and things will change underneath us.
+
+ (result, out, err) = self.runsubcmd("visualize", "uptodateness",
+ "-r",
+ '-H', "ldap://%s" % dc2,
+ '-U', creds,
+ '--color=yes',
+ '--utf8', '-S',
+ '--partition', 'DOMAIN')
+
+ display("%s's view" % dc2, out)
+ self.assertCmdSuccess(result, out, err)
+ matrix = get_utf8_matrix(out)
+ self.assert_matrix_validity(matrix, [dc1, dc2])
+
+ force_replication(dc1, dc2, samdb2.domain_dn())
+ (result, out, err) = self.runsubcmd("visualize", "uptodateness",
+ "-r",
+ '-H', "ldap://%s" % dc1,
+ '-U', creds,
+ '--color=yes',
+ '--utf8', '-S',
+ '--partition', 'DOMAIN')
+
+ display("forced replication %s -> %s" % (dc1, dc2), out)
+ self.assertCmdSuccess(result, out, err)
+ matrix = get_utf8_matrix(out)
+ self.assert_matrix_validity(matrix, [dc1, dc2])
+
+ force_replication(dc2, dc1, samdb2.domain_dn())
+ (result, out, err) = self.runsubcmd("visualize", "uptodateness",
+ "-r",
+ '-H', "ldap://%s" % dc1,
+ '-U', creds,
+ '--color=yes',
+ '--utf8', '-S',
+ '--partition', 'DOMAIN')
+ display("forced replication %s -> %s" % (dc2, dc1), out)
+ self.assertCmdSuccess(result, out, err)
+ matrix = get_utf8_matrix(out)
+ self.assert_matrix_validity(matrix, [dc1, dc2])
+
+ (result, out, err) = self.runsubcmd("visualize", "uptodateness",
+ "-r",
+ '-H', "ldap://%s" % dc2,
+ '-U', creds,
+ '--color=yes',
+ '--utf8', '-S',
+ '--partition', 'DOMAIN')
+ display("%s's view" % dc2, out)
+
+ self.assertCmdSuccess(result, out, err)
+ matrix = get_utf8_matrix(out)
+ self.assert_matrix_validity(matrix, [dc1, dc2])
+
+ def test_reps_remote(self):
+ server = "ldap://%s" % os.environ["SERVER"]
+ creds = "%s%%%s" % (os.environ["USERNAME"], os.environ["PASSWORD"])
+ (result, out, err) = self.runsubcmd("visualize", "reps",
+ '-H', server,
+ '-U', creds,
+ '--color=no', '-S', '-r')
+ self.assertCmdSuccess(result, out, err)
+
+ def test_ntdsconn_dot(self):
+ server = "ldap://%s" % os.environ["SERVER"]
+ creds = "%s%%%s" % (os.environ["USERNAME"], os.environ["PASSWORD"])
+ (result, out, err) = self.runsubcmd("visualize", "ntdsconn",
+ '-H', server,
+ '-U', creds, '--dot',
+ '--color=no', '-S')
+ self.assertCmdSuccess(result, out, err)
+
+ def test_ntdsconn_remote_dot(self):
+ server = "ldap://%s" % os.environ["SERVER"]
+ creds = "%s%%%s" % (os.environ["USERNAME"], os.environ["PASSWORD"])
+ (result, out, err) = self.runsubcmd("visualize", "ntdsconn",
+ '-H', server,
+ '-U', creds, '--dot',
+ '--color=no', '-S', '-r')
+ self.assertCmdSuccess(result, out, err)
+
+ def test_reps_dot(self):
+ server = "ldap://%s" % os.environ["SERVER"]
+ creds = "%s%%%s" % (os.environ["USERNAME"], os.environ["PASSWORD"])
+ (result, out, err) = self.runsubcmd("visualize", "reps",
+ '-H', server,
+ '-U', creds, '--dot',
+ '--color=no', '-S')
+ self.assertCmdSuccess(result, out, err)
+
+ def test_reps_remote_dot(self):
+ server = "ldap://%s" % os.environ["SERVER"]
+ creds = "%s%%%s" % (os.environ["USERNAME"], os.environ["PASSWORD"])
+ (result, out, err) = self.runsubcmd("visualize", "reps",
+ '-H', server,
+ '-U', creds, '--dot',
+ '--color=no', '-S', '-r')
+ self.assertCmdSuccess(result, out, err)
diff --git a/python/samba/tests/samba_upgradedns_lmdb.py b/python/samba/tests/samba_upgradedns_lmdb.py
new file mode 100644
index 0000000..a2029a0
--- /dev/null
+++ b/python/samba/tests/samba_upgradedns_lmdb.py
@@ -0,0 +1,75 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Catalyst IT Ltd. 2019
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from samba.tests.samba_tool.base import SambaToolCmdTest
+import os
+import shutil
+
+
+class UpgradeDnsLmdbTestCase(SambaToolCmdTest):
+ """
+    Tests for DNS upgrade on an lmdb backend
+ """
+
+ def setUp(self):
+ super().setUp()
+ self.tempsambadir = os.path.join(self.tempdir, "samba")
+ os.mkdir(self.tempsambadir)
+
+ # provision a domain
+ #
+ # returns the tuple (ret, stdout, stderr)
+ def provision(self):
+ command = (
+ "samba-tool "
+ "domain provision "
+ "--realm=foo.example.com "
+ "--domain=FOO "
+ "--targetdir=%s "
+ "--backend-store=mdb "
+ "--use-ntvfs " % self.tempsambadir)
+ return self.run_command(command)
+
+    # upgrade a domain's DNS to BIND9
+ #
+ # returns the tuple (ret, stdout, stderr)
+ def upgrade_dns(self):
+ command = (
+ "samba_upgradedns "
+ "--dns-backend=BIND9_DLZ "
+ "--configfile %s/etc/smb.conf" % self.tempsambadir)
+ return self.run_command(command)
+
+ def tearDown(self):
+ super().tearDown()
+ shutil.rmtree(self.tempsambadir)
+
+ def test_lmdb_lock_files_linked_on_upgrade_to_bind9_dlz(self):
+ """
+ Ensure that links are created for the lock files as well as the
+ data files
+ """
+ self.provision()
+ self.upgrade_dns()
+ directory = ("%s/bind-dns/dns/sam.ldb.d" % self.tempsambadir)
+ for filename in os.listdir(directory):
+ if filename.endswith(".ldb") and "DNSZONES" in filename:
+ lock_file = ("%s/%s-lock" % (directory, filename))
+ self.assertTrue(
+ os.path.isfile(lock_file),
+ msg=("Lock file %s/%s-lock for %s, does not exist" %
+ (directory, filename, filename)))
diff --git a/python/samba/tests/samdb.py b/python/samba/tests/samdb.py
new file mode 100644
index 0000000..e8b632b
--- /dev/null
+++ b/python/samba/tests/samdb.py
@@ -0,0 +1,66 @@
+# Unix SMB/CIFS implementation. Tests for SamDB
+# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2008
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for samba.samdb."""
+
+import logging
+import os
+
+from samba.auth import system_session
+from samba.provision import provision
+from samba.tests import TestCaseInTempDir
+from samba.dsdb import DS_DOMAIN_FUNCTION_2008_R2
+
+
+class SamDBTestCase(TestCaseInTempDir):
+ """Base-class for tests with a Sam Database.
+
+ This is used by the Samba SamDB-tests, but e.g. also by the OpenChange
+ provisioning tests (which need a Sam).
+ """
+
+ def setUp(self):
+ super().setUp()
+ self.session = system_session()
+ logger = logging.getLogger("selftest")
+ self.domain = "dsdb"
+ self.realm = "dsdb.samba.example.com"
+ host_name = "test"
+ server_role = "active directory domain controller"
+ self.result = provision(logger,
+ self.session, targetdir=self.tempdir,
+ realm=self.realm, domain=self.domain,
+ hostname=host_name,
+ use_ntvfs=True,
+ serverrole=server_role,
+ dns_backend="SAMBA_INTERNAL",
+ dom_for_fun_level=DS_DOMAIN_FUNCTION_2008_R2)
+ self.samdb = self.result.samdb
+ self.lp = self.result.lp
+
+ def tearDown(self):
+ self.rm_files('names.tdb')
+ self.rm_dirs('etc', 'msg.lock', 'private', 'state', 'bind-dns')
+
+ super().tearDown()
+
+
+class SamDBTests(SamDBTestCase):
+
+ def test_get_domain(self):
+ self.assertEqual(self.samdb.domain_dns_name(), self.realm.lower())
+ self.assertEqual(self.samdb.domain_netbios_name(), self.domain.upper())
diff --git a/python/samba/tests/samdb_api.py b/python/samba/tests/samdb_api.py
new file mode 100644
index 0000000..5a720aa
--- /dev/null
+++ b/python/samba/tests/samdb_api.py
@@ -0,0 +1,148 @@
+# Tests for the samba samdb api
+#
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2018
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from samba.tests import TestCaseInTempDir
+from samba.samdb import SamDB
+from ldb import LdbError, ERR_OPERATIONS_ERROR
+import errno
+
+
+class SamDBApiTestCase(TestCaseInTempDir):
+
+ def tearDown(self):
+ self.rm_files("test.db", "existing.db", allow_missing=True)
+
+ super().tearDown()
+
+    # Attempt to open an existing non-tdb file as a tdb file,
+    # with the "don't create new db" flag set (the default).
+ #
+ # Should fail to open
+ # And the existing file should be left intact.
+ #
+ def test_dont_create_db_existing_non_tdb_file(self):
+ existing_name = self.tempdir + "/existing.db"
+ existing = open(existing_name, "w")
+ existing.write("This is not a tdb file!!!!!!\n")
+ existing.close()
+
+ try:
+ SamDB(url="tdb://" + existing_name)
+ self.fail("Exception not thrown ")
+ except LdbError as e:
+ (err, _) = e.args
+ self.assertEqual(err, ERR_OPERATIONS_ERROR)
+
+ existing = open(existing_name, "r")
+ contents = existing.readline()
+ self.assertEqual("This is not a tdb file!!!!!!\n", contents)
+
+    # Attempt to open an existing non-tdb file as a tdb file,
+    # with the "don't create new db" flag cleared.
+ #
+ # Should open as a tdb file
+ # And the existing file should be over written
+ #
+ def test_create_db_existing_file_non_tdb_file(self):
+ existing_name = self.tempdir + "/existing.db"
+ existing = open(existing_name, "wb")
+ existing.write(b"This is not a tdb file!!!!!!")
+ existing.close()
+
+ SamDB(url="tdb://" + existing_name, flags=0)
+
+ existing = open(existing_name, "rb")
+ contents = existing.readline()
+ self.assertEqual(b"TDB file\n", contents)
+
+ #
+ # Attempt to open an existing tdb file as a tdb file.
+    # The "don't create new db" flag is set (the default).
+ #
+ # Should open successfully
+ # And the existing file should be left intact.
+ #
+ def test_dont_create_db_existing_tdb_file(self):
+ existing_name = self.tempdir + "/existing.db"
+ initial = SamDB(url="tdb://" + existing_name, flags=0)
+ dn = "dn=,cn=test_dont_create_db_existing_tdb_file"
+ initial.add({
+ "dn": dn,
+ "cn": "test_dont_create_db_existing_tdb_file"
+ })
+
+ cn = initial.searchone("cn", dn)
+ self.assertEqual(b"test_dont_create_db_existing_tdb_file", cn)
+
+ second = SamDB(url="tdb://" + existing_name)
+ cn = second.searchone("cn", dn)
+ self.assertEqual(b"test_dont_create_db_existing_tdb_file", cn)
+
+ #
+ # Attempt to open an existing tdb file as a tdb file.
+    # The "don't create new db" flag is explicitly cleared.
+ #
+ # Should open successfully
+ # And the existing file should be left intact.
+ #
+ def test_create_db_existing_file_tdb_file(self):
+ existing_name = self.tempdir + "/existing.db"
+ initial = SamDB(url="tdb://" + existing_name, flags=0)
+ dn = "dn=,cn=test_dont_create_db_existing_tdb_file"
+ initial.add({
+ "dn": dn,
+ "cn": "test_dont_create_db_existing_tdb_file"
+ })
+
+ cn = initial.searchone("cn", dn)
+ self.assertEqual(b"test_dont_create_db_existing_tdb_file", cn)
+
+ second = SamDB(url="tdb://" + existing_name, flags=0)
+ cn = second.searchone("cn", dn)
+ self.assertEqual(b"test_dont_create_db_existing_tdb_file", cn)
+
+    # Open a non-existent TDB file with the
+    # "don't create new db" flag set (the default).
+ #
+ # Should fail
+ # and the database file should not be created
+ def test_dont_create_db_new_file(self):
+ try:
+ SamDB(url="tdb://" + self.tempdir + "/test.db")
+ self.fail("Exception not thrown ")
+ except LdbError as e1:
+ (err, _) = e1.args
+ self.assertEqual(err, ERR_OPERATIONS_ERROR)
+
+ try:
+ file = open(self.tempdir + "/test.db", "r")
+ self.fail("New database file created")
+ except IOError as e:
+ self.assertEqual(e.errno, errno.ENOENT)
+
+ # Open a SamDB with the don't create new DB flag cleared.
+ # The underlying database file does not exist.
+ #
+    # Should successfully open the SamDB, creating a new database file.
+ #
+
+ def test_create_db_new_file(self):
+ SamDB(url="tdb://" + self.tempdir + "/test.db", flags=0)
+ existing = open(self.tempdir + "/test.db", mode="rb")
+ contents = existing.readline()
+ self.assertEqual(b"TDB file\n", contents)
diff --git a/python/samba/tests/sddl.py b/python/samba/tests/sddl.py
new file mode 100644
index 0000000..b594021
--- /dev/null
+++ b/python/samba/tests/sddl.py
@@ -0,0 +1,894 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Volker Lendecke <vl@samba.org> 2021
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for samba.dcerpc.security"""
+
+from samba.dcerpc import security
+from samba.tests import TestCase, DynamicTestCase, get_env_dir
+from samba.colour import c_RED, c_GREEN
+import os
+
+
+class SddlDecodeEncodeBase(TestCase):
+ maxDiff = 10000
+ case_insensitive = False
+
+ @classmethod
+ def setUpDynamicTestCases(cls):
+ cls.domain_sid = security.dom_sid("S-1-2-3-4")
+
+ strings_dir = getattr(cls, 'strings_dir', None)
+ if strings_dir is not None:
+ cls.read_windows_strings(strings_dir, False)
+
+ for (key, fn) in [
+ ("SAMBA_WRITE_WINDOWS_STRINGS_DIR",
+ cls.write_windows_strings),
+ ("SAMBA_READ_WINDOWS_STRINGS_DIR",
+ cls.read_windows_strings),
+ ("SAMBA_WRITE_FUZZ_STRINGS_DIR",
+ cls.write_sddl_strings_for_fuzz_seeds)]:
+ dir = get_env_dir(key)
+ if dir is not None:
+ fn(dir)
+
+ seen = set()
+ for pair in cls.strings:
+ if isinstance(pair, str):
+ pair = (pair, pair)
+
+ if pair in seen:
+ print(f"seen {pair} after {len(seen)}")
+ seen.add(pair)
+ sddl, canonical = pair
+
+ name = sddl
+ if len(name) > 120:
+ name = f"{name[:100]}+{len(name) - 100}-more-characters"
+
+ if cls.should_succeed:
+ cls.generate_dynamic_test('test_sddl', name, sddl, canonical)
+ else:
+ cls.generate_dynamic_test('test_sddl_should_fail',
+ name, sddl, canonical)
+
+ def _test_sddl_with_args(self, s, canonical):
+ try:
+ sd1 = security.descriptor.from_sddl(s, self.domain_sid)
+ except (TypeError, ValueError, security.SDDLValueError) as e:
+ self.fail(f"raised {e}")
+
+ sddl = sd1.as_sddl(self.domain_sid)
+ sd2 = security.descriptor.from_sddl(sddl, self.domain_sid)
+ self.assertEqual(sd1, sd2)
+ if '0X' in canonical.upper() or self.case_insensitive:
+ # let's chill out about case in hex numbers.
+ self.assertEqual(sddl.upper(), canonical.upper())
+ else:
+ self.assertEqual(sddl, canonical)
+
+ def _test_sddl_should_fail_with_args(self, s, canonical):
+ try:
+ sd = security.descriptor.from_sddl(s, self.domain_sid)
+ except security.SDDLValueError as e:
+ generic_msg, specific_msg, position, sddl = e.args
+ self.assertEqual(generic_msg, "Unable to parse SDDL")
+ self.assertIsInstance(specific_msg, str)
+ self.assertIsInstance(position, int)
+ self.assertLessEqual(position, len(s))
+ self.assertGreaterEqual(position, 0)
+ self.assertEqual(s, sddl)
+
+ print(f"{s}\n{' ' * position}^\n {specific_msg}")
+ else:
+ self.fail(f"{sd.as_sddl(self.domain_sid)} should fail to parse")
+
+ @classmethod
+ def write_sddl_strings_for_fuzz_seeds(cls, dir):
+ """write all the SDDL strings we have into a directory as individual
+ files, using a naming convention beloved of fuzzing engines.
+
+ To run this set an environment variable; see
+ cls.setUpDynamicTestCases(), above.
+
+ Note this will only run in subclasses annotated with @DynamicTestCase.
+ """
+ from hashlib import md5
+ for sddl in cls.strings:
+ if not isinstance(sddl, str):
+ sddl = sddl[0]
+ name = md5(sddl.encode()).hexdigest()
+ with open(os.path.join(dir, name), 'w') as f:
+ f.write(sddl)
+
+ @classmethod
+ def write_windows_strings(cls, dir):
+ """Write all test cases in the format used by
+ libcli/security/tests/windows/windows-sddl-tests.c which, if
+ compiled on Windows under Cygwin or MSYS64, can run SDDL
+ parsing tests using the Windows API. This allows us to run the
+ same tests here and on Windows, to ensure we get the same
+ results.
+
+ That test program can read examples in a bespoke text format,
+ in which each line looks like:
+
+ original sddl -> returned sddl
+
+ That is, the separator consists of the 4 bytes " -> ".
+ Multi-line examples are not possible.
+
+ To run this set an environment variable; see
+ cls.setUpDynamicTestCases(), above. Then if you copy the
+ file/s produced to Windows and run them in your POSIX-y shell
+ with
+
+ windows-sddl-tests -i path/to/*.txt
+
+ the results on Windows will be shown.
+
+ Note this will only run in subclasses annotated with @DynamicTestCase.
+ """
+ name = f"{dir}/{cls.name}.txt"
+ with open(name, 'w') as f:
+ for p in cls.strings:
+ if isinstance(p, str):
+ p = (p, p)
+ print(f"{p[0]} -> {p[1]}", file=f)
+
+ @classmethod
+ def read_windows_strings(cls, dir, verbose=True):
+ """This is complementary to cls.write_windows_strings(), which writes
+ these tests in a format usable on Windows. In this case
+ examples will be read in, replacing the strings here with the
+ ones listed. Along the way it alerts you to the changes.
+
+ To run this set an environment variable; see
+ cls.setUpDynamicTestCases(), above.
+
+ Note this will only run in subclasses annotated with @DynamicTestCase.
+ """
+ filename = f"{dir}/{cls.name}.txt"
+
+ if not hasattr(cls, 'strings'):
+ cls.strings = []
+
+ old_pairs = set()
+ for s in cls.strings:
+ if isinstance(s, str):
+ s = (s, s)
+ old_pairs.add(s)
+
+ new_pairs = set()
+ with open(filename) as f:
+ for line in f:
+ line = line.rstrip()
+ if line.startswith('#') or line == '':
+ continue
+ o, _, c = line.partition(' -> ')
+ if c == '':
+ c = o
+ new_pairs.add((o, c))
+
+ if old_pairs == new_pairs:
+ # nothing to do
+ if verbose:
+ print(f"no change in {c_GREEN(cls.name)}")
+ return
+
+ if verbose:
+ print(f"change in {c_RED(cls.name)}")
+ print("added:")
+ for x in sorted(new_pairs - old_pairs):
+ print(x)
+ print("removed:")
+ for x in sorted(old_pairs - new_pairs):
+ print(x)
+
+ cls.strings[:] = sorted(new_pairs)
+
+
+@DynamicTestCase
+class SddlNonCanonical(SddlDecodeEncodeBase):
+ """These ones are transformed in the round trip into a preferred
+    synonym. For example "S:D:" is accepted as input, but only "D:S:"
+    will be output.
+ """
+ name = "non_canonical"
+ should_succeed = True
+ strings = [
+ # format is (original, canonical); after passing through an SD
+ # object, the SDDL will look like the canonical version.
+ ("D:(A;;CC;;;BA)(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)(A;;RPLCLORC;;;AU)",
+ "D:(A;;CC;;;BA)(A;;CCDCLCSWRPWPDTLOCRSDRCWDWO;;;SY)(A;;LCRPLORC;;;AU)"),
+
+ (("D:(A;;RP;;;WD)"
+ "(OA;;CR;1131f6aa-9c07-11d1-f79f-00c04fc2dcd2;;ED)"
+ "(OA;;CR;1131f6ab-9c07-11d1-f79f-00c04fc2dcd2;;ED)"
+ "(OA;;CR;1131f6ac-9c07-11d1-f79f-00c04fc2dcd2;;ED)"
+ "(OA;;CR;1131f6aa-9c07-11d1-f79f-00c04fc2dcd2;;BA)"
+ "(OA;;CR;1131f6ab-9c07-11d1-f79f-00c04fc2dcd2;;BA)"
+ "(OA;;CR;1131f6ac-9c07-11d1-f79f-00c04fc2dcd2;;BA)"
+ "(A;;RPLCLORC;;;AU)"
+ "(A;;RPWPCRLCLOCCRCWDWOSW;;;BO)"
+ "(A;CI;RPWPCRLCLOCCRCWDWOSDSW;;;BA)"
+ "(A;;RPWPCRLCLOCCDCRCWDWOSDDTSW;;;SY)"
+ "(A;CI;RPWPCRLCLOCCDCRCWDWOSDDTSW;;;ES)"
+ "(A;CI;LC;;;RU)"
+ "(OA;CIIO;RP;037088f8-0ae1-11d2-b422-00a0c968f939;bf967aba-0de6-11d0-a285-00aa003049e2;RU)"
+ "(OA;CIIO;RP;59ba2f42-79a2-11d0-9020-00c04fc2d3cf;bf967aba-0de6-11d0-a285-00aa003049e2;RU)"
+ "(OA;CIIO;RP;bc0ac240-79a9-11d0-9020-00c04fc2d4cf;bf967aba-0de6-11d0-a285-00aa003049e2;RU)"
+ "(OA;CIIO;RP;4c164200-20c0-11d0-a768-00aa006e0529;bf967aba-0de6-11d0-a285-00aa003049e2;RU)"
+ "(OA;CIIO;RP;5f202010-79a5-11d0-9020-00c04fc2d4cf;bf967aba-0de6-11d0-a285-00aa003049e2;RU)"
+ "(OA;;RP;c7407360-20bf-11d0-a768-00aa006e0529;;RU)"
+ "(OA;CIIO;RPLCLORC;;bf967a9c-0de6-11d0-a285-00aa003049e2;RU)"
+ "(A;;RPRC;;;RU)"
+ "(OA;CIIO;RPLCLORC;;bf967aba-0de6-11d0-a285-00aa003049e2;RU)"
+ "(A;;LCRPLORC;;;ED)"
+ "(OA;CIIO;RP;037088f8-0ae1-11d2-b422-00a0c968f939;4828CC14-1437-45bc-9B07-AD6F015E5F28;RU)"
+ "(OA;CIIO;RP;59ba2f42-79a2-11d0-9020-00c04fc2d3cf;4828CC14-1437-45bc-9B07-AD6F015E5F28;RU)"
+ "(OA;CIIO;RP;bc0ac240-79a9-11d0-9020-00c04fc2d4cf;4828CC14-1437-45bc-9B07-AD6F015E5F28;RU)"
+ "(OA;CIIO;RP;4c164200-20c0-11d0-a768-00aa006e0529;4828CC14-1437-45bc-9B07-AD6F015E5F28;RU)"
+ "(OA;CIIO;RP;5f202010-79a5-11d0-9020-00c04fc2d4cf;4828CC14-1437-45bc-9B07-AD6F015E5F28;RU)"
+ "(OA;CIIO;RPLCLORC;;4828CC14-1437-45bc-9B07-AD6F015E5F28;RU)"
+ "(OA;;RP;b8119fd0-04f6-4762-ab7a-4986c76b3f9a;;RU)"
+ "(OA;;RP;b8119fd0-04f6-4762-ab7a-4986c76b3f9a;;AU)"
+ "(OA;CIIO;RP;b7c69e6d-2cc7-11d2-854e-00a0c983f608;bf967aba-0de6-11d0-a285-00aa003049e2;ED)"
+ "(OA;CIIO;RP;b7c69e6d-2cc7-11d2-854e-00a0c983f608;bf967a9c-0de6-11d0-a285-00aa003049e2;ED)"
+ "(OA;CIIO;RP;b7c69e6d-2cc7-11d2-854e-00a0c983f608;bf967a86-0de6-11d0-a285-00aa003049e2;ED)"
+ "(OA;;CR;1131f6ad-9c07-11d1-f79f-00c04fc2dcd2;;NO)"
+ "(OA;;CR;1131f6ad-9c07-11d1-f79f-00c04fc2dcd2;;BA)"
+ "(OA;;CR;e2a36dc9-ae17-47c3-b58b-be34c55ba633;;SU)"
+ "(OA;;CR;280f369c-67c7-438e-ae98-1d46f3c6f541;;AU)"
+ "(OA;;CR;ccc2dc7d-a6ad-4a7a-8846-c04e3cc53501;;AU)"
+ "(OA;;CR;05c74c5e-4deb-43b4-bd9f-86664c2a7fd5;;AU)"
+ "S:(AU;SA;WDWOWP;;;WD)"),
+ ("D:(A;;RP;;;WD)"
+ "(OA;;CR;1131f6aa-9c07-11d1-f79f-00c04fc2dcd2;;ED)"
+ "(OA;;CR;1131f6ab-9c07-11d1-f79f-00c04fc2dcd2;;ED)"
+ "(OA;;CR;1131f6ac-9c07-11d1-f79f-00c04fc2dcd2;;ED)"
+ "(OA;;CR;1131f6aa-9c07-11d1-f79f-00c04fc2dcd2;;BA)"
+ "(OA;;CR;1131f6ab-9c07-11d1-f79f-00c04fc2dcd2;;BA)"
+ "(OA;;CR;1131f6ac-9c07-11d1-f79f-00c04fc2dcd2;;BA)"
+ "(A;;LCRPLORC;;;AU)"
+ "(A;;CCLCSWRPWPLOCRRCWDWO;;;BO)"
+ "(A;CI;CCLCSWRPWPLOCRSDRCWDWO;;;BA)"
+ "(A;;CCDCLCSWRPWPDTLOCRSDRCWDWO;;;SY)"
+ "(A;CI;CCDCLCSWRPWPDTLOCRSDRCWDWO;;;ES)"
+ "(A;CI;LC;;;RU)"
+ "(OA;CIIO;RP;037088f8-0ae1-11d2-b422-00a0c968f939;bf967aba-0de6-11d0-a285-00aa003049e2;RU)"
+ "(OA;CIIO;RP;59ba2f42-79a2-11d0-9020-00c04fc2d3cf;bf967aba-0de6-11d0-a285-00aa003049e2;RU)"
+ "(OA;CIIO;RP;bc0ac240-79a9-11d0-9020-00c04fc2d4cf;bf967aba-0de6-11d0-a285-00aa003049e2;RU)"
+ "(OA;CIIO;RP;4c164200-20c0-11d0-a768-00aa006e0529;bf967aba-0de6-11d0-a285-00aa003049e2;RU)"
+ "(OA;CIIO;RP;5f202010-79a5-11d0-9020-00c04fc2d4cf;bf967aba-0de6-11d0-a285-00aa003049e2;RU)"
+ "(OA;;RP;c7407360-20bf-11d0-a768-00aa006e0529;;RU)"
+ "(OA;CIIO;LCRPLORC;;bf967a9c-0de6-11d0-a285-00aa003049e2;RU)"
+ "(A;;RPRC;;;RU)"
+ "(OA;CIIO;LCRPLORC;;bf967aba-0de6-11d0-a285-00aa003049e2;RU)"
+ "(A;;LCRPLORC;;;ED)"
+ "(OA;CIIO;RP;037088f8-0ae1-11d2-b422-00a0c968f939;4828cc14-1437-45bc-9b07-ad6f015e5f28;RU)"
+ "(OA;CIIO;RP;59ba2f42-79a2-11d0-9020-00c04fc2d3cf;4828cc14-1437-45bc-9b07-ad6f015e5f28;RU)"
+ "(OA;CIIO;RP;bc0ac240-79a9-11d0-9020-00c04fc2d4cf;4828cc14-1437-45bc-9b07-ad6f015e5f28;RU)"
+ "(OA;CIIO;RP;4c164200-20c0-11d0-a768-00aa006e0529;4828cc14-1437-45bc-9b07-ad6f015e5f28;RU)"
+ "(OA;CIIO;RP;5f202010-79a5-11d0-9020-00c04fc2d4cf;4828cc14-1437-45bc-9b07-ad6f015e5f28;RU)"
+ "(OA;CIIO;LCRPLORC;;4828cc14-1437-45bc-9b07-ad6f015e5f28;RU)"
+ "(OA;;RP;b8119fd0-04f6-4762-ab7a-4986c76b3f9a;;RU)"
+ "(OA;;RP;b8119fd0-04f6-4762-ab7a-4986c76b3f9a;;AU)"
+ "(OA;CIIO;RP;b7c69e6d-2cc7-11d2-854e-00a0c983f608;bf967aba-0de6-11d0-a285-00aa003049e2;ED)"
+ "(OA;CIIO;RP;b7c69e6d-2cc7-11d2-854e-00a0c983f608;bf967a9c-0de6-11d0-a285-00aa003049e2;ED)"
+ "(OA;CIIO;RP;b7c69e6d-2cc7-11d2-854e-00a0c983f608;bf967a86-0de6-11d0-a285-00aa003049e2;ED)"
+ "(OA;;CR;1131f6ad-9c07-11d1-f79f-00c04fc2dcd2;;NO)"
+ "(OA;;CR;1131f6ad-9c07-11d1-f79f-00c04fc2dcd2;;BA)"
+ "(OA;;CR;e2a36dc9-ae17-47c3-b58b-be34c55ba633;;SU)"
+ "(OA;;CR;280f369c-67c7-438e-ae98-1d46f3c6f541;;AU)"
+ "(OA;;CR;ccc2dc7d-a6ad-4a7a-8846-c04e3cc53501;;AU)"
+ "(OA;;CR;05c74c5e-4deb-43b4-bd9f-86664c2a7fd5;;AU)"
+ "S:(AU;SA;WPWDWO;;;WD)")),
+
+ (("D:(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;BO)"
+ "(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;AO)"
+ "(A;;WPRPCRCCDCLCLORCWOWDSDDTSW;;;SY)"
+ "(A;;RPCRLCLORCSDDT;;;CO)"
+ "(OA;;WP;4c164200-20c0-11d0-a768-00aa006e0529;;CO)"
+ "(A;;RPLCLORC;;;AU)"
+ "(OA;;CR;ab721a53-1e2f-11d0-9819-00aa0040529b;;WD)"
+ "(A;;CCDC;;;PS)"
+ "(OA;;CCDC;bf967aa8-0de6-11d0-a285-00aa003049e2;;PO)"
+ "(OA;;RPWP;bf967a7f-0de6-11d0-a285-00aa003049e2;;SY)"
+ "(OA;;SW;f3a64788-5306-11d1-a9c5-0000f80367c1;;PS)"
+ "(OA;;RPWP;77B5B886-944A-11d1-AEBD-0000F80367C1;;PS)"
+ "(OA;;SW;72e39547-7b18-11d1-adef-00c04fd8d5cd;;PS)"
+ "(OA;;SW;72e39547-7b18-11d1-adef-00c04fd8d5cd;;CO)"
+ "(OA;;SW;f3a64788-5306-11d1-a9c5-0000f80367c1;;CO)"
+ "(OA;;WP;3e0abfd0-126a-11d0-a060-00aa006c33ed;bf967a86-0de6-11d0-a285-00aa003049e2;CO)"
+ "(OA;;WP;5f202010-79a5-11d0-9020-00c04fc2d4cf;bf967a86-0de6-11d0-a285-00aa003049e2;CO)"
+ "(OA;;WP;bf967950-0de6-11d0-a285-00aa003049e2;bf967a86-0de6-11d0-a285-00aa003049e2;CO)"
+ "(OA;;WP;bf967953-0de6-11d0-a285-00aa003049e2;bf967a86-0de6-11d0-a285-00aa003049e2;CO)"
+ "(OA;;RP;46a9b11d-60ae-405a-b7e8-ff8a58d456d2;;S-1-5-32-560)"),
+ ("D:(A;;CCDCLCSWRPWPDTLOCRSDRCWDWO;;;BO)"
+ "(A;;CCDCLCSWRPWPDTLOCRSDRCWDWO;;;AO)"
+ "(A;;CCDCLCSWRPWPDTLOCRSDRCWDWO;;;SY)"
+ "(A;;LCRPDTLOCRSDRC;;;CO)"
+ "(OA;;WP;4c164200-20c0-11d0-a768-00aa006e0529;;CO)"
+ "(A;;LCRPLORC;;;AU)"
+ "(OA;;CR;ab721a53-1e2f-11d0-9819-00aa0040529b;;WD)"
+ "(A;;CCDC;;;PS)"
+ "(OA;;CCDC;bf967aa8-0de6-11d0-a285-00aa003049e2;;PO)"
+ "(OA;;RPWP;bf967a7f-0de6-11d0-a285-00aa003049e2;;SY)"
+ "(OA;;SW;f3a64788-5306-11d1-a9c5-0000f80367c1;;PS)"
+ "(OA;;RPWP;77b5b886-944a-11d1-aebd-0000f80367c1;;PS)"
+ "(OA;;SW;72e39547-7b18-11d1-adef-00c04fd8d5cd;;PS)"
+ "(OA;;SW;72e39547-7b18-11d1-adef-00c04fd8d5cd;;CO)"
+ "(OA;;SW;f3a64788-5306-11d1-a9c5-0000f80367c1;;CO)"
+ "(OA;;WP;3e0abfd0-126a-11d0-a060-00aa006c33ed;bf967a86-0de6-11d0-a285-00aa003049e2;CO)"
+ "(OA;;WP;5f202010-79a5-11d0-9020-00c04fc2d4cf;bf967a86-0de6-11d0-a285-00aa003049e2;CO)"
+ "(OA;;WP;bf967950-0de6-11d0-a285-00aa003049e2;bf967a86-0de6-11d0-a285-00aa003049e2;CO)"
+ "(OA;;WP;bf967953-0de6-11d0-a285-00aa003049e2;bf967a86-0de6-11d0-a285-00aa003049e2;CO)"
+ "(OA;;RP;46a9b11d-60ae-405a-b7e8-ff8a58d456d2;;S-1-5-32-560)")),
+
+ ("D:(A;;RPLCLORC;;;BO)(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)(A;;RPLCLORC;;;AU)",
+ "D:(A;;LCRPLORC;;;BO)(A;;CCDCLCSWRPWPDTLOCRSDRCWDWO;;;SY)(A;;LCRPLORC;;;AU)"),
+
+ (("D:(A;;WPCRCCDCLCLORCWOWDSDDTSWRP;;;BO)"
+ "(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;AO)"
+ "(A;;RPWPCRCCDCLCLORCWOWDSDSWDT;;;SY)"
+ "(A;;RPCRLCLORCSDDT;;;CO)"
+ "(OA;;WP;4c164200-20c0-11d0-a768-00aa006e0529;;CO)"
+ "(A;;RPLCLORC;;;AU)"
+ "(OA;;CR;ab721a53-1e2f-11d0-9819-00aa0040529b;;WD)"
+ "(A;;CCDC;;;PS)"
+ "(OA;;CCDC;bf967aa8-0de6-11d0-a285-00aa003049e2;;PO)"
+ "(OA;;RPWP;bf967a7f-0de6-11d0-a285-00aa003049e2;;SY)"
+ "(OA;;SW;f3a64788-5306-11d1-a9c5-0000f80367c1;;PS)"
+ "(OA;;RPWP;77B5B886-944A-11d1-AEBD-0000F80367C1;;PS)"
+ "(OA;;SW;72e39547-7b18-11d1-adef-00c04fd8d5cd;;PS)"
+ "(OA;;SW;72e39547-7b18-11d1-adef-00c04fd8d5cd;;CO)"
+ "(OA;;SW;f3a64788-5306-11d1-a9c5-0000f80367c1;;CO)"
+ "(OA;;WP;3e0abfd0-126a-11d0-a060-00aa006c33ed;bf967a86-0de6-11d0-a285-00aa003049e2;CO)"
+ "(OA;;WP;5f202010-79a5-11d0-9020-00c04fc2d4cf;bf967a86-0de6-11d0-a285-00aa003049e2;CO)"
+ "(OA;;WP;bf967950-0de6-11d0-a285-00aa003049e2;bf967a86-0de6-11d0-a285-00aa003049e2;CO)"
+ "(OA;;WP;bf967953-0de6-11d0-a285-00aa003049e2;bf967a86-0de6-11d0-a285-00aa003049e2;CO)"
+ "(OA;;RP;46a9b11d-60ae-405a-b7e8-ff8a58d456d2;;SU)"),
+ ("D:(A;;CCDCLCSWRPWPDTLOCRSDRCWDWO;;;BO)"
+ "(A;;CCDCLCSWRPWPDTLOCRSDRCWDWO;;;AO)"
+ "(A;;CCDCLCSWRPWPDTLOCRSDRCWDWO;;;SY)"
+ "(A;;LCRPDTLOCRSDRC;;;CO)"
+ "(OA;;WP;4c164200-20c0-11d0-a768-00aa006e0529;;CO)"
+ "(A;;LCRPLORC;;;AU)"
+ "(OA;;CR;ab721a53-1e2f-11d0-9819-00aa0040529b;;WD)"
+ "(A;;CCDC;;;PS)"
+ "(OA;;CCDC;bf967aa8-0de6-11d0-a285-00aa003049e2;;PO)"
+ "(OA;;RPWP;bf967a7f-0de6-11d0-a285-00aa003049e2;;SY)"
+ "(OA;;SW;f3a64788-5306-11d1-a9c5-0000f80367c1;;PS)"
+ "(OA;;RPWP;77b5b886-944a-11d1-aebd-0000f80367c1;;PS)"
+ "(OA;;SW;72e39547-7b18-11d1-adef-00c04fd8d5cd;;PS)"
+ "(OA;;SW;72e39547-7b18-11d1-adef-00c04fd8d5cd;;CO)"
+ "(OA;;SW;f3a64788-5306-11d1-a9c5-0000f80367c1;;CO)"
+ "(OA;;WP;3e0abfd0-126a-11d0-a060-00aa006c33ed;bf967a86-0de6-11d0-a285-00aa003049e2;CO)"
+ "(OA;;WP;5f202010-79a5-11d0-9020-00c04fc2d4cf;bf967a86-0de6-11d0-a285-00aa003049e2;CO)"
+ "(OA;;WP;bf967950-0de6-11d0-a285-00aa003049e2;bf967a86-0de6-11d0-a285-00aa003049e2;CO)"
+ "(OA;;WP;bf967953-0de6-11d0-a285-00aa003049e2;bf967a86-0de6-11d0-a285-00aa003049e2;CO)"
+ "(OA;;RP;46a9b11d-60ae-405a-b7e8-ff8a58d456d2;;SU)")),
+
+ (("D:(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;BO)"
+ "(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)"
+ "(A;;RPLCLORC;;;AU)",
+ "D:(A;;CCDCLCSWRPWPDTLOCRSDRCWDWO;;;BO)"
+ "(A;;CCDCLCSWRPWPDTLOCRSDRCWDWO;;;SY)"
+ "(A;;LCRPLORC;;;AU)")),
+
+ (("D:(A;;;;;BO)"
+ "(A;;;;;AO)"
+ "(A;;;;;SY)"
+ "(A;;RPCRLCLORCSDDT;;;CO)"
+ "(OA;;WP;4c164200-20c0-11d0-a768-00aa006e0529;;CO)"
+ "(A;;RPLCLORC;;;AU)"
+ "(OA;;CR;ab721a53-1e2f-11d0-9819-00aa0040529b;;WD)"
+ "(A;;CCDC;;;PS)"
+ "(OA;;CCDC;bf967aa8-0de6-11d0-a285-00aa003049e2;;PO)"
+ "(OA;;RPWP;bf967a7f-0de6-11d0-a285-00aa003049e2;;SY)"
+ "(OA;;SW;f3a64788-5306-11d1-a9c5-0000f80367c1;;PS)"
+ "(OA;;RPWP;77B5B886-944A-11d1-AEBD-0000F80367C1;;PS)"
+ "(OA;;SW;72e39547-7b18-11d1-adef-00c04fd8d5cd;;PS)"
+ "(OA;;SW;72e39547-7b18-11d1-adef-00c04fd8d5cd;;CO)"
+ "(OA;;SW;f3a64788-5306-11d1-a9c5-0000f80367c1;;CO)"
+ "(OA;;WP;3e0abfd0-126a-11d0-a060-00aa006c33ed;bf967a86-0de6-11d0-a285-00aa003049e2;CO)"
+ "(OA;;WP;5f202010-79a5-11d0-9020-00c04fc2d4cf;bf967a86-0de6-11d0-a285-00aa003049e2;CO)"
+ "(OA;;WP;bf967950-0de6-11d0-a285-00aa003049e2;bf967a86-0de6-11d0-a285-00aa003049e2;CO)"
+ "(OA;;WP;bf967953-0de6-11d0-a285-00aa003049e2;bf967a86-0de6-11d0-a285-00aa003049e2;CO)"
+ "(OA;;RP;46a9b11d-60ae-405a-b7e8-ff8a58d456d2;;SU)"),
+ ("D:(A;;;;;BO)"
+ "(A;;;;;AO)"
+ "(A;;;;;SY)"
+ "(A;;LCRPDTLOCRSDRC;;;CO)"
+ "(OA;;WP;4c164200-20c0-11d0-a768-00aa006e0529;;CO)"
+ "(A;;LCRPLORC;;;AU)"
+ "(OA;;CR;ab721a53-1e2f-11d0-9819-00aa0040529b;;WD)"
+ "(A;;CCDC;;;PS)"
+ "(OA;;CCDC;bf967aa8-0de6-11d0-a285-00aa003049e2;;PO)"
+ "(OA;;RPWP;bf967a7f-0de6-11d0-a285-00aa003049e2;;SY)"
+ "(OA;;SW;f3a64788-5306-11d1-a9c5-0000f80367c1;;PS)"
+ "(OA;;RPWP;77b5b886-944a-11d1-aebd-0000f80367c1;;PS)"
+ "(OA;;SW;72e39547-7b18-11d1-adef-00c04fd8d5cd;;PS)"
+ "(OA;;SW;72e39547-7b18-11d1-adef-00c04fd8d5cd;;CO)"
+ "(OA;;SW;f3a64788-5306-11d1-a9c5-0000f80367c1;;CO)"
+ "(OA;;WP;3e0abfd0-126a-11d0-a060-00aa006c33ed;bf967a86-0de6-11d0-a285-00aa003049e2;CO)"
+ "(OA;;WP;5f202010-79a5-11d0-9020-00c04fc2d4cf;bf967a86-0de6-11d0-a285-00aa003049e2;CO)"
+ "(OA;;WP;bf967950-0de6-11d0-a285-00aa003049e2;bf967a86-0de6-11d0-a285-00aa003049e2;CO)"
+ "(OA;;WP;bf967953-0de6-11d0-a285-00aa003049e2;bf967a86-0de6-11d0-a285-00aa003049e2;CO)"
+ "(OA;;RP;46a9b11d-60ae-405a-b7e8-ff8a58d456d2;;SU)")),
+
+ ("D:(A;;RPLCLORC;;;AU)",
+ "D:(A;;LCRPLORC;;;AU)"),
+
+ (("D:(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;BO)"
+ "(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)"
+ "(A;;RPLCLORC;;;AU)"
+ "(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;AO)"
+ "(A;;RPLCLORC;;;PS)"
+ "(OA;;CR;ab721a55-1e2f-11d0-9819-00aa0040529b;;AU)"
+ "(OA;;RP;46a9b11d-60ae-405a-b7e8-ff8a58d456d2;;SU)"),
+ ("D:(A;;CCDCLCSWRPWPDTLOCRSDRCWDWO;;;BO)"
+ "(A;;CCDCLCSWRPWPDTLOCRSDRCWDWO;;;SY)"
+ "(A;;LCRPLORC;;;AU)"
+ "(A;;CCDCLCSWRPWPDTLOCRSDRCWDWO;;;AO)"
+ "(A;;LCRPLORC;;;PS)"
+ "(OA;;CR;ab721a55-1e2f-11d0-9819-00aa0040529b;;AU)"
+ "(OA;;RP;46a9b11d-60ae-405a-b7e8-ff8a58d456d2;;SU)")),
+
+ (("D:(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;BO)"
+ "(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)"
+ "(A;;RPLCLORC;;;AU)"
+ "(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;CO)"),
+ ("D:(A;;CCDCLCSWRPWPDTLOCRSDRCWDWO;;;BO)"
+ "(A;;CCDCLCSWRPWPDTLOCRSDRCWDWO;;;SY)"
+ "(A;;LCRPLORC;;;AU)"
+ "(A;;CCDCLCSWRPWPDTLOCRSDRCWDWO;;;CO)")),
+
+ (("D:(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;BO)"
+ "(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)"
+ "(A;;RPLCLORC;;;AU)S:(AU;SA;CRWP;;;WD)"),
+ ("D:(A;;CCDCLCSWRPWPDTLOCRSDRCWDWO;;;BO)"
+ "(A;;CCDCLCSWRPWPDTLOCRSDRCWDWO;;;SY)"
+ "(A;;LCRPLORC;;;AU)S:(AU;SA;WPCR;;;WD)")),
+
+ (("D:(A;;RPWPCRCCDCLCLORCWOWDSDDTSWRP;;;BO)"
+ "(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)"
+ "(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;AO)"
+ "(A;;RPLCLORC;;;PS)"
+ "(OA;;CR;ab721a53-1e2f-11d0-9819-00aa0040529b;;PS)"
+ "(OA;;CR;ab721a54-1e2f-11d0-9819-00aa0040529b;;PS)"
+ "(OA;;CR;ab721a56-1e2f-11d0-9819-00aa0040529b;;PS)"
+ "(OA;;RPWP;77B5B886-944A-11d1-AEBD-0000F80367C1;;PS)"
+ "(OA;;RPWP;E45795B2-9455-11d1-AEBD-0000F80367C1;;PS)"
+ "(OA;;RPWP;E45795B3-9455-11d1-AEBD-0000F80367C1;;PS)"
+ "(OA;;RP;037088f8-0ae1-11d2-b422-00a0c968f939;;RD)"
+ "(OA;;RP;4c164200-20c0-11d0-a768-00aa006e0529;;RD)"
+ "(OA;;RP;bc0ac240-79a9-11d0-9020-00c04fc2d4cf;;RD)"
+ "(A;;RC;;;AU)"
+ "(OA;;RP;59ba2f42-79a2-11d0-9020-00c04fc2d3cf;;AU)"
+ "(OA;;RP;77B5B886-944A-11d1-AEBD-0000F80367C1;;AU)"
+ "(OA;;RP;E45795B3-9455-11d1-AEBD-0000F80367C1;;AU)"
+ "(OA;;RP;e48d0154-bcf8-11d1-8702-00c04fb96050;;AU)"
+ "(OA;;CR;ab721a53-1e2f-11d0-9819-00aa0040529b;;WD)"
+ "(OA;;RP;5f202010-79a5-11d0-9020-00c04fc2d4cf;;RD)"
+ "(OA;;RPWP;bf967a7f-0de6-11d0-a285-00aa003049e2;;SY)"
+ "(OA;;RP;46a9b11d-60ae-405a-b7e8-ff8a58d456d2;;SU)"
+ "(OA;;WPRP;6db69a1c-9422-11d1-aebd-0000f80367c1;;SU)"),
+ ("D:(A;;CCDCLCSWRPWPDTLOCRSDRCWDWO;;;BO)"
+ "(A;;CCDCLCSWRPWPDTLOCRSDRCWDWO;;;SY)"
+ "(A;;CCDCLCSWRPWPDTLOCRSDRCWDWO;;;AO)"
+ "(A;;LCRPLORC;;;PS)"
+ "(OA;;CR;ab721a53-1e2f-11d0-9819-00aa0040529b;;PS)"
+ "(OA;;CR;ab721a54-1e2f-11d0-9819-00aa0040529b;;PS)"
+ "(OA;;CR;ab721a56-1e2f-11d0-9819-00aa0040529b;;PS)"
+ "(OA;;RPWP;77b5b886-944a-11d1-aebd-0000f80367c1;;PS)"
+ "(OA;;RPWP;e45795b2-9455-11d1-aebd-0000f80367c1;;PS)"
+ "(OA;;RPWP;e45795b3-9455-11d1-aebd-0000f80367c1;;PS)"
+ "(OA;;RP;037088f8-0ae1-11d2-b422-00a0c968f939;;RD)"
+ "(OA;;RP;4c164200-20c0-11d0-a768-00aa006e0529;;RD)"
+ "(OA;;RP;bc0ac240-79a9-11d0-9020-00c04fc2d4cf;;RD)"
+ "(A;;RC;;;AU)"
+ "(OA;;RP;59ba2f42-79a2-11d0-9020-00c04fc2d3cf;;AU)"
+ "(OA;;RP;77b5b886-944a-11d1-aebd-0000f80367c1;;AU)"
+ "(OA;;RP;e45795b3-9455-11d1-aebd-0000f80367c1;;AU)"
+ "(OA;;RP;e48d0154-bcf8-11d1-8702-00c04fb96050;;AU)"
+ "(OA;;CR;ab721a53-1e2f-11d0-9819-00aa0040529b;;WD)"
+ "(OA;;RP;5f202010-79a5-11d0-9020-00c04fc2d4cf;;RD)"
+ "(OA;;RPWP;bf967a7f-0de6-11d0-a285-00aa003049e2;;SY)"
+ "(OA;;RP;46a9b11d-60ae-405a-b7e8-ff8a58d456d2;;SU)"
+ "(OA;;RPWP;6db69a1c-9422-11d1-aebd-0000f80367c1;;SU)")),
+
+ (("D:(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)"),
+ ("D:(A;;CCDCLCSWRPWPDTLOCRSDRCWDWO;;;SY)")),
+
+ (("D:(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)"
+ "(A;;RPLCLORC;;;AU)"
+ "(A;;LCRPLORC;;;ED)"),
+ ("D:(A;;CCDCLCSWRPWPDTLOCRSDRCWDWO;;;SY)"
+ "(A;;LCRPLORC;;;AU)"
+ "(A;;LCRPLORC;;;ED)")),
+
+ (("D:(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)"
+ "(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;BO)"
+ "(OA;;CCDC;bf967a86-0de6-11d0-a285-00aa003049e2;;AO)"
+ "(OA;;CCDC;bf967aba-0de6-11d0-a285-00aa003049e2;;AO)"
+ "(OA;;CCDC;bf967a9c-0de6-11d0-a285-00aa003049e2;;AO)"
+ "(OA;;CCDC;bf967aa8-0de6-11d0-a285-00aa003049e2;;PO)"
+ "(A;;RPLCLORC;;;AU)"
+ "(A;;LCRPLORC;;;ED)"
+ "(OA;;CCDC;4828CC14-1437-45bc-9B07-AD6F015E5F28;;AO)"),
+ ("D:(A;;CCDCLCSWRPWPDTLOCRSDRCWDWO;;;SY)"
+ "(A;;CCDCLCSWRPWPDTLOCRSDRCWDWO;;;BO)"
+ "(OA;;CCDC;bf967a86-0de6-11d0-a285-00aa003049e2;;AO)"
+ "(OA;;CCDC;bf967aba-0de6-11d0-a285-00aa003049e2;;AO)"
+ "(OA;;CCDC;bf967a9c-0de6-11d0-a285-00aa003049e2;;AO)"
+ "(OA;;CCDC;bf967aa8-0de6-11d0-a285-00aa003049e2;;PO)"
+ "(A;;LCRPLORC;;;AU)"
+ "(A;;LCRPLORC;;;ED)"
+ "(OA;;CCDC;4828cc14-1437-45bc-9b07-ad6f015e5f28;;AO)")),
+
+ (("D:(A;;RPWPCRCCDCLCLORCWOWDSW;;;BO)"
+ "(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)"
+ "(A;;RPLCLORC;;;AU)"),
+ ("D:(A;;CCDCLCSWRPWPLOCRRCWDWO;;;BO)"
+ "(A;;CCDCLCSWRPWPDTLOCRSDRCWDWO;;;SY)"
+ "(A;;LCRPLORC;;;AU)")),
+
+ (("D:(A;CI;RPWPCRCCDCLCLORCWOWDSDDTSW;;;BO)"
+ "(A;;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)"
+ "(A;;RPLCLORC;;;AU)"),
+ ("D:(A;CI;CCDCLCSWRPWPDTLOCRSDRCWDWO;;;BO)"
+ "(A;;CCDCLCSWRPWPDTLOCRSDRCWDWO;;;SY)"
+ "(A;;LCRPLORC;;;AU)")),
+
+ ("S:D:P", "D:PS:"),
+ ("S:D:", "D:S:"),
+
+ # decimal to hex
+ ("D:(A;;123456789;;;LG)",
+ "D:(A;;0x75bcd15;;;LG)"),
+
+ # octal to hex
+ ("D:(A;;01234567;;;LG)",
+ "D:(A;;0x53977;;;LG)"),
+
+ # numbers to flags
+ ("D:(A;;16;;;LG)",
+ "D:(A;;RP;;;LG)"),
+ ("D:(A;;17;;;LG)",
+ "D:(A;;CCRP;;;LG)"),
+ ("D:(A;;0xff;;;LG)",
+ "D:(A;;CCDCLCSWRPWPDTLO;;;LG)"),
+ ("D:(A;;0xf01ff;;;LG)",
+ "D:(A;;CCDCLCSWRPWPDTLOCRSDRCWDWO;;;LG)"),
+ ("D:(A;;0xe00f0000;;;LG)",
+ "D:(A;;SDRCWDWOGXGWGR;;;LG)"),
+
+ # ACL flags
+ ("D:ARPAI(A;;GA;;;SY)", "D:PARAI(A;;GA;;;SY)"),
+ ("D:AIPAR(A;;GA;;;SY)", "D:PARAI(A;;GA;;;SY)"),
+ ("D:PARP(A;;GA;;;SY)", "D:PAR(A;;GA;;;SY)"),
+ ("D:PPPPPPPPPPPP(A;;GA;;;SY)", "D:P(A;;GA;;;SY)"),
+
+ # hex vs decimal
+ ('D:(A;;CC;;;S-1-21474836480-32-579)',
+ 'D:(A;;CC;;;S-1-0x500000000-32-579)'),
+ ("D:(A;;GA;;;S-1-5000000000-30-40)",
+ "D:(A;;GA;;;S-1-0x12A05F200-30-40)"),
+ ("D:(A;;GA;;;S-1-0x2-3-4)",
+ "D:(A;;GA;;;S-1-2-3-4)"),
+ ("D:(A;;GA;;;S-1-0x20-3-4)",
+ "D:(A;;GA;;;S-1-32-3-4)"),
+ ("D:(A;;GA;;;S-1-3-0x00000002-3-4)",
+ "D:(A;;GA;;;S-1-3-2-3-4)"),
+ ("D:(A;;GA;;;S-1-3-0xffffffff-3-4)",
+ "D:(A;;GA;;;S-1-3-4294967295-3-4)"),
+ ("D:(A;;GA;;;S-1-5-21-0x1-0x2-0x3-513)",
+ "D:(A;;GA;;;S-1-5-21-1-2-3-513)"),
+ ("D:(A;;GA;;;S-1-5-21-2447931902-1787058256-3961074038-0x4b1)",
+ "D:(A;;GA;;;S-1-5-21-2447931902-1787058256-3961074038-1201)"),
+
+ # ambiguous 'D', looks like part of the SID but isn't
+ ("O:S-1-2-0x200D:", "O:S-1-2-512D:"),
+ ("O:S-1-2-0x2D:(A;;GA;;;LG)", "O:S-1-2-2D:(A;;GA;;;LG)"),
+
+ # like the 'samba3.blackbox.large_acl.NT1' test in
+ # WindowsFlagsAreDifferent below, except using numeric flags
+ # that can't easily be turned into symbolic flags. Also it is
+ # longer, and uses different flags for each ACE.
+ (("D:(A;;0x00654321;;;WD)" +
+ ''.join(f"(A;;0x00abc{i:03};;;S-1-5-21-11111111-22222222-33333333-{i})"
+ for i in range(101, 601))),
+ ("D:(A;;0x654321;;;WD)" +
+ ''.join(f"(A;;0xabc{i:03};;;S-1-5-21-11111111-22222222-33333333-{i})"
+ for i in range(101, 601)))
+ ),
+
+ # Windows allows a space in the middle of access flags
+ ("D:AI(A;CI;RP LCLORC;;;AU)", "D:AI(A;CI;LCRPLORC;;;AU)"),
+ ("D:AI(A;CI;RP LCLO RC;;;AU)", "D:AI(A;CI;LCRPLORC;;;AU)"),
+ # space before string flags is ignored.
+ ("D:(A;; GA;;;LG)", "D:(A;;GA;;;LG)"),
+ ("D:(A;; 0x75bcd15;;;LG)", "D:(A;;0x75bcd15;;;LG)"),
+
+ # from 'samba3.blackbox.large_acl.NT1.able to retrieve a large ACL if VFS supports it'
+ (("D:(A;;0x001f01ff;;;WD)" +
+ ''.join(f"(A;;0x001f01ff;;;S-1-5-21-11111111-22222222-33333333-{i})"
+ for i in range(1001, 1201))),
+ ("D:(A;;FA;;;WD)" +
+ ''.join(f"(A;;FA;;;S-1-5-21-11111111-22222222-33333333-{i})"
+ for i in range(1001, 1201)))
+ ),
+
+ # from samba4.blackbox.samba-tool_ntacl, but using 0x1f01ff in place of FA (which it will become)
+ (("O:S-1-5-21-2212615479-2695158682-2101375468-512"
+ "G:S-1-5-21-2212615479-2695158682-2101375468-513"
+ "D:P(A;OICI;0x001f01ff;;;S-1-5-21-2212615479-2695158682-2101375468-512)"
+ "(A;OICI;0x001f01ff;;;S-1-5-21-2212615479-2695158682-2101375468-519)"
+ "(A;OICIIO;0x001f01ff;;;CO)"
+ "(A;OICI;0x001f01ff;;;S-1-5-21-2212615479-2695158682-2101375468-512)"
+ "(A;OICI;0x001f01ff;;;SY)"
+ "(A;OICI;0x001200a9;;;AU)"
+ "(A;OICI;0x001200a9;;;ED)"
+ "S:AI(OU;CIIDSA;WP;f30e3bbe-9ff0-11d1-b603-0000f80367c1;"
+ "bf967aa5-0de6-11d0-a285-00aa003049e2;WD)"
+ "(OU;CIIDSA;WP;f30e3bbf-9ff0-11d1-b603-0000f80367c1;"
+ "bf967aa5-0de6-11d0-a285-00aa003049e2;WD)"),
+ ("O:S-1-5-21-2212615479-2695158682-2101375468-512"
+ "G:S-1-5-21-2212615479-2695158682-2101375468-513"
+ "D:P(A;OICI;FA;;;S-1-5-21-2212615479-2695158682-2101375468-512)"
+ "(A;OICI;FA;;;S-1-5-21-2212615479-2695158682-2101375468-519)"
+ "(A;OICIIO;FA;;;CO)"
+ "(A;OICI;FA;;;S-1-5-21-2212615479-2695158682-2101375468-512)"
+ "(A;OICI;FA;;;SY)"
+ "(A;OICI;0x1200a9;;;AU)"
+ "(A;OICI;0x1200a9;;;ED)"
+ "S:AI(OU;CIIDSA;WP;f30e3bbe-9ff0-11d1-b603-0000f80367c1;"
+ "bf967aa5-0de6-11d0-a285-00aa003049e2;WD)"
+ "(OU;CIIDSA;WP;f30e3bbf-9ff0-11d1-b603-0000f80367c1;"
+ "bf967aa5-0de6-11d0-a285-00aa003049e2;WD)")),
+
+ (("O:LAG:BAD:P(A;OICI;0x1f01ff;;;BA)"),
+ ("O:LAG:BAD:P(A;OICI;FA;;;BA)")),
+
+ (("O:LAG:BAD:(A;;0x1ff;;;WD)",
+ ("O:LAG:BAD:(A;;CCDCLCSWRPWPDTLOCR;;;WD)"))),
+
+ ("D:(A;;FAGX;;;SY)", "D:(A;;0x201f01ff;;;SY)"),
+ ]
+
+
@DynamicTestCase
class SddlCanonical(SddlDecodeEncodeBase):
    """These ones are expected to be returned in exactly the form they
    start in. Hence we only have one string for each example.

    Each entry is a single SDDL string (not an input/output pair):
    decoding and re-encoding it must reproduce the text byte for byte.
    """
    name = "canonical"
    should_succeed = True
    strings = [
        # derived from GPO acl in provision, "-512D" could be misinterpreted
        ("O:S-1-5-21-1225132014-296224811-2507946102-512"
         "G:S-1-5-21-1225132014-296224811-2507946102-512"
         "D:P"),
        "D:(A;;GA;;;SY)",
        "D:(A;;GA;;;RU)",
        "D:(A;;GA;;;LG)",
        "D:(A;;0x401200a0;;;LG)",
        "D:S:",
        "D:PS:",
        'D:(A;;GA;;;RS)',
        # duplicate SACL ACEs are preserved, not merged
        "S:(AU;SA;CR;;;WD)(AU;SA;CR;;;WD)",

        ("S:(OU;CISA;WP;f30e3bbe-9ff0-11d1-b603-0000f80367c1;bf967aa5-0de6-11d0-a285-00aa003049e2;WD)"
         "(OU;CISA;WP;f30e3bbf-9ff0-11d1-b603-0000f80367c1;bf967aa5-0de6-11d0-a285-00aa003049e2;WD)"),

        "D:(A;;GA;;;S-1-3-4294967295-3-4)",
        "D:(A;;GA;;;S-1-5-21-1-2-3-513)",
        "D:(A;;GA;;;S-1-5-21-2447931902-1787058256-3961074038-1201)",
        # '512D' here is a subauthority followed by the DACL marker
        "O:S-1-2-512D:",
        "D:PARAI(A;;GA;;;SY)",
        "D:P(A;;GA;;;LG)(A;;GX;;;AA)",
        "D:(A;;FA;;;WD)",
        "D:(A;;CCDCLCSWRPWPDTLOCR;;;WD)",
        "D:(A;;CCDCLCSWRPWPDTLOCRSDRCWDWO;;;BA)"

    ]
+
+
@DynamicTestCase
class SddlShouldFail(SddlDecodeEncodeBase):
    """These ones should be rejected.

    Each entry is a single SDDL string that the parser must refuse.
    """
    name = "should_fail"
    should_succeed = False
    strings = [
        "Z:(A;;GA;;;SY)",
        "D:(Antlers;;GA;;;SY)",
        "Q:(A;;GA;;;RU)",
        "d:(A;;GA;;;LG)",
        "D:((A;;GA;;;LG))",
        "D:(A;;GA;;)",
        "D :S:",
        "S:(AU;SA;CROOO;;;WD)(AU;SA;CR;;;WD)",
        "D:(A;;GA;;;S-1-0x1313131313131-513)",
        "D:(A;;GA;a;;S-1-5-21-2447931902-1787058256-0x3961074038-1201)",
        "D:(A;;GA;a;;S-1-5-21-2447931902-1787058256-0xec193176-1201)",
        ("S:(OOU;CISA;WP;f30e3bbe-9ff0-11d1-b603-0000f80367c1;bf967aa5-0de6-11d0-a285-00aa003049e2;WD)"
         "(OU;CISA;WP;f30e3bbf-9ff0-11d1-b603-0000f80367c1;bf967aa5-0de6-11d0-a285-00aa003049e2;WD)"),
        ("S:(OU;CISA;WP;f30e3bbe-9ff0-11d1-b603-00potato7c1;bf967aa5-0de6-11d0-a285-00aa003049e2;WD)"
         "(OU;CISA;WP;f30e3bbf-9ff0-11d1-b603-00chips7c1;bf967aa5-0de6-11d0-a285-00aa003049e2;WD)"),
        "D:P:S:",
        "D:(Ā;;GA;;;LG)",  # macron on Ā

        # whitespace around flags
        "D:(A;;123456789 ;;;LG)",
        "D:(A;;0x75bcd15\t;;;LG)",
        "D:(A;; 0x75bcd15;;;LG",
        "D:(A;;0x 75bcd15;;;LG)",
        # Windows accepts space before string flags, not after.
        "D:(A;;GA ;;;LG)",
        "D:(A;;RP ;;;LG)",
        # wrong numbers of ';'
        "D:(A;;GA;;;LG;)",
        "D:(A;;GA;;;LG;;)",
        "D:(A;;GA)",
        f"D:(A;{';' * 10000})",

        # space after SID is bad
        # but Windows accepts space before SID, after 2-letter SID
        "D:(A;;GA;;;S-1-3-4 )",

        "D:(A;;GA; f30e3bbf-9ff0-11d1-b603-0000f80367c1;;WD)",
        "D:(A;;GA;f30e3bbf-9ff0-11d1-b603-0000f80367c1 ;;WD)",
        "D:(A;;GA;; f30e3bbf-9ff0-11d1-b603-0000f80367c1;WD)",
        "D:(A;;GA;;f30e3bbf-9ff0-11d1-b603-0000f80367c1 ;WD)",

        # Samba used to use GUID_from_string(), which would take
        # anything GUID-ish, including {}-wrapped GUIDs, hyphen-less
        # hexstrings, and 16 raw bytes. But we only want one kind.
        "D:(A;;GA;;{f30e3bbf-9ff0-11d1-b603-0000f80367c1};WD)",
        # would have been treated as raw bytes.
        "D:(A;;GA;;0123456789abcdef;WD)",
        # would have been 16 hex pairs.
        "D:(A;;GA;;0123456789abcdef0123456789abcdef;WD)",

        # space splits a flag in half.
        "D:AI(A;CI;RP LCLOR C;;;AU)",
        # tabs in flags
        "D:AI(A;CI;RP LC\tLORC;;;AU)",
        "D:AI(A;CI;RP LC\t LORC;;;AU)",

        # incomplete SIDs
        "O:S",
        "O:S-",
        "O:S-1",
        "O:S-10",
        "O:S-0",
        "O:S-1-",
        "O:S-0x1",
        "O:S-0x1-",

        "O:",
        "O:XX",

        # BUG FIX: these four entries previously had no separating
        # commas, so Python's implicit string-literal concatenation
        # collapsed them into one string and only a single (unintended)
        # case was ever tested.
        "D:(",
        "D:()",
        "D:())",
        "D:(A;;0x75bcd15;;;LG))",
    ]
+
+
@DynamicTestCase
class SddlWindowsIsFussy(SddlDecodeEncodeBase):
    """Windows won't accept these strings, seemingly for semantic rather than
    syntactic reasons.

    Samba, by contrast, parses them fine (should_succeed is True).
    """
    name = "windows_is_fussy"
    should_succeed = True
    strings = [
        # Windows doesn't seem to want AU type in DACL.
        ("D:(A;;RP;;;WD)"
         "(AU;SA;CR;;;BA)"
         "(AU;SA;CR;;;DU)"),
    ]
+
+
@DynamicTestCase
class SddlWindowsIsLessFussy(SddlDecodeEncodeBase):
    """Windows will accept these seemingly malformed strings, but Samba
    won't.

    Each pair is (input that Windows tolerates, the canonical form
    Windows repairs it to).  With should_succeed False, Samba is
    expected to reject the input outright.
    """
    name = "windows_is_less_fussy"
    should_succeed = False
    strings = [
        # whitespace is ignored, repaired on return
        ("D:(A;;GA;;; LG)", "D:(A;;GA;;;LG)"),
        ("D: (A;;GA;;;LG)", "D:(A;;GA;;;LG)"),
        # whitespace before ACL string flags is ignored.
        ("D: AI(A;;GA;;;LG)", "D:AI(A;;GA;;;LG)"),
        # wrong case on type is ignored, fixed
        ("D:(a;;GA;;;LG)", "D:(A;;GA;;;LG)"),
        ("D:(A;;GA;;;lg)", "D:(A;;GA;;;LG)"),
        ("D:(A;;ga;;;LG)", "D:(A;;GA;;;LG)"),
        ("D: S:","D:S:"),

        # whitespace around ACL flags
        ("D: P(A;;GA;;;LG)", "D:P(A;;GA;;;LG)"),
        ("D:P (A;;GA;;;LG)", "D:P(A;;GA;;;LG)"),

        # whitespace between ACES
        ("D:P(A;;GA;;;LG) (A;;GX;;;AA)",
         "D:P(A;;GA;;;LG)(A;;GX;;;AA)"),

        # whitespace in absent ace flags
        ("D:(A; ;GA;;;LG)","D:(A;;GA;;;LG)"),

        # space after ACL flags
        ("D:AI (A;;GA;;;LG)", "D:AI(A;;GA;;;LG)"),

        # and more whitespace.
        # (S-1-3-4 is the OW "owner rights" well-known SID, hence the
        # alias in the repaired form.)
        ("D:(A;;GA;;; WD)", "D:(A;;GA;;;WD)"),
        ("D:(A;;GA;;;WD )", "D:(A;;GA;;;WD)"),
        ("D:(A;;GA;;; S-1-3-4)", "D:(A;;GA;;;OW)"),
        ("D:(A;;GA;; ;S-1-3-4)", "D:(A;;GA;;;OW)"),
        ("D:(A;;GA; ;;S-1-3-4)", "D:(A;;GA;;;OW)"),
        ("D:(A;;GA;;; S-1-333-4)", "D:(A;;GA;;;S-1-333-4)"),
        ("D:(A;;GA; ;;S-1-333-4)", "D:(A;;GA;;;S-1-333-4)"),
        (" O:AA", "O:AA"),
        (" O:AA ", "O:AA"),
        (" O:AA G:WD ", "O:AAG:WD"),

        # spaces in some parts of the SID (not subauth)
        ("O:S- 1- 2-3", "O:S-1-2-3"),
    ]
+
+
@DynamicTestCase
class SddlWindowsIsWeird(SddlDecodeEncodeBase):
    """Windows will accept some very misleading SDDL strings.

    Pairs are (input, the surprising form Windows canonicalises it to);
    with should_succeed False, Samba rejects the input instead.
    """
    name = "windows_is_weird"
    should_succeed = False
    strings = [
        # overflow of hex turns on all flags
        ("D:(A;;0x123456789;;;LG)",
         "D:(A;;0xffffffff;;;LG)"),
        # S-0x1- makes all the rest of the SID hex.
        ('D:(A;;CC;;;S-0x1-0-0-579)',
         'D:(A;;CC;;;S-1-0-0-1401)'),
        ('O:S-0x1-20-0-579', 'O:S-1-32-0-1401'),
        ("D:(A;;GA;;;S-1-3-4294967296-3-4)",
         "D:(A;;GA;;;S-1-3-4294967295-3-4)"),
        # sid overflow: subauthorities saturate at 0xffffffff
        ("D:(A;;GA;;;S-1-3-0x100000000-3-4)",
         "D:(A;;GA;;;S-1-3-4294967295-3-4)"),
        ("D:(A;;GA;;;S-1-5-21-0x1313131313131-513)",
         "D:(A;;GA;;;S-1-5-21-4294967295-513)"),
        # negative numbers for access flags wrap to unsigned 32-bit
        ("D:(A;;-99;;;LG)",
         "D:(A;;0xffffff9d;;;LG)"),
        ("D:(A;;-0xffffff55;;;LG)",
         "D:(A;;CCDCSWWPLO;;;LG)"),
        # combine overflow with negatives
        # -9876543210 == -0xffffffff == -(-1) == 0x1 == CC flag
        ("D:(A;;-9876543210;;;LG)",
         "D:(A;;CC;;;LG)"),
        # overflow of hex turns on all flags
        ("D:(A;;100000000000000000000000;;;LG)",
         "D:(A;;0xffffffff;;;LG)"),
    ]
diff --git a/python/samba/tests/sddl_conditional_ace.py b/python/samba/tests/sddl_conditional_ace.py
new file mode 100644
index 0000000..d7c6c7d
--- /dev/null
+++ b/python/samba/tests/sddl_conditional_ace.py
@@ -0,0 +1,52 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Volker Lendecke <vl@samba.org> 2021
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for SDDL conditional ACES."""
+
+from .sddl import SddlDecodeEncodeBase
+from samba.tests import DynamicTestCase
+from pathlib import Path
+
# Directory of SDDL sample files shared with the C tests.
# NOTE(review): this uses Path(__name__), not Path(__file__); a dotted
# module name contains no '/', so .parent collapses to '.' and the path
# resolves relative to the current working directory — presumably the
# source-tree root under selftest.  Confirm before "fixing" to __file__.
STRINGS_DIR = Path(__name__).parent.parent.parent / 'libcli/security/tests/data'
+
@DynamicTestCase
class SddlConditionalAces(SddlDecodeEncodeBase):
    """Conditional-ACE SDDL cases (from files in strings_dir) that Samba
    must parse and round-trip successfully."""
    strings_dir = STRINGS_DIR
    name = "conditional_aces"
    should_succeed = True
+
+
@DynamicTestCase
class SddlConditionalAcesShouldFail(SddlDecodeEncodeBase):
    """Malformed conditional-ACE SDDL cases that the parser must reject."""
    strings_dir = STRINGS_DIR
    name = "conditional_aces_should_fail"
    should_succeed = False
+
+
@DynamicTestCase
class SddlConditionalAcesWindowsOnly(SddlDecodeEncodeBase):
    """Conditional-ACE SDDL that Windows accepts but Samba rejects."""
    strings_dir = STRINGS_DIR
    name = "conditional_aces_windows_only"
    should_succeed = False
+
+
@DynamicTestCase
class SddlConditionalAcesCaseInsensitive(SddlDecodeEncodeBase):
    """Conditional-ACE SDDL cases compared without regard to case."""
    strings_dir = STRINGS_DIR
    name = "conditional_aces_case_insensitive"
    should_succeed = True
    case_insensitive = True
diff --git a/python/samba/tests/security.py b/python/samba/tests/security.py
new file mode 100644
index 0000000..68df3dd
--- /dev/null
+++ b/python/samba/tests/security.py
@@ -0,0 +1,209 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for samba.dcerpc.security."""
+
+import samba.tests
+from samba.dcerpc import security
+from samba.security import access_check
+from samba import ntstatus
+from samba import NTSTATUSError
+
+
class SecurityTokenTests(samba.tests.TestCase):
    """Sanity checks on a freshly constructed security token.

    A default token carries no identity or privileges, so every query
    is expected to come back false until a privilege is explicitly set.
    """

    def setUp(self):
        super().setUp()
        self.token = security.token()

    def test_is_system(self):
        tok = self.token
        self.assertFalse(tok.is_system())

    def test_is_anonymous(self):
        tok = self.token
        self.assertFalse(tok.is_anonymous())

    def test_has_builtin_administrators(self):
        tok = self.token
        self.assertFalse(tok.has_builtin_administrators())

    def test_has_nt_authenticated_users(self):
        tok = self.token
        self.assertFalse(tok.has_nt_authenticated_users())

    def test_has_priv(self):
        tok = self.token
        self.assertFalse(tok.has_privilege(security.SEC_PRIV_SHUTDOWN))

    def test_set_priv(self):
        priv = security.SEC_PRIV_SHUTDOWN
        tok = self.token
        # Not held initially; set_privilege itself returns a falsy
        # value, but afterwards the privilege reads back as held.
        self.assertFalse(tok.has_privilege(priv))
        self.assertFalse(tok.set_privilege(priv))
        self.assertTrue(tok.has_privilege(priv))
+
+
class SecurityDescriptorTests(samba.tests.TestCase):
    """Tests for security.descriptor construction and SDDL round-trips."""

    def setUp(self):
        super().setUp()
        self.descriptor = security.descriptor()

    def test_from_sddl(self):
        # Domain-relative aliases resolve against the supplied domain
        # SID: DA (group) becomes <domain>-512; AO (owner) is the
        # well-known Account Operators alias S-1-5-32-548.
        desc = security.descriptor.from_sddl("O:AOG:DAD:(A;;RPWPCCDCLCSWRCWDWOGA;;;S-1-0-0)",
                                             security.dom_sid("S-1-2-3"))
        self.assertEqual(desc.group_sid, security.dom_sid('S-1-2-3-512'))
        self.assertEqual(desc.owner_sid, security.dom_sid('S-1-5-32-548'))
        self.assertEqual(desc.revision, 1)
        self.assertEqual(desc.sacl, None)
        self.assertEqual(desc.type, 0x8004)

    def test_from_sddl_invalidsddl(self):
        # Unparseable SDDL raises SDDLValueError, not a generic error.
        self.assertRaises(security.SDDLValueError, security.descriptor.from_sddl, "foo",
                          security.dom_sid("S-1-2-3"))

    def test_from_sddl_invalidtype1(self):
        # First argument must be a string, not a dom_sid.
        self.assertRaises(TypeError, security.descriptor.from_sddl, security.dom_sid('S-1-2-3-512'),
                          security.dom_sid("S-1-2-3"))

    def test_from_sddl_invalidtype2(self):
        # Second argument must be a dom_sid object, not a SID string.
        sddl = "O:AOG:DAD:(A;;RPWPCCDCLCSWRCWDWOGA;;;S-1-0-0)"
        self.assertRaises(TypeError, security.descriptor.from_sddl, sddl,
                          "S-1-2-3")

    def test_as_sddl(self):
        # Round-trip: parse, re-emit with the same domain SID, re-parse;
        # the two descriptors must agree field by field.
        text = "O:AOG:DAD:(A;;RPWPCCDCLCSWRCWDWOGA;;;S-1-0-0)"
        dom = security.dom_sid("S-1-2-3")
        desc1 = security.descriptor.from_sddl(text, dom)
        desc2 = security.descriptor.from_sddl(desc1.as_sddl(dom), dom)
        self.assertEqual(desc1.group_sid, desc2.group_sid)
        self.assertEqual(desc1.owner_sid, desc2.owner_sid)
        self.assertEqual(desc1.sacl, desc2.sacl)
        self.assertEqual(desc1.type, desc2.type)

    def test_as_sddl_invalid(self):
        # as_sddl's optional argument must be a dom_sid, not a string.
        text = "O:AOG:DAD:(A;;RPWPCCDCLCSWRCWDWOGA;;;S-1-0-0)"
        dom = security.dom_sid("S-1-2-3")
        desc1 = security.descriptor.from_sddl(text, dom)
        self.assertRaises(TypeError, desc1.as_sddl, text)

    def test_as_sddl_no_domainsid(self):
        # Without a domain SID, as_sddl emits full SIDs instead of
        # domain-relative aliases, but re-parsing still yields an
        # equivalent descriptor.
        dom = security.dom_sid("S-1-2-3")
        text = "O:AOG:DAD:(A;;RPWPCCDCLCSWRCWDWOGA;;;S-1-0-0)"
        desc1 = security.descriptor.from_sddl(text, dom)
        desc2 = security.descriptor.from_sddl(desc1.as_sddl(), dom)
        self.assertEqual(desc1.group_sid, desc2.group_sid)
        self.assertEqual(desc1.owner_sid, desc2.owner_sid)
        self.assertEqual(desc1.sacl, desc2.sacl)
        self.assertEqual(desc1.type, desc2.type)

    def test_domsid_nodomsid_as_sddl(self):
        # The two renderings (with and without a domain SID) must
        # differ textually, since one uses aliases.
        dom = security.dom_sid("S-1-2-3")
        text = "O:AOG:DAD:(A;;RPWPCCDCLCSWRCWDWOGA;;;S-1-0-0)"
        desc1 = security.descriptor.from_sddl(text, dom)
        self.assertNotEqual(desc1.as_sddl(), desc1.as_sddl(dom))

    def test_split(self):
        # NOTE(review): this exercises dom_sid.split() (domain part +
        # final RID), not descriptor — arguably belongs in DomSidTests.
        dom = security.dom_sid("S-1-0-7")
        self.assertEqual((security.dom_sid("S-1-0"), 7), dom.split())
+
+
class DomSidTests(samba.tests.TestCase):
    """Construction, equality and representation of security.dom_sid."""

    def test_parse_sid(self):
        text = "S-1-5-21"
        self.assertEqual(text, str(security.dom_sid(text)))

    def test_sid_equal(self):
        first = security.dom_sid("S-1-5-21")
        second = security.dom_sid("S-1-5-21")
        # Equal to itself and to an independently parsed copy.
        self.assertEqual(first, first)
        self.assertEqual(first, second)

    def test_random(self):
        # Random SIDs are minted in the NT domain namespace.
        generated = security.random_sid()
        self.assertTrue(str(generated).startswith("S-1-5-21-"))

    def test_repr(self):
        generated = security.random_sid()
        self.assertTrue(repr(generated).startswith("dom_sid('S-1-5-21-"))
+
+
class PrivilegeTests(samba.tests.TestCase):
    """Mapping between privilege ids and their symbolic names."""

    def test_privilege_name(self):
        name = security.privilege_name(security.SEC_PRIV_SHUTDOWN)
        self.assertEqual("SeShutdownPrivilege", name)

    def test_privilege_id(self):
        ident = security.privilege_id("SeShutdownPrivilege")
        self.assertEqual(security.SEC_PRIV_SHUTDOWN, ident)
+
+
class CheckAccessTests(samba.tests.TestCase):
    """Tests for samba.security.access_check()."""

    def test_check_access(self):
        """An empty token is granted nothing, and privileged or denied
        requests raise NTSTATUSError with the expected status code."""
        desc = security.descriptor.from_sddl(
            "O:AOG:DAD:(A;;RPWPCCDCLCSWRCWDWOGA;;;S-1-0-0)",
            security.dom_sid("S-1-2-3"))
        token = security.token()

        # Requesting no access bits always succeeds with nothing granted.
        self.assertEqual(access_check(desc, token, 0), 0)

        params = (
            (security.SEC_FLAG_SYSTEM_SECURITY,
             ntstatus.NT_STATUS_PRIVILEGE_NOT_HELD),
            (security.SEC_STD_READ_CONTROL, ntstatus.NT_STATUS_ACCESS_DENIED),
        )

        for arg, num in params:
            # assertRaises replaces the original try/except/else/fail
            # dance: an unexpected exception type is now reported as an
            # error with a traceback rather than a bare isinstance
            # failure, and subTest identifies which case failed.
            with self.subTest(access=arg):
                with self.assertRaises(NTSTATUSError) as ctx:
                    access_check(desc, token, arg)
                e_num, e_msg = ctx.exception.args
                self.assertEqual(num, e_num)
+
+
class SecurityAceTests(samba.tests.TestCase):
    """Equality and SDDL round-trip behaviour of individual ACEs."""

    # Object ACE with explicit object and inherited-object GUIDs.
    sddl = "(OA;CIIO;RPWP;aaaaaaaa-1111-bbbb-2222-dddddddddddd;33333333-eeee-4444-ffff-555555555555;PS)"
    # Same shape as sddl, but entirely different GUIDs.
    sddl2 = "(OA;CIIO;RPWP;cccccccc-9999-ffff-8888-eeeeeeeeeeee;77777777-dddd-6666-bbbb-555555555555;PS)"
    # Object GUID of sddl combined with inherited-object GUID of sddl2.
    sddl3 = "(OA;CIIO;RPWP;aaaaaaaa-1111-bbbb-2222-dddddddddddd;77777777-dddd-6666-bbbb-555555555555;PS)"
    # Upper-case and mixed-case spellings of sddl's GUIDs.
    sddl_uc = "(OA;CIIO;RPWP;AAAAAAAA-1111-BBBB-2222-DDDDDDDDDDDD;33333333-EEEE-4444-FFFF-555555555555;PS)"
    sddl_mc = "(OA;CIIO;RPWP;AaAaAAAa-1111-BbBb-2222-DDddDDdDDDDD;33333333-EeeE-4444-FffF-555555555555;PS)"
    # As sddl, but the PS alias written as an explicit SID (S-1-5-10).
    sddl_sid = "(OA;CIIO;RPWP;aaaaaaaa-1111-bbbb-2222-dddddddddddd;33333333-eeee-4444-ffff-555555555555;S-1-5-10)"

    def setUp(self):
        super().setUp()
        self.dom = security.dom_sid("S-1-2-3")

    def test_equality(self):
        # Parse each SDDL fragment inside a minimal DACL and pull out
        # the single ACE for comparison.
        ace = security.descriptor.from_sddl("D:" + self.sddl, self.dom).dacl.aces[0]
        ace2 = security.descriptor.from_sddl("D:" + self.sddl2, self.dom).dacl.aces[0]
        ace3 = security.descriptor.from_sddl("D:" + self.sddl3, self.dom).dacl.aces[0]
        ace_uc = security.descriptor.from_sddl("D:" + self.sddl_uc, self.dom).dacl.aces[0]
        ace_mc = security.descriptor.from_sddl("D:" + self.sddl_mc, self.dom).dacl.aces[0]
        ace_sid = security.descriptor.from_sddl("D:" + self.sddl_sid, self.dom).dacl.aces[0]
        self.assertTrue(ace == ace_uc, "Case should not matter.")
        self.assertTrue(ace == ace_mc, "Case should not matter.")
        self.assertTrue(ace != ace2, "Different ACEs should be unequal.")
        self.assertTrue(ace2 != ace3, "Different ACEs should be unequal.")
        self.assertTrue(ace == ace_sid, "Different ways of specifying SID should not matter.")

    def test_as_sddl(self):
        ace = security.descriptor.from_sddl("D:" + self.sddl, self.dom).dacl.aces[0]
        ace_sddl = ace.as_sddl(self.dom)
        # compare created SDDL with original one (we need to strip the parenthesis from the original
        # since as_sddl does not create them)
        self.assertEqual(ace_sddl, self.sddl[1:-1])
        ace_new = security.descriptor.from_sddl("D:(" + ace_sddl + ")", self.dom).dacl.aces[0]
        self.assertTrue(ace == ace_new, "Exporting ace as SDDl and reading back should result in same ACE.")
diff --git a/python/samba/tests/security_descriptors.py b/python/samba/tests/security_descriptors.py
new file mode 100644
index 0000000..b3dd2ca
--- /dev/null
+++ b/python/samba/tests/security_descriptors.py
@@ -0,0 +1,216 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Volker Lendecke <vl@samba.org> 2021
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""These tests compare Windows security descriptors with Samba
+descriptors derived from the same SDDL.
+
+They use json and json.gz files in libcli/security/tests/data.
+"""
+
+from samba.dcerpc import security
+from samba.ndr import ndr_pack, ndr_unpack, ndr_print
+from samba.tests import TestCase, DynamicTestCase
+from samba.colour import colourdiff
+from hashlib import md5
+import gzip
+
+import json
+from pathlib import Path
+
+TEST_DIR = Path(__name__).parent.parent.parent / 'libcli/security/tests/data'
+
+
+class SDDLvsDescriptorBase(TestCase):
+ """These tests have no explicit cases and no inline data. The actual
+ data is kept in JSON files in libcli/security/tests/data, so that
+ it is easy to share those files with Windows. To control what tests
+ are run, set the `json_file` attribute in subclasses, and/or add a
+ filter_test_cases class method.
+ """
+ maxDiff = 10000
+ json_file = None
+ munge_to_v4 = True
+ domain_sid = security.dom_sid("S-1-5-21-2457507606-2709100691-398136650")
+ failure_json = None
+ success_json = None
+
+ @classmethod
+ def filter_test_cases(cls, data):
+ """Filter out some cases before running the tests.
+ Like this, for example:
+ return {k:v for k, v in data.items() if len(k) < 200 and
+ '(D;;;;;MP)(D;;;;;MP)(D;;;;;MP)' in k}
+ """
+ return data
+
+ @classmethod
+ def setUpDynamicTestCases(cls):
+ try:
+ with gzip.open(cls.json_file, 'rt') as f:
+ data = json.load(f)
+ except Exception:
+ with open(cls.json_file) as f:
+ data = json.load(f)
+
+ data = cls.filter_test_cases(data)
+ i = 0
+ for sddl, sdl in data.items():
+ i += 1
+ name = f'{i:03}-{sddl}'
+ if len(name) > 130:
+ tag = md5(sddl.encode()).hexdigest()[:10]
+ name = f"{name[:100]}+{len(name) - 100}-more-characters-{tag}"
+ cls.generate_dynamic_test('test_sddl_vs_sd', name, sddl, sdl)
+
+ if cls.failure_json:
+ cls.failures = {}
+ cls.failure_file = open(cls.failure_json, 'w')
+ cls.addClassCleanup(json.dump, cls.failures, cls.failure_file)
+ if cls.success_json:
+ cls.successes = {}
+ cls.success_file = open(cls.success_json, 'w')
+ cls.addClassCleanup(json.dump, cls.successes, cls.success_file)
+
+ def _test_sddl_vs_sd_with_args(self, sddl, sdl):
+ sdb_win = bytes(sdl)
+ try:
+ sd_sam = security.descriptor.from_sddl(sddl, self.domain_sid)
+ except (TypeError, ValueError, security.SDDLValueError) as e:
+ try:
+ sd_win = ndr_unpack(security.descriptor, sdb_win)
+ win_ndr_print = ndr_print(sd_win)
+ except RuntimeError as e2:
+ win_ndr_print = f"not parseable: {e2}"
+ if self.failure_json:
+ self.failures[sddl] = sdl
+
+ self.fail(f"failed to parse {sddl} into SD: {e}")
+
+ try:
+ sdb_sam = ndr_pack(sd_sam)
+ except RuntimeError as e:
+ if self.failure_json:
+ self.failures[sddl] = sdl
+ self.fail(f"failed to pack samba SD from {sddl} into bytes: {e}\n"
+ f"{ndr_print(sd_sam)}")
+
+ try:
+ sd_win = ndr_unpack(security.descriptor, sdb_win)
+ except RuntimeError as e:
+ if self.failure_json:
+ self.failures[sddl] = sdl
+ self.fail(f"could not unpack windows descriptor for {sddl}: {e}")
+
+ if self.munge_to_v4:
+ # Force the ACL revisions to match Samba. Windows seems to
+ # use the lowest possible revision, while Samba uses
+ # ACL_REVISION_DS when generating from SDDL. The _DS
+ # version allows more ACE types, but is otherwise the same.
+ #
+ # MS-DTYP 2.4.5 ACL:
+ #
+ # ACL_REVISION 0x02
+ #
+ # When set to 0x02, only AceTypes 0x00, 0x01,
+ # 0x02, 0x03, 0x11, 0x12, and 0x13 can be present in the ACL.
+ # An AceType of 0x11 is used for SACLs but not for DACLs. For
+ # more information about ACE types, see section 2.4.4.1.
+ #
+ # ACL_REVISION_DS 0x04
+ #
+ # When set to 0x04, AceTypes 0x05, 0x06, 0x07, 0x08, and 0x11
+ # are allowed. ACLs of revision 0x04 are applicable only to
+ # directory service objects. An AceType of 0x11 is used for
+ # SACLs but not for DACLs.
+ #
+ # 5, 6, 7, 8 are object ACES.
+ if sd_win.dacl:
+ sd_win.dacl.revision = 4
+ if sd_win.sacl:
+ sd_win.sacl.revision = 4
+
+ if (sd_win != sd_sam):
+ if self.failure_json:
+ self.failures[sddl] = sdl
+ self.fail(f"Descriptors differ for {sddl}")
+
+ if self.success_json:
+ self.successes[sddl] = sdl
+
+
+@DynamicTestCase
+class SDDLvsDescriptorShortOrdinaryAcls(SDDLvsDescriptorBase):
+ """These are not conditional ACEs or resource attribute aces, the SDDL
+ is less than 1000 characters long, and success is expected.
+ """
+ json_file = TEST_DIR / 'short-ordinary-acls.json.gz'
+
+
+@DynamicTestCase
+class SDDLvsDescriptorRegistryObjectRights(SDDLvsDescriptorBase):
+ """We'll fail these because we don't recognise 'KA' and related object
+ rights strings that are used for registry objects."""
+ json_file = TEST_DIR / 'registry-object-rights.json'
+
+
+@DynamicTestCase
+class SDDLvsDescriptorOverSizeAcls(SDDLvsDescriptorBase):
+ """These are ordinary ACLs that contain duplicate ACEs (e.g.
+ 'D:P(D;;;;;MP)(D;;;;;MP)(D;;;;;MP)(D;;;;;MP)'). Due to a
+ peculiarity in Windows, the ACL structures generated have extra
+ trailing zero bytes. Due to a peculiarity in the way Samba reads
+ an ACL (namely, it assumes an ACL will be just big enough for its
+ ACEs), these cannot currently be parsed by Samba.
+ """
+ json_file = TEST_DIR / 'oversize-acls.json'
+
+
+@DynamicTestCase
+class SDDLvsDescriptorShortConditionalAndResourceAceSuccesses(SDDLvsDescriptorBase):
+ """These contain conditional ACEs or resource attribute aces, the SDDL
+ is less than 1000 characters long, and success is expected.
+ """
+ json_file = TEST_DIR / 'short-conditional-and-resource-aces-successes.json.gz'
+
+
+@DynamicTestCase
+class SDDLvsDescriptorShortConditionalAndResourceAcesTxIntegers(SDDLvsDescriptorBase):
+ """These contain resource attribute aces in the form
+
+ (RA;;;;;WD;("foo",TX,0x0,0077,00,...))
+
+ where the numbers after the 0x0 flags like "0077" are interpreted
+ by Windows as if they are octet strings. This is not documented
+ and not supported by Samba.
+ """
+ json_file = TEST_DIR / 'short-conditional-and-resource-aces-tx-int.json.gz'
+
+
+@DynamicTestCase
+class SDDLvsDescriptorShortOrdinaryAclsNoMungeV4(SDDLvsDescriptorBase):
+ """These ones have revision 2 ACLs (NT4), but Samba's SDDL only writes
+ revision 4 ACLs (which are otherwise identical).
+ """
+ munge_to_v4 = False
+ json_file = TEST_DIR / 'short-ordinary-acls-v2.json.gz'
+
+
+@DynamicTestCase
+class SDDLvsDescriptorCollectedConditionalAces(SDDLvsDescriptorBase):
+ """Some conditional ACE strings that have collected up.
+ """
+ json_file = TEST_DIR / 'conditional_aces.txt.json'
diff --git a/python/samba/tests/segfault.py b/python/samba/tests/segfault.py
new file mode 100644
index 0000000..faf858e
--- /dev/null
+++ b/python/samba/tests/segfault.py
@@ -0,0 +1,243 @@
+# Unix SMB/CIFS implementation.
+#
+# Copyright (C) Catalyst.Net Ltd. 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Test whether various python calls segfault when given unexpected input.
+"""
+
+import samba.tests
+import os
+import sys
+from samba.net import Net
+from samba.credentials import DONT_USE_KERBEROS
+from samba.dcerpc import misc, drsuapi, samr, unixinfo, dnsserver
+from samba import auth, gensec
+from samba.samdb import SamDB
+from samba import netbios
+from samba import registry
+from samba import ldb
+from samba import messaging
+
+import traceback
+
+
+def segfault_detector(f):
+ def wrapper(*args, **kwargs):
+ pid = os.fork()
+ if pid == 0:
+ try:
+ f(*args, **kwargs)
+ except Exception as e:
+ traceback.print_exc()
+ sys.stderr.flush()
+ sys.stdout.flush()
+ os._exit(0)
+
+ pid2, status = os.waitpid(pid, 0)
+ if os.WIFSIGNALED(status):
+ signal = os.WTERMSIG(status)
+ raise AssertionError("Failed with signal %d" % signal)
+
+ return wrapper
+
+
+def no_gdb_backtrace(f):
+ from os import environ
+ def w(*args, **kwargs):
+ environ['PLEASE_NO_GDB_BACKTRACE'] = '1'
+ f(*args, **kwargs)
+ del environ['PLEASE_NO_GDB_BACKTRACE']
+
+ return w
+
+
+class SegfaultTests(samba.tests.TestCase):
+ def get_lp_et_al(self):
+ server = os.environ["SERVER"]
+ lp = self.get_loadparm()
+
+ creds = self.insta_creds(template=self.get_credentials(),
+ kerberos_state=DONT_USE_KERBEROS)
+ return lp, creds, server
+
+ def get_samdb(self):
+ lp, creds, server = self.get_lp_et_al()
+ url = 'ldap://' + server
+ ldb = SamDB(url, credentials=creds, lp=lp)
+ return ldb
+
+ @segfault_detector
+ def test_net_replicate_init__1(self):
+ lp, creds, server = self.get_lp_et_al()
+ net = Net(creds, lp, server=server)
+ net.replicate_init(42, lp, None, misc.GUID())
+
+ @no_gdb_backtrace
+ @segfault_detector
+ def test_net_replicate_init__3(self):
+ # third argument is also unchecked
+ samdb = self.get_samdb()
+ lp, creds, server = self.get_lp_et_al()
+ net = Net(creds, lp, server=server)
+ net.replicate_init(samdb, lp, 42, misc.GUID())
+
+ @segfault_detector
+ def test_net_replicate_chunk_1(self):
+ lp, creds, server = self.get_lp_et_al()
+ ctr = drsuapi.DsGetNCChangesCtr6()
+ net = Net(creds, lp, server=server)
+ net.replicate_chunk(42, 1, ctr)
+
+ @segfault_detector
+ def test_auth_context_gensec_start_server(self):
+ a = auth.AuthContext(ldb=42, methods=['sam'])
+ # there is no failure yet because the ldb is not actually
+ # dereferenced.
+ g = gensec.Security.start_server(auth_context=a)
+ # and still the ldb is not dereferenced...
+
+ @segfault_detector
+ def test_auth_user_session(self):
+ s = auth.user_session(ldb=42, principal='foo')
+
+ @segfault_detector
+ def test_gensec_start_server(self):
+ gensec.Security.start_server(auth_context=42)
+
+ @segfault_detector
+ def test_netbios_query_name(self):
+ n = netbios.Node()
+ t = n.query_name((42, 'foo'), 'localhost')
+
+ @segfault_detector
+ def test_encrypt_netr_crypt_password(self):
+ lp, creds, server = self.get_lp_et_al()
+ creds.encrypt_netr_crypt_password(42)
+
+ @segfault_detector
+ def test_hive_open_ldb(self):
+ # we don't need to provide a valid path because we segfault first
+ try:
+ registry.open_ldb('', credentials=42)
+ except ldb.LdbError as e:
+ print("failed with %s" % e)
+
+ @segfault_detector
+ def test_hive_open_hive(self):
+ # we don't need to provide a valid path because we segfault first
+ try:
+ registry.open_hive('s', 's', 's', 's')
+ except ldb.LdbError as e:
+ print("failed with %s" % e)
+
+ @segfault_detector
+ def test_ldb_add_nameless_element(self):
+ m = ldb.Message()
+ e = ldb.MessageElement('q')
+ try:
+ m.add(e)
+ except ldb.LdbError:
+ pass
+ str(m)
+
+ @segfault_detector
+ def test_ldb_register_module(self):
+ ldb.register_module('')
+
+ @segfault_detector
+ def test_messaging_deregister(self):
+ messaging.deregister('s', 's', 's', False)
+
+ @segfault_detector
+ def test_rpcecho(self):
+ from samba.dcerpc import echo
+ echo.rpcecho("")
+
+ @segfault_detector
+ def test_dcerpc_idl_ref_elements(self):
+ """There are many pidl generated functions that crashed on this
+ pattern, where a NULL pointer was created rather than an empty
+ structure."""
+ samr.Connect5().out_info_out = 1
+
+ @segfault_detector
+ def test_dcerpc_idl_unixinfo_elements(self):
+ """Dereferencing is sufficient to crash"""
+ unixinfo.GetPWUid().out_infos
+
+ @segfault_detector
+ def test_dcerpc_idl_inline_arrays(self):
+ """Inline arrays were incorrectly handled."""
+ dnsserver.DNS_RPC_SERVER_INFO_DOTNET().pExtensions
+
+ @segfault_detector
+ def test_dcerpc_idl_set_inline_arrays(self):
+ """Setting an inline array was incorrectly handled."""
+ a = dnsserver.DNS_EXTENSION()
+ x = dnsserver.DNS_RPC_DP_INFO()
+ x.pwszReserved = [a, a, a]
+
+ @no_gdb_backtrace
+ @segfault_detector
+ def test_dnsp_string_list(self):
+ from samba.dcerpc import dnsp
+ # We segfault if s.count is greater than the length of s.str
+ s = dnsp.string_list()
+ s.count = 3
+ s.str
+
+ @no_gdb_backtrace
+ @segfault_detector
+ def test_dns_record(self):
+ from samba.dnsserver import TXTRecord
+ from samba.dcerpc import dnsp
+ # there are many others here
+ rec = TXTRecord(["a", "b", "c"])
+ rec.wType = dnsp.DNS_TYPE_A
+ rec.data
+
+ @no_gdb_backtrace
+ @segfault_detector
+ def test_ldb_msg_diff(self):
+ samdb = self.get_samdb()
+
+ msg = ldb.Message()
+ msg.dn = ldb.Dn(samdb, '')
+ diff = samdb.msg_diff(msg, msg)
+
+ del msg
+ diff.dn
+
+ @no_gdb_backtrace
+ @segfault_detector
+ def test_ldb_msg_del_dn(self):
+ msg = ldb.Message()
+ del msg.dn
+
+ @no_gdb_backtrace
+ @segfault_detector
+ def test_ldb_control_del_critical(self):
+ samdb = self.get_samdb()
+
+ c = ldb.Control(samdb, 'relax:1')
+ del c.critical
+
+ @segfault_detector
+ def test_random_bytes(self):
+ # memory error from SIZE_MAX -1 allocation.
+ from samba import generate_random_bytes
+ generate_random_bytes(-1)
diff --git a/python/samba/tests/sid_strings.py b/python/samba/tests/sid_strings.py
new file mode 100644
index 0000000..108351c
--- /dev/null
+++ b/python/samba/tests/sid_strings.py
@@ -0,0 +1,608 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Catalyst.NET Ltd 2022
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import os
+import string
+import sys
+import time
+from hashlib import blake2b
+
+import ldb
+
+from samba import param
+
+from samba.auth import system_session
+from samba.credentials import Credentials
+from samba.dcerpc import security
+from samba.ndr import ndr_unpack
+from samba.samdb import SamDB
+from samba.tests import (
+ DynamicTestCase,
+ TestCase,
+ delete_force,
+ env_get_var_value,
+)
+
+sys.path.insert(0, 'bin/python')
+os.environ['PYTHONUNBUFFERED'] = '1'
+
+
+late_ERR_CONSTRAINT_VIOLATION = b"a hack to allow Windows to sometimes fail late"
+
+
+class SidStringBase(TestCase):
+ @classmethod
+ def setUpDynamicTestCases(cls):
+ if not hasattr(cls, 'skip_local'):
+ cls.skip_local = env_get_var_value('SAMBA_SID_STRINGS_SKIP_LOCAL',
+ allow_missing=True)
+
+ if env_get_var_value('CHECK_ALL_COMBINATIONS',
+ allow_missing=True):
+ for x in string.ascii_uppercase:
+ for y in string.ascii_uppercase:
+ code = x + y
+ if code not in cls.cases:
+ cls.cases[code] = None
+
+ for code, expected_sid in cls.cases.items():
+ name = code
+
+ cls.generate_dynamic_test('test_sid_string', name,
+ code, expected_sid)
+ if not cls.skip_local:
+ cls.generate_dynamic_test('test_sid_string_internal', name,
+ code, expected_sid)
+
+ @classmethod
+ def setUpClass(cls):
+ super().setUpClass()
+
+ server = os.environ['DC_SERVER']
+ host = f'ldap://{server}'
+
+ lp = param.LoadParm()
+ lp.load(os.environ['SMB_CONF_PATH'])
+
+ creds = Credentials()
+ creds.guess(lp)
+ creds.set_username(env_get_var_value('DC_USERNAME'))
+ creds.set_password(env_get_var_value('DC_PASSWORD'))
+
+ cls.ldb = SamDB(host, credentials=creds,
+ session_info=system_session(lp), lp=lp)
+ cls.base_dn = cls.ldb.domain_dn()
+ cls.schema_dn = cls.ldb.get_schema_basedn().get_linearized()
+ cls.timestamp = str(int(time.time()))
+ cls.domain_sid = cls.ldb.get_domain_sid()
+
+ def _test_sid_string_with_args(self, code, expected_sid):
+ suffix = int(blake2b(code.encode(), digest_size=3).hexdigest(), 16)
+
+ class_name = f'my-Sid-String-Class-{self.timestamp}-{suffix}'
+ class_ldap_display_name = class_name.replace('-', '')
+
+ class_dn = f'CN={class_name},{self.schema_dn}'
+
+ governs_id = f'1.3.6.1.4.1.7165.4.6.2.9.1{self.timestamp[-7:]}.{suffix}'
+
+ # expected_sid can be a SID string, an error code, None, or a
+ # special value indicating a deferred error, as follows:
+ #
+ # * a number represents the expected error code at the *first*
+ # hurdle, creating the classSchema object.
+ #
+ # * late_ERR_CONSTRAINT_VIOLATION means an error when
+ # creating an object based on the class schema.
+ #
+ # * None means a somewhat unspecified error or failure to set
+ # the object owner sid.
+ #
+ # * a string is the expected owner sid. The rid is borrowed
+ #   and tacked onto the governs-id.
+
+ if expected_sid is None:
+ expected_err = ldb.ERR_UNWILLING_TO_PERFORM
+ elif isinstance(expected_sid, int):
+ expected_err = expected_sid
+ elif expected_sid is late_ERR_CONSTRAINT_VIOLATION:
+ expected_err = None
+ else:
+ expected_err = None
+ # Append the RID to our OID to ensure more uniqueness.
+ rid = expected_sid.rsplit('-', 1)[1]
+ governs_id += f'.{rid}'
+
+ ldif = f'''
+dn: {class_dn}
+objectClass: classSchema
+cn: {class_name}
+governsId: {governs_id}
+subClassOf: top
+possSuperiors: domainDNS
+defaultSecurityDescriptor: O:{code}
+'''
+ try:
+ self.ldb.add_ldif(ldif)
+ except ldb.LdbError as err:
+ num, _ = err.args
+ self.assertEqual(num, expected_err)
+ return
+ else:
+ if isinstance(expected_sid, int):
+ self.fail("should have failed")
+
+ # Search for created objectclass
+ res = self.ldb.search(class_dn, scope=ldb.SCOPE_BASE,
+ attrs=['defaultSecurityDescriptor'])
+ self.assertEqual(1, len(res))
+ self.assertEqual(res[0].get('defaultSecurityDescriptor', idx=0),
+ f'O:{code}'.encode('utf-8'))
+
+ ldif = '''
+dn:
+changetype: modify
+add: schemaUpdateNow
+schemaUpdateNow: 1
+'''
+ self.ldb.modify_ldif(ldif)
+
+ object_name = f'sddl_{self.timestamp}_{suffix}'
+ object_dn = f'CN={object_name},{self.base_dn}'
+
+ ldif = f'''
+dn: {object_dn}
+objectClass: {class_ldap_display_name}
+cn: {object_name}
+'''
+ if expected_sid is late_ERR_CONSTRAINT_VIOLATION:
+ expected_err = ldb.ERR_CONSTRAINT_VIOLATION
+
+ try:
+ self.ldb.add_ldif(ldif)
+ except ldb.LdbError as err:
+ num, _ = err.args
+ self.assertEqual(num, expected_err)
+ return
+
+ if expected_sid is not None:
+ self.assertIsNone(expected_err)
+
+ # Search for created object
+ res = self.ldb.search(object_dn, scope=ldb.SCOPE_BASE,
+ attrs=['nTSecurityDescriptor'])
+ self.assertEqual(1, len(res))
+
+ # Delete the object
+ delete_force(self.ldb, object_dn)
+
+ data = res[0].get('nTSecurityDescriptor', idx=0)
+ descriptor = ndr_unpack(security.descriptor, data)
+
+ expected_sid = self.format_expected_sid(expected_sid)
+ owner_sid = str(descriptor.owner_sid)
+ self.assertEqual(expected_sid, owner_sid)
+
+ def format_expected_sid(self, expected_sid):
+ if expected_sid is None:
+ return f'{self.domain_sid}-{security.DOMAIN_RID_ADMINS}'
+
+ if not isinstance(expected_sid, str):
+ # never going to match, should have failed already
+ return None
+
+ return expected_sid.format(domain_sid=self.domain_sid)
+
+ def _test_sid_string_internal_with_args(self, code, expected_sid):
+ """We just want to test the SIDs, which Samba can't really do because
+ it doesn't parse them until creating an object using the
+ schema class, at which time it doesn't distinguish between a
+ missing value and a nonsense value.
+
+ So let's also run the test using libcli/security/sddl.c and
+ see what we *would* have done.
+ """
+ sddl = f"O:{code}"
+ domsid = security.dom_sid(self.domain_sid)
+
+ try:
+ sd = security.descriptor.from_sddl(sddl, domsid)
+ except (ValueError, security.SDDLValueError):
+ # we don't have detail as to what went wrong
+ self.assertNotIsInstance(expected_sid, str)
+ else:
+ expected_sid = self.format_expected_sid(expected_sid)
+ self.assertEqual(expected_sid, str(sd.owner_sid))
+
+
+@DynamicTestCase
+class SidStringTests(SidStringBase):
+ """Testing two letter aliases."""
+ cases = {
+ 'AA': 'S-1-5-32-579',
+ 'AC': 'S-1-15-2-1',
+ 'AN': 'S-1-5-7',
+ 'AO': 'S-1-5-32-548',
+ 'AP': '{domain_sid}-525',
+ 'AS': 'S-1-18-1',
+ 'AU': 'S-1-5-11',
+ 'BA': 'S-1-5-32-544',
+ 'BG': 'S-1-5-32-546',
+ 'BO': 'S-1-5-32-551',
+ 'BU': 'S-1-5-32-545',
+ 'CA': '{domain_sid}-517',
+ 'CD': 'S-1-5-32-574',
+ 'CG': 'S-1-3-1',
+ 'CN': '{domain_sid}-522',
+ 'CO': 'S-1-3-0',
+ 'CY': 'S-1-5-32-569',
+ 'DC': '{domain_sid}-515',
+ 'DD': '{domain_sid}-516',
+ 'DG': '{domain_sid}-514',
+ 'DU': '{domain_sid}-513',
+ 'EA': '{domain_sid}-519',
+ 'ED': 'S-1-5-9',
+ 'EK': '{domain_sid}-527',
+ 'ER': 'S-1-5-32-573',
+ 'ES': 'S-1-5-32-576',
+ 'HA': 'S-1-5-32-578',
+ 'HI': 'S-1-16-12288',
+ 'IS': 'S-1-5-32-568',
+ 'IU': 'S-1-5-4',
+ 'KA': '{domain_sid}-526',
+ 'LA': '{domain_sid}-500',
+ 'LG': '{domain_sid}-501',
+ 'LS': 'S-1-5-19',
+ 'LU': 'S-1-5-32-559',
+ 'LW': 'S-1-16-4096',
+ 'ME': 'S-1-16-8192',
+ 'MP': 'S-1-16-8448',
+ 'MS': 'S-1-5-32-577',
+ 'MU': 'S-1-5-32-558',
+ 'NO': 'S-1-5-32-556',
+ 'NS': 'S-1-5-20',
+ 'NU': 'S-1-5-2',
+ 'OW': 'S-1-3-4',
+ 'PA': '{domain_sid}-520',
+ 'PO': 'S-1-5-32-550',
+ 'PS': 'S-1-5-10',
+ 'PU': 'S-1-5-32-547',
+ 'RA': 'S-1-5-32-575',
+ 'RC': 'S-1-5-12',
+ 'RD': 'S-1-5-32-555',
+ 'RE': 'S-1-5-32-552',
+ 'RM': 'S-1-5-32-580',
+ 'RO': '{domain_sid}-498',
+ 'RS': '{domain_sid}-553',
+ 'RU': 'S-1-5-32-554',
+ 'SA': '{domain_sid}-518',
+ 'SI': 'S-1-16-16384',
+ 'SO': 'S-1-5-32-549',
+ 'SS': 'S-1-18-2',
+ 'SU': 'S-1-5-6',
+ 'SY': 'S-1-5-18',
+ # Not tested, as it always gives us an OPERATIONS_ERROR with Windows.
+ # 'UD': 'S-1-5-84-0-0-0-0-0',
+ 'WD': 'S-1-1-0',
+ 'WR': 'S-1-5-33',
+ 'aa': 'S-1-5-32-579',
+ 'Aa': 'S-1-5-32-579',
+ 'aA': 'S-1-5-32-579',
+ 'BR': None,
+ 'IF': None,
+ 'LK': None,
+ }
+
+
+@DynamicTestCase
+class SidStringsThatStartWithS(SidStringBase):
+ """Testing normal or normal-adjacent SIDs"""
+ cases = {
+ # testing explicit string to string round trips.
+ 'S-1-5-32-579': 'S-1-5-32-579',
+ 'S-1-5-0x20-579': 'S-1-5-32-579', # hex
+ 'S-1-0x05-32-579': 'S-1-5-32-579',
+ 'S-1-5-040-579': 'S-1-5-40-579', # no octal
+ 'S-1-0x50000000-32-579': 'S-1-1342177280-32-579',
+ 'S-1-0x500000000-32-579': 'S-1-0x500000000-32-579',
+ 'S-1-21474836480-32-579': 'S-1-0x500000000-32-579', # >32 bit is hex
+ f'S-1-5-{(1 << 32) - 1}-579': 'S-1-5-4294967295-579',
+ f'S-1-{(1 << 48) - 1}-579': 'S-1-0xffffffffffff-579',
+ f'S-1-{(1 << 48)}-579': ldb.ERR_UNWILLING_TO_PERFORM,
+ 'S-1-99999999999999999999999999999999999999-32-11111111111': ldb.ERR_UNWILLING_TO_PERFORM,
+ 'S-1-5-0-579': 'S-1-5-0-579',
+ 'S-1-0-0-579': 'S-1-0-0-579',
+ 'S-1-0x5-0x20-0x243': 'S-1-5-32-579',
+ 'S-1-5-32--579': ldb.ERR_UNWILLING_TO_PERFORM,
+ 'S-1-5-32- 579': ldb.ERR_UNWILLING_TO_PERFORM,
+ 'S-1-5-32 -579': ldb.ERR_UNWILLING_TO_PERFORM,
+ 'S-1-5-3 2-579': ldb.ERR_UNWILLING_TO_PERFORM,
+ ' S-1-1-1-1-1-1-1': ldb.ERR_UNWILLING_TO_PERFORM,
+ # go to lower case in hex.
+ 'S-1-0xABcDef123-0xABCDef-579': 'S-1-0xabcdef123-11259375-579',
+ 'S-1-1-1-1-1-1-1': 'S-1-1-1-1-1-1-1',
+ 's-1-5-32-579': 'S-1-5-32-579',
+ 'S-01-5-32-579': 'S-1-5-32-579',
+ 'S-000000001-5-32-579': 'S-1-5-32-579',
+ # some strings from https://bugzilla.samba.org/show_bug.cgi?id=14213
+ 'S-1-0': ldb.ERR_UNWILLING_TO_PERFORM,
+ 'S-1-22': ldb.ERR_UNWILLING_TO_PERFORM,
+ 'S-1-22-1': 'S-1-22-1',
+ 'S-1-22-1-0': 'S-1-22-1-0',
+ 'S-1-3-0': 'S-1-3-0',
+ 'S-1-3-99': 'S-1-3-99',
+ 'S-01-05-020-0243': 'S-1-5-20-243',
+ 'S-000000000001-5-20-243': 'S-1-5-20-243',
+ 'S-1-000000000000000005-20-243': 'S-1-5-20-243',
+ 'S-1-5-20-00000000000243': 'S-1-5-20-243',
+ }
+
+
+@DynamicTestCase
+class SidStringBehavioursThatWindowsAllows(SidStringBase):
+ """Windows interpretations that we probably don't want to follow"""
+ cases = {
+ # saturating sub-auth values at 32 bits
+ 'S-1-5-9999999999-579': 'S-1-5-4294967295-579',
+ 'S-1-0x500000000-0x500000000-579': 'S-1-0x500000000-4294967295-579',
+ 'S-1-5-11111111111111111111111111111111111-579': 'S-1-5-4294967295-579',
+ f'S-1-5-{(1 << 64) - 1}-579': 'S-1-5-4294967295-579',
+ f'S-1-5-{1 << 64}-579': 'S-1-5-4294967295-579',
+ # S-0x1- ?! on Windows this makes everything else a hex number.
+ 'S-0x1-5-40-579': 'S-1-5-64-1401',
+ 'S-0x1-0-0-579': 'S-1-0-0-1401',
+ 'S-0x1-500000000-20-243': 'S-1-0x500000000-32-579',
+ 'S-0x1-5-20-243': 'S-1-5-32-579',
+ 'S-0x1-0x5-020-0243': 'S-1-5-32-579',
+ 'S-1-0xABcDef123-0xABCDef123-579': 'S-1-0xabcdef123-4294967295-579',
+
+ 'S-0-5-32-579': late_ERR_CONSTRAINT_VIOLATION,
+ 'S-2-5-32-579': late_ERR_CONSTRAINT_VIOLATION,
+ 'S-10-5-32-579': late_ERR_CONSTRAINT_VIOLATION,
+ }
+
+
+@DynamicTestCase
+class SidStringBehavioursThatSambaPrefers(SidStringBase):
+ """Aspirational alternative answers to the
+ SidStringBehavioursThatWindowsAllows cases."""
+ cases = {
+ 'S-1-5-9999999999-579': ldb.ERR_UNWILLING_TO_PERFORM,
+ 'S-1-0x500000000-0x500000000-579': ldb.ERR_UNWILLING_TO_PERFORM,
+ 'S-1-5-11111111111111111111111111111111111-579': ldb.ERR_UNWILLING_TO_PERFORM,
+ f'S-1-5-{(1 << 64) - 1}-579': ldb.ERR_UNWILLING_TO_PERFORM,
+ f'S-1-5-{1 << 64}-579': ldb.ERR_UNWILLING_TO_PERFORM,
+ 'S-0x1-5-40-579': ldb.ERR_UNWILLING_TO_PERFORM,
+ 'S-0x1-0-0-579': ldb.ERR_UNWILLING_TO_PERFORM,
+ 'S-0x1-500000000-20-243': ldb.ERR_UNWILLING_TO_PERFORM,
+ 'S-0x1-5-20-243': ldb.ERR_UNWILLING_TO_PERFORM,
+ 'S-0x1-0x5-020-0243': ldb.ERR_UNWILLING_TO_PERFORM,
+ 'S-1-0xABcDef123-0xABCDef123-579': ldb.ERR_UNWILLING_TO_PERFORM,
+
+ 'S-0-5-32-579': ldb.ERR_UNWILLING_TO_PERFORM,
+ 'S-2-5-32-579': ldb.ERR_UNWILLING_TO_PERFORM,
+ 'S-10-5-32-579': ldb.ERR_UNWILLING_TO_PERFORM,
+ }
+
+
+@DynamicTestCase
+class SidStringsAsDnInSearchBase(SidStringBase):
+ """How does a bad <SID=x> dn work as a search base, if at all?
+
+ This suggests that Windows does the SID parsing
+ (INVALID_DN_SYNTAX) before starting the search (NO_SUCH_OBJECT).
+
+ Currently Samba does not.
+ """
+ skip_local = True
+ cases = {' S-1-1-1-1-1-1-1': ldb.ERR_INVALID_DN_SYNTAX,
+ 'S-0-5-32-579': ldb.ERR_INVALID_DN_SYNTAX,
+ 'S-000000000001-5-20-243': ldb.ERR_INVALID_DN_SYNTAX,
+ 'S-000000001-5-32-579': ldb.ERR_INVALID_DN_SYNTAX,
+ 'S-01-05-020-0243': ldb.ERR_NO_SUCH_OBJECT,
+ 'S-01-5-32-11579': ldb.ERR_NO_SUCH_OBJECT,
+ 'S-0x1-0-0-579': ldb.ERR_INVALID_DN_SYNTAX,
+ 'S-0x1-0x5-020-0243': ldb.ERR_INVALID_DN_SYNTAX,
+ 'S-0x1-5-20-243': ldb.ERR_INVALID_DN_SYNTAX,
+ 'S-0x1-5-40-579': ldb.ERR_INVALID_DN_SYNTAX,
+ 'S-0x1-500000000-20-243': ldb.ERR_INVALID_DN_SYNTAX,
+ 'S-1-0': ldb.ERR_NO_SUCH_OBJECT,
+ 'S-1-0-0-579': ldb.ERR_NO_SUCH_OBJECT,
+ 'S-1-0x05-32-11579': ldb.ERR_NO_SUCH_OBJECT,
+ 'S-1-0x5-0x20-0x243': ldb.ERR_NO_SUCH_OBJECT,
+ 'S-1-0x50000000-32-579': ldb.ERR_NO_SUCH_OBJECT,
+ 'S-1-0x500000000-0x500000000-579': ldb.ERR_NO_SUCH_OBJECT,
+ 'S-1-0x500000000-32-579': ldb.ERR_NO_SUCH_OBJECT,
+ 'S-1-0xABcDef123-0xABCDef123-579': ldb.ERR_NO_SUCH_OBJECT,
+ 'S-1-1-1-1-1-1-1': ldb.ERR_NO_SUCH_OBJECT,
+ 'S-1-21474836480-32-579': ldb.ERR_NO_SUCH_OBJECT,
+ 'S-1-22': ldb.ERR_NO_SUCH_OBJECT,
+ 'S-1-22-1': ldb.ERR_NO_SUCH_OBJECT,
+ 'S-1-22-1-0': ldb.ERR_NO_SUCH_OBJECT,
+ 'S-1-281474976710655-579': ldb.ERR_NO_SUCH_OBJECT,
+ 'S-1-281474976710656-579': ldb.ERR_INVALID_DN_SYNTAX,
+ 'S-1-3-0': ldb.ERR_NO_SUCH_OBJECT,
+ 'S-1-3-99': ldb.ERR_NO_SUCH_OBJECT,
+ 'S-1-5-0-579': ldb.ERR_NO_SUCH_OBJECT,
+ 'S-1-5-040-579': ldb.ERR_NO_SUCH_OBJECT,
+ 'S-1-5-0x20-579': ldb.ERR_NO_SUCH_OBJECT,
+ 'S-1-5-11111111111111111111111111111111111-579': ldb.ERR_INVALID_DN_SYNTAX,
+ 'S-1-5-18446744073709551615-579': ldb.ERR_INVALID_DN_SYNTAX,
+ 'S-1-5-18446744073709551616-579': ldb.ERR_INVALID_DN_SYNTAX,
+ 'S-1-5-3 2-579': ldb.ERR_NO_SUCH_OBJECT,
+ 'S-1-5-32 -11111579': None,
+ 'S-1-5-32- 579': ldb.ERR_INVALID_DN_SYNTAX,
+ 'S-1-5-32--579': ldb.ERR_INVALID_DN_SYNTAX,
+ 'S-1-5-32-11579': ldb.ERR_NO_SUCH_OBJECT,
+ 'S-1-5-4294967295-579': ldb.ERR_NO_SUCH_OBJECT,
+ 'S-1-5-9999999999-579': ldb.ERR_INVALID_DN_SYNTAX,
+ 'S-1-99999999999999999999999999999999999999-32-11111111111': ldb.ERR_INVALID_DN_SYNTAX,
+ 'S-10-5-32-579': ldb.ERR_INVALID_DN_SYNTAX,
+ 'S-2-5-32-579': ldb.ERR_INVALID_DN_SYNTAX,
+ 's-1-5-32-579': ldb.ERR_INVALID_DN_SYNTAX,
+ 'AA': ldb.ERR_INVALID_DN_SYNTAX,
+ }
+
+ def _test_sid_string_with_args(self, code, expected):
+ try:
+ self.ldb.search(base=f"<SID={code}>",
+ scope=ldb.SCOPE_BASE,
+ attrs=[])
+ except ldb.LdbError as e:
+ self.assertEqual(e.args[0], expected)
+ else:
+ self.assertIsNone(expected)
+
+
+@DynamicTestCase
+class SidStringsAsDnSearchWithDnObject(SidStringBase):
+ """How does a bad <SID=x> dn work as a search base, if at all?
+
+ This time we parse the DN in ldb first.
+ """
+ skip_local = True
+ cases = {' S-1-1-1-1-1-1-1': ('parse error', None),
+ 'S-0-5-32-579': (None, ldb.ERR_INVALID_DN_SYNTAX),
+ 'S-000000000001-5-20-243': ('parse error', None),
+ 'S-000000001-5-32-579': ('parse error', None),
+ 'S-01-05-020-0243': (None, ldb.ERR_NO_SUCH_OBJECT),
+ 'S-0x1-0-0-579': ('parse error', None),
+ 'S-0x1-0x5-020-0243': ('parse error', None),
+ 'S-0x1-5-20-243': ('parse error', None),
+ 'S-0x1-5-40-579': ('parse error', None),
+ 'S-0x1-500000000-20-243': ('parse error', None),
+ 'S-1-0': (None, ldb.ERR_NO_SUCH_OBJECT),
+ 'S-1-0-0-579': (None, ldb.ERR_NO_SUCH_OBJECT),
+ 'S-1-0x05-32-579': (None, None),
+ 'S-1-0x5-0x20-0x243': (None, ldb.ERR_NO_SUCH_OBJECT),
+ 'S-1-0x50000000-32-579': (None, ldb.ERR_NO_SUCH_OBJECT),
+ 'S-1-0x500000000-0x500000000-579': (None, ldb.ERR_NO_SUCH_OBJECT),
+ 'S-1-0x500000000-32-579': (None, ldb.ERR_NO_SUCH_OBJECT),
+ 'S-1-0xABcDef123-0xABCDef123-579': (None, ldb.ERR_NO_SUCH_OBJECT),
+ 'S-1-1-1-1-1-1-1': (None, ldb.ERR_NO_SUCH_OBJECT),
+ 'S-1-21474836480-32-579': (None, ldb.ERR_NO_SUCH_OBJECT),
+ 'S-1-22': (None, ldb.ERR_NO_SUCH_OBJECT),
+ 'S-1-22-1': (None, ldb.ERR_NO_SUCH_OBJECT),
+ 'S-1-22-1-0': (None, ldb.ERR_NO_SUCH_OBJECT),
+ 'S-1-281474976710655-579': (None, ldb.ERR_NO_SUCH_OBJECT),
+ 'S-1-281474976710656-579': ('parse error', None),
+ 'S-1-3-0': (None, ldb.ERR_NO_SUCH_OBJECT),
+ 'S-1-3-99': (None, ldb.ERR_NO_SUCH_OBJECT),
+ 'S-1-5-0-579': (None, ldb.ERR_NO_SUCH_OBJECT),
+ 'S-1-5-040-579': (None, ldb.ERR_NO_SUCH_OBJECT),
+ 'S-1-5-0x20-579': (None, ldb.ERR_NO_SUCH_OBJECT),
+ 'S-1-5-11111111111111111111111111111111111-579': ('parse error', None),
+ 'S-1-5-18446744073709551615-579': ('parse error', None),
+ 'S-1-5-18446744073709551616-579': ('parse error', None),
+ 'S-1-5-3 2-579': (None, ldb.ERR_NO_SUCH_OBJECT),
+ 'S-1-5-32- 579': ('parse error', None),
+ 'S-1-5-32--579': ('parse error', None),
+ 'S-1-5-4294967295-579': (None, ldb.ERR_NO_SUCH_OBJECT),
+ 'S-1-5-9999999999-579': ('parse error', None),
+ 'S-1-99999999999999999999999999999999999999-32-11111111111': ('parse error',
+ None),
+ 'S-10-5-32-579': (None, ldb.ERR_INVALID_DN_SYNTAX),
+ 'S-2-5-32-579': (None, ldb.ERR_INVALID_DN_SYNTAX),
+ 's-1-5-32-579': ('parse error', None),
+ }
+
+ def _test_sid_string_with_args(self, code, expected):
+ dn_err, search_err = expected
+ dn_str = f"<SID={code}>"
+ try:
+ dn = ldb.Dn(self.ldb, dn_str)
+ except ValueError:
+ self.assertEqual(dn_err, 'parse error')
+ return
+ except ldb.LdbError as e:
+ self.assertEqual(dn_err, e.args[0])
+ return
+
+ self.assertIsNone(dn_err)
+
+ try:
+ self.ldb.search(dn, scope=ldb.SCOPE_BASE, attrs=['*'])
+ except ldb.LdbError as e:
+ self.assertEqual(search_err, e.args[0])
+ return
+
+ self.assertIsNone(search_err)
+
+
+@DynamicTestCase
+class SidStringsAsDnInSearchFilter(SidStringBase):
+ """How does a bad <SID=x> dn work in a search filter?
+
+ Answer: on Windows it always works.
+ """
+ skip_local = True
+ cases = {}
+ cases.update(SidStringTests.cases)
+ cases.update(SidStringsThatStartWithS.cases)
+ cases.update(SidStringBehavioursThatSambaPrefers.cases)
+
+ def _test_sid_string_with_args(self, code, _dummy):
+ basedn = self.ldb.get_default_basedn()
+ try:
+ self.ldb.search(base=basedn,
+ scope=ldb.SCOPE_ONELEVEL,
+ expression=f"(distinguishedName=<SID={code}>)")
+ except ldb.LdbError as e:
+ self.fail(f"expected no failure, got {e}")
+
+
+@DynamicTestCase
+class SidStringsForSimpleBind(SidStringBase):
+ """Check whether dodgy SID strings work differently for simple-bind.
+
+ One of the many fallbacks for ldap simple bind is SID strings. We
+ just want to ensure that SIDs that might fail strangely in SID
+ parsing don't leak that strangeness (they don't).
+ """
+ skip_local = True
+ # here we are testing only the SID-like SIDs ("S-1-...", not "AA")
+ cases = {}
+ cases.update(SidStringsThatStartWithS.cases)
+ cases.update(SidStringBehavioursThatSambaPrefers.cases)
+
+ @classmethod
+ def setUpClass(cls):
+ super().setUpClass()
+ server = os.environ['DC_SERVER']
+ cls.lp = param.LoadParm()
+ cls.host = f'ldap://{server}'
+
+ def _test_sid_string_with_args(self, code, _dummy):
+ bind_creds = Credentials()
+ bind_creds.set_username(code)
+ bind_creds.set_password("please")
+
+ try:
+ SamDB(url=self.host,
+ lp=self.lp,
+ credentials=bind_creds)
+ self.fail(f"{code} seems to have connected properly")
+ except ldb.LdbError as e:
+ num, msg = e.args
+ self.assertIn("NT_STATUS_INVALID_PARAMETER", msg)
+
+
+if __name__ == '__main__':
+ global_asn1_print = False
+ global_hexdump = False
+ import unittest
+ unittest.main()
diff --git a/python/samba/tests/smb-notify.py b/python/samba/tests/smb-notify.py
new file mode 100755
index 0000000..4587a00
--- /dev/null
+++ b/python/samba/tests/smb-notify.py
@@ -0,0 +1,429 @@
+#!/usr/bin/env python3
+# Unix SMB/CIFS implementation. Tests for smb notify
+# Copyright (C) Björn Baumbach <bb@samba.org> 2020
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import sys
+import os
+
+sys.path.insert(0, "bin/python")
+os.environ["PYTHONUNBUFFERED"] = "1"
+
+import samba
+import random
+from samba.tests import TestCase
+from samba import credentials
+from samba.ntstatus import NT_STATUS_NOTIFY_CLEANUP
+from samba.samba3 import libsmb_samba_internal as libsmb
+from samba.samba3 import param as s3param
+from samba.dcerpc import security
+
+from samba import ntacls
+
+test_dir = os.path.join('notify_test_%d' % random.randint(0, 0xFFFF))
+
+class SMBNotifyTests(TestCase):
+ def setUp(self):
+ super().setUp()
+ self.server = samba.tests.env_get_var_value("SERVER")
+
+ # create an SMB connection to the server
+ self.lp = s3param.get_context()
+ self.lp.load(samba.tests.env_get_var_value("SMB_CONF_PATH"))
+
+ self.share = samba.tests.env_get_var_value("NOTIFY_SHARE")
+
+ creds = credentials.Credentials()
+ creds.guess(self.lp)
+ creds.set_username(samba.tests.env_get_var_value("USERNAME"))
+ creds.set_password(samba.tests.env_get_var_value("PASSWORD"))
+
+ strict_checking = samba.tests.env_get_var_value('STRICT_CHECKING', allow_missing=True)
+ if strict_checking is None:
+ strict_checking = '1'
+ self.strict_checking = bool(int(strict_checking))
+
+ self.smb_conn = libsmb.Conn(self.server, self.share, self.lp, creds)
+ self.smb_conn_unpriv = None
+
+ try:
+ self.smb_conn.deltree(test_dir)
+ except:
+ pass
+ self.smb_conn.mkdir(test_dir)
+
+ def connect_unpriv(self):
+ creds_unpriv = credentials.Credentials()
+ creds_unpriv.guess(self.lp)
+ creds_unpriv.set_username(samba.tests.env_get_var_value("USERNAME_UNPRIV"))
+ creds_unpriv.set_password(samba.tests.env_get_var_value("PASSWORD_UNPRIV"))
+
+ self.smb_conn_unpriv = libsmb.Conn(self.server, self.share, self.lp, creds_unpriv)
+
+ def tearDown(self):
+ super().tearDown()
+ try:
+ self.smb_conn.deltree(test_dir)
+ except:
+ pass
+
+ def make_path(self, dirpath, filename):
+ return os.path.join(dirpath, filename).replace('/', '\\')
+
    def test_notify(self):
        """Basic change-notify behaviour.

        Sets up recursive notify requests on the share root and on
        test_dir, checks ADDED/REMOVED events from mkdir/rmdir at both
        levels, and finally the NT_STATUS_NOTIFY_CLEANUP raised when a
        watched handle is closed.
        """
        # setup notification request on the share root
        root_fnum = self.smb_conn.create(Name="", ShareAccess=1)
        root_notify = self.smb_conn.notify(fnum=root_fnum,
                                           buffer_size=0xffff,
                                           completion_filter=libsmb.FILE_NOTIFY_CHANGE_ALL,
                                           recursive=True)
        # setup notification request on the test_dir
        test_dir_fnum = self.smb_conn.create(Name=test_dir, ShareAccess=1)
        test_dir_notify = self.smb_conn.notify(fnum=test_dir_fnum,
                                               buffer_size=0xffff,
                                               completion_filter=libsmb.FILE_NOTIFY_CHANGE_ALL,
                                               recursive=True)

        # make sure we didn't receive any changes yet.
        self.smb_conn.echo()
        changes = root_notify.get_changes(wait=False)
        self.assertIsNone(changes)
        changes = test_dir_notify.get_changes(wait=False)
        self.assertIsNone(changes)

        # create a test directory
        dir_name = "dir"
        dir_path = self.make_path(test_dir, dir_name)
        self.smb_conn.mkdir(dir_path)

        # check for 'added' notifications
        changes = root_notify.get_changes(wait=True)
        self.assertIsNotNone(changes)
        self.assertEqual(changes[0]['name'], dir_path)
        self.assertEqual(changes[0]['action'], libsmb.NOTIFY_ACTION_ADDED)
        self.assertEqual(len(changes), 1)
        changes = test_dir_notify.get_changes(wait=True)
        self.assertIsNotNone(changes)
        self.assertEqual(changes[0]['name'], dir_name)
        self.assertEqual(changes[0]['action'], libsmb.NOTIFY_ACTION_ADDED)
        self.assertEqual(len(changes), 1)

        # re-add notification requests (a request is consumed once it
        # has delivered changes)
        root_notify = self.smb_conn.notify(fnum=root_fnum,
                                           buffer_size=0xffff,
                                           completion_filter=libsmb.FILE_NOTIFY_CHANGE_ALL,
                                           recursive=True)
        test_dir_notify = self.smb_conn.notify(fnum=test_dir_fnum,
                                               buffer_size=0xffff,
                                               completion_filter=libsmb.FILE_NOTIFY_CHANGE_ALL,
                                               recursive=True)

        # make sure we didn't receive any changes yet.
        self.smb_conn.echo()
        changes = root_notify.get_changes(wait=False)
        self.assertIsNone(changes)
        changes = test_dir_notify.get_changes(wait=False)
        self.assertIsNone(changes)

        # create subdir and trigger notifications
        sub_name = "subdir"
        sub_path_rel = self.make_path(dir_name, sub_name)
        sub_path_full = self.make_path(dir_path, sub_name)
        self.smb_conn.mkdir(sub_path_full)

        # check for 'added' notifications
        changes = root_notify.get_changes(wait=True)
        self.assertIsNotNone(changes)
        self.assertEqual(changes[0]['name'], sub_path_full)
        self.assertEqual(changes[0]['action'], libsmb.NOTIFY_ACTION_ADDED)
        self.assertEqual(len(changes), 1)
        changes = test_dir_notify.get_changes(wait=True)
        self.assertIsNotNone(changes)
        self.assertEqual(changes[0]['name'], sub_path_rel)
        self.assertEqual(changes[0]['action'], libsmb.NOTIFY_ACTION_ADDED)
        self.assertEqual(len(changes), 1)

        # re-add notification requests
        root_notify = self.smb_conn.notify(fnum=root_fnum,
                                           buffer_size=0xffff,
                                           completion_filter=libsmb.FILE_NOTIFY_CHANGE_ALL,
                                           recursive=True)
        test_dir_notify = self.smb_conn.notify(fnum=test_dir_fnum,
                                               buffer_size=0xffff,
                                               completion_filter=libsmb.FILE_NOTIFY_CHANGE_ALL,
                                               recursive=True)

        # make sure we didn't receive any changes yet.
        self.smb_conn.echo()
        changes = root_notify.get_changes(wait=False)
        self.assertIsNone(changes)
        changes = test_dir_notify.get_changes(wait=False)
        self.assertIsNone(changes)

        # remove test dir and trigger notifications
        self.smb_conn.rmdir(sub_path_full)

        # check for 'removed' notifications
        changes = root_notify.get_changes(wait=True)
        self.assertIsNotNone(changes)
        self.assertEqual(changes[0]['name'], sub_path_full)
        self.assertEqual(changes[0]['action'], libsmb.NOTIFY_ACTION_REMOVED)
        self.assertEqual(len(changes), 1)
        changes = test_dir_notify.get_changes(wait=True)
        self.assertIsNotNone(changes)
        self.assertEqual(changes[0]['name'], sub_path_rel)
        self.assertEqual(changes[0]['action'], libsmb.NOTIFY_ACTION_REMOVED)
        self.assertEqual(len(changes), 1)

        # re-add notification requests
        root_notify = self.smb_conn.notify(fnum=root_fnum,
                                           buffer_size=0xffff,
                                           completion_filter=libsmb.FILE_NOTIFY_CHANGE_ALL,
                                           recursive=True)
        test_dir_notify = self.smb_conn.notify(fnum=test_dir_fnum,
                                               buffer_size=0xffff,
                                               completion_filter=libsmb.FILE_NOTIFY_CHANGE_ALL,
                                               recursive=True)

        # make sure we didn't receive any changes yet.
        self.smb_conn.echo()
        changes = root_notify.get_changes(wait=False)
        self.assertIsNone(changes)
        changes = test_dir_notify.get_changes(wait=False)
        self.assertIsNone(changes)

        # closing the handle on test_dir will trigger
        # a NOTIFY_CLEANUP on test_dir_notify and
        # it also seems to update something on test_dir itself
        # and post a MODIFIED on root_notify
        #
        # TODO: find out why windows generates ACTION_MODIFIED
        # and why Samba doesn't
        self.smb_conn.close(test_dir_fnum)
        try:
            changes = test_dir_notify.get_changes(wait=True)
            self.fail()
        except samba.NTSTATUSError as err:
            self.assertEqual(err.args[0], NT_STATUS_NOTIFY_CLEANUP)
        self.smb_conn.echo()
        # The MODIFIED event is only required under strict checking
        # (Windows posts it, Samba currently does not).
        changes = root_notify.get_changes(wait=False)
        if self.strict_checking:
            self.assertIsNotNone(changes)
        if changes is not None:
            self.assertEqual(changes[0]['name'], test_dir)
            self.assertEqual(changes[0]['action'], libsmb.NOTIFY_ACTION_MODIFIED)
            self.assertEqual(len(changes), 1)

        # re-add notification request
        root_notify = self.smb_conn.notify(fnum=root_fnum,
                                           buffer_size=0xffff,
                                           completion_filter=libsmb.FILE_NOTIFY_CHANGE_ALL,
                                           recursive=True)

        # make sure we didn't receive any changes yet.
        self.smb_conn.echo()
        changes = root_notify.get_changes(wait=False)
        self.assertIsNone(changes)

        # remove test_dir
        self.smb_conn.rmdir(dir_path)

        # check for 'removed' notifications
        changes = root_notify.get_changes(wait=True)
        self.assertIsNotNone(changes)
        self.assertEqual(changes[0]['name'], dir_path)
        self.assertEqual(changes[0]['action'], libsmb.NOTIFY_ACTION_REMOVED)
        self.assertEqual(len(changes), 1)

        # re-add notification request
        root_notify = self.smb_conn.notify(fnum=root_fnum,
                                           buffer_size=0xffff,
                                           completion_filter=libsmb.FILE_NOTIFY_CHANGE_ALL,
                                           recursive=True)
        # closing the handle on the share root will trigger
        # a NOTIFY_CLEANUP on root_notify
        self.smb_conn.close(root_fnum)
        try:
            changes = root_notify.get_changes(wait=True)
            self.fail()
        except samba.NTSTATUSError as err:
            self.assertEqual(err.args[0], NT_STATUS_NOTIFY_CLEANUP)
+
+
    def _test_notify_privileged_path(self,
                                     monitor_path=None,
                                     rel_prefix=None):
        """Verify that notify events respect directory permissions.

        Creates a "private" directory readable only by SYSTEM and the
        owner, then checks that changes inside it are reported to the
        privileged watcher but not to an unprivileged one, while the
        removal of the private directory itself is reported to both.

        :param monitor_path: directory handle to watch ("" = share root)
        :param rel_prefix: prefix that makes event names relative to
            monitor_path
        """
        self.connect_unpriv()

        domain_sid = security.dom_sid()  # we just use S-0-0
        smb_helper = ntacls.SMBHelper(self.smb_conn, domain_sid)

        private_name = "private"
        private_rel = self.make_path(rel_prefix, private_name)
        private_path = self.make_path(test_dir, private_name)
        # create a private test directory
        self.smb_conn.mkdir(private_path)

        # Get the security descriptor and replace it
        # with a one that only grants access to SYSTEM and the
        # owner.
        private_path_sd_old = smb_helper.get_acl(private_path)
        private_path_sd_new = security.descriptor()
        private_path_sd_new.type = private_path_sd_old.type
        private_path_sd_new.revision = private_path_sd_old.revision
        private_path_sd_new = security.descriptor.from_sddl("G:BAD:(A;;0x%x;;;%s)(A;;0x%x;;;%s)" % (
            security.SEC_RIGHTS_DIR_ALL,
            security.SID_NT_SYSTEM,
            security.SEC_RIGHTS_DIR_ALL,
            str(private_path_sd_old.owner_sid)),
            domain_sid)
        private_path_sd_new.type |= security.SEC_DESC_SELF_RELATIVE
        private_path_sd_new.type |= security.SEC_DESC_DACL_PROTECTED
        set_secinfo = security.SECINFO_GROUP | security.SECINFO_DACL | security.SECINFO_PROTECTED_DACL
        smb_helper.set_acl(private_path, private_path_sd_new, sinfo=set_secinfo)

        # setup notification request as privileged user
        monitor_priv_fnum = self.smb_conn.create(Name=monitor_path, ShareAccess=1)
        notify_priv = self.smb_conn.notify(fnum=monitor_priv_fnum,
                                           buffer_size=0xffff,
                                           completion_filter=libsmb.FILE_NOTIFY_CHANGE_ALL,
                                           recursive=True)

        # setup notification request as unprivileged user
        monitor_unpriv_fnum = self.smb_conn_unpriv.create(Name=monitor_path, ShareAccess=1)
        notify_unpriv = self.smb_conn_unpriv.notify(fnum=monitor_unpriv_fnum,
                                                    buffer_size=0xffff,
                                                    completion_filter=libsmb.FILE_NOTIFY_CHANGE_ALL,
                                                    recursive=True)

        # make sure we didn't receive any changes yet.
        self.smb_conn.echo()
        changes = notify_priv.get_changes(wait=False)
        self.assertIsNone(changes)
        self.smb_conn_unpriv.echo()
        changes = notify_unpriv.get_changes(wait=False)
        self.assertIsNone(changes)

        # trigger notification in the private dir
        new_name = 'test-new'
        new_rel = self.make_path(private_rel, new_name)
        new_path = self.make_path(private_path, new_name)
        self.smb_conn.mkdir(new_path)

        # check that only the privileged user received the changes
        changes = notify_priv.get_changes(wait=True)
        self.assertIsNotNone(changes)
        self.assertEqual(changes[0]['name'], new_rel)
        self.assertEqual(changes[0]['action'], libsmb.NOTIFY_ACTION_ADDED)
        self.assertEqual(len(changes), 1)
        notify_priv = self.smb_conn.notify(fnum=monitor_priv_fnum,
                                           buffer_size=0xffff,
                                           completion_filter=libsmb.FILE_NOTIFY_CHANGE_ALL,
                                           recursive=True)

        # check that the unprivileged user does not receive the changes
        self.smb_conn_unpriv.echo()
        changes = notify_unpriv.get_changes(wait=False)
        self.assertIsNone(changes)
        # and there's no additional change for the privileged user
        self.smb_conn.echo()
        changes = notify_priv.get_changes(wait=False)
        self.assertIsNone(changes)

        # trigger notification in the private dir
        self.smb_conn.rmdir(new_path)

        # check that only the privileged user received the changes
        changes = notify_priv.get_changes(wait=True)
        self.assertIsNotNone(changes)
        self.assertEqual(changes[0]['name'], new_rel)
        self.assertEqual(changes[0]['action'], libsmb.NOTIFY_ACTION_REMOVED)
        self.assertEqual(len(changes), 1)
        notify_priv = self.smb_conn.notify(fnum=monitor_priv_fnum,
                                           buffer_size=0xffff,
                                           completion_filter=libsmb.FILE_NOTIFY_CHANGE_ALL,
                                           recursive=True)

        # check that the unprivileged user does not receive the changes
        self.smb_conn_unpriv.echo()
        changes = notify_unpriv.get_changes(wait=False)
        self.assertIsNone(changes)
        # and there's no additional change for the privileged user
        self.smb_conn.echo()
        changes = notify_priv.get_changes(wait=False)
        self.assertIsNone(changes)

        # trigger notification for both
        self.smb_conn.rmdir(private_path)

        # check that both get the notification
        changes = notify_unpriv.get_changes(wait=True)
        self.assertIsNotNone(changes)
        self.assertEqual(changes[0]['name'], private_rel)
        self.assertEqual(changes[0]['action'], libsmb.NOTIFY_ACTION_REMOVED)
        self.assertEqual(len(changes), 1)
        notify_unpriv = self.smb_conn_unpriv.notify(fnum=monitor_unpriv_fnum,
                                                    buffer_size=0xffff,
                                                    completion_filter=libsmb.FILE_NOTIFY_CHANGE_ALL,
                                                    recursive=True)
        changes = notify_priv.get_changes(wait=True)
        self.assertIsNotNone(changes)
        self.assertEqual(changes[0]['name'], private_rel)
        self.assertEqual(changes[0]['action'], libsmb.NOTIFY_ACTION_REMOVED)
        self.assertEqual(len(changes), 1)
        notify_priv = self.smb_conn.notify(fnum=monitor_priv_fnum,
                                           buffer_size=0xffff,
                                           completion_filter=libsmb.FILE_NOTIFY_CHANGE_ALL,
                                           recursive=True)

        # check that the unprivileged user does not receive the changes
        self.smb_conn_unpriv.echo()
        changes = notify_unpriv.get_changes(wait=False)
        self.assertIsNone(changes)
        # and there's no additional change for the privileged user
        self.smb_conn.echo()
        changes = notify_priv.get_changes(wait=False)
        self.assertIsNone(changes)

        # closing the handle will trigger a NOTIFY_CLEANUP
        self.smb_conn_unpriv.close(monitor_unpriv_fnum)
        try:
            changes = notify_unpriv.get_changes(wait=True)
            self.fail()
        except samba.NTSTATUSError as err:
            self.assertEqual(err.args[0], NT_STATUS_NOTIFY_CLEANUP)

        # there's no additional change for the privileged user
        self.smb_conn.echo()
        changes = notify_priv.get_changes(wait=False)
        self.assertIsNone(changes)

        # closing the handle will trigger a NOTIFY_CLEANUP
        self.smb_conn.close(monitor_priv_fnum)
        try:
            changes = notify_priv.get_changes(wait=True)
            self.fail()
        except samba.NTSTATUSError as err:
            self.assertEqual(err.args[0], NT_STATUS_NOTIFY_CLEANUP)
+
+ def test_notify_privileged_test(self):
+ return self._test_notify_privileged_path(monitor_path=test_dir, rel_prefix="")
+
+ def test_notify_privileged_root(self):
+ return self._test_notify_privileged_path(monitor_path="", rel_prefix=test_dir)
+
if __name__ == "__main__":
    # Allow running this test file directly.
    import unittest
    unittest.main()
diff --git a/python/samba/tests/smb.py b/python/samba/tests/smb.py
new file mode 100644
index 0000000..89b0aca
--- /dev/null
+++ b/python/samba/tests/smb.py
@@ -0,0 +1,236 @@
+# -*- coding: utf-8 -*-
+# Unix SMB/CIFS implementation. Tests for smb manipulation
+# Copyright (C) David Mulder <dmulder@suse.com> 2018
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import samba
+import os
+import random
+import sys
+from samba import NTSTATUSError
+from samba.ntstatus import (NT_STATUS_OBJECT_NAME_NOT_FOUND,
+ NT_STATUS_OBJECT_PATH_NOT_FOUND)
+from samba.samba3 import libsmb_samba_internal as libsmb
+from samba.samba3 import param as s3param
+
# True on Python 3 — retained for the py2/py3-conditional tests below.
PY3 = sys.version_info[0] == 3
# NOTE(review): if REALM is not set, os.environ.get() returns None and
# the .lower() below raises AttributeError at import time — confirm the
# test runner always exports REALM.
realm = os.environ.get('REALM')
domain_dir = realm.lower() + '/'
# Fixture payloads: plain ASCII, non-ASCII UTF-8, bytes with embedded
# NULs, and raw binary data.
test_contents = 'abcd' * 256
utf_contents = u'Süßigkeiten Äpfel ' * 128
test_literal_bytes_embed_nulls = b'\xff\xfe\x14\x61\x00\x00\x62\x63\x64' * 256
binary_contents = b'\xff\xfe'
binary_contents = binary_contents + "Hello cruel world of python3".encode('utf8') * 128
# Randomised per-run locations inside the sysvol share.
test_dir = os.path.join(domain_dir, 'testing_%d' % random.randint(0, 0xFFFF))
test_file = os.path.join(test_dir, 'testing').replace('/', '\\')
+
+
+class SMBTests(samba.tests.TestCase):
    def setUp(self):
        """Connect to the server's sysvol share and create test_dir."""
        super().setUp()
        self.server = os.environ["SERVER"]
        creds = self.insta_creds(template=self.get_credentials())

        # create an SMB connection to the server
        lp = s3param.get_context()
        lp.load(os.getenv("SMB_CONF_PATH"))
        self.smb_conn = libsmb.Conn(self.server, "sysvol", lp, creds)

        self.smb_conn.mkdir(test_dir)
+
+ def tearDown(self):
+ super().tearDown()
+ try:
+ self.smb_conn.deltree(test_dir)
+ except:
+ pass
+
    def test_list(self):
        """Directory listings return expected entries, masks and keys."""
        # check a basic listing returns the items we expect
        ls = [f['name'] for f in self.smb_conn.list(domain_dir)]
        self.assertIn('scripts', ls,
                      msg='"scripts" directory not found in sysvol')
        self.assertIn('Policies', ls,
                      msg='"Policies" directory not found in sysvol')
        self.assertNotIn('..', ls,
                         msg='Parent (..) found in directory listing')
        self.assertNotIn('.', ls,
                         msg='Current dir (.) found in directory listing')

        # using a '*' mask should be the same as using no mask
        ls_wildcard = [f['name'] for f in self.smb_conn.list(domain_dir, "*")]
        self.assertEqual(ls, ls_wildcard)

        # applying a mask should only return items that match that mask
        ls_pol = [f['name'] for f in self.smb_conn.list(domain_dir, "Pol*")]
        expected = ["Policies"]
        self.assertEqual(ls_pol, expected)

        # each item in the listing is a dict with the expected keys
        expected_keys = ['attrib', 'mtime', 'name', 'short_name', 'size']
        for item in self.smb_conn.list(domain_dir):
            for key in expected_keys:
                self.assertIn(key, item,
                              msg="Key '%s' not in listing '%s'" % (key, item))
+
    def test_deltree(self):
        """The smb.deltree API should delete files and sub-dirs"""
        # create some test sub-dirs
        dirpaths = []
        empty_dirs = []
        cur_dir = test_dir

        # build a nested chain test_dir/subdir-X/subdir-Y/subdir-Z
        for subdir in ["subdir-X", "subdir-Y", "subdir-Z"]:
            path = self.make_sysvol_path(cur_dir, subdir)
            self.smb_conn.mkdir(path)
            dirpaths.append(path)
            cur_dir = path

            # create another empty dir just for kicks
            path = self.make_sysvol_path(cur_dir, "another")
            self.smb_conn.mkdir(path)
            empty_dirs.append(path)

        # create some files in these directories
        filepaths = []
        for subdir in dirpaths:
            for i in range(1, 4):
                contents = "I'm file {0} in dir {1}!".format(i, subdir)
                path = self.make_sysvol_path(subdir, "file-{0}.txt".format(i))
                self.smb_conn.savefile(path, contents.encode('utf8'))
                filepaths.append(path)

        # sanity-check these dirs/files exist
        for subdir in dirpaths + empty_dirs:
            self.assertTrue(self.smb_conn.chkpath(subdir),
                            "Failed to create {0}".format(subdir))
        for path in filepaths:
            self.assertTrue(self.file_exists(path),
                            "Failed to create {0}".format(path))

        # try using deltree to remove a single empty directory
        path = empty_dirs.pop(0)
        self.smb_conn.deltree(path)
        self.assertFalse(self.smb_conn.chkpath(path),
                         "Failed to delete {0}".format(path))

        # try using deltree to remove a single file
        path = filepaths.pop(0)
        self.smb_conn.deltree(path)
        self.assertFalse(self.file_exists(path),
                         "Failed to delete {0}".format(path))

        # delete the top-level dir
        self.smb_conn.deltree(test_dir)

        # now check that all the dirs/files are no longer there
        for subdir in dirpaths + empty_dirs:
            self.assertFalse(self.smb_conn.chkpath(subdir),
                             "Failed to delete {0}".format(subdir))
        for path in filepaths:
            self.assertFalse(self.file_exists(path),
                             "Failed to delete {0}".format(path))
+
+ def file_exists(self, filepath):
+ """Returns whether a regular file exists (by trying to open it)"""
+ try:
+ self.smb_conn.loadfile(filepath)
+ exists = True
+ except NTSTATUSError as err:
+ if (err.args[0] == NT_STATUS_OBJECT_NAME_NOT_FOUND or
+ err.args[0] == NT_STATUS_OBJECT_PATH_NOT_FOUND):
+ exists = False
+ else:
+ raise err
+ return exists
+
    def test_unlink(self):
        """
        The smb.unlink API should delete file
        """
        # create the test file
        self.assertFalse(self.file_exists(test_file))
        self.smb_conn.savefile(test_file, binary_contents)
        self.assertTrue(self.file_exists(test_file))

        # delete it and check that it's gone
        self.smb_conn.unlink(test_file)
        self.assertFalse(self.file_exists(test_file))
+
    def test_chkpath(self):
        """Tests .chkpath determines whether or not a directory exists"""

        self.assertTrue(self.smb_conn.chkpath(test_dir))

        # should return False for a non-existent directory
        bad_dir = self.make_sysvol_path(test_dir, 'dont_exist')
        self.assertFalse(self.smb_conn.chkpath(bad_dir))

        # should return False for files (because they're not directories)
        self.smb_conn.savefile(test_file, binary_contents)
        self.assertFalse(self.smb_conn.chkpath(test_file))

        # check correct result after creating and then deleting a new dir
        new_dir = self.make_sysvol_path(test_dir, 'test-new')
        self.smb_conn.mkdir(new_dir)
        self.assertTrue(self.smb_conn.chkpath(new_dir))
        self.smb_conn.rmdir(new_dir)
        self.assertFalse(self.smb_conn.chkpath(new_dir))
+
    def test_save_load_text(self):
        """Round-trip UTF-8 text through savefile/loadfile."""
        self.smb_conn.savefile(test_file, test_contents.encode('utf8'))

        contents = self.smb_conn.loadfile(test_file)
        self.assertEqual(contents.decode('utf8'), test_contents,
                         msg='contents of test file did not match what was written')

        # check we can overwrite the file with new contents
        new_contents = 'wxyz' * 128
        self.smb_conn.savefile(test_file, new_contents.encode('utf8'))
        contents = self.smb_conn.loadfile(test_file)
        self.assertEqual(contents.decode('utf8'), new_contents,
                         msg='contents of test file did not match what was written')
+
    # with python2 this will save/load str type (with embedded nulls)
    # with python3 this will save/load bytes type
    def test_save_load_string_bytes(self):
        """Bytes with embedded NULs survive a save/load round-trip."""
        self.smb_conn.savefile(test_file, test_literal_bytes_embed_nulls)

        contents = self.smb_conn.loadfile(test_file)
        self.assertEqual(contents, test_literal_bytes_embed_nulls,
                         msg='contents of test file did not match what was written')
+
+ # python3 only this will save/load unicode
+ def test_save_load_utfcontents(self):
+ if PY3:
+ self.smb_conn.savefile(test_file, utf_contents.encode('utf8'))
+
+ contents = self.smb_conn.loadfile(test_file)
+ self.assertEqual(contents.decode('utf8'), utf_contents,
+ msg='contents of test file did not match what was written')
+
    # with python2 this will save/load str type
    # with python3 this will save/load bytes type
    def test_save_binary_contents(self):
        """Raw binary data survives a save/load round-trip."""
        self.smb_conn.savefile(test_file, binary_contents)

        contents = self.smb_conn.loadfile(test_file)
        self.assertEqual(contents, binary_contents,
                         msg='contents of test file did not match what was written')
+
+ def make_sysvol_path(self, dirpath, filename):
+ # return the dir + filename as a sysvol path
+ return os.path.join(dirpath, filename).replace('/', '\\')
diff --git a/python/samba/tests/smb1posix.py b/python/samba/tests/smb1posix.py
new file mode 100644
index 0000000..22ca6a2
--- /dev/null
+++ b/python/samba/tests/smb1posix.py
@@ -0,0 +1,71 @@
+# Unix SMB/CIFS implementation.
+# Copyright Volker Lendecke <vl@samba.org> 2022
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from samba.samba3 import libsmb_samba_internal as libsmb
+from samba import (ntstatus,NTSTATUSError)
+import samba.tests.libsmb
+import stat
+
+class Smb1PosixTests(samba.tests.libsmb.LibsmbTests):
+
    def test_directory_case_sensivity(self):
        """Test that in smb1 posix dirs are case sensitive

        NOTE(review): the method name misspells "sensitivity"; it is
        kept as-is because it is the public test id.
        """
        conn = libsmb.Conn(
            self.server_ip,
            "posix_share",
            self.lp,
            self.creds,
            force_smb1=True)
        conn.smb1_posix()

        # Tolerate leftovers from a previous run.
        try:
            conn.mkdir("lower")
        except NTSTATUSError as e:
            if e.args[0] != ntstatus.NT_STATUS_OBJECT_NAME_COLLISION:
                raise
        try:
            conn.mkdir("lower/second")
        except NTSTATUSError as e:
            if e.args[0] != ntstatus.NT_STATUS_OBJECT_NAME_COLLISION:
                raise

        # In posix mode "Lower" must NOT match "lower".
        self.assertFalse(conn.chkpath("Lower/second"))
        conn.rmdir("lower/second")
        conn.rmdir("lower")
+
    def test_mknod(self):
        """Test SMB1 posix mknod"""
        conn = libsmb.Conn(
            self.server_ip,
            "posix_share",
            self.lp,
            self.creds,
            force_smb1=True)
        conn.smb1_posix()

        # Create a special file, stat it back, check type+mode, clean up.
        def do_test(name, filetype):
            conn.mknod(name, filetype | 0o755)
            st = conn.smb1_stat(name)
            self.assertEqual(st["mode"], filetype | 0o755)
            conn.unlink(name)

        do_test("fifo", stat.S_IFIFO)
        do_test("sock", stat.S_IFSOCK)
+
if __name__ == '__main__':
    # Allow running this test file directly.
    import unittest
    unittest.main()
diff --git a/python/samba/tests/smb2symlink.py b/python/samba/tests/smb2symlink.py
new file mode 100644
index 0000000..83df78e
--- /dev/null
+++ b/python/samba/tests/smb2symlink.py
@@ -0,0 +1,216 @@
+# Unix SMB/CIFS implementation.
+# Copyright Volker Lendecke <vl@samba.org> 2022
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from samba.samba3 import libsmb_samba_internal as libsmb
+from samba import reparse_symlink
+from samba import (ntstatus,NTSTATUSError)
+from samba.dcerpc import security as sec
+import samba.tests.libsmb
+
+class Smb2SymlinkTests(samba.tests.libsmb.LibsmbTests):
+
+ def connections(self, smb1share=None, smb2share=None):
+ if not smb1share:
+ smb1share = samba.tests.env_get_var_value(
+ "SMB1_SHARE", allow_missing=True)
+ if not smb1share:
+ smb1share = "nosymlinks_smb1allow"
+
+ try:
+ smb1 = libsmb.Conn(
+ self.server_ip,
+ smb1share,
+ self.lp,
+ self.creds,
+ force_smb1=True)
+ except NTSTATUSError as e:
+ if e.args[0] != ntstatus.NT_STATUS_CONNECTION_RESET:
+ raise
+ smb1.smb1_posix()
+
+ if not smb2share:
+ smb2share = samba.tests.env_get_var_value(
+ "SMB2_SHARE", allow_missing=True)
+ if not smb2share:
+ smb2share = "nosymlinks"
+
+ smb2 = libsmb.Conn(
+ self.server_ip,
+ smb2share,
+ self.lp,
+ self.creds)
+ return (smb1, smb2)
+
+ def create_symlink(self, conn, target, symlink):
+ self.clean_file(conn, symlink)
+ if (conn.protocol() < libsmb.PROTOCOL_SMB2_02 and conn.have_posix()):
+ conn.smb1_symlink(target, symlink)
+ else:
+ flags = 0 if target[0]=='/' else 1
+ syml = conn.create(
+ symlink,
+ DesiredAccess=sec.SEC_FILE_READ_ATTRIBUTE|
+ sec.SEC_FILE_WRITE_ATTRIBUTE|
+ sec.SEC_STD_DELETE,
+ FileAttributes=libsmb.FILE_ATTRIBUTE_NORMAL,
+ CreateDisposition=libsmb.FILE_OPEN_IF,
+ CreateOptions=libsmb.FILE_OPEN_REPARSE_POINT)
+ b = reparse_symlink.symlink_put(target, target, 0, 1)
+ conn.fsctl(syml, libsmb.FSCTL_SET_REPARSE_POINT, b, 0)
+ conn.close(syml)
+
    def assert_symlink_exception(self, e, expect):
        """Assert *e* is STOPPED_ON_SYMLINK carrying the fields in *expect*.

        *expect* maps symlink-error-response field names to expected
        values; the symlink trust bits in 'flags' are masked out.
        """
        self.assertEqual(e.args[0], ntstatus.NT_STATUS_STOPPED_ON_SYMLINK)
        for k,v in expect.items():
            if (k == "flags"):
                # Ignore symlink trust flags for now
                expected = v & ~libsmb.SYMLINK_TRUST_MASK
                got = e.args[2].get(k) & ~libsmb.SYMLINK_TRUST_MASK
                self.assertEqual((k,got), (k,expected))
            else:
                self.assertEqual((k,e.args[2].get(k)), (k,v))
+
    def test_symlinkerror_directory(self):
        """Test a symlink in a nonterminal path component"""
        (smb1,smb2) = self.connections()
        symlink="syml"
        target="foo"
        suffix="bar"

        self.create_symlink(smb1, target, symlink)

        # Opening syml\bar over SMB2 must fail with STOPPED_ON_SYMLINK,
        # reporting "bar" (plus separator) as the unparsed remainder.
        with self.assertRaises(NTSTATUSError) as e:
            fd = smb2.create_ex(f'{symlink}\\{suffix}')

        self.assert_symlink_exception(
            e.exception,
            { 'unparsed_path_length' : len(suffix)+1,
              'substitute_name' : target,
              'print_name' : target,
              'flags' : 0x20000001 })

        self.clean_file(smb1, symlink)
+
    def test_symlinkerror_file(self):
        """Test a simple symlink in a terminal path"""
        (smb1,smb2) = self.connections()
        symlink="syml"
        target="foo"

        self.create_symlink(smb1, target, symlink)

        # Terminal symlink: nothing left unparsed.
        with self.assertRaises(NTSTATUSError) as e:
            fd = smb2.create_ex(f'{symlink}')

        self.assert_symlink_exception(
            e.exception,
            { 'unparsed_path_length' : 0,
              'substitute_name' : target,
              'print_name' : target,
              'flags' : 0x20000001 })

        self.clean_file(smb1, symlink)
+
    def test_symlinkerror_absolute_outside_share(self):
        """
        Test symlinks to outside of the share
        We return the contents 1:1
        """
        (smb1,smb2) = self.connections()
        symlink="syml"

        for target in ["/etc", "//foo/bar", "/"]:

            self.create_symlink(smb1, target, symlink)

            with self.assertRaises(NTSTATUSError) as e:
                fd = smb2.create_ex(f'{symlink}')

            # Targets outside the share are reported verbatim,
            # with no relative flag set.
            self.assert_symlink_exception(
                e.exception,
                { 'unparsed_path_length' : 0,
                  'substitute_name' : target,
                  'print_name' : target,
                  'flags' : 0 })

            self.clean_file(smb1, symlink)
+
    def test_symlinkerror_absolute_inshare(self):
        """Test an absolute symlink inside the share"""
        (smb1,smb2) = self.connections()
        symlink="syml"

        # Build an absolute filesystem path that resolves inside
        # the share root.
        localpath=samba.tests.env_get_var_value("LOCAL_PATH")
        shareroot=f'{localpath}/nosymlinks'
        rel_dest="dst"
        target=f'{shareroot}/{rel_dest}'

        self.create_symlink(smb1, target, symlink)

        with self.assertRaises(NTSTATUSError) as e:
            fd = smb2.create_ex(f'{symlink}')

        # An in-share absolute target is rewritten to a share-relative
        # name in the error response.
        self.assert_symlink_exception(
            e.exception,
            { 'unparsed_path_length' : 0,
              'substitute_name' : rel_dest,
              'print_name' : rel_dest,
              'flags' : 0 })

        self.clean_file(smb1, symlink)
+
+ def test_symlink_reparse_data_buffer_parse(self):
+ """Test parsing a symlink reparse buffer coming from Windows"""
+
+ buf = (b'\x0c\x00\x00\xa0\x18\x00\x00\x00'
+ b'\x06\x00\x06\x00\x00\x00\x06\x00'
+ b'\x01\x00\x00\x00\x62\x00\x61\x00'
+ b'\x72\x00\x62\x00\x61\x00\x72\x00')
+
+ try:
+ syml = reparse_symlink.symlink_get(buf);
+ except:
+ self.fail("Could not parse symlink buffer")
+
+ self.assertEqual(syml, ('bar', 'bar', 0, 1));
+
    def test_bug15505(self):
        """Test an absolute intermediate symlink inside the share"""
        (smb1,smb2) = self.connections(smb1share="tmp",smb2share="tmp")
        symlink="syml"

        localpath=samba.tests.env_get_var_value("LOCAL_PATH")

        # sub/lnk is an absolute symlink to <share>/sub1
        smb1.mkdir("sub")
        self.addCleanup(self.clean_file, smb1, "sub")

        self.create_symlink(smb1, f'{localpath}/sub1', "sub/lnk")
        self.addCleanup(self.clean_file, smb1, "sub/lnk")

        smb1.mkdir("sub1")
        self.addCleanup(self.clean_file, smb1, "sub1")

        fd = smb1.create("sub1/x", CreateDisposition=libsmb.FILE_CREATE)
        smb1.close(fd)
        self.addCleanup(self.clean_file, smb1, "sub1/x")

        # Opening through the intermediate symlink must succeed
        # (regression test for bug 15505).
        fd = smb2.create("sub\\lnk\\x")
        smb2.close(fd)
+
if __name__ == '__main__':
    # Allow running this test file directly.
    import unittest
    unittest.main()
diff --git a/python/samba/tests/smb3unix.py b/python/samba/tests/smb3unix.py
new file mode 100644
index 0000000..d88bd43
--- /dev/null
+++ b/python/samba/tests/smb3unix.py
@@ -0,0 +1,418 @@
+# Unix SMB/CIFS implementation.
+# Copyright Volker Lendecke <vl@samba.org> 2022
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from samba.samba3 import libsmb_samba_internal as libsmb
+from samba import NTSTATUSError,ntstatus
+import samba.tests.libsmb
+from samba.dcerpc import security
+from samba.common import get_string
+from samba.dcerpc import smb3posix
+from samba.ndr import ndr_unpack
+from samba.dcerpc.security import dom_sid
+import os
+
def posix_context(mode):
    """Build a POSIX create-context pair: (tag, mode as little-endian u32)."""
    blob = mode.to_bytes(4, 'little')
    return (libsmb.SMB2_CREATE_TAG_POSIX, blob)
+
+class Smb3UnixTests(samba.tests.libsmb.LibsmbTests):
+
+ def setUp(self):
+ super().setUp()
+
+ self.samsid = os.environ["SAMSID"]
+
+ def connections(self, share1=None, posix1=False, share2=None, posix2=True):
+ if not share1:
+ share1 = samba.tests.env_get_var_value(
+ "SHARE1", allow_missing=True)
+ if not share1:
+ share1 = "tmp"
+
+ if not share2:
+ share2 = samba.tests.env_get_var_value(
+ "SHARE2", allow_missing=True)
+ if not share2:
+ share2 = "tmp"
+
+ conn1 = libsmb.Conn(
+ self.server_ip,
+ share1,
+ self.lp,
+ self.creds,
+ posix=posix1)
+
+ conn2 = libsmb.Conn(
+ self.server_ip,
+ share2,
+ self.lp,
+ self.creds,
+ posix=posix2)
+
+ return (conn1, conn2)
+
+ def test_negotiate_context_posix(self):
+ c = libsmb.Conn(
+ self.server_ip,
+ "tmp",
+ self.lp,
+ self.creds,
+ posix=True)
+ self.assertTrue(c.have_posix())
+
+ def test_negotiate_context_posix_invalid_length(self):
+ with self.assertRaises(NTSTATUSError) as cm:
+ c = libsmb.Conn(
+ self.server_ip,
+ "tmp",
+ self.lp,
+ self.creds,
+ negotiate_contexts=[(0x100, b'01234')])
+
+ e = cm.exception
+ self.assertEqual(e.args[0], ntstatus.NT_STATUS_INVALID_PARAMETER)
+
+ def test_negotiate_context_posix_invalid_blob(self):
+ c = libsmb.Conn(
+ self.server_ip,
+ "tmp",
+ self.lp,
+ self.creds,
+ negotiate_contexts=[(0x100, b'0123456789012345')])
+ self.assertFalse(c.have_posix())
+
+ def test_posix_create_context(self):
+ c = libsmb.Conn(
+ self.server_ip,
+ "tmp",
+ self.lp,
+ self.creds,
+ posix=True)
+ self.assertTrue(c.have_posix())
+
+ cc_in=[(libsmb.SMB2_CREATE_TAG_POSIX,b'0000')]
+ fnum,_,cc_out = c.create_ex("",CreateContexts=cc_in)
+ self.assertEqual(cc_in[0][0],cc_out[0][0])
+
+ c.close(fnum)
+
+ def test_posix_create_invalid_context_length(self):
+ c = libsmb.Conn(
+ self.server_ip,
+ "tmp",
+ self.lp,
+ self.creds,
+ posix=True)
+ self.assertTrue(c.have_posix())
+
+ cc_in=[(libsmb.SMB2_CREATE_TAG_POSIX,b'00000')]
+
+ with self.assertRaises(NTSTATUSError) as cm:
+ fnum,_,cc_out = c.create_ex("",CreateContexts=cc_in)
+
+ e = cm.exception
+ self.assertEqual(e.args[0], ntstatus.NT_STATUS_INVALID_PARAMETER)
+
+ def delete_test_file(self, c, fname, mode=0):
+ f,_,cc_out = c.create_ex(fname,
+ DesiredAccess=security.SEC_STD_ALL,
+ CreateDisposition=libsmb.FILE_OPEN,
+ CreateContexts=[posix_context(mode)])
+ c.delete_on_close(f, True)
+ c.close(f)
+
+ def test_posix_query_dir(self):
+ test_files = []
+ try:
+ c = libsmb.Conn(
+ self.server_ip,
+ "smb3_posix_share",
+ self.lp,
+ self.creds,
+ posix=True)
+ self.assertTrue(c.have_posix())
+
+ for i in range(10):
+ fname = '\\test%d' % i
+ f,_,cc_out = c.create_ex(fname,
+ CreateDisposition=libsmb.FILE_OPEN_IF,
+ CreateContexts=[posix_context(0o744)])
+ c.close(f)
+ test_files.append(fname)
+
+ expected_count = len(c.list(''))
+ self.assertNotEqual(expected_count, 0, 'No files were found')
+
+ actual_count = len(c.list('',
+ info_level=libsmb.SMB2_FIND_POSIX_INFORMATION))
+ self.assertEqual(actual_count-2, expected_count,
+ 'SMB2_FIND_POSIX_INFORMATION failed to list contents')
+
+ finally:
+ if len(test_files) > 0:
+ for fname in test_files:
+ self.delete_test_file(c, fname)
+
+ def test_posix_reserved_char(self):
+ c = libsmb.Conn(
+ self.server_ip,
+ "smb3_posix_share",
+ self.lp,
+ self.creds,
+ posix=True)
+ self.assertTrue(c.have_posix())
+
+ test_files = ['a ', 'a ', '. ', '. ', 'a.',
+ '.a', ' \\ ', '>', '<' '?']
+
+ for fname in test_files:
+ try:
+ f,_,cc_out = c.create_ex('\\%s' % fname,
+ CreateDisposition=libsmb.FILE_CREATE,
+ DesiredAccess=security.SEC_STD_DELETE,
+ CreateContexts=[posix_context(0o744)])
+ except NTSTATUSError as e:
+ self.fail(e)
+ c.delete_on_close(f, True)
+ c.close(f)
+
+ def test_posix_delete_on_close(self):
+ c = libsmb.Conn(
+ self.server_ip,
+ "smb3_posix_share",
+ self.lp,
+ self.creds,
+ posix=True)
+ self.assertTrue(c.have_posix())
+
+ f,_,cc_out = c.create_ex('\\TESTING999',
+ DesiredAccess=security.SEC_STD_ALL,
+ CreateDisposition=libsmb.FILE_CREATE,
+ CreateContexts=[posix_context(0o744)])
+ c.delete_on_close(f, True)
+ c.close(f)
+
+ def test_posix_case_sensitive(self):
+ try:
+ c = libsmb.Conn(
+ self.server_ip,
+ "smb3_posix_share",
+ self.lp,
+ self.creds,
+ posix=True)
+ self.assertTrue(c.have_posix())
+
+ f,_,cc_out = c.create_ex('\\xx',
+ DesiredAccess=security.SEC_STD_ALL,
+ CreateDisposition=libsmb.FILE_CREATE,
+ CreateContexts=[posix_context(0o644)])
+ c.close(f)
+
+ fail = False
+ try:
+ f,_,cc_out = c.create_ex('\\XX',
+ DesiredAccess=security.SEC_STD_ALL,
+ CreateDisposition=libsmb.FILE_OPEN,
+ CreateContexts=[posix_context(0)])
+ except NTSTATUSError:
+ pass
+ else:
+ fail = True
+ c.close(f)
+
+ self.assertFalse(fail, "Opening uppercase file didn't fail")
+
+ finally:
+ self.delete_test_file(c, '\\xx')
+
    def test_posix_perm_files(self):
        """Create files and directories with every sane permission set and
        verify the POSIX directory listing reports them back faithfully
        (perms, SIDs, link count, size, attributes)."""
        test_files = {}
        try:
            c = libsmb.Conn(
                self.server_ip,
                "smb3_posix_share",
                self.lp,
                self.creds,
                posix=True)
            self.assertTrue(c.have_posix())

            for perm in range(0o600, 0o7777+1):
                # Owner write permission is required or cleanup will fail, and
                # owner read is required to list the file if O_PATH is disabled
                if perm & 0o600 != 0o600:
                    continue

                # Don't create with setuid or setgid.
                if perm & 0o6000 != 0:
                    continue

                fname = 'testfile%04o' % perm
                test_files[fname] = perm
                f,_,cc_out = c.create_ex('\\%s' % fname,
                                DesiredAccess=security.SEC_FILE_ALL,
                                CreateDisposition=libsmb.FILE_CREATE,
                                CreateContexts=[posix_context(perm)])
                # Write only when the owner-write bit is set, so the size
                # assertion below corresponds to the permission bits.
                if perm & 0o200 == 0o200:
                    c.write(f, buffer=b"data", offset=0)
                c.close(f)

                dname = 'testdir%04o' % perm
                test_files[dname] = perm
                f,_,cc_out = c.create_ex('\\%s' % dname,
                                DesiredAccess=security.SEC_STD_ALL,
                                CreateDisposition=libsmb.FILE_CREATE,
                                CreateOptions=libsmb.FILE_DIRECTORY_FILE,
                                CreateContexts=[posix_context(perm)])
                c.close(f)

            res = c.list("", info_level=libsmb.SMB2_FIND_POSIX_INFORMATION)

            found_files = {get_string(i['name']): i for i in res}
            for fname,perm in test_files.items():
                self.assertIn(get_string(fname), found_files.keys(),
                              'Test file not found')
                self.assertEqual(test_files[fname], found_files[fname]['perms'],
                                 'Requested %04o, Received %04o' % \
                                         (test_files[fname], found_files[fname]['perms']))

                self.assertEqual(found_files[fname]['reparse_tag'],
                                 libsmb.IO_REPARSE_TAG_RESERVED_ZERO)
                self.assertEqual(found_files[fname]['perms'], perm)
                # Files are created as the first domain user (RID 1000);
                # the group is a Unix-group SID (S-1-22-2 prefix).
                self.assertEqual(found_files[fname]['owner_sid'],
                                 self.samsid + "-1000")
                self.assertTrue(found_files[fname]['group_sid'].startswith("S-1-22-2-"))

                if fname.startswith("testfile"):
                    self.assertEqual(found_files[fname]['nlink'], 1)
                    self.assertEqual(found_files[fname]['size'], 4)
                    # 'allocaction_size' [sic] is the key as provided by
                    # the listing binding — do not "fix" the spelling here.
                    self.assertEqual(found_files[fname]['allocaction_size'],
                                     4096)
                    self.assertEqual(found_files[fname]['attrib'],
                                     libsmb.FILE_ATTRIBUTE_ARCHIVE)
                else:
                    # An empty directory has links for '.' and its parent
                    # entry (NOTE: btrfs reports 1 — see test elsewhere).
                    self.assertEqual(found_files[fname]['nlink'], 2)
                    self.assertEqual(found_files[fname]['attrib'],
                                     libsmb.FILE_ATTRIBUTE_DIRECTORY)

        finally:
            if len(test_files) > 0:
                for fname in test_files.keys():
                    self.delete_test_file(c, '\\%s' % fname)
+
+ def test_share_root_null_sids_fid(self):
+ c = libsmb.Conn(
+ self.server_ip,
+ "smb3_posix_share",
+ self.lp,
+ self.creds,
+ posix=True)
+ self.assertTrue(c.have_posix())
+
+ res = c.list("", info_level=libsmb.SMB2_FIND_POSIX_INFORMATION)
+ found_files = {get_string(i['name']): i for i in res}
+ dotdot = found_files['..']
+ self.assertEqual('S-1-0-0', dotdot['owner_sid'],
+ 'The owner sid for .. was not NULL')
+ self.assertEqual('S-1-0-0', dotdot['group_sid'],
+ 'The group sid for .. was not NULL')
+ self.assertEqual(0, dotdot['ino'], 'The ino for .. was not 0')
+ self.assertEqual(0, dotdot['dev'], 'The dev for .. was not 0')
+
    def test_create_context_basic1(self):
        """
        Check basic CreateContexts response
        """
        try:
            c = libsmb.Conn(
                self.server_ip,
                "smb3_posix_share",
                self.lp,
                self.creds,
                posix=True)
            self.assertTrue(c.have_posix())

            f,_,cc_out = c.create_ex('\\test_create_context_basic1_file',
                                     DesiredAccess=security.SEC_STD_ALL,
                                     CreateDisposition=libsmb.FILE_CREATE,
                                     CreateContexts=[posix_context(0o600)])
            c.close(f)

            # Decode the POSIX create-context reply blob.
            cc = ndr_unpack(smb3posix.smb3_posix_cc_info, cc_out[0][1])

            self.assertEqual(cc.nlinks, 1)
            self.assertEqual(cc.reparse_tag, libsmb.IO_REPARSE_TAG_RESERVED_ZERO)
            self.assertEqual(cc.posix_perms, 0o600)
            # Owner is the first domain user (RID 1000); group is a
            # Unix-group SID (S-1-22-2 prefix).
            self.assertEqual(cc.owner, dom_sid(self.samsid + "-1000"))
            self.assertTrue(str(cc.group).startswith("S-1-22-2-"))

            f,_,cc_out = c.create_ex('\\test_create_context_basic1_dir',
                                     DesiredAccess=security.SEC_STD_ALL,
                                     CreateDisposition=libsmb.FILE_CREATE,
                                     CreateOptions=libsmb.FILE_DIRECTORY_FILE,
                                     CreateContexts=[posix_context(0o700)])

            c.close(f)

            cc = ndr_unpack(smb3posix.smb3_posix_cc_info, cc_out[0][1])

            # Note: this fails on btrfs which always reports the link
            # count of directories as one.
            self.assertEqual(cc.nlinks, 2)

            self.assertEqual(cc.reparse_tag, libsmb.IO_REPARSE_TAG_RESERVED_ZERO)
            self.assertEqual(cc.posix_perms, 0o700)
            self.assertEqual(cc.owner, dom_sid(self.samsid + "-1000"))
            self.assertTrue(str(cc.group).startswith("S-1-22-2-"))

        finally:
            # NOTE(review): if deleting the file raises, the directory is
            # never cleaned up — addCleanup would be more robust.
            self.delete_test_file(c, '\\test_create_context_basic1_file')
            self.delete_test_file(c, '\\test_create_context_basic1_dir')
+
+ def test_delete_on_close(self):
+ """
+ Test two opens with delete-on-close:
+ 1. Windows open
+ 2. POSIX open
+ Closing handle 1 should unlink the file, a subsequent directory
+ listing shouldn't list the deleted file.
+ """
+ (winconn,posixconn) = self.connections()
+
+ self.clean_file(winconn, 'test_delete_on_close')
+
+ fdw = winconn.create(
+ 'test_delete_on_close',
+ DesiredAccess=security.SEC_FILE_WRITE_ATTRIBUTE | security.SEC_STD_DELETE,
+ ShareAccess=0x07,
+ CreateDisposition=libsmb.FILE_CREATE)
+ self.addCleanup(self.clean_file, winconn, 'test_delete_on_close')
+
+ fdp,_,_ = posixconn.create_ex(
+ 'test_delete_on_close',
+ DesiredAccess=security.SEC_FILE_WRITE_ATTRIBUTE | security.SEC_STD_DELETE,
+ ShareAccess=0x07,
+ CreateDisposition=libsmb.FILE_OPEN,
+ CreateContexts=[posix_context(0o600)])
+
+ winconn.delete_on_close(fdw, 1)
+ posixconn.delete_on_close(fdp, 1)
+
+ winconn.close(fdw)
+
+ # The file should now already be deleted
+ l = winconn.list('', mask='test_delete_on_close')
+ found_files = {get_string(f['name']): f for f in l}
+ self.assertFalse('test_delete_on_close' in found_files)
diff --git a/python/samba/tests/smbconf.py b/python/samba/tests/smbconf.py
new file mode 100644
index 0000000..ce5f851
--- /dev/null
+++ b/python/samba/tests/smbconf.py
@@ -0,0 +1,352 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007
+# Copyright (C) John Mulligan <phlogistonjohn@asynchrono.us> 2022
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""
+Tests for samba.smbconf module
+"""
+
+from samba.samba3 import param as s3param
+import samba.tests
+
+
+class SMBConfTests(samba.tests.TestCase):
+ _smbconf = None
+ _s3smbconf = None
+
+ @property
+ def smbconf(self):
+ """Property to access module under test without
+ importing it at test module load-time.
+ """
+ if self._smbconf is not None:
+ return self._smbconf
+
+ import samba.smbconf
+
+ self._smbconf = samba.smbconf
+ return self._smbconf
+
+ @property
+ def s3smbconf(self):
+ if self._s3smbconf is not None:
+ return self._s3smbconf
+
+ import samba.samba3.smbconf
+
+ self._s3smbconf = samba.samba3.smbconf
+ return self._s3smbconf
+
    @property
    def example_conf_default(self):
        # Read-only example config used by the text-backend tests.
        return "./testdata/samba3/smb.conf"
+
    def setUp(self):
        """Load the test loadparm context into both configuration systems."""
        super().setUp()
        # fetch the configuration in the same style as other test suites
        self.lp_ctx = samba.tests.env_loadparm()
        # apply the configuration to the samba3 configuration
        # (because there are two... and they're independent!)
        # this is needed to make use of the registry
        s3_lp = s3param.get_context()
        s3_lp.load(self.lp_ctx.configfile)
+
+ def test_uninitalized_smbconf(self):
+ sconf = self.smbconf.SMBConf()
+ self.assertRaises(RuntimeError, sconf.requires_messaging)
+ self.assertRaises(RuntimeError, sconf.is_writeable)
+ self.assertRaises(RuntimeError, sconf.share_names)
+ self.assertRaises(RuntimeError, sconf.get_share, "foo")
+
+ def test_txt_backend_properties(self):
+ sconf = self.smbconf.init_txt(self.example_conf_default)
+ self.assertFalse(sconf.requires_messaging())
+ self.assertFalse(sconf.is_writeable())
+
+ def test_share_names(self):
+ sconf = self.smbconf.init_txt(self.example_conf_default)
+ names = sconf.share_names()
+ self.assertEqual(names, ["global", "cd1", "cd2", "media", "tmp"])
+
+ def test_get_share_cd1(self):
+ sconf = self.smbconf.init_txt(self.example_conf_default)
+ s1 = sconf.get_share("cd1")
+ self.assertEqual(s1, ("cd1", [("path", "/mnt/cd1"), ("public", "yes")]))
+
+ def test_get_share_cd2(self):
+ sconf = self.smbconf.init_txt(self.example_conf_default)
+ s1 = sconf.get_share("cd2")
+ self.assertEqual(s1, ("cd2", [("path", "/mnt/cd2"), ("public", "yes")]))
+
    def test_get_config(self):
        """get_config returns all services, global first, with parameters
        in file order."""
        sconf = self.smbconf.init_txt(self.example_conf_default)
        services = sconf.get_config()
        self.assertEqual(len(services), 5)
        self.assertEqual(
            services[0],
            (
                "global",
                [
                    ("workgroup", "SAMBA"),
                    ("security", "user"),
                    (
                        "passdb backend",
                        "smbpasswd:../testdata/samba3/smbpasswd "
                        "tdbsam:../testdata/samba3/passdb.tdb ldapsam:tdb://samba3.ldb",
                    ),
                    ("debug level", "5"),
                    ("netbios name", "BEDWYR"),
                ],
            ),
        )
        self.assertEqual(
            services[1], ("cd1", [("path", "/mnt/cd1"), ("public", "yes")])
        )
+
+ def test_init_reg(self):
+ sconf = self.s3smbconf.init_reg(None)
+ self.assertTrue(sconf.is_writeable())
+
+ def test_init_str_reg(self):
+ sconf = self.s3smbconf.init("registry:")
+ self.assertTrue(sconf.is_writeable())
+
+ def test_init_str_file(self):
+ sconf = self.s3smbconf.init(f"file:{self.example_conf_default}")
+ self.assertFalse(sconf.is_writeable())
+
+ def test_create_share(self):
+ sconf = self.s3smbconf.init_reg(None)
+ sconf.drop()
+ sconf.create_share("alice")
+ sconf.create_share("bob")
+ names = sconf.share_names()
+ self.assertEqual(names, ["alice", "bob"])
+ self.assertRaises(
+ self.smbconf.SMBConfError, sconf.create_share, "alice"
+ )
+
+ def test_drop_share(self):
+ sconf = self.s3smbconf.init_reg(None)
+ sconf.drop()
+ sconf.create_share("alice")
+ sconf.drop()
+ names = sconf.share_names()
+ self.assertEqual(names, [])
+
+ def test_set_parameter(self):
+ sconf = self.s3smbconf.init_reg(None)
+ sconf.drop()
+ sconf.create_share("foobar")
+ sconf.set_parameter("foobar", "path", "/mnt/foobar")
+ sconf.set_parameter("foobar", "browseable", "no")
+
+ s1 = sconf.get_share("foobar")
+ self.assertEqual(
+ s1, ("foobar", [("path", "/mnt/foobar"), ("browseable", "no")])
+ )
+
+ def test_set_global_parameter(self):
+ sconf = self.s3smbconf.init_reg(None)
+ sconf.drop()
+ sconf.set_global_parameter("workgroup", "EXAMPLE")
+ sconf.set_global_parameter("x:custom", "fake")
+
+ s1 = sconf.get_share("global")
+ self.assertEqual(
+ s1, ("global", [("workgroup", "EXAMPLE"), ("x:custom", "fake")])
+ )
+
+ def test_delete_share(self):
+ sconf = self.s3smbconf.init_reg(None)
+ sconf.drop()
+
+ sconf.create_share("alice")
+ sconf.create_share("bob")
+ names = sconf.share_names()
+ self.assertEqual(names, ["alice", "bob"])
+
+ sconf.delete_share("alice")
+ names = sconf.share_names()
+ self.assertEqual(names, ["bob"])
+
+ def test_create_set_share(self):
+ sconf = self.s3smbconf.init_reg(None)
+ sconf.drop()
+
+ params = [
+ ("path", "/mnt/baz"),
+ ("browseable", "yes"),
+ ("read only", "no"),
+ ]
+ sconf.create_set_share("baz", params)
+ self.assertEqual(sconf.get_share("baz"), ("baz", params))
+
+ self.assertRaises(
+ self.smbconf.SMBConfError, sconf.create_set_share, "baz", params
+ )
+ self.assertRaises(TypeError, sconf.create_set_share, "baz", None)
+ self.assertRaises(
+ ValueError, sconf.create_set_share, "baz", [None, None]
+ )
+ self.assertRaises(
+ TypeError, sconf.create_set_share, "baz", [("hi", None)]
+ )
+ self.assertRaises(
+ ValueError, sconf.create_set_share, "baz", [("a", "b", "c")]
+ )
+
+ def test_delete_parameter(self):
+ sconf = self.s3smbconf.init_reg(None)
+ sconf.drop()
+
+ params = [
+ ("path", "/mnt/baz"),
+ ("browseable", "yes"),
+ ("read only", "no"),
+ ]
+ sconf.create_set_share("baz", params)
+ self.assertEqual(sconf.get_share("baz"), ("baz", params))
+
+ sconf.delete_parameter("baz", "browseable")
+ self.assertEqual(
+ sconf.get_share("baz"),
+ (
+ "baz",
+ [
+ ("path", "/mnt/baz"),
+ ("read only", "no"),
+ ],
+ ),
+ )
+
+ def test_delete_global_parameter(self):
+ sconf = self.s3smbconf.init_reg(None)
+ sconf.drop()
+ sconf.set_global_parameter("workgroup", "EXAMPLE")
+ sconf.set_global_parameter("client min protocol", "NT1")
+ sconf.set_global_parameter("server min protocol", "SMB2")
+
+ s1 = sconf.get_share("global")
+ self.assertEqual(
+ s1,
+ (
+ "global",
+ [
+ ("workgroup", "EXAMPLE"),
+ ("client min protocol", "NT1"),
+ ("server min protocol", "SMB2"),
+ ],
+ ),
+ )
+
+ sconf.delete_global_parameter("server min protocol")
+ sconf.delete_global_parameter("client min protocol")
+ s1 = sconf.get_share("global")
+ self.assertEqual(s1, ("global", [("workgroup", "EXAMPLE")]))
+
+ def test_transaction_direct(self):
+ sconf = self.s3smbconf.init_reg(None)
+ sconf.drop()
+ sconf.set_global_parameter("workgroup", "EXAMPLE")
+
+ sconf.transaction_start()
+ sconf.set_global_parameter("client min protocol", "NT1")
+ sconf.set_global_parameter("server min protocol", "SMB2")
+ sconf.transaction_cancel()
+
+ s1 = sconf.get_share("global")
+ self.assertEqual(s1, ("global", [("workgroup", "EXAMPLE")]))
+
+ sconf.transaction_start()
+ sconf.set_global_parameter("client min protocol", "NT1")
+ sconf.set_global_parameter("server min protocol", "SMB2")
+ sconf.transaction_commit()
+
+ s1 = sconf.get_share("global")
+ self.assertEqual(
+ s1,
+ (
+ "global",
+ [
+ ("workgroup", "EXAMPLE"),
+ ("client min protocol", "NT1"),
+ ("server min protocol", "SMB2"),
+ ],
+ ),
+ )
+
+ def test_transaction_tryexc(self):
+ sconf = self.s3smbconf.init_reg(None)
+ sconf.drop()
+
+ def _mkshares(shares):
+ sconf.transaction_start()
+ try:
+ for name, params in shares:
+ sconf.create_set_share(name, params)
+ sconf.transaction_commit()
+ except Exception:
+ sconf.transaction_cancel()
+ raise
+
+ _mkshares(
+ [
+ ("hello", [("path", "/srv/world")]),
+ ("goodnight", [("path", "/srv/moon")]),
+ ]
+ )
+ # this call to _mkshares will fail the whole transaction because
+ # share name "goodnight" already exists
+ self.assertRaises(
+ self.smbconf.SMBConfError,
+ _mkshares,
+ [
+ ("mars", [("path", "/srv/mars")]),
+ ("goodnight", [("path", "/srv/phobos")]),
+ ],
+ )
+
+ names = sconf.share_names()
+ self.assertEqual(names, ["hello", "goodnight"])
+
+ def test_error_badfile(self):
+ with self.assertRaises(self.smbconf.SMBConfError) as raised:
+ self.smbconf.init_txt("/foo/bar/baz/_I-dont/.exist/-ok-")
+ self.assertEqual(
+ self.smbconf.SBC_ERR_BADFILE, raised.exception.error_code)
+
+ def test_error_not_supported(self):
+ sconf = self.smbconf.init_txt(self.example_conf_default)
+ with self.assertRaises(self.smbconf.SMBConfError) as raised:
+ sconf.set_global_parameter("client min protocol", "NT1")
+ self.assertEqual(
+ self.smbconf.SBC_ERR_NOT_SUPPORTED, raised.exception.error_code)
+
+ def test_error_no_such_service(self):
+ sconf = self.smbconf.init_txt(self.example_conf_default)
+ with self.assertRaises(self.smbconf.SMBConfError) as raised:
+ sconf.get_share("zilch")
+ self.assertEqual(
+ self.smbconf.SBC_ERR_NO_SUCH_SERVICE, raised.exception.error_code)
+
+
+
+if __name__ == "__main__":
+ import unittest
+
+ unittest.main()
diff --git a/python/samba/tests/smbd_base.py b/python/samba/tests/smbd_base.py
new file mode 100644
index 0000000..b67ba7e
--- /dev/null
+++ b/python/samba/tests/smbd_base.py
@@ -0,0 +1,48 @@
+# Unix SMB/CIFS implementation. Common code for smbd python bindings tests
+# Copyright (C) Catalyst.Net Ltd 2019
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+from samba.tests import TestCaseInTempDir
+import os
+
+TEST_UMASK = 0o042
+
class SmbdBaseTests(TestCaseInTempDir):
    """Base class for smbd python-binding tests.

    Sets a known umask before each test and asserts afterwards that the
    code under test restored it.
    """

    def get_umask(self):
        """Return the current process umask without changing it."""
        # The umask can only be read by writing it, so set-and-restore.
        previous = os.umask(0)
        os.umask(previous)
        return previous

    def setUp(self):
        super().setUp()
        # Remember the caller's umask so tearDown can restore it.
        self.orig_umask = self.get_umask()

        # Apply a recognisable umask - the underlying smbd code should
        # override this, but must also put it back, which tearDown checks.
        os.umask(TEST_UMASK)

    def tearDown(self):
        # If the umask is no longer TEST_UMASK, the code under test
        # changed it and failed to restore it.
        self.assertEqual(self.get_umask(), TEST_UMASK,
                         "umask unexpectedly overridden by test")

        # restore the original umask value (before we interfered with it)
        os.umask(self.orig_umask)

        super().tearDown()
diff --git a/python/samba/tests/smbd_fuzztest.py b/python/samba/tests/smbd_fuzztest.py
new file mode 100644
index 0000000..5b0726e
--- /dev/null
+++ b/python/samba/tests/smbd_fuzztest.py
@@ -0,0 +1,76 @@
+# Unix SMB/CIFS implementation. Tests for smbd fuzzing.
+# Copyright (C) Jeremy Allison 2019.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
import binascii
import os
import socket

import samba
import samba.tests
+
class fuzzsmbd(samba.tests.TestCase):
    """Fuzzing regression tests for smbd."""

    def test_bug_14205(self):
        """Send a malformed SMB1 Negprot followed by a valid SessionSetup.

        badblob consists of an incorrectly terminated SMB1 Negprot, with
        a valid SessionSetup after. BUG #14205 caused smbd to crash on
        this input; the test then scans the smbd debug log for a crash.
        """
        badblob = binascii.a2b_base64("AAAA1P9TTUJyAAAAABhDyAAAAAAAAAAAAAAAACcA/v8AAAAAALEAAlBDIE5F"
                                      "VFdPUksgUFJPR1JBTSD//jAAAk1JQ1JPU09GVCBOR1RXT1JLUyAxLjANDAJN"
                                      "SR3hkXOl0mb+QXW4Da/jp0f+AAAA1P9TTUJyAAAAABgDyAAABDQAAAAAAAAA"
                                      "ACcA/v8AAAAAALEAAlBDIE5FVFdPUksgUFJPR1JBFBX//jAAAk1JQ1JPU09G"
                                      "VCBOR1RXT1JLUyAxLjANDAJNSR3hkUal0mb+QXW4Da/jp0f+AAAA1P9TTUJz"
                                      "LTE0OEF1uA2v46dH/gqAIIwiAoRiVHWgODu8OdksJQAAAAAnAP7/AAAAAACx"
                                      "AAJQQyBORVRXT1JLIFBST0dSQU0g//4wAAJNSUNST1NPRlQgTkdUV09SS1Mg"
                                      "MS4wDQwCTUkd4ZFGpdJm/kF1uA2v46dH/gAAANT/U01Ccy0xNDgyMTIyOTE3"
                                      "Nzk2MzIAAAAAGAPIAAAAAAAAAAAAAAAAJwD+/wAAAAAAsQACUEMgTkVUV09S"
                                      "SyBQUk9HUkFNIP/+MAACTUlDUk9TT0ZUIE5HVFdPUktTIDEuMA0GAAAAAAAA"
                                      "AKXSZv5BdbgNr+OnR/4AAADU/1NNQnMtMTQ4MjEyMjkxNzc5NjMyNDQ4NDNA"
                                      "ujcyNjgAsQACUEMgTkVUF09SSyAgAAAAAAAAAP/+MAACTUlDUk9TT0bAIE5H"
                                      "BwAtMjMxODIxMjE4MTM5OTU0ODA2OP5BdbgNr+OnR/4KgCCMIgKEYlR1oDg7"
                                      "vDnZLCWy")
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            s.connect(("fileserver", 445))
            s.send(badblob)
            # Read the 39-byte SMB1 reply to the SMB1 Negprot.
            # This is an error message saying the Negprot was
            # invalid.
            s.recv(1024)
            try:
                # Read again to wait for the server to exit.
                s.recv(1024)
            except socket.error:
                # We expect a socket error here as in both success and
                # fail cases the server just resets the connection.
                pass
        finally:
            s.close()
        #
        # If the server crashed there is the
        # following message in the debug log.
        #
        crashed = False
        # Use a context manager so the log file handle is not leaked
        # (the original iterated an open() result without closing it).
        with open(os.environ['SMBD_TEST_LOG']) as log:
            for line in log:
                if "INTERNAL ERROR: Signal 11 in pid" in line:
                    print("Found crash in smbd log")
                    crashed = True
                    break
        self.assertFalse(crashed)
diff --git a/python/samba/tests/source.py b/python/samba/tests/source.py
new file mode 100644
index 0000000..c3ff8e0
--- /dev/null
+++ b/python/samba/tests/source.py
@@ -0,0 +1,242 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2011
+#
+# Loosely based on bzrlib's test_source.py
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Source level Python tests."""
+
+import io
+import errno
+import os
+import re
+import warnings
+
+from samba.tests import (
+ TestCase,
+)
+
+
def get_python_source_files():
    """Iterate over all Python source files."""
    here = os.path.dirname(__file__)

    # All .py files under the samba python library.
    library_dir = os.path.abspath(os.path.join(here, "..", "..", "samba"))
    assert os.path.isdir(library_dir), library_dir
    for root, dirs, files in os.walk(library_dir):
        for name in files:
            if name.endswith(".py"):
                yield os.path.abspath(os.path.join(root, name))

    # Scripts in bin/ that are symlinks into scripting/bin.
    bindir = os.path.abspath(os.path.join(here, "..", "..", "..", "..", "bin"))
    assert os.path.isdir(bindir), bindir
    for name in os.listdir(bindir):
        link = os.path.abspath(os.path.join(bindir, name))
        if not os.path.islink(link):
            continue
        if os.path.dirname(os.readlink(link)).endswith("scripting/bin"):
            yield link

    # The wafsamba build helpers are python too.
    wafsambadir = os.path.abspath(
        os.path.join(here, "..", "..", "..", "..", "buildtools", "wafsamba"))
    assert os.path.isdir(wafsambadir), wafsambadir
    for root, dirs, files in os.walk(wafsambadir):
        for name in files:
            if name.endswith(".py"):
                yield os.path.abspath(os.path.join(root, name))
+
+
def get_source_file_contents():
    """Iterate over (filename, text) for all python source files.

    Broken symlinks are reported as a warning and skipped rather than
    aborting the whole scan.
    """
    for fname in get_python_source_files():
        try:
            # 'with' guarantees the handle is closed; the built-in open()
            # is the py3 spelling of the legacy io.open().
            with open(fname, mode='r', encoding='utf-8') as f:
                text = f.read()
        except IOError as e:
            if e.errno == errno.ENOENT:
                warnings.warn("source file %s broken link?" % fname)
                continue
            else:
                raise
        yield fname, text
+
+
+class TestSource(TestCase):
+
+ def test_copyright(self):
+ """Test that all Python files have a valid copyright statement."""
+ incorrect = []
+
+ copyright_re = re.compile('#\\s*copyright.*(?=\n)', re.I)
+
+ for fname, text in get_source_file_contents():
+ if fname.endswith("ms_schema.py"):
+ # FIXME: Not sure who holds copyright on ms_schema.py
+ continue
+ if "wafsamba" in fname:
+ # FIXME: No copyright headers in wafsamba
+ continue
+ if fname.endswith("python/samba/tests/krb5/kcrypto.py"):
+ # Imported from MIT testing repo
+ continue
+ if fname.endswith("python/samba/tests/krb5/rfc4120_pyasn1_generated.py"):
+ # Autogenerated
+ continue
+ match = copyright_re.search(text)
+ if not match:
+ incorrect.append((fname, 'no copyright line found\n'))
+
+ if incorrect:
+ help_text = [
+ "Some files have missing or incorrect copyright"
+ " statements.", ""]
+ for fname, comment in incorrect:
+ help_text.append(fname)
+ help_text.append((' ' * 4) + comment)
+
+ self.fail('\n'.join(help_text))
+
    def test_gpl(self):
        """Test that all .py files have a GPL disclaimer."""
        incorrect = []

        # Two accepted variants of the GPLv3 boilerplate: they differ in
        # ';' vs ':' / ',' punctuation and http vs https in the URL.
        gpl_txts = [
            """
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
""",
"""
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <https://www.gnu.org/licenses/>.
""",
        ]
        # Match either variant anywhere in the file.
        gpl_re = f'(?:{"|".join(map(re.escape, gpl_txts))})'
        gpl_re = re.compile(gpl_re, re.MULTILINE)

        for fname, text in get_source_file_contents():
            if "wafsamba" in fname:
                # FIXME: License to wafsamba hasn't been clarified yet
                continue
            if fname.endswith("/python/samba/subunit/run.py"):
                # Imported from subunit/testtools, which are dual
                # Apache2/BSD-3.
                continue
            if fname.endswith("python/samba/tests/krb5/kcrypto.py"):
                # Imported from MIT testing repo
                continue
            if fname.endswith("python/samba/tests/krb5/rfc4120_pyasn1_generated.py"):
                # Autogenerated
                continue
            if not gpl_re.search(text):
                incorrect.append(fname)

        if incorrect:
            help_text = ['Some files have missing or incomplete GPL statement',
                         gpl_txts[-1]]
            for fname in incorrect:
                help_text.append((' ' * 4) + fname)

            self.fail('\n'.join(help_text))
+
+ def _push_file(self, dict_, fname, line_no):
+ if fname not in dict_:
+ dict_[fname] = [line_no]
+ else:
+ dict_[fname].append(line_no)
+
+ def _format_message(self, dict_, message):
+ files = ["%s: %s" % (f, ', '.join([str(i + 1) for i in lines]))
+ for f, lines in dict_.items()]
+ files.sort()
+ return message + '\n\n %s' % ('\n '.join(files))
+
+ def _iter_source_files_lines(self):
+ for fname, text in get_source_file_contents():
+ lines = text.splitlines(True)
+ for line_no, line in enumerate(lines):
+ yield fname, line_no, line
+
+ def test_no_tabs(self):
+ """Check that there are no tabs in Python files."""
+ tabs = {}
+ for fname, line_no, line in self._iter_source_files_lines():
+ if '\t' in line:
+ self._push_file(tabs, fname, line_no)
+ if tabs:
+ self.fail(self._format_message(tabs,
+ 'Tab characters were found in the following source files.'
+ '\nThey should either be replaced by "\\t" or by spaces:'))
+
+ def test_unix_newlines(self):
+ """Check for unix new lines."""
+ illegal_newlines = {}
+ for fname, line_no, line in self._iter_source_files_lines():
+ if not line.endswith('\n') or line.endswith('\r\n'):
+ self._push_file(illegal_newlines, fname, line_no)
+ if illegal_newlines:
+ self.fail(self._format_message(illegal_newlines,
+ 'Non-unix newlines were found in the following source files:'))
+
    def test_trailing_whitespace(self):
        """Check that there is not trailing whitespace in Python files."""
        trailing_whitespace = {}
        for fname, line_no, line in self._iter_source_files_lines():
            # Only trailing *spaces* are flagged here; tabs anywhere are
            # already caught by test_no_tabs.
            if line.rstrip("\n").endswith(" "):
                self._push_file(trailing_whitespace, fname, line_no)
        if trailing_whitespace:
            self.fail(self._format_message(trailing_whitespace,
                'Trailing whitespace was found in the following source files.'))
+
+ def test_shebang_lines(self):
+ """Check that files with shebang lines and only those are executable."""
+ files_with_shebang = {}
+ files_without_shebang = {}
+ for fname, line_no, line in self._iter_source_files_lines():
+ if line_no >= 1:
+ continue
+ executable = (os.stat(fname).st_mode & 0o111)
+ has_shebang = line.startswith("#!")
+ if has_shebang and not executable:
+ self._push_file(files_with_shebang, fname, line_no)
+ if not has_shebang and executable:
+ self._push_file(files_without_shebang, fname, line_no)
+ if files_with_shebang:
+ self.fail(self._format_message(files_with_shebang,
+ 'Files with shebang line that are not executable:'))
+ if files_without_shebang:
+ self.fail(self._format_message(files_without_shebang,
+ 'Files without shebang line that are executable:'))
diff --git a/python/samba/tests/source_chars.py b/python/samba/tests/source_chars.py
new file mode 100755
index 0000000..4613088
--- /dev/null
+++ b/python/samba/tests/source_chars.py
@@ -0,0 +1,326 @@
+#!/usr/bin/env python3
+# Unix SMB/CIFS implementation.
+#
+# Copyright (C) Catalyst.Net Ltd. 2021
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import sys
+
+sys.path.insert(0, 'bin/python')
+os.environ['PYTHONUNBUFFERED'] = '1'
+
+import subprocess
+from collections import Counter
+from samba.colour import c_RED, c_GREEN, c_DARK_YELLOW, switch_colour_off
+import re
+import unicodedata as u
+from samba.tests import TestCase, SkipTest
+
+if not sys.stdout.isatty():
+ switch_colour_off()
+
+
+def _find_root():
+ try:
+ p = subprocess.run(['git', 'rev-parse', '--show-toplevel'],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ timeout=10)
+ except subprocess.CalledProcessError as err:
+ print(c_RED("Error running git (is this a git tree?): %s" % (err)))
+
+ SkipTest("This test is only useful in a git working tree")
+ sys.exit(0)
+
+ if p.returncode != 0:
+ raise SkipTest("This test is only useful in a git working tree")
+ sys.exit(0)
+
+ root = p.stdout.decode().strip()
+
+ should_be_roots = (
+ os.path.abspath(os.path.join(os.path.dirname(__file__),
+ "../../..")),
+ os.path.abspath(os.path.join(os.path.dirname(__file__),
+ "../../../..")),
+ )
+ if root not in should_be_roots:
+ print(c_RED("It looks like we have found the wrong git tree!"))
+ sys.exit(1)
+ return root
+
+
+ROOT = None
+
+IGNORED_FILES = (
+ 'source3/selftest/ktest-krb5_ccache-2',
+ 'source3/selftest/ktest-krb5_ccache-3',
+ 'testdata/source-chars-bad.c',
+)
+
+IGNORED_RE = (
+ r'^third_party/heimdal/lib/hcrypto/passwd_dialog',
+ r'^third_party/heimdal/lib/hx509/data/',
+ r'^third_party/heimdal/po',
+ r'^third_party/heimdal/tests/kdc/hdb-mitdb',
+ r'^testdata/compression/',
+ r'^third_party/heimdal/lib/asn1/fuzz-inputs/',
+)
+
+IGNORED_EXTENSIONS = {
+ 'bmp',
+ 'cer',
+ 'corrupt',
+ 'crl',
+ 'crt',
+ 'dat',
+ 'der',
+ 'dump',
+ 'gpg',
+ 'gz',
+ 'ico',
+ 'keytab',
+ 'ldb',
+ 'p12',
+ 'pdf',
+ 'pem',
+ 'png',
+ 'SAMBABACKUP',
+ 'sxd',
+ 'tdb',
+ 'tif',
+ 'reg',
+ 'req'
+}
+
+
+# This list is by no means exhaustive -- these are just the format
+# characters we actually use.
+SAFE_FORMAT_CHARS = {
+ '\u200b',
+ '\ufeff'
+}
+
+# These files legitimately mix left-to-right and right-to-left text.
+# In the real world mixing directions would be normal in bilingual
+# documents, but it is rare in Samba source code.
+BIDI_FILES = {
+ 'third_party/heimdal/lib/base/test_base.c',
+ 'third_party/heimdal/lib/wind/NormalizationTest.txt',
+ 'testdata/source-chars-bidi.py',
+}
+
+
+def get_git_files():
+ try:
+ p = subprocess.run(['git',
+ '-C', ROOT,
+ 'ls-files',
+ '-z'],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ timeout=10)
+ except subprocess.SubprocessError as e:
+ print(c_RED(f"Error running git (is this a git tree?): {e}"))
+ print("This test is only useful in a git working tree")
+ return []
+
+ filenames = p.stdout.split(b'\x00')
+ return [x.decode() for x in filenames[:-1]]
+
+
+def iter_source_files():
+ filenames = get_git_files()
+
+ for name in filenames:
+ ignore = False
+ if name in IGNORED_FILES:
+ print(c_DARK_YELLOW(f"ignoring (exact) {name}"))
+ continue
+
+ for ignored in IGNORED_RE:
+ ignore = (re.match(ignored, name))
+ if ignore:
+ break
+
+ if ignore:
+ print(c_DARK_YELLOW(f"ignoring (via RE) {name}"))
+ continue
+
+ if '.' in name:
+ ext = name.rsplit('.', 1)[1]
+ if ext in IGNORED_EXTENSIONS:
+ print(c_DARK_YELLOW(f"ignoring {name}"))
+ continue
+
+ yield name
+
+
+def is_latin1_file(name):
+ for pattern in (
+ r'^source4/setup/ad-schema/\w+.ldf$',
+ r'^source4/setup/display-specifiers/D[\w-]+.txt$',
+ r'^third_party/heimdal/cf/pkg.m4$',
+ r'^third_party/heimdal/doc/standardisation/',
+ ):
+ if re.match(pattern, name):
+ return True
+ return False
+
+
+def is_bad_latin1_file(fullname):
+ # In practice, the few latin-1 files we have have single non-ASCII
+ # byte islands in a sea of ASCII. The utf-8 sequences we are
+ # concerned about involve sequences of 3 high bytes. We can say a
+ # file is safe latin-1 if it has only individual high bytes.
+ with open(fullname, 'rb') as f:
+ b = f.read()
+ in_seq = False
+ for c in b:
+ if c > 0x7f:
+ if in_seq:
+ return True
+ in_seq = True
+ else:
+ in_seq = False
+ return False
+
+
+def is_bad_char(c):
+ if u.category(c) != 'Cf':
+ return False
+ if c in SAFE_FORMAT_CHARS:
+ return False
+ return True
+
+
+class CharacterTests(TestCase):
+ def setUp(self):
+ global ROOT
+ if not ROOT:
+ ROOT = _find_root()
+
+ def test_no_unexpected_format_chars(self):
+ """This test tries to ensure that no source file has unicode control
+ characters that can change the apparent order of other
+ characters. These characters could make code appear to have
+        different semantic meaning than it really does.
+
+ This issue is sometimes called "Trojan Source", "CVE-2021-42574",
+ or "CVE-2021-42694".
+ """
+ for name in iter_source_files():
+ fullname = os.path.join(ROOT, name)
+ try:
+ with open(fullname) as f:
+ s = f.read()
+ except UnicodeDecodeError as e:
+ # probably a latin-1 encoding, which we tolerate in a few
+ # files for historical reasons, though we check that there
+ # are not long sequences of high bytes.
+ if is_latin1_file(name):
+ if is_bad_latin1_file(fullname):
+ self.fail(f"latin-1 file {name} has long sequences "
+ "of high bytes")
+ else:
+ self.fail(f"could not decode {name}: {e}")
+
+ dirs = set()
+ for c in set(s):
+ if is_bad_char(c):
+ self.fail(f"{name} has potentially bad format characters!")
+ dirs.add(u.bidirectional(c))
+
+ if 'L' in dirs and 'R' in dirs:
+ if name not in BIDI_FILES:
+ self.fail(f"{name} has LTR and RTL text ({dirs})")
+
+ def test_unexpected_format_chars_do_fail(self):
+ """Test the test"""
+ for name, n_bad in [
+ ('testdata/source-chars-bad.c', 3)
+ ]:
+ fullname = os.path.join(ROOT, name)
+ with open(fullname) as f:
+ s = f.read()
+ chars = set(s)
+ bad_chars = [c for c in chars if is_bad_char(c)]
+ self.assertEqual(len(bad_chars), n_bad)
+
+ def test_unexpected_bidi_fails(self):
+ """Test the test"""
+ for name in [
+ 'testdata/source-chars-bidi.py'
+ ]:
+ fullname = os.path.join(ROOT, name)
+ with open(fullname) as f:
+ s = f.read()
+
+ dirs = set()
+ for c in set(s):
+ dirs.add(u.bidirectional(c))
+ self.assertIn('L', dirs)
+ self.assertIn('R', dirs)
+
+
+def check_file_text():
+ """If called directly as a script, count the found characters."""
+ global ROOT
+ if not ROOT:
+ ROOT = _find_root()
+
+ counts = Counter()
+ for name in iter_source_files():
+ fullname = os.path.join(ROOT, name)
+ try:
+ with open(fullname) as f:
+ s = f.read()
+ except UnicodeDecodeError as e:
+ if is_latin1_file(name):
+ if is_bad_latin1_file(fullname):
+ print(c_RED(f"latin-1 file {name} has long sequences "
+ "of high bytes"))
+ else:
+ print(c_GREEN(f"latin-1 file {name} is fine"))
+ else:
+ print(c_RED(f"can't read {name}: {e}"))
+
+ counts.update(s)
+ chars = set(s)
+ for c in chars:
+ if u.category(c) == 'Cf':
+ print(c_GREEN(f"{name} has {u.name(c)}"))
+
+ print(len(counts))
+ controls = []
+ formats = []
+ others = []
+ for x in counts:
+ c = u.category(x)
+ if c == 'Cc':
+ controls.append(x)
+ elif c == 'Cf':
+ formats.append(x)
+ elif c[0] == 'C':
+ others.append(x)
+
+ print(f"normal control characters {controls}")
+ print(f"format characters {formats}")
+ print(f"other control characters {others}")
+
+
+if __name__ == '__main__':
+ check_file_text()
diff --git a/python/samba/tests/strings.py b/python/samba/tests/strings.py
new file mode 100644
index 0000000..e812df8
--- /dev/null
+++ b/python/samba/tests/strings.py
@@ -0,0 +1,99 @@
+# subunit test cases for Samba string functions.
+
+# Copyright (C) 2003 by Martin Pool <mbp@samba.org>
+# Copyright (C) 2011 Andrew Bartlett
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+# XXX: All this code assumes that the Unix character set is UTF-8,
+# which is the most common setting. I guess it would be better to
+# force it to that value while running the tests. I'm not sure of the
+# best way to do that yet.
+#
+# -- mbp
+import unicodedata
+import samba.tests
+from samba import strcasecmp_m, strstr_m
+
+
+KATAKANA_LETTER_A = unicodedata.lookup("KATAKANA LETTER A")
+
+
+def signum(a):
+ if a < 0:
+ return -1
+ elif a > 0:
+ return +1
+ else:
+ return 0
+
+class strcasecmp_m_Tests(samba.tests.TestCase):
+ """String comparisons in simple ASCII and unicode"""
+ def test_strcasecmp_m(self):
+ # A, B, strcasecmp(A, B)
+ cases = [('hello', 'hello', 0),
+ ('hello', 'goodbye', +1),
+ ('goodbye', 'hello', -1),
+ ('hell', 'hello', -1),
+ ('', '', 0),
+ ('a', '', +1),
+ ('', 'a', -1),
+ ('a', 'A', 0),
+ ('aa', 'aA', 0),
+ ('Aa', 'aa', 0),
+ ('longstring ' * 100, 'longstring ' * 100, 0),
+ ('longstring ' * 100, 'longstring ' * 100 + 'a', -1),
+ ('longstring ' * 100 + 'a', 'longstring ' * 100, +1),
+ (KATAKANA_LETTER_A, KATAKANA_LETTER_A, 0),
+ (KATAKANA_LETTER_A, 'a', 1),
+ ]
+ for a, b, expect in cases:
+ self.assertEqual(signum(strcasecmp_m(a, b)), expect)
+
+
+class strstr_m_Tests(samba.tests.TestCase):
+ """strstr_m tests in simple ASCII and unicode strings"""
+
+ def test_strstr_m(self):
+ # A, B, strstr_m(A, B)
+ cases = [('hello', 'hello', 'hello'),
+ ('hello', 'goodbye', None),
+ ('goodbye', 'hello', None),
+ ('hell', 'hello', None),
+ ('hello', 'hell', 'hello'),
+ ('', '', ''),
+ ('a', '', 'a'),
+ ('', 'a', None),
+ ('a', 'A', None),
+ ('aa', 'aA', None),
+ ('Aa', 'aa', None),
+ ('%v foo', '%v', '%v foo'),
+ ('foo %v foo', '%v', '%v foo'),
+ ('foo %v', '%v', '%v'),
+ ('longstring ' * 100, 'longstring ' * 99, 'longstring ' * 100),
+ ('longstring ' * 99, 'longstring ' * 100, None),
+ ('longstring a' * 99, 'longstring ' * 100 + 'a', None),
+ ('longstring ' * 100 + 'a', 'longstring ' * 100, 'longstring ' * 100 + 'a'),
+ (KATAKANA_LETTER_A, KATAKANA_LETTER_A + 'bcd', None),
+ (KATAKANA_LETTER_A + 'bcde', KATAKANA_LETTER_A + 'bcd', KATAKANA_LETTER_A + 'bcde'),
+ ('d' +KATAKANA_LETTER_A + 'bcd', KATAKANA_LETTER_A + 'bcd', KATAKANA_LETTER_A + 'bcd'),
+ ('d' +KATAKANA_LETTER_A + 'bd', KATAKANA_LETTER_A + 'bcd', None),
+
+ ('e' + KATAKANA_LETTER_A + 'bcdf', KATAKANA_LETTER_A + 'bcd', KATAKANA_LETTER_A + 'bcdf'),
+ (KATAKANA_LETTER_A, KATAKANA_LETTER_A + 'bcd', None),
+ (KATAKANA_LETTER_A * 3, 'a', None),
+ ]
+ for a, b, expect in cases:
+ self.assertEqual(strstr_m(a, b), expect)
diff --git a/python/samba/tests/subunitrun.py b/python/samba/tests/subunitrun.py
new file mode 100644
index 0000000..26d3300
--- /dev/null
+++ b/python/samba/tests/subunitrun.py
@@ -0,0 +1,63 @@
+# Simple subunit testrunner for python
+
+# NOTE: DO NOT USE THIS MODULE FOR NEW CODE.
+#
+# Instead, use the standard subunit runner - e.g. "python -m subunit.run
+# YOURMODULE".
+#
+# This wrapper will be removed once all tests can be run
+# without it. At the moment there are various tests which still
+# get e.g. credentials passed via command-line options to this
+# script.
+
+# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007-2014
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+# make sure the script dies immediately when hitting control-C,
+# rather than raising KeyboardInterrupt. As we do all database
+# operations using transactions, this is safe.
+import signal
+signal.signal(signal.SIGINT, signal.SIG_DFL)
+
+import optparse
+import sys
+
+from samba.subunit.run import TestProgram as BaseTestProgram
+
+
+class SubunitOptions(optparse.OptionGroup):
+ """Command line options for subunit test runners."""
+
+ def __init__(self, parser):
+ optparse.OptionGroup.__init__(self, parser, "Subunit Options")
+ self.add_option('-l', '--list', dest='listtests', default=False,
+ help='List tests rather than running them.',
+ action="store_true")
+ self.add_option('--load-list', dest='load_list', default=None,
+ help='Specify a filename containing the test ids to use.')
+
+
+class TestProgram(BaseTestProgram):
+
+ def __init__(self, module=None, args=None, opts=None):
+ if args is None:
+ args = []
+ if getattr(opts, "listtests", False):
+ args.insert(0, "--list")
+ if getattr(opts, 'load_list', None):
+ args.insert(0, "--load-list=%s" % opts.load_list)
+ argv = [sys.argv[0]] + args
+ super().__init__(module=module, argv=argv)
diff --git a/python/samba/tests/tdb_util.py b/python/samba/tests/tdb_util.py
new file mode 100644
index 0000000..e8e5d72
--- /dev/null
+++ b/python/samba/tests/tdb_util.py
@@ -0,0 +1,50 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Lumir Balhar <lbalhar@redhat.com> 2017
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import samba.tests
+from samba import ldb, Ldb
+from samba.tdb_util import tdb_copy
+import os
+
+
+class TDBUtilTests(samba.tests.TestCaseInTempDir):
+
+ def test_tdb_copy(self):
+ src_ldb_file = os.path.join(self.tempdir, "source.ldb")
+ dst_ldb_file = os.path.join(self.tempdir, "destination.ldb")
+
+ # Create LDB source file with some content
+ src_ldb = Ldb(src_ldb_file)
+ src_ldb.add({"dn": "f=dc", "b": "bla"})
+
+ # Copy source file to destination file and check return status
+ self.assertIsNone(tdb_copy(src_ldb_file, dst_ldb_file))
+
+ # Load copied file as LDB object
+ dst_ldb = Ldb(dst_ldb_file)
+
+        # Compare contents of files
+ self.assertEqual(
+ src_ldb.searchone(basedn=ldb.Dn(src_ldb, "f=dc"), attribute="b"),
+ dst_ldb.searchone(basedn=ldb.Dn(dst_ldb, "f=dc"), attribute="b")
+ )
+
+ # Clean up
+ del src_ldb
+ del dst_ldb
+ os.unlink(src_ldb_file)
+ os.unlink(dst_ldb_file)
diff --git a/python/samba/tests/test_pam_winbind.sh b/python/samba/tests/test_pam_winbind.sh
new file mode 100755
index 0000000..a4b9b5b
--- /dev/null
+++ b/python/samba/tests/test_pam_winbind.sh
@@ -0,0 +1,46 @@
+#!/bin/sh
+
+PYTHON="$1"
+PAM_WRAPPER_SO_PATH="$2"
+shift 2
+
+DOMAIN="$1"
+export DOMAIN
+USERNAME="$2"
+export USERNAME
+PASSWORD="$3"
+export PASSWORD
+shift 3
+
+PAM_OPTIONS="$1"
+export PAM_OPTIONS
+shift 1
+
+PAM_WRAPPER_PATH="$BINDIR/default/third_party/pam_wrapper"
+
+pam_winbind="$BINDIR/plugins/pam_winbind.so"
+service_dir="$SELFTEST_TMPDIR/pam_services"
+service_file="$service_dir/samba"
+
+mkdir $service_dir
+echo "auth required $pam_winbind debug debug_state $PAM_OPTIONS" >$service_file
+echo "account required $pam_winbind debug debug_state $PAM_OPTIONS" >>$service_file
+echo "password required $pam_winbind debug debug_state $PAM_OPTIONS" >>$service_file
+echo "session required $pam_winbind debug debug_state $PAM_OPTIONS" >>$service_file
+
+PAM_WRAPPER="1"
+export PAM_WRAPPER
+PAM_WRAPPER_SERVICE_DIR="$service_dir"
+export PAM_WRAPPER_SERVICE_DIR
+LD_PRELOAD="$LD_PRELOAD:$PAM_WRAPPER_SO_PATH"
+export LD_PRELOAD
+
+PAM_WRAPPER_DEBUGLEVEL=${PAM_WRAPPER_DEBUGLEVEL:="2"}
+export PAM_WRAPPER_DEBUGLEVEL
+
+PYTHONPATH="$PYTHONPATH:$PAM_WRAPPER_PATH:$(dirname $0)" $PYTHON -m samba.subunit.run samba.tests.pam_winbind
+exit_code=$?
+
+rm -rf $service_dir
+
+exit $exit_code
diff --git a/python/samba/tests/test_pam_winbind_chauthtok.sh b/python/samba/tests/test_pam_winbind_chauthtok.sh
new file mode 100755
index 0000000..ea52992
--- /dev/null
+++ b/python/samba/tests/test_pam_winbind_chauthtok.sh
@@ -0,0 +1,77 @@
+#!/bin/sh
+
+PYTHON="$1"
+PAM_WRAPPER_SO_PATH="$2"
+PAM_SET_ITEMS_SO_PATH="$3"
+shift 3
+
+DOMAIN="$1"
+export DOMAIN
+USERNAME="$2"
+export USERNAME
+PASSWORD="$3"
+export PASSWORD
+NEWPASSWORD="$4"
+export NEWPASSWORD
+PAM_OPTIONS="$5"
+export PAM_OPTIONS
+CREATE_USER="$6"
+shift 6
+
+samba_bindir="$BINDIR"
+samba_tool="$samba_bindir/samba-tool"
+
+if [ "$CREATE_USER" = yes ]; then
+ CREATE_SERVER="$1"
+ CREATE_USERNAME="$2"
+ CREATE_PASSWORD="$3"
+ shift 3
+ $PYTHON $samba_tool user create "$USERNAME" "$PASSWORD" -H "ldap://$CREATE_SERVER" -U "$CREATE_USERNAME%$CREATE_PASSWORD"
+ # reset password policies beside of minimum password age of 0 days
+ $PYTHON $samba_tool domain passwordsettings set --complexity=default --history-length=default --min-pwd-length=default --min-pwd-age=0 --max-pwd-age=default -H "ldap://$CREATE_SERVER" -U "$CREATE_USERNAME%$CREATE_PASSWORD"
+fi
+
+PAM_WRAPPER_PATH="$BINDIR/default/third_party/pam_wrapper"
+
+pam_winbind="$BINDIR/plugins/pam_winbind.so"
+service_dir="$SELFTEST_TMPDIR/pam_services"
+service_file="$service_dir/samba"
+
+mkdir $service_dir
+echo "auth required $pam_winbind debug debug_state $PAM_OPTIONS" >$service_file
+echo "account required $pam_winbind debug debug_state $PAM_OPTIONS" >>$service_file
+echo "password required $PAM_SET_ITEMS_SO_PATH" >>$service_file
+echo "password required $pam_winbind debug debug_state $PAM_OPTIONS" >>$service_file
+echo "session required $pam_winbind debug debug_state $PAM_OPTIONS" >>$service_file
+
+PAM_WRAPPER_SERVICE_DIR="$service_dir"
+export PAM_WRAPPER_SERVICE_DIR
+LD_PRELOAD="$LD_PRELOAD:$PAM_WRAPPER_SO_PATH"
+export LD_PRELOAD
+
+PAM_WRAPPER_DEBUGLEVEL=${PAM_WRAPPER_DEBUGLEVEL:="2"}
+export PAM_WRAPPER_DEBUGLEVEL
+
+case $PAM_OPTIONS in
+*use_authtok*)
+ PAM_AUTHTOK="$NEWPASSWORD"
+ export PAM_AUTHTOK
+ ;;
+*try_authtok*)
+ PAM_AUTHTOK="$NEWPASSWORD"
+ export PAM_AUTHTOK
+ ;;
+esac
+
+PAM_WRAPPER="1" PYTHONPATH="$PYTHONPATH:$PAM_WRAPPER_PATH:$(dirname $0)" $PYTHON -m samba.subunit.run samba.tests.pam_winbind_chauthtok
+exit_code=$?
+
+rm -rf $service_dir
+
+if [ "$CREATE_USER" = yes ]; then
+ $PYTHON $samba_tool user delete "$USERNAME" -H "ldap://$CREATE_SERVER" -U "$CREATE_USERNAME%$CREATE_PASSWORD"
+ # reset password policies
+ $PYTHON $samba_tool domain passwordsettings set --complexity=default --history-length=default --min-pwd-length=default --min-pwd-age=default --max-pwd-age=default -H "ldap://$CREATE_SERVER" -U "$CREATE_USERNAME%$CREATE_PASSWORD"
+fi
+
+exit $exit_code
diff --git a/python/samba/tests/test_pam_winbind_setcred.sh b/python/samba/tests/test_pam_winbind_setcred.sh
new file mode 100755
index 0000000..7d7acc2
--- /dev/null
+++ b/python/samba/tests/test_pam_winbind_setcred.sh
@@ -0,0 +1,46 @@
+#!/bin/sh
+
+PYTHON="$1"
+PAM_WRAPPER_SO_PATH="$2"
+shift 2
+
+DOMAIN="$1"
+export DOMAIN
+USERNAME="$2"
+export USERNAME
+PASSWORD="$3"
+export PASSWORD
+shift 3
+
+PAM_OPTIONS="$1"
+export PAM_OPTIONS
+shift 1
+
+PAM_WRAPPER_PATH="$BINDIR/default/third_party/pam_wrapper"
+
+pam_winbind="$BINDIR/plugins/pam_winbind.so"
+service_dir="$SELFTEST_TMPDIR/pam_services"
+service_file="$service_dir/samba"
+
+mkdir $service_dir
+echo "auth required $pam_winbind debug debug_state $PAM_OPTIONS" > $service_file
+echo "account required $pam_winbind debug debug_state $PAM_OPTIONS" >> $service_file
+echo "password required $pam_winbind debug debug_state $PAM_OPTIONS" >> $service_file
+echo "session required $pam_winbind debug debug_state $PAM_OPTIONS" >> $service_file
+
+PAM_WRAPPER="1"
+export PAM_WRAPPER
+PAM_WRAPPER_SERVICE_DIR="$service_dir"
+export PAM_WRAPPER_SERVICE_DIR
+LD_PRELOAD="$LD_PRELOAD:$PAM_WRAPPER_SO_PATH"
+export LD_PRELOAD
+
+PAM_WRAPPER_DEBUGLEVEL=${PAM_WRAPPER_DEBUGLEVEL:="3"}
+export PAM_WRAPPER_DEBUGLEVEL
+
+PYTHONPATH="$PYTHONPATH:$PAM_WRAPPER_PATH:$(dirname $0)" $PYTHON -m samba.subunit.run samba.tests.pam_winbind_setcred
+exit_code=$?
+
+rm -rf $service_dir
+
+exit $exit_code
diff --git a/python/samba/tests/test_pam_winbind_warn_pwd_expire.sh b/python/samba/tests/test_pam_winbind_warn_pwd_expire.sh
new file mode 100755
index 0000000..f8db9d1
--- /dev/null
+++ b/python/samba/tests/test_pam_winbind_warn_pwd_expire.sh
@@ -0,0 +1,75 @@
+#!/bin/sh
+
+PYTHON="$1"
+PAM_WRAPPER_SO_PATH="$2"
+shift 2
+
+DOMAIN="$1"
+export DOMAIN
+USERNAME="$2"
+export USERNAME
+PASSWORD="$3"
+export PASSWORD
+shift 3
+
+PAM_OPTIONS="$1"
+export PAM_OPTIONS
+shift 1
+
+PAM_WRAPPER_PATH="$BINDIR/default/third_party/pam_wrapper"
+
+pam_winbind="$BINDIR/plugins/pam_winbind.so"
+service_dir="$SELFTEST_TMPDIR/pam_services"
+service_file="$service_dir/samba"
+
+mkdir $service_dir
+
+PAM_WRAPPER="1"
+export PAM_WRAPPER
+PAM_WRAPPER_SERVICE_DIR="$service_dir"
+export PAM_WRAPPER_SERVICE_DIR
+LD_PRELOAD="$LD_PRELOAD:$PAM_WRAPPER_SO_PATH"
+export LD_PRELOAD
+
+PAM_WRAPPER_DEBUGLEVEL=${PAM_WRAPPER_DEBUGLEVEL:="2"}
+export PAM_WRAPPER_DEBUGLEVEL
+
+# TEST with warn_pwd_expire=50
+#
+# This should produce a warning that the password will expire in 42 days
+#
+WARN_PWD_EXPIRE="50"
+export WARN_PWD_EXPIRE
+
+echo "auth required $pam_winbind debug debug_state warn_pwd_expire=$WARN_PWD_EXPIRE $PAM_OPTIONS" >$service_file
+echo "account required $pam_winbind debug debug_state warn_pwd_expire=$WARN_PWD_EXPIRE $PAM_OPTIONS" >>$service_file
+echo "password required $pam_winbind debug debug_state warn_pwd_expire=$WARN_PWD_EXPIRE $PAM_OPTIONS" >>$service_file
+echo "session required $pam_winbind debug debug_state warn_pwd_expire=$WARN_PWD_EXPIRE $PAM_OPTIONS" >>$service_file
+
+PYTHONPATH="$PYTHONPATH:$PAM_WRAPPER_PATH:$(dirname $0)" $PYTHON -m samba.subunit.run samba.tests.pam_winbind_warn_pwd_expire
+exit_code=$?
+if [ $exit_code -ne 0 ]; then
+ rm -rf $service_dir
+ exit $exit_code
+fi
+
+# TEST with warn_pwd_expire=0
+#
+WARN_PWD_EXPIRE="0"
+export WARN_PWD_EXPIRE
+
+echo "auth required $pam_winbind debug debug_state warn_pwd_expire=$WARN_PWD_EXPIRE $PAM_OPTIONS" >$service_file
+echo "account required $pam_winbind debug debug_state warn_pwd_expire=$WARN_PWD_EXPIRE $PAM_OPTIONS" >>$service_file
+echo "password required $pam_winbind debug debug_state warn_pwd_expire=$WARN_PWD_EXPIRE $PAM_OPTIONS" >>$service_file
+echo "session required $pam_winbind debug debug_state warn_pwd_expire=$WARN_PWD_EXPIRE $PAM_OPTIONS" >>$service_file
+
+PYTHONPATH="$PYTHONPATH:$PAM_WRAPPER_PATH:$(dirname $0)" $PYTHON -m samba.subunit.run samba.tests.pam_winbind_warn_pwd_expire
+exit_code=$?
+if [ $exit_code -ne 0 ]; then
+ rm -rf $service_dir
+ exit $exit_code
+fi
+
+rm -rf $service_dir
+
+exit $exit_code
diff --git a/python/samba/tests/token_factory.py b/python/samba/tests/token_factory.py
new file mode 100644
index 0000000..22f87f0
--- /dev/null
+++ b/python/samba/tests/token_factory.py
@@ -0,0 +1,256 @@
+# Unix SMB/CIFS implementation.
+# Copyright © Catalyst IT 2023
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+"""An API for creating arbitrary security tokens."""
+
+
+from samba.dcerpc import security
+
+
+CLAIM_VAL_TYPES = {
+ int: 0x0001,
+ 'uint': 0x0002,
+ str: 0x0003,
+ security.dom_sid: 0x0005,
+ bool: 0x0006,
+ bytes: 0x0010
+}
+
+
+def list_to_claim(k, v, case_sensitive=False):
+ if isinstance(v, security.CLAIM_SECURITY_ATTRIBUTE_RELATIVE_V1):
+ # make the name match
+ v.name = k
+ return v
+ if isinstance(v, (str, int)):
+ v = [v]
+ if not isinstance(v, list):
+ raise TypeError(f"expected list of claim values for '{k}', "
+ f"not {v!r} of type {type(v)}")
+
+ c = security.CLAIM_SECURITY_ATTRIBUTE_RELATIVE_V1()
+
+ if len(v) != 0:
+ t = type(v[0])
+ c.value_type = CLAIM_VAL_TYPES[t]
+ for val in v[1:]:
+ if type(val) != t:
+ raise TypeError(f"claim values for '{k}' "
+ "should all be the same type")
+ else:
+ # pick an arbitrary type
+ c.value_type = CLAIM_VAL_TYPES['uint']
+ c.name = k
+ c.values = v
+ c.value_count = len(v)
+ if case_sensitive:
+ c.flags |= security.CLAIM_SECURITY_ATTRIBUTE_VALUE_CASE_SENSITIVE
+
+ # The claims made here will not have the
+ # CLAIM_SECURITY_ATTRIBUTE_UNIQUE_AND_SORTED flag set, which makes
+ # them like resource attribute claims rather than real wire
+ # claims. It shouldn't matter much, as they will just be sorted
+ # and checked as if they were resource attribute claims.
+ return c
+
+
+def _normalise_claims(args):
+ if isinstance(args, security.CLAIM_SECURITY_ATTRIBUTE_RELATIVE_V1):
+ return [args]
+
+ if args is None or len(args) == 0:
+ return []
+
+ if isinstance(args, list):
+ for x in args:
+ if not isinstance(x, security.CLAIM_SECURITY_ATTRIBUTE_RELATIVE_V1):
+ raise TypeError(f"list should be of claims, not '{type(x)}'")
+ return args
+
+ claims_out = []
+
+ if isinstance(args, dict):
+ # the key is the name and the value is a list of claim values
+ for k, v in args.items():
+ c = list_to_claim(k, v)
+ claims_out.append(c)
+
+ return claims_out
+
+
+def str_to_sid(s):
+ lut = {
+ # These are a subset of two letter aliases that don't need a
+ # domain SID or other magic. (c.f. sid_strings test).
+ 'AA': security.SID_BUILTIN_ACCESS_CONTROL_ASSISTANCE_OPS, # S-1-5-32-579
+ 'AC': security.SID_SECURITY_BUILTIN_PACKAGE_ANY_PACKAGE, # S-1-15-2-1
+ 'AN': security.SID_NT_ANONYMOUS, # S-1-5-7
+ 'AO': security.SID_BUILTIN_ACCOUNT_OPERATORS, # S-1-5-32-548
+ 'AS': security.SID_AUTHENTICATION_AUTHORITY_ASSERTED_IDENTITY, # S-1-18-1
+ 'AU': security.SID_NT_AUTHENTICATED_USERS, # S-1-5-11
+ 'BA': security.SID_BUILTIN_ADMINISTRATORS, # S-1-5-32-544
+ 'BG': security.SID_BUILTIN_GUESTS, # S-1-5-32-546
+ 'BO': security.SID_BUILTIN_BACKUP_OPERATORS, # S-1-5-32-551
+ 'BU': security.SID_BUILTIN_USERS, # S-1-5-32-545
+ 'CD': security.SID_BUILTIN_CERT_SERV_DCOM_ACCESS, # S-1-5-32-574
+ 'CG': security.SID_CREATOR_GROUP, # S-1-3-1
+ 'CO': security.SID_CREATOR_OWNER, # S-1-3-0
+ 'CY': security.SID_BUILTIN_CRYPTO_OPERATORS, # S-1-5-32-569
+ 'ED': security.SID_NT_ENTERPRISE_DCS, # S-1-5-9
+ 'ER': security.SID_BUILTIN_EVENT_LOG_READERS, # S-1-5-32-573
+ 'ES': security.SID_BUILTIN_RDS_ENDPOINT_SERVERS, # S-1-5-32-576
+ 'HA': security.SID_BUILTIN_HYPER_V_ADMINS, # S-1-5-32-578
+ 'HI': security.SID_SECURITY_MANDATORY_HIGH, # S-1-16-12288
+ 'IS': security.SID_BUILTIN_IUSERS, # S-1-5-32-568
+ 'IU': security.SID_NT_INTERACTIVE, # S-1-5-4
+ 'LS': security.SID_NT_LOCAL_SERVICE, # S-1-5-19
+ 'LU': security.SID_BUILTIN_PERFLOG_USERS, # S-1-5-32-559
+ 'LW': security.SID_SECURITY_MANDATORY_LOW, # S-1-16-4096
+ 'ME': security.SID_SECURITY_MANDATORY_MEDIUM, # S-1-16-8192
+ 'MP': security.SID_SECURITY_MANDATORY_MEDIUM_PLUS, # S-1-16-8448
+ 'MS': security.SID_BUILTIN_RDS_MANAGEMENT_SERVERS, # S-1-5-32-577
+ 'MU': security.SID_BUILTIN_PERFMON_USERS, # S-1-5-32-558
+ 'NO': security.SID_BUILTIN_NETWORK_CONF_OPERATORS, # S-1-5-32-556
+ 'NS': security.SID_NT_NETWORK_SERVICE, # S-1-5-20
+ 'NU': security.SID_NT_NETWORK, # S-1-5-2
+ 'OW': security.SID_OWNER_RIGHTS, # S-1-3-4
+ 'PO': security.SID_BUILTIN_PRINT_OPERATORS, # S-1-5-32-550
+ 'PS': security.SID_NT_SELF, # S-1-5-10
+ 'PU': security.SID_BUILTIN_POWER_USERS, # S-1-5-32-547
+ 'RA': security.SID_BUILTIN_RDS_REMOTE_ACCESS_SERVERS, # S-1-5-32-575
+ 'RC': security.SID_NT_RESTRICTED, # S-1-5-12
+ 'RD': security.SID_BUILTIN_REMOTE_DESKTOP_USERS, # S-1-5-32-555
+ 'RE': security.SID_BUILTIN_REPLICATOR, # S-1-5-32-552
+ 'RM': security.SID_BUILTIN_REMOTE_MANAGEMENT_USERS, # S-1-5-32-580
+ 'RU': security.SID_BUILTIN_PREW2K, # S-1-5-32-554
+ 'SI': security.SID_SECURITY_MANDATORY_SYSTEM, # S-1-16-16384
+ 'SO': security.SID_BUILTIN_SERVER_OPERATORS, # S-1-5-32-549
+ 'SS': security.SID_SERVICE_ASSERTED_IDENTITY, # S-1-18-2
+ 'SU': security.SID_NT_SERVICE, # S-1-5-6
+ 'SY': security.SID_NT_SYSTEM, # S-1-5-18
+ 'WD': security.SID_WORLD, # S-1-1-0
+ 'WR': security.SID_SECURITY_RESTRICTED_CODE, # S-1-5-33
+ }
+ if s in lut:
+ s = lut[s]
+ return security.dom_sid(s)
+
+
+def _normalise_sids(args):
+ if isinstance(args, security.dom_sid):
+ return [args]
+ if isinstance(args, str):
+ return [str_to_sid(args)]
+
+ if not isinstance(args, list):
+ raise TypeError("expected a SID, sid string, or list of SIDs, "
+ f"not'{type(args)}'")
+
+ sids_out = []
+ for s in args:
+ if isinstance(s, str):
+ s = str_to_sid(s)
+ elif not isinstance(s, security.dom_sid):
+ raise TypeError(f"expected a SID, not'{type(s)}'")
+ sids_out.append(s)
+
+ return sids_out
+
+
+def _normalise_mask(mask, mask_type):
+ if isinstance(mask, int):
+ return mask
+
+ if not isinstance(mask, list):
+ raise TypeError("expected int mask or list of flags")
+
+ if mask_type == 'privileges':
+ prefix = 'SEC_PRIV_'
+ tail = '_BIT'
+ elif mask_type == 'rights':
+ prefix = 'LSA_POLICY_MODE_'
+ tail = ''
+ else:
+ raise ValueError(f"unknown mask_type value: {mask_type}")
+
+ mask_out = 0
+
+ for x in mask:
+ if isinstance(x, str) and x.startswith(prefix):
+ if not x.endswith(tail):
+ # we don't want security.SEC_PRIV_SHUTDOWN (19),
+ # we want security.SEC_PRIV_SHUTDOWN_BIT (1 << 20)
+ # but you can write "SEC_PRIV_SHUTDOWN"
+ x += tail
+ x = getattr(security, x)
+ mask_out |= x
+
+ return mask_out
+
+
+def token(sids=None, **kwargs):
+ """Return a security token with the specified attributes.
+
+ The security.token API is annoying and fragile; here we wrap it in
+ something nicer.
+
+ In general the arguments can either be objects of the correct
+ type, or Python strings or structures that clearly convert to that
+    type. For example, these two are equivalent:
+
+ >>> t = token([security.dom_sid("S-1-2")])
+ >>> t = token(["S-1-2"])
+
+ To add claims and device SIDs you do something like this:
+
+    >>> t = token(["AA", "WD"],
+ device_sids=["WD"],
+ user_claims={"Title": ["PM"],
+ "ClearanceLevel": [1]}
+ """
+
+ claims_kws = ['device_claims',
+ 'local_claims',
+ 'user_claims']
+
+ sid_kws = ['sids', 'device_sids']
+
+ mask_kws = ['privileges',
+ 'rights']
+
+ if sids is not None:
+ kwargs['sids'] = sids
+
+ norm_args = {}
+
+ for k, v in kwargs.items():
+ if k in claims_kws:
+ norm_args[k] = _normalise_claims(v)
+ elif k in mask_kws:
+ norm_args[k] = _normalise_mask(v, k)
+ elif k in sid_kws:
+ norm_args[k] = _normalise_sids(v)
+ else:
+ raise TypeError(f"{k} is an invalid keyword argument")
+
+ t = security.token(evaluate_claims=security.CLAIMS_EVALUATION_ALWAYS)
+
+ for k, v in norm_args.items():
+ setattr(t, k, v)
+ if isinstance(v, list):
+ setattr(t, 'num_' + k, len(v))
+
+ return t
diff --git a/python/samba/tests/upgrade.py b/python/samba/tests/upgrade.py
new file mode 100644
index 0000000..0cca2d0
--- /dev/null
+++ b/python/samba/tests/upgrade.py
@@ -0,0 +1,40 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for samba.upgrade."""
+
+from samba.upgrade import import_wins
+from samba.tests import LdbTestCase
+
+
+class WinsUpgradeTests(LdbTestCase):
+
+ def test_upgrade(self):
+ winsdb = {
+ "FOO#20": (200, ["127.0.0.1", "127.0.0.2"], 0x60)
+ }
+ import_wins(self.ldb, winsdb)
+
+ self.assertEqual(
+ ['name=FOO,type=0x20'],
+ [str(m.dn) for m in
+ self.ldb.search(expression="(objectClass=winsRecord)")])
+
+ def test_version(self):
+ import_wins(self.ldb, {})
+ self.assertEqual("VERSION",
+ str(self.ldb.search(expression="(objectClass=winsMaxVersion)")[0]["cn"]))
diff --git a/python/samba/tests/upgradeprovision.py b/python/samba/tests/upgradeprovision.py
new file mode 100644
index 0000000..ba097fa
--- /dev/null
+++ b/python/samba/tests/upgradeprovision.py
@@ -0,0 +1,155 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007-2008
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for samba.upgradeprovision."""
+
+import os
+from samba.upgradehelpers import (usn_in_range, dn_sort,
+ update_secrets,
+ construct_existor_expr)
+from samba.descriptor import get_diff_sds
+from samba.tests.provision import create_dummy_secretsdb
+from samba.tests import TestCaseInTempDir
+from samba import Ldb
+from ldb import SCOPE_BASE
+import samba.tests
+from samba.dcerpc import security
+
+
+def dummymessage(a=None, b=None):
+ pass
+
+
+class UpgradeProvisionTestCase(TestCaseInTempDir):
+ """Some simple tests for individual functions in the provisioning code.
+ """
+ def test_usn_in_range(self):
+ range = [5, 25, 35, 55]
+
+ vals = [3, 26, 56]
+
+ for v in vals:
+ self.assertFalse(usn_in_range(v, range))
+
+ vals = [5, 20, 25, 35, 36]
+
+ for v in vals:
+ self.assertTrue(usn_in_range(v, range))
+
+ def test_dn_sort(self):
+ # higher level comes after lower even if lexicographically closer
+ # ie dc=tata,dc=toto (2 levels), comes after dc=toto
+ # even if dc=toto is lexicographically after dc=tata, dc=toto
+ self.assertEqual(dn_sort("dc=tata,dc=toto", "dc=toto"), 1)
+ self.assertEqual(dn_sort("dc=zata", "dc=tata"), 1)
+ self.assertEqual(dn_sort("dc=toto,dc=tata",
+ "cn=foo,dc=toto,dc=tata"), -1)
+ self.assertEqual(dn_sort("cn=bar, dc=toto,dc=tata",
+ "cn=foo, dc=toto,dc=tata"), -1)
+
+ def test_get_diff_sds(self):
+ domsid = security.dom_sid('S-1-5-21')
+
+ sddl = "O:SAG:DUD:AI(A;CI;CCLCSWRPWPLOCRRCWDWO;;;SA)\
+(A;CI;RPLCLORC;;;AU)(A;CI;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)S:AI(AU;CISA;WP;;;WD)"
+ sddl1 = "O:SAG:DUD:AI(A;CI;CCLCSWRPWPLOCRRCWDWO;;;SA)\
+(A;CI;RPLCLORC;;;AU)(A;CI;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)S:AI(AU;CISA;WP;;;WD)"
+ sddl2 = "O:BAG:DUD:AI(A;CI;CCLCSWRPWPLOCRRCWDWO;;;SA)\
+(A;CI;RPLCLORC;;;AU)(A;CI;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)S:AI(AU;CISA;WP;;;WD)"
+ sddl3 = "O:SAG:BAD:AI(A;CI;CCLCSWRPWPLOCRRCWDWO;;;SA)\
+(A;CI;RPLCLORC;;;AU)(A;CI;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)S:AI(AU;CISA;WP;;;WD)"
+ sddl4 = "O:SAG:DUD:AI(A;CI;CCLCSWRPWPLOCRRCWDWO;;;BA)\
+(A;CI;RPLCLORC;;;AU)(A;CI;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)S:AI(AU;CISA;WP;;;WD)"
+ sddl5 = "O:SAG:DUD:AI(A;CI;CCLCSWRPWPLOCRRCWDWO;;;SA)\
+(A;CI;RPLCLORC;;;AU)(A;CI;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)"
+ sddl6 = "O:SAG:DUD:AI(A;CIID;CCLCSWRPWPLOCRRCWDWO;;;SA)\
+(A;CIID;RPLCLORC;;;AU)(A;CIID;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)\
+(A;CI;CCLCSWRPWPLOCRRCWDWO;;;SA)\
+(A;CI;RPLCLORC;;;AU)(A;CI;RPWPCRCCDCLCLORCWOWDSDDTSW;;;SY)S:AI(AU;CISA;WP;;;WD)(AU;CIIDSA;WP;;;WD)"
+
+ self.assertEqual(get_diff_sds(security.descriptor.from_sddl(sddl, domsid),
+ security.descriptor.from_sddl(sddl1, domsid),
+ domsid), "")
+ txt = get_diff_sds(security.descriptor.from_sddl(sddl, domsid),
+ security.descriptor.from_sddl(sddl2, domsid),
+ domsid)
+ self.assertEqual(txt, "\tOwner mismatch: SA (in ref) BA(in current)\n")
+ txt = get_diff_sds(security.descriptor.from_sddl(sddl, domsid),
+ security.descriptor.from_sddl(sddl3, domsid),
+ domsid)
+ self.assertEqual(txt, "\tGroup mismatch: DU (in ref) BA(in current)\n")
+ txt = get_diff_sds(security.descriptor.from_sddl(sddl, domsid),
+ security.descriptor.from_sddl(sddl4, domsid),
+ domsid)
+ txtmsg = "\tPart dacl is different between reference and current here\
+ is the detail:\n\t\t(A;CI;CCLCSWRPWPLOCRRCWDWO;;;BA) ACE is not present in\
+ the reference\n\t\t(A;CI;CCLCSWRPWPLOCRRCWDWO;;;SA) ACE is not present in\
+ the current\n"
+ self.assertEqual(txt, txtmsg)
+
+ txt = get_diff_sds(security.descriptor.from_sddl(sddl, domsid),
+ security.descriptor.from_sddl(sddl5, domsid),
+ domsid)
+ self.assertEqual(txt, "\tCurrent ACL hasn't a sacl part\n")
+ self.assertEqual(get_diff_sds(security.descriptor.from_sddl(sddl, domsid),
+ security.descriptor.from_sddl(sddl6, domsid),
+ domsid), "")
+
+ def test_construct_existor_expr(self):
+ res = construct_existor_expr([])
+ self.assertEqual(res, "")
+
+ res = construct_existor_expr(["foo"])
+ self.assertEqual(res, "(|(foo=*))")
+
+ res = construct_existor_expr(["foo", "bar"])
+ self.assertEqual(res, "(|(foo=*)(bar=*))")
+
+
+class UpdateSecretsTests(samba.tests.TestCaseInTempDir):
+
+ def setUp(self):
+ super().setUp()
+ self.referencedb = create_dummy_secretsdb(
+ os.path.join(self.tempdir, "ref.ldb"))
+
+ def _getEmptyDb(self):
+ return Ldb(os.path.join(self.tempdir, "secrets.ldb"))
+
+ def _getCurrentFormatDb(self):
+ return create_dummy_secretsdb(
+ os.path.join(self.tempdir, "secrets.ldb"))
+
+ def test_trivial(self):
+ # Test that updating an already up-to-date secretsdb works fine
+ self.secretsdb = self._getCurrentFormatDb()
+ self.assertEqual(None,
+ update_secrets(self.referencedb, self.secretsdb, dummymessage))
+
+ def test_update_modules(self):
+ empty_db = self._getEmptyDb()
+ update_secrets(self.referencedb, empty_db, dummymessage)
+ newmodules = empty_db.search(base="@MODULES", scope=SCOPE_BASE)
+ refmodules = self.referencedb.search(base="@MODULES", scope=SCOPE_BASE)
+ self.assertEqual(newmodules.msgs, refmodules.msgs)
+
+ def tearDown(self):
+ for name in ["ref.ldb", "secrets.ldb", "secrets.tdb", "secrets.tdb.bak", "secrets.ntdb"]:
+ path = os.path.join(self.tempdir, name)
+ if os.path.exists(path):
+ os.unlink(path)
+ super().tearDown()
diff --git a/python/samba/tests/upgradeprovisionneeddc.py b/python/samba/tests/upgradeprovisionneeddc.py
new file mode 100644
index 0000000..8cabfa2
--- /dev/null
+++ b/python/samba/tests/upgradeprovisionneeddc.py
@@ -0,0 +1,181 @@
+# Unix SMB/CIFS implementation.
+# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007-2008
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for samba.upgradeprovision that need a DC."""
+
+import os
+import re
+import shutil
+
+from samba import param
+from samba.credentials import Credentials
+from samba.auth import system_session
+from samba.provision import getpolicypath, find_provision_key_parameters
+from samba.upgradehelpers import (get_paths, get_ldbs,
+ identic_rename,
+ updateOEMInfo, getOEMInfo, update_gpo,
+ delta_update_basesamdb,
+ update_dns_account_password,
+ search_constructed_attrs_stored,
+ increment_calculated_keyversion_number)
+from samba.tests import env_loadparm, TestCaseInTempDir
+from samba.tests.provision import create_dummy_secretsdb
+import ldb
+
+
+def dummymessage(a=None, b=None):
+ pass
+
+
+smb_conf_path = "%s/%s/%s" % (os.environ["SELFTEST_PREFIX"], "ad_dc_ntvfs", "etc/smb.conf")
+
+
+class UpgradeProvisionBasicLdbHelpersTestCase(TestCaseInTempDir):
+ """Some simple tests for individual functions in the provisioning code.
+ """
+
+ def test_get_ldbs(self):
+ paths = get_paths(param, None, smb_conf_path)
+ creds = Credentials()
+ lp = env_loadparm()
+ creds.guess(lp)
+ get_ldbs(paths, creds, system_session(), lp)
+
+ def test_find_key_param(self):
+ paths = get_paths(param, None, smb_conf_path)
+ creds = Credentials()
+ lp = env_loadparm()
+ creds.guess(lp)
+ rootdn = "dc=samba,dc=example,dc=com"
+ ldbs = get_ldbs(paths, creds, system_session(), lp)
+ names = find_provision_key_parameters(ldbs.sam, ldbs.secrets, ldbs.idmap,
+ paths, smb_conf_path, lp)
+ self.assertEqual(names.realm, "SAMBA.EXAMPLE.COM")
+ self.assertEqual(str(names.rootdn).lower(), rootdn.lower())
+ self.assertNotEqual(names.policyid_dc, None)
+ self.assertNotEqual(names.ntdsguid, "")
+
+
+class UpgradeProvisionWithLdbTestCase(TestCaseInTempDir):
+
+ def _getEmptyDbName(self):
+ return os.path.join(self.tempdir, "sam.ldb")
+
+ def setUp(self):
+ super().setUp()
+ paths = get_paths(param, None, smb_conf_path)
+ self.creds = Credentials()
+ self.lp = env_loadparm()
+ self.creds.guess(self.lp)
+ self.paths = paths
+ self.ldbs = get_ldbs(paths, self.creds, system_session(), self.lp)
+ self.names = find_provision_key_parameters(self.ldbs.sam,
+ self.ldbs.secrets, self.ldbs.idmap, paths, smb_conf_path,
+ self.lp)
+ self.referencedb = create_dummy_secretsdb(
+ os.path.join(self.tempdir, "ref.ldb"))
+
+ def test_search_constructed_attrs_stored(self):
+ hashAtt = search_constructed_attrs_stored(self.ldbs.sam,
+ self.names.rootdn,
+ ["msds-KeyVersionNumber"])
+ self.assertFalse("msds-KeyVersionNumber" in hashAtt)
+
+ def test_increment_calculated_keyversion_number(self):
+ dn = "CN=Administrator,CN=Users,%s" % self.names.rootdn
+ # We construct a simple hash for the user administrator
+ hash = {}
+ # And we want the version to be 140
+ hash[dn.lower()] = 140
+
+ increment_calculated_keyversion_number(self.ldbs.sam,
+ self.names.rootdn,
+ hash)
+ self.assertEqual(self.ldbs.sam.get_attribute_replmetadata_version(dn,
+ "unicodePwd"),
+ 140)
+ # This function should not decrement the version
+ hash[dn.lower()] = 130
+
+ increment_calculated_keyversion_number(self.ldbs.sam,
+ self.names.rootdn,
+ hash)
+ self.assertEqual(self.ldbs.sam.get_attribute_replmetadata_version(dn,
+ "unicodePwd"),
+ 140)
+
+ def test_identic_rename(self):
+ rootdn = "DC=samba,DC=example,DC=com"
+
+ guestDN = ldb.Dn(self.ldbs.sam, "CN=Guest,CN=Users,%s" % rootdn)
+ identic_rename(self.ldbs.sam, guestDN)
+ res = self.ldbs.sam.search(expression="(name=Guest)", base=rootdn,
+ scope=ldb.SCOPE_SUBTREE, attrs=["dn"])
+ self.assertEqual(len(res), 1)
+ self.assertEqual(str(res[0]["dn"]), "CN=Guest,CN=Users,%s" % rootdn)
+
+ def test_delta_update_basesamdb(self):
+ dummysampath = self._getEmptyDbName()
+ delta_update_basesamdb(self.paths.samdb, dummysampath,
+ self.creds, system_session(), self.lp,
+ dummymessage)
+
+ def test_update_gpo_simple(self):
+ dir = getpolicypath(self.paths.sysvol, self.names.dnsdomain,
+ self.names.policyid)
+ shutil.rmtree(dir)
+ self.assertFalse(os.path.isdir(dir))
+ update_gpo(self.paths, self.names)
+ self.assertTrue(os.path.isdir(dir))
+
+ def test_update_gpo_acl(self):
+ path = os.path.join(self.tempdir, "testupdategpo")
+ save = self.paths.sysvol
+ self.paths.sysvol = path
+ os.mkdir(path)
+ os.mkdir(os.path.join(path, self.names.dnsdomain))
+ os.mkdir(os.path.join(os.path.join(path, self.names.dnsdomain),
+ "Policies"))
+ update_gpo(self.paths, self.names)
+ shutil.rmtree(path)
+ self.paths.sysvol = save
+
+ def test_getOEMInfo(self):
+ realm = self.lp.get("realm")
+ basedn = "DC=%s" % realm.replace(".", ", DC=")
+ oem = getOEMInfo(self.ldbs.sam, basedn)
+ self.assertNotEqual(oem, "")
+
+ def test_update_dns_account(self):
+ update_dns_account_password(self.ldbs.sam, self.ldbs.secrets,
+ self.names)
+
+ def test_updateOEMInfo(self):
+ realm = self.lp.get("realm")
+ basedn = "DC=%s" % realm.replace(".", ", DC=")
+ oem = getOEMInfo(self.ldbs.sam, basedn)
+ updateOEMInfo(self.ldbs.sam, basedn)
+ oem2 = getOEMInfo(self.ldbs.sam, basedn)
+ self.assertNotEqual(str(oem), str(oem2))
+ self.assertTrue(re.match(".*upgrade to.*", str(oem2)))
+
+ def tearDown(self):
+ for name in ["ref.ldb", "secrets.ldb", "secrets.tdb", "secrets.tdb.bak", "secrets.ntdb", "sam.ldb"]:
+ path = os.path.join(self.tempdir, name)
+ if os.path.exists(path):
+ os.unlink(path)
+ super().tearDown()
diff --git a/python/samba/tests/usage.py b/python/samba/tests/usage.py
new file mode 100644
index 0000000..3312bfe
--- /dev/null
+++ b/python/samba/tests/usage.py
@@ -0,0 +1,380 @@
+# Unix SMB/CIFS implementation.
+# Copyright © Douglas Bagnall <douglas.bagnall@catalyst.net.nz>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import subprocess
+from samba.tests import TestCase, check_help_consistency
+import re
+import stat
+
+if 'SRCDIR_ABS' in os.environ:
+ BASEDIR = os.environ['SRCDIR_ABS']
+else:
+ BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
+ '../../..'))
+
+TEST_DIRS = [
+ "bootstrap",
+ "testdata",
+ "ctdb",
+ "dfs_server",
+ "pidl",
+ "auth",
+ "packaging",
+ "python",
+ "include",
+ "nsswitch",
+ "libcli",
+ "coverity",
+ "release-scripts",
+ "testprogs",
+ "bin",
+ "source3",
+ "docs-xml",
+ "buildtools",
+ "file_server",
+ "dynconfig",
+ "source4",
+ "tests",
+ "libds",
+ "selftest",
+ "lib",
+ "script",
+ "traffic",
+ "testsuite",
+ "libgpo",
+ "wintest",
+ "librpc",
+]
+
+
+EXCLUDE_USAGE = {
+ 'script/autobuild.py', # defaults to mount /memdisk/
+ 'script/bisect-test.py',
+ 'ctdb/utils/etcd/ctdb_etcd_lock',
+ 'selftest/filter-subunit',
+ 'selftest/format-subunit',
+ 'bin/gen_output.py', # too much output!
+ 'source4/scripting/bin/gen_output.py',
+ 'lib/ldb/tests/python/index.py',
+ 'lib/ldb/tests/python/api.py',
+ 'source4/selftest/tests.py',
+ 'buildtools/bin/waf',
+ 'selftest/tap2subunit',
+ 'script/show_test_time',
+ 'source4/scripting/bin/subunitrun',
+ 'bin/samba_downgrade_db',
+ 'source4/scripting/bin/samba_downgrade_db',
+ 'source3/selftest/tests.py',
+ 'selftest/tests.py',
+ 'python/samba/subunit/run.py',
+ 'bin/python/samba/subunit/run.py',
+ 'lib/compression/tests/scripts/three-byte-hash',
+}
+
+EXCLUDE_HELP = {
+ 'selftest/tap2subunit',
+ 'wintest/test-s3.py',
+ 'wintest/test-s4-howto.py',
+}
+
+
+EXCLUDE_DIRS = {
+ 'source3/script/tests',
+ 'python/examples',
+ 'source4/dsdb/tests/python',
+ 'bin/ab',
+ 'bin/python/samba/tests',
+ 'bin/python/samba/tests/blackbox',
+ 'bin/python/samba/tests/dcerpc',
+ 'bin/python/samba/tests/krb5',
+ 'bin/python/samba/tests/ndr',
+ 'python/samba/tests',
+ 'python/samba/tests/bin',
+ 'python/samba/tests/blackbox',
+ 'python/samba/tests/dcerpc',
+ 'python/samba/tests/krb5',
+ 'python/samba/tests/ndr',
+}
+
+
+def _init_git_file_finder():
+ """Generate a function that quickly answers the question:
+ 'is this a git file?'
+ """
+ git_file_cache = set()
+ p = subprocess.run(['git',
+ '-C', BASEDIR,
+ 'ls-files',
+ '-z'],
+ stdout=subprocess.PIPE)
+ if p.returncode == 0:
+ for fn in p.stdout.split(b'\0'):
+ git_file_cache.add(os.path.join(BASEDIR, fn.decode('utf-8')))
+ return git_file_cache.__contains__
+
+
+is_git_file = _init_git_file_finder()
+
+
+def script_iterator(d=BASEDIR, cache=None,
+ shebang_filter=None,
+ filename_filter=None,
+ subdirs=None):
+ if subdirs is None:
+ subdirs = TEST_DIRS
+ if not cache:
+ safename = re.compile(r'\W+').sub
+ for subdir in subdirs:
+ sd = os.path.join(d, subdir)
+ for root, dirs, files in os.walk(sd, followlinks=False):
+ for fn in files:
+ if fn.endswith('~'):
+ continue
+ if fn.endswith('.inst'):
+ continue
+ ffn = os.path.join(root, fn)
+ try:
+ s = os.stat(ffn)
+ except FileNotFoundError:
+ continue
+ if not s.st_mode & stat.S_IXUSR:
+ continue
+ if not (subdir == 'bin' or is_git_file(ffn)):
+ continue
+
+ if filename_filter is not None:
+ if not filename_filter(ffn):
+ continue
+
+ if shebang_filter is not None:
+ try:
+ f = open(ffn, 'rb')
+ except OSError as e:
+ print("could not open %s: %s" % (ffn, e))
+ continue
+ line = f.read(40)
+ f.close()
+ if not shebang_filter(line):
+ continue
+
+ name = safename('_', fn)
+ while name in cache:
+ name += '_'
+ cache[name] = ffn
+
+ return cache.items()
+
+# For ELF we only look at /bin/* top level.
+def elf_file_name(fn):
+ fn = fn.partition('bin/')[2]
+ return fn and '/' not in fn and 'test' not in fn and 'ldb' in fn
+
+def elf_shebang(x):
+ return x[:4] == b'\x7fELF'
+
+elf_cache = {}
+def elf_iterator():
+ return script_iterator(BASEDIR, elf_cache,
+ shebang_filter=elf_shebang,
+ filename_filter=elf_file_name,
+ subdirs=['bin'])
+
+
+perl_shebang = re.compile(br'#!.+perl').match
+
+perl_script_cache = {}
+def perl_script_iterator():
+ return script_iterator(BASEDIR, perl_script_cache, perl_shebang)
+
+
+python_shebang = re.compile(br'#!.+python').match
+
+python_script_cache = {}
+def python_script_iterator():
+ return script_iterator(BASEDIR, python_script_cache, python_shebang)
+
+
+class PerlScriptUsageTests(TestCase):
+ """Perl scripts run without arguments should print a usage string,
+ not fail with a traceback.
+ """
+
+ @classmethod
+ def initialise(cls):
+ for name, filename in perl_script_iterator():
+ print(name, filename)
+
+
+class PythonScriptUsageTests(TestCase):
+ """Python scripts run without arguments should print a usage string,
+ not fail with a traceback.
+ """
+
+ @classmethod
+ def initialise(cls):
+ for name, filename in python_script_iterator():
+ # We add the actual tests after the class definition so we
+ # can give individual names to them, so we can have a
+ # knownfail list.
+ fn = filename.replace(BASEDIR, '').lstrip('/')
+
+ if fn in EXCLUDE_USAGE:
+ print("skipping %s (EXCLUDE_USAGE)" % filename)
+ continue
+
+ if os.path.dirname(fn) in EXCLUDE_DIRS:
+ print("skipping %s (EXCLUDE_DIRS)" % filename)
+ continue
+
+ def _f(self, filename=filename):
+ print(filename)
+ try:
+ p = subprocess.Popen(['python3', filename],
+ stderr=subprocess.PIPE,
+ stdout=subprocess.PIPE)
+ out, err = p.communicate(timeout=5)
+ except OSError as e:
+ self.fail("Error: %s" % e)
+ except subprocess.SubprocessError as e:
+ self.fail("Subprocess error: %s" % e)
+
+ err = err.decode('utf-8')
+ out = out.decode('utf-8')
+ self.assertNotIn('Traceback', err)
+
+ self.assertIn('usage', out.lower() + err.lower(),
+ 'stdout:\n%s\nstderr:\n%s' % (out, err))
+
+ attr = 'test_%s' % name
+ if hasattr(cls, attr):
+ raise RuntimeError(f'Usage test ‘{attr}’ already exists!')
+ setattr(cls, attr, _f)
+
+
+class HelpTestSuper(TestCase):
+ """Python scripts run with -h or --help should print a help string,
+ and exit with success.
+ """
+ check_return_code = True
+ check_consistency = True
+ check_contains_usage = True
+ check_multiline = True
+ check_merged_out_and_err = False
+
+ interpreter = None
+
+ options_start = None
+ options_end = None
+ def iterator(self):
+ raise NotImplementedError("Subclass this "
+ "and add an iterator function!")
+
+ @classmethod
+ def initialise(cls):
+ for name, filename in cls.iterator():
+ # We add the actual tests after the class definition so we
+ # can give individual names to them, so we can have a
+ # knownfail list.
+ fn = filename.replace(BASEDIR, '').lstrip('/')
+
+ if fn in EXCLUDE_HELP:
+ print("skipping %s (EXCLUDE_HELP)" % filename)
+ continue
+
+ if os.path.dirname(fn) in EXCLUDE_DIRS:
+ print("skipping %s (EXCLUDE_DIRS)" % filename)
+ continue
+
+ def _f(self, filename=filename):
+ print(filename)
+ for h in ('--help', '-h'):
+ cmd = [filename, h]
+ if self.interpreter:
+ cmd.insert(0, self.interpreter)
+ try:
+ p = subprocess.Popen(cmd,
+ stderr=subprocess.PIPE,
+ stdout=subprocess.PIPE)
+ out, err = p.communicate(timeout=5)
+ except OSError as e:
+ self.fail("Error: %s" % e)
+ except subprocess.SubprocessError as e:
+ self.fail("Subprocess error: %s" % e)
+
+ err = err.decode('utf-8')
+ out = out.decode('utf-8')
+ if self.check_merged_out_and_err:
+ out = "%s\n%s" % (out, err)
+
+ outl = out[:500].lower()
+ # NOTE:
+ # These assertions are heuristics, not policy.
+ # If your script fails this test when it shouldn't
+ # just add it to EXCLUDE_HELP above or change the
+ # heuristic.
+
+ # --help should produce:
+ # * multiple lines of help on stdout (not stderr),
+ # * including a "Usage:" string,
+ # * not contradict itself or repeat options,
+ # * and return success.
+ #print(out.encode('utf8'))
+ #print(err.encode('utf8'))
+ if self.check_consistency:
+ errors = check_help_consistency(out,
+ self.options_start,
+ self.options_end)
+ if errors is not None:
+ self.fail(errors)
+
+ if self.check_return_code:
+ self.assertEqual(p.returncode, 0,
+ "%s %s\nreturncode should not be %d\n"
+ "err:\n%s\nout:\n%s" %
+ (filename, h, p.returncode, err, out))
+ if self.check_contains_usage:
+ self.assertIn('usage', outl, 'lacks "Usage:"\n')
+ if self.check_multiline:
+ self.assertIn('\n', out, 'expected multi-line output')
+
+ attr = 'test_%s' % name
+ if hasattr(cls, attr):
+ raise RuntimeError(f'Usage test ‘{attr}’ already exists!')
+ setattr(cls, attr, _f)
+
+
+class PythonScriptHelpTests(HelpTestSuper):
+ """Python scripts run with -h or --help should print a help string,
+ and exit with success.
+ """
+ iterator = python_script_iterator
+ interpreter = 'python3'
+
+
+class ElfHelpTests(HelpTestSuper):
+ """ELF binaries run with -h or --help should print a help string,
+ and exit with success.
+ """
+ iterator = elf_iterator
+ check_return_code = False
+ check_merged_out_and_err = True
+
+
+PerlScriptUsageTests.initialise()
+PythonScriptUsageTests.initialise()
+PythonScriptHelpTests.initialise()
+ElfHelpTests.initialise()
diff --git a/python/samba/tests/xattr.py b/python/samba/tests/xattr.py
new file mode 100644
index 0000000..745b753
--- /dev/null
+++ b/python/samba/tests/xattr.py
@@ -0,0 +1,159 @@
+# Unix SMB/CIFS implementation. Tests for xattr manipulation
+# Copyright (C) Matthieu Patou <mat@matws.net> 2009
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Tests for samba.xattr_native and samba.xattr_tdb."""
+
+import samba.xattr_native
+import samba.xattr_tdb
+import samba.posix_eadb
+from samba.xattr import copytree_with_xattrs
+from samba.dcerpc import xattr
+from samba.ndr import ndr_pack
+from samba.tests import (
+ SkipTest,
+ TestCase,
+ TestCaseInTempDir,
+)
+import random
+import shutil
+import os
+
+
+class XattrTests(TestCase):
+
+ def _tmpfilename(self):
+ random.seed()
+ path = os.environ['SELFTEST_PREFIX']
+ return os.path.join(path, "pytests" +str(int(100000 * random.random())))
+
+ def _eadbpath(self):
+ return os.path.join(os.environ['SELFTEST_PREFIX'], "eadb.tdb")
+
+ def test_set_xattr_native(self):
+ if not samba.xattr_native.is_xattr_supported():
+ raise SkipTest()
+ ntacl = xattr.NTACL()
+ ntacl.version = 1
+ tempf = self._tmpfilename()
+ open(tempf, 'w').write("empty")
+ try:
+ samba.xattr_native.wrap_setxattr(tempf, "user.unittests",
+ ndr_pack(ntacl))
+ except IOError:
+ raise SkipTest("the filesystem where the tests are run does not "
+ "support XATTR")
+ os.unlink(tempf)
+
+ def test_set_and_get_native(self):
+ if not samba.xattr_native.is_xattr_supported():
+ raise SkipTest()
+ tempf = self._tmpfilename()
+ reftxt = b"this is a test"
+ open(tempf, 'w').write("empty")
+ try:
+ samba.xattr_native.wrap_setxattr(tempf, "user.unittests", reftxt)
+ text = samba.xattr_native.wrap_getxattr(tempf, "user.unittests")
+ self.assertEqual(text, reftxt)
+ except IOError:
+ raise SkipTest("the filesystem where the tests are run does not "
+ "support XATTR")
+ os.unlink(tempf)
+
+ def test_set_xattr_tdb(self):
+ tempf = self._tmpfilename()
+ eadb_path = self._eadbpath()
+ ntacl = xattr.NTACL()
+ ntacl.version = 1
+ open(tempf, 'w').write("empty")
+ try:
+ samba.xattr_tdb.wrap_setxattr(eadb_path,
+ tempf, "user.unittests", ndr_pack(ntacl))
+ finally:
+ os.unlink(tempf)
+ os.unlink(eadb_path)
+
+ def test_set_tdb_not_open(self):
+ tempf = self._tmpfilename()
+ ntacl = xattr.NTACL()
+ ntacl.version = 1
+ open(tempf, 'w').write("empty")
+ try:
+ self.assertRaises(IOError, samba.xattr_tdb.wrap_setxattr,
+ os.path.join("nonexistent", "eadb.tdb"), tempf,
+ "user.unittests", ndr_pack(ntacl))
+ finally:
+ os.unlink(tempf)
+
+ def test_set_and_get_tdb(self):
+ tempf = self._tmpfilename()
+ eadb_path = self._eadbpath()
+ reftxt = b"this is a test"
+ open(tempf, 'w').write("empty")
+ try:
+ samba.xattr_tdb.wrap_setxattr(eadb_path, tempf, "user.unittests",
+ reftxt)
+ text = samba.xattr_tdb.wrap_getxattr(eadb_path, tempf,
+ "user.unittests")
+ self.assertEqual(text, reftxt)
+ finally:
+ os.unlink(tempf)
+ os.unlink(eadb_path)
+
+ def test_set_posix_eadb(self):
+ tempf = self._tmpfilename()
+ eadb_path = self._eadbpath()
+ ntacl = xattr.NTACL()
+ ntacl.version = 1
+ open(tempf, 'w').write("empty")
+ try:
+ samba.posix_eadb.wrap_setxattr(eadb_path,
+ tempf, "user.unittests", ndr_pack(ntacl))
+ finally:
+ os.unlink(tempf)
+ os.unlink(eadb_path)
+
+ def test_set_and_get_posix_eadb(self):
+ tempf = self._tmpfilename()
+ eadb_path = self._eadbpath()
+ reftxt = b"this is a test"
+ open(tempf, 'w').write("empty")
+ try:
+ samba.posix_eadb.wrap_setxattr(eadb_path, tempf, "user.unittests",
+ reftxt)
+ text = samba.posix_eadb.wrap_getxattr(eadb_path, tempf,
+ "user.unittests")
+ self.assertEqual(text, reftxt)
+ finally:
+ os.unlink(tempf)
+ os.unlink(eadb_path)
+
+
+class TestCopyTreeWithXattrs(TestCaseInTempDir):
+
+ def test_simple(self):
+ os.chdir(self.tempdir)
+ os.mkdir("a")
+ os.mkdir("a/b")
+ os.mkdir("a/b/c")
+ f = open('a/b/c/d', 'w')
+ try:
+ f.write("foo")
+ finally:
+ f.close()
+ copytree_with_xattrs("a", "b")
+ shutil.rmtree("a")
+ shutil.rmtree("b")
diff --git a/python/samba/trust_utils.py b/python/samba/trust_utils.py
new file mode 100644
index 0000000..b4df0fa
--- /dev/null
+++ b/python/samba/trust_utils.py
@@ -0,0 +1,62 @@
+# trust utils
+#
+# Copyright Isaac Boukris 2020
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+from samba.dcerpc import lsa, drsblobs
+from samba.ndr import ndr_pack
+from samba import arcfour_encrypt, string_to_byte_array
+import random
+from samba import crypto
+
+def CreateTrustedDomainRelax(lsaconn, policy, trust_info, mask, in_blob, out_blob):
+
+ def generate_AuthInfoInternal(session_key, incoming=None, outgoing=None):
+ confounder = [0] * 512
+ for i in range(len(confounder)):
+ confounder[i] = random.randint(0, 255)
+
+ trustpass = drsblobs.trustDomainPasswords()
+
+ trustpass.confounder = confounder
+ trustpass.outgoing = outgoing
+ trustpass.incoming = incoming
+
+ trustpass_blob = ndr_pack(trustpass)
+
+ encrypted_trustpass = arcfour_encrypt(session_key, trustpass_blob)
+
+ auth_blob = lsa.DATA_BUF2()
+ auth_blob.size = len(encrypted_trustpass)
+ auth_blob.data = string_to_byte_array(encrypted_trustpass)
+
+ auth_info = lsa.TrustDomainInfoAuthInfoInternal()
+ auth_info.auth_blob = auth_blob
+
+ return auth_info
+
+ session_key = lsaconn.session_key
+
+ try:
+ if lsaconn.transport_encrypted():
+ crypto.set_relax_mode()
+ auth_info = generate_AuthInfoInternal(session_key,
+ incoming=in_blob,
+ outgoing=out_blob)
+ finally:
+ crypto.set_strict_mode()
+
+ return lsaconn.CreateTrustedDomainEx2(policy, trust_info, auth_info, mask)
diff --git a/python/samba/upgrade.py b/python/samba/upgrade.py
new file mode 100644
index 0000000..969f8ab
--- /dev/null
+++ b/python/samba/upgrade.py
@@ -0,0 +1,849 @@
+# backend code for upgrading from Samba3
+# Copyright Jelmer Vernooij 2005-2007
+# Copyright Andrew Bartlett 2011
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""Support code for upgrading from Samba 3 to Samba 4."""
+
+__docformat__ = "restructuredText"
+
+import ldb
+import time
+import pwd
+
+from samba import Ldb, registry
+from samba.provision import provision, ProvisioningError, setsysvolacl
+from samba.provision.common import FILL_FULL
+from samba.samba3 import passdb
+from samba.samba3 import param as s3param
+from samba.dcerpc import lsa, samr, security
+from samba.dcerpc.security import dom_sid
+from samba.credentials import Credentials
+from samba import dsdb
+from samba.ndr import ndr_pack
+from samba import unix2nttime
+from samba import generate_random_password
+
+
def import_sam_policy(samdb, policy, logger):
    """Import a Samba 3 account policy into the Samba4 SAM.

    Only the policies that map onto AD attributes are copied over:
    min password length, password history, minimum/maximum password age
    and lockout duration.  The remaining Samba3 policies (reset count
    minutes, user must logon to change password, bad lockout minutes,
    disconnect time) have no AD equivalent and are ignored.

    :param samdb: Samba4 SAM database
    :param policy: Samba3 account policy
    :param logger: Logger object
    """

    def min_age_nt(secs):
        # Unix seconds -> negative NT 100ns interval
        return int(-secs * (1e7))

    def max_age_nt(secs):
        # The "never expires" markers all map to the AD sentinel value.
        if secs in (-1, 0, 0xFFFFFFFF):
            return -0x8000000000000000
        return int(-secs * (1e7))

    def lockout_nt(mins):
        return unix2nttime(mins * 60)

    # (samba3 policy key, element key, AD attribute, value conversion)
    conversions = [
        ('min password length', 'a01', 'minPwdLength', str),
        ('password history', 'a02', 'pwdHistoryLength', str),
        ('minimum password age', 'a03', 'minPwdAge',
         lambda v: str(min_age_nt(v))),
        ('maximum password age', 'a04', 'maxPwdAge',
         lambda v: str(max_age_nt(v))),
        ('lockout duration', 'a05', 'lockoutDuration',
         lambda v: str(lockout_nt(v))),
    ]

    update = ldb.Message()
    update.dn = samdb.get_default_basedn()

    for policy_key, elem_key, ad_attr, convert in conversions:
        if policy_key in policy:
            update[elem_key] = ldb.MessageElement(convert(policy[policy_key]),
                                                  ldb.FLAG_MOD_REPLACE,
                                                  ad_attr)

    try:
        samdb.modify(update)
    except ldb.LdbError as e:
        logger.warn("Could not set account policy, (%s)", str(e))
+
+
def add_posix_attrs(logger, samdb, sid, nisdomain, xid_type, home=None,
                    shell=None, pgid=None):
    """Add posix attributes for the user/group

    :param logger: Logger object
    :param samdb: Samba4 sam.ldb database
    :param sid: user/group sid
    :param nisdomain: name of the (fake) NIS domain
    :param xid_type: type of id (ID_TYPE_UID/ID_TYPE_GID)
    :param home: user homedir (Unix homepath); only used for ID_TYPE_UID
    :param shell: user shell; only used for ID_TYPE_UID
    :param pgid: users primary group id; only used for ID_TYPE_UID
    """

    try:
        m = ldb.Message()
        # Locate the object via its SID instead of building a DN.
        m.dn = ldb.Dn(samdb, "<SID=%s>" % str(sid))
        if xid_type == "ID_TYPE_UID":
            # home/shell/primary-group only make sense for users.
            m['unixHomeDirectory'] = ldb.MessageElement(
                str(home), ldb.FLAG_MOD_REPLACE, 'unixHomeDirectory')
            m['loginShell'] = ldb.MessageElement(
                str(shell), ldb.FLAG_MOD_REPLACE, 'loginShell')
            m['gidNumber'] = ldb.MessageElement(
                str(pgid), ldb.FLAG_MOD_REPLACE, 'gidNumber')

        # Both users and groups get tagged with the NIS domain.
        m['msSFU30NisDomain'] = ldb.MessageElement(
            str(nisdomain), ldb.FLAG_MOD_REPLACE, 'msSFU30NisDomain')

        samdb.modify(m)
    except ldb.LdbError as e:
        # Best effort: a failure here is logged but does not abort.
        logger.warn(
            'Could not add posix attrs for AD entry for sid=%s, (%s)',
            str(sid), str(e))
+
+
def add_ad_posix_idmap_entry(samdb, sid, xid, xid_type, logger):
    """Create idmap entry

    :param samdb: Samba4 sam.ldb database
    :param sid: user/group sid
    :param xid: user/group id
    :param xid_type: type of id (ID_TYPE_UID/ID_TYPE_GID)
    :param logger: Logger object
    """
    # id attribute / auxiliary objectClass pair for each id type
    posix_details = {"ID_TYPE_UID": ("uidNumber", "posixAccount"),
                     "ID_TYPE_GID": ("gidNumber", "posixGroup")}

    try:
        entry = ldb.Message()
        entry.dn = ldb.Dn(samdb, "<SID=%s>" % str(sid))
        if xid_type in posix_details:
            id_attr, aux_class = posix_details[xid_type]
            entry[id_attr] = ldb.MessageElement(
                str(xid), ldb.FLAG_MOD_REPLACE, id_attr)
            entry['objectClass'] = ldb.MessageElement(
                aux_class, ldb.FLAG_MOD_ADD, 'objectClass')

        samdb.modify(entry)
    except ldb.LdbError as e:
        logger.warn(
            'Could not modify AD idmap entry for sid=%s, id=%s, type=%s (%s)',
            str(sid), str(xid), xid_type, str(e))
+
+
def add_idmap_entry(idmapdb, sid, xid, xid_type, logger):
    """Create idmap entry

    :param idmapdb: Samba4 IDMAP database
    :param sid: user/group sid
    :param xid: user/group id
    :param xid_type: type of id (ID_TYPE_UID/ID_TYPE_GID)
    :param logger: Logger object
    """

    # If a mapping for this SID already exists, update it in place;
    # otherwise create a fresh sidMap record.
    res = idmapdb.search(expression='objectSid=%s' % str(sid))
    if res.count == 1:
        try:
            update = ldb.Message()
            update.dn = res[0]['dn']
            update['xidNumber'] = ldb.MessageElement(
                str(xid), ldb.FLAG_MOD_REPLACE, 'xidNumber')
            update['type'] = ldb.MessageElement(
                xid_type, ldb.FLAG_MOD_REPLACE, 'type')
            idmapdb.modify(update)
        except ldb.LdbError as e:
            logger.warn(
                'Could not modify idmap entry for sid=%s, id=%s, type=%s (%s)',
                str(sid), str(xid), xid_type, str(e))
    else:
        try:
            idmapdb.add({"dn": "CN=%s" % str(sid),
                         "cn": str(sid),
                         "objectClass": "sidMap",
                         "objectSid": ndr_pack(sid),
                         "type": xid_type,
                         "xidNumber": str(xid)})
        except ldb.LdbError as e:
            logger.warn(
                'Could not add idmap entry for sid=%s, id=%s, type=%s (%s)',
                str(sid), str(xid), xid_type, str(e))
+
+
def import_idmap(idmapdb, samba3, logger):
    """Import idmap data.

    :param idmapdb: Samba4 IDMAP database
    :param samba3: Samba3 object (provides the Samba3 IDMAP database)
    :param logger: Logger object
    """

    try:
        samba3_idmap = samba3.get_idmap_db()
    except IOError as e:
        logger.warn('Cannot open idmap database, Ignoring: %s', str(e))
        return

    # Seed the Samba4 allocator from the Samba3 high-water marks.
    currentxid = max(samba3_idmap.get_user_hwm(), samba3_idmap.get_group_hwm())
    lowerbound = currentxid
    # FIXME: upperbound

    m = ldb.Message()
    m.dn = ldb.Dn(idmapdb, 'CN=CONFIG')
    m['lowerbound'] = ldb.MessageElement(
        str(lowerbound), ldb.FLAG_MOD_REPLACE, 'lowerBound')
    m['xidNumber'] = ldb.MessageElement(
        str(currentxid), ldb.FLAG_MOD_REPLACE, 'xidNumber')
    idmapdb.modify(m)

    # Copy each UID/GID mapping; other entry types are skipped.
    for id_type, xid in samba3_idmap.ids():
        if id_type == 'UID':
            xid_type = 'ID_TYPE_UID'
        elif id_type == 'GID':
            xid_type = 'ID_TYPE_GID'
        else:
            logger.warn('Wrong type of entry in idmap (%s), Ignoring', id_type)
            continue

        sid = samba3_idmap.get_sid(xid, id_type)
        add_idmap_entry(idmapdb, dom_sid(sid), xid, xid_type, logger)
+
+
def add_group_from_mapping_entry(samdb, groupmap, logger):
    """Add or modify group from group mapping entry

    :param samdb: Samba4 SAM database
    :param groupmap: Groupmap entry
    :param logger: Logger object
    """

    # First try to see if we already have this entry
    try:
        msg = samdb.search(
            base='<SID=%s>' % str(groupmap.sid), scope=ldb.SCOPE_BASE)
        found = True
    except ldb.LdbError as e1:
        (ecode, emsg) = e1.args
        if ecode == ldb.ERR_NO_SUCH_OBJECT:
            found = False
        else:
            raise ldb.LdbError(ecode, emsg)

    if found:
        logger.warn('Group already exists sid=%s, groupname=%s existing_groupname=%s, Ignoring.',
                    str(groupmap.sid), groupmap.nt_name, msg[0]['sAMAccountName'][0])
    else:
        if groupmap.sid_name_use == lsa.SID_NAME_WKN_GRP:
            # In a lot of Samba3 databases, aliases are marked as well known groups
            (group_dom_sid, rid) = groupmap.sid.split()
            if (group_dom_sid != security.dom_sid(security.SID_BUILTIN)):
                return

        m = ldb.Message()
        # We avoid using the format string to avoid needing to escape the CN values
        m.dn = ldb.Dn(samdb, "CN=X,CN=Users")
        m.dn.set_component(0, "CN", groupmap.nt_name)
        m.dn.add_base(samdb.get_default_basedn())
        m['objectClass'] = ldb.MessageElement('group', ldb.FLAG_MOD_ADD, 'objectClass')
        m['objectSid'] = ldb.MessageElement(ndr_pack(groupmap.sid), ldb.FLAG_MOD_ADD,
                                            'objectSid')
        m['sAMAccountName'] = ldb.MessageElement(groupmap.nt_name, ldb.FLAG_MOD_ADD,
                                                 'sAMAccountName')

        if groupmap.comment:
            m['description'] = ldb.MessageElement(groupmap.comment, ldb.FLAG_MOD_ADD,
                                                  'description')

        # Fix up incorrect 'well known' groups that are actually builtin (per test above) to be aliases
        if groupmap.sid_name_use == lsa.SID_NAME_ALIAS or groupmap.sid_name_use == lsa.SID_NAME_WKN_GRP:
            m['groupType'] = ldb.MessageElement(str(dsdb.GTYPE_SECURITY_DOMAIN_LOCAL_GROUP),
                                                ldb.FLAG_MOD_ADD, 'groupType')

        try:
            # The relax control permits setting system-maintained
            # attributes (e.g. the migrated objectSid) on add.
            samdb.add(m, controls=["relax:0"])
        except ldb.LdbError as e:
            logger.warn('Could not add group name=%s (%s)', groupmap.nt_name, str(e))
+
+
def add_users_to_group(samdb, group, members, logger):
    """Add user/member to group/alias

    :param samdb: Samba4 SAM database
    :param group: Groupmap object
    :param members: List of member SIDs
    :param logger: Logger object
    """
    group_dn = "<SID=%s>" % str(group.sid)
    for member_sid in members:
        entry = ldb.Message()
        entry.dn = ldb.Dn(samdb, group_dn)
        entry['a01'] = ldb.MessageElement("<SID=%s>" % str(member_sid),
                                          ldb.FLAG_MOD_ADD, 'member')

        try:
            samdb.modify(entry)
        except ldb.LdbError as e:
            (ecode, emsg) = e.args
            if ecode == ldb.ERR_ENTRY_ALREADY_EXISTS:
                # Already a member: harmless, just note it.
                logger.debug("skipped re-adding member '%s' to group '%s': %s",
                             member_sid, group.sid, emsg)
            elif ecode == ldb.ERR_NO_SUCH_OBJECT:
                raise ProvisioningError("Could not add member '%s' to group '%s' as either group or user record doesn't exist: %s" % (member_sid, group.sid, emsg))
            else:
                raise ProvisioningError("Could not add member '%s' to group '%s': %s" % (member_sid, group.sid, emsg))
+
+
def import_wins(samba4_winsdb, samba3_winsdb):
    """Import settings from a Samba3 WINS database.

    :param samba4_winsdb: WINS database to import to
    :param samba3_winsdb: WINS database to import from
    """

    version_id = 0

    for (name, (ttl, ips, nb_flags)) in samba3_winsdb.items():
        version_id += 1

        # A Samba3 WINS key is "NAME#TT", where TT is the NetBIOS name
        # type as two hex digits.  Split once (maxsplit=1) so a stray
        # extra '#' cannot break the dn formatting below, and avoid
        # re-splitting (and shadowing the builtin 'type') as before.
        (nb_name, type_str) = name.split("#", 1)
        nb_type = int(type_str, 16)

        # Derive the record type from the name type and address count.
        if nb_type == 0x1C:
            rType = 0x2
        elif nb_type & 0x80:
            if len(ips) > 1:
                rType = 0x2
            else:
                rType = 0x1
        else:
            if len(ips) > 1:
                rType = 0x3
            else:
                rType = 0x0

        # Records whose TTL is already in the past are imported released.
        if ttl > time.time():
            rState = 0x0  # active
        else:
            rState = 0x1  # released

        nType = ((nb_flags & 0x60) >> 5)

        samba4_winsdb.add({"dn": "name=%s,type=0x%s" % (nb_name, type_str),
                           "type": type_str,
                           "name": nb_name,
                           "objectClass": "winsRecord",
                           "recordType": str(rType),
                           "recordState": str(rState),
                           "nodeType": str(nType),
                           "expireTime": ldb.timestring(ttl),
                           "isStatic": "0",
                           "versionID": str(version_id),
                           "address": ips})

    # Record the highest version number handed out above.
    samba4_winsdb.add({"dn": "cn=VERSION",
                       "cn": "VERSION",
                       "objectClass": "winsMaxVersion",
                       "maxVersion": str(version_id)})
+
+
# Map of Samba3 predefined registry hive names to Samba4 registry handle
# constants.  Only HKLM is listed, so other hives will raise KeyError
# when looked up during registry import.
SAMBA3_PREDEF_NAMES = {
    'HKLM': registry.HKEY_LOCAL_MACHINE,
}
+
+
def import_registry(samba4_registry, samba3_regdb):
    """Import a Samba 3 registry database into the Samba 4 registry.

    :param samba4_registry: Samba 4 registry handle.
    :param samba3_regdb: Samba 3 registry database handle.
    """
    def open_key(path):
        # Samba3 paths look like "HKLM/Sub/Key": map the predefined root
        # name to its handle and convert separators for the Samba4 API.
        (root, remainder) = path.split("/", 1)
        return samba4_registry.create_key(SAMBA3_PREDEF_NAMES[root],
                                          remainder.replace("/", "\\"))

    for key in samba3_regdb.keys():
        handle = open_key(key)
        for subkey in samba3_regdb.subkeys(key):
            open_key(subkey)
        for (value_name, (value_type, value_data)) in samba3_regdb.values(key).items():
            handle.set_value(value_name, value_type, value_data)
+
+
def get_posix_attr_from_ldap_backend(logger, ldb_object, base_dn, user, attr):
    """Get posix attributes from a samba3 ldap backend

    :param logger: Logger object
    :param ldb_object: ldb connection to the Samba3 LDAP backend
    :param base_dn: the base_dn of the connection
    :param user: the user to get the attribute for
    :param attr: the attribute to be retrieved
    :raises ProvisioningError: if the LDAP search itself fails
    :raises KeyError: if the entry exists but lacks the attribute
    """
    try:
        msg = ldb_object.search(base_dn, scope=ldb.SCOPE_SUBTREE,
                                expression=("(&(objectClass=posixAccount)(uid=%s))"
                                            % (user)), attrs=[attr])
    except ldb.LdbError as e:
        raise ProvisioningError("Failed to retrieve attribute %s for user %s, the error is: %s" % (attr, user, e))
    else:
        if msg.count <= 1:
            # This will raise KeyError (which is what we want) if there isn't a entry for this user
            # (an empty result instead raises IndexError on msg[0];
            # callers handle both exceptions the same way)
            return msg[0][attr][0]
        else:
            logger.warning("LDAP entry for user %s contains more than one %s", user, attr)
            raise KeyError
+
+
def upgrade_from_samba3(samba3, logger, targetdir, session_info=None,
                        useeadb=False, dns_backend=None, use_ntvfs=False):
    """Upgrade from samba3 database to samba4 AD database

    :param samba3: samba3 object
    :param logger: Logger object
    :param targetdir: samba4 database directory
    :param session_info: Session information
    :param useeadb: passed through to provision()
    :param dns_backend: passed through to provision(); forced to "NONE"
        below unless the source server was a domain controller
    :param use_ntvfs: passed through to provision() and setsysvolacl()
    """
    serverrole = samba3.lp.server_role()

    domainname = samba3.lp.get("workgroup")
    realm = samba3.lp.get("realm")
    netbiosname = samba3.lp.get("netbios name")

    if samba3.lp.get("ldapsam:trusted") is None:
        samba3.lp.set("ldapsam:trusted", "yes")

    # secrets db
    try:
        secrets_db = samba3.get_secrets_db()
    except IOError as e:
        raise ProvisioningError("Could not open '%s', the Samba3 secrets database: %s. Perhaps you specified the incorrect smb.conf, --testparm or --dbdir option?" % (samba3.privatedir_path("secrets.tdb"), str(e)))

    if not domainname:
        domainname = secrets_db.domains()[0]
        logger.warning("No workgroup specified in smb.conf file, assuming '%s'",
                       domainname)

    if not realm:
        if serverrole == "ROLE_DOMAIN_BDC" or serverrole == "ROLE_DOMAIN_PDC":
            raise ProvisioningError("No realm specified in smb.conf file and being a DC. That upgrade path doesn't work! Please add a 'realm' directive to your old smb.conf to let us know which one you want to use (it is the DNS name of the AD domain you wish to create.")
        else:
            realm = domainname.upper()
            logger.warning("No realm specified in smb.conf file, assuming '%s'",
                           realm)

    # Find machine account and password
    next_rid = 1000

    try:
        machinepass = secrets_db.get_machine_password(netbiosname)
    except KeyError:
        machinepass = None

    if samba3.lp.get("passdb backend").split(":")[0].strip() == "ldapsam":
        base_dn = samba3.lp.get("ldap suffix")
        ldapuser = samba3.lp.get("ldap admin dn")
        ldappass = secrets_db.get_ldap_bind_pw(ldapuser)
        if ldappass is None:
            raise ProvisioningError("ldapsam passdb backend detected but no LDAP Bind PW found in secrets.tdb for user %s. Please point this tool at the secrets.tdb that was used by the previous installation.")
        ldappass = ldappass.decode('utf-8').strip('\x00')
        ldap = True
    else:
        ldapuser = None
        ldappass = None
        ldap = False

    # We must close the direct pytdb database before the C code loads it
    secrets_db.close()

    # Connect to old password backend
    passdb.set_secrets_dir(samba3.lp.get("private dir"))
    s3db = samba3.get_sam_db()

    # Get domain sid
    try:
        domainsid = passdb.get_global_sam_sid()
    except passdb.error:
        raise Exception("Can't find domain sid for '%s', Exiting." % domainname)

    # Get machine account, sid, rid
    try:
        machineacct = s3db.getsampwnam('%s$' % netbiosname)
    except passdb.error:
        machinerid = None
    else:
        _, machinerid = machineacct.user_sid.split()

    # Export account policy
    logger.info("Exporting account policy")
    policy = s3db.get_account_policy()

    # Export groups from old passdb backend
    logger.info("Exporting groups")
    grouplist = s3db.enum_group_mapping()
    groupmembers = {}
    for group in grouplist:
        # Track the highest domain RID seen so provision can allocate above it.
        sid, rid = group.sid.split()
        if sid == domainsid:
            if rid >= next_rid:
                next_rid = rid + 1

        # Get members for each group/alias
        if group.sid_name_use == lsa.SID_NAME_ALIAS:
            try:
                members = s3db.enum_aliasmem(group.sid)
                groupmembers[str(group.sid)] = members
            except passdb.error as e:
                logger.warn("Ignoring group '%s' %s listed but then not found: %s",
                            group.nt_name, group.sid, e)
                continue
        elif group.sid_name_use == lsa.SID_NAME_DOM_GRP:
            try:
                members = s3db.enum_group_members(group.sid)
                groupmembers[str(group.sid)] = members
            except passdb.error as e:
                logger.warn("Ignoring group '%s' %s listed but then not found: %s",
                            group.nt_name, group.sid, e)
                continue
        elif group.sid_name_use == lsa.SID_NAME_WKN_GRP:
            (group_dom_sid, rid) = group.sid.split()
            if (group_dom_sid != security.dom_sid(security.SID_BUILTIN)):
                logger.warn("Ignoring 'well known' group '%s' (should already be in AD, and have no members)",
                            group.nt_name)
                continue
            # A number of buggy databases mix up well known groups and aliases.
            try:
                members = s3db.enum_aliasmem(group.sid)
                groupmembers[str(group.sid)] = members
            except passdb.error as e:
                logger.warn("Ignoring group '%s' %s listed but then not found: %s",
                            group.nt_name, group.sid, e)
                continue
        else:
            logger.warn("Ignoring group '%s' %s with sid_name_use=%d",
                        group.nt_name, group.sid, group.sid_name_use)
            continue

    # Export users from old passdb backend
    logger.info("Exporting users")
    userlist = s3db.search_users(0)
    userdata = {}
    uids = {}
    admin_user = None
    for entry in userlist:
        # The machine's own account was handled above; skip it here.
        if machinerid and machinerid == entry['rid']:
            continue
        username = entry['account_name']
        if entry['rid'] < 1000:
            logger.info(" Skipping wellknown rid=%d (for username=%s)", entry['rid'], username)
            continue
        if entry['rid'] >= next_rid:
            next_rid = entry['rid'] + 1

        user = s3db.getsampwnam(username)
        # Normalise the account-control flags; exactly one of these four
        # bits is expected to be set on a valid account.
        acct_type = (user.acct_ctrl & (samr.ACB_NORMAL |
                                       samr.ACB_WSTRUST |
                                       samr.ACB_SVRTRUST |
                                       samr.ACB_DOMTRUST))
        if acct_type == samr.ACB_SVRTRUST:
            logger.warn(" Demoting BDC account trust for %s, this DC must be elevated to an AD DC using 'samba-tool domain dcpromo'" % username[:-1])
            user.acct_ctrl = (user.acct_ctrl & ~samr.ACB_SVRTRUST) | samr.ACB_WSTRUST

        elif acct_type == samr.ACB_DOMTRUST:
            logger.warn(" Skipping inter-domain trust from domain %s, this trust must be re-created as an AD trust" % username[:-1])
            continue

        elif acct_type == (samr.ACB_WSTRUST) and username[-1] != '$':
            logger.warn(" Skipping account %s that has ACB_WSTRUST (W) set but does not end in $. This account can not have worked, and is probably left over from a misconfiguration." % username)
            continue

        elif acct_type == (samr.ACB_NORMAL | samr.ACB_WSTRUST) and username[-1] == '$':
            logger.warn(" Fixing account %s which had both ACB_NORMAL (U) and ACB_WSTRUST (W) set. Account will be marked as ACB_WSTRUST (W), i.e. as a domain member" % username)
            user.acct_ctrl = (user.acct_ctrl & ~samr.ACB_NORMAL)

        elif acct_type == (samr.ACB_NORMAL | samr.ACB_SVRTRUST) and username[-1] == '$':
            logger.warn(" Fixing account %s which had both ACB_NORMAL (U) and ACB_SVRTRUST (S) set. Account will be marked as ACB_WSTRUST (S), i.e. as a domain member" % username)
            user.acct_ctrl = (user.acct_ctrl & ~samr.ACB_NORMAL)

        elif acct_type == 0 and username[-1] != '$':
            user.acct_ctrl = (user.acct_ctrl | samr.ACB_NORMAL)

        elif (acct_type == samr.ACB_NORMAL or acct_type == samr.ACB_WSTRUST):
            pass

        else:
            raise ProvisioningError("""Failed to upgrade due to invalid account %s, account control flags 0x%08X must have exactly one of
ACB_NORMAL (N, 0x%08X), ACB_WSTRUST (W 0x%08X), ACB_SVRTRUST (S 0x%08X) or ACB_DOMTRUST (D 0x%08X).

Please fix this account before attempting to upgrade again
"""
                                    % (username, user.acct_ctrl,
                                       samr.ACB_NORMAL, samr.ACB_WSTRUST, samr.ACB_SVRTRUST, samr.ACB_DOMTRUST))

        userdata[username] = user
        # Prefer the passdb sid->uid mapping, fall back to the local
        # passwd database; leave the uid unknown if neither works.
        try:
            uids[username] = s3db.sid_to_id(user.user_sid)[0]
        except passdb.error:
            try:
                uids[username] = pwd.getpwnam(username).pw_uid
            except KeyError:
                pass

        # 'administrator' takes precedence over 'root' as the admin user.
        if not admin_user and username.lower() == 'root':
            admin_user = username
        if username.lower() == 'administrator':
            admin_user = username

        try:
            group_memberships = s3db.enum_group_memberships(user)
            for group in group_memberships:
                if str(group) in groupmembers:
                    if user.user_sid not in groupmembers[str(group)]:
                        groupmembers[str(group)].append(user.user_sid)
                else:
                    groupmembers[str(group)] = [user.user_sid]
        except passdb.error as e:
            logger.warn("Ignoring group memberships of '%s' %s: %s",
                        username, user.user_sid, e)

    logger.info("Next rid = %d", next_rid)

    # Check for same username/groupname
    group_names = set([g.nt_name for g in grouplist])
    user_names = set([u['account_name'] for u in userlist])
    common_names = group_names.intersection(user_names)
    if common_names:
        logger.error("Following names are both user names and group names:")
        for name in common_names:
            logger.error(" %s" % name)
        raise ProvisioningError("Please remove common user/group names before upgrade.")

    # Check for same user sid/group sid
    group_sids = set([str(g.sid) for g in grouplist])
    if len(grouplist) != len(group_sids):
        raise ProvisioningError("Please remove duplicate group sid entries before upgrade.")
    user_sids = set(["%s-%u" % (domainsid, u['rid']) for u in userlist])
    if len(userlist) != len(user_sids):
        raise ProvisioningError("Please remove duplicate user sid entries before upgrade.")
    common_sids = group_sids.intersection(user_sids)
    if common_sids:
        logger.error("Following sids are both user and group sids:")
        for sid in common_sids:
            logger.error(" %s" % str(sid))
        raise ProvisioningError("Please remove duplicate sid entries before upgrade.")

    # Get posix attributes from ldap or the os
    homes = {}
    shells = {}
    pgids = {}
    if ldap:
        creds = Credentials()
        creds.guess(samba3.lp)
        creds.set_bind_dn(ldapuser)
        creds.set_password(ldappass)
        urls = samba3.lp.get("passdb backend").split(":", 1)[1].strip('"')
        # Use the first LDAP URL we can actually connect to.
        for url in urls.split():
            try:
                ldb_object = Ldb(url, credentials=creds)
            except ldb.LdbError as e:
                raise ProvisioningError("Could not open ldb connection to %s, the error message is: %s" % (url, e))
            else:
                break
    logger.info("Exporting posix attributes")
    userlist = s3db.search_users(0)
    for entry in userlist:
        username = entry['account_name']
        if username in uids.keys():
            try:
                if ldap:
                    homes[username] = get_posix_attr_from_ldap_backend(logger, ldb_object, base_dn, username, "homeDirectory")
                else:
                    homes[username] = pwd.getpwnam(username).pw_dir
            except KeyError:
                pass
            except IndexError:
                pass

            try:
                if ldap:
                    shells[username] = get_posix_attr_from_ldap_backend(logger, ldb_object, base_dn, username, "loginShell")
                else:
                    shells[username] = pwd.getpwnam(username).pw_shell
            except KeyError:
                pass
            except IndexError:
                pass

            try:
                if ldap:
                    pgids[username] = get_posix_attr_from_ldap_backend(logger, ldb_object, base_dn, username, "gidNumber")
                else:
                    pgids[username] = pwd.getpwnam(username).pw_gid
            except KeyError:
                pass
            except IndexError:
                pass

    logger.info("Reading WINS database")
    samba3_winsdb = None
    try:
        samba3_winsdb = samba3.get_wins_db()
    except IOError as e:
        logger.warn('Cannot open wins database, Ignoring: %s', str(e))

    # Only a DC source keeps its requested DNS backend.
    if not (serverrole == "ROLE_DOMAIN_BDC" or serverrole == "ROLE_DOMAIN_PDC"):
        dns_backend = "NONE"

    # If we found an admin user, set a fake pw that we will override.
    # This avoids us printing out an admin password that we won't actually
    # set.
    if admin_user:
        adminpass = generate_random_password(12, 32)
    else:
        adminpass = None

    # Do full provision
    result = provision(logger, session_info,
                       targetdir=targetdir, realm=realm, domain=domainname,
                       domainsid=domainsid, next_rid=next_rid,
                       dc_rid=machinerid, adminpass=adminpass,
                       dom_for_fun_level=dsdb.DS_DOMAIN_FUNCTION_2003,
                       hostname=netbiosname.lower(), machinepass=machinepass,
                       serverrole=serverrole, samdb_fill=FILL_FULL,
                       useeadb=useeadb, dns_backend=dns_backend, use_rfc2307=True,
                       use_ntvfs=use_ntvfs, skip_sysvolacl=True)
    result.report_logger(logger)

    # Import WINS database
    logger.info("Importing WINS database")

    if samba3_winsdb:
        import_wins(Ldb(result.paths.winsdb), samba3_winsdb)

    # Set Account policy
    logger.info("Importing Account policy")
    import_sam_policy(result.samdb, policy, logger)

    # Migrate IDMAP database
    logger.info("Importing idmap database")
    import_idmap(result.idmap, samba3, logger)

    # Set the s3 context for samba4 configuration
    new_lp_ctx = s3param.get_context()
    new_lp_ctx.load(result.lp.configfile)
    new_lp_ctx.set("private dir", result.lp.get("private dir"))
    new_lp_ctx.set("state directory", result.lp.get("state directory"))
    new_lp_ctx.set("lock directory", result.lp.get("lock directory"))

    # Connect to samba4 backend
    s4_passdb = passdb.PDB(new_lp_ctx.get("passdb backend"))

    # Start a new transaction (should speed this up a little, due to index churn)
    result.samdb.transaction_start()

    logger.info("Adding groups")
    try:
        # Export groups to samba4 backend
        logger.info("Importing groups")
        for g in grouplist:
            # Ignore uninitialized groups (gid = -1)
            if g.gid != -1:
                add_group_from_mapping_entry(result.samdb, g, logger)
                add_ad_posix_idmap_entry(result.samdb, g.sid, g.gid, "ID_TYPE_GID", logger)
                add_posix_attrs(samdb=result.samdb, sid=g.sid, nisdomain=domainname.lower(), xid_type="ID_TYPE_GID", logger=logger)

    except:
        # We need this, so that we do not give even more errors due to not cancelling the transaction
        result.samdb.transaction_cancel()
        raise

    logger.info("Committing 'add groups' transaction to disk")
    result.samdb.transaction_commit()

    logger.info("Adding users")

    # Export users to samba4 backend
    logger.info("Importing users")
    for username in userdata:
        if username.lower() == 'administrator':
            if userdata[username].user_sid != dom_sid(str(domainsid) + "-500"):
                logger.error("User 'Administrator' in your existing directory has SID %s, expected it to be %s" % (userdata[username].user_sid, dom_sid(str(domainsid) + "-500")))
                raise ProvisioningError("User 'Administrator' in your existing directory does not have SID ending in -500")
        if username.lower() == 'root':
            if userdata[username].user_sid == dom_sid(str(domainsid) + "-500"):
                logger.warn('User root has been replaced by Administrator')
            else:
                logger.warn('User root has been kept in the directory, it should be removed in favour of the Administrator user')

        s4_passdb.add_sam_account(userdata[username])
        if username in uids:
            add_ad_posix_idmap_entry(result.samdb, userdata[username].user_sid, uids[username], "ID_TYPE_UID", logger)
            # Only set the posix attrs when we have the complete triple.
            if (username in homes) and (homes[username] is not None) and \
               (username in shells) and (shells[username] is not None) and \
               (username in pgids) and (pgids[username] is not None):
                add_posix_attrs(samdb=result.samdb, sid=userdata[username].user_sid, nisdomain=domainname.lower(), xid_type="ID_TYPE_UID", home=homes[username], shell=shells[username], pgid=pgids[username], logger=logger)

    logger.info("Adding users to groups")
    # Start a new transaction (should speed this up a little, due to index churn)
    result.samdb.transaction_start()

    try:
        for g in grouplist:
            if str(g.sid) in groupmembers:
                add_users_to_group(result.samdb, g, groupmembers[str(g.sid)], logger)

    except:
        # We need this, so that we do not give even more errors due to not cancelling the transaction
        result.samdb.transaction_cancel()
        raise

    logger.info("Committing 'add users to groups' transaction to disk")
    result.samdb.transaction_commit()

    # Set password for administrator
    if admin_user:
        logger.info("Setting password for administrator")
        admin_userdata = s4_passdb.getsampwnam("administrator")
        admin_userdata.nt_passwd = userdata[admin_user].nt_passwd
        if userdata[admin_user].lanman_passwd:
            admin_userdata.lanman_passwd = userdata[admin_user].lanman_passwd
        admin_userdata.pass_last_set_time = userdata[admin_user].pass_last_set_time
        if userdata[admin_user].pw_history:
            admin_userdata.pw_history = userdata[admin_user].pw_history
        s4_passdb.update_sam_account(admin_userdata)
        logger.info("Administrator password has been set to password of user '%s'", admin_user)

    if result.server_role == "active directory domain controller":
        setsysvolacl(result.samdb, result.paths.sysvol,
                     result.paths.root_uid, result.paths.root_gid,
                     security.dom_sid(result.domainsid), result.names.dnsdomain,
                     result.names.domaindn, result.lp, use_ntvfs)

    # FIXME: import_registry(registry.Registry(), samba3.get_registry())
    # FIXME: shares
diff --git a/python/samba/upgradehelpers.py b/python/samba/upgradehelpers.py
new file mode 100644
index 0000000..badfc46
--- /dev/null
+++ b/python/samba/upgradehelpers.py
@@ -0,0 +1,834 @@
+# Helpers for provision stuff
+# Copyright (C) Matthieu Patou <mat@matws.net> 2009-2012
+#
+# Based on provision a Samba4 server by
+# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007-2008
+# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2008
+#
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""Helpers used for upgrading between different database formats."""
+
+import os
+import re
+import shutil
+import samba
+
+from samba.common import cmp
+from samba import Ldb, version
+from ldb import SCOPE_SUBTREE, SCOPE_ONELEVEL, SCOPE_BASE
+import ldb
+from samba.provision import (provision_paths_from_lp,
+ getpolicypath, create_gpo_struct,
+ provision, ProvisioningError,
+ secretsdb_self_join)
+from samba.provision.common import FILL_FULL
+from samba.dcerpc import drsblobs
+from samba.dcerpc.misc import SEC_CHAN_BDC
+from samba.ndr import ndr_unpack
+from samba.samdb import SamDB
+from samba import _glue
+import tempfile
+
# The registry-related ldbs are commented out throughout this module because
# their paths in the provisionPath object are relative, so opening them would
# create files in the current working directory, which is not what we want.
# They are kept here, commented out, pending a proper cleanup.
# Message/severity levels passed to messagefunc-style callbacks; only some of
# them (SIMPLE, CHANGE) are used within this module.
ERROR = -1
SIMPLE = 0x00
CHANGE = 0x01
CHANGESD = 0x02
GUESS = 0x04
PROVISION = 0x08
CHANGEALL = 0xff

# Attributes that must never be copied verbatim from the reference provision
# into the updated one (DB-internal, computed, or security-sensitive values).
hashAttrNotCopied = set(["dn", "whenCreated", "whenChanged", "objectGUID",
    "uSNCreated", "replPropertyMetaData", "uSNChanged", "parentGUID",
    "objectCategory", "distinguishedName", "nTMixedDomain",
    "showInAdvancedViewOnly", "instanceType", "msDS-Behavior-Version",
    "nextRid", "cn", "versionNumber", "lmPwdHistory", "pwdLastSet",
    "ntPwdHistory", "unicodePwd", "dBCSPwd", "supplementalCredentials",
    "gPCUserExtensionNames", "gPCMachineExtensionNames", "maxPwdAge", "secret",
    "possibleInferiors", "privilege", "sAMAccountType"])
+
+
class ProvisionLDB(object):
    """Bundle of the LDB connections that make up a provision.

    Holds the sam, secrets, idmap and privilege databases (the registry
    hives hkcr/hkcu/hku/hklm are placeholders, see the module-level note)
    and offers grouped transaction handling across all of them.
    """

    def __init__(self):
        self.sam = None        # SamDB for sam.ldb
        self.secrets = None    # Ldb for secrets.ldb
        self.idmap = None      # Ldb for idmap.ldb
        self.privilege = None  # Ldb for privilege.ldb
        self.hkcr = None       # registry hives: currently never opened
        self.hkcu = None
        self.hku = None
        self.hklm = None

    def dbs(self):
        # The databases that take part in grouped transactions.
        return (self.sam, self.secrets, self.idmap, self.privilege)

    def startTransactions(self):
        """Start a transaction on every managed database."""
        for db in self.dbs():
            db.transaction_start()
# TO BE DONE
# self.hkcr.transaction_start()
# self.hkcu.transaction_start()
# self.hku.transaction_start()
# self.hklm.transaction_start()

    def groupedRollback(self):
        """Cancel the transaction on every managed database.

        :return: True if every cancel succeeded, False otherwise
        """
        ok = True
        for db in self.dbs():
            try:
                db.transaction_cancel()
            except Exception:
                ok = False
        return ok
# TO BE DONE
# self.hkcr.transaction_cancel()
# self.hkcu.transaction_cancel()
# self.hku.transaction_cancel()
# self.hklm.transaction_cancel()

    def groupedCommit(self):
        """Two-phase commit across all managed databases.

        Prepare-commits every database first; if any prepare or commit
        fails, everything is rolled back.

        :return: True on success, otherwise the result of groupedRollback()
        """
        try:
            for db in self.dbs():
                db.transaction_prepare_commit()
        except Exception:
            return self.groupedRollback()
# TO BE DONE
# self.hkcr.transaction_prepare_commit()
# self.hkcu.transaction_prepare_commit()
# self.hku.transaction_prepare_commit()
# self.hklm.transaction_prepare_commit()
        try:
            for db in self.dbs():
                db.transaction_commit()
        except Exception:
            return self.groupedRollback()

# TO BE DONE
# self.hkcr.transaction_commit()
# self.hkcu.transaction_commit()
# self.hku.transaction_commit()
# self.hklm.transaction_commit()
        return True
+
+
def get_ldbs(paths, creds, session, lp):
    """Return LDB object mapped on most important databases

    :param paths: An object holding the different importants paths for provision object
    :param creds: Credential used for opening LDB files
    :param session: Session to use for opening LDB files
    :param lp: A loadparam object
    :return: A ProvisionLDB object that contains LDB object for the different LDB files of the provision"""

    ldbs = ProvisionLDB()

    # sam.ldb is opened through the samba_dsdb module stack (unlike the
    # plain Ldb opens below).
    ldbs.sam = SamDB(paths.samdb,
                     session_info=session,
                     credentials=creds,
                     lp=lp,
                     options=["modules:samba_dsdb"],
                     flags=0)
    ldbs.secrets = Ldb(paths.secrets, session_info=session, credentials=creds, lp=lp)
    ldbs.idmap = Ldb(paths.idmapdb, session_info=session, credentials=creds, lp=lp)
    ldbs.privilege = Ldb(paths.privilege, session_info=session, credentials=creds, lp=lp)
# Registry ldbs intentionally not opened (see module-level note).
# ldbs.hkcr = Ldb(paths.hkcr, session_info=session, credentials=creds, lp=lp)
# ldbs.hkcu = Ldb(paths.hkcu, session_info=session, credentials=creds, lp=lp)
# ldbs.hku = Ldb(paths.hku, session_info=session, credentials=creds, lp=lp)
# ldbs.hklm = Ldb(paths.hklm, session_info=session, credentials=creds, lp=lp)

    return ldbs
+
+
def usn_in_range(usn, range):
    """Check whether usn falls within one of the provided ranges.

    The flat list is scanned in order; lower bounds sit at even indices
    and higher bounds at odd indices.

    :param usn: An integer value corresponding to the usn that we want to update
    :param range: A list of integer representing ranges, lower bounds are in
        the even indices, higher in odd indices
    :return: True if the usn is in one of the range, False otherwise
    """
    for idx, bound in enumerate(range):
        if usn == int(bound):
            # Exactly on a bound: always inside the range.
            return True
        if usn < int(bound):
            # Strictly below this bound: inside only when the bound is an
            # upper (odd-indexed) one.
            return idx % 2 == 1
    return False
+
+
def get_paths(param, targetdir=None, smbconf=None):
    """Get paths to important provision objects (smb.conf, ldb files, ...)

    :param param: Param object
    :param targetdir: Directory where the provision is (or will be) stored
    :param smbconf: Path to the smb.conf file
    :return: A list with the path of important provision objects
    :raises ProvisioningError: if no smb.conf can be found
    """
    if targetdir is not None:
        # A targetdir implies <targetdir>/etc/smb.conf; create dirs as needed.
        if not os.path.exists(targetdir):
            os.mkdir(targetdir)
        etcdir = os.path.join(targetdir, "etc")
        if not os.path.exists(etcdir):
            os.makedirs(etcdir)
        smbconf = os.path.join(etcdir, "smb.conf")
    if smbconf is None:
        # Fall back to the system default smb.conf location.
        smbconf = param.default_path()

    if not os.path.exists(smbconf):
        raise ProvisioningError("Unable to find smb.conf at %s" % smbconf)

    lp = param.LoadParm()
    lp.load(smbconf)
    paths = provision_paths_from_lp(lp, lp.get("realm"))
    return paths
+
+
def update_policyids(names, samdb):
    """Update policy ids that could have changed after sam update

    :param names: List of key provision parameters
    :param samdb: An Ldb object connected with the sam DB
    """
    # policy guid: the cn is "{GUID}", strip the braces to keep the bare id
    res = samdb.search(expression="(displayName=Default Domain Policy)",
                       base="CN=Policies,CN=System," + str(names.rootdn),
                       scope=SCOPE_ONELEVEL, attrs=["cn", "displayName"])
    names.policyid = str(res[0]["cn"]).replace("{", "").replace("}", "")
    # dc policy guid
    res2 = samdb.search(expression="(displayName=Default Domain Controllers"
                        " Policy)",
                        base="CN=Policies,CN=System," + str(names.rootdn),
                        scope=SCOPE_ONELEVEL, attrs=["cn", "displayName"])
    if len(res2) == 1:
        names.policyid_dc = str(res2[0]["cn"]).replace("{", "").replace("}", "")
    else:
        # No (or ambiguous) DC policy found; update_gpo() will raise on this.
        names.policyid_dc = None
+
+
def newprovision(names, session, smbconf, provdir, logger, base_schema=None, adprep_level=None):
    """Create a new provision.

    This provision will be the reference for knowing what has changed since
    the latest upgrade of the current provision

    :param names: List of provision parameters
    :param session: Session object
    :param smbconf: Path to the smb.conf file
    :param provdir: Directory where the provision will be stored
    :param logger: A Logger
    :param base_schema: Passed through to provision() as the base schema
    :param adprep_level: Passed through to provision() as the adprep level
    :return: The result of provision()
    """
    # Throw away any stale reference provision first.
    if os.path.isdir(provdir):
        shutil.rmtree(provdir)
    os.mkdir(provdir)
    logger.info("Provision stored in %s", provdir)
    return provision(logger, session, smbconf=smbconf,
                     targetdir=provdir, samdb_fill=FILL_FULL, realm=names.realm,
                     domain=names.domain, domainguid=names.domainguid,
                     domainsid=names.domainsid, ntdsguid=names.ntdsguid,
                     policyguid=names.policyid, policyguid_dc=names.policyid_dc,
                     hostname=names.netbiosname.lower(), hostip=None, hostip6=None,
                     invocationid=names.invocation, adminpass=names.adminpass,
                     krbtgtpass=None, machinepass=None, dnspass=None, root=None,
                     nobody=None, users=None,
                     serverrole="domain controller",
                     dom_for_fun_level=names.domainlevel, dns_backend=names.dns_backend,
                     useeadb=True, use_ntvfs=True, base_schema=base_schema,
                     adprep_level=adprep_level)
+
+
def dn_sort(x, y):
    """cmp-style comparator sorting two DNs lexicographically, putting
    higher level DNs first.

    So given the dns cn=bar,cn=foo and cn=foo the latter will be returned as
    smaller

    :param x: First object to compare
    :param y: Second object to compare
    :return: negative, zero or positive, following the cmp() convention
    """
    # Split on commas that are not escaped with a backslash.
    p = re.compile(r'(?<!\\), ?')
    tab1 = p.split(str(x))
    tab2 = p.split(str(y))
    minimum = min(len(tab1), len(tab2))
    len1 = len(tab1) - 1
    len2 = len(tab2) - 1
    # Compare RDN components from the rightmost (topmost) one inward.
    # Note: python range go up to upper limit but do not include it
    for i in range(0, minimum):
        ret = cmp(tab1[len1 - i], tab2[len2 - i])
        if ret != 0:
            return ret
        else:
            if i == minimum - 1:
                # All shared components are equal: the shorter (higher level)
                # DN sorts first; equal-length DNs are not expected here.
                assert len1 != len2, "PB PB PB" + " ".join(tab1) + " / " + " ".join(tab2)
                if len1 > len2:
                    return 1
                else:
                    return -1
    return ret
+
+
def identic_rename(ldbobj, dn):
    """Perform a back and forth rename to trigger renaming on attribute that
    can't be directly modified.

    :param ldbobj: An Ldb Object
    :param dn: DN of the object to manipulate
    """
    (before, after) = str(dn).split('=', 1)
    # we need to use relax to avoid the subtree_rename constraints
    ldbobj.rename(dn, ldb.Dn(ldbobj, "%s=foo%s" % (before, after)), ["relax:0"])
    ldbobj.rename(ldb.Dn(ldbobj, "%s=foo%s" % (before, after)), dn, ["relax:0"])
+
+
def update_secrets(newsecrets_ldb, secrets_ldb, messagefunc):
    """Update secrets.ldb

    :param newsecrets_ldb: An LDB object that is connected to the secrets.ldb
        of the reference provision
    :param secrets_ldb: An LDB object that is connected to the secrets.ldb
        of the updated provision
    :param messagefunc: Callable invoked as messagefunc(level, text) for
        progress reporting; levels are the SIMPLE/CHANGE constants of this
        module
    """

    messagefunc(SIMPLE, "Update of secrets.ldb")
    # Synchronise the @MODULES record first so both databases use the same
    # module stack.
    reference = newsecrets_ldb.search(base="@MODULES", scope=SCOPE_BASE)
    current = secrets_ldb.search(base="@MODULES", scope=SCOPE_BASE)
    assert reference, "Reference modules list can not be empty"
    if len(current) == 0:
        # No modules present
        # NOTE(review): delta is computed here but the reference record is
        # added verbatim; the msg_diff result is unused in this branch.
        delta = secrets_ldb.msg_diff(ldb.Message(), reference[0])
        delta.dn = reference[0].dn
        secrets_ldb.add(reference[0])
    else:
        delta = secrets_ldb.msg_diff(current[0], reference[0])
        delta.dn = current[0].dn
        secrets_ldb.modify(delta)

    reference = newsecrets_ldb.search(expression="objectClass=top", base="",
                                      scope=SCOPE_SUBTREE, attrs=["dn"])
    current = secrets_ldb.search(expression="objectClass=top", base="",
                                 scope=SCOPE_SUBTREE, attrs=["dn"])
    hash_new = {}
    hash = {}
    listMissing = []
    listPresent = []

    empty = ldb.Message()
    # Index the reference entries by lowercased DN.
    for i in range(0, len(reference)):
        hash_new[str(reference[i]["dn"]).lower()] = reference[i]["dn"]

    # Create a hash for speeding the search of existing object in the
    # current provision
    for i in range(0, len(current)):
        hash[str(current[i]["dn"]).lower()] = current[i]["dn"]

    # Split reference entries into those missing from, and those already in,
    # the updated provision.
    for k in hash_new.keys():
        if k not in hash:
            listMissing.append(hash_new[k])
        else:
            listPresent.append(hash_new[k])

    # Add each missing entry, minus the attributes that must not be copied.
    for entry in listMissing:
        reference = newsecrets_ldb.search(expression="distinguishedName=%s" % entry,
                                          base="", scope=SCOPE_SUBTREE)
        delta = secrets_ldb.msg_diff(empty, reference[0])
        for att in hashAttrNotCopied:
            delta.remove(att)
        messagefunc(CHANGE, "Entry %s is missing from secrets.ldb" %
                    reference[0].dn)
        for att in delta:
            messagefunc(CHANGE, " Adding attribute %s" % att)
        delta.dn = reference[0].dn
        secrets_ldb.add(delta)

    # First pass over existing entries: a differing "name" attribute has to
    # be handled through a rename, not a modify.
    for entry in listPresent:
        reference = newsecrets_ldb.search(expression="distinguishedName=%s" % entry,
                                          base="", scope=SCOPE_SUBTREE)
        current = secrets_ldb.search(expression="distinguishedName=%s" % entry, base="",
                                     scope=SCOPE_SUBTREE)
        delta = secrets_ldb.msg_diff(current[0], reference[0])
        for att in hashAttrNotCopied:
            delta.remove(att)
        for att in delta:
            if att == "name":
                messagefunc(CHANGE, "Found attribute name on  %s,"
                            " must rename the DN" % (current[0].dn))
                identic_rename(secrets_ldb, reference[0].dn)
            else:
                delta.remove(att)

    # Second pass: apply the remaining attribute-level changes.
    for entry in listPresent:
        reference = newsecrets_ldb.search(expression="distinguishedName=%s" % entry, base="",
                                          scope=SCOPE_SUBTREE)
        current = secrets_ldb.search(expression="distinguishedName=%s" % entry, base="",
                                     scope=SCOPE_SUBTREE)
        delta = secrets_ldb.msg_diff(current[0], reference[0])
        for att in hashAttrNotCopied:
            delta.remove(att)
        for att in delta:
            # msDS-KeyVersionNumber is calculated, never written directly.
            if att == "msDS-KeyVersionNumber":
                delta.remove(att)
            if att != "dn":
                messagefunc(CHANGE,
                            "Adding/Changing attribute %s to %s" %
                            (att, current[0].dn))

        delta.dn = current[0].dn
        secrets_ldb.modify(delta)

    # Drop the old "dns" account if one is still present.
    res2 = secrets_ldb.search(expression="(samaccountname=dns)",
                              scope=SCOPE_SUBTREE, attrs=["dn"])

    if len(res2) == 1:
        messagefunc(SIMPLE, "Remove old dns account")
        secrets_ldb.delete(res2[0]["dn"])
+
+
def getOEMInfo(samdb, rootdn):
    """Return OEM Information on the top level. Samba4 uses this field to
    store version info.

    :param samdb: An LDB object connected to sam.ldb
    :param rootdn: Root DN of the domain
    :return: The content of the field oEMInformation (if any), else ""
    """
    res = samdb.search(expression="(objectClass=*)", base=str(rootdn),
                       scope=SCOPE_BASE, attrs=["dn", "oEMInformation"])
    if res and res[0].get("oEMInformation"):
        return res[0]["oEMInformation"]
    return ""
+
+
def updateOEMInfo(samdb, rootdn):
    """Update the OEMinfo field to add information about upgrade

    :param samdb: an LDB object connected to the sam DB
    :param rootdn: The string representation of the root DN of
        the provision (ie. DC=...,DC=...)
    """
    res = samdb.search(expression="(objectClass=*)", base=rootdn,
                       scope=SCOPE_BASE, attrs=["dn", "oEMInformation"])
    if len(res) > 0:
        if res[0].get("oEMInformation"):
            info = str(res[0]["oEMInformation"])
        else:
            info = ""
        # Append the samba version we are upgrading to.
        info = "%s, upgrade to %s" % (info, version)
        delta = ldb.Message()
        delta.dn = ldb.Dn(samdb, str(res[0]["dn"]))
        delta["oEMInformation"] = ldb.MessageElement(info, ldb.FLAG_MOD_REPLACE,
                                                     "oEMInformation")
        samdb.modify(delta)
+
+
def update_gpo(paths, names):
    """Create missing GPO file object if needed

    :param paths: A provision paths object
    :param names: List of key provision parameters
    :raises ProvisioningError: if the domain controller policy ID is unknown
    """
    # Renamed local from 'dir' to avoid shadowing the builtin.
    policy_dir = getpolicypath(paths.sysvol, names.dnsdomain, names.policyid)
    if not os.path.isdir(policy_dir):
        create_gpo_struct(policy_dir)

    if names.policyid_dc is None:
        raise ProvisioningError("Policy ID for Domain controller is missing")
    dc_policy_dir = getpolicypath(paths.sysvol, names.dnsdomain, names.policyid_dc)
    if not os.path.isdir(dc_policy_dir):
        create_gpo_struct(dc_policy_dir)
+
+
def increment_calculated_keyversion_number(samdb, rootdn, hashDns):
    """For a given hash associating dn and a number, this function will
    update the replPropertyMetaData of each dn in the hash, so that the
    calculated value of the msDs-KeyVersionNumber is equal or superior to the
    one associated to the given dn.

    :param samdb: An SamDB object pointing to the sam
    :param rootdn: The base DN where we want to start
    :param hashDns: A hash with dn as key and number representing the
        minimum value of msDs-KeyVersionNumber that we want to
        have
    :raises ProvisioningError: if no user object is found under rootdn
    """
    entry = samdb.search(expression='(objectClass=user)',
                         base=ldb.Dn(samdb, str(rootdn)),
                         scope=SCOPE_SUBTREE, attrs=["msDs-KeyVersionNumber"],
                         controls=["search_options:1:2"])
    # NOTE(review): 'done' only counts how many entries were bumped; the
    # value is never returned or reported.
    done = 0
    if len(entry) == 0:
        raise ProvisioningError("Unable to find msDs-KeyVersionNumber")
    else:
        for e in entry:
            if str(e.dn).lower() in hashDns:
                val = e.get("msDs-KeyVersionNumber")
                if not val:
                    val = "0"
                # Minimum version wanted for this DN (this local shadows the
                # module-level 'version' import inside this function only).
                version = int(str(hashDns[str(e.dn).lower()]))
                if int(str(val)) < version:
                    done = done + 1
                    # Raising the replmetadata version of unicodePwd raises
                    # the calculated msDs-KeyVersionNumber.
                    samdb.set_attribute_replmetadata_version(str(e.dn),
                                                             "unicodePwd",
                                                             version, True)
+
+
def delta_update_basesamdb(refsampath, sampath, creds, session, lp, message):
    """Update the provision container db: sam.ldb
    This function is aimed for alpha9 and newer;

    :param refsampath: Path to the samdb in the reference provision
    :param sampath: Path to the samdb in the upgraded provision
    :param creds: Credential used for opening LDB files
    :param session: Session to use for opening LDB files
    :param lp: A loadparam object
    :param message: Callable invoked as message(level, text) for progress
        reporting
    :return: A msg_diff object with the difference between the @ATTRIBUTES
        of the current provision and the reference provision
    """

    message(SIMPLE,
            "Update base samdb by searching difference with reference one")
    # Open both databases without any module ("modules:") to see raw records.
    refsam = Ldb(refsampath, session_info=session, credentials=creds,
                 lp=lp, options=["modules:"])
    sam = Ldb(sampath, session_info=session, credentials=creds, lp=lp,
              options=["modules:"])

    empty = ldb.Message()
    deltaattr = None
    reference = refsam.search(expression="")

    for refentry in reference:
        entry = sam.search(expression="distinguishedName=%s" % refentry["dn"],
                           scope=SCOPE_SUBTREE)
        if not len(entry):
            # Entry exists only in the reference: add it, but never copy the
            # last-provision-USN marker.
            delta = sam.msg_diff(empty, refentry)
            message(CHANGE, "Adding %s to sam db" % str(refentry.dn))
            if str(refentry.dn) == "@PROVISION" and\
               delta.get(samba.provision.LAST_PROVISION_USN_ATTRIBUTE):
                delta.remove(samba.provision.LAST_PROVISION_USN_ATTRIBUTE)
            delta.dn = refentry.dn
            sam.add(delta)
        else:
            delta = sam.msg_diff(entry[0], refentry)
            if str(refentry.dn) == "@ATTRIBUTES":
                # Returned to the caller: diff of reference against current.
                deltaattr = sam.msg_diff(refentry, entry[0])
            if str(refentry.dn) == "@PROVISION" and\
               delta.get(samba.provision.LAST_PROVISION_USN_ATTRIBUTE):
                delta.remove(samba.provision.LAST_PROVISION_USN_ATTRIBUTE)
            if len(delta.items()) > 1:
                # More than just the dn differs: apply the modification.
                delta.dn = refentry.dn
                sam.modify(delta)

    return deltaattr
+
+
def construct_existor_expr(attrs):
    """Construct a exists or LDAP search expression.

    :param attrs: List of attribute on which we want to create the search
        expression.
    :return: A string representing the expression, if attrs is empty an
        empty string is returned
    """
    if not attrs:
        return ""
    clauses = "".join("(%s=*)" % att for att in attrs)
    return "(|%s)" % clauses
+
+
def update_machine_account_password(samdb, secrets_ldb, names):
    """Update (change) the password of the current DC both in the SAM db and in
    secret one

    :param samdb: An LDB object related to the sam.ldb file of a given provision
    :param secrets_ldb: An LDB object related to the secrets.ldb file of a given
        provision
    :param names: List of key provision parameters
    :raises ProvisioningError: if the machine account is not a SEC_CHAN_BDC
        secure channel
    """

    expression = "samAccountName=%s$" % names.netbiosname
    secrets_msg = secrets_ldb.search(expression=expression,
                                     attrs=["secureChannelType"])
    if int(secrets_msg[0]["secureChannelType"][0]) == SEC_CHAN_BDC:
        res = samdb.search(expression=expression, attrs=[])
        assert(len(res) == 1)

        msg = ldb.Message(res[0].dn)
        machinepass = samba.generate_random_machine_password(120, 120)
        mputf16 = machinepass.encode('utf-16-le')
        msg["clearTextPassword"] = ldb.MessageElement(mputf16,
                                                      ldb.FLAG_MOD_REPLACE,
                                                      "clearTextPassword")
        samdb.modify(msg)

        # Re-read the key version number *after* the password change so the
        # new kvno is what lands in secrets.ldb.
        res = samdb.search(expression=("samAccountName=%s$" % names.netbiosname),
                           attrs=["msDs-keyVersionNumber"])
        assert(len(res) == 1)
        kvno = int(str(res[0]["msDs-keyVersionNumber"]))
        secChanType = int(secrets_msg[0]["secureChannelType"][0])

        secretsdb_self_join(secrets_ldb, domain=names.domain,
                            realm=names.realm,
                            domainsid=names.domainsid,
                            dnsdomain=names.dnsdomain,
                            netbiosname=names.netbiosname,
                            machinepass=machinepass,
                            key_version_number=kvno,
                            secure_channel_type=secChanType)
    else:
        # Fix: the two fragments previously concatenated without a space,
        # producing "...Secure Channelof type...".
        raise ProvisioningError("Unable to find a Secure Channel "
                                "of type SEC_CHAN_BDC")
+
+
def update_dns_account_password(samdb, secrets_ldb, names):
    """Update (change) the password of the dns both in the SAM db and in
    secret one

    If no dns-<netbiosname> account exists in secrets.ldb, nothing is done.

    :param samdb: An LDB object related to the sam.ldb file of a given provision
    :param secrets_ldb: An LDB object related to the secrets.ldb file of a given
        provision
    :param names: List of key provision parameters"""

    expression = "samAccountName=dns-%s" % names.netbiosname
    secrets_msg = secrets_ldb.search(expression=expression)
    if len(secrets_msg) == 1:
        res = samdb.search(expression=expression, attrs=[])
        assert(len(res) == 1)

        msg = ldb.Message(res[0].dn)
        machinepass = samba.generate_random_password(128, 255)
        mputf16 = machinepass.encode('utf-16-le')
        msg["clearTextPassword"] = ldb.MessageElement(mputf16,
                                                      ldb.FLAG_MOD_REPLACE,
                                                      "clearTextPassword")

        samdb.modify(msg)

        # Read the key version number after the password change so the new
        # value lands in secrets.ldb alongside the new secret.
        res = samdb.search(expression=expression,
                           attrs=["msDs-keyVersionNumber"])
        assert(len(res) == 1)
        kvno = str(res[0]["msDs-keyVersionNumber"])

        msg = ldb.Message(secrets_msg[0].dn)
        msg["secret"] = ldb.MessageElement(machinepass,
                                           ldb.FLAG_MOD_REPLACE,
                                           "secret")
        msg["msDS-KeyVersionNumber"] = ldb.MessageElement(kvno,
                                                          ldb.FLAG_MOD_REPLACE,
                                                          "msDS-KeyVersionNumber")

        secrets_ldb.modify(msg)
+
+
def update_krbtgt_account_password(samdb):
    """Update (change) the password of the krbtgt account

    :param samdb: An LDB object related to the sam.ldb file of a given provision"""

    expression = "samAccountName=krbtgt"
    res = samdb.search(expression=expression, attrs=[])
    assert(len(res) == 1)

    msg = ldb.Message(res[0].dn)
    # Note that the krbtgtpass value written here is ignored
    # as the backend (password_hash.c) will generate its
    # own random values for the krbtgt keys
    krbtgtpass = samba.generate_random_machine_password(128, 255)
    kputf16 = krbtgtpass.encode('utf-16-le')
    msg["clearTextPassword"] = ldb.MessageElement(kputf16,
                                                  ldb.FLAG_MOD_REPLACE,
                                                  "clearTextPassword")

    samdb.modify(msg)
+
+
def search_constructed_attrs_stored(samdb, rootdn, attrs):
    """Search a given sam DB for calculated attributes that are
    still stored in the db.

    :param samdb: An LDB object pointing to the sam
    :param rootdn: The base DN where the search should start
    :param attrs: A list of attributes to be searched
    :return: A hash with attributes as key and an array of
        array. Each array contains the dn and the associated
        values for this attribute as they are stored in the
        sam."""

    stored = {}
    expr = construct_existor_expr(attrs)
    if expr == "":
        return stored
    entries = samdb.search(expression=expr, base=ldb.Dn(samdb, str(rootdn)),
                           scope=SCOPE_SUBTREE, attrs=attrs,
                           controls=["search_options:1:2", "bypassoperational:0"])
    # Group values by attribute, keyed by lowercased DN.
    for ent in entries:
        for att in attrs:
            if ent.get(att):
                stored.setdefault(att, {})[str(ent.dn).lower()] = str(ent[att])
    return stored
+
+
def findprovisionrange(samdb, basedn):
    """ Find ranges of usn grouped by invocation id and then by timestamp
    rounded at 1 minute

    :param samdb: An LDB object pointing to the samdb
    :param basedn: The DN of the forest

    :return: A two level dictionary with invocation id as the
        first level, timestamp as the second one and then
        max, min, and number as subkeys, representing respectively
        the maximum usn for the range, the minimum usn and the number
        of object with usn in this range.
    """
    nb_obj = 0
    hash_id = {}

    res = samdb.search(base=basedn, expression="objectClass=*",
                       scope=ldb.SCOPE_SUBTREE,
                       attrs=["replPropertyMetaData"],
                       controls=["search_options:1:2"])

    for e in res:
        nb_obj = nb_obj + 1
        # NOTE(review): ndr_unpack normally expects bytes; str() of the ldb
        # element looks Python2-ish here — confirm behaviour under Python 3.
        obj = ndr_unpack(drsblobs.replPropertyMetaDataBlob,
                         str(e["replPropertyMetaData"])).ctr

        for o in obj.array:
            # like a timestamp but with the resolution of 1 minute
            minutestamp = _glue.nttime2unix(o.originating_change_time) // 60
            hash_ts = hash_id.get(str(o.originating_invocation_id))

            if hash_ts is None:
                # First time we see this invocation id.
                ob = {}
                ob["min"] = o.originating_usn
                ob["max"] = o.originating_usn
                ob["num"] = 1
                ob["list"] = [str(e.dn)]
                hash_ts = {}
            else:
                ob = hash_ts.get(minutestamp)
                if ob is None:
                    # First entry for this minute bucket.
                    ob = {}
                    ob["min"] = o.originating_usn
                    ob["max"] = o.originating_usn
                    ob["num"] = 1
                    ob["list"] = [str(e.dn)]
                else:
                    # Widen the usn range; count each DN only once.
                    if ob["min"] > o.originating_usn:
                        ob["min"] = o.originating_usn
                    if ob["max"] < o.originating_usn:
                        ob["max"] = o.originating_usn
                    if not (str(e.dn) in ob["list"]):
                        ob["num"] = ob["num"] + 1
                        ob["list"].append(str(e.dn))
            hash_ts[minutestamp] = ob
            hash_id[str(o.originating_invocation_id)] = hash_ts

    return (hash_id, nb_obj)
+
+
def print_provision_ranges(dic, limit_print, dest, samdb_path, invocationid):
    """ print the different ranges passed as parameter

    :param dic: A dictionary as returned by findprovisionrange
    :param limit_print: minimum number of object in a range in order to print it
    :param dest: Destination directory for the proposed ldif file
    :param samdb_path: Path to the sam.ldb file
    :param invocationid: Invocation ID for the current provision
    """
    ldif = ""

    # Renamed loop variable from 'id' to avoid shadowing the builtin.
    for inv_id in dic:
        hash_ts = dic[inv_id]
        sorted_keys = sorted(hash_ts.keys())

        kept_record = []
        for k in sorted_keys:
            obj = hash_ts[k]
            if obj["num"] > limit_print:
                dt = _glue.nttime2string(_glue.unix2nttime(k * 60))
                print("%s # of modification: %d \tmin: %d max: %d" % (dt, obj["num"],
                                                                      obj["min"],
                                                                      obj["max"]))
            if hash_ts[k]["num"] > 600:
                kept_record.append(k)

        # Let's try to concatenate consecutive block if they are in the almost same minutestamp
        for i in range(0, len(kept_record)):
            if i != 0:
                key1 = kept_record[i]
                key2 = kept_record[i - 1]
                if key1 - key2 == 1:
                    # previous record is just 1 minute away from current
                    if int(hash_ts[key1]["min"]) == int(hash_ts[key2]["max"]) + 1:
                        # Copy the highest USN in the previous record
                        # and mark the current as skipped
                        hash_ts[key2]["max"] = hash_ts[key1]["max"]
                        hash_ts[key1]["skipped"] = True

        for k in kept_record:
            obj = hash_ts[k]
            if obj.get("skipped") is None:
                ldif = "%slastProvisionUSN: %d-%d;%s\n" % (ldif, obj["min"],
                                                           obj["max"], inv_id)

    if ldif != "":
        # Renamed from 'file' to avoid shadowing the builtin.
        fd, ldif_path = tempfile.mkstemp(dir=dest, prefix="usnprov", suffix=".ldif")
        print()
        print("To track the USNs modified/created by provision and upgrade provision,")
        print(" the following ranges are proposed to be added to your provision sam.ldb: \n%s" % ldif)
        print("We recommend to review them, and if it's correct to integrate the following ldif: %s in your sam.ldb" % ldif_path)
        print("You can load this file like this: ldbadd -H %s %s\n" % (str(samdb_path), ldif_path))
        ldif = "dn: @PROVISION\nprovisionnerID: %s\n%s" % (invocationid, ldif)
        # Fix: os.write() requires a bytes-like object in Python 3; the ldif
        # was built as str and previously raised TypeError here.
        os.write(fd, ldif.encode('utf-8'))
        os.close(fd)
+
+
def int64range2str(value):
    """Display the int64 range stored in value as xxx-yyy

    The low 32 bits hold the start of the range, the high 32 bits the end.

    :param value: The int64 range
    :return: A string of the representation of the range
    """
    lvalue = int(value)
    # Result no longer bound to a local named 'str' (shadowed the builtin).
    return "%d-%d" % (lvalue & 0xFFFFFFFF, lvalue >> 32)
diff --git a/python/samba/uptodateness.py b/python/samba/uptodateness.py
new file mode 100644
index 0000000..49c984a
--- /dev/null
+++ b/python/samba/uptodateness.py
@@ -0,0 +1,201 @@
+# Uptodateness utils
+#
+# Copyright (C) Andrew Bartlett 2015, 2018
+# Copyright (C) Douglas Bagnall <douglas.bagnall@catalyst.net.nz>
+# Copyright (C) Joe Guo <joeg@catalyst.net.nz>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import sys
+import time
+
+from ldb import SCOPE_BASE, LdbError
+
+from samba import nttime2unix, dsdb
+from samba.netcmd import CommandError
+from samba.samdb import SamDB
+from samba.kcc import KCC
+
+
def get_kcc_and_dsas(url, lp, creds):
    """Get a readonly KCC object and the list of DSAs it knows about.

    :param url: samdb URL to load
    :param lp: A loadparm object
    :param creds: Credentials for the connection
    :return: A (kcc, dsas) tuple, where dsas is a set of DSA DNs
    """
    unix_now = int(time.time())
    kcc = KCC(unix_now, readonly=True)
    kcc.load_samdb(url, lp, creds)

    dsa_list = kcc.list_dsas()
    dsas = set(dsa_list)
    if len(dsas) != len(dsa_list):
        print("There seem to be duplicate dsas", file=sys.stderr)

    return kcc, dsas
+
+
def get_partition_maps(samdb):
    """Generate dictionaries mapping short partition names to the
    appropriate DNs, and the reverse.

    :param samdb: A SamDB connection
    :return: (short_to_long, long_to_short) dicts
    """
    base_dn = samdb.domain_dn()
    config_dn = samdb.get_config_basedn()
    short_to_long = {
        "DOMAIN": base_dn,
        "CONFIGURATION": str(config_dn),
        "SCHEMA": "CN=Schema,%s" % config_dn,
        "DNSDOMAIN": "DC=DomainDnsZones,%s" % base_dn,
        "DNSFOREST": "DC=ForestDnsZones,%s" % base_dn,
    }

    long_to_short = {full_dn: short for short, full_dn in short_to_long.items()}

    return short_to_long, long_to_short
+
+
def get_partition(samdb, part):
    """Resolve a partition nickname to its full DN.

    Allow people to say "--partition=DOMAIN" rather than
    "--partition=DC=blah,DC=...".

    :param samdb: A SamDB connection
    :param part: Partition nickname or DN, or None
    :return: The resolved partition DN, or None if part was None
    :raises CommandError: if part matches no known partition
    """
    if part is not None:
        short_partitions, long_partitions = get_partition_maps(samdb)
        part = short_partitions.get(part.upper(), part)
        if part not in long_partitions:
            raise CommandError("unknown partition %s" % part)
    return part
+
+
def get_utdv(samdb, dn):
    """This finds the uptodateness vector in the database.

    :param samdb: SamDB to query
    :param dn: Partition DN whose up-to-dateness vector is wanted
    :return: A list of (dsa_dn, invocation_id, highest_usn, last_sync_unix)
        cursors
    """
    cursors = []
    config_dn = samdb.get_config_basedn()
    for c in dsdb._dsdb_load_udv_v2(samdb, dn):
        inv_id = str(c.source_dsa_invocation_id)
        # Map the invocation id back to the NTDS Settings object of its DSA.
        res = samdb.search(base=config_dn,
                           expression=("(&(invocationId=%s)"
                                       "(objectClass=nTDSDSA))" % inv_id),
                           attrs=["distinguishedName", "invocationId"])
        try:
            settings_dn = str(res[0]["distinguishedName"][0])
            prefix, dsa_dn = settings_dn.split(',', 1)
        except IndexError as e:
            # No DSA found for this invocation id; skip the cursor.
            continue
        if prefix != 'CN=NTDS Settings':
            raise CommandError("Expected NTDS Settings DN, got %s" %
                               settings_dn)

        cursors.append((dsa_dn,
                        inv_id,
                        int(c.highest_usn),
                        nttime2unix(c.last_sync_success)))
    return cursors
+
+
def get_own_cursor(samdb):
    """Return (highestCommittedUSN, current unix time) for this server."""
    res = samdb.search(base="",
                       scope=SCOPE_BASE,
                       attrs=["highestCommittedUSN"])
    usn = int(res[0]["highestCommittedUSN"][0])
    now = int(time.time())
    return (usn, now)
+
+
def get_utdv_edges(local_kcc, dsas, part_dn, lp, creds):
    """Build a matrix of up-to-dateness vectors for a partition.

    Each DSA is contacted over LDAP; unreachable ones are reported on
    stderr and omitted from the result.

    :param local_kcc: KCC object with a loaded samdb
    :param dsas: Iterable of DSA DNs
    :param part_dn: Partition DN to query
    :param lp: A loadparm object
    :param creds: Credentials for the LDAP connections
    :return: dict mapping dsa_dn -> {peer_dsa_dn: usn}
    """
    # we talk to each remote and make a matrix of the vectors
    # for each partition
    # normalise by oldest
    utdv_edges = {}
    for dsa_dn in dsas:
        res = local_kcc.samdb.search(dsa_dn,
                                     scope=SCOPE_BASE,
                                     attrs=["dNSHostName"])
        ldap_url = "ldap://%s" % res[0]["dNSHostName"][0]
        try:
            samdb = SamDB(url=ldap_url, credentials=creds, lp=lp)
            cursors = get_utdv(samdb, part_dn)
            # The DSA's own highestCommittedUSN acts as its self-cursor.
            own_usn, own_time = get_own_cursor(samdb)
            remotes = {dsa_dn: own_usn}
            for dn, guid, usn, t in cursors:
                remotes[dn] = usn
        except LdbError as e:
            print("Could not contact %s (%s)" % (ldap_url, e),
                  file=sys.stderr)
            continue
        utdv_edges[dsa_dn] = remotes
    return utdv_edges
+
+
def get_utdv_distances(utdv_edges, dsas):
    """Compute replication distances between all DSA pairs.

    The distance recorded under distances[target][source] is how far
    source's knowledge of target lags behind target's own self-cursor.
    Missing vectors or entries are reported on stderr and omitted.

    :param utdv_edges: dict mapping dsa_dn -> {peer_dsa_dn: usn}
    :param dsas: Iterable of DSA DNs
    :return: dict mapping dsa_dn -> {peer_dsa_dn: distance}
    """
    distances = {}
    for target in dsas:
        # The DSA's own self-cursor is the reference point; default to 0
        # when it is unknown.
        try:
            peak = utdv_edges[target][target]
        except KeyError:
            peak = 0
        row = {}
        distances[target] = row
        for source in dsas:
            if source not in utdv_edges:
                print("missing dn %s from UTD vector list" % source,
                      file=sys.stderr)
                continue
            if target not in utdv_edges[source]:
                print(f"Missing dn {target} from UTD vector for dsa {source}",
                      file=sys.stderr)
                continue
            row[source] = peak - utdv_edges[source][target]
    return distances
+
+
def get_utdv_max_distance(distances):
    """Return the largest distance in any vector, never less than zero."""
    # Seed with 0 so an empty or all-negative input still yields 0, as the
    # running-max implementation did.
    all_distances = [0]
    for vector in distances.values():
        all_distances.extend(vector.values())
    return max(all_distances)
+
+
def get_utdv_summary(distances, filters=None):
    """Summarise a distance matrix as maximum, median and failure count.

    Self-to-self distances are excluded. The failure count is the number
    of missing pair entries. The median is a float, rounded to one decimal
    place when averaging two middle values.

    :param distances: dict mapping dn -> {dn: distance}
    :param filters: optional iterable of keys to keep in the result
    :return: dict with (a subset of) 'maximum', 'median' and 'failure'
    """
    values = sorted(
        distance
        for outer_dn, vector in distances.items()
        for inner_dn, distance in vector.items()
        if outer_dn != inner_dn
    )

    maximum = failure = 0
    median = 0.0  # could be average of 2 median values
    if values:
        maximum = values[-1]
        count = len(values)
        half = count // 2
        if count % 2 == 0:
            median = round((values[half - 1] + values[half]) / 2.0, 1)
        else:
            median = float(values[half])
        # every absent pair entry counts as a failure
        failure = len(distances) * (len(distances) - 1) - count

    summary = {
        'maximum': maximum,
        'median': median,
        'failure': failure,
    }

    if filters:
        return {key: summary[key] for key in filters}
    return summary
diff --git a/python/samba/xattr.py b/python/samba/xattr.py
new file mode 100644
index 0000000..2f4813c
--- /dev/null
+++ b/python/samba/xattr.py
@@ -0,0 +1,60 @@
+# Utility code for dealing with POSIX extended attributes
+#
+# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2012
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from samba.dcerpc import xattr
+import os
+import samba.xattr_native
+import shutil
+
+
def copyattrs(frompath, topath):
    """Copy ACL related attributes from a path to another path.

    Copies the NT ACL xattr and the POSIX access ACL xattr when present.
    Any failure to read or write an attribute is silently ignored, so
    this is strictly best-effort.

    :param frompath: path to read the attributes from
    :param topath: path to write the attributes to
    """
    for attr_name in (xattr.XATTR_NTACL_NAME, "system.posix_acl_access"):
        # Get the xattr attributes if any
        try:
            attribute = samba.xattr_native.wrap_getxattr(frompath,
                                                         attr_name)
            samba.xattr_native.wrap_setxattr(topath,
                                             attr_name,
                                             attribute)
        except Exception:
            # Best-effort: a missing or unsupported xattr is not fatal.
            pass
        # FIXME:Catch a specific exception
+
+
def copytree_with_xattrs(src, dst):
    """Recursively copy a directory tree using shutil.copy2(), preserving xattrs.

    The destination directory must not already exist.

    Unlike shutil.copytree(), errors are not collected into a list: the
    first exception raised by any copy operation propagates immediately
    to the caller.

    :param src: source directory
    :param dst: destination directory (created by this call)
    """
    names = os.listdir(src)

    os.makedirs(dst)
    for name in names:
        srcname = os.path.join(src, name)
        dstname = os.path.join(dst, name)
        if os.path.islink(srcname):
            # Recreate symlinks as symlinks rather than copying the target.
            linkto = os.readlink(srcname)
            os.symlink(linkto, dstname)
        elif os.path.isdir(srcname):
            copytree_with_xattrs(srcname, dstname)
        else:
            # Will raise a SpecialFileError for unsupported file types
            shutil.copy2(srcname, dstname)
    # Finally copy stat info (mode, timestamps) and ACL xattrs for the
    # directory itself.
    shutil.copystat(src, dst)
    copyattrs(src, dst)
diff --git a/python/wscript b/python/wscript
new file mode 100644
index 0000000..3e64399
--- /dev/null
+++ b/python/wscript
@@ -0,0 +1,140 @@
+#!/usr/bin/env python
+
+import os
+from waflib import Options, Errors
+
# work out what python external libraries we need to be successful
# Maps importable Python module name -> distro package name to suggest
# to the user when the module is missing (see find_third_party_module()).
selftest_pkgs = {
    'cryptography': 'python3-cryptography',
    'pyasn1': 'python3-pyasn1'
}

# Additional modules required only when building the AD DC.
ad_dc_pkgs = {
    'markdown': 'python3-markdown',
    'dns': 'python3-dnspython (python3-dns on some systems)'
}
+
+
def find_third_party_module(conf, module, package, required=True):
    """Check that a third-party Python module is installed system-wide.

    :param conf: the waf configuration context
    :param module: importable module name to probe (e.g. 'dns')
    :param package: distro package name to suggest when missing
    :param required: when True, a missing module aborts configure
    :return: True if the module is importable, False if it is missing
        and not required
    :raises Errors.WafError: if the module is missing and required
    """
    conf.COMPOUND_START("Checking for system installation of Python module %s" % module)
    try:
        __import__(module)
    except ImportError:
        conf.COMPOUND_END(False)
        if not required:
            return False
        # Message fixed: the original had a stray apostrophe after %s.
        raise Errors.WafError("""\
Unable to find Python module '%s'. Please install the system package: %s.
""" % (module, package))
    else:
        # Installed on the system
        conf.COMPOUND_END("system")

    return True
+
+
def configure(conf):
    """Configure-time checks for the Samba python build.

    Writes python/samba/provision/kerberos_implementation.py recording
    where KDB modules live for the detected Kerberos implementation, then
    verifies that the third-party python modules needed by selftest and
    the AD DC are installed on the build system.
    """
    if conf.env.disable_python:
        return

    kerberos_py = conf.srcnode.abspath() + "/python/samba/provision/kerberos_implementation.py"

    # 'with' guarantees the file is closed even if a write fails
    # (replaces the previous open/try/finally arrangement).
    with open(kerberos_py, 'w') as f:
        header = """#
# Copyright (c) 2016 Andreas Schneider <asn@samba.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
"""
        f.write(header)

        data = """kdb_modules_dir = "{0}"
"""

        if conf.env.HEIMDAL_KRB5_CONFIG:
            # Heimdal has no loadable KDB module directory.
            f.write(data.format(""))
        else:
            modulesdir = "%s/krb5/plugins/kdb" % conf.env.LIBDIR

            f.write(data.format(modulesdir))

    if conf.CONFIG_GET('ENABLE_SELFTEST'):
        for module, package in selftest_pkgs.items():
            find_third_party_module(conf, module, package)

        # Prefer dateutil.parser which is much more widely used.
        # (Package name fixed: the Debian/Fedora package is
        # 'python3-dateutil', not 'python3-dateutilis'/'python3-dateutils'.)
        if not find_third_party_module(conf,
                                       'dateutil.parser',
                                       'python3-dateutil',
                                       required=False):
            if not find_third_party_module(conf,
                                           'iso8601',
                                           'python3-iso8601',
                                           required=False):
                raise Errors.WafError("Could not find Python package "
                                      "'python3-dateutil' nor "
                                      "'python3-iso8601'. Please install "
                                      "one of the packages.")

    if not Options.options.without_ad_dc:
        for module, package in ad_dc_pkgs.items():
            find_third_party_module(conf, module, package)
+
+
def build(bld):
    """Declare build rules for the Samba python support code.

    Registers the grouping library that python extension modules link
    against, the samba._glue extension module, the embedded-python
    module-registration subsystem, and the install rules for the pure
    python tree.
    """
    # Helper library names are mangled for embedded-python builds.
    embedded_python = bld.pyembed_libname('LIBPYTHON')
    talloc_util = bld.pyembed_libname('pytalloc-util')
    param_util = bld.pyembed_libname('pyparam_util')
    rpc_util = bld.pyembed_libname('pyrpc_util')
    grouping_lib = bld.pyembed_libname('samba_python')

    # Grouping library pulled in by the python extension modules.
    bld.SAMBA_LIBRARY(grouping_lib,
                      source=[],
                      deps=' '.join((embedded_python, talloc_util, rpc_util)),
                      grouping_library=True,
                      private_library=True,
                      pyembed=True,
                      enabled=bld.PYTHON_BUILD_IS_ENABLED())

    # The samba._glue extension module (pyglue.c).
    bld.SAMBA_PYTHON('python_glue',
                     source='pyglue.c',
                     deps='''
                     %s
                     samba-util
                     netif
                     ndr
                     cmdline
                     gkdi
                     ''' % param_util + '''
                     %s
                     ''' % talloc_util,
                     realname='samba/_glue.so')

    # Registration table for built-in modules in embedded interpreters.
    bld.SAMBA_SUBSYSTEM(embedded_python,
                        source='modules.c',
                        public_deps='',
                        init_function_sentinel='{NULL,NULL}',
                        deps='talloc',
                        pyext=True,
                        enabled=bld.PYTHON_BUILD_IS_ENABLED())

    if bld.PYTHON_BUILD_IS_ENABLED():
        # install out various python scripts for use by make test
        bld.SAMBA_SCRIPT('samba_python_files',
                         pattern='samba/**/*.py',
                         installdir='python')

        bld.INSTALL_WILDCARD('${PYTHONARCHDIR}', 'samba/**/*.py', flat=False)